Diffstat (limited to 'fs')
-rw-r--r--fs/9p/cache.c2
-rw-r--r--fs/9p/vfs_file.c3
-rw-r--r--fs/9p/vfs_super.c10
-rw-r--r--fs/Kconfig6
-rw-r--r--fs/Makefile3
-rw-r--r--fs/affs/affs.h16
-rw-r--r--fs/affs/amigaffs.c2
-rw-r--r--fs/affs/amigaffs.h3
-rw-r--r--fs/affs/inode.c4
-rw-r--r--fs/affs/super.c14
-rw-r--r--fs/afs/callback.c1
-rw-r--r--fs/afs/cell.c4
-rw-r--r--fs/afs/cmservice.c16
-rw-r--r--fs/afs/dir.c153
-rw-r--r--fs/afs/dir_edit.c12
-rw-r--r--fs/afs/dynroot.c7
-rw-r--r--fs/afs/file.c18
-rw-r--r--fs/afs/flock.c4
-rw-r--r--fs/afs/fsclient.c73
-rw-r--r--fs/afs/inode.c15
-rw-r--r--fs/afs/internal.h18
-rw-r--r--fs/afs/rxrpc.c13
-rw-r--r--fs/afs/security.c108
-rw-r--r--fs/afs/server.c3
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/afs/vl_list.c4
-rw-r--r--fs/afs/vlclient.c17
-rw-r--r--fs/afs/volume.c2
-rw-r--r--fs/afs/xattr.c16
-rw-r--r--fs/afs/yfsclient.c77
-rw-r--r--fs/aio.c12
-rw-r--r--fs/attr.c21
-rw-r--r--fs/autofs/expire.c5
-rw-r--r--fs/befs/linuxvfs.c2
-rw-r--r--fs/bfs/inode.c2
-rw-r--r--fs/binfmt_elf.c104
-rw-r--r--fs/binfmt_elf_fdpic.c12
-rw-r--r--fs/block_dev.c158
-rw-r--r--fs/btrfs/Kconfig3
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/async-thread.c121
-rw-r--r--fs/btrfs/async-thread.h37
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/block-group.c3189
-rw-r--r--fs/btrfs/block-group.h251
-rw-r--r--fs/btrfs/block-rsv.c48
-rw-r--r--fs/btrfs/btrfs_inode.h3
-rw-r--r--fs/btrfs/check-integrity.c7
-rw-r--r--fs/btrfs/compression.c290
-rw-r--r--fs/btrfs/compression.h57
-rw-r--r--fs/btrfs/ctree.c729
-rw-r--r--fs/btrfs/ctree.h471
-rw-r--r--fs/btrfs/dedupe.h12
-rw-r--r--fs/btrfs/delalloc-space.c62
-rw-r--r--fs/btrfs/delayed-inode.c36
-rw-r--r--fs/btrfs/delayed-ref.c10
-rw-r--r--fs/btrfs/dev-replace.c5
-rw-r--r--fs/btrfs/dev-replace.h2
-rw-r--r--fs/btrfs/disk-io.c414
-rw-r--r--fs/btrfs/disk-io.h6
-rw-r--r--fs/btrfs/export.c4
-rw-r--r--fs/btrfs/extent-io-tree.h248
-rw-r--r--fs/btrfs/extent-tree.c3595
-rw-r--r--fs/btrfs/extent_io.c180
-rw-r--r--fs/btrfs/extent_io.h237
-rw-r--r--fs/btrfs/extent_map.c8
-rw-r--r--fs/btrfs/extent_map.h11
-rw-r--r--fs/btrfs/file-item.c1
-rw-r--r--fs/btrfs/file.c565
-rw-r--r--fs/btrfs/free-space-cache.c158
-rw-r--r--fs/btrfs/free-space-cache.h61
-rw-r--r--fs/btrfs/free-space-tree.c134
-rw-r--r--fs/btrfs/free-space-tree.h20
-rw-r--r--fs/btrfs/inode-item.c62
-rw-r--r--fs/btrfs/inode-map.c36
-rw-r--r--fs/btrfs/inode.c636
-rw-r--r--fs/btrfs/ioctl.c477
-rw-r--r--fs/btrfs/locking.c355
-rw-r--r--fs/btrfs/locking.h15
-rw-r--r--fs/btrfs/lzo.c61
-rw-r--r--fs/btrfs/math.h28
-rw-r--r--fs/btrfs/misc.h61
-rw-r--r--fs/btrfs/ordered-data.c19
-rw-r--r--fs/btrfs/ordered-data.h2
-rw-r--r--fs/btrfs/print-tree.c6
-rw-r--r--fs/btrfs/props.c8
-rw-r--r--fs/btrfs/qgroup.c101
-rw-r--r--fs/btrfs/qgroup.h2
-rw-r--r--fs/btrfs/raid56.c117
-rw-r--r--fs/btrfs/reada.c49
-rw-r--r--fs/btrfs/ref-verify.c2
-rw-r--r--fs/btrfs/relocation.c64
-rw-r--r--fs/btrfs/root-tree.c2
-rw-r--r--fs/btrfs/scrub.c101
-rw-r--r--fs/btrfs/send.c499
-rw-r--r--fs/btrfs/space-info.c399
-rw-r--r--fs/btrfs/space-info.h33
-rw-r--r--fs/btrfs/struct-funcs.c73
-rw-r--r--fs/btrfs/super.c60
-rw-r--r--fs/btrfs/sysfs.c315
-rw-r--r--fs/btrfs/sysfs.h82
-rw-r--r--fs/btrfs/tests/btrfs-tests.c20
-rw-r--r--fs/btrfs/tests/btrfs-tests.h4
-rw-r--r--fs/btrfs/tests/extent-io-tests.c31
-rw-r--r--fs/btrfs/tests/free-space-tests.c16
-rw-r--r--fs/btrfs/tests/free-space-tree-tests.c102
-rw-r--r--fs/btrfs/tests/inode-tests.c24
-rw-r--r--fs/btrfs/transaction.c136
-rw-r--r--fs/btrfs/transaction.h8
-rw-r--r--fs/btrfs/tree-checker.c647
-rw-r--r--fs/btrfs/tree-log.c241
-rw-r--r--fs/btrfs/volumes.c622
-rw-r--r--fs/btrfs/volumes.h33
-rw-r--r--fs/btrfs/zlib.c63
-rw-r--r--fs/btrfs/zstd.c59
-rw-r--r--fs/buffer.c54
-rw-r--r--fs/ceph/Makefile2
-rw-r--r--fs/ceph/addr.c66
-rw-r--r--fs/ceph/cache.c11
-rw-r--r--fs/ceph/cache.h5
-rw-r--r--fs/ceph/caps.c188
-rw-r--r--fs/ceph/debugfs.c1
-rw-r--r--fs/ceph/dir.c16
-rw-r--r--fs/ceph/export.c60
-rw-r--r--fs/ceph/file.c138
-rw-r--r--fs/ceph/inode.c58
-rw-r--r--fs/ceph/io.c163
-rw-r--r--fs/ceph/io.h12
-rw-r--r--fs/ceph/locks.c11
-rw-r--r--fs/ceph/mds_client.c150
-rw-r--r--fs/ceph/mds_client.h8
-rw-r--r--fs/ceph/mdsmap.c11
-rw-r--r--fs/ceph/snap.c4
-rw-r--r--fs/ceph/super.c679
-rw-r--r--fs/ceph/super.h64
-rw-r--r--fs/ceph/xattr.c95
-rw-r--r--fs/cifs/Kconfig10
-rw-r--r--fs/cifs/Makefile2
-rw-r--r--fs/cifs/cifs_debug.c45
-rw-r--r--fs/cifs/cifs_debug.h67
-rw-r--r--fs/cifs/cifs_fs_sb.h2
-rw-r--r--fs/cifs/cifs_ioctl.h10
-rw-r--r--fs/cifs/cifs_spnego.c2
-rw-r--r--fs/cifs/cifsacl.c83
-rw-r--r--fs/cifs/cifsacl.h83
-rw-r--r--fs/cifs/cifsfs.c111
-rw-r--r--fs/cifs/cifsfs.h3
-rw-r--r--fs/cifs/cifsglob.h120
-rw-r--r--fs/cifs/cifsproto.h18
-rw-r--r--fs/cifs/cifsroot.c94
-rw-r--r--fs/cifs/cifssmb.c202
-rw-r--r--fs/cifs/connect.c389
-rw-r--r--fs/cifs/dfs_cache.c3
-rw-r--r--fs/cifs/dir.c21
-rw-r--r--fs/cifs/export.c2
-rw-r--r--fs/cifs/file.c299
-rw-r--r--fs/cifs/inode.c359
-rw-r--r--fs/cifs/ioctl.c29
-rw-r--r--fs/cifs/misc.c39
-rw-r--r--fs/cifs/netmisc.c18
-rw-r--r--fs/cifs/sess.c259
-rw-r--r--fs/cifs/smb1ops.c11
-rw-r--r--fs/cifs/smb2file.c2
-rw-r--r--fs/cifs/smb2inode.c169
-rw-r--r--fs/cifs/smb2maperror.c2
-rw-r--r--fs/cifs/smb2misc.c175
-rw-r--r--fs/cifs/smb2ops.c392
-rw-r--r--fs/cifs/smb2pdu.c477
-rw-r--r--fs/cifs/smb2pdu.h5
-rw-r--r--fs/cifs/smb2proto.h17
-rw-r--r--fs/cifs/smb2transport.c225
-rw-r--r--fs/cifs/smbdirect.c46
-rw-r--r--fs/cifs/smbencrypt.c18
-rw-r--r--fs/cifs/smbfsctl.h11
-rw-r--r--fs/cifs/trace.h38
-rw-r--r--fs/cifs/transport.c199
-rw-r--r--fs/cifs/xattr.c2
-rw-r--r--fs/coda/inode.c3
-rw-r--r--fs/coda/pioctl.c7
-rw-r--r--fs/compat_binfmt_elf.c4
-rw-r--r--fs/compat_ioctl.c934
-rw-r--r--fs/configfs/configfs_internal.h36
-rw-r--r--fs/configfs/dir.c306
-rw-r--r--fs/configfs/file.c280
-rw-r--r--fs/configfs/inode.c36
-rw-r--r--fs/configfs/mount.c9
-rw-r--r--fs/configfs/symlink.c197
-rw-r--r--fs/coredump.c44
-rw-r--r--fs/cramfs/inode.c73
-rw-r--r--fs/crypto/Kconfig2
-rw-r--r--fs/crypto/Makefile10
-rw-r--r--fs/crypto/bio.c29
-rw-r--r--fs/crypto/crypto.c167
-rw-r--r--fs/crypto/fname.c47
-rw-r--r--fs/crypto/fscrypt_private.h416
-rw-r--r--fs/crypto/hkdf.c181
-rw-r--r--fs/crypto/hooks.c6
-rw-r--r--fs/crypto/keyinfo.c611
-rw-r--r--fs/crypto/keyring.c986
-rw-r--r--fs/crypto/keysetup.c527
-rw-r--r--fs/crypto/keysetup_v1.c336
-rw-r--r--fs/crypto/policy.c473
-rw-r--r--fs/d_path.c6
-rw-r--r--fs/dax.c22
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/debugfs/file.c117
-rw-r--r--fs/debugfs/inode.c32
-rw-r--r--fs/devpts/inode.c2
-rw-r--r--fs/direct-io.c22
-rw-r--r--fs/ecryptfs/file.c1
-rw-r--r--fs/ecryptfs/inode.c84
-rw-r--r--fs/efs/super.c2
-rw-r--r--fs/erofs/Kconfig92
-rw-r--r--fs/erofs/Makefile11
-rw-r--r--fs/erofs/compress.h60
-rw-r--r--fs/erofs/data.c364
-rw-r--r--fs/erofs/decompressor.c338
-rw-r--r--fs/erofs/dir.c142
-rw-r--r--fs/erofs/erofs_fs.h317
-rw-r--r--fs/erofs/inode.c337
-rw-r--r--fs/erofs/internal.h430
-rw-r--r--fs/erofs/namei.c252
-rw-r--r--fs/erofs/super.c644
-rw-r--r--fs/erofs/tagptr.h110
-rw-r--r--fs/erofs/utils.c332
-rw-r--r--fs/erofs/xattr.c704
-rw-r--r--fs/erofs/xattr.h92
-rw-r--r--fs/erofs/zdata.c1401
-rw-r--r--fs/erofs/zdata.h189
-rw-r--r--fs/erofs/zmap.c471
-rw-r--r--fs/erofs/zpvec.h157
-rw-r--r--fs/eventpoll.c56
-rw-r--r--fs/exec.c9
-rw-r--r--fs/exportfs/expfs.c33
-rw-r--r--fs/ext2/balloc.c85
-rw-r--r--fs/ext2/ext2.h12
-rw-r--r--fs/ext2/inode.c9
-rw-r--r--fs/ext2/ioctl.c5
-rw-r--r--fs/ext2/super.c18
-rw-r--r--fs/ext2/xattr.c2
-rw-r--r--fs/ext4/Kconfig17
-rw-r--r--fs/ext4/Makefile2
-rw-r--r--fs/ext4/block_validity.c189
-rw-r--r--fs/ext4/dir.c7
-rw-r--r--fs/ext4/ext4.h119
-rw-r--r--fs/ext4/ext4_jbd2.c32
-rw-r--r--fs/ext4/ext4_jbd2.h106
-rw-r--r--fs/ext4/extents.c247
-rw-r--r--fs/ext4/extents_status.c521
-rw-r--r--fs/ext4/extents_status.h8
-rw-r--r--fs/ext4/file.c418
-rw-r--r--fs/ext4/fsync.c72
-rw-r--r--fs/ext4/hash.c2
-rw-r--r--fs/ext4/ialloc.c7
-rw-r--r--fs/ext4/indirect.c125
-rw-r--r--fs/ext4/inline.c2
-rw-r--r--fs/ext4/inode-test.c272
-rw-r--r--fs/ext4/inode.c1085
-rw-r--r--fs/ext4/ioctl.c144
-rw-r--r--fs/ext4/migrate.c103
-rw-r--r--fs/ext4/namei.c54
-rw-r--r--fs/ext4/page-io.c167
-rw-r--r--fs/ext4/readpage.c213
-rw-r--r--fs/ext4/resize.c46
-rw-r--r--fs/ext4/super.c112
-rw-r--r--fs/ext4/sysfs.c6
-rw-r--r--fs/ext4/verity.c367
-rw-r--r--fs/ext4/xattr.c94
-rw-r--r--fs/f2fs/Kconfig5
-rw-r--r--fs/f2fs/Makefile1
-rw-r--r--fs/f2fs/checkpoint.c2
-rw-r--r--fs/f2fs/data.c367
-rw-r--r--fs/f2fs/debug.c4
-rw-r--r--fs/f2fs/dir.c191
-rw-r--r--fs/f2fs/f2fs.h130
-rw-r--r--fs/f2fs/file.c396
-rw-r--r--fs/f2fs/gc.c143
-rw-r--r--fs/f2fs/hash.c37
-rw-r--r--fs/f2fs/inline.c18
-rw-r--r--fs/f2fs/inode.c22
-rw-r--r--fs/f2fs/namei.c69
-rw-r--r--fs/f2fs/node.c60
-rw-r--r--fs/f2fs/recovery.c2
-rw-r--r--fs/f2fs/segment.c202
-rw-r--r--fs/f2fs/segment.h14
-rw-r--r--fs/f2fs/super.c338
-rw-r--r--fs/f2fs/sysfs.c38
-rw-r--r--fs/f2fs/verity.c247
-rw-r--r--fs/f2fs/xattr.c20
-rw-r--r--fs/f2fs/xattr.h2
-rw-r--r--fs/fat/dir.c17
-rw-r--r--fs/fat/fatent.c3
-rw-r--r--fs/fat/file.c13
-rw-r--r--fs/fat/inode.c12
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/fhandle.c2
-rw-r--r--fs/file_table.c1
-rw-r--r--fs/freevxfs/vxfs_super.c2
-rw-r--r--fs/fs-writeback.c190
-rw-r--r--fs/fs_context.c18
-rw-r--r--fs/fs_parser.c18
-rw-r--r--fs/fuse/Kconfig11
-rw-r--r--fs/fuse/Makefile2
-rw-r--r--fs/fuse/cuse.c101
-rw-r--r--fs/fuse/dev.c689
-rw-r--r--fs/fuse/dir.c324
-rw-r--r--fs/fuse/file.c1243
-rw-r--r--fs/fuse/fuse_i.h363
-rw-r--r--fs/fuse/inode.c561
-rw-r--r--fs/fuse/readdir.c74
-rw-r--r--fs/fuse/virtio_fs.c1254
-rw-r--r--fs/fuse/xattr.c76
-rw-r--r--fs/gfs2/aops.c10
-rw-r--r--fs/gfs2/bmap.c384
-rw-r--r--fs/gfs2/dentry.c45
-rw-r--r--fs/gfs2/dir.c3
-rw-r--r--fs/gfs2/dir.h3
-rw-r--r--fs/gfs2/file.c101
-rw-r--r--fs/gfs2/glock.c131
-rw-r--r--fs/gfs2/glock.h6
-rw-r--r--fs/gfs2/glops.c4
-rw-r--r--fs/gfs2/incore.h9
-rw-r--r--fs/gfs2/inode.c79
-rw-r--r--fs/gfs2/lock_dlm.c8
-rw-r--r--fs/gfs2/log.c25
-rw-r--r--fs/gfs2/log.h2
-rw-r--r--fs/gfs2/lops.c34
-rw-r--r--fs/gfs2/lops.h3
-rw-r--r--fs/gfs2/meta_io.c6
-rw-r--r--fs/gfs2/ops_fstype.c504
-rw-r--r--fs/gfs2/quota.c6
-rw-r--r--fs/gfs2/recovery.c8
-rw-r--r--fs/gfs2/rgrp.c2
-rw-r--r--fs/gfs2/super.c380
-rw-r--r--fs/gfs2/super.h3
-rw-r--r--fs/gfs2/sys.c2
-rw-r--r--fs/gfs2/trans.c2
-rw-r--r--fs/gfs2/util.c4
-rw-r--r--fs/gfs2/util.h9
-rw-r--r--fs/hpfs/dir.c1
-rw-r--r--fs/hpfs/file.c1
-rw-r--r--fs/hpfs/hpfs_fn.h6
-rw-r--r--fs/hpfs/super.c2
-rw-r--r--fs/hugetlbfs/inode.c63
-rw-r--r--fs/inode.c36
-rw-r--r--fs/io-wq.c1094
-rw-r--r--fs/io-wq.h127
-rw-r--r--fs/io_uring.c3151
-rw-r--r--fs/ioctl.c92
-rw-r--r--fs/iomap/Makefile18
-rw-r--r--fs/iomap/apply.c32
-rw-r--r--fs/iomap/buffered-io.c756
-rw-r--r--fs/iomap/direct-io.c85
-rw-r--r--fs/iomap/fiemap.c10
-rw-r--r--fs/iomap/seek.c4
-rw-r--r--fs/iomap/swapfile.c3
-rw-r--r--fs/iomap/trace.c12
-rw-r--r--fs/iomap/trace.h191
-rw-r--r--fs/isofs/export.c2
-rw-r--r--fs/isofs/inode.c7
-rw-r--r--fs/jbd2/checkpoint.c2
-rw-r--r--fs/jbd2/commit.c26
-rw-r--r--fs/jbd2/journal.c67
-rw-r--r--fs/jbd2/revoke.c10
-rw-r--r--fs/jbd2/transaction.c413
-rw-r--r--fs/jffs2/fs.c51
-rw-r--r--fs/jffs2/gc.c21
-rw-r--r--fs/jffs2/os-linux.h7
-rw-r--r--fs/jffs2/scan.c5
-rw-r--r--fs/jffs2/super.c170
-rw-r--r--fs/jfs/Kconfig2
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/kernfs/dir.c114
-rw-r--r--fs/kernfs/file.c4
-rw-r--r--fs/kernfs/inode.c11
-rw-r--r--fs/kernfs/kernfs-internal.h2
-rw-r--r--fs/kernfs/mount.c102
-rw-r--r--fs/libfs.c140
-rw-r--r--fs/locks.c73
-rw-r--r--fs/minix/inode.c2
-rw-r--r--fs/namei.c72
-rw-r--r--fs/namespace.c59
-rw-r--r--fs/nfs/delegation.c37
-rw-r--r--fs/nfs/delegation.h3
-rw-r--r--fs/nfs/dir.c43
-rw-r--r--fs/nfs/direct.c115
-rw-r--r--fs/nfs/filelayout/filelayout.c1
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c28
-rw-r--r--fs/nfs/fscache.c7
-rw-r--r--fs/nfs/fscache.h2
-rw-r--r--fs/nfs/inode.c35
-rw-r--r--fs/nfs/internal.h18
-rw-r--r--fs/nfs/nfs3proc.c45
-rw-r--r--fs/nfs/nfs4_fs.h14
-rw-r--r--fs/nfs/nfs4client.c5
-rw-r--r--fs/nfs/nfs4file.c12
-rw-r--r--fs/nfs/nfs4proc.c432
-rw-r--r--fs/nfs/nfs4state.c71
-rw-r--r--fs/nfs/nfs4xdr.c2
-rw-r--r--fs/nfs/nfstrace.h2
-rw-r--r--fs/nfs/pagelist.c19
-rw-r--r--fs/nfs/pnfs.c78
-rw-r--r--fs/nfs/pnfs.h17
-rw-r--r--fs/nfs/pnfs_nfs.c15
-rw-r--r--fs/nfs/proc.c7
-rw-r--r--fs/nfs/read.c35
-rw-r--r--fs/nfs/super.c30
-rw-r--r--fs/nfs/write.c43
-rw-r--r--fs/nfsd/Kconfig3
-rw-r--r--fs/nfsd/Makefile3
-rw-r--r--fs/nfsd/acl.h8
-rw-r--r--fs/nfsd/blocklayout.c3
-rw-r--r--fs/nfsd/export.c13
-rw-r--r--fs/nfsd/filecache.c934
-rw-r--r--fs/nfsd/filecache.h61
-rw-r--r--fs/nfsd/netns.h4
-rw-r--r--fs/nfsd/nfs3proc.c9
-rw-r--r--fs/nfsd/nfs3xdr.c13
-rw-r--r--fs/nfsd/nfs4callback.c35
-rw-r--r--fs/nfsd/nfs4layouts.c12
-rw-r--r--fs/nfsd/nfs4proc.c97
-rw-r--r--fs/nfsd/nfs4recover.c388
-rw-r--r--fs/nfsd/nfs4state.c239
-rw-r--r--fs/nfsd/nfs4xdr.c56
-rw-r--r--fs/nfsd/nfscache.c2
-rw-r--r--fs/nfsd/nfsctl.c23
-rw-r--r--fs/nfsd/nfsproc.c4
-rw-r--r--fs/nfsd/nfssvc.c48
-rw-r--r--fs/nfsd/state.h13
-rw-r--r--fs/nfsd/trace.h140
-rw-r--r--fs/nfsd/vfs.c351
-rw-r--r--fs/nfsd/vfs.h37
-rw-r--r--fs/nfsd/xdr3.h2
-rw-r--r--fs/nfsd/xdr4.h19
-rw-r--r--fs/nilfs2/ioctl.c1
-rw-r--r--fs/notify/dnotify/dnotify.c15
-rw-r--r--fs/notify/fanotify/fanotify_user.c21
-rw-r--r--fs/notify/fdinfo.c2
-rw-r--r--fs/notify/fsnotify.c2
-rw-r--r--fs/notify/fsnotify.h4
-rw-r--r--fs/notify/group.c2
-rw-r--r--fs/notify/inotify/inotify_user.c14
-rw-r--r--fs/notify/mark.c6
-rw-r--r--fs/ntfs/inode.c21
-rw-r--r--fs/ntfs/mft.c12
-rw-r--r--fs/ntfs/namei.c2
-rw-r--r--fs/ntfs/runlist.c2
-rw-r--r--fs/ntfs/super.c2
-rw-r--r--fs/ocfs2/acl.c4
-rw-r--r--fs/ocfs2/alloc.c52
-rw-r--r--fs/ocfs2/aops.c39
-rw-r--r--fs/ocfs2/blockcheck.c26
-rw-r--r--fs/ocfs2/cluster/heartbeat.c103
-rw-r--r--fs/ocfs2/dir.c3
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h1
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c55
-rw-r--r--fs/ocfs2/dlm/dlmdebug.h16
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c7
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c23
-rw-r--r--fs/ocfs2/dlmglue.c29
-rw-r--r--fs/ocfs2/extent_map.c3
-rw-r--r--fs/ocfs2/file.c149
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/ioctl.c3
-rw-r--r--fs/ocfs2/journal.c11
-rw-r--r--fs/ocfs2/journal.h42
-rw-r--r--fs/ocfs2/localalloc.c3
-rw-r--r--fs/ocfs2/namei.c2
-rw-r--r--fs/ocfs2/ocfs2.h3
-rw-r--r--fs/ocfs2/quota_global.c2
-rw-r--r--fs/ocfs2/suballoc.c19
-rw-r--r--fs/ocfs2/super.c14
-rw-r--r--fs/ocfs2/xattr.c3
-rw-r--r--fs/omfs/inode.c4
-rw-r--r--fs/open.c29
-rw-r--r--fs/orangefs/file.c2
-rw-r--r--fs/orangefs/inode.c2
-rw-r--r--fs/orangefs/namei.c2
-rw-r--r--fs/orangefs/orangefs-kernel.h2
-rw-r--r--fs/overlayfs/export.c3
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/overlayfs/ovl_entry.h1
-rw-r--r--fs/overlayfs/super.c73
-rw-r--r--fs/pipe.c237
-rw-r--r--fs/proc/Kconfig8
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc/generic.c37
-rw-r--r--fs/proc/internal.h2
-rw-r--r--fs/proc/kcore.c6
-rw-r--r--fs/proc/meminfo.c8
-rw-r--r--fs/proc/page.c28
-rw-r--r--fs/proc/root.c3
-rw-r--r--fs/proc/stat.c56
-rw-r--r--fs/proc/task_mmu.c86
-rw-r--r--fs/proc/vmcore.c8
-rw-r--r--fs/proc_namespace.c2
-rw-r--r--fs/pstore/platform.c2
-rw-r--r--fs/pstore/ram.c2
-rw-r--r--fs/qnx4/inode.c2
-rw-r--r--fs/qnx6/inode.c2
-rw-r--r--fs/quota/dquot.c293
-rw-r--r--fs/quota/quota.c7
-rw-r--r--fs/quota/quota_v1.c1
-rw-r--r--fs/ramfs/inode.c99
-rw-r--r--fs/read_write.c49
-rw-r--r--fs/readdir.c128
-rw-r--r--fs/reiserfs/do_balan.c15
-rw-r--r--fs/reiserfs/file.c10
-rw-r--r--fs/reiserfs/fix_node.c6
-rw-r--r--fs/reiserfs/inode.c12
-rw-r--r--fs/reiserfs/journal.c22
-rw-r--r--fs/reiserfs/lbalance.c3
-rw-r--r--fs/reiserfs/namei.c7
-rw-r--r--fs/reiserfs/objectid.c3
-rw-r--r--fs/reiserfs/prints.c3
-rw-r--r--fs/reiserfs/reiserfs.h2
-rw-r--r--fs/reiserfs/stree.c4
-rw-r--r--fs/reiserfs/super.c5
-rw-r--r--fs/reiserfs/xattr.c19
-rw-r--r--fs/reiserfs/xattr_acl.c4
-rw-r--r--fs/romfs/super.c48
-rw-r--r--fs/select.c10
-rw-r--r--fs/seq_file.c2
-rw-r--r--fs/splice.c199
-rw-r--r--fs/squashfs/super.c102
-rw-r--r--fs/statfs.c17
-rw-r--r--fs/super.c156
-rw-r--r--fs/sysv/super.c5
-rw-r--r--fs/timerfd.c20
-rw-r--r--fs/tracefs/inode.c4
-rw-r--r--fs/ubifs/auth.c4
-rw-r--r--fs/ubifs/budget.c2
-rw-r--r--fs/ubifs/debug.c13
-rw-r--r--fs/ubifs/file.c21
-rw-r--r--fs/ubifs/ioctl.c20
-rw-r--r--fs/ubifs/journal.c4
-rw-r--r--fs/ubifs/orphan.c19
-rw-r--r--fs/ubifs/sb.c2
-rw-r--r--fs/ubifs/super.c15
-rw-r--r--fs/ubifs/tnc_commit.c34
-rw-r--r--fs/ubifs/tnc_misc.c1
-rw-r--r--fs/udf/balloc.c11
-rw-r--r--fs/udf/ecma_167.h14
-rw-r--r--fs/udf/file.c3
-rw-r--r--fs/udf/ialloc.c3
-rw-r--r--fs/udf/inode.c55
-rw-r--r--fs/udf/super.c261
-rw-r--r--fs/udf/udf_i.h6
-rw-r--r--fs/udf/udfdecl.h11
-rw-r--r--fs/ufs/Kconfig2
-rw-r--r--fs/ufs/super.c7
-rw-r--r--fs/unicode/utf8-core.c2
-rw-r--r--fs/unicode/utf8-selftest.c4
-rw-r--r--fs/userfaultfd.c70
-rw-r--r--fs/utimes.c14
-rw-r--r--fs/verity/Kconfig55
-rw-r--r--fs/verity/Makefile10
-rw-r--r--fs/verity/enable.c377
-rw-r--r--fs/verity/fsverity_private.h185
-rw-r--r--fs/verity/hash_algs.c280
-rw-r--r--fs/verity/init.c61
-rw-r--r--fs/verity/measure.c57
-rw-r--r--fs/verity/open.c356
-rw-r--r--fs/verity/signature.c157
-rw-r--r--fs/verity/verify.c281
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/kmem.c79
-rw-r--r--fs/xfs/kmem.h45
-rw-r--r--fs/xfs/libxfs/xfs_ag.c5
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c1238
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h16
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c1
-rw-r--r--fs/xfs/libxfs/xfs_attr.c103
-rw-r--r--fs/xfs/libxfs/xfs_attr.h6
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c283
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h30
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c3
-rw-r--r--fs/xfs/libxfs/xfs_bit.c1
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c817
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h17
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c16
-rw-r--r--fs/xfs/libxfs/xfs_btree.c111
-rw-r--r--fs/xfs/libxfs/xfs_btree.h47
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c689
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.h77
-rw-r--r--fs/xfs/libxfs/xfs_da_format.c888
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h59
-rw-r--r--fs/xfs/libxfs/xfs_defer.c2
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c86
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h90
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c135
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c282
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c307
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c1008
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h114
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c430
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c8
-rw-r--r--fs/xfs/libxfs/xfs_format.h14
-rw-r--r--fs/xfs/libxfs/xfs_fs.h14
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c126
-rw-r--r--fs/xfs/libxfs/xfs_iext_tree.c10
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c21
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h5
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c38
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h18
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h4
-rw-r--r--fs/xfs/libxfs/xfs_log_recover.h4
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c224
-rw-r--r--fs/xfs/libxfs/xfs_refcount.h12
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c436
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h11
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c4
-rw-r--r--fs/xfs/libxfs/xfs_sb.c3
-rw-r--r--fs/xfs/libxfs/xfs_shared.h6
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c8
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c6
-rw-r--r--fs/xfs/libxfs/xfs_types.h10
-rw-r--r--fs/xfs/scrub/agheader.c4
-rw-r--r--fs/xfs/scrub/alloc.c3
-rw-r--r--fs/xfs/scrub/attr.c17
-rw-r--r--fs/xfs/scrub/bitmap.c3
-rw-r--r--fs/xfs/scrub/bmap.c81
-rw-r--r--fs/xfs/scrub/common.h9
-rw-r--r--fs/xfs/scrub/dabtree.c68
-rw-r--r--fs/xfs/scrub/dabtree.h3
-rw-r--r--fs/xfs/scrub/dir.c132
-rw-r--r--fs/xfs/scrub/fscounters.c10
-rw-r--r--fs/xfs/scrub/health.c1
-rw-r--r--fs/xfs/scrub/parent.c27
-rw-r--r--fs/xfs/scrub/quota.c7
-rw-r--r--fs/xfs/scrub/refcount.c3
-rw-r--r--fs/xfs/scrub/repair.c6
-rw-r--r--fs/xfs/scrub/scrub.c1
-rw-r--r--fs/xfs/scrub/symlink.c2
-rw-r--r--fs/xfs/xfs_acl.c32
-rw-r--r--fs/xfs/xfs_aops.c791
-rw-r--r--fs/xfs/xfs_aops.h20
-rw-r--r--fs/xfs/xfs_attr_inactive.c78
-rw-r--r--fs/xfs/xfs_attr_list.c77
-rw-r--r--fs/xfs/xfs_bmap_item.c19
-rw-r--r--fs/xfs/xfs_bmap_util.c281
-rw-r--r--fs/xfs/xfs_bmap_util.h4
-rw-r--r--fs/xfs/xfs_buf.c53
-rw-r--r--fs/xfs/xfs_buf.h7
-rw-r--r--fs/xfs/xfs_buf_item.c10
-rw-r--r--fs/xfs/xfs_dir2_readdir.c137
-rw-r--r--fs/xfs/xfs_discard.c6
-rw-r--r--fs/xfs/xfs_dquot.c50
-rw-r--r--fs/xfs/xfs_dquot.h98
-rw-r--r--fs/xfs/xfs_dquot_item.c2
-rw-r--r--fs/xfs/xfs_dquot_item.h34
-rw-r--r--fs/xfs/xfs_error.c33
-rw-r--r--fs/xfs/xfs_error.h33
-rw-r--r--fs/xfs/xfs_extent_busy.c4
-rw-r--r--fs/xfs/xfs_extfree_item.c17
-rw-r--r--fs/xfs/xfs_file.c151
-rw-r--r--fs/xfs/xfs_filestream.c3
-rw-r--r--fs/xfs/xfs_fsmap.c13
-rw-r--r--fs/xfs/xfs_icache.c10
-rw-r--r--fs/xfs/xfs_icreate_item.c4
-rw-r--r--fs/xfs/xfs_inode.c131
-rw-r--r--fs/xfs/xfs_inode.h31
-rw-r--r--fs/xfs/xfs_inode_item.c17
-rw-r--r--fs/xfs/xfs_ioctl.c228
-rw-r--r--fs/xfs/xfs_ioctl.h7
-rw-r--r--fs/xfs/xfs_ioctl32.c107
-rw-r--r--fs/xfs/xfs_ioctl32.h13
-rw-r--r--fs/xfs/xfs_iomap.c871
-rw-r--r--fs/xfs/xfs_iomap.h13
-rw-r--r--fs/xfs/xfs_iops.c71
-rw-r--r--fs/xfs/xfs_itable.c19
-rw-r--r--fs/xfs/xfs_itable.h13
-rw-r--r--fs/xfs/xfs_iwalk.c7
-rw-r--r--fs/xfs/xfs_iwalk.h13
-rw-r--r--fs/xfs/xfs_linux.h14
-rw-r--r--fs/xfs/xfs_log.c711
-rw-r--r--fs/xfs/xfs_log_cil.c16
-rw-r--r--fs/xfs/xfs_log_priv.h33
-rw-r--r--fs/xfs/xfs_log_recover.c198
-rw-r--r--fs/xfs/xfs_message.c22
-rw-r--r--fs/xfs/xfs_message.h6
-rw-r--r--fs/xfs/xfs_mount.c62
-rw-r--r--fs/xfs/xfs_mount.h64
-rw-r--r--fs/xfs/xfs_mru_cache.c4
-rw-r--r--fs/xfs/xfs_pnfs.c60
-rw-r--r--fs/xfs/xfs_qm.c69
-rw-r--r--fs/xfs/xfs_qm.h6
-rw-r--r--fs/xfs/xfs_qm_bhv.c8
-rw-r--r--fs/xfs/xfs_qm_syscalls.c139
-rw-r--r--fs/xfs/xfs_quotaops.c3
-rw-r--r--fs/xfs/xfs_refcount_item.c25
-rw-r--r--fs/xfs/xfs_reflink.c224
-rw-r--r--fs/xfs/xfs_reflink.h4
-rw-r--r--fs/xfs/xfs_rmap_item.c19
-rw-r--r--fs/xfs/xfs_rtalloc.c7
-rw-r--r--fs/xfs/xfs_super.c1474
-rw-r--r--fs/xfs/xfs_super.h10
-rw-r--r--fs/xfs/xfs_symlink.c1
-rw-r--r--fs/xfs/xfs_symlink.h2
-rw-r--r--fs/xfs/xfs_sysfs.c13
-rw-r--r--fs/xfs/xfs_trace.h134
-rw-r--r--fs/xfs/xfs_trans.c6
-rw-r--r--fs/xfs/xfs_trans_ail.c10
-rw-r--r--fs/xfs/xfs_trans_dquot.c58
-rw-r--r--fs/xfs/xfs_xattr.c3
706 files changed, 51599 insertions, 29068 deletions
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 995e332eee5c..eb2151fb6049 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -51,6 +51,8 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
if (!v9ses->cachetag) {
if (v9fs_random_cachetag(v9ses) < 0) {
v9ses->fscache = NULL;
+ kfree(v9ses->cachetag);
+ v9ses->cachetag = NULL;
return;
}
}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 4cc966a31cb3..fe7f0bd2048e 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -513,6 +513,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
if (!v9inode->writeback_fid &&
+ (vma->vm_flags & VM_SHARED) &&
(vma->vm_flags & VM_WRITE)) {
/*
* clone a fid and add it to writeback_fid
@@ -614,6 +615,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start - 1),
};
+ if (!(vma->vm_flags & VM_SHARED))
+ return;
p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 08112fbcaece..74df32be4c6a 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -58,7 +58,7 @@ static int v9fs_set_super(struct super_block *s, void *data)
static int
v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
- int flags, void *data)
+ int flags)
{
int ret;
@@ -69,8 +69,12 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
if (v9fs_proto_dotl(v9ses)) {
sb->s_op = &v9fs_super_ops_dotl;
sb->s_xattr = v9fs_xattr_handlers;
- } else
+ } else {
sb->s_op = &v9fs_super_ops;
+ sb->s_time_max = U32_MAX;
+ }
+
+ sb->s_time_min = 0;
ret = super_setup_bdi(sb);
if (ret)
@@ -128,7 +132,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
retval = PTR_ERR(sb);
goto clunk_fid;
}
- retval = v9fs_fill_super(sb, v9ses, flags, data);
+ retval = v9fs_fill_super(sb, v9ses, flags);
if (retval)
goto release_sb;
diff --git a/fs/Kconfig b/fs/Kconfig
index bfb1c6095c7a..7b623e9fc1b0 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -112,6 +112,8 @@ config MANDATORY_FILE_LOCKING
source "fs/crypto/Kconfig"
+source "fs/verity/Kconfig"
+
source "fs/notify/Kconfig"
source "fs/quota/Kconfig"
@@ -261,6 +263,7 @@ source "fs/romfs/Kconfig"
source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
+source "fs/erofs/Kconfig"
endif # MISC_FILESYSTEMS
@@ -319,4 +322,7 @@ source "fs/nls/Kconfig"
source "fs/dlm/Kconfig"
source "fs/unicode/Kconfig"
+config IO_WQ
+ bool
+
endmenu
diff --git a/fs/Makefile b/fs/Makefile
index d60089fd689b..1148c555c4d3 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -32,8 +32,10 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_IO_URING) += io_uring.o
+obj-$(CONFIG_IO_WQ) += io-wq.o
obj-$(CONFIG_FS_DAX) += dax.o
obj-$(CONFIG_FS_ENCRYPTION) += crypto/
+obj-$(CONFIG_FS_VERITY) += verity/
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
@@ -130,3 +132,4 @@ obj-$(CONFIG_F2FS_FS) += f2fs/
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_EROFS_FS) += erofs/
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index a92eb6ae2ae2..a755bef7c4c7 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -43,8 +43,8 @@ struct affs_ext_key {
*/
struct affs_inode_info {
atomic_t i_opencnt;
- struct semaphore i_link_lock; /* Protects internal inode access. */
- struct semaphore i_ext_lock; /* Protects internal inode access. */
+ struct mutex i_link_lock; /* Protects internal inode access. */
+ struct mutex i_ext_lock; /* Protects internal inode access. */
#define i_hash_lock i_ext_lock
u32 i_blkcnt; /* block count */
u32 i_extcnt; /* extended block count */
@@ -293,30 +293,30 @@ affs_adjust_bitmapchecksum(struct buffer_head *bh, u32 val)
static inline void
affs_lock_link(struct inode *inode)
{
- down(&AFFS_I(inode)->i_link_lock);
+ mutex_lock(&AFFS_I(inode)->i_link_lock);
}
static inline void
affs_unlock_link(struct inode *inode)
{
- up(&AFFS_I(inode)->i_link_lock);
+ mutex_unlock(&AFFS_I(inode)->i_link_lock);
}
static inline void
affs_lock_dir(struct inode *inode)
{
- down(&AFFS_I(inode)->i_hash_lock);
+ mutex_lock_nested(&AFFS_I(inode)->i_hash_lock, SINGLE_DEPTH_NESTING);
}
static inline void
affs_unlock_dir(struct inode *inode)
{
- up(&AFFS_I(inode)->i_hash_lock);
+ mutex_unlock(&AFFS_I(inode)->i_hash_lock);
}
static inline void
affs_lock_ext(struct inode *inode)
{
- down(&AFFS_I(inode)->i_ext_lock);
+ mutex_lock(&AFFS_I(inode)->i_ext_lock);
}
static inline void
affs_unlock_ext(struct inode *inode)
{
- up(&AFFS_I(inode)->i_ext_lock);
+ mutex_unlock(&AFFS_I(inode)->i_ext_lock);
}
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 14a6c1b90c9f..f708c45d5f66 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -375,7 +375,7 @@ affs_secs_to_datestamp(time64_t secs, struct affs_date *ds)
u32 minute;
s32 rem;
- secs -= sys_tz.tz_minuteswest * 60 + ((8 * 365 + 2) * 24 * 60 * 60);
+ secs -= sys_tz.tz_minuteswest * 60 + AFFS_EPOCH_DELTA;
if (secs < 0)
secs = 0;
days = div_s64_rem(secs, 86400, &rem);
diff --git a/fs/affs/amigaffs.h b/fs/affs/amigaffs.h
index f9bef9056659..81fb396d4dfa 100644
--- a/fs/affs/amigaffs.h
+++ b/fs/affs/amigaffs.h
@@ -32,6 +32,9 @@
#define AFFS_ROOT_BMAPS 25
+/* Seconds since Amiga epoch of 1978/01/01 to UNIX */
+#define AFFS_EPOCH_DELTA ((8 * 365 + 2) * 86400LL)
+
struct affs_date {
__be32 days;
__be32 mins;
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 73598bff8506..a346cf7659f1 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -150,10 +150,10 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
}
inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec
- = (be32_to_cpu(tail->change.days) * (24 * 60 * 60) +
+ = (be32_to_cpu(tail->change.days) * 86400LL +
be32_to_cpu(tail->change.mins) * 60 +
be32_to_cpu(tail->change.ticks) / 50 +
- ((8 * 365 + 2) * 24 * 60 * 60)) +
+ AFFS_EPOCH_DELTA) +
sys_tz.tz_minuteswest * 60;
inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_atime.tv_nsec = 0;
affs_brelse(bh);
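
The two AFFS hunks above name the open-coded (8 * 365 + 2) * 24 * 60 * 60 offset as AFFS_EPOCH_DELTA (the 2922 days between the Amiga epoch of 1978-01-01 and the Unix epoch, i.e. 252460800 seconds) and widen the day multiplication to 64 bits. Below is a minimal user-space sketch of the same conversion, for illustration only; the in-kernel code additionally applies the sys_tz.tz_minuteswest correction.

#include <stdint.h>
#include <stdio.h>

/* Seconds between the Amiga epoch (1978-01-01) and the Unix epoch
 * (1970-01-01): eight years plus the leap days of 1972 and 1976. */
#define AFFS_EPOCH_DELTA ((8 * 365 + 2) * 86400LL)	/* 252460800 */

/* AFFS timestamps are days/minutes/ticks since 1978-01-01, 1 tick = 1/50 s. */
static int64_t affs_to_unix(uint32_t days, uint32_t mins, uint32_t ticks)
{
	return (int64_t)days * 86400 + (int64_t)mins * 60 + ticks / 50
	       + AFFS_EPOCH_DELTA;
}

int main(void)
{
	/* 0 days / 0 mins / 0 ticks is exactly the Amiga epoch. */
	printf("%lld\n", (long long)affs_to_unix(0, 0, 0));	/* 252460800 */
	return 0;
}
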
diff --git a/fs/affs/super.c b/fs/affs/super.c
index e7d036efbaa1..47107c6712a6 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -121,8 +121,8 @@ static void init_once(void *foo)
{
struct affs_inode_info *ei = (struct affs_inode_info *) foo;
- sema_init(&ei->i_link_lock, 1);
- sema_init(&ei->i_ext_lock, 1);
+ mutex_init(&ei->i_link_lock);
+ mutex_init(&ei->i_ext_lock);
inode_init_once(&ei->vfs_inode);
}
@@ -355,6 +355,10 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &affs_sops;
sb->s_flags |= SB_NODIRATIME;
+ sb->s_time_gran = NSEC_PER_SEC;
+ sb->s_time_min = sys_tz.tz_minuteswest * 60 + AFFS_EPOCH_DELTA;
+ sb->s_time_max = 86400LL * U32_MAX + 86400 + sb->s_time_min;
+
sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
@@ -557,14 +561,9 @@ affs_remount(struct super_block *sb, int *flags, char *data)
int root_block;
unsigned long mount_flags;
int res = 0;
- char *new_opts;
char volume[32];
char *prefix = NULL;
- new_opts = kstrdup(data, GFP_KERNEL);
- if (data && !new_opts)
- return -ENOMEM;
-
pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
sync_filesystem(sb);
@@ -575,7 +574,6 @@ affs_remount(struct super_block *sb, int *flags, char *data)
&blocksize, &prefix, volume,
&mount_flags)) {
kfree(prefix);
- kfree(new_opts);
return -EINVAL;
}
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 6cdd7047c809..2dca8df1a18d 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -312,7 +312,6 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
_enter("%p,%zu,", server, count);
ASSERT(server != NULL);
- ASSERTCMP(count, <=, AFSCBMAX);
/* TODO: Sort the callback break list by volume ID */
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index a2a87117d262..fd5133e26a38 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
cell = rcu_dereference_raw(net->ws_cell);
if (cell) {
afs_get_cell(cell);
+ ret = 0;
break;
}
ret = -EDESTADDRREQ;
@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
done_seqretry(&net->cells_lock, seq);
+ if (ret != 0 && cell)
+ afs_put_cell(net, cell);
+
return ret == 0 ? cell : ERR_PTR(ret);
}
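
The fs/afs/cell.c hunk above closes a reference leak in the RCU lookup path: when a cell is found, ret must be reset to 0, and a reference taken on an attempt that ultimately fails must be dropped before returning. A schematic sketch of that get/put discipline follows; lookup_obj, find_in_table, obj_get, obj_put and obj_usable are hypothetical placeholders, not kernel APIs, and this is not a drop-in replacement for the real function.

/* Sketch only: mirrors the pattern restored in afs_lookup_cell_rcu(). */
struct obj *lookup_obj(struct table *tbl)
{
	struct obj *obj = NULL;
	int ret = -ENOENT;

	rcu_read_lock();
	obj = find_in_table(tbl);	/* hypothetical RCU-protected search */
	if (obj) {
		obj_get(obj);		/* take a reference while still under RCU */
		ret = 0;		/* record success explicitly (the added line) */
	}
	rcu_read_unlock();

	/* In the real function a seqlock retry loop and a by-name lookup
	 * follow; either can leave ret holding an error after a reference
	 * was already taken, hence the cleanup below (the added hunk). */
	if (ret == 0 && !obj_usable(obj))
		ret = -ESTALE;

	if (ret != 0 && obj)
		obj_put(obj);		/* never return an error while holding a ref */

	return ret == 0 ? obj : ERR_PTR(ret);
}
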
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 4f1b6f466ff5..ff3994a6be23 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -342,14 +342,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (call->count2 != call->count && call->count2 != 0)
return afs_protocol_error(call, -EBADMSG,
afs_eproto_cb_count);
- call->_iter = &call->iter;
- iov_iter_discard(&call->iter, READ, call->count2 * 3 * 4);
+ call->iter = &call->def_iter;
+ iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4);
call->unmarshall++;
/* Fall through */
case 4:
_debug("extract discard %zu/%u",
- iov_iter_count(&call->iter), call->count2 * 3 * 4);
+ iov_iter_count(call->iter), call->count2 * 3 * 4);
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
struct afs_call *call = container_of(work, struct afs_call, work);
struct afs_uuid *r = call->request;
- struct {
- __be32 match;
- } reply;
-
_enter("");
if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
- reply.match = htonl(0);
+ afs_send_empty_reply(call);
else
- reply.match = htonl(1);
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+ 1, 1, "K-1");
- afs_send_simple_reply(call, &reply, sizeof(reply));
afs_put_call(call);
_leave("");
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e640d67274be..497f979018c2 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
* iterate through the data blob that lists the contents of an AFS directory
*/
static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
- struct key *key)
+ struct key *key, afs_dataversion_t *_dir_version)
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_xdr_dir_page *dbuf;
@@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
req = afs_read_dir(dvnode, key);
if (IS_ERR(req))
return PTR_ERR(req);
+ *_dir_version = req->data_version;
/* round the file position up to the next entry boundary */
ctx->pos += sizeof(union afs_xdr_dirent) - 1;
@@ -514,7 +515,10 @@ out:
*/
static int afs_readdir(struct file *file, struct dir_context *ctx)
{
- return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file));
+ afs_dataversion_t dir_version;
+
+ return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
+ &dir_version);
}
/*
@@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
* - just returns the FID the dentry name maps to if found
*/
static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
- struct afs_fid *fid, struct key *key)
+ struct afs_fid *fid, struct key *key,
+ afs_dataversion_t *_dir_version)
{
struct afs_super_info *as = dir->i_sb->s_fs_info;
struct afs_lookup_one_cookie cookie = {
@@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie.ctx, key);
+ ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
if (ret < 0) {
_leave(" = %d [iter]", ret);
return ret;
@@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct afs_server *server;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
+ afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
int ret, i;
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
cookie->fids[i].vid = as->volume->vid;
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie->ctx, key);
+ ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
if (ret < 0) {
inode = ERR_PTR(ret);
goto out;
}
+ dentry->d_fsdata = (void *)(unsigned long)data_version;
+
inode = ERR_PTR(-ENOENT);
if (!cookie->found)
goto out;
@@ -795,7 +803,12 @@ success:
continue;
if (cookie->inodes[i]) {
- afs_vnode_commit_status(&fc, AFS_FS_I(cookie->inodes[i]),
+ struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]);
+
+ if (test_bit(AFS_VNODE_UNSET, &iv->flags))
+ continue;
+
+ afs_vnode_commit_status(&fc, iv,
scb->cb_break, NULL, scb);
continue;
}
@@ -951,12 +964,65 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
inode ? AFS_FS_I(inode) : NULL);
} else {
trace_afs_lookup(dvnode, &dentry->d_name,
- inode ? AFS_FS_I(inode) : NULL);
+ IS_ERR_OR_NULL(inode) ? NULL
+ : AFS_FS_I(inode));
}
return d;
}
/*
+ * Check the validity of a dentry under RCU conditions.
+ */
+static int afs_d_revalidate_rcu(struct dentry *dentry)
+{
+ struct afs_vnode *dvnode, *vnode;
+ struct dentry *parent;
+ struct inode *dir, *inode;
+ long dir_version, de_version;
+
+ _enter("%p", dentry);
+
+ /* Check the parent directory is still valid first. */
+ parent = READ_ONCE(dentry->d_parent);
+ dir = d_inode_rcu(parent);
+ if (!dir)
+ return -ECHILD;
+ dvnode = AFS_FS_I(dir);
+ if (test_bit(AFS_VNODE_DELETED, &dvnode->flags))
+ return -ECHILD;
+
+ if (!afs_check_validity(dvnode))
+ return -ECHILD;
+
+ /* We only need to invalidate a dentry if the server's copy changed
+ * behind our back. If we made the change, it's no problem. Note that
+ * on a 32-bit system, we only have 32 bits in the dentry to store the
+ * version.
+ */
+ dir_version = (long)READ_ONCE(dvnode->status.data_version);
+ de_version = (long)READ_ONCE(dentry->d_fsdata);
+ if (de_version != dir_version) {
+ dir_version = (long)READ_ONCE(dvnode->invalid_before);
+ if (de_version - dir_version < 0)
+ return -ECHILD;
+ }
+
+ /* Check to see if the vnode referred to by the dentry still
+ * has a callback.
+ */
+ if (d_really_is_positive(dentry)) {
+ inode = d_inode_rcu(dentry);
+ if (inode) {
+ vnode = AFS_FS_I(inode);
+ if (!afs_check_validity(vnode))
+ return -ECHILD;
+ }
+ }
+
+ return 1; /* Still valid */
+}
+
+/*
* check that a dentry lookup hit has found a valid entry
* - NOTE! the hit can be a negative hit too, so we can't assume we have an
* inode
@@ -968,11 +1034,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
struct dentry *parent;
struct inode *inode;
struct key *key;
- long dir_version, de_version;
+ afs_dataversion_t dir_version;
+ long de_version;
int ret;
if (flags & LOOKUP_RCU)
- return -ECHILD;
+ return afs_d_revalidate_rcu(dentry);
if (d_really_is_positive(dentry)) {
vnode = AFS_FS_I(d_inode(dentry));
@@ -1014,20 +1081,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
* on a 32-bit system, we only have 32 bits in the dentry to store the
* version.
*/
- dir_version = (long)dir->status.data_version;
+ dir_version = dir->status.data_version;
de_version = (long)dentry->d_fsdata;
- if (de_version == dir_version)
- goto out_valid;
+ if (de_version == (long)dir_version)
+ goto out_valid_noupdate;
- dir_version = (long)dir->invalid_before;
- if (de_version - dir_version >= 0)
+ dir_version = dir->invalid_before;
+ if (de_version - (long)dir_version >= 0)
goto out_valid;
_debug("dir modified");
afs_stat_v(dir, n_reval);
/* search the directory for this vnode */
- ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key);
+ ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
switch (ret) {
case 0:
/* the filename maps to something */
@@ -1080,7 +1147,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
}
out_valid:
- dentry->d_fsdata = (void *)dir_version;
+ dentry->d_fsdata = (void *)(unsigned long)dir_version;
+out_valid_noupdate:
dput(parent);
key_put(key);
_leave(" = 1 [valid]");
@@ -1186,6 +1254,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
}
/*
+ * Note that a dentry got changed. We need to set d_fsdata to the data version
+ * number derived from the result of the operation. It doesn't matter if
+ * d_fsdata goes backwards as we'll just revalidate.
+ */
+static void afs_update_dentry_version(struct afs_fs_cursor *fc,
+ struct dentry *dentry,
+ struct afs_status_cb *scb)
+{
+ if (fc->ac.error == 0)
+ dentry->d_fsdata =
+ (void *)(unsigned long)scb->status.data_version;
+}
+
+/*
* create a directory on an AFS filesystem
*/
static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -1227,6 +1309,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
@@ -1319,6 +1402,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, scb);
+ afs_update_dentry_version(&fc, dentry, scb);
ret = afs_end_vnode_operation(&fc);
if (ret == 0) {
afs_dir_remove_subdir(dentry);
@@ -1458,6 +1542,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
&data_version, &scb[0]);
afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
&data_version_2, &scb[1]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
ret = afs_end_vnode_operation(&fc);
if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
ret = afs_dir_remove_link(dvnode, dentry, key);
@@ -1526,6 +1611,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
@@ -1607,6 +1693,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
NULL, &scb[1]);
ihold(&vnode->vfs_inode);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
d_instantiate(dentry, &vnode->vfs_inode);
mutex_unlock(&vnode->io_lock);
@@ -1686,6 +1773,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
@@ -1791,6 +1879,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
+ /* This bit is potentially nasty as there's a potential race with
+ * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry
+ * to reflect it's new parent's new data_version after the op, but
+ * d_revalidate may see old_dentry between the op having taken place
+ * and the version being updated.
+ *
+ * So drop the old_dentry for now to make other threads go through
+ * lookup instead - which we hold a lock against.
+ */
+ d_drop(old_dentry);
+
ret = -ERESTARTSYS;
if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
afs_dataversion_t orig_data_version;
@@ -1802,9 +1901,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (orig_dvnode != new_dvnode) {
if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
- goto error_rehash;
+ goto error_rehash_old;
}
- new_data_version = new_dvnode->status.data_version;
+ new_data_version = new_dvnode->status.data_version + 1;
} else {
new_data_version = orig_data_version;
new_scb = &scb[0];
@@ -1827,7 +1926,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
- goto error_rehash;
+ goto error_rehash_old;
}
if (ret == 0) {
@@ -1853,10 +1952,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
drop_nlink(new_inode);
spin_unlock(&new_inode->i_lock);
}
+
+ /* Now we can update d_fsdata on the dentries to reflect their
+ * new parent's data_version.
+ *
+ * Note that if we ever implement RENAME_EXCHANGE, we'll have
+ * to update both dentries with opposing dir versions.
+ */
+ if (new_dvnode != orig_dvnode) {
+ afs_update_dentry_version(&fc, old_dentry, &scb[1]);
+ afs_update_dentry_version(&fc, new_dentry, &scb[1]);
+ } else {
+ afs_update_dentry_version(&fc, old_dentry, &scb[0]);
+ afs_update_dentry_version(&fc, new_dentry, &scb[0]);
+ }
d_move(old_dentry, new_dentry);
goto error_tmp;
}
+error_rehash_old:
+ d_rehash(new_dentry);
error_rehash:
if (rehash)
d_rehash(rehash);
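
Much of the fs/afs/dir.c churn above is about caching the directory's data version in dentry->d_fsdata (truncated to a long, so only 32 bits on 32-bit systems) and comparing it against invalid_before with a signed difference so that wraparound is handled. The following is a small self-contained sketch of that comparison idiom; the function name is illustrative, and the unsigned-subtraction form is used here only because plain signed subtraction is overflow-safe in the kernel thanks to its -fno-strict-overflow build flag, not in portable user-space C.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "is the cached version at least as new as the threshold?"
 * check, as done when the version stashed in d_fsdata is compared against
 * the directory's invalid_before version. */
static bool at_least_as_new(long cached, long threshold)
{
	return (long)((unsigned long)cached - (unsigned long)threshold) >= 0;
}

int main(void)
{
	printf("%d\n", at_least_as_new(7, 3));               /* 1: cached is newer */
	printf("%d\n", at_least_as_new(3, 7));               /* 0: cached is stale */
	/* Counter wrapped: LONG_MIN is one past LONG_MAX modulo 2^bits,
	 * so it still counts as newer. */
	printf("%d\n", at_least_as_new(LONG_MIN, LONG_MAX)); /* 1 */
	return 0;
}
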
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index d4fbe5f85f1b..b108528bf010 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -68,13 +68,11 @@ static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_
static void afs_set_contig_bits(union afs_xdr_dir_block *block,
int bit, unsigned int nr_slots)
{
- u64 mask, before, after;
+ u64 mask;
mask = (1 << nr_slots) - 1;
mask <<= bit;
- before = *(u64 *)block->hdr.bitmap;
-
block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8);
block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8);
block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8);
@@ -83,8 +81,6 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8);
block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8);
block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8);
-
- after = *(u64 *)block->hdr.bitmap;
}
/*
@@ -93,13 +89,11 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
int bit, unsigned int nr_slots)
{
- u64 mask, before, after;
+ u64 mask;
mask = (1 << nr_slots) - 1;
mask <<= bit;
- before = *(u64 *)block->hdr.bitmap;
-
block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8);
block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8);
block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8);
@@ -108,8 +102,6 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8);
block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8);
block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8);
-
- after = *(u64 *)block->hdr.bitmap;
}
/*
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index bcd1bafb0278..4150280509ff 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -10,13 +10,6 @@
#include <linux/dns_resolver.h>
#include "internal.h"
-const struct file_operations afs_dynroot_file_operations = {
- .open = dcache_dir_open,
- .release = dcache_dir_close,
- .iterate_shared = dcache_readdir,
- .llseek = dcache_dir_lseek,
-};
-
/*
* Probe to see if a cell may exist. This prevents positive dentries from
* being created unnecessarily.
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 56b69576274d..8415733f7bc1 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req)
int i;
if (refcount_dec_and_test(&req->usage)) {
- for (i = 0; i < req->nr_pages; i++)
- if (req->pages[i])
- put_page(req->pages[i]);
- if (req->pages != req->array)
- kfree(req->pages);
+ if (req->pages) {
+ for (i = 0; i < req->nr_pages; i++)
+ if (req->pages[i])
+ put_page(req->pages[i]);
+ if (req->pages != req->array)
+ kfree(req->pages);
+ }
kfree(req);
}
}
@@ -221,7 +223,7 @@ static void afs_file_readpage_read_complete(struct page *page,
/*
* Fetch file data from the volume.
*/
-int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *desc)
+int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
{
struct afs_fs_cursor fc;
struct afs_status_cb *scb;
@@ -244,7 +246,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_fetch_data(&fc, scb, desc);
+ afs_fs_fetch_data(&fc, scb, req);
}
afs_check_for_remote_deletion(&fc, vnode);
@@ -255,7 +257,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
if (ret == 0) {
afs_stat_v(vnode, n_fetches);
- atomic_long_add(desc->actual_len,
+ atomic_long_add(req->actual_len,
&afs_v2net(vnode)->n_fetch_bytes);
}
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index d5e5a6ddc847..0f2a94ba73cb 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -346,8 +346,8 @@ again:
if (ret < 0) {
trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
ret);
- pr_warning("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
- vnode->fid.vid, vnode->fid.vnode, ret);
+ pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
+ vnode->fid.vid, vnode->fid.vnode, ret);
}
spin_lock(&vnode->lock);
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 1ce73e014139..1f9c5d8e6fe5 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -15,8 +15,6 @@
#include "xdr_fs.h"
#include "protocol_yfs.h"
-static const struct afs_fid afs_zero_fid;
-
static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
{
call->cbi = afs_get_cb_interest(cbi);
@@ -325,7 +323,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
int ret;
_enter("{%u,%zu/%llu}",
- call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
+ call->unmarshall, iov_iter_count(call->iter), req->actual_len);
switch (call->unmarshall) {
case 0:
@@ -339,8 +337,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->tmp_u = htonl(0);
afs_extract_to_tmp(call);
}
+ /* Fall through */
- /* Fall through - and extract the returned data length */
+ /* extract the returned data length */
case 1:
_debug("extract data length");
ret = afs_extract_data(call, true);
@@ -364,13 +363,14 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->bvec[0].bv_len = size;
call->bvec[0].bv_offset = req->offset;
call->bvec[0].bv_page = req->pages[req->index];
- iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
+ iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size);
ASSERTCMP(size, <=, PAGE_SIZE);
+ /* Fall through */
- /* Fall through - and extract the returned data */
+ /* extract the returned data */
case 2:
_debug("extract data %zu/%llu",
- iov_iter_count(&call->iter), req->remain);
+ iov_iter_count(call->iter), req->remain);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -392,13 +392,13 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
goto no_more_data;
/* Discard any excess data the server gave us */
- iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
+ afs_extract_discard(call, req->actual_len - req->len);
call->unmarshall = 3;
-
/* Fall through */
+
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(&call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), req->actual_len - req->len);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -407,8 +407,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
no_more_data:
call->unmarshall = 4;
afs_extract_to_buf(call, (21 + 3 + 6) * 4);
+ /* Fall through */
- /* Fall through - and extract the metadata */
+ /* extract the metadata */
case 4:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -489,7 +490,7 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc,
call->key = fc->key;
call->out_scb = scb;
call->out_volsync = NULL;
- call->read_request = req;
+ call->read_request = afs_get_read(req);
/* marshall the parameters */
bp = call->request;
@@ -502,7 +503,6 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc,
bp[6] = 0;
bp[7] = htonl(lower_32_bits(req->len));
- refcount_inc(&req->usage);
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
afs_set_fc_call(call, fc);
@@ -539,7 +539,7 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc,
call->key = fc->key;
call->out_scb = scb;
call->out_volsync = NULL;
- call->read_request = req;
+ call->read_request = afs_get_read(req);
/* marshall the parameters */
bp = call->request;
@@ -550,7 +550,6 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc,
bp[4] = htonl(lower_32_bits(req->pos));
bp[5] = htonl(lower_32_bits(req->len));
- refcount_inc(&req->usage);
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
afs_set_fc_call(call, fc);
@@ -1471,8 +1470,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
case 0:
call->unmarshall++;
afs_extract_to_buf(call, 12 * 4);
+ /* Fall through */
- /* Fall through - and extract the returned status record */
+ /* extract the returned status record */
case 1:
_debug("extract status");
ret = afs_extract_data(call, true);
@@ -1483,8 +1483,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus);
call->unmarshall++;
afs_extract_to_tmp(call);
+ /* Fall through */
- /* Fall through - and extract the volume name length */
+ /* extract the volume name length */
case 2:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1498,8 +1499,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the volume name */
+ /* extract the volume name */
case 3:
_debug("extract volname");
ret = afs_extract_data(call, true);
@@ -1511,8 +1513,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
_debug("volname '%s'", p);
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the offline message length */
+ /* extract the offline message length */
case 4:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1526,8 +1529,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the offline message */
+ /* extract the offline message */
case 5:
_debug("extract offline");
ret = afs_extract_data(call, true);
@@ -1540,8 +1544,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the message of the day length */
+ /* extract the message of the day length */
case 6:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1555,8 +1560,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the message of the day */
+ /* extract the message of the day */
case 7:
_debug("extract motd");
ret = afs_extract_data(call, false);
@@ -1844,14 +1850,15 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
u32 count;
int ret;
- _enter("{%u,%zu}", call->unmarshall, iov_iter_count(&call->iter));
+ _enter("{%u,%zu}", call->unmarshall, iov_iter_count(call->iter));
switch (call->unmarshall) {
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the capabilities word count */
+ /* Extract the capabilities word count */
case 1:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1861,10 +1868,11 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
call->count = count;
call->count2 = count;
- iov_iter_discard(&call->iter, READ, count * sizeof(__be32));
+ afs_extract_discard(call, count * sizeof(__be32));
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract capabilities words */
+ /* Extract capabilities words */
case 2:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -2020,9 +2028,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* Extract the file status count and array in two steps */
- /* Fall through */
case 1:
_debug("extract status count");
ret = afs_extract_data(call, true);
@@ -2039,8 +2047,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->unmarshall++;
more_counts:
afs_extract_to_buf(call, 21 * sizeof(__be32));
-
/* Fall through */
+
case 2:
_debug("extract status array %u", call->count);
ret = afs_extract_data(call, true);
@@ -2060,9 +2068,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->count = 0;
call->unmarshall++;
afs_extract_to_tmp(call);
+ /* Fall through */
/* Extract the callback count and array in two steps */
- /* Fall through */
case 3:
_debug("extract CB count");
ret = afs_extract_data(call, true);
@@ -2078,8 +2086,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->unmarshall++;
more_cbs:
afs_extract_to_buf(call, 3 * sizeof(__be32));
-
/* Fall through */
+
case 4:
_debug("extract CB array");
ret = afs_extract_data(call, true);
@@ -2096,8 +2104,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
afs_extract_to_buf(call, 6 * sizeof(__be32));
call->unmarshall++;
-
/* Fall through */
+
case 5:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -2193,6 +2201,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* extract the returned data length */
case 1:
@@ -2210,6 +2219,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
acl->size = call->count2;
afs_extract_begin(call, acl->data, size);
call->unmarshall++;
+ /* Fall through */
/* extract the returned data */
case 2:
@@ -2219,6 +2229,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
afs_extract_to_buf(call, (21 + 6) * 4);
call->unmarshall++;
+ /* Fall through */
/* extract the metadata */
case 3:
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 7b1c18c32f48..281470fe1183 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -34,8 +34,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
{
static unsigned long once_only;
- pr_warn("kAFS: AFS vnode with undefined type %u\n",
- vnode->status.type);
+ pr_warn("kAFS: AFS vnode with undefined type %u\n", vnode->status.type);
pr_warn("kAFS: A=%d m=%o s=%llx v=%llx\n",
vnode->status.abort_code,
vnode->status.mode,
@@ -175,11 +174,11 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags));
if (status->type != vnode->status.type) {
- pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n",
- vnode->fid.vid,
- vnode->fid.vnode,
- vnode->fid.unique,
- status->type, vnode->status.type);
+ pr_warn("Vnode %llx:%llx:%x changed type %u to %u\n",
+ vnode->fid.vid,
+ vnode->fid.vnode,
+ vnode->fid.unique,
+ status->type, vnode->status.type);
afs_protocol_error(NULL, -EBADMSG, afs_eproto_bad_status);
return;
}
@@ -443,7 +442,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
if (root) {
inode->i_op = &afs_dynroot_inode_operations;
- inode->i_fop = &afs_dynroot_file_operations;
+ inode->i_fop = &simple_dir_operations;
} else {
inode->i_op = &afs_autocell_inode_operations;
}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index f66a3be12fd6..1d81fc4c3058 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -115,9 +115,9 @@ struct afs_call {
struct afs_vnode *lvnode; /* vnode being locked */
void *request; /* request data (first part) */
struct address_space *mapping; /* Pages being written from */
- struct iov_iter iter; /* Buffer iterator */
- struct iov_iter *_iter; /* Iterator currently in use */
- union { /* Convenience for ->iter */
+ struct iov_iter def_iter; /* Default buffer/data iterator */
+ struct iov_iter *iter; /* Iterator currently in use */
+ union { /* Convenience for ->def_iter */
struct kvec kvec[1];
struct bio_vec bvec[1];
};
@@ -910,7 +910,6 @@ extern int afs_silly_iput(struct dentry *, struct inode *);
/*
* dynroot.c
*/
-extern const struct file_operations afs_dynroot_file_operations;
extern const struct inode_operations afs_dynroot_inode_operations;
extern const struct dentry_operations afs_dynroot_dentry_operations;
@@ -935,6 +934,12 @@ extern int afs_fetch_data(struct afs_vnode *, struct key *, struct afs_read *);
extern int afs_page_filler(void *, struct page *);
extern void afs_put_read(struct afs_read *);
+static inline struct afs_read *afs_get_read(struct afs_read *req)
+{
+ refcount_inc(&req->usage);
+ return req;
+}
+
/*
* flock.c
*/
@@ -1137,7 +1142,7 @@ static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t si
{
call->kvec[0].iov_base = buf;
call->kvec[0].iov_len = size;
- iov_iter_kvec(&call->iter, READ, call->kvec, 1, size);
+ iov_iter_kvec(&call->def_iter, READ, call->kvec, 1, size);
}
static inline void afs_extract_to_tmp(struct afs_call *call)
@@ -1152,7 +1157,7 @@ static inline void afs_extract_to_tmp64(struct afs_call *call)
static inline void afs_extract_discard(struct afs_call *call, size_t size)
{
- iov_iter_discard(&call->iter, READ, size);
+ iov_iter_discard(&call->def_iter, READ, size);
}
static inline void afs_extract_to_buf(struct afs_call *call, size_t size)
@@ -1217,6 +1222,7 @@ extern void afs_cache_permit(struct afs_vnode *, struct key *, unsigned int,
struct afs_status_cb *);
extern void afs_zap_permits(struct rcu_head *);
extern struct key *afs_request_key(struct afs_cell *);
+extern struct key *afs_request_key_rcu(struct afs_cell *);
extern int afs_check_permit(struct afs_vnode *, struct key *, afs_access_t *);
extern int afs_permission(struct inode *, int);
extern void __exit afs_clean_up_permit_cache(void);
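The afs_get_read() helper added above pairs a refcount-taking getter with the
existing afs_put_read(); a minimal sketch of that common pattern for a
hypothetical refcounted object (names here are illustrative only, needs
<linux/refcount.h> and <linux/slab.h>):

	struct my_obj {
		refcount_t usage;
		/* payload ... */
	};

	static inline struct my_obj *my_obj_get(struct my_obj *obj)
	{
		refcount_inc(&obj->usage);	/* caller must already hold a ref */
		return obj;
	}

	static inline void my_obj_put(struct my_obj *obj)
	{
		if (refcount_dec_and_test(&obj->usage))
			kfree(obj);
	}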
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 0e5269374ac1..58d396592250 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -152,7 +152,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
INIT_WORK(&call->async_work, afs_process_async_call);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->state_lock);
- call->_iter = &call->iter;
+ call->iter = &call->def_iter;
o = atomic_inc_return(&net->nr_outstanding_calls);
trace_afs_call(call, afs_call_trace_alloc, 1, o,
@@ -513,12 +513,12 @@ static void afs_deliver_to_call(struct afs_call *call)
state == AFS_CALL_SV_AWAIT_ACK
) {
if (state == AFS_CALL_SV_AWAIT_ACK) {
- iov_iter_kvec(&call->iter, READ, NULL, 0, 0);
+ iov_iter_kvec(&call->def_iter, READ, NULL, 0, 0);
ret = rxrpc_kernel_recv_data(call->net->socket,
- call->rxcall, &call->iter,
+ call->rxcall, &call->def_iter,
false, &remote_abort,
&call->service_id);
- trace_afs_receive_data(call, &call->iter, false, ret);
+ trace_afs_receive_data(call, &call->def_iter, false, ret);
if (ret == -EINPROGRESS || ret == -EAGAIN)
return;
@@ -637,6 +637,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
call->need_attention = false;
__set_current_state(TASK_RUNNING);
afs_deliver_to_call(call);
+ timeout = rtt2;
continue;
}
@@ -858,7 +859,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
{
int ret;
- _enter("{%zu}", iov_iter_count(call->_iter));
+ _enter("{%zu}", iov_iter_count(call->iter));
/* the operation ID forms the first four bytes of the request data */
ret = afs_extract_data(call, true);
@@ -974,7 +975,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
int afs_extract_data(struct afs_call *call, bool want_more)
{
struct afs_net *net = call->net;
- struct iov_iter *iter = call->_iter;
+ struct iov_iter *iter = call->iter;
enum afs_call_state state;
u32 remote_abort = 0;
int ret;
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 71e71c07568f..ce9de1e6742b 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -27,8 +27,37 @@ struct key *afs_request_key(struct afs_cell *cell)
_enter("{%x}", key_serial(cell->anonymous_key));
_debug("key %s", cell->anonymous_key->description);
- key = request_key(&key_type_rxrpc, cell->anonymous_key->description,
- NULL);
+ key = request_key_net(&key_type_rxrpc, cell->anonymous_key->description,
+ cell->net->net, NULL);
+ if (IS_ERR(key)) {
+ if (PTR_ERR(key) != -ENOKEY) {
+ _leave(" = %ld", PTR_ERR(key));
+ return key;
+ }
+
+ /* act as anonymous user */
+ _leave(" = {%x} [anon]", key_serial(cell->anonymous_key));
+ return key_get(cell->anonymous_key);
+ } else {
+ /* act as authorised user */
+ _leave(" = {%x} [auth]", key_serial(key));
+ return key;
+ }
+}
+
+/*
+ * Get a key when pathwalk is in rcuwalk mode.
+ */
+struct key *afs_request_key_rcu(struct afs_cell *cell)
+{
+ struct key *key;
+
+ _enter("{%x}", key_serial(cell->anonymous_key));
+
+ _debug("key %s", cell->anonymous_key->description);
+ key = request_key_net_rcu(&key_type_rxrpc,
+ cell->anonymous_key->description,
+ cell->net->net);
if (IS_ERR(key)) {
if (PTR_ERR(key) != -ENOKEY) {
_leave(" = %ld", PTR_ERR(key));
@@ -274,6 +303,40 @@ someone_else_changed_it:
return;
}
+static bool afs_check_permit_rcu(struct afs_vnode *vnode, struct key *key,
+ afs_access_t *_access)
+{
+ const struct afs_permits *permits;
+ int i;
+
+ _enter("{%llx:%llu},%x",
+ vnode->fid.vid, vnode->fid.vnode, key_serial(key));
+
+ /* check the permits to see if we've got one yet */
+ if (key == vnode->volume->cell->anonymous_key) {
+ *_access = vnode->status.anon_access;
+ _leave(" = t [anon %x]", *_access);
+ return true;
+ }
+
+ permits = rcu_dereference(vnode->permit_cache);
+ if (permits) {
+ for (i = 0; i < permits->nr_permits; i++) {
+ if (permits->permits[i].key < key)
+ continue;
+ if (permits->permits[i].key > key)
+ break;
+
+ *_access = permits->permits[i].access;
+ _leave(" = %u [perm %x]", !permits->invalidated, *_access);
+ return !permits->invalidated;
+ }
+ }
+
+ _leave(" = f");
+ return false;
+}
+
/*
* check with the fileserver to see if the directory or parent directory is
* permitted to be accessed with this authorisation, and if so, what access it
@@ -340,33 +403,42 @@ int afs_permission(struct inode *inode, int mask)
struct afs_vnode *vnode = AFS_FS_I(inode);
afs_access_t uninitialized_var(access);
struct key *key;
- int ret;
-
- if (mask & MAY_NOT_BLOCK)
- return -ECHILD;
+ int ret = 0;
_enter("{{%llx:%llu},%lx},%x,",
vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask);
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- _leave(" = %ld [key]", PTR_ERR(key));
- return PTR_ERR(key);
- }
+ if (mask & MAY_NOT_BLOCK) {
+ key = afs_request_key_rcu(vnode->volume->cell);
+ if (IS_ERR(key))
+ return -ECHILD;
+
+ ret = -ECHILD;
+ if (!afs_check_validity(vnode) ||
+ !afs_check_permit_rcu(vnode, key, &access))
+ goto error;
+ } else {
+ key = afs_request_key(vnode->volume->cell);
+ if (IS_ERR(key)) {
+ _leave(" = %ld [key]", PTR_ERR(key));
+ return PTR_ERR(key);
+ }
- ret = afs_validate(vnode, key);
- if (ret < 0)
- goto error;
+ ret = afs_validate(vnode, key);
+ if (ret < 0)
+ goto error;
- /* check the permits to see if we've got one yet */
- ret = afs_check_permit(vnode, key, &access);
- if (ret < 0)
- goto error;
+ /* check the permits to see if we've got one yet */
+ ret = afs_check_permit(vnode, key, &access);
+ if (ret < 0)
+ goto error;
+ }
/* interpret the access mask */
_debug("REQ %x ACC %x on %s",
mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file");
+ ret = 0;
if (S_ISDIR(inode->i_mode)) {
if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) {
if (!(access & AFS_ACE_LOOKUP))
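The afs_permission() rework above follows the standard ->permission() rule for
rcu-walk: under MAY_NOT_BLOCK only lock-free, non-sleeping checks are allowed,
and anything that would have to block returns -ECHILD so the VFS retries in
ref-walk mode. A hedged, filesystem-agnostic sketch (the foo_* helpers are
placeholders, not part of this patch):

	static int foo_permission(struct inode *inode, int mask)
	{
		if (mask & MAY_NOT_BLOCK) {
			/* rcu-walk: no sleeping, no I/O, cached state only */
			if (!foo_cached_access_ok(inode, mask))
				return -ECHILD;	/* retry in ref-walk mode */
			return 0;
		}

		/* ref-walk: key lookups, validation and network I/O are fine */
		return foo_check_access_slow(inode, mask);
	}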
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 64d440aaabc0..1686bf188ccd 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -151,7 +151,7 @@ static struct afs_server *afs_install_server(struct afs_net *net,
const struct afs_addr_list *alist;
struct afs_server *server;
struct rb_node **pp, *p;
- int ret = -EEXIST, diff;
+ int diff;
_enter("%p", candidate);
@@ -196,7 +196,6 @@ static struct afs_server *afs_install_server(struct afs_net *net,
hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6);
write_sequnlock(&net->fs_addr_lock);
- ret = 0;
exists:
afs_get_server(server, afs_server_trace_get_install);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index f18911e8d770..488641b1a418 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -435,6 +435,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
/* fill in the superblock */
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_magic = AFS_FS_MAGIC;
sb->s_op = &afs_super_ops;
if (!as->dyn_root)
diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c
index 21eb0c0be912..8fea54eba0c2 100644
--- a/fs/afs/vl_list.c
+++ b/fs/afs/vl_list.c
@@ -279,8 +279,8 @@ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
struct afs_addr_list *old = addrs;
write_lock(&server->lock);
- rcu_swap_protected(server->addresses, old,
- lockdep_is_held(&server->lock));
+ old = rcu_replace_pointer(server->addresses, old,
+ lockdep_is_held(&server->lock));
write_unlock(&server->lock);
afs_put_addrlist(old);
}
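rcu_replace_pointer(), used above in place of rcu_swap_protected(), returns the
old pointer rather than swapping in place, which makes the publish-then-free
sequence explicit. The usual shape is roughly (names made up for illustration):

	struct foo *old;

	spin_lock(&obj->lock);
	old = rcu_replace_pointer(obj->ptr, new_ptr,
				  lockdep_is_held(&obj->lock));
	spin_unlock(&obj->lock);

	/* wait for readers before freeing, or use kfree_rcu()/call_rcu() */
	synchronize_rcu();
	kfree(old);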
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index d7e0fd3c00df..516e9a3bb5b4 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
struct afs_uuid__xdr *xdr;
struct afs_uuid *uuid;
int j;
+ int n = entry->nr_servers;
tmp = ntohl(uvldb->serverFlags[i]);
if (tmp & AFS_VLSF_DONTUSE ||
(new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
continue;
if (tmp & AFS_VLSF_RWVOL) {
- entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+ entry->fs_mask[n] |= AFS_VOL_VTM_RW;
if (vlflags & AFS_VLF_BACKEXISTS)
- entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+ entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
}
if (tmp & AFS_VLSF_ROVOL)
- entry->fs_mask[i] |= AFS_VOL_VTM_RO;
- if (!entry->fs_mask[i])
+ entry->fs_mask[n] |= AFS_VOL_VTM_RO;
+ if (!entry->fs_mask[n])
continue;
xdr = &uvldb->serverNumber[i];
- uuid = (struct afs_uuid *)&entry->fs_server[i];
+ uuid = (struct afs_uuid *)&entry->fs_server[n];
uuid->time_low = xdr->time_low;
uuid->time_mid = htons(ntohl(xdr->time_mid));
uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
@@ -184,7 +185,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
int i, ret;
_enter("{%u,%zu/%u}",
- call->unmarshall, iov_iter_count(call->_iter), call->count);
+ call->unmarshall, iov_iter_count(call->iter), call->count);
switch (call->unmarshall) {
case 0:
@@ -315,7 +316,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call)
int ret;
_enter("{%u,%zu/%u}",
- call->unmarshall, iov_iter_count(call->_iter), call->count);
+ call->unmarshall, iov_iter_count(call->iter), call->count);
switch (call->unmarshall) {
case 0:
@@ -424,7 +425,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
int ret;
_enter("{%u,%zu,%u}",
- call->unmarshall, iov_iter_count(call->_iter), call->count2);
+ call->unmarshall, iov_iter_count(call->iter), call->count2);
switch (call->unmarshall) {
case 0:
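The i -> n change above is the usual fix for compacting while filtering: once
some source entries are skipped, the output array needs its own index instead
of reusing the input loop counter. In isolation (usable()/convert() are
placeholders):

	int n = 0;

	for (i = 0; i < nr_src; i++) {
		if (!usable(&src[i]))
			continue;
		dst[n] = convert(&src[i]);	/* write slot n, not slot i */
		n++;
	}
	nr_dst = n;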
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 1a414300b654..92ca5e27573b 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -12,8 +12,6 @@
unsigned __read_mostly afs_volume_gc_delay = 10;
unsigned __read_mostly afs_volume_record_life = 60 * 60;
-static const char *const afs_voltypes[] = { "R/W", "R/O", "BAK" };
-
/*
* Allocate a volume record and load it up from a vldb record.
*/
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index 5552d034090a..7af41fd5f3ee 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -228,11 +228,11 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
break;
case 1:
data = buf;
- dsize = snprintf(buf, sizeof(buf), "%u", yacl->inherit_flag);
+ dsize = scnprintf(buf, sizeof(buf), "%u", yacl->inherit_flag);
break;
case 2:
data = buf;
- dsize = snprintf(buf, sizeof(buf), "%u", yacl->num_cleaned);
+ dsize = scnprintf(buf, sizeof(buf), "%u", yacl->num_cleaned);
break;
case 3:
data = yacl->vol_acl->data;
@@ -370,13 +370,15 @@ static int afs_xattr_get_fid(const struct xattr_handler *handler,
/* The volume ID is 64-bit, the vnode ID is 96-bit and the
* uniquifier is 32-bit.
*/
- len = sprintf(text, "%llx:", vnode->fid.vid);
+ len = scnprintf(text, sizeof(text), "%llx:", vnode->fid.vid);
if (vnode->fid.vnode_hi)
- len += sprintf(text + len, "%x%016llx",
- vnode->fid.vnode_hi, vnode->fid.vnode);
+ len += scnprintf(text + len, sizeof(text) - len, "%x%016llx",
+ vnode->fid.vnode_hi, vnode->fid.vnode);
else
- len += sprintf(text + len, "%llx", vnode->fid.vnode);
- len += sprintf(text + len, ":%x", vnode->fid.unique);
+ len += scnprintf(text + len, sizeof(text) - len, "%llx",
+ vnode->fid.vnode);
+ len += scnprintf(text + len, sizeof(text) - len, ":%x",
+ vnode->fid.unique);
if (size == 0)
return len;
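The snprintf() -> scnprintf() conversions above matter because snprintf()
returns the length that would have been written, which can exceed the buffer
and corrupt the later "text + len" offset arithmetic; scnprintf() returns the
number of characters actually stored (excluding the NUL). A stand-alone
illustration:

	char buf[8];
	int len;

	len = snprintf(buf, sizeof(buf), "%s", "0123456789");
	/* len == 10, although only 7 characters plus the NUL fit */

	len = scnprintf(buf, sizeof(buf), "%s", "0123456789");
	/* len == 7: safe to use as the offset for a further append */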
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 18722aaeda33..a26126ac7bf1 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -152,8 +152,8 @@ static void yfs_check_req(struct afs_call *call, __be32 *bp)
pr_err("kAFS: %s: Request buffer overflow (%zu>%u)\n",
call->type->name, len, call->request_size);
else if (len < call->request_size)
- pr_warning("kAFS: %s: Request buffer underflow (%zu<%u)\n",
- call->type->name, len, call->request_size);
+ pr_warn("kAFS: %s: Request buffer underflow (%zu<%u)\n",
+ call->type->name, len, call->request_size);
}
/*
@@ -441,7 +441,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
int ret;
_enter("{%u,%zu/%llu}",
- call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
+ call->unmarshall, iov_iter_count(call->iter), req->actual_len);
switch (call->unmarshall) {
case 0:
@@ -450,8 +450,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
req->offset = req->pos & (PAGE_SIZE - 1);
afs_extract_to_tmp64(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the returned data length */
+ /* extract the returned data length */
case 1:
_debug("extract data length");
ret = afs_extract_data(call, true);
@@ -475,13 +476,14 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
call->bvec[0].bv_len = size;
call->bvec[0].bv_offset = req->offset;
call->bvec[0].bv_page = req->pages[req->index];
- iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
+ iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size);
ASSERTCMP(size, <=, PAGE_SIZE);
+ /* Fall through */
- /* Fall through - and extract the returned data */
+ /* extract the returned data */
case 2:
_debug("extract data %zu/%llu",
- iov_iter_count(&call->iter), req->remain);
+ iov_iter_count(call->iter), req->remain);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -503,13 +505,13 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
goto no_more_data;
/* Discard any excess data the server gave us */
- iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
+ afs_extract_discard(call, req->actual_len - req->len);
call->unmarshall = 3;
-
/* Fall through */
+
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(&call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), req->actual_len - req->len);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -521,8 +523,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
+ /* Fall through */
- /* Fall through - and extract the metadata */
+ /* extract the metadata */
case 4:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -539,8 +542,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
req->file_size = call->out_scb->status.size;
call->unmarshall++;
-
/* Fall through */
+
case 5:
break;
}
@@ -602,7 +605,7 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
call->key = fc->key;
call->out_scb = scb;
call->out_volsync = NULL;
- call->read_request = req;
+ call->read_request = afs_get_read(req);
/* marshall the parameters */
bp = call->request;
@@ -613,7 +616,6 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
bp = xdr_encode_u64(bp, req->len);
yfs_check_req(call, bp);
- refcount_inc(&req->usage);
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
afs_set_fc_call(call, fc);
@@ -1429,8 +1431,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
case 0:
call->unmarshall++;
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus));
+ /* Fall through */
- /* Fall through - and extract the returned status record */
+ /* extract the returned status record */
case 1:
_debug("extract status");
ret = afs_extract_data(call, true);
@@ -1441,8 +1444,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus);
call->unmarshall++;
afs_extract_to_tmp(call);
+ /* Fall through */
- /* Fall through - and extract the volume name length */
+ /* extract the volume name length */
case 2:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1456,8 +1460,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the volume name */
+ /* extract the volume name */
case 3:
_debug("extract volname");
ret = afs_extract_data(call, true);
@@ -1469,8 +1474,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
_debug("volname '%s'", p);
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the offline message length */
+ /* extract the offline message length */
case 4:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1484,8 +1490,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the offline message */
+ /* extract the offline message */
case 5:
_debug("extract offline");
ret = afs_extract_data(call, true);
@@ -1498,8 +1505,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the message of the day length */
+ /* extract the message of the day length */
case 6:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1513,8 +1521,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the message of the day */
+ /* extract the message of the day */
case 7:
_debug("extract motd");
ret = afs_extract_data(call, false);
@@ -1526,8 +1535,8 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
_debug("motd '%s'", p);
call->unmarshall++;
-
/* Fall through */
+
case 8:
break;
}
@@ -1805,9 +1814,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* Extract the file status count and array in two steps */
- /* Fall through */
case 1:
_debug("extract status count");
ret = afs_extract_data(call, true);
@@ -1824,8 +1833,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->unmarshall++;
more_counts:
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus));
-
/* Fall through */
+
case 2:
_debug("extract status array %u", call->count);
ret = afs_extract_data(call, true);
@@ -1845,9 +1854,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->count = 0;
call->unmarshall++;
afs_extract_to_tmp(call);
+ /* Fall through */
/* Extract the callback count and array in two steps */
- /* Fall through */
case 3:
_debug("extract CB count");
ret = afs_extract_data(call, true);
@@ -1863,8 +1872,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->unmarshall++;
more_cbs:
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack));
-
/* Fall through */
+
case 4:
_debug("extract CB array");
ret = afs_extract_data(call, true);
@@ -1881,8 +1890,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync));
call->unmarshall++;
-
/* Fall through */
+
case 5:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -1892,8 +1901,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
xdr_decode_YFSVolSync(&bp, call->out_volsync);
call->unmarshall++;
-
/* Fall through */
+
case 6:
break;
}
@@ -1978,6 +1987,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* Extract the file ACL length */
case 1:
@@ -1996,9 +2006,10 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
acl->size = call->count2;
afs_extract_begin(call, acl->data, size);
} else {
- iov_iter_discard(&call->iter, READ, size);
+ afs_extract_discard(call, size);
}
call->unmarshall++;
+ /* Fall through */
/* Extract the file ACL */
case 2:
@@ -2008,6 +2019,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* Extract the volume ACL length */
case 3:
@@ -2026,9 +2038,10 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
acl->size = call->count2;
afs_extract_begin(call, acl->data, size);
} else {
- iov_iter_discard(&call->iter, READ, size);
+ afs_extract_discard(call, size);
}
call->unmarshall++;
+ /* Fall through */
/* Extract the volume ACL */
case 4:
@@ -2041,6 +2054,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
call->unmarshall++;
+ /* Fall through */
/* extract the metadata */
case 5:
@@ -2057,6 +2071,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
xdr_decode_YFSVolSync(&bp, call->out_volsync);
call->unmarshall++;
+ /* Fall through */
case 6:
break;
@@ -2155,7 +2170,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
size = round_up(acl->size, 4);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+ call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(__be32) + size,
diff --git a/fs/aio.c b/fs/aio.c
index 01e0fb9ae45a..a9fbad2ce5e6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2056,7 +2056,7 @@ static long do_io_getevents(aio_context_t ctx_id,
* specifies an infinite timeout. Note that the timeout pointed to by
* timeout is relative. Will fail with -ENOSYS if not implemented.
*/
-#if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)
+#ifdef CONFIG_64BIT
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
long, min_nr,
@@ -2179,7 +2179,7 @@ SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
#ifdef CONFIG_COMPAT
struct __compat_aio_sigset {
- compat_sigset_t __user *sigmask;
+ compat_uptr_t sigmask;
compat_size_t sigsetsize;
};
@@ -2193,7 +2193,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
struct old_timespec32 __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
- struct __compat_aio_sigset ksig = { NULL, };
+ struct __compat_aio_sigset ksig = { 0, };
struct timespec64 t;
bool interrupted;
int ret;
@@ -2204,7 +2204,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
- ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+ ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
if (ret)
return ret;
@@ -2228,7 +2228,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
struct __kernel_timespec __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
- struct __compat_aio_sigset ksig = { NULL, };
+ struct __compat_aio_sigset ksig = { 0, };
struct timespec64 t;
bool interrupted;
int ret;
@@ -2239,7 +2239,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
- ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+ ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
if (ret)
return ret;
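Storing the user pointer as compat_uptr_t and widening it with compat_ptr(), as
done above, avoids depending on how a __user pointer is laid out inside a
compat structure. The general pattern (do_something() is a placeholder):

	struct compat_foo {
		compat_uptr_t	ptr;	/* 32-bit user pointer value */
		compat_size_t	len;
	};

	static int handle_compat_foo(const struct compat_foo __user *ufoo)
	{
		struct compat_foo kfoo;

		if (copy_from_user(&kfoo, ufoo, sizeof(kfoo)))
			return -EFAULT;

		/* compat_ptr() turns the 32-bit value into a __user pointer */
		return do_something(compat_ptr(kfoo.ptr), kfoo.len);
	}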
diff --git a/fs/attr.c b/fs/attr.c
index d22e8187477f..df28035aa23e 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -183,15 +183,18 @@ void setattr_copy(struct inode *inode, const struct iattr *attr)
inode->i_uid = attr->ia_uid;
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if (ia_valid & ATTR_ATIME)
- inode->i_atime = timespec64_trunc(attr->ia_atime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_MTIME)
- inode->i_mtime = timespec64_trunc(attr->ia_mtime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_CTIME)
- inode->i_ctime = timespec64_trunc(attr->ia_ctime,
- inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_ATIME) {
+ inode->i_atime = timestamp_truncate(attr->ia_atime,
+ inode);
+ }
+ if (ia_valid & ATTR_MTIME) {
+ inode->i_mtime = timestamp_truncate(attr->ia_mtime,
+ inode);
+ }
+ if (ia_valid & ATTR_CTIME) {
+ inode->i_ctime = timestamp_truncate(attr->ia_ctime,
+ inode);
+ }
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
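timestamp_truncate(), used above instead of open-coding timespec64_trunc() with
s_time_gran, also clamps the value to the superblock's supported
[s_time_min, s_time_max] range. A typical caller looks roughly like this
(sketch):

	struct timespec64 now = current_time(inode);

	/* granularity truncation and range clamping in one call */
	inode->i_mtime = timestamp_truncate(now, inode);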
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index 31d616aa6fc9..a1c7701007e7 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -459,9 +459,10 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
*/
how &= ~AUTOFS_EXP_LEAVES;
found = should_expire(expired, mnt, timeout, how);
- if (!found || found != expired)
- /* Something has changed, continue */
+ if (found != expired) { // something has changed, continue
+ dput(found);
goto next;
+ }
if (expired != dentry)
dput(dentry);
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 462d096ff3e9..64cdf4d8e424 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -893,6 +893,8 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
sb_set_blocksize(sb, (ulong) befs_sb->block_size);
sb->s_op = &befs_sops;
sb->s_export_op = &befs_export_operations;
+ sb->s_time_min = 0;
+ sb->s_time_max = 0xffffffffffffll;
root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
if (IS_ERR(root)) {
ret = PTR_ERR(root);
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 5e97bed073d7..f8ce1368218b 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -324,6 +324,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
return -ENOMEM;
mutex_init(&info->bfs_lock);
s->s_fs_info = info;
+ s->s_time_min = 0;
+ s->s_time_max = U32_MAX;
sb_set_blocksize(s, BFS_BSIZE);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d4e11b2e04f6..ecd8d2698515 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -404,6 +404,17 @@ static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
+static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
+{
+ ssize_t rv;
+
+ rv = kernel_read(file, buf, len, &pos);
+ if (unlikely(rv != len)) {
+ return (rv < 0) ? rv : -EIO;
+ }
+ return 0;
+}
+
/**
* load_elf_phdrs() - load ELF program headers
* @elf_ex: ELF header of the binary whose program headers should be loaded
@@ -418,7 +429,6 @@ static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
{
struct elf_phdr *elf_phdata = NULL;
int retval, err = -1;
- loff_t pos = elf_ex->e_phoff;
unsigned int size;
/*
@@ -439,9 +449,9 @@ static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
goto out;
/* Read in the program headers */
- retval = kernel_read(elf_file, elf_phdata, size, &pos);
- if (retval != size) {
- err = (retval < 0) ? retval : -EIO;
+ retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
+ if (retval < 0) {
+ err = retval;
goto out;
}
@@ -544,7 +554,7 @@ static inline int make_prot(u32 p_flags)
an ELF header */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
- struct file *interpreter, unsigned long *interp_map_addr,
+ struct file *interpreter,
unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
struct elf_phdr *eppnt;
@@ -590,8 +600,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
map_addr = elf_map(interpreter, load_addr + vaddr,
eppnt, elf_prot, elf_type, total_size);
total_size = 0;
- if (!*interp_map_addr)
- *interp_map_addr = map_addr;
error = map_addr;
if (BAD_ADDR(map_addr))
goto out;
@@ -670,26 +678,6 @@ out:
* libraries. There is no binary dependent code anywhere else.
*/
-#ifndef STACK_RND_MASK
-#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
-#endif
-
-static unsigned long randomize_stack_top(unsigned long stack_top)
-{
- unsigned long random_variable = 0;
-
- if (current->flags & PF_RANDOMIZE) {
- random_variable = get_random_long();
- random_variable &= STACK_RND_MASK;
- random_variable <<= PAGE_SHIFT;
- }
-#ifdef CONFIG_STACK_GROWSUP
- return PAGE_ALIGN(stack_top) + random_variable;
-#else
- return PAGE_ALIGN(stack_top) - random_variable;
-#endif
-}
-
static int load_elf_binary(struct linux_binprm *bprm)
{
struct file *interpreter = NULL; /* to shut gcc up */
@@ -742,7 +730,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_ppnt = elf_phdata;
for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
char *elf_interpreter;
- loff_t pos;
if (elf_ppnt->p_type != PT_INTERP)
continue;
@@ -760,14 +747,10 @@ static int load_elf_binary(struct linux_binprm *bprm)
if (!elf_interpreter)
goto out_free_ph;
- pos = elf_ppnt->p_offset;
- retval = kernel_read(bprm->file, elf_interpreter,
- elf_ppnt->p_filesz, &pos);
- if (retval != elf_ppnt->p_filesz) {
- if (retval >= 0)
- retval = -EIO;
+ retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
+ elf_ppnt->p_offset);
+ if (retval < 0)
goto out_free_interp;
- }
/* make sure path is NULL terminated */
retval = -ENOEXEC;
if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
@@ -786,14 +769,10 @@ static int load_elf_binary(struct linux_binprm *bprm)
would_dump(bprm, interpreter);
/* Get the exec headers */
- pos = 0;
- retval = kernel_read(interpreter, &loc->interp_elf_ex,
- sizeof(loc->interp_elf_ex), &pos);
- if (retval != sizeof(loc->interp_elf_ex)) {
- if (retval >= 0)
- retval = -EIO;
+ retval = elf_read(interpreter, &loc->interp_elf_ex,
+ sizeof(loc->interp_elf_ex), 0);
+ if (retval < 0)
goto out_free_dentry;
- }
break;
@@ -899,7 +878,7 @@ out_free_interp:
the correct location in memory. */
for(i = 0, elf_ppnt = elf_phdata;
i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
- int elf_prot, elf_flags, elf_fixed = MAP_FIXED_NOREPLACE;
+ int elf_prot, elf_flags;
unsigned long k, vaddr;
unsigned long total_size = 0;
@@ -931,13 +910,6 @@ out_free_interp:
*/
}
}
-
- /*
- * Some binaries have overlapping elf segments and then
- * we have to forcefully map over an existing mapping
- * e.g. over this newly established brk mapping.
- */
- elf_fixed = MAP_FIXED;
}
elf_prot = make_prot(elf_ppnt->p_flags);
@@ -950,7 +922,7 @@ out_free_interp:
* the ET_DYN load_addr calculations, proceed normally.
*/
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
- elf_flags |= elf_fixed;
+ elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
/*
* This logic is run once for the first LOAD Program
@@ -986,7 +958,7 @@ out_free_interp:
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
- elf_flags |= elf_fixed;
+ elf_flags |= MAP_FIXED;
} else
load_bias = 0;
@@ -1081,11 +1053,8 @@ out_free_interp:
}
if (interpreter) {
- unsigned long interp_map_addr = 0;
-
elf_entry = load_elf_interp(&loc->interp_elf_ex,
interpreter,
- &interp_map_addr,
load_bias, interp_elf_phdata);
if (!IS_ERR((void *)elf_entry)) {
/*
@@ -1141,7 +1110,8 @@ out_free_interp:
* (since it grows up, and may collide early with the stack
* growing down), and into the unused ELF_ET_DYN_BASE region.
*/
- if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter)
+ if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
+ loc->elf_ex.e_type == ET_DYN && !interpreter)
current->mm->brk = current->mm->start_brk =
ELF_ET_DYN_BASE;
@@ -1205,11 +1175,10 @@ static int load_elf_library(struct file *file)
unsigned long elf_bss, bss, len;
int retval, error, i, j;
struct elfhdr elf_ex;
- loff_t pos = 0;
error = -ENOEXEC;
- retval = kernel_read(file, &elf_ex, sizeof(elf_ex), &pos);
- if (retval != sizeof(elf_ex))
+ retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
+ if (retval < 0)
goto out;
if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
@@ -1234,9 +1203,8 @@ static int load_elf_library(struct file *file)
eppnt = elf_phdata;
error = -ENOEXEC;
- pos = elf_ex.e_phoff;
- retval = kernel_read(file, eppnt, j, &pos);
- if (retval != j)
+ retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
+ if (retval < 0)
goto out_free_ph;
for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
@@ -1515,18 +1483,18 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
* group-wide total, not its individual thread total.
*/
thread_group_cputime(p, &cputime);
- prstatus->pr_utime = ns_to_timeval(cputime.utime);
- prstatus->pr_stime = ns_to_timeval(cputime.stime);
+ prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
+ prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
} else {
u64 utime, stime;
task_cputime(p, &utime, &stime);
- prstatus->pr_utime = ns_to_timeval(utime);
- prstatus->pr_stime = ns_to_timeval(stime);
+ prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
+ prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
}
- prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
- prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
+ prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
+ prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
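The new elf_read() helper above folds the repeated "short read means -EIO"
checks into one place; the same wrapper shape works for any kernel_read()
caller that wants all-or-nothing semantics (sketch):

	static int read_exact(struct file *file, void *buf, size_t len, loff_t pos)
	{
		ssize_t rv = kernel_read(file, buf, len, &pos);

		if (rv < 0)
			return rv;	/* propagate the real error */
		if (rv != len)
			return -EIO;	/* short read: treat as an I/O error */
		return 0;
	}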
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index d86ebd0dcc3d..240f66663543 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1359,17 +1359,17 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
* group-wide total, not its individual thread total.
*/
thread_group_cputime(p, &cputime);
- prstatus->pr_utime = ns_to_timeval(cputime.utime);
- prstatus->pr_stime = ns_to_timeval(cputime.stime);
+ prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
+ prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
} else {
u64 utime, stime;
task_cputime(p, &utime, &stime);
- prstatus->pr_utime = ns_to_timeval(utime);
- prstatus->pr_stime = ns_to_timeval(stime);
+ prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
+ prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
}
- prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
- prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
+ prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
+ prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4707dfff991b..ee63c2732fa2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1139,8 +1139,7 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
* Pointer to the block device containing @bdev on success, ERR_PTR()
* value on failure.
*/
-static struct block_device *bd_start_claiming(struct block_device *bdev,
- void *holder)
+struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
{
struct gendisk *disk;
struct block_device *whole;
@@ -1187,6 +1186,62 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
return ERR_PTR(err);
}
}
+EXPORT_SYMBOL(bd_start_claiming);
+
+static void bd_clear_claiming(struct block_device *whole, void *holder)
+{
+ lockdep_assert_held(&bdev_lock);
+ /* tell others that we're done */
+ BUG_ON(whole->bd_claiming != holder);
+ whole->bd_claiming = NULL;
+ wake_up_bit(&whole->bd_claiming, 0);
+}
+
+/**
+ * bd_finish_claiming - finish claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Finish exclusive open of a block device. Mark the device as exclusively
+ * open by the holder and wake up all waiters for exclusive open to finish.
+ */
+void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
+ void *holder)
+{
+ spin_lock(&bdev_lock);
+ BUG_ON(!bd_may_claim(bdev, whole, holder));
+ /*
+ * Note that for a whole device bd_holders will be incremented twice,
+ * and bd_holder will be set to bd_may_claim before being set to holder
+ */
+ whole->bd_holders++;
+ whole->bd_holder = bd_may_claim;
+ bdev->bd_holders++;
+ bdev->bd_holder = holder;
+ bd_clear_claiming(whole, holder);
+ spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_finish_claiming);
+
+/**
+ * bd_abort_claiming - abort claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Abort claiming of a block device when the exclusive open failed. This can
+ * also be used when exclusive open is not actually desired and we just needed
+ * to block other exclusive openers for a while.
+ */
+void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
+ void *holder)
+{
+ spin_lock(&bdev_lock);
+ bd_clear_claiming(whole, holder);
+ spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_abort_claiming);
#ifdef CONFIG_SYSFS
struct bd_holder_disk {
@@ -1348,11 +1403,7 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
"resized disk %s\n",
bdev->bd_disk ? bdev->bd_disk->disk_name : "");
}
-
- if (!bdev->bd_disk)
- return;
- if (disk_part_scan_enabled(bdev->bd_disk))
- bdev->bd_invalidated = 1;
+ bdev->bd_invalidated = 1;
}
/**
@@ -1365,8 +1416,8 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
* and adjusts it if it differs. When shrinking the bdev size, its all caches
* are freed.
*/
-void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
- bool verbose)
+static void check_disk_size_change(struct gendisk *disk,
+ struct block_device *bdev, bool verbose)
{
loff_t disk_size, bdev_size;
@@ -1382,6 +1433,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
if (bdev_size > disk_size)
flush_disk(bdev, false);
}
+ bdev->bd_invalidated = 0;
}
/**
@@ -1411,7 +1463,6 @@ int revalidate_disk(struct gendisk *disk)
mutex_lock(&bdev->bd_mutex);
check_disk_size_change(disk, bdev, ret == 0);
- bdev->bd_invalidated = 0;
mutex_unlock(&bdev->bd_mutex);
bdput(bdev);
}
@@ -1457,6 +1508,45 @@ EXPORT_SYMBOL(bd_set_size);
static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
+int bdev_disk_changed(struct block_device *bdev, bool invalidate)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ int ret;
+
+ lockdep_assert_held(&bdev->bd_mutex);
+
+rescan:
+ ret = blk_drop_partitions(disk, bdev);
+ if (ret)
+ return ret;
+
+ if (invalidate)
+ set_capacity(disk, 0);
+ else if (disk->fops->revalidate_disk)
+ disk->fops->revalidate_disk(disk);
+
+ check_disk_size_change(disk, bdev, !invalidate);
+
+ if (get_capacity(disk)) {
+ ret = blk_add_partitions(disk, bdev);
+ if (ret == -EAGAIN)
+ goto rescan;
+ } else {
+ /*
+ * Tell userspace that the media / partition table may have
+ * changed.
+ */
+ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+ }
+
+ return ret;
+}
+/*
+ * Only exported for loop and dasd for historic reasons. Don't use in new
+ * code!
+ */
+EXPORT_SYMBOL_GPL(bdev_disk_changed);
+
/*
* bd_mutex locking:
*
@@ -1539,12 +1629,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
* The latter is necessary to prevent ghost
* partitions on a removed medium.
*/
- if (bdev->bd_invalidated) {
- if (!ret)
- rescan_partitions(disk, bdev);
- else if (ret == -ENOMEDIUM)
- invalidate_partitions(disk, bdev);
- }
+ if (bdev->bd_invalidated &&
+ (!ret || ret == -ENOMEDIUM))
+ bdev_disk_changed(bdev, ret == -ENOMEDIUM);
if (ret)
goto out_clear;
@@ -1577,12 +1664,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (bdev->bd_disk->fops->open)
ret = bdev->bd_disk->fops->open(bdev, mode);
/* the same as first opener case, read comment there */
- if (bdev->bd_invalidated) {
- if (!ret)
- rescan_partitions(bdev->bd_disk, bdev);
- else if (ret == -ENOMEDIUM)
- invalidate_partitions(bdev->bd_disk, bdev);
- }
+ if (bdev->bd_invalidated &&
+ (!ret || ret == -ENOMEDIUM))
+ bdev_disk_changed(bdev, ret == -ENOMEDIUM);
if (ret)
goto out_unlock_bdev;
}
@@ -1656,29 +1740,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
/* finish claiming */
mutex_lock(&bdev->bd_mutex);
- spin_lock(&bdev_lock);
-
- if (!res) {
- BUG_ON(!bd_may_claim(bdev, whole, holder));
- /*
- * Note that for a whole device bd_holders
- * will be incremented twice, and bd_holder
- * will be set to bd_may_claim before being
- * set to holder
- */
- whole->bd_holders++;
- whole->bd_holder = bd_may_claim;
- bdev->bd_holders++;
- bdev->bd_holder = holder;
- }
-
- /* tell others that we're done */
- BUG_ON(whole->bd_claiming != holder);
- whole->bd_claiming = NULL;
- wake_up_bit(&whole->bd_claiming, 0);
-
- spin_unlock(&bdev_lock);
-
+ if (!res)
+ bd_finish_claiming(bdev, whole, holder);
+ else
+ bd_abort_claiming(bdev, whole, holder);
/*
* Block event polling for write claims if requested. Any
* write holder makes the write_holder state stick until
@@ -1936,6 +2001,9 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (bdev_read_only(I_BDEV(bd_inode)))
return -EPERM;
+ if (IS_SWAPFILE(bd_inode))
+ return -ETXTBSY;
+
if (!iov_iter_count(from))
return 0;
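Exporting bd_start_claiming() and adding bd_finish_claiming()/bd_abort_claiming()
above lets a driver hold off other exclusive openers across its own setup work;
the intended call sequence is roughly as follows (bdev/holder as passed to
blkdev_get(), do_setup() a placeholder, error handling trimmed):

	struct block_device *whole;

	whole = bd_start_claiming(bdev, holder);
	if (IS_ERR(whole))
		return PTR_ERR(whole);

	if (do_setup(bdev) == 0)
		bd_finish_claiming(bdev, whole, holder);
	else
		bd_abort_claiming(bdev, whole, holder);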
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 212b4a854f2c..75b6d10c9845 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -4,6 +4,9 @@ config BTRFS_FS
tristate "Btrfs filesystem support"
select CRYPTO
select CRYPTO_CRC32C
+ select LIBCRC32C
+ select CRYPTO_XXHASH
+ select CRYPTO_SHA256
select ZLIB_INFLATE
select ZLIB_DEFLATE
select LZO_COMPRESS
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 76a843198bcb..82200dbca5ac 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -11,7 +11,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
- block-rsv.o delalloc-space.o
+ block-rsv.o delalloc-space.o block-group.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 122cb97c7909..1d32a07bb2d1 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -12,9 +12,11 @@
#include "async-thread.h"
#include "ctree.h"
-#define WORK_DONE_BIT 0
-#define WORK_ORDER_DONE_BIT 1
-#define WORK_HIGH_PRIO_BIT 2
+enum {
+ WORK_DONE_BIT,
+ WORK_ORDER_DONE_BIT,
+ WORK_HIGH_PRIO_BIT,
+};
#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)
@@ -51,24 +53,12 @@ struct btrfs_workqueue {
struct __btrfs_workqueue *high;
};
-static void normal_work_helper(struct btrfs_work *work);
-
-#define BTRFS_WORK_HELPER(name) \
-noinline_for_stack void btrfs_##name(struct work_struct *arg) \
-{ \
- struct btrfs_work *work = container_of(arg, struct btrfs_work, \
- normal_work); \
- normal_work_helper(work); \
-}
-
-struct btrfs_fs_info *
-btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
return wq->fs_info;
}
-struct btrfs_fs_info *
-btrfs_work_owner(const struct btrfs_work *work)
+struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
return work->wq->fs_info;
}
@@ -87,29 +77,6 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}
-BTRFS_WORK_HELPER(worker_helper);
-BTRFS_WORK_HELPER(delalloc_helper);
-BTRFS_WORK_HELPER(flush_delalloc_helper);
-BTRFS_WORK_HELPER(cache_helper);
-BTRFS_WORK_HELPER(submit_helper);
-BTRFS_WORK_HELPER(fixup_helper);
-BTRFS_WORK_HELPER(endio_helper);
-BTRFS_WORK_HELPER(endio_meta_helper);
-BTRFS_WORK_HELPER(endio_meta_write_helper);
-BTRFS_WORK_HELPER(endio_raid56_helper);
-BTRFS_WORK_HELPER(endio_repair_helper);
-BTRFS_WORK_HELPER(rmw_helper);
-BTRFS_WORK_HELPER(endio_write_helper);
-BTRFS_WORK_HELPER(freespace_write_helper);
-BTRFS_WORK_HELPER(delayed_meta_helper);
-BTRFS_WORK_HELPER(readahead_helper);
-BTRFS_WORK_HELPER(qgroup_rescan_helper);
-BTRFS_WORK_HELPER(extent_refs_helper);
-BTRFS_WORK_HELPER(scrub_helper);
-BTRFS_WORK_HELPER(scrubwrc_helper);
-BTRFS_WORK_HELPER(scrubnc_helper);
-BTRFS_WORK_HELPER(scrubparity_helper);
-
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
unsigned int flags, int limit_active, int thresh)
@@ -250,16 +217,16 @@ out:
}
}
-static void run_ordered_work(struct __btrfs_workqueue *wq)
+static void run_ordered_work(struct __btrfs_workqueue *wq,
+ struct btrfs_work *self)
{
struct list_head *list = &wq->ordered_list;
struct btrfs_work *work;
spinlock_t *lock = &wq->list_lock;
unsigned long flags;
+ bool free_self = false;
while (1) {
- void *wtag;
-
spin_lock_irqsave(lock, flags);
if (list_empty(list))
break;
@@ -285,22 +252,53 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
list_del(&work->ordered_list);
spin_unlock_irqrestore(lock, flags);
- /*
- * We don't want to call the ordered free functions with the
- * lock held though. Save the work as tag for the trace event,
- * because the callback could free the structure.
- */
- wtag = work;
- work->ordered_free(work);
- trace_btrfs_all_work_done(wq->fs_info, wtag);
+ if (work == self) {
+ /*
+ * This is the work item that the worker is currently
+ * executing.
+ *
+ * The kernel workqueue code guarantees non-reentrancy
+ * of work items. I.e., if a work item with the same
+ * address and work function is queued twice, the second
+ * execution is blocked until the first one finishes. A
+ * work item may be freed and recycled with the same
+ * work function; the workqueue code assumes that the
+ * original work item cannot depend on the recycled work
+ * item in that case (see find_worker_executing_work()).
+ *
+ * Note that different types of Btrfs work can depend on
+ * each other, and one type of work on one Btrfs
+ * filesystem may even depend on the same type of work
+ * on another Btrfs filesystem via, e.g., a loop device.
+ * Therefore, we must not allow the current work item to
+ * be recycled until we are really done, otherwise we
+ * break the above assumption and can deadlock.
+ */
+ free_self = true;
+ } else {
+ /*
+ * We don't want to call the ordered free functions with
+ * the lock held.
+ */
+ work->ordered_free(work);
+ /* NB: work must not be dereferenced past this point. */
+ trace_btrfs_all_work_done(wq->fs_info, work);
+ }
}
spin_unlock_irqrestore(lock, flags);
+
+ if (free_self) {
+ self->ordered_free(self);
+ /* NB: self must not be dereferenced past this point. */
+ trace_btrfs_all_work_done(wq->fs_info, self);
+ }
}
-static void normal_work_helper(struct btrfs_work *work)
+static void btrfs_work_helper(struct work_struct *normal_work)
{
+ struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
+ normal_work);
struct __btrfs_workqueue *wq;
- void *wtag;
int need_order = 0;
/*
@@ -314,29 +312,26 @@ static void normal_work_helper(struct btrfs_work *work)
if (work->ordered_func)
need_order = 1;
wq = work->wq;
- /* Safe for tracepoints in case work gets freed by the callback */
- wtag = work;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq);
work->func(work);
if (need_order) {
set_bit(WORK_DONE_BIT, &work->flags);
- run_ordered_work(wq);
+ run_ordered_work(wq, work);
+ } else {
+ /* NB: work must not be dereferenced past this point. */
+ trace_btrfs_all_work_done(wq->fs_info, work);
}
- if (!need_order)
- trace_btrfs_all_work_done(wq->fs_info, wtag);
}
-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
- btrfs_func_t func,
- btrfs_func_t ordered_func,
- btrfs_func_t ordered_free)
+void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
+ btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
work->func = func;
work->ordered_func = ordered_func;
work->ordered_free = ordered_free;
- INIT_WORK(&work->normal_work, uniq_func);
+ INIT_WORK(&work->normal_work, btrfs_work_helper);
INIT_LIST_HEAD(&work->ordered_list);
work->flags = 0;
}
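With the per-helper wrappers removed, every work item now goes through the
single btrfs_work_helper() entry point, so initialisation reduces to the new
three-callback form shown in the prototype above (sketch of a caller):

	/* func / ordered_func / ordered_free are the caller's btrfs_func_t hooks */
	btrfs_init_work(&async->work, run_async_func, run_ordered_func, free_async);
	btrfs_queue_work(fs_info->workers, &async->work);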
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 7861c9feba5f..a4434301d84d 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -29,49 +29,20 @@ struct btrfs_work {
unsigned long flags;
};
-#define BTRFS_WORK_HELPER_PROTO(name) \
-void btrfs_##name(struct work_struct *arg)
-
-BTRFS_WORK_HELPER_PROTO(worker_helper);
-BTRFS_WORK_HELPER_PROTO(delalloc_helper);
-BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
-BTRFS_WORK_HELPER_PROTO(cache_helper);
-BTRFS_WORK_HELPER_PROTO(submit_helper);
-BTRFS_WORK_HELPER_PROTO(fixup_helper);
-BTRFS_WORK_HELPER_PROTO(endio_helper);
-BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
-BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
-BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
-BTRFS_WORK_HELPER_PROTO(endio_repair_helper);
-BTRFS_WORK_HELPER_PROTO(rmw_helper);
-BTRFS_WORK_HELPER_PROTO(endio_write_helper);
-BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
-BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
-BTRFS_WORK_HELPER_PROTO(readahead_helper);
-BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
-BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
-BTRFS_WORK_HELPER_PROTO(scrub_helper);
-BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
-BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
-BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
-
-
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
const char *name,
unsigned int flags,
int limit_active,
int thresh);
-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
- btrfs_func_t func,
- btrfs_func_t ordered_func,
- btrfs_func_t ordered_free);
+void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
+ btrfs_func_t ordered_func, btrfs_func_t ordered_free);
void btrfs_queue_work(struct btrfs_workqueue *wq,
struct btrfs_work *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work *work);
-struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work);
-struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
+struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work);
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
#endif
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 89116afda7a2..e5d85311d5d5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1483,7 +1483,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
ulist_init(roots);
ulist_init(tmp);
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_join_transaction_nostart(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
ret = PTR_ERR(trans);
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
new file mode 100644
index 000000000000..6934a5b8708f
--- /dev/null
+++ b/fs/btrfs/block-group.c
@@ -0,0 +1,3189 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "misc.h"
+#include "ctree.h"
+#include "block-group.h"
+#include "space-info.h"
+#include "disk-io.h"
+#include "free-space-cache.h"
+#include "free-space-tree.h"
+#include "disk-io.h"
+#include "volumes.h"
+#include "transaction.h"
+#include "ref-verify.h"
+#include "sysfs.h"
+#include "tree-log.h"
+#include "delalloc-space.h"
+
+/*
+ * Return target flags in extended format or 0 if restripe for this chunk_type
+ * is not in progress
+ *
+ * Should be called with balance_lock held
+ */
+static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+{
+ struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+ u64 target = 0;
+
+ if (!bctl)
+ return 0;
+
+ if (flags & BTRFS_BLOCK_GROUP_DATA &&
+ bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+ target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+ } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
+ bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+ target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+ } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
+ bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+ target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+ }
+
+ return target;
+}
+
+/*
+ * @flags: available profiles in extended format (see ctree.h)
+ *
+ * Return reduced profile in chunk format. If profile changing is in progress
+ * (either running or paused) picks the target profile (if it's already
+ * available), otherwise falls back to plain reducing.
+ */
+static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
+{
+ u64 num_devices = fs_info->fs_devices->rw_devices;
+ u64 target;
+ u64 raid_type;
+ u64 allowed = 0;
+
+ /*
+ * See if restripe for this chunk_type is in progress, if so try to
+ * reduce to the target profile
+ */
+ spin_lock(&fs_info->balance_lock);
+ target = get_restripe_target(fs_info, flags);
+ if (target) {
+ /* Pick target profile only if it's already available */
+ if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
+ spin_unlock(&fs_info->balance_lock);
+ return extended_to_chunk(target);
+ }
+ }
+ spin_unlock(&fs_info->balance_lock);
+
+ /* First, mask out the RAID levels which aren't possible */
+ for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
+ if (num_devices >= btrfs_raid_array[raid_type].devs_min)
+ allowed |= btrfs_raid_array[raid_type].bg_flag;
+ }
+ allowed &= flags;
+
+ if (allowed & BTRFS_BLOCK_GROUP_RAID6)
+ allowed = BTRFS_BLOCK_GROUP_RAID6;
+ else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
+ allowed = BTRFS_BLOCK_GROUP_RAID5;
+ else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
+ allowed = BTRFS_BLOCK_GROUP_RAID10;
+ else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
+ allowed = BTRFS_BLOCK_GROUP_RAID1;
+ else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
+ allowed = BTRFS_BLOCK_GROUP_RAID0;
+
+ flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+ return extended_to_chunk(flags | allowed);
+}
+
+static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
+{
+ unsigned seq;
+ u64 flags;
+
+ do {
+ flags = orig_flags;
+ seq = read_seqbegin(&fs_info->profiles_lock);
+
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ flags |= fs_info->avail_data_alloc_bits;
+ else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ flags |= fs_info->avail_system_alloc_bits;
+ else if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ flags |= fs_info->avail_metadata_alloc_bits;
+ } while (read_seqretry(&fs_info->profiles_lock, seq));
+
+ return btrfs_reduce_alloc_profile(fs_info, flags);
+}
+
+u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
+{
+ return get_alloc_profile(fs_info, orig_flags);
+}
+
+void btrfs_get_block_group(struct btrfs_block_group *cache)
+{
+ atomic_inc(&cache->count);
+}
+
+void btrfs_put_block_group(struct btrfs_block_group *cache)
+{
+ if (atomic_dec_and_test(&cache->count)) {
+ WARN_ON(cache->pinned > 0);
+ WARN_ON(cache->reserved > 0);
+
+ /*
+		 * If not empty, someone is still holding the mutex of
+		 * full_stripe_lock, which can only be released by the caller,
+		 * and freeing the block group now would cause a use-after-free
+		 * when that caller tries to release the full stripe lock.
+		 *
+		 * There is no better way to resolve this than to warn.
+ */
+ WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
+ kfree(cache->free_space_ctl);
+ kfree(cache);
+ }
+}
+
+/*
+ * This adds the block group to the fs_info rb tree for the block group cache
+ */
+static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
+ struct btrfs_block_group *block_group)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct btrfs_block_group *cache;
+
+ spin_lock(&info->block_group_cache_lock);
+ p = &info->block_group_cache_tree.rb_node;
+
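+	/* Walk down the rbtree, which is keyed by block group start offset. */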
+ while (*p) {
+ parent = *p;
+ cache = rb_entry(parent, struct btrfs_block_group, cache_node);
+ if (block_group->start < cache->start) {
+ p = &(*p)->rb_left;
+ } else if (block_group->start > cache->start) {
+ p = &(*p)->rb_right;
+ } else {
+ spin_unlock(&info->block_group_cache_lock);
+ return -EEXIST;
+ }
+ }
+
+ rb_link_node(&block_group->cache_node, parent, p);
+ rb_insert_color(&block_group->cache_node,
+ &info->block_group_cache_tree);
+
+ if (info->first_logical_byte > block_group->start)
+ info->first_logical_byte = block_group->start;
+
+ spin_unlock(&info->block_group_cache_lock);
+
+ return 0;
+}
+
+/*
+ * This will return the block group at or after bytenr if contains is 0, else
+ * it will return the block group that contains the bytenr
+ */
+static struct btrfs_block_group *block_group_cache_tree_search(
+ struct btrfs_fs_info *info, u64 bytenr, int contains)
+{
+ struct btrfs_block_group *cache, *ret = NULL;
+ struct rb_node *n;
+ u64 end, start;
+
+ spin_lock(&info->block_group_cache_lock);
+ n = info->block_group_cache_tree.rb_node;
+
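+	/*
+	 * Binary search the rbtree. When @contains is 0, track the closest
+	 * block group starting after @bytenr as a fallback result.
+	 */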
+ while (n) {
+ cache = rb_entry(n, struct btrfs_block_group, cache_node);
+ end = cache->start + cache->length - 1;
+ start = cache->start;
+
+ if (bytenr < start) {
+ if (!contains && (!ret || start < ret->start))
+ ret = cache;
+ n = n->rb_left;
+ } else if (bytenr > start) {
+ if (contains && bytenr <= end) {
+ ret = cache;
+ break;
+ }
+ n = n->rb_right;
+ } else {
+ ret = cache;
+ break;
+ }
+ }
+ if (ret) {
+ btrfs_get_block_group(ret);
+ if (bytenr == 0 && info->first_logical_byte > ret->start)
+ info->first_logical_byte = ret->start;
+ }
+ spin_unlock(&info->block_group_cache_lock);
+
+ return ret;
+}
+
+/*
+ * Return the block group that starts at or after bytenr
+ */
+struct btrfs_block_group *btrfs_lookup_first_block_group(
+ struct btrfs_fs_info *info, u64 bytenr)
+{
+ return block_group_cache_tree_search(info, bytenr, 0);
+}
+
+/*
+ * Return the block group that contains the given bytenr
+ */
+struct btrfs_block_group *btrfs_lookup_block_group(
+ struct btrfs_fs_info *info, u64 bytenr)
+{
+ return block_group_cache_tree_search(info, bytenr, 1);
+}
+
+struct btrfs_block_group *btrfs_next_block_group(
+ struct btrfs_block_group *cache)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct rb_node *node;
+
+ spin_lock(&fs_info->block_group_cache_lock);
+
+ /* If our block group was removed, we need a full search. */
+ if (RB_EMPTY_NODE(&cache->cache_node)) {
+ const u64 next_bytenr = cache->start + cache->length;
+
+ spin_unlock(&fs_info->block_group_cache_lock);
+ btrfs_put_block_group(cache);
+		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
+		return cache;
+ }
+ node = rb_next(&cache->cache_node);
+ btrfs_put_block_group(cache);
+ if (node) {
+ cache = rb_entry(node, struct btrfs_block_group, cache_node);
+ btrfs_get_block_group(cache);
+ } else
+ cache = NULL;
+ spin_unlock(&fs_info->block_group_cache_lock);
+ return cache;
+}
+
+bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+ struct btrfs_block_group *bg;
+ bool ret = true;
+
+ bg = btrfs_lookup_block_group(fs_info, bytenr);
+ if (!bg)
+ return false;
+
+ spin_lock(&bg->lock);
+ if (bg->ro)
+ ret = false;
+ else
+ atomic_inc(&bg->nocow_writers);
+ spin_unlock(&bg->lock);
+
+ /* No put on block group, done by btrfs_dec_nocow_writers */
+ if (!ret)
+ btrfs_put_block_group(bg);
+
+ return ret;
+}
+
+void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+ struct btrfs_block_group *bg;
+
+ bg = btrfs_lookup_block_group(fs_info, bytenr);
+ ASSERT(bg);
+ if (atomic_dec_and_test(&bg->nocow_writers))
+ wake_up_var(&bg->nocow_writers);
+ /*
+ * Once for our lookup and once for the lookup done by a previous call
+ * to btrfs_inc_nocow_writers()
+ */
+ btrfs_put_block_group(bg);
+ btrfs_put_block_group(bg);
+}
+
+void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
+{
+ wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
+}
+
+void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
+ const u64 start)
+{
+ struct btrfs_block_group *bg;
+
+ bg = btrfs_lookup_block_group(fs_info, start);
+ ASSERT(bg);
+ if (atomic_dec_and_test(&bg->reservations))
+ wake_up_var(&bg->reservations);
+ btrfs_put_block_group(bg);
+}
+
+void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
+{
+ struct btrfs_space_info *space_info = bg->space_info;
+
+ ASSERT(bg->ro);
+
+ if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
+ return;
+
+ /*
+ * Our block group is read only but before we set it to read only,
+	 * some task might have already allocated an extent from it, but it
+ * has not yet created a respective ordered extent (and added it to a
+ * root's list of ordered extents).
+ * Therefore wait for any task currently allocating extents, since the
+ * block group's reservations counter is incremented while a read lock
+ * on the groups' semaphore is held and decremented after releasing
+ * the read access on that semaphore and creating the ordered extent.
+ */
+ down_write(&space_info->groups_sem);
+ up_write(&space_info->groups_sem);
+
+ wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
+}
+
+struct btrfs_caching_control *btrfs_get_caching_control(
+ struct btrfs_block_group *cache)
+{
+ struct btrfs_caching_control *ctl;
+
+ spin_lock(&cache->lock);
+ if (!cache->caching_ctl) {
+ spin_unlock(&cache->lock);
+ return NULL;
+ }
+
+ ctl = cache->caching_ctl;
+ refcount_inc(&ctl->count);
+ spin_unlock(&cache->lock);
+ return ctl;
+}
+
+void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
+{
+ if (refcount_dec_and_test(&ctl->count))
+ kfree(ctl);
+}
+
+/*
+ * When we wait for progress in the block group caching, it's because our
+ * allocation attempt failed at least once. So, we must sleep and let some
+ * progress happen before we try again.
+ *
+ * This function will sleep at least once waiting for new free space to show
+ * up, and then it will check the block group free space numbers for our min
+ * num_bytes. Another option is to have it go ahead and look in the rbtree for
+ * a free extent of a given size, but this is a good start.
+ *
+ * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
+ * any of the information in this block group.
+ */
+void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
+ u64 num_bytes)
+{
+ struct btrfs_caching_control *caching_ctl;
+
+ caching_ctl = btrfs_get_caching_control(cache);
+ if (!caching_ctl)
+ return;
+
+ wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
+ (cache->free_space_ctl->free_space >= num_bytes));
+
+ btrfs_put_caching_control(caching_ctl);
+}
+
+int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
+{
+ struct btrfs_caching_control *caching_ctl;
+ int ret = 0;
+
+ caching_ctl = btrfs_get_caching_control(cache);
+ if (!caching_ctl)
+ return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
+
+ wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
+ if (cache->cached == BTRFS_CACHE_ERROR)
+ ret = -EIO;
+ btrfs_put_caching_control(caching_ctl);
+ return ret;
+}
+
+#ifdef CONFIG_BTRFS_DEBUG
+static void fragment_free_space(struct btrfs_block_group *block_group)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ u64 start = block_group->start;
+ u64 len = block_group->length;
+ u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
+ fs_info->nodesize : fs_info->sectorsize;
+ u64 step = chunk << 1;
+
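+	/* Remove every other chunk-sized range so the free space ends up fragmented. */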
+ while (len > chunk) {
+ btrfs_remove_free_space(block_group, start, chunk);
+ start += step;
+ if (len < step)
+ len = 0;
+ else
+ len -= step;
+ }
+}
+#endif
+
+/*
+ * This is only called by btrfs_cache_block_group, since we could have freed
+ * extents we need to check the pinned_extents for any extents that can't be
+ * used yet since their free space will be released as soon as the transaction
+ * commits.
+ */
+u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
+{
+ struct btrfs_fs_info *info = block_group->fs_info;
+ u64 extent_start, extent_end, size, total_added = 0;
+ int ret;
+
+ while (start < end) {
+ ret = find_first_extent_bit(info->pinned_extents, start,
+ &extent_start, &extent_end,
+ EXTENT_DIRTY | EXTENT_UPTODATE,
+ NULL);
+ if (ret)
+ break;
+
+ if (extent_start <= start) {
+ start = extent_end + 1;
+ } else if (extent_start > start && extent_start < end) {
+ size = extent_start - start;
+ total_added += size;
+ ret = btrfs_add_free_space(block_group, start,
+ size);
+ BUG_ON(ret); /* -ENOMEM or logic error */
+ start = extent_end + 1;
+ } else {
+ break;
+ }
+ }
+
+ if (start < end) {
+ size = end - start;
+ total_added += size;
+ ret = btrfs_add_free_space(block_group, start, size);
+ BUG_ON(ret); /* -ENOMEM or logic error */
+ }
+
+ return total_added;
+}
+
+static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
+{
+ struct btrfs_block_group *block_group = caching_ctl->block_group;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ struct btrfs_key key;
+ u64 total_found = 0;
+ u64 last = 0;
+ u32 nritems;
+ int ret;
+ bool wakeup = true;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
+
+#ifdef CONFIG_BTRFS_DEBUG
+ /*
+ * If we're fragmenting we don't want to make anybody think we can
+ * allocate from this block group until we've had a chance to fragment
+ * the free space.
+ */
+ if (btrfs_should_fragment_free_space(block_group))
+ wakeup = false;
+#endif
+ /*
+ * We don't want to deadlock with somebody trying to allocate a new
+ * extent for the extent root while also trying to search the extent
+ * root to add free space. So we skip locking and search the commit
+	 * root, since it's read-only.
+ */
+ path->skip_locking = 1;
+ path->search_commit_root = 1;
+ path->reada = READA_FORWARD;
+
+ key.objectid = last;
+ key.offset = 0;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+
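+	/* (Re)start the extent tree search from @key; we jump back here after dropping the path. */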
+next:
+ ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ leaf = path->nodes[0];
+ nritems = btrfs_header_nritems(leaf);
+
+ while (1) {
+ if (btrfs_fs_closing(fs_info) > 1) {
+ last = (u64)-1;
+ break;
+ }
+
+ if (path->slots[0] < nritems) {
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ } else {
+ ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
+ if (ret)
+ break;
+
+ if (need_resched() ||
+ rwsem_is_contended(&fs_info->commit_root_sem)) {
+ if (wakeup)
+ caching_ctl->progress = last;
+ btrfs_release_path(path);
+ up_read(&fs_info->commit_root_sem);
+ mutex_unlock(&caching_ctl->mutex);
+ cond_resched();
+ mutex_lock(&caching_ctl->mutex);
+ down_read(&fs_info->commit_root_sem);
+ goto next;
+ }
+
+ ret = btrfs_next_leaf(extent_root, path);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ break;
+ leaf = path->nodes[0];
+ nritems = btrfs_header_nritems(leaf);
+ continue;
+ }
+
+ if (key.objectid < last) {
+ key.objectid = last;
+ key.offset = 0;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+
+ if (wakeup)
+ caching_ctl->progress = last;
+ btrfs_release_path(path);
+ goto next;
+ }
+
+ if (key.objectid < block_group->start) {
+ path->slots[0]++;
+ continue;
+ }
+
+ if (key.objectid >= block_group->start + block_group->length)
+ break;
+
+ if (key.type == BTRFS_EXTENT_ITEM_KEY ||
+ key.type == BTRFS_METADATA_ITEM_KEY) {
+ total_found += add_new_free_space(block_group, last,
+ key.objectid);
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ last = key.objectid +
+ fs_info->nodesize;
+ else
+ last = key.objectid + key.offset;
+
+ if (total_found > CACHING_CTL_WAKE_UP) {
+ total_found = 0;
+ if (wakeup)
+ wake_up(&caching_ctl->wait);
+ }
+ }
+ path->slots[0]++;
+ }
+ ret = 0;
+
+ total_found += add_new_free_space(block_group, last,
+ block_group->start + block_group->length);
+ caching_ctl->progress = (u64)-1;
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static noinline void caching_thread(struct btrfs_work *work)
+{
+ struct btrfs_block_group *block_group;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_caching_control *caching_ctl;
+ int ret;
+
+ caching_ctl = container_of(work, struct btrfs_caching_control, work);
+ block_group = caching_ctl->block_group;
+ fs_info = block_group->fs_info;
+
+ mutex_lock(&caching_ctl->mutex);
+ down_read(&fs_info->commit_root_sem);
+
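+	/* Use the free space tree if present, otherwise scan the extent tree. */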
+ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+ ret = load_free_space_tree(caching_ctl);
+ else
+ ret = load_extent_tree_free(caching_ctl);
+
+ spin_lock(&block_group->lock);
+ block_group->caching_ctl = NULL;
+ block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
+ spin_unlock(&block_group->lock);
+
+#ifdef CONFIG_BTRFS_DEBUG
+ if (btrfs_should_fragment_free_space(block_group)) {
+ u64 bytes_used;
+
+ spin_lock(&block_group->space_info->lock);
+ spin_lock(&block_group->lock);
+ bytes_used = block_group->length - block_group->used;
+ block_group->space_info->bytes_used += bytes_used >> 1;
+ spin_unlock(&block_group->lock);
+ spin_unlock(&block_group->space_info->lock);
+ fragment_free_space(block_group);
+ }
+#endif
+
+ caching_ctl->progress = (u64)-1;
+
+ up_read(&fs_info->commit_root_sem);
+ btrfs_free_excluded_extents(block_group);
+ mutex_unlock(&caching_ctl->mutex);
+
+ wake_up(&caching_ctl->wait);
+
+ btrfs_put_caching_control(caching_ctl);
+ btrfs_put_block_group(block_group);
+}
+
+int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
+{
+ DEFINE_WAIT(wait);
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_caching_control *caching_ctl;
+ int ret = 0;
+
+ caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+ if (!caching_ctl)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&caching_ctl->list);
+ mutex_init(&caching_ctl->mutex);
+ init_waitqueue_head(&caching_ctl->wait);
+ caching_ctl->block_group = cache;
+ caching_ctl->progress = cache->start;
+ refcount_set(&caching_ctl->count, 1);
+ btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
+
+ spin_lock(&cache->lock);
+ /*
+	 * This should be a rare occasion, but it can happen when one thread
+	 * starts to load the space cache info and another thread then starts
+	 * a transaction commit which tries to do an allocation while the
+	 * first thread is still loading the space cache info. The previous
+	 * loop should have kept us from choosing this block group, but if
+	 * we've moved to the state where we will wait on caching block groups
+	 * we need to first check if we're doing a fast load here, so we can
+	 * wait for it to finish; otherwise we could end up allocating from a
+	 * block group whose cache gets evicted for one reason or another.
+ */
+ while (cache->cached == BTRFS_CACHE_FAST) {
+ struct btrfs_caching_control *ctl;
+
+ ctl = cache->caching_ctl;
+ refcount_inc(&ctl->count);
+ prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&cache->lock);
+
+ schedule();
+
+ finish_wait(&ctl->wait, &wait);
+ btrfs_put_caching_control(ctl);
+ spin_lock(&cache->lock);
+ }
+
+ if (cache->cached != BTRFS_CACHE_NO) {
+ spin_unlock(&cache->lock);
+ kfree(caching_ctl);
+ return 0;
+ }
+ WARN_ON(cache->caching_ctl);
+ cache->caching_ctl = caching_ctl;
+ cache->cached = BTRFS_CACHE_FAST;
+ spin_unlock(&cache->lock);
+
+ if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
+ mutex_lock(&caching_ctl->mutex);
+ ret = load_free_space_cache(cache);
+
+ spin_lock(&cache->lock);
+ if (ret == 1) {
+ cache->caching_ctl = NULL;
+ cache->cached = BTRFS_CACHE_FINISHED;
+ cache->last_byte_to_unpin = (u64)-1;
+ caching_ctl->progress = (u64)-1;
+ } else {
+ if (load_cache_only) {
+ cache->caching_ctl = NULL;
+ cache->cached = BTRFS_CACHE_NO;
+ } else {
+ cache->cached = BTRFS_CACHE_STARTED;
+ cache->has_caching_ctl = 1;
+ }
+ }
+ spin_unlock(&cache->lock);
+#ifdef CONFIG_BTRFS_DEBUG
+ if (ret == 1 &&
+ btrfs_should_fragment_free_space(cache)) {
+ u64 bytes_used;
+
+ spin_lock(&cache->space_info->lock);
+ spin_lock(&cache->lock);
+ bytes_used = cache->length - cache->used;
+ cache->space_info->bytes_used += bytes_used >> 1;
+ spin_unlock(&cache->lock);
+ spin_unlock(&cache->space_info->lock);
+ fragment_free_space(cache);
+ }
+#endif
+ mutex_unlock(&caching_ctl->mutex);
+
+ wake_up(&caching_ctl->wait);
+ if (ret == 1) {
+ btrfs_put_caching_control(caching_ctl);
+ btrfs_free_excluded_extents(cache);
+ return 0;
+ }
+ } else {
+ /*
+ * We're either using the free space tree or no caching at all.
+ * Set cached to the appropriate value and wakeup any waiters.
+ */
+ spin_lock(&cache->lock);
+ if (load_cache_only) {
+ cache->caching_ctl = NULL;
+ cache->cached = BTRFS_CACHE_NO;
+ } else {
+ cache->cached = BTRFS_CACHE_STARTED;
+ cache->has_caching_ctl = 1;
+ }
+ spin_unlock(&cache->lock);
+ wake_up(&caching_ctl->wait);
+ }
+
+ if (load_cache_only) {
+ btrfs_put_caching_control(caching_ctl);
+ return 0;
+ }
+
+ down_write(&fs_info->commit_root_sem);
+ refcount_inc(&caching_ctl->count);
+ list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
+ up_write(&fs_info->commit_root_sem);
+
+ btrfs_get_block_group(cache);
+
+ btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
+
+ return ret;
+}
+
+static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
+{
+ u64 extra_flags = chunk_to_extended(flags) &
+ BTRFS_EXTENDED_PROFILE_MASK;
+
+ write_seqlock(&fs_info->profiles_lock);
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ fs_info->avail_data_alloc_bits &= ~extra_flags;
+ if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ fs_info->avail_metadata_alloc_bits &= ~extra_flags;
+ if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ fs_info->avail_system_alloc_bits &= ~extra_flags;
+ write_sequnlock(&fs_info->profiles_lock);
+}
+
+/*
+ * Clear incompat bits for the following feature(s):
+ *
+ * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
+ * in the whole filesystem
+ *
+ * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
+ */
+static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
+{
+ bool found_raid56 = false;
+ bool found_raid1c34 = false;
+
+ if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
+ (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
+ (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
+ struct list_head *head = &fs_info->space_info;
+ struct btrfs_space_info *sinfo;
+
+ list_for_each_entry_rcu(sinfo, head, list) {
+ down_read(&sinfo->groups_sem);
+ if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
+ found_raid56 = true;
+ if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
+ found_raid56 = true;
+ if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
+ found_raid1c34 = true;
+ if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
+ found_raid1c34 = true;
+ up_read(&sinfo->groups_sem);
+ }
+ if (found_raid56)
+ btrfs_clear_fs_incompat(fs_info, RAID56);
+ if (found_raid1c34)
+ btrfs_clear_fs_incompat(fs_info, RAID1C34);
+ }
+}
+
+int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ u64 group_start, struct extent_map *em)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_path *path;
+ struct btrfs_block_group *block_group;
+ struct btrfs_free_cluster *cluster;
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_key key;
+ struct inode *inode;
+ struct kobject *kobj = NULL;
+ int ret;
+ int index;
+ int factor;
+ struct btrfs_caching_control *caching_ctl = NULL;
+ bool remove_em;
+ bool remove_rsv = false;
+
+ block_group = btrfs_lookup_block_group(fs_info, group_start);
+ BUG_ON(!block_group);
+ BUG_ON(!block_group->ro);
+
+ trace_btrfs_remove_block_group(block_group);
+ /*
+ * Free the reserved super bytes from this block group before
+	 * removing it.
+ */
+ btrfs_free_excluded_extents(block_group);
+ btrfs_free_ref_tree_range(fs_info, block_group->start,
+ block_group->length);
+
+ index = btrfs_bg_flags_to_raid_index(block_group->flags);
+ factor = btrfs_bg_type_to_factor(block_group->flags);
+
+ /* make sure this block group isn't part of an allocation cluster */
+ cluster = &fs_info->data_alloc_cluster;
+ spin_lock(&cluster->refill_lock);
+ btrfs_return_cluster_to_free_space(block_group, cluster);
+ spin_unlock(&cluster->refill_lock);
+
+ /*
+ * make sure this block group isn't part of a metadata
+ * allocation cluster
+ */
+ cluster = &fs_info->meta_alloc_cluster;
+ spin_lock(&cluster->refill_lock);
+ btrfs_return_cluster_to_free_space(block_group, cluster);
+ spin_unlock(&cluster->refill_lock);
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * get the inode first so any iput calls done for the io_list
+ * aren't the final iput (no unlinks allowed now)
+ */
+ inode = lookup_free_space_inode(block_group, path);
+
+ mutex_lock(&trans->transaction->cache_write_mutex);
+ /*
+ * Make sure our free space cache IO is done before removing the
+ * free space inode
+ */
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+ if (!list_empty(&block_group->io_list)) {
+ list_del_init(&block_group->io_list);
+
+ WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
+
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
+ btrfs_wait_cache_io(trans, block_group, path);
+ btrfs_put_block_group(block_group);
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+ }
+
+ if (!list_empty(&block_group->dirty_list)) {
+ list_del_init(&block_group->dirty_list);
+ remove_rsv = true;
+ btrfs_put_block_group(block_group);
+ }
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+
+ if (!IS_ERR(inode)) {
+ ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ if (ret) {
+ btrfs_add_delayed_iput(inode);
+ goto out;
+ }
+ clear_nlink(inode);
+ /* One for the block groups ref */
+ spin_lock(&block_group->lock);
+ if (block_group->iref) {
+ block_group->iref = 0;
+ block_group->inode = NULL;
+ spin_unlock(&block_group->lock);
+ iput(inode);
+ } else {
+ spin_unlock(&block_group->lock);
+ }
+ /* One for our lookup ref */
+ btrfs_add_delayed_iput(inode);
+ }
+
+ key.objectid = BTRFS_FREE_SPACE_OBJECTID;
+ key.type = 0;
+ key.offset = block_group->start;
+
+ ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+ if (ret > 0)
+ btrfs_release_path(path);
+ if (ret == 0) {
+ ret = btrfs_del_item(trans, tree_root, path);
+ if (ret)
+ goto out;
+ btrfs_release_path(path);
+ }
+
+ spin_lock(&fs_info->block_group_cache_lock);
+ rb_erase(&block_group->cache_node,
+ &fs_info->block_group_cache_tree);
+ RB_CLEAR_NODE(&block_group->cache_node);
+
+ if (fs_info->first_logical_byte == block_group->start)
+ fs_info->first_logical_byte = (u64)-1;
+ spin_unlock(&fs_info->block_group_cache_lock);
+
+ down_write(&block_group->space_info->groups_sem);
+ /*
+ * we must use list_del_init so people can check to see if they
+ * are still on the list after taking the semaphore
+ */
+ list_del_init(&block_group->list);
+ if (list_empty(&block_group->space_info->block_groups[index])) {
+ kobj = block_group->space_info->block_group_kobjs[index];
+ block_group->space_info->block_group_kobjs[index] = NULL;
+ clear_avail_alloc_bits(fs_info, block_group->flags);
+ }
+ up_write(&block_group->space_info->groups_sem);
+ clear_incompat_bg_bits(fs_info, block_group->flags);
+ if (kobj) {
+ kobject_del(kobj);
+ kobject_put(kobj);
+ }
+
+ if (block_group->has_caching_ctl)
+ caching_ctl = btrfs_get_caching_control(block_group);
+ if (block_group->cached == BTRFS_CACHE_STARTED)
+ btrfs_wait_block_group_cache_done(block_group);
+ if (block_group->has_caching_ctl) {
+ down_write(&fs_info->commit_root_sem);
+ if (!caching_ctl) {
+ struct btrfs_caching_control *ctl;
+
+ list_for_each_entry(ctl,
+ &fs_info->caching_block_groups, list)
+ if (ctl->block_group == block_group) {
+ caching_ctl = ctl;
+ refcount_inc(&caching_ctl->count);
+ break;
+ }
+ }
+ if (caching_ctl)
+ list_del_init(&caching_ctl->list);
+ up_write(&fs_info->commit_root_sem);
+ if (caching_ctl) {
+ /* Once for the caching bgs list and once for us. */
+ btrfs_put_caching_control(caching_ctl);
+ btrfs_put_caching_control(caching_ctl);
+ }
+ }
+
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+ WARN_ON(!list_empty(&block_group->dirty_list));
+ WARN_ON(!list_empty(&block_group->io_list));
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
+
+ btrfs_remove_free_space_cache(block_group);
+
+ spin_lock(&block_group->space_info->lock);
+ list_del_init(&block_group->ro_list);
+
+ if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
+ WARN_ON(block_group->space_info->total_bytes
+ < block_group->length);
+ WARN_ON(block_group->space_info->bytes_readonly
+ < block_group->length);
+ WARN_ON(block_group->space_info->disk_total
+ < block_group->length * factor);
+ }
+ block_group->space_info->total_bytes -= block_group->length;
+ block_group->space_info->bytes_readonly -= block_group->length;
+ block_group->space_info->disk_total -= block_group->length * factor;
+
+ spin_unlock(&block_group->space_info->lock);
+
+ key.objectid = block_group->start;
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = block_group->length;
+
+ mutex_lock(&fs_info->chunk_mutex);
+ spin_lock(&block_group->lock);
+ block_group->removed = 1;
+ /*
+ * At this point trimming can't start on this block group, because we
+ * removed the block group from the tree fs_info->block_group_cache_tree
+	 * so no one can find it anymore, and even if someone already got this
+ * block group before we removed it from the rbtree, they have already
+ * incremented block_group->trimming - if they didn't, they won't find
+ * any free space entries because we already removed them all when we
+ * called btrfs_remove_free_space_cache().
+ *
+ * And we must not remove the extent map from the fs_info->mapping_tree
+ * to prevent the same logical address range and physical device space
+ * ranges from being reused for a new block group. This is because our
+ * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
+ * completely transactionless, so while it is trimming a range the
+ * currently running transaction might finish and a new one start,
+ * allowing for new block groups to be created that can reuse the same
+ * physical device locations unless we take this special care.
+ *
+ * There may also be an implicit trim operation if the file system
+ * is mounted with -odiscard. The same protections must remain
+ * in place until the extents have been discarded completely when
+ * the transaction commit has completed.
+ */
+ remove_em = (atomic_read(&block_group->trimming) == 0);
+ spin_unlock(&block_group->lock);
+
+ mutex_unlock(&fs_info->chunk_mutex);
+
+ ret = remove_block_group_free_space(trans, block_group);
+ if (ret)
+ goto out;
+
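+	/* Once for our lookup reference and once for the block group cache rbtree. */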
+ btrfs_put_block_group(block_group);
+ btrfs_put_block_group(block_group);
+
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret > 0)
+ ret = -EIO;
+ if (ret < 0)
+ goto out;
+
+ ret = btrfs_del_item(trans, root, path);
+ if (ret)
+ goto out;
+
+ if (remove_em) {
+ struct extent_map_tree *em_tree;
+
+ em_tree = &fs_info->mapping_tree;
+ write_lock(&em_tree->lock);
+ remove_extent_mapping(em_tree, em);
+ write_unlock(&em_tree->lock);
+ /* once for the tree */
+ free_extent_map(em);
+ }
+out:
+ if (remove_rsv)
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_free_path(path);
+ return ret;
+}
+
+struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
+ struct btrfs_fs_info *fs_info, const u64 chunk_offset)
+{
+ struct extent_map_tree *em_tree = &fs_info->mapping_tree;
+ struct extent_map *em;
+ struct map_lookup *map;
+ unsigned int num_items;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, chunk_offset, 1);
+ read_unlock(&em_tree->lock);
+ ASSERT(em && em->start == chunk_offset);
+
+ /*
+ * We need to reserve 3 + N units from the metadata space info in order
+ * to remove a block group (done at btrfs_remove_chunk() and at
+ * btrfs_remove_block_group()), which are used for:
+ *
+ * 1 unit for adding the free space inode's orphan (located in the tree
+ * of tree roots).
+ * 1 unit for deleting the block group item (located in the extent
+ * tree).
+ * 1 unit for deleting the free space item (located in tree of tree
+ * roots).
+ * N units for deleting N device extent items corresponding to each
+ * stripe (located in the device tree).
+ *
+ * In order to remove a block group we also need to reserve units in the
+ * system space info in order to update the chunk tree (update one or
+ * more device items and remove one chunk item), but this is done at
+ * btrfs_remove_chunk() through a call to check_system_chunk().
+ */
+ map = em->map_lookup;
+ num_items = 3 + map->num_stripes;
+ free_extent_map(em);
+
+ return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
+ num_items, 1);
+}
+
+/*
+ * Mark block group @cache read-only, so later write won't happen to block
+ * group @cache.
+ *
+ * If @force is not set, this function will only mark the block group readonly
+ * if we have enough free space (1M) in other metadata/system block groups.
+ * If @force is set, this function will mark the block group readonly
+ * without checking free space.
+ *
+ * NOTE: This function doesn't care if other block groups can contain all the
+ * data in this block group. That check should be done by relocation routine,
+ * not this function.
+ */
+static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
+{
+ struct btrfs_space_info *sinfo = cache->space_info;
+ u64 num_bytes;
+ u64 sinfo_used;
+ u64 min_allocable_bytes;
+ int ret = -ENOSPC;
+
+ /*
+	 * We need some metadata space and system metadata space for
+	 * allocating chunks in some corner cases, so unless we are forced to,
+	 * keep a minimum of free space before setting the group read-only.
+ */
+ if ((sinfo->flags &
+ (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
+ !force)
+ min_allocable_bytes = SZ_1M;
+ else
+ min_allocable_bytes = 0;
+
+ spin_lock(&sinfo->lock);
+ spin_lock(&cache->lock);
+
+ if (cache->ro) {
+ cache->ro++;
+ ret = 0;
+ goto out;
+ }
+
+ num_bytes = cache->length - cache->reserved - cache->pinned -
+ cache->bytes_super - cache->used;
+ sinfo_used = btrfs_space_info_used(sinfo, true);
+
+ /*
+	 * sinfo_used + num_bytes should always be <= sinfo->total_bytes.
+	 *
+	 * Here we make sure that if we mark this bg RO, we still have enough
+	 * free space as a buffer (if min_allocable_bytes is not 0).
+ */
+ if (sinfo_used + num_bytes + min_allocable_bytes <=
+ sinfo->total_bytes) {
+ sinfo->bytes_readonly += num_bytes;
+ cache->ro++;
+ list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
+ ret = 0;
+ }
+out:
+ spin_unlock(&cache->lock);
+ spin_unlock(&sinfo->lock);
+ if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
+ btrfs_info(cache->fs_info,
+ "unable to make block group %llu ro", cache->start);
+ btrfs_info(cache->fs_info,
+ "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
+ sinfo_used, num_bytes, min_allocable_bytes);
+ btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
+ }
+ return ret;
+}
+
+/*
+ * Process the unused_bgs list and remove any that don't have any allocated
+ * space inside of them.
+ */
+void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_group *block_group;
+ struct btrfs_space_info *space_info;
+ struct btrfs_trans_handle *trans;
+ int ret = 0;
+
+ if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
+ return;
+
+ spin_lock(&fs_info->unused_bgs_lock);
+ while (!list_empty(&fs_info->unused_bgs)) {
+ u64 start, end;
+ int trimming;
+
+ block_group = list_first_entry(&fs_info->unused_bgs,
+ struct btrfs_block_group,
+ bg_list);
+ list_del_init(&block_group->bg_list);
+
+ space_info = block_group->space_info;
+
+ if (ret || btrfs_mixed_space_info(space_info)) {
+ btrfs_put_block_group(block_group);
+ continue;
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+
+ mutex_lock(&fs_info->delete_unused_bgs_mutex);
+
+ /* Don't want to race with allocators so take the groups_sem */
+ down_write(&space_info->groups_sem);
+ spin_lock(&block_group->lock);
+ if (block_group->reserved || block_group->pinned ||
+ block_group->used || block_group->ro ||
+ list_is_singular(&block_group->list)) {
+ /*
+ * We want to bail if we made new allocations or have
+ * outstanding allocations in this block group. We do
+ * the ro check in case balance is currently acting on
+ * this block group.
+ */
+ trace_btrfs_skip_unused_block_group(block_group);
+ spin_unlock(&block_group->lock);
+ up_write(&space_info->groups_sem);
+ goto next;
+ }
+ spin_unlock(&block_group->lock);
+
+ /* We don't want to force the issue, only flip if it's ok. */
+ ret = inc_block_group_ro(block_group, 0);
+ up_write(&space_info->groups_sem);
+ if (ret < 0) {
+ ret = 0;
+ goto next;
+ }
+
+ /*
+ * Want to do this before we do anything else so we can recover
+ * properly if we fail to join the transaction.
+ */
+ trans = btrfs_start_trans_remove_block_group(fs_info,
+ block_group->start);
+ if (IS_ERR(trans)) {
+ btrfs_dec_block_group_ro(block_group);
+ ret = PTR_ERR(trans);
+ goto next;
+ }
+
+ /*
+		 * We could have pending pinned extents for this block group;
+		 * just delete them, we don't care about them anymore.
+ */
+ start = block_group->start;
+ end = start + block_group->length - 1;
+ /*
+ * Hold the unused_bg_unpin_mutex lock to avoid racing with
+ * btrfs_finish_extent_commit(). If we are at transaction N,
+ * another task might be running finish_extent_commit() for the
+ * previous transaction N - 1, and have seen a range belonging
+ * to the block group in freed_extents[] before we were able to
+ * clear the whole block group range from freed_extents[]. This
+ * means that task can lookup for the block group after we
+ * unpinned it from freed_extents[] and removed it, leading to
+ * a BUG_ON() at btrfs_unpin_extent_range().
+ */
+ mutex_lock(&fs_info->unused_bg_unpin_mutex);
+ ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
+ EXTENT_DIRTY);
+ if (ret) {
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+ btrfs_dec_block_group_ro(block_group);
+ goto end_trans;
+ }
+ ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
+ EXTENT_DIRTY);
+ if (ret) {
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+ btrfs_dec_block_group_ro(block_group);
+ goto end_trans;
+ }
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+
+ /* Reset pinned so btrfs_put_block_group doesn't complain */
+ spin_lock(&space_info->lock);
+ spin_lock(&block_group->lock);
+
+ btrfs_space_info_update_bytes_pinned(fs_info, space_info,
+ -block_group->pinned);
+ space_info->bytes_readonly += block_group->pinned;
+ percpu_counter_add_batch(&space_info->total_bytes_pinned,
+ -block_group->pinned,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
+ block_group->pinned = 0;
+
+ spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
+
+ /* DISCARD can flip during remount */
+ trimming = btrfs_test_opt(fs_info, DISCARD);
+
+ /* Implicit trim during transaction commit. */
+ if (trimming)
+ btrfs_get_block_group_trimming(block_group);
+
+ /*
+		 * btrfs_remove_chunk() will abort the transaction if things go
+ * horribly wrong.
+ */
+ ret = btrfs_remove_chunk(trans, block_group->start);
+
+ if (ret) {
+ if (trimming)
+ btrfs_put_block_group_trimming(block_group);
+ goto end_trans;
+ }
+
+ /*
+ * If we're not mounted with -odiscard, we can just forget
+ * about this block group. Otherwise we'll need to wait
+ * until transaction commit to do the actual discard.
+ */
+ if (trimming) {
+ spin_lock(&fs_info->unused_bgs_lock);
+ /*
+ * A concurrent scrub might have added us to the list
+ * fs_info->unused_bgs, so use a list_move operation
+ * to add the block group to the deleted_bgs list.
+ */
+ list_move(&block_group->bg_list,
+ &trans->transaction->deleted_bgs);
+ spin_unlock(&fs_info->unused_bgs_lock);
+ btrfs_get_block_group(block_group);
+ }
+end_trans:
+ btrfs_end_transaction(trans);
+next:
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+ btrfs_put_block_group(block_group);
+ spin_lock(&fs_info->unused_bgs_lock);
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+}
+
+void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+ spin_lock(&fs_info->unused_bgs_lock);
+ if (list_empty(&bg->bg_list)) {
+ btrfs_get_block_group(bg);
+ trace_btrfs_add_unused_block_group(bg);
+ list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+}
+
+static int find_first_block_group(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
+ struct btrfs_key *key)
+{
+ struct btrfs_root *root = fs_info->extent_root;
+ int ret = 0;
+ struct btrfs_key found_key;
+ struct extent_buffer *leaf;
+ struct btrfs_block_group_item bg;
+ u64 flags;
+ int slot;
+
+ ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
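+	/*
+	 * Walk forward from @key until we find a block group item and verify
+	 * it matches its chunk mapping before returning.
+	 */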
+ while (1) {
+ slot = path->slots[0];
+ leaf = path->nodes[0];
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret == 0)
+ continue;
+ if (ret < 0)
+ goto out;
+ break;
+ }
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+ if (found_key.objectid >= key->objectid &&
+ found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+
+ em_tree = &root->fs_info->mapping_tree;
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, found_key.objectid,
+ found_key.offset);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ btrfs_err(fs_info,
+ "logical %llu len %llu found bg but no related chunk",
+ found_key.objectid, found_key.offset);
+ ret = -ENOENT;
+ } else if (em->start != found_key.objectid ||
+ em->len != found_key.offset) {
+ btrfs_err(fs_info,
+ "block group %llu len %llu mismatch with chunk %llu len %llu",
+ found_key.objectid, found_key.offset,
+ em->start, em->len);
+ ret = -EUCLEAN;
+ } else {
+ read_extent_buffer(leaf, &bg,
+ btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bg));
+ flags = btrfs_stack_block_group_flags(&bg) &
+ BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+ if (flags != (em->map_lookup->type &
+ BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(fs_info,
+"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
+ found_key.objectid,
+ found_key.offset, flags,
+ (BTRFS_BLOCK_GROUP_TYPE_MASK &
+ em->map_lookup->type));
+ ret = -EUCLEAN;
+ } else {
+ ret = 0;
+ }
+ }
+ free_extent_map(em);
+ goto out;
+ }
+ path->slots[0]++;
+ }
+out:
+ return ret;
+}
+
+static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
+{
+ u64 extra_flags = chunk_to_extended(flags) &
+ BTRFS_EXTENDED_PROFILE_MASK;
+
+ write_seqlock(&fs_info->profiles_lock);
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ fs_info->avail_data_alloc_bits |= extra_flags;
+ if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ fs_info->avail_metadata_alloc_bits |= extra_flags;
+ if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ fs_info->avail_system_alloc_bits |= extra_flags;
+ write_sequnlock(&fs_info->profiles_lock);
+}
+
+static int exclude_super_stripes(struct btrfs_block_group *cache)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ u64 bytenr;
+ u64 *logical;
+ int stripe_len;
+ int i, nr, ret;
+
+ if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
+ stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
+ cache->bytes_super += stripe_len;
+ ret = btrfs_add_excluded_extent(fs_info, cache->start,
+ stripe_len);
+ if (ret)
+ return ret;
+ }
+
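+	/*
+	 * For each superblock mirror, find the logical ranges of this block
+	 * group that map to it and exclude them from free space accounting.
+	 */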
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ bytenr = btrfs_sb_offset(i);
+ ret = btrfs_rmap_block(fs_info, cache->start,
+ bytenr, &logical, &nr, &stripe_len);
+ if (ret)
+ return ret;
+
+ while (nr--) {
+ u64 start, len;
+
+ if (logical[nr] > cache->start + cache->length)
+ continue;
+
+ if (logical[nr] + stripe_len <= cache->start)
+ continue;
+
+ start = logical[nr];
+ if (start < cache->start) {
+ start = cache->start;
+ len = (logical[nr] + stripe_len) - start;
+ } else {
+ len = min_t(u64, stripe_len,
+ cache->start + cache->length - start);
+ }
+
+ cache->bytes_super += len;
+ ret = btrfs_add_excluded_extent(fs_info, start, len);
+ if (ret) {
+ kfree(logical);
+ return ret;
+ }
+ }
+
+ kfree(logical);
+ }
+ return 0;
+}
+
+static void link_block_group(struct btrfs_block_group *cache)
+{
+ struct btrfs_space_info *space_info = cache->space_info;
+ int index = btrfs_bg_flags_to_raid_index(cache->flags);
+ bool first = false;
+
+ down_write(&space_info->groups_sem);
+ if (list_empty(&space_info->block_groups[index]))
+ first = true;
+ list_add_tail(&cache->list, &space_info->block_groups[index]);
+ up_write(&space_info->groups_sem);
+
+ if (first)
+ btrfs_sysfs_add_block_group_type(cache);
+}
+
+static struct btrfs_block_group *btrfs_create_block_group_cache(
+ struct btrfs_fs_info *fs_info, u64 start, u64 size)
+{
+ struct btrfs_block_group *cache;
+
+ cache = kzalloc(sizeof(*cache), GFP_NOFS);
+ if (!cache)
+ return NULL;
+
+ cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+ GFP_NOFS);
+ if (!cache->free_space_ctl) {
+ kfree(cache);
+ return NULL;
+ }
+
+ cache->start = start;
+ cache->length = size;
+
+ cache->fs_info = fs_info;
+ cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
+ set_free_space_tree_thresholds(cache);
+
+ atomic_set(&cache->count, 1);
+ spin_lock_init(&cache->lock);
+ init_rwsem(&cache->data_rwsem);
+ INIT_LIST_HEAD(&cache->list);
+ INIT_LIST_HEAD(&cache->cluster_list);
+ INIT_LIST_HEAD(&cache->bg_list);
+ INIT_LIST_HEAD(&cache->ro_list);
+ INIT_LIST_HEAD(&cache->dirty_list);
+ INIT_LIST_HEAD(&cache->io_list);
+ btrfs_init_free_space_ctl(cache);
+ atomic_set(&cache->trimming, 0);
+ mutex_init(&cache->free_space_lock);
+ btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
+
+ return cache;
+}
+
+/*
+ * Iterate all chunks and verify that each of them has the corresponding block
+ * group
+ */
+static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
+{
+ struct extent_map_tree *map_tree = &fs_info->mapping_tree;
+ struct extent_map *em;
+ struct btrfs_block_group *bg;
+ u64 start = 0;
+ int ret = 0;
+
+ while (1) {
+ read_lock(&map_tree->lock);
+ /*
+ * lookup_extent_mapping will return the first extent map
+ * intersecting the range, so setting @len to 1 is enough to
+ * get the first chunk.
+ */
+ em = lookup_extent_mapping(map_tree, start, 1);
+ read_unlock(&map_tree->lock);
+ if (!em)
+ break;
+
+ bg = btrfs_lookup_block_group(fs_info, em->start);
+ if (!bg) {
+ btrfs_err(fs_info,
+ "chunk start=%llu len=%llu doesn't have corresponding block group",
+ em->start, em->len);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ break;
+ }
+ if (bg->start != em->start || bg->length != em->len ||
+ (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
+ (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(fs_info,
+"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
+ em->start, em->len,
+ em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
+ bg->start, bg->length,
+ bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ break;
+ }
+ start = em->start + em->len;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ }
+ return ret;
+}
+
+static int read_one_block_group(struct btrfs_fs_info *info,
+ struct btrfs_path *path,
+ const struct btrfs_key *key,
+ int need_clear)
+{
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_block_group *cache;
+ struct btrfs_space_info *space_info;
+ struct btrfs_block_group_item bgi;
+ const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
+ int slot = path->slots[0];
+ int ret;
+
+ ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
+
+ cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
+ if (!cache)
+ return -ENOMEM;
+
+ if (need_clear) {
+ /*
+ * When we mount with old space cache, we need to
+		 * set BTRFS_DC_CLEAR and set the dirty flag.
+ *
+ * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
+ * truncate the old free space cache inode and
+ * setup a new one.
+ * b) Setting 'dirty flag' makes sure that we flush
+ * the new space cache info onto disk.
+ */
+ if (btrfs_test_opt(info, SPACE_CACHE))
+ cache->disk_cache_state = BTRFS_DC_CLEAR;
+ }
+ read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bgi));
+ cache->used = btrfs_stack_block_group_used(&bgi);
+ cache->flags = btrfs_stack_block_group_flags(&bgi);
+ if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
+ (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
+ btrfs_err(info,
+"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
+ cache->start);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * We need to exclude the super stripes now so that the space info has
+	 * super bytes accounted for; otherwise we'll think we have more space
+ * than we actually do.
+ */
+ ret = exclude_super_stripes(cache);
+ if (ret) {
+ /* We may have excluded something, so call this just in case. */
+ btrfs_free_excluded_extents(cache);
+ goto error;
+ }
+
+ /*
+	 * Check for two cases: either we are full, and therefore don't need
+ * to bother with the caching work since we won't find any space, or we
+ * are empty, and we can just add all the space in and be done with it.
+ * This saves us _a_lot_ of time, particularly in the full case.
+ */
+ if (key->offset == cache->used) {
+ cache->last_byte_to_unpin = (u64)-1;
+ cache->cached = BTRFS_CACHE_FINISHED;
+ btrfs_free_excluded_extents(cache);
+ } else if (cache->used == 0) {
+ cache->last_byte_to_unpin = (u64)-1;
+ cache->cached = BTRFS_CACHE_FINISHED;
+ add_new_free_space(cache, key->objectid,
+ key->objectid + key->offset);
+ btrfs_free_excluded_extents(cache);
+ }
+
+ ret = btrfs_add_block_group_cache(info, cache);
+ if (ret) {
+ btrfs_remove_free_space_cache(cache);
+ goto error;
+ }
+ trace_btrfs_add_block_group(info, cache, 0);
+ btrfs_update_space_info(info, cache->flags, key->offset,
+ cache->used, cache->bytes_super, &space_info);
+
+ cache->space_info = space_info;
+
+ link_block_group(cache);
+
+ set_avail_alloc_bits(info, cache->flags);
+ if (btrfs_chunk_readonly(info, cache->start)) {
+ inc_block_group_ro(cache, 1);
+ } else if (cache->used == 0) {
+ ASSERT(list_empty(&cache->bg_list));
+ btrfs_mark_bg_unused(cache);
+ }
+ return 0;
+error:
+ btrfs_put_block_group(cache);
+ return ret;
+}
+
+int btrfs_read_block_groups(struct btrfs_fs_info *info)
+{
+ struct btrfs_path *path;
+ int ret;
+ struct btrfs_block_group *cache;
+ struct btrfs_space_info *space_info;
+ struct btrfs_key key;
+ int need_clear = 0;
+ u64 cache_gen;
+
+ key.objectid = 0;
+ key.offset = 0;
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->reada = READA_FORWARD;
+
+ cache_gen = btrfs_super_cache_generation(info->super_copy);
+ if (btrfs_test_opt(info, SPACE_CACHE) &&
+ btrfs_super_generation(info->super_copy) != cache_gen)
+ need_clear = 1;
+ if (btrfs_test_opt(info, CLEAR_CACHE))
+ need_clear = 1;
+
+ while (1) {
+ ret = find_first_block_group(info, path, &key);
+ if (ret > 0)
+ break;
+ if (ret != 0)
+ goto error;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ ret = read_one_block_group(info, path, &key, need_clear);
+ if (ret < 0)
+ goto error;
+ key.objectid += key.offset;
+ key.offset = 0;
+ btrfs_release_path(path);
+ }
+
+ list_for_each_entry_rcu(space_info, &info->space_info, list) {
+ if (!(btrfs_get_alloc_profile(info, space_info->flags) &
+ (BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID1_MASK |
+ BTRFS_BLOCK_GROUP_RAID56_MASK |
+ BTRFS_BLOCK_GROUP_DUP)))
+ continue;
+ /*
+		 * Avoid allocating from un-mirrored block groups if there are
+		 * mirrored block groups.
+ */
+ list_for_each_entry(cache,
+ &space_info->block_groups[BTRFS_RAID_RAID0],
+ list)
+ inc_block_group_ro(cache, 1);
+ list_for_each_entry(cache,
+ &space_info->block_groups[BTRFS_RAID_SINGLE],
+ list)
+ inc_block_group_ro(cache, 1);
+ }
+
+ btrfs_init_global_block_rsv(info);
+ ret = check_chunk_block_group_mappings(info);
+error:
+ btrfs_free_path(path);
+ return ret;
+}
+
+void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_block_group *block_group;
+ struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_block_group_item item;
+ struct btrfs_key key;
+ int ret = 0;
+
+ if (!trans->can_flush_pending_bgs)
+ return;
+
+ while (!list_empty(&trans->new_bgs)) {
+ block_group = list_first_entry(&trans->new_bgs,
+ struct btrfs_block_group,
+ bg_list);
+ if (ret)
+ goto next;
+
+ spin_lock(&block_group->lock);
+ btrfs_set_stack_block_group_used(&item, block_group->used);
+ btrfs_set_stack_block_group_chunk_objectid(&item,
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ btrfs_set_stack_block_group_flags(&item, block_group->flags);
+ key.objectid = block_group->start;
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = block_group->length;
+ spin_unlock(&block_group->lock);
+
+ ret = btrfs_insert_item(trans, extent_root, &key, &item,
+ sizeof(item));
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ add_block_group_free_space(trans, block_group);
+ /* Already aborted the transaction if it failed. */
+next:
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
+ list_del_init(&block_group->bg_list);
+ }
+ btrfs_trans_release_chunk_metadata(trans);
+}
+
+int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
+ u64 type, u64 chunk_offset, u64 size)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_block_group *cache;
+ int ret;
+
+ btrfs_set_log_full_commit(trans);
+
+ cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
+ if (!cache)
+ return -ENOMEM;
+
+ cache->used = bytes_used;
+ cache->flags = type;
+ cache->last_byte_to_unpin = (u64)-1;
+ cache->cached = BTRFS_CACHE_FINISHED;
+ cache->needs_free_space = 1;
+ ret = exclude_super_stripes(cache);
+ if (ret) {
+ /* We may have excluded something, so call this just in case */
+ btrfs_free_excluded_extents(cache);
+ btrfs_put_block_group(cache);
+ return ret;
+ }
+
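+	/* The whole new chunk is free space, minus the ranges excluded for super stripes. */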
+ add_new_free_space(cache, chunk_offset, chunk_offset + size);
+
+ btrfs_free_excluded_extents(cache);
+
+#ifdef CONFIG_BTRFS_DEBUG
+ if (btrfs_should_fragment_free_space(cache)) {
+ u64 new_bytes_used = size - bytes_used;
+
+ bytes_used += new_bytes_used >> 1;
+ fragment_free_space(cache);
+ }
+#endif
+ /*
+ * Ensure the corresponding space_info object is created and
+ * assigned to our block group. We want our bg to be added to the rbtree
+ * with its ->space_info set.
+ */
+ cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
+ ASSERT(cache->space_info);
+
+ ret = btrfs_add_block_group_cache(fs_info, cache);
+ if (ret) {
+ btrfs_remove_free_space_cache(cache);
+ btrfs_put_block_group(cache);
+ return ret;
+ }
+
+ /*
+ * Now that our block group has its ->space_info set and is inserted in
+ * the rbtree, update the space info's counters.
+ */
+ trace_btrfs_add_block_group(fs_info, cache, 1);
+ btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
+ cache->bytes_super, &cache->space_info);
+ btrfs_update_global_block_rsv(fs_info);
+
+ link_block_group(cache);
+
+ list_add_tail(&cache->bg_list, &trans->new_bgs);
+ trans->delayed_ref_updates++;
+ btrfs_update_delayed_refs_rsv(trans);
+
+ set_avail_alloc_bits(fs_info, type);
+ return 0;
+}
+
+static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
+{
+ u64 num_devices;
+ u64 stripped;
+
+ /*
+	 * If restripe for this chunk_type is in progress, pick the target
+	 * profile and return; otherwise do the usual balance.
+ */
+ stripped = get_restripe_target(fs_info, flags);
+ if (stripped)
+ return extended_to_chunk(stripped);
+
+ num_devices = fs_info->fs_devices->rw_devices;
+
+ stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
+ BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;
+
+ if (num_devices == 1) {
+ stripped |= BTRFS_BLOCK_GROUP_DUP;
+ stripped = flags & ~stripped;
+
+ /* turn raid0 into single device chunks */
+ if (flags & BTRFS_BLOCK_GROUP_RAID0)
+ return stripped;
+
+ /* turn mirroring into duplication */
+ if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
+ BTRFS_BLOCK_GROUP_RAID10))
+ return stripped | BTRFS_BLOCK_GROUP_DUP;
+ } else {
+ /* they already had raid on here, just return */
+ if (flags & stripped)
+ return flags;
+
+ stripped |= BTRFS_BLOCK_GROUP_DUP;
+ stripped = flags & ~stripped;
+
+ /* switch duplicated blocks with raid1 */
+ if (flags & BTRFS_BLOCK_GROUP_DUP)
+ return stripped | BTRFS_BLOCK_GROUP_RAID1;
+
+ /* this is drive concat, leave it alone */
+ }
+
+ return flags;
+}
+
+/*
+ * Mark one block group RO, can be called several times for the same block
+ * group.
+ *
+ * @cache: the destination block group
+ * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to
+ * ensure we still have some free space after marking this
+ * block group RO.
+ */
+int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
+ bool do_chunk_alloc)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_trans_handle *trans;
+ u64 alloc_flags;
+ int ret;
+
+again:
+ trans = btrfs_join_transaction(fs_info->extent_root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ /*
+ * we're not allowed to set block groups readonly after the dirty
+ * block groups cache has started writing. If it already started,
+ * back off and let this transaction commit
+ */
+ mutex_lock(&fs_info->ro_block_group_mutex);
+ if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
+ u64 transid = trans->transid;
+
+ mutex_unlock(&fs_info->ro_block_group_mutex);
+ btrfs_end_transaction(trans);
+
+ ret = btrfs_wait_for_commit(fs_info, transid);
+ if (ret)
+ return ret;
+ goto again;
+ }
+
+ if (do_chunk_alloc) {
+ /*
+ * If we are changing raid levels, try to allocate a
+ * corresponding block group with the new raid level.
+ */
+ alloc_flags = update_block_group_flags(fs_info, cache->flags);
+ if (alloc_flags != cache->flags) {
+ ret = btrfs_chunk_alloc(trans, alloc_flags,
+ CHUNK_ALLOC_FORCE);
+ /*
+ * ENOSPC is allowed here, we may have enough space
+ * already allocated at the new raid level to carry on
+ */
+ if (ret == -ENOSPC)
+ ret = 0;
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ ret = inc_block_group_ro(cache, !do_chunk_alloc);
+ if (!do_chunk_alloc)
+ goto unlock_out;
+ if (!ret)
+ goto out;
+ alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
+ ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+ if (ret < 0)
+ goto out;
+ ret = inc_block_group_ro(cache, 0);
+out:
+ if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
+ alloc_flags = update_block_group_flags(fs_info, cache->flags);
+ mutex_lock(&fs_info->chunk_mutex);
+ check_system_chunk(trans, alloc_flags);
+ mutex_unlock(&fs_info->chunk_mutex);
+ }
+unlock_out:
+ mutex_unlock(&fs_info->ro_block_group_mutex);
+
+ btrfs_end_transaction(trans);
+ return ret;
+}
+
+void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
+{
+ struct btrfs_space_info *sinfo = cache->space_info;
+ u64 num_bytes;
+
+ BUG_ON(!cache->ro);
+
+ spin_lock(&sinfo->lock);
+ spin_lock(&cache->lock);
+ if (!--cache->ro) {
+ num_bytes = cache->length - cache->reserved -
+ cache->pinned - cache->bytes_super - cache->used;
+ sinfo->bytes_readonly -= num_bytes;
+ list_del_init(&cache->ro_list);
+ }
+ spin_unlock(&cache->lock);
+ spin_unlock(&sinfo->lock);
+}
+
+static int write_one_cache_group(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_block_group *cache)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ int ret;
+ struct btrfs_root *extent_root = fs_info->extent_root;
+ unsigned long bi;
+ struct extent_buffer *leaf;
+ struct btrfs_block_group_item bgi;
+ struct btrfs_key key;
+
+ key.objectid = cache->start;
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = cache->length;
+
+ ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ leaf = path->nodes[0];
+ bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ btrfs_set_stack_block_group_used(&bgi, cache->used);
+ btrfs_set_stack_block_group_chunk_objectid(&bgi,
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ btrfs_set_stack_block_group_flags(&bgi, cache->flags);
+ write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
+ btrfs_mark_buffer_dirty(leaf);
+fail:
+ btrfs_release_path(path);
+ return ret;
+
+}
+
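+/*
+ * Prepare the free space cache inode of a block group for writeout in this
+ * transaction: create the inode if it doesn't exist yet, truncate any stale
+ * cache contents and preallocate space for the new cache.
+ */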
+static int cache_save_setup(struct btrfs_block_group *block_group,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_root *root = fs_info->tree_root;
+ struct inode *inode = NULL;
+ struct extent_changeset *data_reserved = NULL;
+ u64 alloc_hint = 0;
+ int dcs = BTRFS_DC_ERROR;
+ u64 num_pages = 0;
+ int retries = 0;
+ int ret = 0;
+
+ /*
+ * If this block group is smaller than 100 megs, don't bother caching it.
+ */
+ if (block_group->length < (100 * SZ_1M)) {
+ spin_lock(&block_group->lock);
+ block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+ spin_unlock(&block_group->lock);
+ return 0;
+ }
+
+ if (trans->aborted)
+ return 0;
+again:
+ inode = lookup_free_space_inode(block_group, path);
+ if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+ ret = PTR_ERR(inode);
+ btrfs_release_path(path);
+ goto out;
+ }
+
+ if (IS_ERR(inode)) {
+ BUG_ON(retries);
+ retries++;
+
+ if (block_group->ro)
+ goto out_free;
+
+ ret = create_free_space_inode(trans, block_group, path);
+ if (ret)
+ goto out_free;
+ goto again;
+ }
+
+ /*
+ * We want to set the generation to 0 so that if anything goes wrong
+ * from here on out we know not to trust this cache the next time we
+ * load it.
+ */
+ BTRFS_I(inode)->generation = 0;
+ ret = btrfs_update_inode(trans, root, inode);
+ if (ret) {
+ /*
+ * So theoretically we could recover from this, simply set the
+ * super cache generation to 0 so we know to invalidate the
+ * cache, but then we'd have to keep track of the block groups
+ * that fail this way so we know we _have_ to reset this cache
+ * before the next commit or risk reading stale cache. So to
+ * limit our exposure to horrible edge cases let's just abort the
+ * transaction; this only happens in really bad situations
+ * anyway.
+ */
+ btrfs_abort_transaction(trans, ret);
+ goto out_put;
+ }
+ WARN_ON(ret);
+
+ /* We've already setup this transaction, go ahead and exit */
+ if (block_group->cache_generation == trans->transid &&
+ i_size_read(inode)) {
+ dcs = BTRFS_DC_SETUP;
+ goto out_put;
+ }
+
+ if (i_size_read(inode) > 0) {
+ ret = btrfs_check_trunc_cache_free_space(fs_info,
+ &fs_info->global_block_rsv);
+ if (ret)
+ goto out_put;
+
+ ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
+ if (ret)
+ goto out_put;
+ }
+
+ spin_lock(&block_group->lock);
+ if (block_group->cached != BTRFS_CACHE_FINISHED ||
+ !btrfs_test_opt(fs_info, SPACE_CACHE)) {
+ /*
+ * Don't bother trying to write stuff out _if_:
+ * a) we're not cached,
+ * b) we're mounted with the nospace_cache option,
+ * c) we're using the v2 space cache (FREE_SPACE_TREE).
+ */
+ dcs = BTRFS_DC_WRITTEN;
+ spin_unlock(&block_group->lock);
+ goto out_put;
+ }
+ spin_unlock(&block_group->lock);
+
+ /*
+ * We hit an ENOSPC when setting up the cache in this transaction, so just
+ * skip doing the setup; we've already cleared the cache so we're safe.
+ */
+ if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
+ ret = -ENOSPC;
+ goto out_put;
+ }
+
+ /*
+ * Try to preallocate enough space based on how big the block group is.
+ * Keep in mind this has to include any pinned space which could end up
+ * taking up quite a bit since it's not folded into the other space
+ * cache.
+ */
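+	/* Reserve 16 pages worth of cache space per 256MiB of block group length. */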
+ num_pages = div_u64(block_group->length, SZ_256M);
+ if (!num_pages)
+ num_pages = 1;
+
+ num_pages *= 16;
+ num_pages *= PAGE_SIZE;
+
+ ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
+ if (ret)
+ goto out_put;
+
+ ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
+ num_pages, num_pages,
+ &alloc_hint);
+ /*
+ * Our cache requires contiguous chunks so that we don't modify a bunch
+ * of metadata or split extents when writing the cache out, which means
+ * we can hit ENOSPC if we are heavily fragmented in addition to just normal
+ * out of space conditions. So if we hit this just skip setting up any
+ * other block groups for this transaction, maybe we'll unpin enough
+ * space the next time around.
+ */
+ if (!ret)
+ dcs = BTRFS_DC_SETUP;
+ else if (ret == -ENOSPC)
+ set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
+
+out_put:
+ iput(inode);
+out_free:
+ btrfs_release_path(path);
+out:
+ spin_lock(&block_group->lock);
+ if (!ret && dcs == BTRFS_DC_SETUP)
+ block_group->cache_generation = trans->transid;
+ block_group->disk_cache_state = dcs;
+ spin_unlock(&block_group->lock);
+
+ extent_changeset_free(data_reserved);
+ return ret;
+}
+
+int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_block_group *cache, *tmp;
+ struct btrfs_transaction *cur_trans = trans->transaction;
+ struct btrfs_path *path;
+
+ if (list_empty(&cur_trans->dirty_bgs) ||
+ !btrfs_test_opt(fs_info, SPACE_CACHE))
+ return 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ /* Could add new block groups, use _safe just in case */
+ list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
+ dirty_list) {
+ if (cache->disk_cache_state == BTRFS_DC_CLEAR)
+ cache_save_setup(cache, trans, path);
+ }
+
+ btrfs_free_path(path);
+ return 0;
+}
+
+/*
+ * Transaction commit does final block group cache writeback during a critical
+ * section where nothing is allowed to change the FS. This is required in
+ * order for the cache to actually match the block group, but can introduce a
+ * lot of latency into the commit.
+ *
+ * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
+ * There's a chance we'll have to redo some of it if the block group changes
+ * again during the commit, but it greatly reduces the commit latency by
+ * getting rid of the easy block groups while we're still allowing others to
+ * join the commit.
+ */
+int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_block_group *cache;
+ struct btrfs_transaction *cur_trans = trans->transaction;
+ int ret = 0;
+ int should_put;
+ struct btrfs_path *path = NULL;
+ LIST_HEAD(dirty);
+ struct list_head *io = &cur_trans->io_bgs;
+ int num_started = 0;
+ int loops = 0;
+
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ if (list_empty(&cur_trans->dirty_bgs)) {
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ return 0;
+ }
+ list_splice_init(&cur_trans->dirty_bgs, &dirty);
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+
+again:
+ /* Make sure all the block groups on our dirty list actually exist */
+ btrfs_create_pending_block_groups(trans);
+
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ }
+
+ /*
+ * cache_write_mutex is here only to save us from balance or automatic
+ * removal of empty block groups deleting this block group while we are
+ * writing out the cache
+ */
+ mutex_lock(&trans->transaction->cache_write_mutex);
+ while (!list_empty(&dirty)) {
+ bool drop_reserve = true;
+
+ cache = list_first_entry(&dirty, struct btrfs_block_group,
+ dirty_list);
+ /*
+ * This can happen if something re-dirties a block group that
+ * is already under IO. Just wait for it to finish and then do
+ * it all again
+ */
+ if (!list_empty(&cache->io_list)) {
+ list_del_init(&cache->io_list);
+ btrfs_wait_cache_io(trans, cache, path);
+ btrfs_put_block_group(cache);
+ }
+
+ /*
+ * btrfs_wait_cache_io uses the cache->dirty_list to decide if
+ * it should update the cache_state. Don't delete until after
+ * we wait.
+ *
+ * Since we're not running in the commit critical section
+ * we need the dirty_bgs_lock to protect from update_block_group
+ */
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ list_del_init(&cache->dirty_list);
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+
+ should_put = 1;
+
+ cache_save_setup(cache, trans, path);
+
+ if (cache->disk_cache_state == BTRFS_DC_SETUP) {
+ cache->io_ctl.inode = NULL;
+ ret = btrfs_write_out_cache(trans, cache, path);
+ if (ret == 0 && cache->io_ctl.inode) {
+ num_started++;
+ should_put = 0;
+
+ /*
+ * The cache_write_mutex is protecting the
+ * io_list, also refer to the definition of
+ * btrfs_transaction::io_bgs for more details
+ */
+ list_add_tail(&cache->io_list, io);
+ } else {
+ /*
+ * If we failed to write the cache, the
+ * generation will be bad and life goes on
+ */
+ ret = 0;
+ }
+ }
+ if (!ret) {
+ ret = write_one_cache_group(trans, path, cache);
+ /*
+ * Our block group might still be attached to the list
+ * of new block groups in the transaction handle of some
+ * other task (struct btrfs_trans_handle->new_bgs). This
+ * means its block group item isn't yet in the extent
+ * tree. If this happens ignore the error, as we will
+ * try again later in the critical section of the
+ * transaction commit.
+ */
+ if (ret == -ENOENT) {
+ ret = 0;
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ if (list_empty(&cache->dirty_list)) {
+ list_add_tail(&cache->dirty_list,
+ &cur_trans->dirty_bgs);
+ btrfs_get_block_group(cache);
+ drop_reserve = false;
+ }
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ } else if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ }
+ }
+
+ /* If it's not on the io list, we need to put the block group */
+ if (should_put)
+ btrfs_put_block_group(cache);
+ if (drop_reserve)
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
+
+ if (ret)
+ break;
+
+ /*
+ * Avoid blocking other tasks for too long. It might even save
+ * us from writing caches for block groups that are going to be
+ * removed.
+ */
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+ mutex_lock(&trans->transaction->cache_write_mutex);
+ }
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+
+ /*
+ * Go through delayed refs for all the stuff we've just kicked off
+ * and then loop back (just once)
+ */
+ ret = btrfs_run_delayed_refs(trans, 0);
+ if (!ret && loops == 0) {
+ loops++;
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ list_splice_init(&cur_trans->dirty_bgs, &dirty);
+ /*
+ * dirty_bgs_lock protects us from concurrent block group
+ * deletes too (not just cache_write_mutex).
+ */
+ if (!list_empty(&dirty)) {
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ goto again;
+ }
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ } else if (ret < 0) {
+ btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
+ }
+
+ btrfs_free_path(path);
+ return ret;
+}
+
+int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_block_group *cache;
+ struct btrfs_transaction *cur_trans = trans->transaction;
+ int ret = 0;
+ int should_put;
+ struct btrfs_path *path;
+ struct list_head *io = &cur_trans->io_bgs;
+ int num_started = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ /*
+ * Even though we are in the critical section of the transaction commit,
+ * we can still have concurrent tasks adding elements to this
+ * transaction's list of dirty block groups. These tasks correspond to
+ * endio free space workers started when writeback finishes for a
+ * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
+ * allocate new block groups as a result of COWing nodes of the root
+ * tree when updating the free space inode. The writeback for the space
+ * caches is triggered by an earlier call to
+ * btrfs_start_dirty_block_groups() and iterations of the following
+ * loop.
+ * Also we want to do the cache_save_setup first and then run the
+ * delayed refs to make sure we have the best chance at doing this all
+ * in one shot.
+ */
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ while (!list_empty(&cur_trans->dirty_bgs)) {
+ cache = list_first_entry(&cur_trans->dirty_bgs,
+ struct btrfs_block_group,
+ dirty_list);
+
+ /*
+ * This can happen if cache_save_setup re-dirties a block group
+ * that is already under IO. Just wait for it to finish and
+ * then do it all again
+ */
+ if (!list_empty(&cache->io_list)) {
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ list_del_init(&cache->io_list);
+ btrfs_wait_cache_io(trans, cache, path);
+ btrfs_put_block_group(cache);
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ }
+
+ /*
+ * Don't remove from the dirty list until after we've waited on
+ * any pending IO
+ */
+ list_del_init(&cache->dirty_list);
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ should_put = 1;
+
+ cache_save_setup(cache, trans, path);
+
+ if (!ret)
+ ret = btrfs_run_delayed_refs(trans,
+ (unsigned long) -1);
+
+ if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
+ cache->io_ctl.inode = NULL;
+ ret = btrfs_write_out_cache(trans, cache, path);
+ if (ret == 0 && cache->io_ctl.inode) {
+ num_started++;
+ should_put = 0;
+ list_add_tail(&cache->io_list, io);
+ } else {
+ /*
+ * If we failed to write the cache, the
+ * generation will be bad and life goes on
+ */
+ ret = 0;
+ }
+ }
+ if (!ret) {
+ ret = write_one_cache_group(trans, path, cache);
+ /*
+ * One of the free space endio workers might have
+ * created a new block group while updating a free space
+ * cache's inode (at inode.c:btrfs_finish_ordered_io())
+ * and hasn't released its transaction handle yet, in
+ * which case the new block group is still attached to
+ * its transaction handle and its creation has not
+ * finished yet (no block group item in the extent tree
+ * yet, etc). If this is the case, wait for all free
+ * space endio workers to finish and retry. This is a
+ * very rare case so no need for a more efficient and
+ * complex approach.
+ */
+ if (ret == -ENOENT) {
+ wait_event(cur_trans->writer_wait,
+ atomic_read(&cur_trans->num_writers) == 1);
+ ret = write_one_cache_group(trans, path, cache);
+ }
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ }
+
+ /* If it's not on the io list, we need to put the block group */
+ if (should_put)
+ btrfs_put_block_group(cache);
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ }
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+
+ /*
+ * Refer to the definition of io_bgs member for details why it's safe
+ * to use it without any locking
+ */
+ while (!list_empty(io)) {
+ cache = list_first_entry(io, struct btrfs_block_group,
+ io_list);
+ list_del_init(&cache->io_list);
+ btrfs_wait_cache_io(trans, cache, path);
+ btrfs_put_block_group(cache);
+ }
+
+ btrfs_free_path(path);
+ return ret;
+}
+
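+/*
+ * Update the super block's bytes_used counter and the used/pinned counters of
+ * every block group covering [bytenr, bytenr + num_bytes), for either an
+ * allocation (alloc != 0) or a free (alloc == 0).
+ */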
+int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes, int alloc)
+{
+ struct btrfs_fs_info *info = trans->fs_info;
+ struct btrfs_block_group *cache = NULL;
+ u64 total = num_bytes;
+ u64 old_val;
+ u64 byte_in_group;
+ int factor;
+ int ret = 0;
+
+ /* Block accounting for super block */
+ spin_lock(&info->delalloc_root_lock);
+ old_val = btrfs_super_bytes_used(info->super_copy);
+ if (alloc)
+ old_val += num_bytes;
+ else
+ old_val -= num_bytes;
+ btrfs_set_super_bytes_used(info->super_copy, old_val);
+ spin_unlock(&info->delalloc_root_lock);
+
+ while (total) {
+ cache = btrfs_lookup_block_group(info, bytenr);
+ if (!cache) {
+ ret = -ENOENT;
+ break;
+ }
+ factor = btrfs_bg_type_to_factor(cache->flags);
+
+ /*
+ * If this block group has free space cache written out, we
+ * need to make sure to load it if we are removing space. This
+ * is because we need the unpinning stage to actually add the
+ * space back to the block group, otherwise we will leak space.
+ */
+ if (!alloc && !btrfs_block_group_done(cache))
+ btrfs_cache_block_group(cache, 1);
+
+ byte_in_group = bytenr - cache->start;
+ WARN_ON(byte_in_group > cache->length);
+
+ spin_lock(&cache->space_info->lock);
+ spin_lock(&cache->lock);
+
+ if (btrfs_test_opt(info, SPACE_CACHE) &&
+ cache->disk_cache_state < BTRFS_DC_CLEAR)
+ cache->disk_cache_state = BTRFS_DC_CLEAR;
+
+ old_val = cache->used;
+ num_bytes = min(total, cache->length - byte_in_group);
+ if (alloc) {
+ old_val += num_bytes;
+ cache->used = old_val;
+ cache->reserved -= num_bytes;
+ cache->space_info->bytes_reserved -= num_bytes;
+ cache->space_info->bytes_used += num_bytes;
+ cache->space_info->disk_used += num_bytes * factor;
+ spin_unlock(&cache->lock);
+ spin_unlock(&cache->space_info->lock);
+ } else {
+ old_val -= num_bytes;
+ cache->used = old_val;
+ cache->pinned += num_bytes;
+ btrfs_space_info_update_bytes_pinned(info,
+ cache->space_info, num_bytes);
+ cache->space_info->bytes_used -= num_bytes;
+ cache->space_info->disk_used -= num_bytes * factor;
+ spin_unlock(&cache->lock);
+ spin_unlock(&cache->space_info->lock);
+
+ percpu_counter_add_batch(
+ &cache->space_info->total_bytes_pinned,
+ num_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
+ set_extent_dirty(info->pinned_extents,
+ bytenr, bytenr + num_bytes - 1,
+ GFP_NOFS | __GFP_NOFAIL);
+ }
+
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+ if (list_empty(&cache->dirty_list)) {
+ list_add_tail(&cache->dirty_list,
+ &trans->transaction->dirty_bgs);
+ trans->delayed_ref_updates++;
+ btrfs_get_block_group(cache);
+ }
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
+
+ /*
+ * No longer have used bytes in this block group, queue it for
+ * deletion. We do this after adding the block group to the
+ * dirty list to avoid races between cleaner kthread and space
+ * cache writeout.
+ */
+ if (!alloc && old_val == 0)
+ btrfs_mark_bg_unused(cache);
+
+ btrfs_put_block_group(cache);
+ total -= num_bytes;
+ bytenr += num_bytes;
+ }
+
+ /* Modified block groups are accounted for in the delayed_refs_rsv. */
+ btrfs_update_delayed_refs_rsv(trans);
+ return ret;
+}
+
+/**
+ * btrfs_add_reserved_bytes - update the block_group and space info counters
+ * @cache: The cache we are manipulating
+ * @ram_bytes: The number of bytes of file content; this will be the same as
+ * @num_bytes except for the compression path.
+ * @num_bytes: The number of bytes in question
+ * @delalloc: The blocks are allocated for the delalloc write
+ *
+ * This is called by the allocator when it reserves space. If this is a
+ * reservation and the block group has become read-only, we cannot make the
+ * reservation and return -EAGAIN; otherwise this function always succeeds.
+ */
+int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
+ u64 ram_bytes, u64 num_bytes, int delalloc)
+{
+ struct btrfs_space_info *space_info = cache->space_info;
+ int ret = 0;
+
+ spin_lock(&space_info->lock);
+ spin_lock(&cache->lock);
+ if (cache->ro) {
+ ret = -EAGAIN;
+ } else {
+ cache->reserved += num_bytes;
+ space_info->bytes_reserved += num_bytes;
+ trace_btrfs_space_reservation(cache->fs_info, "space_info",
+ space_info->flags, num_bytes, 1);
+ btrfs_space_info_update_bytes_may_use(cache->fs_info,
+ space_info, -ram_bytes);
+ if (delalloc)
+ cache->delalloc_bytes += num_bytes;
+ }
+ spin_unlock(&cache->lock);
+ spin_unlock(&space_info->lock);
+ return ret;
+}
+
+/**
+ * btrfs_free_reserved_bytes - update the block_group and space info counters
+ * @cache: The cache we are manipulating
+ * @num_bytes: The number of bytes in question
+ * @delalloc: The blocks are allocated for the delalloc write
+ *
+ * This is called by somebody who is freeing space that was never actually used
+ * on disk. For example if you reserve some space for a new leaf in transaction
+ * A and before transaction A commits you free that leaf, you call this to
+ * clear the reservation.
+ */
+void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
+ u64 num_bytes, int delalloc)
+{
+ struct btrfs_space_info *space_info = cache->space_info;
+
+ spin_lock(&space_info->lock);
+ spin_lock(&cache->lock);
+ if (cache->ro)
+ space_info->bytes_readonly += num_bytes;
+ cache->reserved -= num_bytes;
+ space_info->bytes_reserved -= num_bytes;
+ space_info->max_extent_size = 0;
+
+ if (delalloc)
+ cache->delalloc_bytes -= num_bytes;
+ spin_unlock(&cache->lock);
+ spin_unlock(&space_info->lock);
+}
+
+static void force_metadata_allocation(struct btrfs_fs_info *info)
+{
+ struct list_head *head = &info->space_info;
+ struct btrfs_space_info *found;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(found, head, list) {
+ if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
+ found->force_alloc = CHUNK_ALLOC_FORCE;
+ }
+ rcu_read_unlock();
+}
+
+static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *sinfo, int force)
+{
+ u64 bytes_used = btrfs_space_info_used(sinfo, false);
+ u64 thresh;
+
+ if (force == CHUNK_ALLOC_FORCE)
+ return 1;
+
+ /*
+ * in limited mode, we want to have some free space up to
+ * about 1% of the FS size.
+ */
+ if (force == CHUNK_ALLOC_LIMITED) {
+ thresh = btrfs_super_total_bytes(fs_info->super_copy);
+ thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
+
+ if (sinfo->total_bytes - bytes_used < thresh)
+ return 1;
+ }
+
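+	/*
+	 * Otherwise only allocate once used space (plus a 2M slack) has
+	 * crossed 80% of the space already allocated to this profile.
+	 */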
+ if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
+ return 0;
+ return 1;
+}
+
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
+{
+ u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
+
+ return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+}
+
+/*
+ * If force is CHUNK_ALLOC_FORCE:
+ * - return 1 if it successfully allocates a chunk,
+ * - return errors including -ENOSPC otherwise.
+ * If force is NOT CHUNK_ALLOC_FORCE:
+ * - return 0 if it doesn't need to allocate a new chunk,
+ * - return 1 if it successfully allocates a chunk,
+ * - return errors including -ENOSPC otherwise.
+ */
+int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+ enum btrfs_chunk_alloc_enum force)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_space_info *space_info;
+ bool wait_for_alloc = false;
+ bool should_alloc = false;
+ int ret = 0;
+
+ /* Don't re-enter if we're already allocating a chunk */
+ if (trans->allocating_chunk)
+ return -ENOSPC;
+
+ space_info = btrfs_find_space_info(fs_info, flags);
+ ASSERT(space_info);
+
+ do {
+ spin_lock(&space_info->lock);
+ if (force < space_info->force_alloc)
+ force = space_info->force_alloc;
+ should_alloc = should_alloc_chunk(fs_info, space_info, force);
+ if (space_info->full) {
+ /* No more free physical space */
+ if (should_alloc)
+ ret = -ENOSPC;
+ else
+ ret = 0;
+ spin_unlock(&space_info->lock);
+ return ret;
+ } else if (!should_alloc) {
+ spin_unlock(&space_info->lock);
+ return 0;
+ } else if (space_info->chunk_alloc) {
+ /*
+ * Someone is already allocating, so we need to block
+ * until this someone is finished and then loop to
+ * recheck if we should continue with our allocation
+ * attempt.
+ */
+ wait_for_alloc = true;
+ spin_unlock(&space_info->lock);
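+			/*
+			 * Taking and dropping the chunk mutex here blocks us
+			 * until the allocation in progress has released it.
+			 */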
+ mutex_lock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->chunk_mutex);
+ } else {
+ /* Proceed with allocation */
+ space_info->chunk_alloc = 1;
+ wait_for_alloc = false;
+ spin_unlock(&space_info->lock);
+ }
+
+ cond_resched();
+ } while (wait_for_alloc);
+
+ mutex_lock(&fs_info->chunk_mutex);
+ trans->allocating_chunk = true;
+
+ /*
+ * If we have mixed data/metadata chunks we want to make sure we keep
+ * allocating mixed chunks instead of individual chunks.
+ */
+ if (btrfs_mixed_space_info(space_info))
+ flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
+
+ /*
+ * if we're doing a data chunk, go ahead and make sure that
+ * we keep a reasonable number of metadata chunks allocated in the
+ * FS as well.
+ */
+ if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
+ fs_info->data_chunk_allocations++;
+ if (!(fs_info->data_chunk_allocations %
+ fs_info->metadata_ratio))
+ force_metadata_allocation(fs_info);
+ }
+
+ /*
+ * Check if we have enough space in SYSTEM chunk because we may need
+ * to update devices.
+ */
+ check_system_chunk(trans, flags);
+
+ ret = btrfs_alloc_chunk(trans, flags);
+ trans->allocating_chunk = false;
+
+ spin_lock(&space_info->lock);
+ if (ret < 0) {
+ if (ret == -ENOSPC)
+ space_info->full = 1;
+ else
+ goto out;
+ } else {
+ ret = 1;
+ space_info->max_extent_size = 0;
+ }
+
+ space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+out:
+ space_info->chunk_alloc = 0;
+ spin_unlock(&space_info->lock);
+ mutex_unlock(&fs_info->chunk_mutex);
+ /*
+ * When we allocate a new chunk we reserve space in the chunk block
+ * reserve to make sure we can COW nodes/leafs in the chunk tree or
+ * add new nodes/leafs to it if we end up needing to do it when
+ * inserting the chunk item and updating device items as part of the
+ * second phase of chunk allocation, performed by
+ * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
+ * large number of new block groups to create in our transaction
+ * handle's new_bgs list to avoid exhausting the chunk block reserve
+ * in extreme cases - like having a single transaction create many new
+ * block groups when starting to write out the free space caches of all
+ * the block groups that were made dirty during the lifetime of the
+ * transaction.
+ */
+ if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
+ btrfs_create_pending_block_groups(trans);
+
+ return ret;
+}
+
+static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
+{
+ u64 num_dev;
+
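+	/* A devs_max of 0 means no limit, so assume all rw devices may be touched. */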
+ num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
+ if (!num_dev)
+ num_dev = fs_info->fs_devices->rw_devices;
+
+ return num_dev;
+}
+
+/*
+ * Reserve space in the system space for allocating or removing a chunk
+ */
+void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_space_info *info;
+ u64 left;
+ u64 thresh;
+ int ret = 0;
+ u64 num_devs;
+
+ /*
+ * The chunk mutex is needed because we can end up allocating a system chunk
+ * here, and we need an atomic, race-free space reservation in the chunk
+ * block reserve.
+ */
+ lockdep_assert_held(&fs_info->chunk_mutex);
+
+ info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
+ spin_lock(&info->lock);
+ left = info->total_bytes - btrfs_space_info_used(info, true);
+ spin_unlock(&info->lock);
+
+ num_devs = get_profile_num_devs(fs_info, type);
+
+ /* num_devs device items to update and 1 chunk item to add or remove */
+ thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
+ btrfs_calc_insert_metadata_size(fs_info, 1);
+
+ if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
+ btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
+ left, thresh, type);
+ btrfs_dump_space_info(fs_info, info, 0, 0);
+ }
+
+ if (left < thresh) {
+ u64 flags = btrfs_system_alloc_profile(fs_info);
+
+ /*
+ * Ignore failure to create system chunk. We might end up not
+ * needing it, as we might not need to COW all nodes/leafs from
+ * the paths we visit in the chunk tree (they were already COWed
+ * or created in the current transaction for example).
+ */
+ ret = btrfs_alloc_chunk(trans, flags);
+ }
+
+ if (!ret) {
+ ret = btrfs_block_rsv_add(fs_info->chunk_root,
+ &fs_info->chunk_block_rsv,
+ thresh, BTRFS_RESERVE_NO_FLUSH);
+ if (!ret)
+ trans->chunk_bytes_reserved += thresh;
+ }
+}
+
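+/*
+ * Iterate over all block groups and drop the reference (iref) each may hold
+ * on its free space cache inode.
+ */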
+void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
+{
+ struct btrfs_block_group *block_group;
+ u64 last = 0;
+
+ while (1) {
+ struct inode *inode;
+
+ block_group = btrfs_lookup_first_block_group(info, last);
+ while (block_group) {
+ btrfs_wait_block_group_cache_done(block_group);
+ spin_lock(&block_group->lock);
+ if (block_group->iref)
+ break;
+ spin_unlock(&block_group->lock);
+ block_group = btrfs_next_block_group(block_group);
+ }
+ if (!block_group) {
+ if (last == 0)
+ break;
+ last = 0;
+ continue;
+ }
+
+ inode = block_group->inode;
+ block_group->iref = 0;
+ block_group->inode = NULL;
+ spin_unlock(&block_group->lock);
+ ASSERT(block_group->io_ctl.inode == NULL);
+ iput(inode);
+ last = block_group->start + block_group->length;
+ btrfs_put_block_group(block_group);
+ }
+}
+
+/*
+ * Must be called only after stopping all workers, since we could have block
+ * group caching kthreads running, and therefore they could race with us if we
+ * freed the block groups before stopping them.
+ */
+int btrfs_free_block_groups(struct btrfs_fs_info *info)
+{
+ struct btrfs_block_group *block_group;
+ struct btrfs_space_info *space_info;
+ struct btrfs_caching_control *caching_ctl;
+ struct rb_node *n;
+
+ down_write(&info->commit_root_sem);
+ while (!list_empty(&info->caching_block_groups)) {
+ caching_ctl = list_entry(info->caching_block_groups.next,
+ struct btrfs_caching_control, list);
+ list_del(&caching_ctl->list);
+ btrfs_put_caching_control(caching_ctl);
+ }
+ up_write(&info->commit_root_sem);
+
+ spin_lock(&info->unused_bgs_lock);
+ while (!list_empty(&info->unused_bgs)) {
+ block_group = list_first_entry(&info->unused_bgs,
+ struct btrfs_block_group,
+ bg_list);
+ list_del_init(&block_group->bg_list);
+ btrfs_put_block_group(block_group);
+ }
+ spin_unlock(&info->unused_bgs_lock);
+
+ spin_lock(&info->block_group_cache_lock);
+ while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
+ block_group = rb_entry(n, struct btrfs_block_group,
+ cache_node);
+ rb_erase(&block_group->cache_node,
+ &info->block_group_cache_tree);
+ RB_CLEAR_NODE(&block_group->cache_node);
+ spin_unlock(&info->block_group_cache_lock);
+
+ down_write(&block_group->space_info->groups_sem);
+ list_del(&block_group->list);
+ up_write(&block_group->space_info->groups_sem);
+
+ /*
+ * We haven't cached this block group, which means we could
+ * possibly have excluded extents on this block group.
+ */
+ if (block_group->cached == BTRFS_CACHE_NO ||
+ block_group->cached == BTRFS_CACHE_ERROR)
+ btrfs_free_excluded_extents(block_group);
+
+ btrfs_remove_free_space_cache(block_group);
+ ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
+ ASSERT(list_empty(&block_group->dirty_list));
+ ASSERT(list_empty(&block_group->io_list));
+ ASSERT(list_empty(&block_group->bg_list));
+ ASSERT(atomic_read(&block_group->count) == 1);
+ btrfs_put_block_group(block_group);
+
+ spin_lock(&info->block_group_cache_lock);
+ }
+ spin_unlock(&info->block_group_cache_lock);
+
+ /*
+ * Now that all the block groups are freed, go through and free all the
+ * space_info structs. This is only called during the final stages of
+ * unmount, and so we know nobody is using them. We call
+ * synchronize_rcu() once before we start, just to be on the safe side.
+ */
+ synchronize_rcu();
+
+ btrfs_release_global_block_rsv(info);
+
+ while (!list_empty(&info->space_info)) {
+ space_info = list_entry(info->space_info.next,
+ struct btrfs_space_info,
+ list);
+
+ /*
+ * Do not hide this behind enospc_debug, this is actually
+ * important and indicates a real bug if this happens.
+ */
+ if (WARN_ON(space_info->bytes_pinned > 0 ||
+ space_info->bytes_reserved > 0 ||
+ space_info->bytes_may_use > 0))
+ btrfs_dump_space_info(info, space_info, 0, 0);
+ list_del(&space_info->list);
+ btrfs_sysfs_remove_space_info(space_info);
+ }
+ return 0;
+}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
new file mode 100644
index 000000000000..9b409676c4b2
--- /dev/null
+++ b/fs/btrfs/block-group.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_BLOCK_GROUP_H
+#define BTRFS_BLOCK_GROUP_H
+
+#include "free-space-cache.h"
+
+enum btrfs_disk_cache_state {
+ BTRFS_DC_WRITTEN,
+ BTRFS_DC_ERROR,
+ BTRFS_DC_CLEAR,
+ BTRFS_DC_SETUP,
+};
+
+/*
+ * Control flags for do_chunk_alloc's force field.
+ *
+ * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
+ *
+ * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
+ * chunks already allocated. This is used as part of the clustering code to
+ * help make sure we have a good pool of storage to cluster in, without filling
+ * the FS with empty chunks
+ *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ */
+enum btrfs_chunk_alloc_enum {
+ CHUNK_ALLOC_NO_FORCE,
+ CHUNK_ALLOC_LIMITED,
+ CHUNK_ALLOC_FORCE,
+};
+
+struct btrfs_caching_control {
+ struct list_head list;
+ struct mutex mutex;
+ wait_queue_head_t wait;
+ struct btrfs_work work;
+ struct btrfs_block_group *block_group;
+ u64 progress;
+ refcount_t count;
+};
+
+/* Once caching_thread() finds this much free space, it will wake up waiters. */
+#define CACHING_CTL_WAKE_UP SZ_2M
+
+struct btrfs_block_group {
+ struct btrfs_fs_info *fs_info;
+ struct inode *inode;
+ spinlock_t lock;
+ u64 start;
+ u64 length;
+ u64 pinned;
+ u64 reserved;
+ u64 used;
+ u64 delalloc_bytes;
+ u64 bytes_super;
+ u64 flags;
+ u64 cache_generation;
+
+ /*
+ * If the free space extent count exceeds this number, convert the block
+ * group to bitmaps.
+ */
+ u32 bitmap_high_thresh;
+
+ /*
+ * If the free space extent count drops below this number, convert the
+ * block group back to extents.
+ */
+ u32 bitmap_low_thresh;
+
+ /*
+ * This is only used for the delayed data space allocation because
+ * only the data space allocation and the related metadata update
+ * can be done across transactions.
+ */
+ struct rw_semaphore data_rwsem;
+
+ /* For raid56, this is a full stripe, without parity */
+ unsigned long full_stripe_len;
+
+ unsigned int ro;
+ unsigned int iref:1;
+ unsigned int has_caching_ctl:1;
+ unsigned int removed:1;
+
+ int disk_cache_state;
+
+ /* Cache tracking stuff */
+ int cached;
+ struct btrfs_caching_control *caching_ctl;
+ u64 last_byte_to_unpin;
+
+ struct btrfs_space_info *space_info;
+
+ /* Free space cache stuff */
+ struct btrfs_free_space_ctl *free_space_ctl;
+
+ /* Block group cache stuff */
+ struct rb_node cache_node;
+
+ /* For block groups in the same raid type */
+ struct list_head list;
+
+ /* Usage count */
+ atomic_t count;
+
+ /*
+ * List of struct btrfs_free_clusters for this block group.
+ * Today it will only have one thing on it, but that may change
+ */
+ struct list_head cluster_list;
+
+ /* For delayed block group creation or deletion of empty block groups */
+ struct list_head bg_list;
+
+ /* For read-only block groups */
+ struct list_head ro_list;
+
+ atomic_t trimming;
+
+ /* For dirty block groups */
+ struct list_head dirty_list;
+ struct list_head io_list;
+
+ struct btrfs_io_ctl io_ctl;
+
+ /*
+ * Incremented when doing extent allocations and holding a read lock
+ * on the space_info's groups_sem semaphore.
+ * Decremented when an ordered extent that represents an IO against this
+ * block group's range is created (after it's added to its inode's
+ * root's list of ordered extents) or immediately after the allocation
+ * if it's a metadata extent or fallocate extent (for these cases we
+ * don't create ordered extents).
+ */
+ atomic_t reservations;
+
+ /*
+ * Incremented while holding the spinlock *lock* by a task checking if
+ * it can perform a nocow write (incremented if the value for the *ro*
+ * field is 0). Decremented by such tasks once they create an ordered
+ * extent or before that if some error happens before reaching that step.
+ * This is to prevent races between block group relocation and nocow
+ * writes through direct IO.
+ */
+ atomic_t nocow_writers;
+
+ /* Lock for free space tree operations. */
+ struct mutex free_space_lock;
+
+ /*
+ * Does the block group need to be added to the free space tree?
+ * Protected by free_space_lock.
+ */
+ int needs_free_space;
+
+ /* Record locked full stripes for RAID5/6 block group */
+ struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
+};
+
+#ifdef CONFIG_BTRFS_DEBUG
+static inline int btrfs_should_fragment_free_space(
+ struct btrfs_block_group *block_group)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+
+ return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
+ block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
+ block_group->flags & BTRFS_BLOCK_GROUP_DATA);
+}
+#endif
+
+struct btrfs_block_group *btrfs_lookup_first_block_group(
+ struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group *btrfs_lookup_block_group(
+ struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group *btrfs_next_block_group(
+ struct btrfs_block_group *cache);
+void btrfs_get_block_group(struct btrfs_block_group *cache);
+void btrfs_put_block_group(struct btrfs_block_group *cache);
+void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
+ const u64 start);
+void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
+bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
+void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
+void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
+void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
+ u64 num_bytes);
+int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
+int btrfs_cache_block_group(struct btrfs_block_group *cache,
+ int load_cache_only);
+void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
+struct btrfs_caching_control *btrfs_get_caching_control(
+ struct btrfs_block_group *cache);
+u64 add_new_free_space(struct btrfs_block_group *block_group,
+ u64 start, u64 end);
+struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
+ struct btrfs_fs_info *fs_info,
+ const u64 chunk_offset);
+int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ u64 group_start, struct extent_map *em);
+void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
+void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
+int btrfs_read_block_groups(struct btrfs_fs_info *info);
+int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
+ u64 type, u64 chunk_offset, u64 size);
+void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
+int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
+ bool do_chunk_alloc);
+void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
+int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
+int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
+int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
+int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes, int alloc);
+int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
+ u64 ram_bytes, u64 num_bytes, int delalloc);
+void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
+ u64 num_bytes, int delalloc);
+int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+ enum btrfs_chunk_alloc_enum force);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
+void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
+u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
+void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
+int btrfs_free_block_groups(struct btrfs_fs_info *info);
+
+static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
+{
+ return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
+}
+
+static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
+{
+ return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+}
+
+static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
+{
+ return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
+}
+
+static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
+{
+ smp_mb();
+ return cache->cached == BTRFS_CACHE_FINISHED ||
+ cache->cached == BTRFS_CACHE_ERROR;
+}
+
+#endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index 698470b9f32d..d07bd41a7c1e 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+#include "misc.h"
#include "ctree.h"
#include "block-rsv.h"
#include "space-info.h"
-#include "math.h"
#include "transaction.h"
static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
@@ -54,8 +54,9 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
spin_unlock(&dest->lock);
}
if (num_bytes)
- btrfs_space_info_add_old_bytes(fs_info, space_info,
- num_bytes);
+ btrfs_space_info_free_bytes_may_use(fs_info,
+ space_info,
+ num_bytes);
}
if (qgroup_to_release_ret)
*qgroup_to_release_ret = qgroup_to_release;
@@ -258,6 +259,7 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
struct btrfs_space_info *sinfo = block_rsv->space_info;
u64 num_bytes;
+ unsigned min_items;
/*
* The global block rsv is based on the size of the extent tree, the
@@ -267,7 +269,26 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
btrfs_root_used(&fs_info->csum_root->root_item) +
btrfs_root_used(&fs_info->tree_root->root_item);
- num_bytes = max_t(u64, num_bytes, SZ_16M);
+
+ /*
+ * We at a minimum are going to modify the csum root, the tree root, and
+ * the extent root.
+ */
+ min_items = 3;
+
+ /*
+ * But we also want to reserve enough space so we can do the fallback
+ * global reserve for an unlink, which is an additional 5 items (see the
+ * comment in __unlink_start_trans for what we're modifying).
+ *
+ * But we also need space for the delayed ref updates from the unlink,
+ * so it's 10: 5 for the actual operation and 5 for the delayed ref
+ * updates.
+ */
+ min_items += 10;
+
+ num_bytes = max_t(u64, num_bytes,
+ btrfs_calc_insert_metadata_size(fs_info, min_items));
spin_lock(&sinfo->lock);
spin_lock(&block_rsv->lock);
@@ -275,25 +296,16 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
block_rsv->size = min_t(u64, num_bytes, SZ_512M);
if (block_rsv->reserved < block_rsv->size) {
- num_bytes = btrfs_space_info_used(sinfo, true);
- if (sinfo->total_bytes > num_bytes) {
- num_bytes = sinfo->total_bytes - num_bytes;
- num_bytes = min(num_bytes,
- block_rsv->size - block_rsv->reserved);
- block_rsv->reserved += num_bytes;
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
- num_bytes);
- trace_btrfs_space_reservation(fs_info, "space_info",
- sinfo->flags, num_bytes,
- 1);
- }
+ num_bytes = block_rsv->size - block_rsv->reserved;
+ block_rsv->reserved += num_bytes;
+ btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
+ num_bytes);
} else if (block_rsv->reserved > block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size;
btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
-num_bytes);
- trace_btrfs_space_reservation(fs_info, "space_info",
- sinfo->flags, num_bytes, 0);
block_rsv->reserved = block_rsv->size;
+ btrfs_try_granting_tickets(fs_info, sinfo);
}
if (block_rsv->reserved == block_rsv->size)
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index f853835c409c..4e12a477d32e 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -63,9 +63,6 @@ struct btrfs_inode {
/* held while logging the inode in tree-log.c */
struct mutex log_mutex;
- /* held while doing delalloc reservations */
- struct mutex delalloc_mutex;
-
/* used to order data wrt metadata */
struct btrfs_ordered_inode_tree ordered_tree;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 81a9731959a9..0b52ab4cb964 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -940,7 +940,7 @@ static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
kfree(sf);
}
-static int btrfsic_process_metablock(
+static noinline_for_stack int btrfsic_process_metablock(
struct btrfsic_state *state,
struct btrfsic_block *const first_block,
struct btrfsic_block_data_ctx *const first_block_ctx,
@@ -1706,8 +1706,9 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
* Test whether the disk block contains a tree block (leaf or node)
* (note that this test fails for the super block)
*/
-static int btrfsic_test_for_metadata(struct btrfsic_state *state,
- char **datav, unsigned int num_pages)
+static noinline_for_stack int btrfsic_test_for_metadata(
+ struct btrfsic_state *state,
+ char **datav, unsigned int num_pages)
{
struct btrfs_fs_info *fs_info = state->fs_info;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 60c47b417a4b..ee834ef7beb4 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -18,6 +18,7 @@
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
+#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -28,6 +29,41 @@
#include "extent_io.h"
#include "extent_map.h"
+int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out);
+int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+int zlib_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen);
+struct list_head *zlib_alloc_workspace(unsigned int level);
+void zlib_free_workspace(struct list_head *ws);
+struct list_head *zlib_get_workspace(unsigned int level);
+
+int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out);
+int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+int lzo_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen);
+struct list_head *lzo_alloc_workspace(unsigned int level);
+void lzo_free_workspace(struct list_head *ws);
+
+int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out);
+int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+int zstd_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen);
+void zstd_init_workspace_manager(void);
+void zstd_cleanup_workspace_manager(void);
+struct list_head *zstd_alloc_workspace(unsigned int level);
+void zstd_free_workspace(struct list_head *ws);
+struct list_head *zstd_get_workspace(unsigned int level);
+void zstd_put_workspace(struct list_head *ws);
+
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
const char* btrfs_compress_type2str(enum btrfs_compression_type type)
@@ -38,6 +74,8 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
case BTRFS_COMPRESS_ZSTD:
case BTRFS_COMPRESS_NONE:
return btrfs_compress_types[type];
+ default:
+ break;
}
return NULL;
@@ -59,6 +97,70 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
return false;
}
+static int compression_compress_pages(int type, struct list_head *ws,
+ struct address_space *mapping, u64 start, struct page **pages,
+ unsigned long *out_pages, unsigned long *total_in,
+ unsigned long *total_out)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB:
+ return zlib_compress_pages(ws, mapping, start, pages,
+ out_pages, total_in, total_out);
+ case BTRFS_COMPRESS_LZO:
+ return lzo_compress_pages(ws, mapping, start, pages,
+ out_pages, total_in, total_out);
+ case BTRFS_COMPRESS_ZSTD:
+ return zstd_compress_pages(ws, mapping, start, pages,
+ out_pages, total_in, total_out);
+ case BTRFS_COMPRESS_NONE:
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here. As a sane fallback, return what the
+ * callers will understand as 'no compression happened'.
+ */
+ return -E2BIG;
+ }
+}
+
+static int compression_decompress_bio(int type, struct list_head *ws,
+ struct compressed_bio *cb)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
+ case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
+ case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
+ case BTRFS_COMPRESS_NONE:
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
+}
+
+static int compression_decompress(int type, struct list_head *ws,
+ unsigned char *data_in, struct page *dest_page,
+ unsigned long start_byte, size_t srclen, size_t destlen)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
+ start_byte, srclen, destlen);
+ case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
+ start_byte, srclen, destlen);
+ case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
+ start_byte, srclen, destlen);
+ case BTRFS_COMPRESS_NONE:
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
+}
+
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
@@ -310,7 +412,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long compressed_len,
struct page **compressed_pages,
unsigned long nr_pages,
- unsigned int write_flags)
+ unsigned int write_flags,
+ struct cgroup_subsys_state *blkcg_css)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio = NULL;
@@ -319,7 +422,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
int pg_index = 0;
struct page *page;
u64 first_byte = disk_start;
- struct block_device *bdev;
blk_status_t ret;
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@@ -338,13 +440,15 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
cb->orig_bio = NULL;
cb->nr_pages = nr_pages;
- bdev = fs_info->fs_devices->latest_bdev;
-
bio = btrfs_bio_alloc(first_byte);
- bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
+
+ if (blkcg_css) {
+ bio->bi_opf |= REQ_CGROUP_PUNT;
+ bio_associate_blkg_from_css(bio, blkcg_css);
+ }
refcount_set(&cb->pending_bios, 1);
/* create and submit bios for the compressed pages */
@@ -377,14 +481,13 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(fs_info, bio, 0, 1);
+ ret = btrfs_map_bio(fs_info, bio, 0);
if (ret) {
bio->bi_status = ret;
bio_endio(bio);
}
bio = btrfs_bio_alloc(first_byte);
- bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
@@ -408,7 +511,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(fs_info, bio, 0, 1);
+ ret = btrfs_map_bio(fs_info, bio, 0);
if (ret) {
bio->bi_status = ret;
bio_endio(bio);
@@ -552,7 +655,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
unsigned long nr_pages;
unsigned long pg_index;
struct page *page;
- struct block_device *bdev;
struct bio *comp_bio;
u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
u64 em_len;
@@ -603,8 +705,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
if (!cb->compressed_pages)
goto fail1;
- bdev = fs_info->fs_devices->latest_bdev;
-
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
__GFP_HIGHMEM);
@@ -623,7 +723,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->len = bio->bi_iter.bi_size;
comp_bio = btrfs_bio_alloc(cur_disk_byte);
- bio_set_dev(comp_bio, bdev);
comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
@@ -667,14 +766,13 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
fs_info->sectorsize);
sums += csum_size * nr_sectors;
- ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
if (ret) {
comp_bio->bi_status = ret;
bio_endio(comp_bio);
}
comp_bio = btrfs_bio_alloc(cur_disk_byte);
- bio_set_dev(comp_bio, bdev);
comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
@@ -692,7 +790,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
if (ret) {
comp_bio->bi_status = ret;
bio_endio(comp_bio);
@@ -763,26 +861,6 @@ struct heuristic_ws {
static struct workspace_manager heuristic_wsm;
-static void heuristic_init_workspace_manager(void)
-{
- btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
-}
-
-static void heuristic_cleanup_workspace_manager(void)
-{
- btrfs_cleanup_workspace_manager(&heuristic_wsm);
-}
-
-static struct list_head *heuristic_get_workspace(unsigned int level)
-{
- return btrfs_get_workspace(&heuristic_wsm, level);
-}
-
-static void heuristic_put_workspace(struct list_head *ws)
-{
- btrfs_put_workspace(&heuristic_wsm, ws);
-}
-
static void free_heuristic_ws(struct list_head *ws)
{
struct heuristic_ws *workspace;
@@ -823,12 +901,7 @@ fail:
}
const struct btrfs_compress_op btrfs_heuristic_compress = {
- .init_workspace_manager = heuristic_init_workspace_manager,
- .cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
- .get_workspace = heuristic_get_workspace,
- .put_workspace = heuristic_put_workspace,
- .alloc_workspace = alloc_heuristic_ws,
- .free_workspace = free_heuristic_ws,
+ .workspace_manager = &heuristic_wsm,
};
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
@@ -839,13 +912,44 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zstd_compress,
};
-void btrfs_init_workspace_manager(struct workspace_manager *wsm,
- const struct btrfs_compress_op *ops)
+static struct list_head *alloc_workspace(int type, unsigned int level)
{
- struct list_head *workspace;
+ switch (type) {
+ case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
+ case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
+ case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
+ case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
+}
- wsm->ops = ops;
+static void free_workspace(int type, struct list_head *ws)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
+ case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
+ case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
+ case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
+}
+static void btrfs_init_workspace_manager(int type)
+{
+ struct workspace_manager *wsm;
+ struct list_head *workspace;
+
+ wsm = btrfs_compress_op[type]->workspace_manager;
INIT_LIST_HEAD(&wsm->idle_ws);
spin_lock_init(&wsm->ws_lock);
atomic_set(&wsm->total_ws, 0);
@@ -855,7 +959,7 @@ void btrfs_init_workspace_manager(struct workspace_manager *wsm,
* Preallocate one workspace for each compression type so we can
* guarantee forward progress in the worst case
*/
- workspace = wsm->ops->alloc_workspace(0);
+ workspace = alloc_workspace(type, 0);
if (IS_ERR(workspace)) {
pr_warn(
"BTRFS: cannot preallocate compression workspace, will try later\n");
@@ -866,14 +970,16 @@ void btrfs_init_workspace_manager(struct workspace_manager *wsm,
}
}
-void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
+static void btrfs_cleanup_workspace_manager(int type)
{
+ struct workspace_manager *wsman;
struct list_head *ws;
+ wsman = btrfs_compress_op[type]->workspace_manager;
while (!list_empty(&wsman->idle_ws)) {
ws = wsman->idle_ws.next;
list_del(ws);
- wsman->ops->free_workspace(ws);
+ free_workspace(type, ws);
atomic_dec(&wsman->total_ws);
}
}
@@ -884,9 +990,9 @@ void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
* Preallocation makes forward progress guarantees and we do not return
* errors.
*/
-struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
- unsigned int level)
+struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
+ struct workspace_manager *wsm;
struct list_head *workspace;
int cpus = num_online_cpus();
unsigned nofs_flag;
@@ -896,6 +1002,7 @@ struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
wait_queue_head_t *ws_wait;
int *free_ws;
+ wsm = btrfs_compress_op[type]->workspace_manager;
idle_ws = &wsm->idle_ws;
ws_lock = &wsm->ws_lock;
total_ws = &wsm->total_ws;
@@ -931,7 +1038,7 @@ again:
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
- workspace = wsm->ops->alloc_workspace(level);
+ workspace = alloc_workspace(type, level);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(workspace)) {
@@ -964,21 +1071,34 @@ again:
static struct list_head *get_workspace(int type, int level)
{
- return btrfs_compress_op[type]->get_workspace(level);
+ switch (type) {
+ case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
+ case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
+ case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
+ case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
}
/*
* put a workspace struct back on the list or free it if we have enough
* idle ones sitting around
*/
-void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
+void btrfs_put_workspace(int type, struct list_head *ws)
{
+ struct workspace_manager *wsm;
struct list_head *idle_ws;
spinlock_t *ws_lock;
atomic_t *total_ws;
wait_queue_head_t *ws_wait;
int *free_ws;
+ wsm = btrfs_compress_op[type]->workspace_manager;
idle_ws = &wsm->idle_ws;
ws_lock = &wsm->ws_lock;
total_ws = &wsm->total_ws;
@@ -994,7 +1114,7 @@ void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
}
spin_unlock(ws_lock);
- wsm->ops->free_workspace(ws);
+ free_workspace(type, ws);
atomic_dec(total_ws);
wake:
cond_wake_up(ws_wait);
@@ -1002,7 +1122,18 @@ wake:
static void put_workspace(int type, struct list_head *ws)
{
- return btrfs_compress_op[type]->put_workspace(ws);
+ switch (type) {
+ case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
+ case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
+ case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
+ case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
}
/*
@@ -1039,12 +1170,10 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
struct list_head *workspace;
int ret;
- level = btrfs_compress_op[type]->set_level(level);
+ level = btrfs_compress_set_level(type, level);
workspace = get_workspace(type, level);
- ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
- start, pages,
- out_pages,
- total_in, total_out);
+ ret = compression_compress_pages(type, workspace, mapping, start, pages,
+ out_pages, total_in, total_out);
put_workspace(type, workspace);
return ret;
}
@@ -1070,7 +1199,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
int type = cb->compress_type;
workspace = get_workspace(type, 0);
- ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
+ ret = compression_decompress_bio(type, workspace, cb);
put_workspace(type, workspace);
return ret;
@@ -1088,9 +1217,8 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
int ret;
workspace = get_workspace(type, 0);
- ret = btrfs_compress_op[type]->decompress(workspace, data_in,
- dest_page, start_byte,
- srclen, destlen);
+ ret = compression_decompress(type, workspace, data_in, dest_page,
+ start_byte, srclen, destlen);
put_workspace(type, workspace);
return ret;
@@ -1098,18 +1226,18 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
void __init btrfs_init_compress(void)
{
- int i;
-
- for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
- btrfs_compress_op[i]->init_workspace_manager();
+ btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
+ btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
+ btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
+ zstd_init_workspace_manager();
}
void __cold btrfs_exit_compress(void)
{
- int i;
-
- for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
- btrfs_compress_op[i]->cleanup_workspace_manager();
+ btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
+ btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
+ btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
+ zstd_cleanup_workspace_manager();
}
/*
@@ -1611,7 +1739,23 @@ unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
level = 0;
}
- level = btrfs_compress_op[type]->set_level(level);
+ level = btrfs_compress_set_level(type, level);
+
+ return level;
+}
+
+/*
+ * Adjust @level according to the limits of the compression algorithm or
+ * fallback to default
+ */
+unsigned int btrfs_compress_set_level(int type, unsigned level)
+{
+ const struct btrfs_compress_op *ops = btrfs_compress_op[type];
+
+ if (level == 0)
+ level = ops->default_level;
+ else
+ level = min(level, ops->max_level);
return level;
}
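
The hunks above replace the per-algorithm function-pointer table with explicit dispatch on the compression type. A minimal sketch of the resulting calling convention, using only helpers introduced by this patch (get_workspace, put_workspace, compression_compress_pages, btrfs_compress_set_level); the wrapper function itself is hypothetical and simply mirrors the updated btrfs_compress_pages():

static int example_compress(int type, unsigned int level,
			    struct address_space *mapping, u64 start,
			    struct page **pages, unsigned long *out_pages,
			    unsigned long *total_in, unsigned long *total_out)
{
	struct list_head *ws;
	int ret;

	/* Clamp the level to the algorithm's range, or use its default. */
	level = btrfs_compress_set_level(type, level);

	/* Type-based dispatch replaces the old ops->get_workspace() hook. */
	ws = get_workspace(type, level);
	ret = compression_compress_pages(type, ws, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, ws);
	return ret;
}
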
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 2035b8eb1290..d253f7aa8ed5 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -93,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long compressed_len,
struct page **compressed_pages,
unsigned long nr_pages,
- unsigned int write_flags);
+ unsigned int write_flags,
+ struct cgroup_subsys_state *blkcg_css);
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
@@ -104,11 +105,10 @@ enum btrfs_compression_type {
BTRFS_COMPRESS_ZLIB = 1,
BTRFS_COMPRESS_LZO = 2,
BTRFS_COMPRESS_ZSTD = 3,
- BTRFS_COMPRESS_TYPES = 3,
+ BTRFS_NR_COMPRESS_TYPES = 4,
};
struct workspace_manager {
- const struct btrfs_compress_op *ops;
struct list_head idle_ws;
spinlock_t ws_lock;
/* Number of free workspaces */
@@ -119,53 +119,18 @@ struct workspace_manager {
wait_queue_head_t ws_wait;
};
-void btrfs_init_workspace_manager(struct workspace_manager *wsm,
- const struct btrfs_compress_op *ops);
-struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
- unsigned int level);
-void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws);
-void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm);
+struct list_head *btrfs_get_workspace(int type, unsigned int level);
+void btrfs_put_workspace(int type, struct list_head *ws);
struct btrfs_compress_op {
- void (*init_workspace_manager)(void);
-
- void (*cleanup_workspace_manager)(void);
-
- struct list_head *(*get_workspace)(unsigned int level);
-
- void (*put_workspace)(struct list_head *ws);
-
- struct list_head *(*alloc_workspace)(unsigned int level);
-
- void (*free_workspace)(struct list_head *workspace);
-
- int (*compress_pages)(struct list_head *workspace,
- struct address_space *mapping,
- u64 start,
- struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out);
-
- int (*decompress_bio)(struct list_head *workspace,
- struct compressed_bio *cb);
-
- int (*decompress)(struct list_head *workspace,
- unsigned char *data_in,
- struct page *dest_page,
- unsigned long start_byte,
- size_t srclen, size_t destlen);
-
- /*
- * This bounds the level set by the user to be within range of a
- * particular compression type. It returns the level that will be used
- * if the level is out of bounds or the default if 0 is passed in.
- */
- unsigned int (*set_level)(unsigned int level);
+ struct workspace_manager *workspace_manager;
+ /* Maximum level supported by the compression algorithm */
+ unsigned int max_level;
+ unsigned int default_level;
};
/* The heuristic workspaces are managed via the 0th workspace manager */
-#define BTRFS_NR_WORKSPACE_MANAGERS (BTRFS_COMPRESS_TYPES + 1)
+#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES
extern const struct btrfs_compress_op btrfs_heuristic_compress;
extern const struct btrfs_compress_op btrfs_zlib_compress;
@@ -175,6 +140,8 @@ extern const struct btrfs_compress_op btrfs_zstd_compress;
const char* btrfs_compress_type2str(enum btrfs_compression_type type);
bool btrfs_compress_is_valid_type(const char *str, size_t len);
+unsigned int btrfs_compress_set_level(int type, unsigned level);
+
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
#endif
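
With the callback table gone, struct btrfs_compress_op shrinks to the workspace manager pointer plus the level bounds consumed by btrfs_compress_set_level(). A hypothetical descriptor under the new layout could look like this; the real max/default levels for zlib, lzo and zstd are set in their respective files and may differ:

static struct workspace_manager example_wsm;

const struct btrfs_compress_op example_compress_op = {
	.workspace_manager	= &example_wsm,
	.max_level		= 9,	/* hypothetical value */
	.default_level		= 3,	/* hypothetical value */
};
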
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 5df76c17775a..5b6e86aaf2e1 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -29,34 +29,52 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
int level, int slot);
-struct btrfs_path *btrfs_alloc_path(void)
+static const struct btrfs_csums {
+ u16 size;
+ const char *name;
+ const char *driver;
+} btrfs_csums[] = {
+ [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
+ [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
+ [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
+ [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
+ .driver = "blake2b-256" },
+};
+
+int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
- return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
+ u16 t = btrfs_super_csum_type(s);
+ /*
+ * csum type is validated at mount time
+ */
+ return btrfs_csums[t].size;
+}
+
+const char *btrfs_super_csum_name(u16 csum_type)
+{
+ /* csum type is validated at mount time */
+ return btrfs_csums[csum_type].name;
}
/*
- * set all locked nodes in the path to blocking locks. This should
- * be done before scheduling
+ * Return driver name if defined, otherwise the name that's also a valid driver
+ * name
*/
-noinline void btrfs_set_path_blocking(struct btrfs_path *p)
+const char *btrfs_super_csum_driver(u16 csum_type)
{
- int i;
- for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
- if (!p->nodes[i] || !p->locks[i])
- continue;
- /*
- * If we currently have a spinning reader or writer lock this
- * will bump the count of blocking holders and drop the
- * spinlock.
- */
- if (p->locks[i] == BTRFS_READ_LOCK) {
- btrfs_set_lock_blocking_read(p->nodes[i]);
- p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
- } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
- btrfs_set_lock_blocking_write(p->nodes[i]);
- p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
- }
- }
+ /* csum type is validated at mount time */
+ return btrfs_csums[csum_type].driver ?:
+ btrfs_csums[csum_type].name;
+}
+
+size_t __const btrfs_get_num_csums(void)
+{
+ return ARRAY_SIZE(btrfs_csums);
+}
+
+struct btrfs_path *btrfs_alloc_path(void)
+{
+ return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
/* this also releases the path */
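
The new btrfs_csums[] table above centralizes the checksum metadata, and the driver lookup falls back to .name when no explicit .driver is set. A small sketch of how the accessors behave with the entries defined above (the wrapper function is illustrative only):

static void example_csum_lookup(void)
{
	/* BLAKE2 is the only entry with a distinct crypto driver name. */
	pr_info("blake2 driver: %s\n",
		btrfs_super_csum_driver(BTRFS_CSUM_TYPE_BLAKE2)); /* "blake2b-256" */
	/* xxhash has no .driver, so its name doubles as the driver name. */
	pr_info("xxhash driver: %s\n",
		btrfs_super_csum_driver(BTRFS_CSUM_TYPE_XXHASH)); /* "xxhash64" */
	/* ARRAY_SIZE(btrfs_csums) == 4 with the table defined above. */
	pr_info("csum types: %zu\n", btrfs_get_num_csums());
}
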
@@ -376,8 +394,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
* The 'start address' is the logical address of the *new* root node
* for root replace operations, or the logical address of the affected
* block for all other operations.
- *
- * Note: must be called with write lock for fs_info::tree_mod_log_lock.
*/
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
@@ -387,6 +403,8 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
struct rb_node *parent = NULL;
struct tree_mod_elem *cur;
+ lockdep_assert_held_write(&fs_info->tree_mod_log_lock);
+
tm->seq = btrfs_inc_tree_mod_seq(fs_info);
tm_root = &fs_info->tree_mod_log;
@@ -1103,7 +1121,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
parent_start = buf->start;
- extent_buffer_get(cow);
+ atomic_inc(&cow->refs);
ret = tree_mod_log_insert_root(root->node, cow, 1);
BUG_ON(ret < 0);
rcu_assign_pointer(root->node, cow);
@@ -1343,6 +1361,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
struct tree_mod_elem *tm;
struct extent_buffer *eb = NULL;
struct extent_buffer *eb_root;
+ u64 eb_root_owner = 0;
struct extent_buffer *old;
struct tree_mod_root *old_root = NULL;
u64 old_generation = 0;
@@ -1380,6 +1399,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
free_extent_buffer(old);
}
} else if (old_root) {
+ eb_root_owner = btrfs_header_owner(eb_root);
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
eb = alloc_dummy_extent_buffer(fs_info, logical);
@@ -1396,7 +1416,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
if (old_root) {
btrfs_set_header_bytenr(eb, eb->start);
btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
+ btrfs_set_header_owner(eb, eb_root_owner);
btrfs_set_header_level(eb, old_root->level);
btrfs_set_header_generation(eb, old_generation);
}
@@ -1539,7 +1559,7 @@ static int comp_keys(const struct btrfs_disk_key *disk,
/*
* same as comp_keys only with two btrfs_key's
*/
-int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
+int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
if (k1->objectid > k2->objectid)
return 1;
@@ -1790,8 +1810,8 @@ static void root_sub_used(struct btrfs_root *root, u32 size)
/* given a node and slot number, this reads the blocks it points to. The
* extent buffer is returned with a reference taken (but unlocked).
*/
-static noinline struct extent_buffer *read_node_slot(
- struct extent_buffer *parent, int slot)
+struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
+ int slot)
{
int level = btrfs_header_level(parent);
struct extent_buffer *eb;
@@ -1860,7 +1880,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
return 0;
/* promote the child to a root */
- child = read_node_slot(mid, 0);
+ child = btrfs_read_node_slot(mid, 0);
if (IS_ERR(child)) {
ret = PTR_ERR(child);
btrfs_handle_fs_error(fs_info, ret, NULL);
@@ -1900,7 +1920,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
return 0;
- left = read_node_slot(parent, pslot - 1);
+ left = btrfs_read_node_slot(parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
@@ -1915,7 +1935,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
}
}
- right = read_node_slot(parent, pslot + 1);
+ right = btrfs_read_node_slot(parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
@@ -2012,7 +2032,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* update the path */
if (left) {
if (btrfs_header_nritems(left) > orig_slot) {
- extent_buffer_get(left);
+ atomic_inc(&left->refs);
/* left was locked after cow */
path->nodes[level] = left;
path->slots[level + 1] -= 1;
@@ -2075,7 +2095,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (!parent)
return 1;
- left = read_node_slot(parent, pslot - 1);
+ left = btrfs_read_node_slot(parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
@@ -2127,7 +2147,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(left);
free_extent_buffer(left);
}
- right = read_node_slot(parent, pslot + 1);
+ right = btrfs_read_node_slot(parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
@@ -2355,32 +2375,6 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
}
/*
- * This releases any locks held in the path starting at level and
- * going all the way up to the root.
- *
- * btrfs_search_slot will keep the lock held on higher nodes in a few
- * corner cases, such as COW of the block at slot zero in the node. This
- * ignores those rules, and it should only be called when there are no
- * more updates to be done higher up in the tree.
- */
-noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
-{
- int i;
-
- if (path->keep_locks)
- return;
-
- for (i = level; i < BTRFS_MAX_LEVEL; i++) {
- if (!path->nodes[i])
- continue;
- if (!path->locks[i])
- continue;
- btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
- path->locks[i] = 0;
- }
-}
-
-/*
* helper function for btrfs_search_slot. The goal is to find a block
* in cache without setting the path to blocking. If we find the block
* we return zero and the path is unchanged.
@@ -2628,7 +2622,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
} else {
b = root->commit_root;
- extent_buffer_get(b);
+ atomic_inc(&b->refs);
}
level = btrfs_header_level(b);
/*
@@ -2761,12 +2755,10 @@ again:
}
while (b) {
+ int dec = 0;
+
level = btrfs_header_level(b);
- /*
- * setup the path here so we can release it under lock
- * contention with the cow code
- */
if (cow) {
bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
@@ -2837,75 +2829,7 @@ cow_done:
if (ret < 0)
goto done;
- if (level != 0) {
- int dec = 0;
- if (ret && slot > 0) {
- dec = 1;
- slot -= 1;
- }
- p->slots[level] = slot;
- err = setup_nodes_for_search(trans, root, p, b, level,
- ins_len, &write_lock_level);
- if (err == -EAGAIN)
- goto again;
- if (err) {
- ret = err;
- goto done;
- }
- b = p->nodes[level];
- slot = p->slots[level];
-
- /*
- * slot 0 is special, if we change the key
- * we have to update the parent pointer
- * which means we must have a write lock
- * on the parent
- */
- if (slot == 0 && ins_len &&
- write_lock_level < level + 1) {
- write_lock_level = level + 1;
- btrfs_release_path(p);
- goto again;
- }
-
- unlock_up(p, level, lowest_unlock,
- min_write_lock_level, &write_lock_level);
-
- if (level == lowest_level) {
- if (dec)
- p->slots[level]++;
- goto done;
- }
-
- err = read_block_for_search(root, p, &b, level,
- slot, key);
- if (err == -EAGAIN)
- goto again;
- if (err) {
- ret = err;
- goto done;
- }
-
- if (!p->skip_locking) {
- level = btrfs_header_level(b);
- if (level <= write_lock_level) {
- err = btrfs_try_tree_write_lock(b);
- if (!err) {
- btrfs_set_path_blocking(p);
- btrfs_tree_lock(b);
- }
- p->locks[level] = BTRFS_WRITE_LOCK;
- } else {
- err = btrfs_tree_read_lock_atomic(b);
- if (!err) {
- btrfs_set_path_blocking(p);
- btrfs_tree_read_lock(b);
- }
- p->locks[level] = BTRFS_READ_LOCK;
- }
- p->nodes[level] = b;
- }
- } else {
+ if (level == 0) {
p->slots[level] = slot;
if (ins_len > 0 &&
btrfs_leaf_free_space(b) < ins_len) {
@@ -2930,6 +2854,67 @@ cow_done:
min_write_lock_level, NULL);
goto done;
}
+ if (ret && slot > 0) {
+ dec = 1;
+ slot--;
+ }
+ p->slots[level] = slot;
+ err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
+ &write_lock_level);
+ if (err == -EAGAIN)
+ goto again;
+ if (err) {
+ ret = err;
+ goto done;
+ }
+ b = p->nodes[level];
+ slot = p->slots[level];
+
+ /*
+ * Slot 0 is special, if we change the key we have to update
+ * the parent pointer which means we must have a write lock on
+ * the parent
+ */
+ if (slot == 0 && ins_len && write_lock_level < level + 1) {
+ write_lock_level = level + 1;
+ btrfs_release_path(p);
+ goto again;
+ }
+
+ unlock_up(p, level, lowest_unlock, min_write_lock_level,
+ &write_lock_level);
+
+ if (level == lowest_level) {
+ if (dec)
+ p->slots[level]++;
+ goto done;
+ }
+
+ err = read_block_for_search(root, p, &b, level, slot, key);
+ if (err == -EAGAIN)
+ goto again;
+ if (err) {
+ ret = err;
+ goto done;
+ }
+
+ if (!p->skip_locking) {
+ level = btrfs_header_level(b);
+ if (level <= write_lock_level) {
+ if (!btrfs_try_tree_write_lock(b)) {
+ btrfs_set_path_blocking(p);
+ btrfs_tree_lock(b);
+ }
+ p->locks[level] = BTRFS_WRITE_LOCK;
+ } else {
+ if (!btrfs_tree_read_lock_atomic(b)) {
+ btrfs_set_path_blocking(p);
+ btrfs_tree_read_lock(b);
+ }
+ p->locks[level] = BTRFS_READ_LOCK;
+ }
+ p->nodes[level] = b;
+ }
}
ret = 1;
done:
@@ -2986,6 +2971,8 @@ again:
p->locks[level] = BTRFS_READ_LOCK;
while (b) {
+ int dec = 0;
+
level = btrfs_header_level(b);
p->nodes[level] = b;
@@ -3006,48 +2993,45 @@ again:
if (ret < 0)
goto done;
- if (level != 0) {
- int dec = 0;
- if (ret && slot > 0) {
- dec = 1;
- slot -= 1;
- }
+ if (level == 0) {
p->slots[level] = slot;
unlock_up(p, level, lowest_unlock, 0, NULL);
+ goto done;
+ }
- if (level == lowest_level) {
- if (dec)
- p->slots[level]++;
- goto done;
- }
+ if (ret && slot > 0) {
+ dec = 1;
+ slot--;
+ }
+ p->slots[level] = slot;
+ unlock_up(p, level, lowest_unlock, 0, NULL);
- err = read_block_for_search(root, p, &b, level,
- slot, key);
- if (err == -EAGAIN)
- goto again;
- if (err) {
- ret = err;
- goto done;
- }
+ if (level == lowest_level) {
+ if (dec)
+ p->slots[level]++;
+ goto done;
+ }
- level = btrfs_header_level(b);
- err = btrfs_tree_read_lock_atomic(b);
- if (!err) {
- btrfs_set_path_blocking(p);
- btrfs_tree_read_lock(b);
- }
- b = tree_mod_log_rewind(fs_info, p, b, time_seq);
- if (!b) {
- ret = -ENOMEM;
- goto done;
- }
- p->locks[level] = BTRFS_READ_LOCK;
- p->nodes[level] = b;
- } else {
- p->slots[level] = slot;
- unlock_up(p, level, lowest_unlock, 0, NULL);
+ err = read_block_for_search(root, p, &b, level, slot, key);
+ if (err == -EAGAIN)
+ goto again;
+ if (err) {
+ ret = err;
+ goto done;
+ }
+
+ level = btrfs_header_level(b);
+ if (!btrfs_tree_read_lock_atomic(b)) {
+ btrfs_set_path_blocking(p);
+ btrfs_tree_read_lock(b);
+ }
+ b = tree_mod_log_rewind(fs_info, p, b, time_seq);
+ if (!b) {
+ ret = -ENOMEM;
goto done;
}
+ p->locks[level] = BTRFS_READ_LOCK;
+ p->nodes[level] = b;
}
ret = 1;
done:
@@ -3412,7 +3396,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
free_extent_buffer(old);
add_root_to_dirty_list(root);
- extent_buffer_get(c);
+ atomic_inc(&c->refs);
path->nodes[level] = c;
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
path->slots[level] = 0;
@@ -3572,7 +3556,7 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr)
if (!nr)
return 0;
- btrfs_init_map_token(&token);
+ btrfs_init_map_token(&token, l);
start_item = btrfs_item_nr(start);
end_item = btrfs_item_nr(end);
data_len = btrfs_token_item_offset(l, start_item, &token) +
@@ -3630,8 +3614,6 @@ static noinline int __push_leaf_right(struct btrfs_path *path,
u32 data_end;
u32 this_item_size;
- btrfs_init_map_token(&token);
-
if (empty)
nr = 0;
else
@@ -3704,6 +3686,7 @@ static noinline int __push_leaf_right(struct btrfs_path *path,
push_items * sizeof(struct btrfs_item));
/* update the item pointers */
+ btrfs_init_map_token(&token, right);
right_nritems += push_items;
btrfs_set_header_nritems(right, right_nritems);
push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
@@ -3781,7 +3764,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
- right = read_node_slot(upper, slot + 1);
+ right = btrfs_read_node_slot(upper, slot + 1);
/*
* slot + 1 is not valid or we fail to read the right node,
* no big deal, just return.
@@ -3858,8 +3841,6 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
u32 old_left_item_size;
struct btrfs_map_token token;
- btrfs_init_map_token(&token);
-
if (empty)
nr = min(right_nritems, max_slot);
else
@@ -3913,6 +3894,7 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
old_left_nritems = btrfs_header_nritems(left);
BUG_ON(old_left_nritems <= 0);
+ btrfs_init_map_token(&token, left);
old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
u32 ioff;
@@ -3944,6 +3926,8 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
(btrfs_header_nritems(right) - push_items) *
sizeof(struct btrfs_item));
}
+
+ btrfs_init_map_token(&token, right);
right_nritems -= push_items;
btrfs_set_header_nritems(right, right_nritems);
push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
@@ -4015,7 +3999,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
- left = read_node_slot(path->nodes[1], slot - 1);
+ left = btrfs_read_node_slot(path->nodes[1], slot - 1);
/*
* slot - 1 is not valid or we fail to read the left node,
* no big deal, just return.
@@ -4074,8 +4058,6 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
struct btrfs_disk_key disk_key;
struct btrfs_map_token token;
- btrfs_init_map_token(&token);
-
nritems = nritems - mid;
btrfs_set_header_nritems(right, nritems);
data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
@@ -4091,6 +4073,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
+ btrfs_init_map_token(&token, right);
for (i = 0; i < nritems; i++) {
struct btrfs_item *item = btrfs_item_nr(i);
u32 ioff;
@@ -4574,8 +4557,6 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
int i;
struct btrfs_map_token token;
- btrfs_init_map_token(&token);
-
leaf = path->nodes[0];
slot = path->slots[0];
@@ -4597,6 +4578,7 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
+ btrfs_init_map_token(&token, leaf);
for (i = slot; i < nritems; i++) {
u32 ioff;
item = btrfs_item_nr(i);
@@ -4671,8 +4653,6 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
int i;
struct btrfs_map_token token;
- btrfs_init_map_token(&token);
-
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
@@ -4697,6 +4677,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
+ btrfs_init_map_token(&token, leaf);
for (i = slot; i < nritems; i++) {
u32 ioff;
item = btrfs_item_nr(i);
@@ -4748,8 +4729,6 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
}
btrfs_unlock_up_safe(path, 1);
- btrfs_init_map_token(&token);
-
leaf = path->nodes[0];
slot = path->slots[0];
@@ -4763,6 +4742,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
BUG();
}
+ btrfs_init_map_token(&token, leaf);
if (slot != nritems) {
unsigned int old_data = btrfs_item_end_nr(leaf, slot);
@@ -4949,7 +4929,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
root_sub_used(root, leaf->len);
- extent_buffer_get(leaf);
+ atomic_inc(&leaf->refs);
btrfs_free_tree_block(trans, root, leaf, 0, 1);
free_extent_buffer_stale(leaf);
}
@@ -4969,9 +4949,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int wret;
int i;
u32 nritems;
- struct btrfs_map_token token;
-
- btrfs_init_map_token(&token);
leaf = path->nodes[0];
last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
@@ -4983,12 +4960,14 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (slot + nr != nritems) {
int data_end = leaf_data_end(leaf);
+ struct btrfs_map_token token;
memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
data_end + dsize,
BTRFS_LEAF_DATA_OFFSET + data_end,
last_off - data_end);
+ btrfs_init_map_token(&token, leaf);
for (i = slot + nr; i < nritems; i++) {
u32 ioff;
@@ -5031,7 +5010,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
* for possible call to del_ptr below
*/
slot = path->slots[1];
- extent_buffer_get(leaf);
+ atomic_inc(&leaf->refs);
btrfs_set_path_blocking(path);
wret = push_leaf_left(trans, root, path, 1, 1,
@@ -5222,7 +5201,7 @@ find_next_key:
goto out;
}
btrfs_set_path_blocking(path);
- cur = read_node_slot(cur, slot);
+ cur = btrfs_read_node_slot(cur, slot);
if (IS_ERR(cur)) {
ret = PTR_ERR(cur);
goto out;
@@ -5244,368 +5223,6 @@ out:
return ret;
}
-static int tree_move_down(struct btrfs_path *path, int *level)
-{
- struct extent_buffer *eb;
-
- BUG_ON(*level == 0);
- eb = read_node_slot(path->nodes[*level], path->slots[*level]);
- if (IS_ERR(eb))
- return PTR_ERR(eb);
-
- path->nodes[*level - 1] = eb;
- path->slots[*level - 1] = 0;
- (*level)--;
- return 0;
-}
-
-static int tree_move_next_or_upnext(struct btrfs_path *path,
- int *level, int root_level)
-{
- int ret = 0;
- int nritems;
- nritems = btrfs_header_nritems(path->nodes[*level]);
-
- path->slots[*level]++;
-
- while (path->slots[*level] >= nritems) {
- if (*level == root_level)
- return -1;
-
- /* move upnext */
- path->slots[*level] = 0;
- free_extent_buffer(path->nodes[*level]);
- path->nodes[*level] = NULL;
- (*level)++;
- path->slots[*level]++;
-
- nritems = btrfs_header_nritems(path->nodes[*level]);
- ret = 1;
- }
- return ret;
-}
-
-/*
- * Returns 1 if it had to move up and next. 0 is returned if it moved only next
- * or down.
- */
-static int tree_advance(struct btrfs_path *path,
- int *level, int root_level,
- int allow_down,
- struct btrfs_key *key)
-{
- int ret;
-
- if (*level == 0 || !allow_down) {
- ret = tree_move_next_or_upnext(path, level, root_level);
- } else {
- ret = tree_move_down(path, level);
- }
- if (ret >= 0) {
- if (*level == 0)
- btrfs_item_key_to_cpu(path->nodes[*level], key,
- path->slots[*level]);
- else
- btrfs_node_key_to_cpu(path->nodes[*level], key,
- path->slots[*level]);
- }
- return ret;
-}
-
-static int tree_compare_item(struct btrfs_path *left_path,
- struct btrfs_path *right_path,
- char *tmp_buf)
-{
- int cmp;
- int len1, len2;
- unsigned long off1, off2;
-
- len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
- len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
- if (len1 != len2)
- return 1;
-
- off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
- off2 = btrfs_item_ptr_offset(right_path->nodes[0],
- right_path->slots[0]);
-
- read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
-
- cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
- if (cmp)
- return 1;
- return 0;
-}
-
-#define ADVANCE 1
-#define ADVANCE_ONLY_NEXT -1
-
-/*
- * This function compares two trees and calls the provided callback for
- * every changed/new/deleted item it finds.
- * If shared tree blocks are encountered, whole subtrees are skipped, making
- * the compare pretty fast on snapshotted subvolumes.
- *
- * This currently works on commit roots only. As commit roots are read only,
- * we don't do any locking. The commit roots are protected with transactions.
- * Transactions are ended and rejoined when a commit is tried in between.
- *
- * This function checks for modifications done to the trees while comparing.
- * If it detects a change, it aborts immediately.
- */
-int btrfs_compare_trees(struct btrfs_root *left_root,
- struct btrfs_root *right_root,
- btrfs_changed_cb_t changed_cb, void *ctx)
-{
- struct btrfs_fs_info *fs_info = left_root->fs_info;
- int ret;
- int cmp;
- struct btrfs_path *left_path = NULL;
- struct btrfs_path *right_path = NULL;
- struct btrfs_key left_key;
- struct btrfs_key right_key;
- char *tmp_buf = NULL;
- int left_root_level;
- int right_root_level;
- int left_level;
- int right_level;
- int left_end_reached;
- int right_end_reached;
- int advance_left;
- int advance_right;
- u64 left_blockptr;
- u64 right_blockptr;
- u64 left_gen;
- u64 right_gen;
-
- left_path = btrfs_alloc_path();
- if (!left_path) {
- ret = -ENOMEM;
- goto out;
- }
- right_path = btrfs_alloc_path();
- if (!right_path) {
- ret = -ENOMEM;
- goto out;
- }
-
- tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
- if (!tmp_buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- left_path->search_commit_root = 1;
- left_path->skip_locking = 1;
- right_path->search_commit_root = 1;
- right_path->skip_locking = 1;
-
- /*
- * Strategy: Go to the first items of both trees. Then do
- *
- * If both trees are at level 0
- * Compare keys of current items
- * If left < right treat left item as new, advance left tree
- * and repeat
- * If left > right treat right item as deleted, advance right tree
- * and repeat
- * If left == right do deep compare of items, treat as changed if
- * needed, advance both trees and repeat
- * If both trees are at the same level but not at level 0
- * Compare keys of current nodes/leafs
- * If left < right advance left tree and repeat
- * If left > right advance right tree and repeat
- * If left == right compare blockptrs of the next nodes/leafs
- * If they match advance both trees but stay at the same level
- * and repeat
- * If they don't match advance both trees while allowing to go
- * deeper and repeat
- * If tree levels are different
- * Advance the tree that needs it and repeat
- *
- * Advancing a tree means:
- * If we are at level 0, try to go to the next slot. If that's not
- * possible, go one level up and repeat. Stop when we found a level
- * where we could go to the next slot. We may at this point be on a
- * node or a leaf.
- *
- * If we are not at level 0 and not on shared tree blocks, go one
- * level deeper.
- *
- * If we are not at level 0 and on shared tree blocks, go one slot to
- * the right if possible or go up and right.
- */
-
- down_read(&fs_info->commit_root_sem);
- left_level = btrfs_header_level(left_root->commit_root);
- left_root_level = left_level;
- left_path->nodes[left_level] =
- btrfs_clone_extent_buffer(left_root->commit_root);
- if (!left_path->nodes[left_level]) {
- up_read(&fs_info->commit_root_sem);
- ret = -ENOMEM;
- goto out;
- }
-
- right_level = btrfs_header_level(right_root->commit_root);
- right_root_level = right_level;
- right_path->nodes[right_level] =
- btrfs_clone_extent_buffer(right_root->commit_root);
- if (!right_path->nodes[right_level]) {
- up_read(&fs_info->commit_root_sem);
- ret = -ENOMEM;
- goto out;
- }
- up_read(&fs_info->commit_root_sem);
-
- if (left_level == 0)
- btrfs_item_key_to_cpu(left_path->nodes[left_level],
- &left_key, left_path->slots[left_level]);
- else
- btrfs_node_key_to_cpu(left_path->nodes[left_level],
- &left_key, left_path->slots[left_level]);
- if (right_level == 0)
- btrfs_item_key_to_cpu(right_path->nodes[right_level],
- &right_key, right_path->slots[right_level]);
- else
- btrfs_node_key_to_cpu(right_path->nodes[right_level],
- &right_key, right_path->slots[right_level]);
-
- left_end_reached = right_end_reached = 0;
- advance_left = advance_right = 0;
-
- while (1) {
- if (advance_left && !left_end_reached) {
- ret = tree_advance(left_path, &left_level,
- left_root_level,
- advance_left != ADVANCE_ONLY_NEXT,
- &left_key);
- if (ret == -1)
- left_end_reached = ADVANCE;
- else if (ret < 0)
- goto out;
- advance_left = 0;
- }
- if (advance_right && !right_end_reached) {
- ret = tree_advance(right_path, &right_level,
- right_root_level,
- advance_right != ADVANCE_ONLY_NEXT,
- &right_key);
- if (ret == -1)
- right_end_reached = ADVANCE;
- else if (ret < 0)
- goto out;
- advance_right = 0;
- }
-
- if (left_end_reached && right_end_reached) {
- ret = 0;
- goto out;
- } else if (left_end_reached) {
- if (right_level == 0) {
- ret = changed_cb(left_path, right_path,
- &right_key,
- BTRFS_COMPARE_TREE_DELETED,
- ctx);
- if (ret < 0)
- goto out;
- }
- advance_right = ADVANCE;
- continue;
- } else if (right_end_reached) {
- if (left_level == 0) {
- ret = changed_cb(left_path, right_path,
- &left_key,
- BTRFS_COMPARE_TREE_NEW,
- ctx);
- if (ret < 0)
- goto out;
- }
- advance_left = ADVANCE;
- continue;
- }
-
- if (left_level == 0 && right_level == 0) {
- cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
- if (cmp < 0) {
- ret = changed_cb(left_path, right_path,
- &left_key,
- BTRFS_COMPARE_TREE_NEW,
- ctx);
- if (ret < 0)
- goto out;
- advance_left = ADVANCE;
- } else if (cmp > 0) {
- ret = changed_cb(left_path, right_path,
- &right_key,
- BTRFS_COMPARE_TREE_DELETED,
- ctx);
- if (ret < 0)
- goto out;
- advance_right = ADVANCE;
- } else {
- enum btrfs_compare_tree_result result;
-
- WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
- ret = tree_compare_item(left_path, right_path,
- tmp_buf);
- if (ret)
- result = BTRFS_COMPARE_TREE_CHANGED;
- else
- result = BTRFS_COMPARE_TREE_SAME;
- ret = changed_cb(left_path, right_path,
- &left_key, result, ctx);
- if (ret < 0)
- goto out;
- advance_left = ADVANCE;
- advance_right = ADVANCE;
- }
- } else if (left_level == right_level) {
- cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
- if (cmp < 0) {
- advance_left = ADVANCE;
- } else if (cmp > 0) {
- advance_right = ADVANCE;
- } else {
- left_blockptr = btrfs_node_blockptr(
- left_path->nodes[left_level],
- left_path->slots[left_level]);
- right_blockptr = btrfs_node_blockptr(
- right_path->nodes[right_level],
- right_path->slots[right_level]);
- left_gen = btrfs_node_ptr_generation(
- left_path->nodes[left_level],
- left_path->slots[left_level]);
- right_gen = btrfs_node_ptr_generation(
- right_path->nodes[right_level],
- right_path->slots[right_level]);
- if (left_blockptr == right_blockptr &&
- left_gen == right_gen) {
- /*
- * As we're on a shared block, don't
- * allow to go deeper.
- */
- advance_left = ADVANCE_ONLY_NEXT;
- advance_right = ADVANCE_ONLY_NEXT;
- } else {
- advance_left = ADVANCE;
- advance_right = ADVANCE;
- }
- }
- } else if (left_level < right_level) {
- advance_right = ADVANCE;
- } else {
- advance_left = ADVANCE;
- }
- }
-
-out:
- btrfs_free_path(left_path);
- btrfs_free_path(right_path);
- kvfree(tmp_buf);
- return ret;
-}
-
/*
* this is similar to btrfs_next_leaf, but does not try to preserve
* and fixup the path. It looks for and returns the next key in the
@@ -5623,7 +5240,7 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
int slot;
struct extent_buffer *c;
- WARN_ON(!path->keep_locks);
+ WARN_ON(!path->keep_locks && !path->skip_locking);
while (level < BTRFS_MAX_LEVEL) {
if (!path->nodes[level])
return 1;
@@ -5639,7 +5256,7 @@ next:
!path->nodes[level + 1])
return 1;
- if (path->locks[level + 1]) {
+ if (path->locks[level + 1] || path->skip_locking) {
level++;
continue;
}
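
read_node_slot() is renamed and exported as btrfs_read_node_slot() so it can be called from outside ctree.c. A minimal sketch of its contract (the walker function is hypothetical): errors come back as ERR_PTR values, and the returned extent buffer carries a reference the caller must drop:

static int example_walk_children(struct extent_buffer *parent)
{
	int nritems = btrfs_header_nritems(parent);
	int i;

	for (i = 0; i < nritems; i++) {
		struct extent_buffer *child = btrfs_read_node_slot(parent, i);

		if (IS_ERR(child))
			return PTR_ERR(child);
		/* ... inspect the child node here ... */
		free_extent_buffer(child);
	}
	return 0;
}
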
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 299e11e6c554..b2e8fd8a8e59 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -16,7 +16,6 @@
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/slab.h>
-#include <linux/kobject.h>
#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include <asm/unaligned.h>
@@ -29,6 +28,7 @@
#include <linux/dynamic_debug.h>
#include <linux/refcount.h>
#include <linux/crc32c.h>
+#include "extent-io-tree.h"
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
@@ -39,10 +39,12 @@ struct btrfs_transaction;
struct btrfs_pending_snapshot;
struct btrfs_delayed_ref_root;
struct btrfs_space_info;
+struct btrfs_block_group;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
+extern struct kmem_cache *btrfs_free_space_bitmap_cachep;
struct btrfs_ordered_sum;
struct btrfs_ref;
@@ -55,9 +57,9 @@ struct btrfs_ref;
* filesystem data as well that can be used to read data in order to repair
* read errors on other disks.
*
- * Current value is derived from RAID1 with 2 copies.
+ * Current value is derived from RAID1C4 with 4 copies.
*/
-#define BTRFS_MAX_MIRRORS (2 + 1)
+#define BTRFS_MAX_MIRRORS (4 + 1)
#define BTRFS_MAX_LEVEL 8
@@ -82,10 +84,6 @@ struct btrfs_ref;
*/
#define BTRFS_LINK_MAX 65535U
-/* four bytes for CRC32 */
-static const int btrfs_csum_sizes[] = { 4 };
-static const char *btrfs_csum_names[] = { "crc32c" };
-
#define BTRFS_EMPTY_DIR_SIZE 0
/* ioprio of readahead is set to idle */
@@ -294,7 +292,8 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
- BTRFS_FEATURE_INCOMPAT_METADATA_UUID)
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
+ BTRFS_FEATURE_INCOMPAT_RAID1C34)
#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
@@ -397,13 +396,6 @@ struct btrfs_dev_replace {
wait_queue_head_t replace_wait;
};
-/* For raid type sysfs entries */
-struct raid_kobject {
- u64 flags;
- struct kobject kobj;
- struct list_head list;
-};
-
/*
* free clusters are used to claim free space in relatively large chunks,
* allowing us to do less seeky writes. They are used for all metadata
@@ -423,7 +415,7 @@ struct btrfs_free_cluster {
/* We did a full search and couldn't create a cluster */
bool fragmented;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
/*
* when a cluster is allocated from a block group, we put the
* cluster onto a list in the block group so that it can
@@ -440,40 +432,6 @@ enum btrfs_caching_type {
BTRFS_CACHE_ERROR,
};
-enum btrfs_disk_cache_state {
- BTRFS_DC_WRITTEN,
- BTRFS_DC_ERROR,
- BTRFS_DC_CLEAR,
- BTRFS_DC_SETUP,
-};
-
-struct btrfs_caching_control {
- struct list_head list;
- struct mutex mutex;
- wait_queue_head_t wait;
- struct btrfs_work work;
- struct btrfs_block_group_cache *block_group;
- u64 progress;
- refcount_t count;
-};
-
-/* Once caching_thread() finds this much free space, it will wake up waiters. */
-#define CACHING_CTL_WAKE_UP SZ_2M
-
-struct btrfs_io_ctl {
- void *cur, *orig;
- struct page *page;
- struct page **pages;
- struct btrfs_fs_info *fs_info;
- struct inode *inode;
- unsigned long size;
- int index;
- int num_pages;
- int entries;
- int bitmaps;
- unsigned check_crcs:1;
-};
-
/*
* Tree to record all locked full stripes of a RAID5/6 block group
*/
@@ -482,120 +440,6 @@ struct btrfs_full_stripe_locks_tree {
struct mutex lock;
};
-struct btrfs_block_group_cache {
- struct btrfs_key key;
- struct btrfs_block_group_item item;
- struct btrfs_fs_info *fs_info;
- struct inode *inode;
- spinlock_t lock;
- u64 pinned;
- u64 reserved;
- u64 delalloc_bytes;
- u64 bytes_super;
- u64 flags;
- u64 cache_generation;
-
- /*
- * If the free space extent count exceeds this number, convert the block
- * group to bitmaps.
- */
- u32 bitmap_high_thresh;
-
- /*
- * If the free space extent count drops below this number, convert the
- * block group back to extents.
- */
- u32 bitmap_low_thresh;
-
- /*
- * It is just used for the delayed data space allocation because
- * only the data space allocation and the relative metadata update
- * can be done cross the transaction.
- */
- struct rw_semaphore data_rwsem;
-
- /* for raid56, this is a full stripe, without parity */
- unsigned long full_stripe_len;
-
- unsigned int ro;
- unsigned int iref:1;
- unsigned int has_caching_ctl:1;
- unsigned int removed:1;
-
- int disk_cache_state;
-
- /* cache tracking stuff */
- int cached;
- struct btrfs_caching_control *caching_ctl;
- u64 last_byte_to_unpin;
-
- struct btrfs_space_info *space_info;
-
- /* free space cache stuff */
- struct btrfs_free_space_ctl *free_space_ctl;
-
- /* block group cache stuff */
- struct rb_node cache_node;
-
- /* for block groups in the same raid type */
- struct list_head list;
-
- /* usage count */
- atomic_t count;
-
- /* List of struct btrfs_free_clusters for this block group.
- * Today it will only have one thing on it, but that may change
- */
- struct list_head cluster_list;
-
- /* For delayed block group creation or deletion of empty block groups */
- struct list_head bg_list;
-
- /* For read-only block groups */
- struct list_head ro_list;
-
- atomic_t trimming;
-
- /* For dirty block groups */
- struct list_head dirty_list;
- struct list_head io_list;
-
- struct btrfs_io_ctl io_ctl;
-
- /*
- * Incremented when doing extent allocations and holding a read lock
- * on the space_info's groups_sem semaphore.
- * Decremented when an ordered extent that represents an IO against this
- * block group's range is created (after it's added to its inode's
- * root's list of ordered extents) or immediately after the allocation
- * if it's a metadata extent or fallocate extent (for these cases we
- * don't create ordered extents).
- */
- atomic_t reservations;
-
- /*
- * Incremented while holding the spinlock *lock* by a task checking if
- * it can perform a nocow write (incremented if the value for the *ro*
- * field is 0). Decremented by such tasks once they create an ordered
- * extent or before that if some error happens before reaching that step.
- * This is to prevent races between block group relocation and nocow
- * writes through direct IO.
- */
- atomic_t nocow_writers;
-
- /* Lock for free space tree operations. */
- struct mutex free_space_lock;
-
- /*
- * Does the block group need to be added to the free space tree?
- * Protected by free_space_lock.
- */
- int needs_free_space;
-
- /* Record locked full stripes for RAID5/6 block group */
- struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
-};
-
/* delayed seq elem */
struct seq_list {
struct list_head list;
@@ -611,22 +455,6 @@ enum btrfs_orphan_cleanup_state {
ORPHAN_CLEANUP_DONE = 2,
};
-/* used by the raid56 code to lock stripes for read/modify/write */
-struct btrfs_stripe_hash {
- struct list_head hash_list;
- spinlock_t lock;
-};
-
-/* used by the raid56 code to lock stripes for read/modify/write */
-struct btrfs_stripe_hash_table {
- struct list_head stripe_cache;
- spinlock_t cache_lock;
- int cache_size;
- struct btrfs_stripe_hash table[];
-};
-
-#define BTRFS_STRIPE_HASH_TABLE_BITS 11
-
void btrfs_init_async_reclaim_work(struct work_struct *work);
/* fs_info */
@@ -650,8 +478,8 @@ struct btrfs_swapfile_pin {
void *ptr;
struct inode *inode;
/*
- * If true, ptr points to a struct btrfs_block_group_cache. Otherwise,
- * ptr points to a struct btrfs_device.
+ * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr
+ * points to a struct btrfs_device.
*/
bool is_block_group;
};
@@ -896,7 +724,6 @@ struct btrfs_fs_info {
struct btrfs_workqueue *endio_meta_write_workers;
struct btrfs_workqueue *endio_write_workers;
struct btrfs_workqueue *endio_freespace_worker;
- struct btrfs_workqueue *submit_workers;
struct btrfs_workqueue *caching_workers;
struct btrfs_workqueue *readahead_workers;
@@ -908,15 +735,11 @@ struct btrfs_fs_info {
struct btrfs_workqueue *fixup_workers;
struct btrfs_workqueue *delayed_workers;
- /* the extent workers do delayed refs on the extent allocation tree */
- struct btrfs_workqueue *extent_workers;
struct task_struct *transaction_kthread;
struct task_struct *cleaner_kthread;
u32 thread_pool_size;
struct kobject *space_info_kobj;
- struct list_head pending_raid_kobjs;
- spinlock_t pending_raid_kobjs_lock; /* uncontended */
u64 total_pinned;
@@ -1282,6 +1105,16 @@ struct btrfs_root {
#endif
};
+struct btrfs_clone_extent_info {
+ u64 disk_offset;
+ u64 disk_len;
+ u64 data_offset;
+ u64 data_len;
+ u64 file_offset;
+ char *extent_buf;
+ u32 item_size;
+};
+
struct btrfs_file_private {
void *filldir_buf;
};
@@ -1380,19 +1213,6 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
btrfs_clear_opt(fs_info->mount_opt, opt); \
}
-#ifdef CONFIG_BTRFS_DEBUG
-static inline int
-btrfs_should_fragment_free_space(struct btrfs_block_group_cache *block_group)
-{
- struct btrfs_fs_info *fs_info = block_group->fs_info;
-
- return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
- block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
- (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
- block_group->flags & BTRFS_BLOCK_GROUP_DATA);
-}
-#endif
-
/*
* Requests for changes that need to be done during transaction commit.
*
@@ -1478,8 +1298,10 @@ struct btrfs_map_token {
#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
((bytes) >> (fs_info)->sb->s_blocksize_bits)
-static inline void btrfs_init_map_token (struct btrfs_map_token *token)
+static inline void btrfs_init_map_token(struct btrfs_map_token *token,
+ struct extent_buffer *eb)
{
+ token->eb = eb;
token->kaddr = NULL;
}
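
btrfs_init_map_token() now binds the token to one extent buffer at initialization time instead of leaving the buffer implicit in every accessor call. A small sketch of the resulting usage pattern, modeled on the item-offset loops touched in ctree.c above (the helper itself is hypothetical):

static void example_shift_item_offsets(struct extent_buffer *leaf,
				       int from, int nritems, u32 shift)
{
	struct btrfs_map_token token;
	int i;

	/* Bind the token to the buffer once, right before the hot loop. */
	btrfs_init_map_token(&token, leaf);
	for (i = from; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(i);
		u32 ioff = btrfs_token_item_offset(leaf, item, &token);

		btrfs_set_token_item_offset(leaf, item, ioff + shift, &token);
	}
}
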
@@ -1510,17 +1332,10 @@ u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \
unsigned long off, u##bits val, \
struct btrfs_map_token *token); \
-static inline u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
- const void *ptr, \
- unsigned long off) \
-{ \
- return btrfs_get_token_##bits(eb, ptr, off, NULL); \
-} \
-static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr,\
- unsigned long off, u##bits val) \
-{ \
- btrfs_set_token_##bits(eb, ptr, off, val, NULL); \
-}
+u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
+ const void *ptr, unsigned long off); \
+void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, u##bits val);
DECLARE_BTRFS_SETGET_BITS(8)
DECLARE_BTRFS_SETGET_BITS(16)
@@ -1705,18 +1520,18 @@ static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
}
/* struct btrfs_block_group_item */
-BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item,
used, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item,
+BTRFS_SETGET_FUNCS(block_group_used, struct btrfs_block_group_item,
used, 64);
-BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid,
struct btrfs_block_group_item, chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid,
+BTRFS_SETGET_FUNCS(block_group_chunk_objectid,
struct btrfs_block_group_item, chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_flags,
+BTRFS_SETGET_FUNCS(block_group_flags,
struct btrfs_block_group_item, flags, 64);
-BTRFS_SETGET_STACK_FUNCS(block_group_flags,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags,
struct btrfs_block_group_item, flags, 64);
/* struct btrfs_free_space_info */
@@ -2062,16 +1877,6 @@ static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb,
btrfs_disk_key_to_cpu(key, &disk_key);
}
-static inline u8 btrfs_key_type(const struct btrfs_key *key)
-{
- return key->type;
-}
-
-static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val)
-{
- key->type = val;
-}
-
/* struct btrfs_header */
BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64);
BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header,
@@ -2357,20 +2162,11 @@ BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64);
BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
uuid_tree_generation, 64);
-static inline int btrfs_super_csum_size(const struct btrfs_super_block *s)
-{
- u16 t = btrfs_super_csum_type(s);
- /*
- * csum type is validated at mount time
- */
- return btrfs_csum_sizes[t];
-}
+int btrfs_super_csum_size(const struct btrfs_super_block *s);
+const char *btrfs_super_csum_name(u16 csum_type);
+const char *btrfs_super_csum_driver(u16 csum_type);
+size_t __const btrfs_get_num_csums(void);
-static inline const char *btrfs_super_csum_name(u16 csum_type)
-{
- /* csum type is validated at mount time */
- return btrfs_csum_names[csum_type];
-}
/*
* The leaf data grows from end-to-front in the node.
@@ -2443,30 +2239,6 @@ static inline u32 btrfs_file_extent_inline_item_len(
return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
-/* btrfs_dev_stats_item */
-static inline u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
- const struct btrfs_dev_stats_item *ptr,
- int index)
-{
- u64 val;
-
- read_extent_buffer(eb, &val,
- offsetof(struct btrfs_dev_stats_item, values) +
- ((unsigned long)ptr) + (index * sizeof(u64)),
- sizeof(val));
- return val;
-}
-
-static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb,
- struct btrfs_dev_stats_item *ptr,
- int index, u64 val)
-{
- write_extent_buffer(eb, &val,
- offsetof(struct btrfs_dev_stats_item, values) +
- ((unsigned long)ptr) + (index * sizeof(u64)),
- sizeof(val));
-}
-
/* btrfs_qgroup_status_item */
BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item,
generation, 64);
@@ -2603,32 +2375,33 @@ enum btrfs_inline_ref_type {
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
struct btrfs_extent_inline_ref *iref,
enum btrfs_inline_ref_type is_data);
+u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
-static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
- unsigned num_items)
+/*
+ * Use this if we would be adding new items, as we could split nodes as we cow
+ * down the tree.
+ */
+static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info,
+ unsigned num_items)
{
return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}
/*
- * Doing a truncate won't result in new nodes or leaves, just what we need for
- * COW.
+ * Doing a truncate or a modification won't result in new nodes or leaves, just
+ * what we need for COW.
*/
-static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
+static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
unsigned num_items)
{
return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
-void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
- const u64 start);
-void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
-bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
+int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
+ u64 start, u64 num_bytes);
+void btrfs_free_excluded_extents(struct btrfs_block_group *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
unsigned long count);
void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
@@ -2645,11 +2418,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
int btrfs_exclude_logged_extents(struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_root *root,
u64 objectid, u64 offset, u64 bytenr);
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
- struct btrfs_fs_info *info,
- u64 bytenr);
-void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 parent, u64 root_objectid,
@@ -2688,29 +2456,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref);
-int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
-int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
-int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
-int btrfs_free_block_groups(struct btrfs_fs_info *info);
-int btrfs_read_block_groups(struct btrfs_fs_info *info);
-int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
-int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- u64 bytes_used, u64 type, u64 chunk_offset,
- u64 size);
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
-struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
- struct btrfs_fs_info *fs_info,
- const u64 chunk_offset);
-int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- u64 group_start, struct extent_map *em);
-void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
-void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
-void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
-void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
-u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info);
-u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info);
-u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info);
+void btrfs_get_block_group_trimming(struct btrfs_block_group *cache);
+void btrfs_put_block_group_trimming(struct btrfs_block_group *cache);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
enum btrfs_reserve_flush_enum {
@@ -2721,6 +2469,7 @@ enum btrfs_reserve_flush_enum {
* case, use FLUSH LIMIT
*/
BTRFS_RESERVE_FLUSH_LIMIT,
+ BTRFS_RESERVE_FLUSH_EVICT,
BTRFS_RESERVE_FLUSH_ALL,
};
@@ -2733,49 +2482,23 @@ enum btrfs_flush_state {
FLUSH_DELALLOC_WAIT = 6,
ALLOC_CHUNK = 7,
ALLOC_CHUNK_FORCE = 8,
- COMMIT_TRANS = 9,
-};
-
-/*
- * control flags for do_chunk_alloc's force field
- * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
- * if we really need one.
- *
- * CHUNK_ALLOC_LIMITED means to only try and allocate one
- * if we have very few chunks already allocated. This is
- * used as part of the clustering code to help make sure
- * we have a good pool of storage to cluster in, without
- * filling the FS with empty chunks
- *
- * CHUNK_ALLOC_FORCE means it must try to allocate one
- *
- */
-enum btrfs_chunk_alloc_enum {
- CHUNK_ALLOC_NO_FORCE,
- CHUNK_ALLOC_LIMITED,
- CHUNK_ALLOC_FORCE,
+ RUN_DELAYED_IPUTS = 9,
+ COMMIT_TRANS = 10,
};
-int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
- enum btrfs_chunk_alloc_enum force);
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
int nitems, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
- bool qgroup_free);
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
-int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
-void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
-void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes);
-int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
@@ -2784,15 +2507,11 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
int btrfs_start_write_no_snapshotting(struct btrfs_root *root);
void btrfs_end_write_no_snapshotting(struct btrfs_root *root);
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
-void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
-u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
- u64 start, u64 end);
-void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
int level, int *slot);
-int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
+int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid,
int type);
@@ -2810,20 +2529,9 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
struct btrfs_path *path,
u64 min_trans);
-enum btrfs_compare_tree_result {
- BTRFS_COMPARE_TREE_NEW,
- BTRFS_COMPARE_TREE_DELETED,
- BTRFS_COMPARE_TREE_CHANGED,
- BTRFS_COMPARE_TREE_SAME,
-};
-typedef int (*btrfs_changed_cb_t)(struct btrfs_path *left_path,
- struct btrfs_path *right_path,
- struct btrfs_key *key,
- enum btrfs_compare_tree_result result,
- void *ctx);
-int btrfs_compare_trees(struct btrfs_root *left_root,
- struct btrfs_root *right_root,
- btrfs_changed_cb_t cb, void *ctx);
+struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
+ int slot);
+
int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
@@ -2863,8 +2571,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
-void btrfs_set_path_blocking(struct btrfs_path *p);
-void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int slot, int nr);
@@ -3072,14 +2778,12 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
u64 inode_objectid, u64 ref_objectid, int ins_len,
int cow);
-int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot,
- const char *name,
- int name_len, struct btrfs_inode_ref **ref_ret);
-int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot,
- u64 ref_objectid, const char *name,
- int name_len,
- struct btrfs_inode_extref **extref_ret);
-
+struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
+ int slot, const char *name,
+ int name_len);
+struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
+ struct extent_buffer *leaf, int slot, u64 ref_objectid,
+ const char *name, int name_len);
/* file-item.c */
struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans,
@@ -3141,7 +2845,7 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
unsigned int extra_bits,
- struct extent_state **cached_state, int dedupe);
+ struct extent_state **cached_state);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
struct btrfs_root *parent_root,
@@ -3168,10 +2872,9 @@ int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *new,
- struct btrfs_path *path);
+ struct btrfs_root *root, struct btrfs_path *path);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *was_new);
+ struct btrfs_root *root);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset,
u64 start, u64 end, int create);
@@ -3207,7 +2910,7 @@ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int btrfs_ioctl_get_supported_features(void __user *arg);
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
-int btrfs_is_empty_uuid(u8 *uuid);
+int __pure btrfs_is_empty_uuid(u8 *uuid);
int btrfs_defrag_file(struct inode *inode, struct file *file,
struct btrfs_ioctl_defrag_range_args *range,
u64 newer_than, unsigned long max_pages);
@@ -3237,6 +2940,10 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode, u64 start,
u64 end, int drop_cache);
+int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path,
+ const u64 start, const u64 end,
+ struct btrfs_clone_extent_info *clone_info,
+ struct btrfs_trans_handle **trans_out);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
@@ -3252,12 +2959,6 @@ loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in,
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-/* sysfs.c */
-int __init btrfs_init_sysfs(void);
-void __cold btrfs_exit_sysfs(void);
-int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
-void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
-
/* super.c */
int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags);
@@ -3443,7 +3144,7 @@ __cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...);
-const char *btrfs_decode_error(int errno);
+const char * __attribute_const__ btrfs_decode_error(int errno);
__cold
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
@@ -3726,26 +3427,4 @@ static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
}
#endif
-static inline void cond_wake_up(struct wait_queue_head *wq)
-{
- /*
- * This implies a full smp_mb barrier, see comments for
- * waitqueue_active why.
- */
- if (wq_has_sleeper(wq))
- wake_up(wq);
-}
-
-static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
-{
- /*
- * Special case for conditional wakeup where the barrier required for
- * waitqueue_active is implied by some of the preceding code. Eg. one
- * of such atomic operations (atomic_dec_and_return, ...), or a
- * unlock/lock sequence, etc.
- */
- if (waitqueue_active(wq))
- wake_up(wq);
-}
-
#endif
diff --git a/fs/btrfs/dedupe.h b/fs/btrfs/dedupe.h
deleted file mode 100644
index 90281a7a35a8..000000000000
--- a/fs/btrfs/dedupe.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2016 Fujitsu. All rights reserved.
- */
-
-#ifndef BTRFS_DEDUPE_H
-#define BTRFS_DEDUPE_H
-
-/* later in-band dedupe will expand this struct */
-struct btrfs_dedupe_hash;
-
-#endif
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 17f7c0d38768..4cdac4d834f5 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -7,6 +7,7 @@
#include "space-info.h"
#include "transaction.h"
#include "qgroup.h"
+#include "block-group.h"
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
{
@@ -129,8 +130,6 @@ commit_trans:
return -ENOSPC;
}
btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, bytes);
- trace_btrfs_space_reservation(fs_info, "space_info",
- data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
return 0;
@@ -182,8 +181,6 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
data_sinfo = fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, -len);
- trace_btrfs_space_reservation(fs_info, "space_info",
- data_sinfo->flags, len, 0);
spin_unlock(&data_sinfo->lock);
}
@@ -254,13 +251,20 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
lockdep_assert_held(&inode->lock);
outstanding_extents = inode->outstanding_extents;
- if (outstanding_extents)
- reserve_size = btrfs_calc_trans_metadata_size(fs_info,
- outstanding_extents + 1);
+
+ /*
+ * Insert size for the number of outstanding extents, 1 normal size for
+ * updating the inode.
+ */
+ if (outstanding_extents) {
+ reserve_size = btrfs_calc_insert_metadata_size(fs_info,
+ outstanding_extents);
+ reserve_size += btrfs_calc_metadata_size(fs_info, 1);
+ }
csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
inode->csum_bytes);
- reserve_size += btrfs_calc_trans_metadata_size(fs_info,
- csum_leaves);
+ reserve_size += btrfs_calc_insert_metadata_size(fs_info,
+ csum_leaves);
/*
* For qgroup rsv, the calculation is very simple:
* account one nodesize for each outstanding extent
@@ -281,10 +285,16 @@ static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
{
u64 nr_extents = count_max_extents(num_bytes);
u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, num_bytes);
+ u64 inode_update = btrfs_calc_metadata_size(fs_info, 1);
- /* We add one for the inode update at finish ordered time */
- *meta_reserve = btrfs_calc_trans_metadata_size(fs_info,
- nr_extents + csum_leaves + 1);
+ *meta_reserve = btrfs_calc_insert_metadata_size(fs_info,
+ nr_extents + csum_leaves);
+
+ /*
+ * finish_ordered_io has to update the inode, so add the space required
+ * for an inode update.
+ */
+ *meta_reserve += inode_update;
*qgroup_reserve = nr_extents * fs_info->nodesize;
}
@@ -297,7 +307,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
unsigned nr_extents;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
- bool delalloc_lock = true;
/*
* If we are a free space inode we need to not flush since we will be in
@@ -310,7 +319,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
*/
if (btrfs_is_free_space_inode(inode)) {
flush = BTRFS_RESERVE_NO_FLUSH;
- delalloc_lock = false;
} else {
if (current->journal_info)
flush = BTRFS_RESERVE_FLUSH_LIMIT;
@@ -319,9 +327,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
schedule_timeout(1);
}
- if (delalloc_lock)
- mutex_lock(&inode->delalloc_mutex);
-
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
/*
@@ -338,10 +343,12 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
&qgroup_reserve);
ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true);
if (ret)
- goto out_fail;
+ return ret;
ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush);
- if (ret)
- goto out_qgroup;
+ if (ret) {
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
+ return ret;
+ }
/*
* Now we need to update our outstanding extents and csum bytes _first_
@@ -365,16 +372,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
block_rsv->qgroup_rsv_reserved += qgroup_reserve;
spin_unlock(&block_rsv->lock);
- if (delalloc_lock)
- mutex_unlock(&inode->delalloc_mutex);
return 0;
-out_qgroup:
- btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
-out_fail:
- btrfs_inode_rsv_release(inode, true);
- if (delalloc_lock)
- mutex_unlock(&inode->delalloc_mutex);
- return ret;
}
/**
@@ -408,7 +406,6 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
* btrfs_delalloc_release_extents - release our outstanding_extents
* @inode: the inode to balance the reservation for.
* @num_bytes: the number of bytes we originally reserved with
- * @qgroup_free: do we need to free qgroup meta reservation or convert them.
*
* When we reserve space we increase outstanding_extents for the extents we may
* add. Once we've set the range as delalloc or created our ordered extents we
@@ -416,8 +413,7 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
* temporarily tracked outstanding_extents. This _must_ be used in conjunction
* with btrfs_delalloc_reserve_metadata.
*/
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
- bool qgroup_free)
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned num_extents;
@@ -431,7 +427,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
if (btrfs_is_testing(fs_info))
return;
- btrfs_inode_rsv_release(inode, qgroup_free);
+ btrfs_inode_rsv_release(inode, true);
}
/**
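Note on the reservation math above: the old btrfs_calc_trans_metadata_size() call is split so that item insertions (the file extent items and csum items) are charged at the larger insertion rate, while the inode update done later by finish_ordered_io() is charged as a single plain update. Below is a minimal userspace sketch of that split; the MAX_LEVEL constant and the two formulas are illustrative assumptions, not the kernel definitions.

/*
 * Rough userspace sketch of the delalloc metadata reservation split.
 * The helpers below are stand-ins for btrfs_calc_insert_metadata_size()
 * and btrfs_calc_metadata_size(); their formulas are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_LEVEL 8	/* assumed tree height limit */

/* assumed: inserting n items may COW and split every level of the path */
static uint64_t calc_insert_metadata_size(uint64_t nodesize, uint64_t n)
{
	return nodesize * MAX_LEVEL * 2 * n;
}

/* assumed: updating n existing items COWs one path per item */
static uint64_t calc_metadata_size(uint64_t nodesize, uint64_t n)
{
	return nodesize * MAX_LEVEL * n;
}

int main(void)
{
	uint64_t nodesize = 16384;	/* default btrfs nodesize */
	uint64_t nr_extents = 4;	/* outstanding extents */
	uint64_t csum_leaves = 1;	/* leaves needed for checksums */

	/* insertions: file extent items plus csum items */
	uint64_t reserve = calc_insert_metadata_size(nodesize,
						     nr_extents + csum_leaves);
	/* plus one plain update for the inode item at ordered-IO finish */
	reserve += calc_metadata_size(nodesize, 1);

	printf("metadata reservation: %llu bytes\n",
	       (unsigned long long)reserve);
	return 0;
}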
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 43fdb2992956..d3e15e1d4a91 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -6,11 +6,13 @@
#include <linux/slab.h>
#include <linux/iversion.h>
+#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
+#include "locking.h"
#define BTRFS_DELAYED_WRITEBACK 512
#define BTRFS_DELAYED_BACKGROUND 128
@@ -474,6 +476,9 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
struct rb_root_cached *root;
struct btrfs_delayed_root *delayed_root;
+ /* Not associated with any delayed_node */
+ if (!delayed_item->delayed_node)
+ return;
delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
BUG_ON(!delayed_root);
@@ -555,7 +560,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
src_rsv = trans->block_rsv;
dst_rsv = &fs_info->delayed_block_rsv;
- num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+ num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
/*
* Here we migrate space rsv from transaction rsv, since have already
@@ -609,7 +614,7 @@ static int btrfs_delayed_inode_reserve_metadata(
src_rsv = trans->block_rsv;
dst_rsv = &fs_info->delayed_block_rsv;
- num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+ num_bytes = btrfs_calc_metadata_size(fs_info, 1);
/*
* btrfs_dirty_inode will update the inode under btrfs_join_transaction
@@ -1363,8 +1368,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
return -ENOMEM;
async_work->delayed_root = delayed_root;
- btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
- btrfs_async_run_delayed_root, NULL, NULL);
+ btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
+ NULL);
async_work->nr = nr;
btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
@@ -1525,7 +1530,12 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible.
*/
- BUG_ON(ret);
+ if (ret < 0) {
+ btrfs_err(trans->fs_info,
+"metadata reservation failed for delayed dir item deltiona, should have been reserved");
+ btrfs_release_delayed_item(item);
+ goto end;
+ }
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
@@ -1534,7 +1544,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->root_key.objectid,
node->inode_id, ret);
- BUG();
+ btrfs_delayed_item_release_metadata(dir->root, item);
+ btrfs_release_delayed_item(item);
}
mutex_unlock(&node->mutex);
end:
@@ -1939,12 +1950,19 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
}
inode_id = delayed_nodes[n - 1]->inode_id + 1;
-
- for (i = 0; i < n; i++)
- refcount_inc(&delayed_nodes[i]->refs);
+ for (i = 0; i < n; i++) {
+ /*
+ * Don't increase refs in case the node is dead and
+ * about to be removed from the tree in the loop below
+ */
+ if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
+ delayed_nodes[i] = NULL;
+ }
spin_unlock(&root->inode_lock);
for (i = 0; i < n; i++) {
+ if (!delayed_nodes[i])
+ continue;
__btrfs_kill_delayed_node(delayed_nodes[i]);
btrfs_release_delayed_node(delayed_nodes[i]);
}
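The refcount_inc_not_zero() change in btrfs_kill_all_delayed_nodes() only takes a reference while the node is still alive, so a node whose last reference is already being dropped is skipped rather than resurrected. A hedged userspace sketch of that pattern follows; the names and types are illustrative, not kernel API.

/*
 * Userspace sketch of "take a reference only if the object is still
 * alive", the semantics of refcount_inc_not_zero().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	atomic_int refs;
};

/* returns true and takes a reference only if refs was not already zero */
static bool get_node_if_alive(struct node *n)
{
	int old = atomic_load(&n->refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&n->refs, &old, old + 1))
			return true;
		/* old was reloaded by the failed CAS; retry */
	}
	return false;
}

int main(void)
{
	struct node alive = { .refs = 1 };
	struct node dying = { .refs = 0 };

	printf("alive: %d\n", get_node_if_alive(&alive));	/* prints 1 */
	printf("dying: %d\n", get_node_if_alive(&dying));	/* prints 0 */
	return 0;
}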
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 9a91d1eb0af4..df3bd880061d 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -79,7 +79,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
- u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, nr);
+ u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
u64 released = 0;
released = __btrfs_block_rsv_release(fs_info, block_rsv, num_bytes,
@@ -105,8 +105,8 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
if (!trans->delayed_ref_updates)
return;
- num_bytes = btrfs_calc_trans_metadata_size(fs_info,
- trans->delayed_ref_updates);
+ num_bytes = btrfs_calc_insert_metadata_size(fs_info,
+ trans->delayed_ref_updates);
spin_lock(&delayed_rsv->lock);
delayed_rsv->size += num_bytes;
delayed_rsv->full = 0;
@@ -158,7 +158,7 @@ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
0, num_bytes, 1);
if (to_free)
- btrfs_space_info_add_old_bytes(fs_info,
+ btrfs_space_info_free_bytes_may_use(fs_info,
delayed_refs_rsv->space_info, to_free);
}
@@ -174,7 +174,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
- u64 limit = btrfs_calc_trans_metadata_size(fs_info, 1);
+ u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
u64 num_bytes = 0;
int ret = -ENOSPC;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 6b2e9aa83ffa..f639dde2a679 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -9,6 +9,7 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/math64.h>
+#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
@@ -56,7 +57,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
no_valid_dev_replace_entry_found:
ret = 0;
dev_replace->replace_state =
- BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED;
+ BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
dev_replace->cont_reading_from_srcdev_mode =
BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;
dev_replace->time_started = 0;
@@ -985,7 +986,7 @@ static int btrfs_dev_replace_kthread(void *data)
return 0;
}
-int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
+int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
{
if (!dev_replace->is_valid)
return 0;
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index 78c5d8f1adda..60b70dacc299 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -17,6 +17,6 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info);
void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info);
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info);
-int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
+int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
#endif
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 41a2bd2e0c56..e0edfdc9c82b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -40,6 +40,7 @@
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
+#include "block-group.h"
#define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\
BTRFS_HEADER_FLAG_RELOC |\
@@ -204,7 +205,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
int ret;
@@ -212,7 +212,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em) {
- em->bdev = fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
goto out;
}
@@ -227,7 +226,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
em->len = (u64)-1;
em->block_len = (u64)-1;
em->block_start = 0;
- em->bdev = fs_info->fs_devices->latest_bdev;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 0);
@@ -351,6 +349,9 @@ static bool btrfs_supported_super_csum(u16 csum_type)
{
switch (csum_type) {
case BTRFS_CSUM_TYPE_CRC32:
+ case BTRFS_CSUM_TYPE_XXHASH:
+ case BTRFS_CSUM_TYPE_SHA256:
+ case BTRFS_CSUM_TYPE_BLAKE2:
return true;
default:
return false;
@@ -416,6 +417,16 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level,
*/
if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
return 0;
+
+ /* We have @first_key, so this @eb must have at least one item */
+ if (btrfs_header_nritems(eb) == 0) {
+ btrfs_err(fs_info,
+ "invalid tree nritems, bytenr=%llu nritems=0 expect >0",
+ eb->start);
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ return -EUCLEAN;
+ }
+
if (found_level)
btrfs_node_key_to_cpu(eb, &found_key, 0);
else
@@ -534,9 +545,11 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
ret = btrfs_check_leaf_full(eb);
if (ret < 0) {
+ btrfs_print_tree(eb, 0);
btrfs_err(fs_info,
"block=%llu write time tree block corruption detected",
eb->start);
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
return ret;
}
write_extent_buffer(eb, result, 0, csum_size);
@@ -597,7 +610,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
/* the pending IO might have been the only thing that kept this buffer
* in memory. Make sure we have a ref for all this other checks
*/
- extent_buffer_get(eb);
+ atomic_inc(&eb->refs);
reads_done = atomic_dec_and_test(&eb->io_pages);
if (!reads_done)
@@ -695,43 +708,31 @@ static void end_workqueue_bio(struct bio *bio)
struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
struct btrfs_fs_info *fs_info;
struct btrfs_workqueue *wq;
- btrfs_work_func_t func;
fs_info = end_io_wq->info;
end_io_wq->status = bio->bi_status;
if (bio_op(bio) == REQ_OP_WRITE) {
- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
+ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
wq = fs_info->endio_meta_write_workers;
- func = btrfs_endio_meta_write_helper;
- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
wq = fs_info->endio_freespace_worker;
- func = btrfs_freespace_write_helper;
- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
wq = fs_info->endio_raid56_workers;
- func = btrfs_endio_raid56_helper;
- } else {
+ else
wq = fs_info->endio_write_workers;
- func = btrfs_endio_write_helper;
- }
} else {
- if (unlikely(end_io_wq->metadata ==
- BTRFS_WQ_ENDIO_DIO_REPAIR)) {
+ if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR))
wq = fs_info->endio_repair_workers;
- func = btrfs_endio_repair_helper;
- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
wq = fs_info->endio_raid56_workers;
- func = btrfs_endio_raid56_helper;
- } else if (end_io_wq->metadata) {
+ else if (end_io_wq->metadata)
wq = fs_info->endio_meta_workers;
- func = btrfs_endio_meta_helper;
- } else {
+ else
wq = fs_info->endio_workers;
- func = btrfs_endio_helper;
- }
}
- btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
+ btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
btrfs_queue_work(wq, &end_io_wq->work);
}
@@ -792,8 +793,13 @@ static void run_one_async_done(struct btrfs_work *work)
return;
}
- ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
- async->mirror_num, 1);
+ /*
+ * All of the bios that pass through here are from async helpers.
+ * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
+ * This changes nothing when cgroups aren't in use.
+ */
+ async->bio->bi_opf |= REQ_CGROUP_PUNT;
+ ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
if (ret) {
async->bio->bi_status = ret;
bio_endio(async->bio);
@@ -824,8 +830,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
async->mirror_num = mirror_num;
async->submit_bio_start = submit_bio_start;
- btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
- run_one_async_done, run_one_async_free);
+ btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
+ run_one_async_free);
async->bio_offset = bio_offset;
@@ -893,12 +899,12 @@ static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
BTRFS_WQ_ENDIO_METADATA);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
} else if (!async) {
ret = btree_csum_one_bio(bio);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
} else {
/*
* kthread helpers are used to submit writes so that
@@ -1037,35 +1043,6 @@ void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
free_extent_buffer(buf);
}
-int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
- int mirror_num, struct extent_buffer **eb)
-{
- struct extent_buffer *buf = NULL;
- int ret;
-
- buf = btrfs_find_create_tree_block(fs_info, bytenr);
- if (IS_ERR(buf))
- return 0;
-
- set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
-
- ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num);
- if (ret) {
- free_extent_buffer_stale(buf);
- return ret;
- }
-
- if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
- free_extent_buffer_stale(buf);
- return -EIO;
- } else if (extent_buffer_uptodate(buf)) {
- *eb = buf;
- } else {
- free_extent_buffer(buf);
- }
- return 0;
-}
-
struct extent_buffer *btrfs_find_create_tree_block(
struct btrfs_fs_info *fs_info,
u64 bytenr)
@@ -1675,8 +1652,8 @@ static void end_workqueue_fn(struct btrfs_work *work)
bio->bi_status = end_io_wq->status;
bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io;
- kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
bio_endio(bio);
+ kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
}
static int cleaner_kthread(void *arg)
@@ -1771,7 +1748,7 @@ static int transaction_kthread(void *arg)
}
now = ktime_get_seconds();
- if (cur->state < TRANS_STATE_BLOCKED &&
+ if (cur->state < TRANS_STATE_COMMIT_START &&
!test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
(now < cur->start_time ||
now - cur->start_time < fs_info->commit_interval)) {
@@ -1810,18 +1787,18 @@ sleep:
}
/*
- * this will find the highest generation in the array of
- * root backups. The index of the highest array is returned,
- * or -1 if we can't find anything.
+ * This will find the highest generation in the array of root backups. The
+ * index of the backup with the highest generation is returned, or -EINVAL
+ * if we can't find anything.
*
* We check to make sure the array is valid by comparing the
* generation of the latest root in the array with the generation
* in the super block. If they don't match we pitch it.
*/
-static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
+static int find_newest_super_backup(struct btrfs_fs_info *info)
{
+ const u64 newest_gen = btrfs_super_generation(info->super_copy);
u64 cur;
- int newest_index = -1;
struct btrfs_root_backup *root_backup;
int i;
@@ -1829,37 +1806,10 @@ static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
root_backup = info->super_copy->super_roots + i;
cur = btrfs_backup_tree_root_gen(root_backup);
if (cur == newest_gen)
- newest_index = i;
- }
-
- /* check to see if we actually wrapped around */
- if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
- root_backup = info->super_copy->super_roots;
- cur = btrfs_backup_tree_root_gen(root_backup);
- if (cur == newest_gen)
- newest_index = 0;
+ return i;
}
- return newest_index;
-}
-
-
-/*
- * find the oldest backup so we know where to store new entries
- * in the backup array. This will set the backup_root_index
- * field in the fs_info struct
- */
-static void find_oldest_super_backup(struct btrfs_fs_info *info,
- u64 newest_gen)
-{
- int newest_index = -1;
- newest_index = find_newest_super_backup(info, newest_gen);
- /* if there was garbage in there, just move along */
- if (newest_index == -1) {
- info->backup_root_index = 0;
- } else {
- info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
- }
+ return -EINVAL;
}
/*
@@ -1869,22 +1819,8 @@ static void find_oldest_super_backup(struct btrfs_fs_info *info,
*/
static void backup_super_roots(struct btrfs_fs_info *info)
{
- int next_backup;
+ const int next_backup = info->backup_root_index;
struct btrfs_root_backup *root_backup;
- int last_backup;
-
- next_backup = info->backup_root_index;
- last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
- BTRFS_NUM_BACKUP_ROOTS;
-
- /*
- * just overwrite the last backup if we're at the same generation
- * this happens only at umount
- */
- root_backup = info->super_for_commit->super_roots + last_backup;
- if (btrfs_backup_tree_root_gen(root_backup) ==
- btrfs_header_generation(info->tree_root->node))
- next_backup = last_backup;
root_backup = info->super_for_commit->super_roots + next_backup;
@@ -1957,40 +1893,31 @@ static void backup_super_roots(struct btrfs_fs_info *info)
}
/*
- * this copies info out of the root backup array and back into
- * the in-memory super block. It is meant to help iterate through
- * the array, so you send it the number of backups you've already
- * tried and the last backup index you used.
+ * read_backup_root - Reads a backup root based on the passed priority. Prio 0
+ * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
*
- * this returns -1 when it has tried all the backups
+ * fs_info - filesystem whose backup roots need to be read
+ * priority - priority of backup root required
+ *
+ * Returns backup root index on success and -EINVAL otherwise.
*/
-static noinline int next_root_backup(struct btrfs_fs_info *info,
- struct btrfs_super_block *super,
- int *num_backups_tried, int *backup_index)
+static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
{
+ int backup_index = find_newest_super_backup(fs_info);
+ struct btrfs_super_block *super = fs_info->super_copy;
struct btrfs_root_backup *root_backup;
- int newest = *backup_index;
-
- if (*num_backups_tried == 0) {
- u64 gen = btrfs_super_generation(super);
- newest = find_newest_super_backup(info, gen);
- if (newest == -1)
- return -1;
+ if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
+ if (priority == 0)
+ return backup_index;
- *backup_index = newest;
- *num_backups_tried = 1;
- } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
- /* we've tried all the backups, all done */
- return -1;
+ backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
+ backup_index %= BTRFS_NUM_BACKUP_ROOTS;
} else {
- /* jump to the next oldest backup */
- newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
- BTRFS_NUM_BACKUP_ROOTS;
- *backup_index = newest;
- *num_backups_tried += 1;
+ return -EINVAL;
}
- root_backup = super->super_roots + newest;
+
+ root_backup = super->super_roots + backup_index;
btrfs_set_super_generation(super,
btrfs_backup_tree_root_gen(root_backup));
@@ -2000,12 +1927,13 @@ static noinline int next_root_backup(struct btrfs_fs_info *info,
btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
/*
- * fixme: the total bytes and num_devices need to match or we should
+ * Fixme: the total bytes and num_devices need to match or we should
* need a fsck
*/
btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
- return 0;
+
+ return backup_index;
}
/* helper to cleanup workers */
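The backup root handling above reduces to ring arithmetic over BTRFS_NUM_BACKUP_ROOTS slots: priority 0 maps to the newest slot, higher priorities walk backwards with wraparound, and new backups are written into the slot after the one currently in use. A small standalone sketch of that arithmetic (the constant matches the four-slot ring):

/*
 * Userspace sketch of the backup-root ring arithmetic used by
 * read_backup_root() and backup_super_roots() above.
 */
#include <stdio.h>

#define NUM_BACKUP_ROOTS 4

/* slot holding the Nth-newest backup, given the newest slot index */
static int backup_slot_for_priority(int newest_index, int priority)
{
	if (priority < 0 || priority >= NUM_BACKUP_ROOTS)
		return -1;	/* stand-in for -EINVAL */
	return (newest_index + NUM_BACKUP_ROOTS - priority) % NUM_BACKUP_ROOTS;
}

int main(void)
{
	int newest = 2;	/* e.g. find_newest_super_backup() returned slot 2 */

	for (int prio = 0; prio < NUM_BACKUP_ROOTS; prio++)
		printf("priority %d -> slot %d\n", prio,
		       backup_slot_for_priority(newest, prio));

	/* new backups always go into the slot after the one in use */
	printf("next write slot: %d\n", (newest + 1) % NUM_BACKUP_ROOTS);
	return 0;
}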
@@ -2020,13 +1948,11 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
btrfs_destroy_workqueue(fs_info->rmw_workers);
btrfs_destroy_workqueue(fs_info->endio_write_workers);
btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
- btrfs_destroy_workqueue(fs_info->submit_workers);
btrfs_destroy_workqueue(fs_info->delayed_workers);
btrfs_destroy_workqueue(fs_info->caching_workers);
btrfs_destroy_workqueue(fs_info->readahead_workers);
btrfs_destroy_workqueue(fs_info->flush_workers);
btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
- btrfs_destroy_workqueue(fs_info->extent_workers);
/*
* Now that all other work queues are destroyed, we can safely destroy
* the queues used for metadata I/O, since tasks from those other work
@@ -2047,7 +1973,7 @@ static void free_root_extent_buffers(struct btrfs_root *root)
}
/* helper to cleanup tree roots */
-static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
+static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
{
free_root_extent_buffers(info->tree_root);
@@ -2056,7 +1982,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
free_root_extent_buffers(info->csum_root);
free_root_extent_buffers(info->quota_root);
free_root_extent_buffers(info->uuid_root);
- if (chunk_root)
+ if (free_chunk_root)
free_root_extent_buffers(info->chunk_root);
free_root_extent_buffers(info->free_space_root);
}
@@ -2186,16 +2112,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
fs_info->caching_workers =
btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
- /*
- * a higher idle thresh on the submit workers makes it much more
- * likely that bios will be send down in a sane order to the
- * devices
- */
- fs_info->submit_workers =
- btrfs_alloc_workqueue(fs_info, "submit", flags,
- min_t(u64, fs_devices->num_devices,
- max_active), 64);
-
fs_info->fixup_workers =
btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
@@ -2232,13 +2148,9 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
max_active, 2);
fs_info->qgroup_rescan_workers =
btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
- fs_info->extent_workers =
- btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
- min_t(u64, fs_devices->num_devices,
- max_active), 8);
if (!(fs_info->workers && fs_info->delalloc_workers &&
- fs_info->submit_workers && fs_info->flush_workers &&
+ fs_info->flush_workers &&
fs_info->endio_workers && fs_info->endio_meta_workers &&
fs_info->endio_meta_write_workers &&
fs_info->endio_repair_workers &&
@@ -2246,7 +2158,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
fs_info->endio_freespace_worker && fs_info->rmw_workers &&
fs_info->caching_workers && fs_info->readahead_workers &&
fs_info->fixup_workers && fs_info->delayed_workers &&
- fs_info->extent_workers &&
fs_info->qgroup_rescan_workers)) {
return -ENOMEM;
}
@@ -2257,13 +2168,13 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
{
struct crypto_shash *csum_shash;
- const char *csum_name = btrfs_super_csum_name(csum_type);
+ const char *csum_driver = btrfs_super_csum_driver(csum_type);
- csum_shash = crypto_alloc_shash(csum_name, 0, 0);
+ csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
if (IS_ERR(csum_shash)) {
btrfs_err(fs_info, "error allocating %s hash for checksum",
- csum_name);
+ csum_driver);
return PTR_ERR(csum_shash);
}
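btrfs_init_csum_hash() now asks the crypto layer for the name returned by btrfs_super_csum_driver() instead of the generic algorithm name. As a rough illustration of the shash API it relies on, here is a hedged out-of-tree module sketch that allocates a crc32c transform and computes one digest; error handling is minimal and the module exists only for illustration.

/*
 * Illustrative out-of-tree module: allocate a crc32c shash by name and
 * compute a single digest, mirroring the allocation done above.
 */
#include <linux/module.h>
#include <crypto/hash.h>

static int __init csum_demo_init(void)
{
	struct crypto_shash *tfm;
	const u8 data[] = "hello";
	u8 digest[4];	/* crc32c digest is 4 bytes */
	int ret;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, data, sizeof(data) - 1, digest);
	}

	pr_info("crc32c(\"hello\") = %*phN (ret=%d)\n", 4, digest, ret);
	crypto_free_shash(tfm);
	return 0;
}

static void __exit csum_demo_exit(void)
{
}

module_init(csum_demo_init);
module_exit(csum_demo_exit);
MODULE_LICENSE("GPL");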
@@ -2613,7 +2524,101 @@ out:
return ret;
}
-int open_ctree(struct super_block *sb,
+static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
+{
+ int backup_index = find_newest_super_backup(fs_info);
+ struct btrfs_super_block *sb = fs_info->super_copy;
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ bool handle_error = false;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
+ u64 generation;
+ int level;
+
+ if (handle_error) {
+ if (!IS_ERR(tree_root->node))
+ free_extent_buffer(tree_root->node);
+ tree_root->node = NULL;
+
+ if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
+ break;
+
+ free_root_pointers(fs_info, false);
+
+ /*
+ * Don't use the log in recovery mode, it won't be
+ * valid
+ */
+ btrfs_set_super_log_root(sb, 0);
+
+ /* We can't trust the free space cache either */
+ btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
+
+ ret = read_backup_root(fs_info, i);
+ backup_index = ret;
+ if (ret < 0)
+ return ret;
+ }
+ generation = btrfs_super_generation(sb);
+ level = btrfs_super_root_level(sb);
+ tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb),
+ generation, level, NULL);
+ if (IS_ERR(tree_root->node) ||
+ !extent_buffer_uptodate(tree_root->node)) {
+ handle_error = true;
+
+ if (IS_ERR(tree_root->node))
+ ret = PTR_ERR(tree_root->node);
+ else if (!extent_buffer_uptodate(tree_root->node))
+ ret = -EUCLEAN;
+
+ btrfs_warn(fs_info, "failed to read tree root");
+ continue;
+ }
+
+ btrfs_set_root_node(&tree_root->root_item, tree_root->node);
+ tree_root->commit_root = btrfs_root_node(tree_root);
+ btrfs_set_root_refs(&tree_root->root_item, 1);
+
+ /*
+ * No need to hold btrfs_root::objectid_mutex since the fs
+ * hasn't been fully initialised and we are the only user
+ */
+ ret = btrfs_find_highest_objectid(tree_root,
+ &tree_root->highest_objectid);
+ if (ret < 0) {
+ handle_error = true;
+ continue;
+ }
+
+ ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+
+ ret = btrfs_read_roots(fs_info);
+ if (ret < 0) {
+ handle_error = true;
+ continue;
+ }
+
+ /* All successful */
+ fs_info->generation = generation;
+ fs_info->last_trans_committed = generation;
+
+ /* Always begin writing backup roots after the one being used */
+ if (backup_index < 0) {
+ fs_info->backup_root_index = 0;
+ } else {
+ fs_info->backup_root_index = backup_index + 1;
+ fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
+ }
+ break;
+ }
+
+ return ret;
+}
+
+int __cold open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options)
{
@@ -2631,8 +2636,6 @@ int open_ctree(struct super_block *sb,
struct btrfs_root *chunk_root;
int ret;
int err = -EINVAL;
- int num_backups_tried = 0;
- int backup_index = 0;
int clear_free_space_tree = 0;
int level;
@@ -2683,8 +2686,6 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->delayed_iputs);
INIT_LIST_HEAD(&fs_info->delalloc_roots);
INIT_LIST_HEAD(&fs_info->caching_block_groups);
- INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
- spin_lock_init(&fs_info->pending_raid_kobjs_lock);
spin_lock_init(&fs_info->delalloc_root_lock);
spin_lock_init(&fs_info->trans_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
@@ -2905,13 +2906,6 @@ int open_ctree(struct super_block *sb,
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
/*
- * run through our array of backup supers and setup
- * our ring pointer to the oldest one
- */
- generation = btrfs_super_generation(disk_super);
- find_oldest_super_backup(fs_info, generation);
-
- /*
* In the long term, we'll store the compression type in the super
* block, and it'll be used for per file compression control.
*/
@@ -3057,44 +3051,9 @@ int open_ctree(struct super_block *sb,
goto fail_tree_roots;
}
-retry_root_backup:
- generation = btrfs_super_generation(disk_super);
- level = btrfs_super_root_level(disk_super);
-
- tree_root->node = read_tree_block(fs_info,
- btrfs_super_root(disk_super),
- generation, level, NULL);
- if (IS_ERR(tree_root->node) ||
- !extent_buffer_uptodate(tree_root->node)) {
- btrfs_warn(fs_info, "failed to read tree root");
- if (!IS_ERR(tree_root->node))
- free_extent_buffer(tree_root->node);
- tree_root->node = NULL;
- goto recovery_tree_root;
- }
-
- btrfs_set_root_node(&tree_root->root_item, tree_root->node);
- tree_root->commit_root = btrfs_root_node(tree_root);
- btrfs_set_root_refs(&tree_root->root_item, 1);
-
- mutex_lock(&tree_root->objectid_mutex);
- ret = btrfs_find_highest_objectid(tree_root,
- &tree_root->highest_objectid);
- if (ret) {
- mutex_unlock(&tree_root->objectid_mutex);
- goto recovery_tree_root;
- }
-
- ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
-
- mutex_unlock(&tree_root->objectid_mutex);
-
- ret = btrfs_read_roots(fs_info);
+ ret = init_tree_roots(fs_info);
if (ret)
- goto recovery_tree_root;
-
- fs_info->generation = generation;
- fs_info->last_trans_committed = generation;
+ goto fail_tree_roots;
ret = btrfs_verify_dev_extents(fs_info);
if (ret) {
@@ -3362,7 +3321,7 @@ fail_block_groups:
btrfs_put_block_group_cache(fs_info);
fail_tree_roots:
- free_root_pointers(fs_info, 1);
+ free_root_pointers(fs_info, true);
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
fail_sb_buffer:
@@ -3389,24 +3348,6 @@ fail:
btrfs_free_stripe_hash_table(fs_info);
btrfs_close_devices(fs_info->fs_devices);
return err;
-
-recovery_tree_root:
- if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
- goto fail_tree_roots;
-
- free_root_pointers(fs_info, 0);
-
- /* don't use the log in recovery mode, it won't be valid */
- btrfs_set_super_log_root(disk_super, 0);
-
- /* we can't trust the free space cache either */
- btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
-
- ret = next_root_backup(fs_info, fs_info->super_copy,
- &num_backups_tried, &backup_index);
- if (ret == -1)
- goto fail_block_groups;
- goto retry_root_backup;
}
ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
@@ -4000,7 +3941,7 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info)
return btrfs_commit_transaction(trans);
}
-void close_ctree(struct btrfs_fs_info *fs_info)
+void __cold close_ctree(struct btrfs_fs_info *fs_info)
{
int ret;
@@ -4088,7 +4029,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
btrfs_free_block_groups(fs_info);
clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
- free_root_pointers(fs_info, 1);
+ free_root_pointers(fs_info, true);
iput(fs_info->btree_inode);
@@ -4106,6 +4047,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
cleanup_srcu_struct(&fs_info->subvol_srcu);
+ btrfs_free_csum_hash(fs_info);
btrfs_free_stripe_hash_table(fs_info);
btrfs_free_ref_cache(fs_info);
}
@@ -4464,7 +4406,7 @@ again:
return 0;
}
-static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
+static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
{
struct inode *inode;
@@ -4481,12 +4423,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
struct btrfs_fs_info *fs_info)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
spin_lock(&cur_trans->dirty_bgs_lock);
while (!list_empty(&cur_trans->dirty_bgs)) {
cache = list_first_entry(&cur_trans->dirty_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
dirty_list);
if (!list_empty(&cache->io_list)) {
@@ -4514,7 +4456,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
*/
while (!list_empty(&cur_trans->io_bgs)) {
cache = list_first_entry(&cur_trans->io_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
io_list);
list_del_init(&cache->io_list);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e80f7c45a307..76f123ebb292 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -45,16 +45,14 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 parent_transid, int level,
struct btrfs_key *first_key);
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr);
-int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
- int mirror_num, struct extent_buffer **eb);
struct extent_buffer *btrfs_find_create_tree_block(
struct btrfs_fs_info *fs_info,
u64 bytenr);
void btrfs_clean_tree_block(struct extent_buffer *buf);
-int open_ctree(struct super_block *sb,
+int __cold open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options);
-void close_ctree(struct btrfs_fs_info *fs_info);
+void __cold close_ctree(struct btrfs_fs_info *fs_info);
int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index ddf28ecf17f9..72e312cae69d 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -87,7 +87,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(sb, &key, root, NULL);
+ inode = btrfs_iget(sb, &key, root);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto fail;
@@ -214,7 +214,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL));
+ return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root));
fail:
btrfs_free_path(path);
return ERR_PTR(ret);
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
new file mode 100644
index 000000000000..a3febe746c79
--- /dev/null
+++ b/fs/btrfs/extent-io-tree.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_EXTENT_IO_TREE_H
+#define BTRFS_EXTENT_IO_TREE_H
+
+struct extent_changeset;
+struct io_failure_record;
+
+/* Bits for the extent state */
+#define EXTENT_DIRTY (1U << 0)
+#define EXTENT_UPTODATE (1U << 1)
+#define EXTENT_LOCKED (1U << 2)
+#define EXTENT_NEW (1U << 3)
+#define EXTENT_DELALLOC (1U << 4)
+#define EXTENT_DEFRAG (1U << 5)
+#define EXTENT_BOUNDARY (1U << 6)
+#define EXTENT_NODATASUM (1U << 7)
+#define EXTENT_CLEAR_META_RESV (1U << 8)
+#define EXTENT_NEED_WAIT (1U << 9)
+#define EXTENT_DAMAGED (1U << 10)
+#define EXTENT_NORESERVE (1U << 11)
+#define EXTENT_QGROUP_RESERVED (1U << 12)
+#define EXTENT_CLEAR_DATA_RESV (1U << 13)
+#define EXTENT_DELALLOC_NEW (1U << 14)
+#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
+ EXTENT_CLEAR_DATA_RESV)
+#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING)
+
+/*
+ * Redefined bits above which are used only in the device allocation tree,
+ * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
+ * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
+ * manipulation functions
+ */
+#define CHUNK_ALLOCATED EXTENT_DIRTY
+#define CHUNK_TRIMMED EXTENT_DEFRAG
+
+enum {
+ IO_TREE_FS_INFO_FREED_EXTENTS0,
+ IO_TREE_FS_INFO_FREED_EXTENTS1,
+ IO_TREE_INODE_IO,
+ IO_TREE_INODE_IO_FAILURE,
+ IO_TREE_RELOC_BLOCKS,
+ IO_TREE_TRANS_DIRTY_PAGES,
+ IO_TREE_ROOT_DIRTY_LOG_PAGES,
+ IO_TREE_SELFTEST,
+};
+
+struct extent_io_tree {
+ struct rb_root state;
+ struct btrfs_fs_info *fs_info;
+ void *private_data;
+ u64 dirty_bytes;
+ bool track_uptodate;
+
+ /* Who owns this io tree, should be one of IO_TREE_* */
+ u8 owner;
+
+ spinlock_t lock;
+ const struct extent_io_ops *ops;
+};
+
+struct extent_state {
+ u64 start;
+ u64 end; /* inclusive */
+ struct rb_node rb_node;
+
+ /* ADD NEW ELEMENTS AFTER THIS */
+ wait_queue_head_t wq;
+ refcount_t refs;
+ unsigned state;
+
+ struct io_failure_record *failrec;
+
+#ifdef CONFIG_BTRFS_DEBUG
+ struct list_head leak_list;
+#endif
+};
+
+int __init extent_state_cache_init(void);
+void __cold extent_state_cache_exit(void);
+
+void extent_io_tree_init(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *tree, unsigned int owner,
+ void *private_data);
+void extent_io_tree_release(struct extent_io_tree *tree);
+
+int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached);
+
+static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+{
+ return lock_extent_bits(tree, start, end, NULL);
+}
+
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
+
+int __init extent_io_init(void);
+void __cold extent_io_exit(void);
+
+u64 count_range_bits(struct extent_io_tree *tree,
+ u64 *start, u64 search_end,
+ u64 max_bytes, unsigned bits, int contig);
+
+void free_extent_state(struct extent_state *state);
+int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, int filled,
+ struct extent_state *cached_state);
+int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, struct extent_changeset *changeset);
+int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, int wake, int delete,
+ struct extent_state **cached);
+int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, int wake, int delete,
+ struct extent_state **cached, gfp_t mask,
+ struct extent_changeset *changeset);
+
+static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+{
+ return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
+}
+
+static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
+{
+ return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ GFP_NOFS, NULL);
+}
+
+static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
+ u64 start, u64 end, struct extent_state **cached)
+{
+ return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ GFP_ATOMIC, NULL);
+}
+
+static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
+ u64 end, unsigned bits)
+{
+ int wake = 0;
+
+ if (bits & EXTENT_LOCKED)
+ wake = 1;
+
+ return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
+}
+
+int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, struct extent_changeset *changeset);
+int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, u64 *failed_start,
+ struct extent_state **cached_state, gfp_t mask);
+int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits);
+
+static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
+ u64 end, unsigned bits)
+{
+ return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
+}
+
+static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached_state)
+{
+ return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
+ cached_state, GFP_NOFS, NULL);
+}
+
+static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
+ u64 end, gfp_t mask)
+{
+ return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
+ NULL, mask);
+}
+
+static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
+{
+ return clear_extent_bit(tree, start, end,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 0, 0, cached);
+}
+
+int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, unsigned clear_bits,
+ struct extent_state **cached_state);
+
+static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
+ u64 end, unsigned int extra_bits,
+ struct extent_state **cached_state)
+{
+ return set_extent_bit(tree, start, end,
+ EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
+ NULL, cached_state, GFP_NOFS);
+}
+
+static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached_state)
+{
+ return set_extent_bit(tree, start, end,
+ EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
+ NULL, cached_state, GFP_NOFS);
+}
+
+static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
+ u64 end)
+{
+ return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
+ GFP_NOFS);
+}
+
+static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+ return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
+ cached_state, mask);
+}
+
+int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, unsigned bits,
+ struct extent_state **cached_state);
+void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, unsigned bits);
+int extent_invalidatepage(struct extent_io_tree *tree,
+ struct page *page, unsigned long offset);
+bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
+ u64 *end, u64 max_bytes,
+ struct extent_state **cached_state);
+
+/* This should be reworked in the future and put elsewhere. */
+int get_state_failrec(struct extent_io_tree *tree, u64 start,
+ struct io_failure_record **failrec);
+int set_state_failrec(struct extent_io_tree *tree, u64 start,
+ struct io_failure_record *failrec);
+void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
+ u64 end);
+int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
+ struct io_failure_record **failrec_ret);
+int free_io_failure(struct extent_io_tree *failure_tree,
+ struct extent_io_tree *io_tree,
+ struct io_failure_record *rec);
+int clean_io_failure(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *failure_tree,
+ struct extent_io_tree *io_tree, u64 start,
+ struct page *page, u64 ino, unsigned int pg_offset);
+
+#endif /* BTRFS_EXTENT_IO_TREE_H */
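The new extent-io-tree.h header collects the extent state bits and the range-locking helpers that previously lived in extent_io.h. A short in-tree style fragment showing the usual lock/operate/unlock pattern these helpers support (the function and its placement are assumptions for illustration; the range end is inclusive):

/*
 * Illustrative fragment, assumed to live inside fs/btrfs: serialize work
 * against a file range using the extent io tree. The cached_state pointer
 * lets the unlock skip a second tree search.
 */
#include "ctree.h"
#include "extent-io-tree.h"
#include "btrfs_inode.h"

static void demo_locked_range_op(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct extent_state *cached_state = NULL;

	/* blocks until no other task holds EXTENT_LOCKED on [start, end] */
	lock_extent_bits(&inode->io_tree, start, end, &cached_state);

	/* ... operate on the range while it is locked ... */

	/* clears EXTENT_LOCKED and drops the cached state reference */
	unlock_extent_cached(&inode->io_tree, start, end, &cached_state);
}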
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d3b58e388535..153f71a5bba9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -16,6 +16,7 @@
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
+#include "misc.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
@@ -24,13 +25,13 @@
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
-#include "math.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
+#include "block-group.h"
#undef SCRAMBLE_DELAYED_REFS
@@ -53,132 +54,13 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
-static noinline int
-block_group_cache_done(struct btrfs_block_group_cache *cache)
-{
- smp_mb();
- return cache->cached == BTRFS_CACHE_FINISHED ||
- cache->cached == BTRFS_CACHE_ERROR;
-}
-
-static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
+static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
return (cache->flags & bits) == bits;
}
-void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
-{
- atomic_inc(&cache->count);
-}
-
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
-{
- if (atomic_dec_and_test(&cache->count)) {
- WARN_ON(cache->pinned > 0);
- WARN_ON(cache->reserved > 0);
-
- /*
- * If not empty, someone is still holding mutex of
- * full_stripe_lock, which can only be released by caller.
- * And it will definitely cause use-after-free when caller
- * tries to release full stripe lock.
- *
- * No better way to resolve, but only to warn.
- */
- WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
- kfree(cache->free_space_ctl);
- kfree(cache);
- }
-}
-
-/*
- * this adds the block group to the fs_info rb tree for the block group
- * cache
- */
-static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
- struct btrfs_block_group_cache *block_group)
-{
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct btrfs_block_group_cache *cache;
-
- spin_lock(&info->block_group_cache_lock);
- p = &info->block_group_cache_tree.rb_node;
-
- while (*p) {
- parent = *p;
- cache = rb_entry(parent, struct btrfs_block_group_cache,
- cache_node);
- if (block_group->key.objectid < cache->key.objectid) {
- p = &(*p)->rb_left;
- } else if (block_group->key.objectid > cache->key.objectid) {
- p = &(*p)->rb_right;
- } else {
- spin_unlock(&info->block_group_cache_lock);
- return -EEXIST;
- }
- }
-
- rb_link_node(&block_group->cache_node, parent, p);
- rb_insert_color(&block_group->cache_node,
- &info->block_group_cache_tree);
-
- if (info->first_logical_byte > block_group->key.objectid)
- info->first_logical_byte = block_group->key.objectid;
-
- spin_unlock(&info->block_group_cache_lock);
-
- return 0;
-}
-
-/*
- * This will return the block group at or after bytenr if contains is 0, else
- * it will return the block group that contains the bytenr
- */
-static struct btrfs_block_group_cache *
-block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
- int contains)
-{
- struct btrfs_block_group_cache *cache, *ret = NULL;
- struct rb_node *n;
- u64 end, start;
-
- spin_lock(&info->block_group_cache_lock);
- n = info->block_group_cache_tree.rb_node;
-
- while (n) {
- cache = rb_entry(n, struct btrfs_block_group_cache,
- cache_node);
- end = cache->key.objectid + cache->key.offset - 1;
- start = cache->key.objectid;
-
- if (bytenr < start) {
- if (!contains && (!ret || start < ret->key.objectid))
- ret = cache;
- n = n->rb_left;
- } else if (bytenr > start) {
- if (contains && bytenr <= end) {
- ret = cache;
- break;
- }
- n = n->rb_right;
- } else {
- ret = cache;
- break;
- }
- }
- if (ret) {
- btrfs_get_block_group(ret);
- if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
- info->first_logical_byte = ret->key.objectid;
- }
- spin_unlock(&info->block_group_cache_lock);
-
- return ret;
-}
-
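
The rb-tree search removed just above implements two lookups at once: with contains set it returns the block group covering a byte offset, otherwise the first group starting at or after it. A minimal user-space sketch of those semantics over a sorted array (binary search standing in for the rb-tree; struct bg and bg_lookup are invented names):

/*
 * Illustrative user-space sketch, not kernel code: block groups sorted by
 * start offset, looked up either by "contains bytenr" or "at or after bytenr".
 */
#include <stdio.h>
#include <stddef.h>

struct bg {
    unsigned long long start;
    unsigned long long length;
};

/* contains=1: group covering bytenr; contains=0: first group at/after it */
static const struct bg *bg_lookup(const struct bg *groups, size_t n,
                                  unsigned long long bytenr, int contains)
{
    const struct bg *best = NULL;
    size_t lo = 0, hi = n;

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        const struct bg *cur = &groups[mid];
        unsigned long long end = cur->start + cur->length - 1;

        if (bytenr < cur->start) {
            if (!contains)
                best = cur;            /* candidate "at or after" */
            hi = mid;
        } else if (contains && bytenr <= end) {
            return cur;                /* bytenr falls inside cur */
        } else if (bytenr == cur->start) {
            return cur;                /* starts exactly at bytenr */
        } else {
            lo = mid + 1;
        }
    }
    return contains ? NULL : best;
}

int main(void)
{
    const struct bg groups[] = {
        { 1024, 1024 }, { 4096, 2048 }, { 8192, 1024 },
    };
    const struct bg *g = bg_lookup(groups, 3, 5000, 1);

    printf("5000 lives in the group starting at %llu\n", g ? g->start : 0);
    g = bg_lookup(groups, 3, 3000, 0);
    printf("first group at/after 3000 starts at %llu\n", g ? g->start : 0);
    return 0;
}
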
-static int add_excluded_extent(struct btrfs_fs_info *fs_info,
- u64 start, u64 num_bytes)
+int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
+ u64 start, u64 num_bytes)
{
u64 end = start + num_bytes - 1;
set_extent_bits(&fs_info->freed_extents[0],
@@ -188,13 +70,13 @@ static int add_excluded_extent(struct btrfs_fs_info *fs_info,
return 0;
}
-static void free_excluded_extents(struct btrfs_block_group_cache *cache)
+void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
u64 start, end;
- start = cache->key.objectid;
- end = start + cache->key.offset - 1;
+ start = cache->start;
+ end = start + cache->length - 1;
clear_extent_bits(&fs_info->freed_extents[0],
start, end, EXTENT_UPTODATE);
@@ -202,494 +84,6 @@ static void free_excluded_extents(struct btrfs_block_group_cache *cache)
start, end, EXTENT_UPTODATE);
}
-static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
-{
- struct btrfs_fs_info *fs_info = cache->fs_info;
- u64 bytenr;
- u64 *logical;
- int stripe_len;
- int i, nr, ret;
-
- if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
- stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
- cache->bytes_super += stripe_len;
- ret = add_excluded_extent(fs_info, cache->key.objectid,
- stripe_len);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- bytenr = btrfs_sb_offset(i);
- ret = btrfs_rmap_block(fs_info, cache->key.objectid,
- bytenr, &logical, &nr, &stripe_len);
- if (ret)
- return ret;
-
- while (nr--) {
- u64 start, len;
-
- if (logical[nr] > cache->key.objectid +
- cache->key.offset)
- continue;
-
- if (logical[nr] + stripe_len <= cache->key.objectid)
- continue;
-
- start = logical[nr];
- if (start < cache->key.objectid) {
- start = cache->key.objectid;
- len = (logical[nr] + stripe_len) - start;
- } else {
- len = min_t(u64, stripe_len,
- cache->key.objectid +
- cache->key.offset - start);
- }
-
- cache->bytes_super += len;
- ret = add_excluded_extent(fs_info, start, len);
- if (ret) {
- kfree(logical);
- return ret;
- }
- }
-
- kfree(logical);
- }
- return 0;
-}
-
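
exclude_super_stripes(), removed just above, walks every superblock mirror, asks btrfs_rmap_block() for the logical stripes backing it, and counts only the part of each stripe that actually overlaps the block group as bytes_super. A small sketch of that clamping arithmetic (user-space only; the names are made up for the example):

/*
 * Intersect a superblock mirror stripe with the block group's byte range and
 * count only the overlapping part.  Pure arithmetic, illustrative only.
 */
#include <stdio.h>

struct range { unsigned long long start, len; };

/* Return the number of bytes of @stripe that fall inside @bg (0 if none). */
static unsigned long long overlap_len(struct range bg, struct range stripe)
{
    unsigned long long bg_end = bg.start + bg.len;      /* exclusive */
    unsigned long long st_end = stripe.start + stripe.len;
    unsigned long long lo = stripe.start > bg.start ? stripe.start : bg.start;
    unsigned long long hi = st_end < bg_end ? st_end : bg_end;

    return hi > lo ? hi - lo : 0;
}

int main(void)
{
    struct range bg = { .start = 1024 * 1024, .len = 1024 * 1024 };
    struct range stripe = { .start = 1024 * 1024 - 4096, .len = 64 * 1024 };

    /* Only the part past the block group start is excluded. */
    printf("excluded bytes: %llu\n", overlap_len(bg, stripe));
    return 0;
}
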
-static struct btrfs_caching_control *
-get_caching_control(struct btrfs_block_group_cache *cache)
-{
- struct btrfs_caching_control *ctl;
-
- spin_lock(&cache->lock);
- if (!cache->caching_ctl) {
- spin_unlock(&cache->lock);
- return NULL;
- }
-
- ctl = cache->caching_ctl;
- refcount_inc(&ctl->count);
- spin_unlock(&cache->lock);
- return ctl;
-}
-
-static void put_caching_control(struct btrfs_caching_control *ctl)
-{
- if (refcount_dec_and_test(&ctl->count))
- kfree(ctl);
-}
-
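
The caching control removed just above is plain refcounting: a reference is taken only while the block group still points at the control, and the last put frees it. A user-space C11 sketch of that get/put pattern (the locking that protects the owner's pointer is omitted; all names are illustrative):

/*
 * get/put refcount sketch: take a reference only while the owner still holds
 * one, free the object when the last reference drops.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctl {
    atomic_int count;
    /* ... progress, wait queue, etc. in the real structure ... */
};

static struct ctl *ctl_get(struct ctl *c)
{
    if (!c)
        return NULL;                /* owner already dropped it */
    atomic_fetch_add(&c->count, 1);
    return c;
}

static void ctl_put(struct ctl *c)
{
    /* fetch_sub returns the old value; 1 means we held the last reference */
    if (atomic_fetch_sub(&c->count, 1) == 1) {
        printf("last reference, freeing\n");
        free(c);
    }
}

int main(void)
{
    struct ctl *owner = malloc(sizeof(*owner));
    struct ctl *user;

    if (!owner)
        return 1;
    atomic_init(&owner->count, 1);  /* the owner's reference */
    user = ctl_get(owner);          /* a waiter takes its own reference */
    ctl_put(owner);                 /* owner lets go */
    ctl_put(user);                  /* waiter lets go -> freed here */
    return 0;
}
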
-#ifdef CONFIG_BTRFS_DEBUG
-static void fragment_free_space(struct btrfs_block_group_cache *block_group)
-{
- struct btrfs_fs_info *fs_info = block_group->fs_info;
- u64 start = block_group->key.objectid;
- u64 len = block_group->key.offset;
- u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
- fs_info->nodesize : fs_info->sectorsize;
- u64 step = chunk << 1;
-
- while (len > chunk) {
- btrfs_remove_free_space(block_group, start, chunk);
- start += step;
- if (len < step)
- len = 0;
- else
- len -= step;
- }
-}
-#endif
-
-/*
- * this is only called by cache_block_group, since we could have freed extents
- * we need to check the pinned_extents for any extents that can't be used yet
- * since their free space will be released as soon as the transaction commits.
- */
-u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
- u64 start, u64 end)
-{
- struct btrfs_fs_info *info = block_group->fs_info;
- u64 extent_start, extent_end, size, total_added = 0;
- int ret;
-
- while (start < end) {
- ret = find_first_extent_bit(info->pinned_extents, start,
- &extent_start, &extent_end,
- EXTENT_DIRTY | EXTENT_UPTODATE,
- NULL);
- if (ret)
- break;
-
- if (extent_start <= start) {
- start = extent_end + 1;
- } else if (extent_start > start && extent_start < end) {
- size = extent_start - start;
- total_added += size;
- ret = btrfs_add_free_space(block_group, start,
- size);
- BUG_ON(ret); /* -ENOMEM or logic error */
- start = extent_end + 1;
- } else {
- break;
- }
- }
-
- if (start < end) {
- size = end - start;
- total_added += size;
- ret = btrfs_add_free_space(block_group, start, size);
- BUG_ON(ret); /* -ENOMEM or logic error */
- }
-
- return total_added;
-}
-
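
add_new_free_space(), removed just above, reports as free space every byte of [start, end) that is not covered by a pinned extent, walking the pinned ranges in order. A user-space sketch of that gap walk, with a sorted array standing in for the pinned extent io tree (names invented for the example):

/*
 * Report the unpinned gaps of [start, end) given sorted, non-overlapping
 * pinned ranges.  Illustrative only; the kernel walks an extent io tree.
 */
#include <stdio.h>
#include <stddef.h>

struct pinned { unsigned long long start, end; };   /* inclusive end */

static unsigned long long add_free(unsigned long long start,
                                   unsigned long long end)
{
    printf("free space: [%llu, %llu)\n", start, end);
    return end - start;
}

static unsigned long long walk_gaps(unsigned long long start,
                                    unsigned long long end,
                                    const struct pinned *p, size_t n)
{
    unsigned long long total = 0;
    size_t i;

    for (i = 0; i < n && start < end; i++) {
        if (p[i].end < start)
            continue;               /* pinned range is behind us */
        if (p[i].start >= end)
            break;                  /* pinned range is past the block group */
        if (p[i].start > start)
            total += add_free(start, p[i].start);
        start = p[i].end + 1;       /* skip the pinned bytes */
    }
    if (start < end)
        total += add_free(start, end);  /* trailing gap */
    return total;
}

int main(void)
{
    const struct pinned p[] = { { 100, 199 }, { 300, 349 } };

    printf("total free: %llu\n", walk_gaps(0, 500, p, 2));
    return 0;
}
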
-static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
-{
- struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
- struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_root *extent_root = fs_info->extent_root;
- struct btrfs_path *path;
- struct extent_buffer *leaf;
- struct btrfs_key key;
- u64 total_found = 0;
- u64 last = 0;
- u32 nritems;
- int ret;
- bool wakeup = true;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
-
-#ifdef CONFIG_BTRFS_DEBUG
- /*
- * If we're fragmenting we don't want to make anybody think we can
- * allocate from this block group until we've had a chance to fragment
- * the free space.
- */
- if (btrfs_should_fragment_free_space(block_group))
- wakeup = false;
-#endif
- /*
- * We don't want to deadlock with somebody trying to allocate a new
- * extent for the extent root while also trying to search the extent
- * root to add free space. So we skip locking and search the commit
- * root, since its read-only
- */
- path->skip_locking = 1;
- path->search_commit_root = 1;
- path->reada = READA_FORWARD;
-
- key.objectid = last;
- key.offset = 0;
- key.type = BTRFS_EXTENT_ITEM_KEY;
-
-next:
- ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
-
- while (1) {
- if (btrfs_fs_closing(fs_info) > 1) {
- last = (u64)-1;
- break;
- }
-
- if (path->slots[0] < nritems) {
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- } else {
- ret = find_next_key(path, 0, &key);
- if (ret)
- break;
-
- if (need_resched() ||
- rwsem_is_contended(&fs_info->commit_root_sem)) {
- if (wakeup)
- caching_ctl->progress = last;
- btrfs_release_path(path);
- up_read(&fs_info->commit_root_sem);
- mutex_unlock(&caching_ctl->mutex);
- cond_resched();
- mutex_lock(&caching_ctl->mutex);
- down_read(&fs_info->commit_root_sem);
- goto next;
- }
-
- ret = btrfs_next_leaf(extent_root, path);
- if (ret < 0)
- goto out;
- if (ret)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- continue;
- }
-
- if (key.objectid < last) {
- key.objectid = last;
- key.offset = 0;
- key.type = BTRFS_EXTENT_ITEM_KEY;
-
- if (wakeup)
- caching_ctl->progress = last;
- btrfs_release_path(path);
- goto next;
- }
-
- if (key.objectid < block_group->key.objectid) {
- path->slots[0]++;
- continue;
- }
-
- if (key.objectid >= block_group->key.objectid +
- block_group->key.offset)
- break;
-
- if (key.type == BTRFS_EXTENT_ITEM_KEY ||
- key.type == BTRFS_METADATA_ITEM_KEY) {
- total_found += add_new_free_space(block_group, last,
- key.objectid);
- if (key.type == BTRFS_METADATA_ITEM_KEY)
- last = key.objectid +
- fs_info->nodesize;
- else
- last = key.objectid + key.offset;
-
- if (total_found > CACHING_CTL_WAKE_UP) {
- total_found = 0;
- if (wakeup)
- wake_up(&caching_ctl->wait);
- }
- }
- path->slots[0]++;
- }
- ret = 0;
-
- total_found += add_new_free_space(block_group, last,
- block_group->key.objectid +
- block_group->key.offset);
- caching_ctl->progress = (u64)-1;
-
-out:
- btrfs_free_path(path);
- return ret;
-}
-
-static noinline void caching_thread(struct btrfs_work *work)
-{
- struct btrfs_block_group_cache *block_group;
- struct btrfs_fs_info *fs_info;
- struct btrfs_caching_control *caching_ctl;
- int ret;
-
- caching_ctl = container_of(work, struct btrfs_caching_control, work);
- block_group = caching_ctl->block_group;
- fs_info = block_group->fs_info;
-
- mutex_lock(&caching_ctl->mutex);
- down_read(&fs_info->commit_root_sem);
-
- if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
- ret = load_free_space_tree(caching_ctl);
- else
- ret = load_extent_tree_free(caching_ctl);
-
- spin_lock(&block_group->lock);
- block_group->caching_ctl = NULL;
- block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
- spin_unlock(&block_group->lock);
-
-#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(block_group)) {
- u64 bytes_used;
-
- spin_lock(&block_group->space_info->lock);
- spin_lock(&block_group->lock);
- bytes_used = block_group->key.offset -
- btrfs_block_group_used(&block_group->item);
- block_group->space_info->bytes_used += bytes_used >> 1;
- spin_unlock(&block_group->lock);
- spin_unlock(&block_group->space_info->lock);
- fragment_free_space(block_group);
- }
-#endif
-
- caching_ctl->progress = (u64)-1;
-
- up_read(&fs_info->commit_root_sem);
- free_excluded_extents(block_group);
- mutex_unlock(&caching_ctl->mutex);
-
- wake_up(&caching_ctl->wait);
-
- put_caching_control(caching_ctl);
- btrfs_put_block_group(block_group);
-}
-
-static int cache_block_group(struct btrfs_block_group_cache *cache,
- int load_cache_only)
-{
- DEFINE_WAIT(wait);
- struct btrfs_fs_info *fs_info = cache->fs_info;
- struct btrfs_caching_control *caching_ctl;
- int ret = 0;
-
- caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
- if (!caching_ctl)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&caching_ctl->list);
- mutex_init(&caching_ctl->mutex);
- init_waitqueue_head(&caching_ctl->wait);
- caching_ctl->block_group = cache;
- caching_ctl->progress = cache->key.objectid;
- refcount_set(&caching_ctl->count, 1);
- btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
- caching_thread, NULL, NULL);
-
- spin_lock(&cache->lock);
- /*
- * This should be a rare occasion, but this could happen I think in the
- * case where one thread starts to load the space cache info, and then
- * some other thread starts a transaction commit which tries to do an
- * allocation while the other thread is still loading the space cache
- * info. The previous loop should have kept us from choosing this block
- * group, but if we've moved to the state where we will wait on caching
- * block groups we need to first check if we're doing a fast load here,
- * so we can wait for it to finish, otherwise we could end up allocating
- * from a block group who's cache gets evicted for one reason or
- * another.
- */
- while (cache->cached == BTRFS_CACHE_FAST) {
- struct btrfs_caching_control *ctl;
-
- ctl = cache->caching_ctl;
- refcount_inc(&ctl->count);
- prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock(&cache->lock);
-
- schedule();
-
- finish_wait(&ctl->wait, &wait);
- put_caching_control(ctl);
- spin_lock(&cache->lock);
- }
-
- if (cache->cached != BTRFS_CACHE_NO) {
- spin_unlock(&cache->lock);
- kfree(caching_ctl);
- return 0;
- }
- WARN_ON(cache->caching_ctl);
- cache->caching_ctl = caching_ctl;
- cache->cached = BTRFS_CACHE_FAST;
- spin_unlock(&cache->lock);
-
- if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
- mutex_lock(&caching_ctl->mutex);
- ret = load_free_space_cache(cache);
-
- spin_lock(&cache->lock);
- if (ret == 1) {
- cache->caching_ctl = NULL;
- cache->cached = BTRFS_CACHE_FINISHED;
- cache->last_byte_to_unpin = (u64)-1;
- caching_ctl->progress = (u64)-1;
- } else {
- if (load_cache_only) {
- cache->caching_ctl = NULL;
- cache->cached = BTRFS_CACHE_NO;
- } else {
- cache->cached = BTRFS_CACHE_STARTED;
- cache->has_caching_ctl = 1;
- }
- }
- spin_unlock(&cache->lock);
-#ifdef CONFIG_BTRFS_DEBUG
- if (ret == 1 &&
- btrfs_should_fragment_free_space(cache)) {
- u64 bytes_used;
-
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- bytes_used = cache->key.offset -
- btrfs_block_group_used(&cache->item);
- cache->space_info->bytes_used += bytes_used >> 1;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
- fragment_free_space(cache);
- }
-#endif
- mutex_unlock(&caching_ctl->mutex);
-
- wake_up(&caching_ctl->wait);
- if (ret == 1) {
- put_caching_control(caching_ctl);
- free_excluded_extents(cache);
- return 0;
- }
- } else {
- /*
- * We're either using the free space tree or no caching at all.
- * Set cached to the appropriate value and wakeup any waiters.
- */
- spin_lock(&cache->lock);
- if (load_cache_only) {
- cache->caching_ctl = NULL;
- cache->cached = BTRFS_CACHE_NO;
- } else {
- cache->cached = BTRFS_CACHE_STARTED;
- cache->has_caching_ctl = 1;
- }
- spin_unlock(&cache->lock);
- wake_up(&caching_ctl->wait);
- }
-
- if (load_cache_only) {
- put_caching_control(caching_ctl);
- return 0;
- }
-
- down_write(&fs_info->commit_root_sem);
- refcount_inc(&caching_ctl->count);
- list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
- up_write(&fs_info->commit_root_sem);
-
- btrfs_get_block_group(cache);
-
- btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
-
- return ret;
-}
-
-/*
- * return the block group that starts at or after bytenr
- */
-static struct btrfs_block_group_cache *
-btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
-{
- return block_group_cache_tree_search(info, bytenr, 0);
-}
-
-/*
- * return the block group that contains the given bytenr
- */
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
- struct btrfs_fs_info *info,
- u64 bytenr)
-{
- return block_group_cache_tree_search(info, bytenr, 1);
-}
-
static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
{
if (ref->type == BTRFS_REF_METADATA) {
@@ -1044,7 +438,7 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
return BTRFS_REF_TYPE_INVALID;
}
-static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
+u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
u32 high_crc = ~(u32)0;
u32 low_crc = ~(u32)0;
@@ -1912,8 +1306,10 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes)
{
- int ret;
+ int ret = 0;
u64 discarded_bytes = 0;
+ u64 end = bytenr + num_bytes;
+ u64 cur = bytenr;
struct btrfs_bio *bbio = NULL;
@@ -1922,15 +1318,23 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
* associated to its stripes that don't go away while we are discarding.
*/
btrfs_bio_counter_inc_blocked(fs_info);
- /* Tell the block device(s) that the sectors can be discarded */
- ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
- &bbio, 0);
- /* Error condition is -ENOMEM */
- if (!ret) {
- struct btrfs_bio_stripe *stripe = bbio->stripes;
+ while (cur < end) {
+ struct btrfs_bio_stripe *stripe;
int i;
+ num_bytes = end - cur;
+ /* Tell the block device(s) that the sectors can be discarded */
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
+ &num_bytes, &bbio, 0);
+ /*
+ * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
+ * -EOPNOTSUPP. For any such error, @num_bytes is not updated,
+ * thus we can't continue anyway.
+ */
+ if (ret < 0)
+ goto out;
+ stripe = bbio->stripes;
for (i = 0; i < bbio->num_stripes; i++, stripe++) {
u64 bytes;
struct request_queue *req_q;
@@ -1947,10 +1351,19 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
stripe->physical,
stripe->length,
&bytes);
- if (!ret)
+ if (!ret) {
discarded_bytes += bytes;
- else if (ret != -EOPNOTSUPP)
- break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
+ } else if (ret != -EOPNOTSUPP) {
+ /*
+ * Logic errors or -ENOMEM, or -EIO, but
+ * unlikely to happen.
+ *
+ * And since there are two loops, explicitly
+ * go to out to avoid confusion.
+ */
+ btrfs_put_bbio(bbio);
+ goto out;
+ }
/*
* Just in case we get back EOPNOTSUPP for some reason,
@@ -1960,7 +1373,9 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
ret = 0;
}
btrfs_put_bbio(bbio);
+ cur += num_bytes;
}
+out:
btrfs_bio_counter_dec(fs_info);
if (actual_bytes)
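
The rewritten discard path above no longer assumes one btrfs_map_block() call covers the whole range: each call may shrink num_bytes to the chunk mapping it found, that piece is discarded stripe by stripe, and cur advances by the mapped length. A user-space sketch of the splitting loop (map_piece() is a stand-in for the mapping step and simply caps each piece at a fixed chunk size; all names are illustrative):

/*
 * Split [cur, end) into pieces bounded by chunk boundaries and "discard" each
 * piece, advancing by however much the mapping step covered.
 */
#include <stdio.h>

#define CHUNK_SIZE (256ULL * 1024 * 1024)

/* Pretend mapping: clamp the request to the end of the current chunk. */
static unsigned long long map_piece(unsigned long long cur,
                                    unsigned long long len)
{
    unsigned long long chunk_end = (cur / CHUNK_SIZE + 1) * CHUNK_SIZE;

    return len < chunk_end - cur ? len : chunk_end - cur;
}

int main(void)
{
    unsigned long long cur = 200ULL * 1024 * 1024;   /* 200MiB */
    unsigned long long end = 700ULL * 1024 * 1024;   /* 700MiB */
    unsigned long long discarded = 0;

    while (cur < end) {
        unsigned long long len = map_piece(cur, end - cur);

        printf("discard [%llu, %llu)\n", cur, cur + len);
        discarded += len;   /* the real code sums per-stripe byte counts */
        cur += len;         /* advance by what the mapping covered */
    }
    printf("total discarded: %llu\n", discarded);
    return 0;
}
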
@@ -2963,16 +2378,19 @@ static noinline int check_committed_ref(struct btrfs_root *root,
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ /* If extent item has more than 1 inline ref then it's shared */
if (item_size != sizeof(*ei) +
btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
goto out;
+ /* If extent created before last snapshot => it's definitely shared */
if (btrfs_extent_generation(leaf, ei) <=
btrfs_root_last_snapshot(&root->root_item))
goto out;
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ /* If this extent has SHARED_DATA_REF then it's shared */
type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
if (type != BTRFS_EXTENT_DATA_REF_KEY)
goto out;
@@ -3117,555 +2535,9 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
-static int write_one_cache_group(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct btrfs_block_group_cache *cache)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- int ret;
- struct btrfs_root *extent_root = fs_info->extent_root;
- unsigned long bi;
- struct extent_buffer *leaf;
-
- ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
- if (ret) {
- if (ret > 0)
- ret = -ENOENT;
- goto fail;
- }
-
- leaf = path->nodes[0];
- bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
- write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
- btrfs_mark_buffer_dirty(leaf);
-fail:
- btrfs_release_path(path);
- return ret;
-
-}
-
-static struct btrfs_block_group_cache *next_block_group(
- struct btrfs_block_group_cache *cache)
-{
- struct btrfs_fs_info *fs_info = cache->fs_info;
- struct rb_node *node;
-
- spin_lock(&fs_info->block_group_cache_lock);
-
- /* If our block group was removed, we need a full search. */
- if (RB_EMPTY_NODE(&cache->cache_node)) {
- const u64 next_bytenr = cache->key.objectid + cache->key.offset;
-
- spin_unlock(&fs_info->block_group_cache_lock);
- btrfs_put_block_group(cache);
- cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
- }
- node = rb_next(&cache->cache_node);
- btrfs_put_block_group(cache);
- if (node) {
- cache = rb_entry(node, struct btrfs_block_group_cache,
- cache_node);
- btrfs_get_block_group(cache);
- } else
- cache = NULL;
- spin_unlock(&fs_info->block_group_cache_lock);
- return cache;
-}
-
-static int cache_save_setup(struct btrfs_block_group_cache *block_group,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path)
-{
- struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_root *root = fs_info->tree_root;
- struct inode *inode = NULL;
- struct extent_changeset *data_reserved = NULL;
- u64 alloc_hint = 0;
- int dcs = BTRFS_DC_ERROR;
- u64 num_pages = 0;
- int retries = 0;
- int ret = 0;
-
- /*
- * If this block group is smaller than 100 megs don't bother caching the
- * block group.
- */
- if (block_group->key.offset < (100 * SZ_1M)) {
- spin_lock(&block_group->lock);
- block_group->disk_cache_state = BTRFS_DC_WRITTEN;
- spin_unlock(&block_group->lock);
- return 0;
- }
-
- if (trans->aborted)
- return 0;
-again:
- inode = lookup_free_space_inode(block_group, path);
- if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
- ret = PTR_ERR(inode);
- btrfs_release_path(path);
- goto out;
- }
-
- if (IS_ERR(inode)) {
- BUG_ON(retries);
- retries++;
-
- if (block_group->ro)
- goto out_free;
-
- ret = create_free_space_inode(trans, block_group, path);
- if (ret)
- goto out_free;
- goto again;
- }
-
- /*
- * We want to set the generation to 0, that way if anything goes wrong
- * from here on out we know not to trust this cache when we load up next
- * time.
- */
- BTRFS_I(inode)->generation = 0;
- ret = btrfs_update_inode(trans, root, inode);
- if (ret) {
- /*
- * So theoretically we could recover from this, simply set the
- * super cache generation to 0 so we know to invalidate the
- * cache, but then we'd have to keep track of the block groups
- * that fail this way so we know we _have_ to reset this cache
- * before the next commit or risk reading stale cache. So to
- * limit our exposure to horrible edge cases lets just abort the
- * transaction, this only happens in really bad situations
- * anyway.
- */
- btrfs_abort_transaction(trans, ret);
- goto out_put;
- }
- WARN_ON(ret);
-
- /* We've already setup this transaction, go ahead and exit */
- if (block_group->cache_generation == trans->transid &&
- i_size_read(inode)) {
- dcs = BTRFS_DC_SETUP;
- goto out_put;
- }
-
- if (i_size_read(inode) > 0) {
- ret = btrfs_check_trunc_cache_free_space(fs_info,
- &fs_info->global_block_rsv);
- if (ret)
- goto out_put;
-
- ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
- if (ret)
- goto out_put;
- }
-
- spin_lock(&block_group->lock);
- if (block_group->cached != BTRFS_CACHE_FINISHED ||
- !btrfs_test_opt(fs_info, SPACE_CACHE)) {
- /*
- * don't bother trying to write stuff out _if_
- * a) we're not cached,
- * b) we're with nospace_cache mount option,
- * c) we're with v2 space_cache (FREE_SPACE_TREE).
- */
- dcs = BTRFS_DC_WRITTEN;
- spin_unlock(&block_group->lock);
- goto out_put;
- }
- spin_unlock(&block_group->lock);
-
- /*
- * We hit an ENOSPC when setting up the cache in this transaction, just
- * skip doing the setup, we've already cleared the cache so we're safe.
- */
- if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
- ret = -ENOSPC;
- goto out_put;
- }
-
- /*
- * Try to preallocate enough space based on how big the block group is.
- * Keep in mind this has to include any pinned space which could end up
- * taking up quite a bit since it's not folded into the other space
- * cache.
- */
- num_pages = div_u64(block_group->key.offset, SZ_256M);
- if (!num_pages)
- num_pages = 1;
-
- num_pages *= 16;
- num_pages *= PAGE_SIZE;
-
- ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
- if (ret)
- goto out_put;
-
- ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
- num_pages, num_pages,
- &alloc_hint);
- /*
- * Our cache requires contiguous chunks so that we don't modify a bunch
- * of metadata or split extents when writing the cache out, which means
- * we can enospc if we are heavily fragmented in addition to just normal
- * out of space conditions. So if we hit this just skip setting up any
- * other block groups for this transaction, maybe we'll unpin enough
- * space the next time around.
- */
- if (!ret)
- dcs = BTRFS_DC_SETUP;
- else if (ret == -ENOSPC)
- set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
-
-out_put:
- iput(inode);
-out_free:
- btrfs_release_path(path);
-out:
- spin_lock(&block_group->lock);
- if (!ret && dcs == BTRFS_DC_SETUP)
- block_group->cache_generation = trans->transid;
- block_group->disk_cache_state = dcs;
- spin_unlock(&block_group->lock);
-
- extent_changeset_free(data_reserved);
- return ret;
-}
-
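
cache_save_setup(), removed just above, skips groups smaller than 100MiB entirely and otherwise preallocates the free space cache file at 16 pages per 256MiB of block group, with a minimum of one such unit. A sketch of that sizing rule (user-space arithmetic; PAGE_SIZE is assumed to be 4096 purely for the demo):

/*
 * Space cache prealloc sizing: 16 pages per 256MiB of block group, with a
 * one-unit minimum.  Illustrative arithmetic only.
 */
#include <stdio.h>

#define SZ_256M      (256ULL * 1024 * 1024)
#define PAGE_SIZE_EX 4096ULL                 /* assumption for the demo */

static unsigned long long cache_prealloc_bytes(unsigned long long bg_len)
{
    unsigned long long num_pages = bg_len / SZ_256M;

    if (!num_pages)
        num_pages = 1;      /* small groups still get one unit */
    num_pages *= 16;        /* 16 pages per 256MiB */
    return num_pages * PAGE_SIZE_EX;
}

int main(void)
{
    printf("1GiB group   -> %llu bytes of cache\n",
           cache_prealloc_bytes(1024ULL * 1024 * 1024));
    printf("200MiB group -> %llu bytes of cache\n",
           cache_prealloc_bytes(200ULL * 1024 * 1024));
    return 0;
}
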
-int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *cache, *tmp;
- struct btrfs_transaction *cur_trans = trans->transaction;
- struct btrfs_path *path;
-
- if (list_empty(&cur_trans->dirty_bgs) ||
- !btrfs_test_opt(fs_info, SPACE_CACHE))
- return 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- /* Could add new block groups, use _safe just in case */
- list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
- dirty_list) {
- if (cache->disk_cache_state == BTRFS_DC_CLEAR)
- cache_save_setup(cache, trans, path);
- }
-
- btrfs_free_path(path);
- return 0;
-}
-
-/*
- * transaction commit does final block group cache writeback during a
- * critical section where nothing is allowed to change the FS. This is
- * required in order for the cache to actually match the block group,
- * but can introduce a lot of latency into the commit.
- *
- * So, btrfs_start_dirty_block_groups is here to kick off block group
- * cache IO. There's a chance we'll have to redo some of it if the
- * block group changes again during the commit, but it greatly reduces
- * the commit latency by getting rid of the easy block groups while
- * we're still allowing others to join the commit.
- */
-int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *cache;
- struct btrfs_transaction *cur_trans = trans->transaction;
- int ret = 0;
- int should_put;
- struct btrfs_path *path = NULL;
- LIST_HEAD(dirty);
- struct list_head *io = &cur_trans->io_bgs;
- int num_started = 0;
- int loops = 0;
-
- spin_lock(&cur_trans->dirty_bgs_lock);
- if (list_empty(&cur_trans->dirty_bgs)) {
- spin_unlock(&cur_trans->dirty_bgs_lock);
- return 0;
- }
- list_splice_init(&cur_trans->dirty_bgs, &dirty);
- spin_unlock(&cur_trans->dirty_bgs_lock);
-
-again:
- /*
- * make sure all the block groups on our dirty list actually
- * exist
- */
- btrfs_create_pending_block_groups(trans);
-
- if (!path) {
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- }
-
- /*
- * cache_write_mutex is here only to save us from balance or automatic
- * removal of empty block groups deleting this block group while we are
- * writing out the cache
- */
- mutex_lock(&trans->transaction->cache_write_mutex);
- while (!list_empty(&dirty)) {
- bool drop_reserve = true;
-
- cache = list_first_entry(&dirty,
- struct btrfs_block_group_cache,
- dirty_list);
- /*
- * this can happen if something re-dirties a block
- * group that is already under IO. Just wait for it to
- * finish and then do it all again
- */
- if (!list_empty(&cache->io_list)) {
- list_del_init(&cache->io_list);
- btrfs_wait_cache_io(trans, cache, path);
- btrfs_put_block_group(cache);
- }
-
-
- /*
- * btrfs_wait_cache_io uses the cache->dirty_list to decide
- * if it should update the cache_state. Don't delete
- * until after we wait.
- *
- * Since we're not running in the commit critical section
- * we need the dirty_bgs_lock to protect from update_block_group
- */
- spin_lock(&cur_trans->dirty_bgs_lock);
- list_del_init(&cache->dirty_list);
- spin_unlock(&cur_trans->dirty_bgs_lock);
-
- should_put = 1;
-
- cache_save_setup(cache, trans, path);
-
- if (cache->disk_cache_state == BTRFS_DC_SETUP) {
- cache->io_ctl.inode = NULL;
- ret = btrfs_write_out_cache(trans, cache, path);
- if (ret == 0 && cache->io_ctl.inode) {
- num_started++;
- should_put = 0;
-
- /*
- * The cache_write_mutex is protecting the
- * io_list, also refer to the definition of
- * btrfs_transaction::io_bgs for more details
- */
- list_add_tail(&cache->io_list, io);
- } else {
- /*
- * if we failed to write the cache, the
- * generation will be bad and life goes on
- */
- ret = 0;
- }
- }
- if (!ret) {
- ret = write_one_cache_group(trans, path, cache);
- /*
- * Our block group might still be attached to the list
- * of new block groups in the transaction handle of some
- * other task (struct btrfs_trans_handle->new_bgs). This
- * means its block group item isn't yet in the extent
- * tree. If this happens ignore the error, as we will
- * try again later in the critical section of the
- * transaction commit.
- */
- if (ret == -ENOENT) {
- ret = 0;
- spin_lock(&cur_trans->dirty_bgs_lock);
- if (list_empty(&cache->dirty_list)) {
- list_add_tail(&cache->dirty_list,
- &cur_trans->dirty_bgs);
- btrfs_get_block_group(cache);
- drop_reserve = false;
- }
- spin_unlock(&cur_trans->dirty_bgs_lock);
- } else if (ret) {
- btrfs_abort_transaction(trans, ret);
- }
- }
-
- /* if it's not on the io list, we need to put the block group */
- if (should_put)
- btrfs_put_block_group(cache);
- if (drop_reserve)
- btrfs_delayed_refs_rsv_release(fs_info, 1);
-
- if (ret)
- break;
-
- /*
- * Avoid blocking other tasks for too long. It might even save
- * us from writing caches for block groups that are going to be
- * removed.
- */
- mutex_unlock(&trans->transaction->cache_write_mutex);
- mutex_lock(&trans->transaction->cache_write_mutex);
- }
- mutex_unlock(&trans->transaction->cache_write_mutex);
-
- /*
- * go through delayed refs for all the stuff we've just kicked off
- * and then loop back (just once)
- */
- ret = btrfs_run_delayed_refs(trans, 0);
- if (!ret && loops == 0) {
- loops++;
- spin_lock(&cur_trans->dirty_bgs_lock);
- list_splice_init(&cur_trans->dirty_bgs, &dirty);
- /*
- * dirty_bgs_lock protects us from concurrent block group
- * deletes too (not just cache_write_mutex).
- */
- if (!list_empty(&dirty)) {
- spin_unlock(&cur_trans->dirty_bgs_lock);
- goto again;
- }
- spin_unlock(&cur_trans->dirty_bgs_lock);
- } else if (ret < 0) {
- btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
- }
-
- btrfs_free_path(path);
- return ret;
-}
-
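
btrfs_start_dirty_block_groups(), removed just above, drains the transaction's dirty list into a private list, writes each group out, and then loops back exactly once to pick up groups that were re-dirtied while it worked. A user-space sketch of that splice-and-retry pattern, with a trivial singly linked list standing in for the kernel list and its locking (all names invented):

/*
 * Splice the shared dirty list to a local list, process it, and make one
 * extra pass for anything queued meanwhile.  No locking here.
 */
#include <stdio.h>
#include <stdlib.h>

struct bg {
    int id;
    struct bg *next;
};

static struct bg *dirty;                /* shared dirty list */

static void mark_dirty(int id)
{
    struct bg *b = malloc(sizeof(*b));

    if (!b)
        exit(1);
    b->id = id;
    b->next = dirty;
    dirty = b;
}

static void write_out(struct bg *b)
{
    printf("writing block group %d\n", b->id);
    if (b->id == 1)
        mark_dirty(10);     /* simulate a re-dirty during writeout */
}

int main(void)
{
    int loops = 0;

    mark_dirty(1);
    mark_dirty(2);

    while (dirty && loops < 2) {        /* one full pass plus one retry */
        struct bg *local = dirty;       /* "splice": steal the whole list */

        dirty = NULL;
        loops++;
        while (local) {
            struct bg *b = local;

            local = local->next;
            write_out(b);
            free(b);
        }
    }
    return 0;
}
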
-int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *cache;
- struct btrfs_transaction *cur_trans = trans->transaction;
- int ret = 0;
- int should_put;
- struct btrfs_path *path;
- struct list_head *io = &cur_trans->io_bgs;
- int num_started = 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- /*
- * Even though we are in the critical section of the transaction commit,
- * we can still have concurrent tasks adding elements to this
- * transaction's list of dirty block groups. These tasks correspond to
- * endio free space workers started when writeback finishes for a
- * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
- * allocate new block groups as a result of COWing nodes of the root
- * tree when updating the free space inode. The writeback for the space
- * caches is triggered by an earlier call to
- * btrfs_start_dirty_block_groups() and iterations of the following
- * loop.
- * Also we want to do the cache_save_setup first and then run the
- * delayed refs to make sure we have the best chance at doing this all
- * in one shot.
- */
- spin_lock(&cur_trans->dirty_bgs_lock);
- while (!list_empty(&cur_trans->dirty_bgs)) {
- cache = list_first_entry(&cur_trans->dirty_bgs,
- struct btrfs_block_group_cache,
- dirty_list);
-
- /*
- * this can happen if cache_save_setup re-dirties a block
- * group that is already under IO. Just wait for it to
- * finish and then do it all again
- */
- if (!list_empty(&cache->io_list)) {
- spin_unlock(&cur_trans->dirty_bgs_lock);
- list_del_init(&cache->io_list);
- btrfs_wait_cache_io(trans, cache, path);
- btrfs_put_block_group(cache);
- spin_lock(&cur_trans->dirty_bgs_lock);
- }
-
- /*
- * don't remove from the dirty list until after we've waited
- * on any pending IO
- */
- list_del_init(&cache->dirty_list);
- spin_unlock(&cur_trans->dirty_bgs_lock);
- should_put = 1;
-
- cache_save_setup(cache, trans, path);
-
- if (!ret)
- ret = btrfs_run_delayed_refs(trans,
- (unsigned long) -1);
-
- if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
- cache->io_ctl.inode = NULL;
- ret = btrfs_write_out_cache(trans, cache, path);
- if (ret == 0 && cache->io_ctl.inode) {
- num_started++;
- should_put = 0;
- list_add_tail(&cache->io_list, io);
- } else {
- /*
- * if we failed to write the cache, the
- * generation will be bad and life goes on
- */
- ret = 0;
- }
- }
- if (!ret) {
- ret = write_one_cache_group(trans, path, cache);
- /*
- * One of the free space endio workers might have
- * created a new block group while updating a free space
- * cache's inode (at inode.c:btrfs_finish_ordered_io())
- * and hasn't released its transaction handle yet, in
- * which case the new block group is still attached to
- * its transaction handle and its creation has not
- * finished yet (no block group item in the extent tree
- * yet, etc). If this is the case, wait for all free
- * space endio workers to finish and retry. This is a
- * a very rare case so no need for a more efficient and
- * complex approach.
- */
- if (ret == -ENOENT) {
- wait_event(cur_trans->writer_wait,
- atomic_read(&cur_trans->num_writers) == 1);
- ret = write_one_cache_group(trans, path, cache);
- }
- if (ret)
- btrfs_abort_transaction(trans, ret);
- }
-
- /* if its not on the io list, we need to put the block group */
- if (should_put)
- btrfs_put_block_group(cache);
- btrfs_delayed_refs_rsv_release(fs_info, 1);
- spin_lock(&cur_trans->dirty_bgs_lock);
- }
- spin_unlock(&cur_trans->dirty_bgs_lock);
-
- /*
- * Refer to the definition of io_bgs member for details why it's safe
- * to use it without any locking
- */
- while (!list_empty(io)) {
- cache = list_first_entry(io, struct btrfs_block_group_cache,
- io_list);
- list_del_init(&cache->io_list);
- btrfs_wait_cache_io(trans, cache, path);
- btrfs_put_block_group(cache);
- }
-
- btrfs_free_path(path);
- return ret;
-}
-
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
int readonly = 0;
block_group = btrfs_lookup_block_group(fs_info, bytenr);
@@ -3676,166 +2548,6 @@ int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
return readonly;
}
-bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
-{
- struct btrfs_block_group_cache *bg;
- bool ret = true;
-
- bg = btrfs_lookup_block_group(fs_info, bytenr);
- if (!bg)
- return false;
-
- spin_lock(&bg->lock);
- if (bg->ro)
- ret = false;
- else
- atomic_inc(&bg->nocow_writers);
- spin_unlock(&bg->lock);
-
- /* no put on block group, done by btrfs_dec_nocow_writers */
- if (!ret)
- btrfs_put_block_group(bg);
-
- return ret;
-
-}
-
-void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
-{
- struct btrfs_block_group_cache *bg;
-
- bg = btrfs_lookup_block_group(fs_info, bytenr);
- ASSERT(bg);
- if (atomic_dec_and_test(&bg->nocow_writers))
- wake_up_var(&bg->nocow_writers);
- /*
- * Once for our lookup and once for the lookup done by a previous call
- * to btrfs_inc_nocow_writers()
- */
- btrfs_put_block_group(bg);
- btrfs_put_block_group(bg);
-}
-
-void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
-{
- wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
-}
-
-static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
-{
- u64 extra_flags = chunk_to_extended(flags) &
- BTRFS_EXTENDED_PROFILE_MASK;
-
- write_seqlock(&fs_info->profiles_lock);
- if (flags & BTRFS_BLOCK_GROUP_DATA)
- fs_info->avail_data_alloc_bits |= extra_flags;
- if (flags & BTRFS_BLOCK_GROUP_METADATA)
- fs_info->avail_metadata_alloc_bits |= extra_flags;
- if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
- fs_info->avail_system_alloc_bits |= extra_flags;
- write_sequnlock(&fs_info->profiles_lock);
-}
-
-/*
- * returns target flags in extended format or 0 if restripe for this
- * chunk_type is not in progress
- *
- * should be called with balance_lock held
- */
-static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
-{
- struct btrfs_balance_control *bctl = fs_info->balance_ctl;
- u64 target = 0;
-
- if (!bctl)
- return 0;
-
- if (flags & BTRFS_BLOCK_GROUP_DATA &&
- bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
- target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
- } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
- bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
- target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
- } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
- bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
- target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
- }
-
- return target;
-}
-
-/*
- * @flags: available profiles in extended format (see ctree.h)
- *
- * Returns reduced profile in chunk format. If profile changing is in
- * progress (either running or paused) picks the target profile (if it's
- * already available), otherwise falls back to plain reducing.
- */
-static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
-{
- u64 num_devices = fs_info->fs_devices->rw_devices;
- u64 target;
- u64 raid_type;
- u64 allowed = 0;
-
- /*
- * see if restripe for this chunk_type is in progress, if so
- * try to reduce to the target profile
- */
- spin_lock(&fs_info->balance_lock);
- target = get_restripe_target(fs_info, flags);
- if (target) {
- /* pick target profile only if it's already available */
- if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
- spin_unlock(&fs_info->balance_lock);
- return extended_to_chunk(target);
- }
- }
- spin_unlock(&fs_info->balance_lock);
-
- /* First, mask out the RAID levels which aren't possible */
- for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
- if (num_devices >= btrfs_raid_array[raid_type].devs_min)
- allowed |= btrfs_raid_array[raid_type].bg_flag;
- }
- allowed &= flags;
-
- if (allowed & BTRFS_BLOCK_GROUP_RAID6)
- allowed = BTRFS_BLOCK_GROUP_RAID6;
- else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
- allowed = BTRFS_BLOCK_GROUP_RAID5;
- else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
- allowed = BTRFS_BLOCK_GROUP_RAID10;
- else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
- allowed = BTRFS_BLOCK_GROUP_RAID1;
- else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
- allowed = BTRFS_BLOCK_GROUP_RAID0;
-
- flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
- return extended_to_chunk(flags | allowed);
-}
-
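
btrfs_reduce_alloc_profile(), removed just above, first masks out RAID levels that the current number of writable devices cannot support and then keeps only the strongest surviving profile, preferring RAID6 over RAID5 over RAID10 over RAID1 over RAID0. A sketch of that selection (the bit values and device minimums below are stand-ins, not the on-disk flags):

/*
 * Drop profiles that need more devices than are available, then keep the
 * single highest-priority profile that survives.  Illustrative only.
 */
#include <stdio.h>

enum {
    PROF_RAID0  = 1 << 0,
    PROF_RAID1  = 1 << 1,
    PROF_RAID10 = 1 << 2,
    PROF_RAID5  = 1 << 3,
    PROF_RAID6  = 1 << 4,
};

struct raid_req { unsigned int flag; unsigned int devs_min; };

static const struct raid_req raid_table[] = {
    { PROF_RAID0, 2 }, { PROF_RAID1, 2 }, { PROF_RAID10, 4 },
    { PROF_RAID5, 2 }, { PROF_RAID6, 3 },
};

static unsigned int reduce_profile(unsigned int flags, unsigned int num_devices)
{
    unsigned int allowed = 0;
    size_t i;

    /* First, mask out the RAID levels which aren't possible. */
    for (i = 0; i < sizeof(raid_table) / sizeof(raid_table[0]); i++)
        if (num_devices >= raid_table[i].devs_min)
            allowed |= raid_table[i].flag;
    allowed &= flags;

    /* Then keep only the strongest surviving profile. */
    if (allowed & PROF_RAID6)
        return PROF_RAID6;
    if (allowed & PROF_RAID5)
        return PROF_RAID5;
    if (allowed & PROF_RAID10)
        return PROF_RAID10;
    if (allowed & PROF_RAID1)
        return PROF_RAID1;
    if (allowed & PROF_RAID0)
        return PROF_RAID0;
    return 0;   /* nothing survives: the kernel falls back to single/dup */
}

int main(void)
{
    /* RAID6 and RAID1 requested, but only 2 devices: RAID1 wins. */
    printf("profile: %#x\n", reduce_profile(PROF_RAID6 | PROF_RAID1, 2));
    return 0;
}
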
-static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
-{
- unsigned seq;
- u64 flags;
-
- do {
- flags = orig_flags;
- seq = read_seqbegin(&fs_info->profiles_lock);
-
- if (flags & BTRFS_BLOCK_GROUP_DATA)
- flags |= fs_info->avail_data_alloc_bits;
- else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
- flags |= fs_info->avail_system_alloc_bits;
- else if (flags & BTRFS_BLOCK_GROUP_METADATA)
- flags |= fs_info->avail_metadata_alloc_bits;
- } while (read_seqretry(&fs_info->profiles_lock, seq));
-
- return btrfs_reduce_alloc_profile(fs_info, flags);
-}
-
static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -3849,374 +2561,13 @@ static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
else
flags = BTRFS_BLOCK_GROUP_METADATA;
- ret = get_alloc_profile(fs_info, flags);
- return ret;
-}
-
-u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
-{
- return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
-}
-
-u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
-{
- return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
-}
-
-u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
-{
- return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
-}
-
-static void force_metadata_allocation(struct btrfs_fs_info *info)
-{
- struct list_head *head = &info->space_info;
- struct btrfs_space_info *found;
-
- rcu_read_lock();
- list_for_each_entry_rcu(found, head, list) {
- if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
- found->force_alloc = CHUNK_ALLOC_FORCE;
- }
- rcu_read_unlock();
-}
-
-static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *sinfo, int force)
-{
- u64 bytes_used = btrfs_space_info_used(sinfo, false);
- u64 thresh;
-
- if (force == CHUNK_ALLOC_FORCE)
- return 1;
-
- /*
- * in limited mode, we want to have some free space up to
- * about 1% of the FS size.
- */
- if (force == CHUNK_ALLOC_LIMITED) {
- thresh = btrfs_super_total_bytes(fs_info->super_copy);
- thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
-
- if (sinfo->total_bytes - bytes_used < thresh)
- return 1;
- }
-
- if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
- return 0;
- return 1;
-}
-
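
should_alloc_chunk(), removed just above, encodes the allocation policy: a forced request always allocates, a limited request allocates once free space in the space_info drops below roughly 1% of the filesystem (never less than 64MiB), and the default case waits until the space_info is about 80% used. A sketch of those thresholds (plain user-space arithmetic):

/*
 * FORCE always allocates, LIMITED keeps ~1% (at least 64MiB) of the FS free,
 * and the default case waits for ~80% usage of the space_info.
 */
#include <stdio.h>

#define SZ_64M (64ULL * 1024 * 1024)
#define SZ_2M  (2ULL * 1024 * 1024)

enum force { ALLOC_NO_FORCE, ALLOC_LIMITED, ALLOC_FORCE };

static int should_alloc(unsigned long long fs_total,
                        unsigned long long si_total,
                        unsigned long long si_used, enum force force)
{
    unsigned long long thresh;

    if (force == ALLOC_FORCE)
        return 1;

    if (force == ALLOC_LIMITED) {
        thresh = fs_total / 100;        /* ~1% of the filesystem */
        if (thresh < SZ_64M)
            thresh = SZ_64M;
        if (si_total - si_used < thresh)
            return 1;
    }

    /* Default: only allocate once the space_info is ~80% used. */
    return si_used + SZ_2M >= si_total * 8 / 10;
}

int main(void)
{
    unsigned long long gib = 1024ULL * 1024 * 1024;

    printf("%d\n", should_alloc(100 * gib, 8 * gib, 7 * gib, ALLOC_NO_FORCE));
    printf("%d\n", should_alloc(100 * gib, 8 * gib, 3 * gib, ALLOC_NO_FORCE));
    return 0;
}
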
-static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
-{
- u64 num_dev;
-
- num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
- if (!num_dev)
- num_dev = fs_info->fs_devices->rw_devices;
-
- return num_dev;
-}
-
-/*
- * If @is_allocation is true, reserve space in the system space info necessary
- * for allocating a chunk, otherwise if it's false, reserve space necessary for
- * removing a chunk.
- */
-void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_space_info *info;
- u64 left;
- u64 thresh;
- int ret = 0;
- u64 num_devs;
-
- /*
- * Needed because we can end up allocating a system chunk and for an
- * atomic and race free space reservation in the chunk block reserve.
- */
- lockdep_assert_held(&fs_info->chunk_mutex);
-
- info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
- spin_lock(&info->lock);
- left = info->total_bytes - btrfs_space_info_used(info, true);
- spin_unlock(&info->lock);
-
- num_devs = get_profile_num_devs(fs_info, type);
-
- /* num_devs device items to update and 1 chunk item to add or remove */
- thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
- btrfs_calc_trans_metadata_size(fs_info, 1);
-
- if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
- btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
- left, thresh, type);
- btrfs_dump_space_info(fs_info, info, 0, 0);
- }
-
- if (left < thresh) {
- u64 flags = btrfs_system_alloc_profile(fs_info);
-
- /*
- * Ignore failure to create system chunk. We might end up not
- * needing it, as we might not need to COW all nodes/leafs from
- * the paths we visit in the chunk tree (they were already COWed
- * or created in the current transaction for example).
- */
- ret = btrfs_alloc_chunk(trans, flags);
- }
-
- if (!ret) {
- ret = btrfs_block_rsv_add(fs_info->chunk_root,
- &fs_info->chunk_block_rsv,
- thresh, BTRFS_RESERVE_NO_FLUSH);
- if (!ret)
- trans->chunk_bytes_reserved += thresh;
- }
-}
-
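
check_system_chunk(), removed just above, estimates the metadata needed to update one device item per device plus one chunk item, and allocates a new SYSTEM chunk first if the SYSTEM space_info cannot cover that. A decision sketch of the comparison (the per-item cost below is a placeholder constant, not the kernel's btrfs_calc_*_metadata_size() formulas):

/*
 * Compare the remaining SYSTEM space against a worst-case estimate for
 * num_devs device item updates plus one chunk item.  Illustrative only.
 */
#include <stdio.h>

#define ITEM_COST (16ULL * 1024)        /* assumed worst-case per item */

static int need_system_chunk(unsigned long long sys_total,
                             unsigned long long sys_used,
                             unsigned int num_devs)
{
    unsigned long long left = sys_total - sys_used;
    unsigned long long thresh = (num_devs + 1ULL) * ITEM_COST;

    return left < thresh;               /* short on SYSTEM space: grow it */
}

int main(void)
{
    /* Only 64KiB left in SYSTEM: not enough for 4 device items + 1 chunk. */
    printf("need chunk: %d\n",
           need_system_chunk(32ULL << 20, (32ULL << 20) - (64ULL << 10), 4));
    printf("need chunk: %d\n", need_system_chunk(32ULL << 20, 1ULL << 20, 4));
    return 0;
}
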
-/*
- * If force is CHUNK_ALLOC_FORCE:
- * - return 1 if it successfully allocates a chunk,
- * - return errors including -ENOSPC otherwise.
- * If force is NOT CHUNK_ALLOC_FORCE:
- * - return 0 if it doesn't need to allocate a new chunk,
- * - return 1 if it successfully allocates a chunk,
- * - return errors including -ENOSPC otherwise.
- */
-int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
- enum btrfs_chunk_alloc_enum force)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_space_info *space_info;
- bool wait_for_alloc = false;
- bool should_alloc = false;
- int ret = 0;
-
- /* Don't re-enter if we're already allocating a chunk */
- if (trans->allocating_chunk)
- return -ENOSPC;
-
- space_info = btrfs_find_space_info(fs_info, flags);
- ASSERT(space_info);
-
- do {
- spin_lock(&space_info->lock);
- if (force < space_info->force_alloc)
- force = space_info->force_alloc;
- should_alloc = should_alloc_chunk(fs_info, space_info, force);
- if (space_info->full) {
- /* No more free physical space */
- if (should_alloc)
- ret = -ENOSPC;
- else
- ret = 0;
- spin_unlock(&space_info->lock);
- return ret;
- } else if (!should_alloc) {
- spin_unlock(&space_info->lock);
- return 0;
- } else if (space_info->chunk_alloc) {
- /*
- * Someone is already allocating, so we need to block
- * until this someone is finished and then loop to
- * recheck if we should continue with our allocation
- * attempt.
- */
- wait_for_alloc = true;
- spin_unlock(&space_info->lock);
- mutex_lock(&fs_info->chunk_mutex);
- mutex_unlock(&fs_info->chunk_mutex);
- } else {
- /* Proceed with allocation */
- space_info->chunk_alloc = 1;
- wait_for_alloc = false;
- spin_unlock(&space_info->lock);
- }
-
- cond_resched();
- } while (wait_for_alloc);
-
- mutex_lock(&fs_info->chunk_mutex);
- trans->allocating_chunk = true;
-
- /*
- * If we have mixed data/metadata chunks we want to make sure we keep
- * allocating mixed chunks instead of individual chunks.
- */
- if (btrfs_mixed_space_info(space_info))
- flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
-
- /*
- * if we're doing a data chunk, go ahead and make sure that
- * we keep a reasonable number of metadata chunks allocated in the
- * FS as well.
- */
- if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
- fs_info->data_chunk_allocations++;
- if (!(fs_info->data_chunk_allocations %
- fs_info->metadata_ratio))
- force_metadata_allocation(fs_info);
- }
-
- /*
- * Check if we have enough space in SYSTEM chunk because we may need
- * to update devices.
- */
- check_system_chunk(trans, flags);
-
- ret = btrfs_alloc_chunk(trans, flags);
- trans->allocating_chunk = false;
-
- spin_lock(&space_info->lock);
- if (ret < 0) {
- if (ret == -ENOSPC)
- space_info->full = 1;
- else
- goto out;
- } else {
- ret = 1;
- space_info->max_extent_size = 0;
- }
-
- space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
-out:
- space_info->chunk_alloc = 0;
- spin_unlock(&space_info->lock);
- mutex_unlock(&fs_info->chunk_mutex);
- /*
- * When we allocate a new chunk we reserve space in the chunk block
- * reserve to make sure we can COW nodes/leafs in the chunk tree or
- * add new nodes/leafs to it if we end up needing to do it when
- * inserting the chunk item and updating device items as part of the
- * second phase of chunk allocation, performed by
- * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
- * large number of new block groups to create in our transaction
- * handle's new_bgs list to avoid exhausting the chunk block reserve
- * in extreme cases - like having a single transaction create many new
- * block groups when starting to write out the free space caches of all
- * the block groups that were made dirty during the lifetime of the
- * transaction.
- */
- if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
- btrfs_create_pending_block_groups(trans);
-
- return ret;
-}
-
-static int update_block_group(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes, int alloc)
-{
- struct btrfs_fs_info *info = trans->fs_info;
- struct btrfs_block_group_cache *cache = NULL;
- u64 total = num_bytes;
- u64 old_val;
- u64 byte_in_group;
- int factor;
- int ret = 0;
-
- /* block accounting for super block */
- spin_lock(&info->delalloc_root_lock);
- old_val = btrfs_super_bytes_used(info->super_copy);
- if (alloc)
- old_val += num_bytes;
- else
- old_val -= num_bytes;
- btrfs_set_super_bytes_used(info->super_copy, old_val);
- spin_unlock(&info->delalloc_root_lock);
-
- while (total) {
- cache = btrfs_lookup_block_group(info, bytenr);
- if (!cache) {
- ret = -ENOENT;
- break;
- }
- factor = btrfs_bg_type_to_factor(cache->flags);
-
- /*
- * If this block group has free space cache written out, we
- * need to make sure to load it if we are removing space. This
- * is because we need the unpinning stage to actually add the
- * space back to the block group, otherwise we will leak space.
- */
- if (!alloc && cache->cached == BTRFS_CACHE_NO)
- cache_block_group(cache, 1);
-
- byte_in_group = bytenr - cache->key.objectid;
- WARN_ON(byte_in_group > cache->key.offset);
-
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
-
- if (btrfs_test_opt(info, SPACE_CACHE) &&
- cache->disk_cache_state < BTRFS_DC_CLEAR)
- cache->disk_cache_state = BTRFS_DC_CLEAR;
-
- old_val = btrfs_block_group_used(&cache->item);
- num_bytes = min(total, cache->key.offset - byte_in_group);
- if (alloc) {
- old_val += num_bytes;
- btrfs_set_block_group_used(&cache->item, old_val);
- cache->reserved -= num_bytes;
- cache->space_info->bytes_reserved -= num_bytes;
- cache->space_info->bytes_used += num_bytes;
- cache->space_info->disk_used += num_bytes * factor;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
- } else {
- old_val -= num_bytes;
- btrfs_set_block_group_used(&cache->item, old_val);
- cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(info,
- cache->space_info, num_bytes);
- cache->space_info->bytes_used -= num_bytes;
- cache->space_info->disk_used -= num_bytes * factor;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
-
- trace_btrfs_space_reservation(info, "pinned",
- cache->space_info->flags,
- num_bytes, 1);
- percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
- num_bytes,
- BTRFS_TOTAL_BYTES_PINNED_BATCH);
- set_extent_dirty(info->pinned_extents,
- bytenr, bytenr + num_bytes - 1,
- GFP_NOFS | __GFP_NOFAIL);
- }
-
- spin_lock(&trans->transaction->dirty_bgs_lock);
- if (list_empty(&cache->dirty_list)) {
- list_add_tail(&cache->dirty_list,
- &trans->transaction->dirty_bgs);
- trans->delayed_ref_updates++;
- btrfs_get_block_group(cache);
- }
- spin_unlock(&trans->transaction->dirty_bgs_lock);
-
- /*
- * No longer have used bytes in this block group, queue it for
- * deletion. We do this after adding the block group to the
- * dirty list to avoid races between cleaner kthread and space
- * cache writeout.
- */
- if (!alloc && old_val == 0)
- btrfs_mark_bg_unused(cache);
-
- btrfs_put_block_group(cache);
- total -= num_bytes;
- bytenr += num_bytes;
- }
-
- /* Modified block groups are accounted for in the delayed_refs_rsv. */
- btrfs_update_delayed_refs_rsv(trans);
+ ret = btrfs_get_alloc_profile(fs_info, flags);
return ret;
}
static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
u64 bytenr;
spin_lock(&fs_info->block_group_cache_lock);
@@ -4230,13 +2581,13 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
if (!cache)
return 0;
- bytenr = cache->key.objectid;
+ bytenr = cache->start;
btrfs_put_block_group(cache);
return bytenr;
}
-static int pin_down_extent(struct btrfs_block_group_cache *cache,
+static int pin_down_extent(struct btrfs_block_group *cache,
u64 bytenr, u64 num_bytes, int reserved)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -4253,8 +2604,6 @@ static int pin_down_extent(struct btrfs_block_group_cache *cache,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- trace_btrfs_space_reservation(fs_info, "pinned",
- cache->space_info->flags, num_bytes, 1);
percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
set_extent_dirty(fs_info->pinned_extents, bytenr,
@@ -4262,13 +2611,12 @@ static int pin_down_extent(struct btrfs_block_group_cache *cache,
return 0;
}
-/*
- * this function must be called within transaction
- */
int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, int reserved)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
+
+ ASSERT(fs_info->running_transaction);
cache = btrfs_lookup_block_group(fs_info, bytenr);
BUG_ON(!cache); /* Logic error */
@@ -4285,7 +2633,7 @@ int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
int ret;
cache = btrfs_lookup_block_group(fs_info, bytenr);
@@ -4298,7 +2646,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
* to one because the slow code to read in the free extents does check
* the pinned extents.
*/
- cache_block_group(cache, 1);
+ btrfs_cache_block_group(cache, 1);
pin_down_extent(cache, bytenr, num_bytes, 0);
@@ -4312,25 +2660,26 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 num_bytes)
{
int ret;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_caching_control *caching_ctl;
block_group = btrfs_lookup_block_group(fs_info, start);
if (!block_group)
return -EINVAL;
- cache_block_group(block_group, 0);
- caching_ctl = get_caching_control(block_group);
+ btrfs_cache_block_group(block_group, 0);
+ caching_ctl = btrfs_get_caching_control(block_group);
if (!caching_ctl) {
/* Logic error */
- BUG_ON(!block_group_cache_done(block_group));
+ BUG_ON(!btrfs_block_group_done(block_group));
ret = btrfs_remove_free_space(block_group, start, num_bytes);
} else {
mutex_lock(&caching_ctl->mutex);
if (start >= caching_ctl->progress) {
- ret = add_excluded_extent(fs_info, start, num_bytes);
+ ret = btrfs_add_excluded_extent(fs_info, start,
+ num_bytes);
} else if (start + num_bytes <= caching_ctl->progress) {
ret = btrfs_remove_free_space(block_group,
start, num_bytes);
@@ -4344,11 +2693,12 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
num_bytes = (start + num_bytes) -
caching_ctl->progress;
start = caching_ctl->progress;
- ret = add_excluded_extent(fs_info, start, num_bytes);
+ ret = btrfs_add_excluded_extent(fs_info, start,
+ num_bytes);
}
out_lock:
mutex_unlock(&caching_ctl->mutex);
- put_caching_control(caching_ctl);
+ btrfs_put_caching_control(caching_ctl);
}
btrfs_put_block_group(block_group);
return ret;
@@ -4387,128 +2737,26 @@ int btrfs_exclude_logged_extents(struct extent_buffer *eb)
}
static void
-btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
+btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
{
atomic_inc(&bg->reservations);
}
-void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
- const u64 start)
-{
- struct btrfs_block_group_cache *bg;
-
- bg = btrfs_lookup_block_group(fs_info, start);
- ASSERT(bg);
- if (atomic_dec_and_test(&bg->reservations))
- wake_up_var(&bg->reservations);
- btrfs_put_block_group(bg);
-}
-
-void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
-{
- struct btrfs_space_info *space_info = bg->space_info;
-
- ASSERT(bg->ro);
-
- if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
- return;
-
- /*
- * Our block group is read only but before we set it to read only,
- * some task might have had allocated an extent from it already, but it
- * has not yet created a respective ordered extent (and added it to a
- * root's list of ordered extents).
- * Therefore wait for any task currently allocating extents, since the
- * block group's reservations counter is incremented while a read lock
- * on the groups' semaphore is held and decremented after releasing
- * the read access on that semaphore and creating the ordered extent.
- */
- down_write(&space_info->groups_sem);
- up_write(&space_info->groups_sem);
-
- wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
-}
-
-/**
- * btrfs_add_reserved_bytes - update the block_group and space info counters
- * @cache: The cache we are manipulating
- * @ram_bytes: The number of bytes of file content, and will be same to
- * @num_bytes except for the compress path.
- * @num_bytes: The number of bytes in question
- * @delalloc: The blocks are allocated for the delalloc write
- *
- * This is called by the allocator when it reserves space. If this is a
- * reservation and the block group has become read only we cannot make the
- * reservation and return -EAGAIN, otherwise this function always succeeds.
- */
-static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 ram_bytes, u64 num_bytes, int delalloc)
-{
- struct btrfs_space_info *space_info = cache->space_info;
- int ret = 0;
-
- spin_lock(&space_info->lock);
- spin_lock(&cache->lock);
- if (cache->ro) {
- ret = -EAGAIN;
- } else {
- cache->reserved += num_bytes;
- space_info->bytes_reserved += num_bytes;
- btrfs_space_info_update_bytes_may_use(cache->fs_info,
- space_info, -ram_bytes);
- if (delalloc)
- cache->delalloc_bytes += num_bytes;
- }
- spin_unlock(&cache->lock);
- spin_unlock(&space_info->lock);
- return ret;
-}
-
-/**
- * btrfs_free_reserved_bytes - update the block_group and space info counters
- * @cache: The cache we are manipulating
- * @num_bytes: The number of bytes in question
- * @delalloc: The blocks are allocated for the delalloc write
- *
- * This is called by somebody who is freeing space that was never actually used
- * on disk. For example if you reserve some space for a new leaf in transaction
- * A and before transaction A commits you free that leaf, you call this with
- * reserve set to 0 in order to clear the reservation.
- */
-
-static void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int delalloc)
-{
- struct btrfs_space_info *space_info = cache->space_info;
-
- spin_lock(&space_info->lock);
- spin_lock(&cache->lock);
- if (cache->ro)
- space_info->bytes_readonly += num_bytes;
- cache->reserved -= num_bytes;
- space_info->bytes_reserved -= num_bytes;
- space_info->max_extent_size = 0;
-
- if (delalloc)
- cache->delalloc_bytes -= num_bytes;
- spin_unlock(&cache->lock);
- spin_unlock(&space_info->lock);
-}
void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
{
struct btrfs_caching_control *next;
struct btrfs_caching_control *caching_ctl;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
down_write(&fs_info->commit_root_sem);
list_for_each_entry_safe(caching_ctl, next,
&fs_info->caching_block_groups, list) {
cache = caching_ctl->block_group;
- if (block_group_cache_done(cache)) {
+ if (btrfs_block_group_done(cache)) {
cache->last_byte_to_unpin = (u64)-1;
list_del_init(&caching_ctl->list);
- put_caching_control(caching_ctl);
+ btrfs_put_caching_control(caching_ctl);
} else {
cache->last_byte_to_unpin = caching_ctl->progress;
}
@@ -4557,7 +2805,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end,
const bool return_free_space)
{
- struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_block_group *cache = NULL;
struct btrfs_space_info *space_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
struct btrfs_free_cluster *cluster = NULL;
@@ -4569,7 +2817,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
while (start <= end) {
readonly = false;
if (!cache ||
- start >= cache->key.objectid + cache->key.offset) {
+ start >= cache->start + cache->length) {
if (cache)
btrfs_put_block_group(cache);
total_unpinned = 0;
@@ -4582,7 +2830,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
empty_cluster <<= 1;
}
- len = cache->key.objectid + cache->key.offset - start;
+ len = cache->start + cache->length - start;
len = min(len, end + 1 - start);
if (start < cache->last_byte_to_unpin) {
@@ -4612,9 +2860,6 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
spin_lock(&cache->lock);
cache->pinned -= len;
btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
-
- trace_btrfs_space_reservation(fs_info, "pinned",
- space_info->flags, len, 0);
space_info->max_extent_size = 0;
percpu_counter_add_batch(&space_info->total_bytes_pinned,
-len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
@@ -4636,17 +2881,13 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
space_info, to_add);
if (global_rsv->reserved >= global_rsv->size)
global_rsv->full = 1;
- trace_btrfs_space_reservation(fs_info,
- "space_info",
- space_info->flags,
- to_add, 1);
len -= to_add;
}
spin_unlock(&global_rsv->lock);
/* Add to any tickets we may have */
if (len)
- btrfs_space_info_add_new_bytes(fs_info,
- space_info, len);
+ btrfs_try_granting_tickets(fs_info,
+ space_info);
}
spin_unlock(&space_info->lock);
}
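In the unpin path above, leftover length that cannot be refilled into the global reserve is no longer fed to btrfs_space_info_add_new_bytes(); instead btrfs_try_granting_tickets() is asked to satisfy any queued reservation tickets ("Add to any tickets we may have"). A minimal userspace sketch of that ticket-granting idea, assuming a simple FIFO of pending reservations; every name and type below is illustrative, not the kernel API:

	#include <stdio.h>
	#include <stdint.h>

	struct ticket { uint64_t bytes; int granted; };

	/* Grant queued reservation tickets in order while free space lasts. */
	static void try_granting_tickets(uint64_t *free_bytes,
					 struct ticket *tickets, int nr)
	{
		for (int i = 0; i < nr; i++) {
			if (tickets[i].granted)
				continue;
			if (tickets[i].bytes > *free_bytes)
				break;			/* keep FIFO ordering */
			*free_bytes -= tickets[i].bytes;
			tickets[i].granted = 1;
		}
	}

	int main(void)
	{
		uint64_t free_bytes = 96;
		struct ticket t[] = { { 64, 0 }, { 48, 0 }, { 16, 0 } };

		try_granting_tickets(&free_bytes, t, 3);
		for (int i = 0; i < 3; i++)
			printf("ticket %d: %s\n", i,
			       t[i].granted ? "granted" : "waiting");
		return 0;
	}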
@@ -4659,7 +2900,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *block_group, *tmp;
+ struct btrfs_block_group *block_group, *tmp;
struct list_head *deleted_bgs;
struct extent_io_tree *unpin;
u64 start;
@@ -4705,8 +2946,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
ret = -EROFS;
if (!trans->aborted)
ret = btrfs_discard_extent(fs_info,
- block_group->key.objectid,
- block_group->key.offset,
+ block_group->start,
+ block_group->length,
&trimmed);
list_del_init(&block_group->bg_list);
@@ -4947,7 +3188,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
goto out;
}
- ret = update_block_group(trans, bytenr, num_bytes, 0);
+ ret = btrfs_update_block_group(trans, bytenr, num_bytes, 0);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -5041,7 +3282,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
}
if (last_ref && btrfs_header_generation(buf) == trans->transid) {
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
ret = check_ref_cleanup(trans, buf->start);
@@ -5120,53 +3361,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
return ret;
}
-/*
- * when we wait for progress in the block group caching, its because
- * our allocation attempt failed at least once. So, we must sleep
- * and let some progress happen before we try again.
- *
- * This function will sleep at least once waiting for new free space to
- * show up, and then it will check the block group free space numbers
- * for our min num_bytes. Another option is to have it go ahead
- * and look in the rbtree for a free extent of a given size, but this
- * is a good start.
- *
- * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
- * any of the information in this block group.
- */
-static noinline void
-wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
- u64 num_bytes)
-{
- struct btrfs_caching_control *caching_ctl;
-
- caching_ctl = get_caching_control(cache);
- if (!caching_ctl)
- return;
-
- wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
- (cache->free_space_ctl->free_space >= num_bytes));
-
- put_caching_control(caching_ctl);
-}
-
-static noinline int
-wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
-{
- struct btrfs_caching_control *caching_ctl;
- int ret = 0;
-
- caching_ctl = get_caching_control(cache);
- if (!caching_ctl)
- return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
-
- wait_event(caching_ctl->wait, block_group_cache_done(cache));
- if (cache->cached == BTRFS_CACHE_ERROR)
- ret = -EIO;
- put_caching_control(caching_ctl);
- return ret;
-}
-
enum btrfs_loop_type {
LOOP_CACHING_NOWAIT,
LOOP_CACHING_WAIT,
@@ -5175,15 +3369,14 @@ enum btrfs_loop_type {
};
static inline void
-btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
+btrfs_lock_block_group(struct btrfs_block_group *cache,
int delalloc)
{
if (delalloc)
down_read(&cache->data_rwsem);
}
-static inline void
-btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
+static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
int delalloc)
{
btrfs_get_block_group(cache);
@@ -5191,12 +3384,12 @@ btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
down_read(&cache->data_rwsem);
}
-static struct btrfs_block_group_cache *
-btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
+static struct btrfs_block_group *btrfs_lock_cluster(
+ struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
int delalloc)
{
- struct btrfs_block_group_cache *used_bg = NULL;
+ struct btrfs_block_group *used_bg = NULL;
spin_lock(&cluster->refill_lock);
while (1) {
@@ -5230,7 +3423,7 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
}
static inline void
-btrfs_release_block_group(struct btrfs_block_group_cache *cache,
+btrfs_release_block_group(struct btrfs_block_group *cache,
int delalloc)
{
if (delalloc)
@@ -5301,12 +3494,12 @@ struct find_free_extent_ctl {
 * Return >0 to inform caller that we found nothing
 * Return 0 means we have found a location and set ffe_ctl->found_offset.
*/
-static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
+static int find_free_extent_clustered(struct btrfs_block_group *bg,
struct btrfs_free_cluster *last_ptr,
struct find_free_extent_ctl *ffe_ctl,
- struct btrfs_block_group_cache **cluster_bg_ret)
+ struct btrfs_block_group **cluster_bg_ret)
{
- struct btrfs_block_group_cache *cluster_bg;
+ struct btrfs_block_group *cluster_bg;
u64 aligned_cluster;
u64 offset;
int ret;
@@ -5319,7 +3512,7 @@ static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
goto release_cluster;
offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
- ffe_ctl->num_bytes, cluster_bg->key.objectid,
+ ffe_ctl->num_bytes, cluster_bg->start,
&ffe_ctl->max_extent_size);
if (offset) {
/* We have a block, we're done */
@@ -5386,7 +3579,7 @@ refill_cluster:
spin_unlock(&last_ptr->refill_lock);
ffe_ctl->retry_clustered = true;
- wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+ btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
ffe_ctl->empty_cluster + ffe_ctl->empty_size);
return -EAGAIN;
}
@@ -5405,7 +3598,7 @@ refill_cluster:
 * Return 0 when we found a free extent and set ffe_ctl->found_offset
* Return -EAGAIN to inform caller that we need to re-search this block group
*/
-static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
+static int find_free_extent_unclustered(struct btrfs_block_group *bg,
struct btrfs_free_cluster *last_ptr,
struct find_free_extent_ctl *ffe_ctl)
{
@@ -5453,8 +3646,8 @@ static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
*/
if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
- wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
- ffe_ctl->empty_size);
+ btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+ ffe_ctl->empty_size);
ffe_ctl->retry_unclustered = true;
return -EAGAIN;
} else if (!offset) {
@@ -5607,7 +3800,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
{
int ret = 0;
struct btrfs_free_cluster *last_ptr = NULL;
- struct btrfs_block_group_cache *block_group = NULL;
+ struct btrfs_block_group *block_group = NULL;
struct find_free_extent_ctl ffe_ctl = {0};
struct btrfs_space_info *space_info;
bool use_cluster = true;
@@ -5730,7 +3923,7 @@ search:
continue;
btrfs_grab_block_group(block_group, delalloc);
- ffe_ctl.search_start = block_group->key.objectid;
+ ffe_ctl.search_start = block_group->start;
/*
* this can happen if we end up cycling through all the
@@ -5750,13 +3943,21 @@ search:
*/
if ((flags & extra) && !(block_group->flags & extra))
goto loop;
+
+ /*
+ * This block group has different flags than we want.
+ * It's possible that we have MIXED_GROUP flag but no
+ * block group is mixed. Just skip such block group.
+ */
+ btrfs_release_block_group(block_group, delalloc);
+ continue;
}
have_block_group:
- ffe_ctl.cached = block_group_cache_done(block_group);
+ ffe_ctl.cached = btrfs_block_group_done(block_group);
if (unlikely(!ffe_ctl.cached)) {
ffe_ctl.have_caching_bg = true;
- ret = cache_block_group(block_group, 0);
+ ret = btrfs_cache_block_group(block_group, 0);
BUG_ON(ret < 0);
ret = 0;
}
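The added lines in the hunk above make find_free_extent() release and skip a block group whose profile flags do not match the requested allocation flags instead of falling through; as the new comment notes, this can happen when the MIXED_GROUP incompat bit is set but no mixed block group actually exists. The underlying test is a plain bitmask match, roughly as sketched below (the flag values and helper name are illustrative, not the kernel definitions):

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative profile bits; the real flags live in the btrfs headers. */
	#define BG_DATA     (1ULL << 0)
	#define BG_METADATA (1ULL << 1)
	#define BG_RAID1    (1ULL << 4)

	/* Does this block group provide every profile bit the caller asked for? */
	static int bg_matches(uint64_t bg_flags, uint64_t wanted)
	{
		return (bg_flags & wanted) == wanted;
	}

	int main(void)
	{
		uint64_t wanted = BG_DATA | BG_RAID1;

		printf("%d\n", bg_matches(BG_DATA | BG_RAID1, wanted)); /* 1: usable  */
		printf("%d\n", bg_matches(BG_DATA, wanted));            /* 0: skip it */
		return 0;
	}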
@@ -5769,7 +3970,7 @@ have_block_group:
* lets look there
*/
if (last_ptr && use_cluster) {
- struct btrfs_block_group_cache *cluster_bg = NULL;
+ struct btrfs_block_group *cluster_bg = NULL;
ret = find_free_extent_clustered(block_group, last_ptr,
&ffe_ctl, &cluster_bg);
@@ -5802,7 +4003,7 @@ checks:
/* move on to the next group */
if (ffe_ctl.search_start + num_bytes >
- block_group->key.objectid + block_group->key.offset) {
+ block_group->start + block_group->length) {
btrfs_add_free_space(block_group, ffe_ctl.found_offset,
num_bytes);
goto loop;
@@ -5951,7 +4152,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len,
int pin, int delalloc)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
int ret = 0;
cache = btrfs_lookup_block_group(fs_info, start);
@@ -6051,7 +4252,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = update_block_group(trans, ins->objectid, ins->offset, 1);
+ ret = btrfs_update_block_group(trans, ins->objectid, ins->offset, 1);
if (ret) { /* -ENOENT, logic error */
btrfs_err(fs_info, "update block group failed for %llu %llu",
ins->objectid, ins->offset);
@@ -6141,8 +4342,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = update_block_group(trans, extent_key.objectid,
- fs_info->nodesize, 1);
+ ret = btrfs_update_block_group(trans, extent_key.objectid,
+ fs_info->nodesize, 1);
if (ret) { /* -ENOENT, logic error */
btrfs_err(fs_info, "update block group failed for %llu %llu",
extent_key.objectid, extent_key.offset);
@@ -6184,7 +4385,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;
/*
@@ -7254,7 +5455,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
btrfs_assert_tree_locked(parent);
parent_level = btrfs_header_level(parent);
- extent_buffer_get(parent);
+ atomic_inc(&parent->refs);
path->nodes[parent_level] = parent;
path->slots[parent_level] = btrfs_header_nritems(parent);
@@ -7292,193 +5493,13 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
return ret;
}
-static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
-{
- u64 num_devices;
- u64 stripped;
-
- /*
- * if restripe for this chunk_type is on pick target profile and
- * return, otherwise do the usual balance
- */
- stripped = get_restripe_target(fs_info, flags);
- if (stripped)
- return extended_to_chunk(stripped);
-
- num_devices = fs_info->fs_devices->rw_devices;
-
- stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
- BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;
-
- if (num_devices == 1) {
- stripped |= BTRFS_BLOCK_GROUP_DUP;
- stripped = flags & ~stripped;
-
- /* turn raid0 into single device chunks */
- if (flags & BTRFS_BLOCK_GROUP_RAID0)
- return stripped;
-
- /* turn mirroring into duplication */
- if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
- BTRFS_BLOCK_GROUP_RAID10))
- return stripped | BTRFS_BLOCK_GROUP_DUP;
- } else {
- /* they already had raid on here, just return */
- if (flags & stripped)
- return flags;
-
- stripped |= BTRFS_BLOCK_GROUP_DUP;
- stripped = flags & ~stripped;
-
- /* switch duplicated blocks with raid1 */
- if (flags & BTRFS_BLOCK_GROUP_DUP)
- return stripped | BTRFS_BLOCK_GROUP_RAID1;
-
- /* this is drive concat, leave it alone */
- }
-
- return flags;
-}
-
-static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
-{
- struct btrfs_space_info *sinfo = cache->space_info;
- u64 num_bytes;
- u64 sinfo_used;
- u64 min_allocable_bytes;
- int ret = -ENOSPC;
-
- /*
- * We need some metadata space and system metadata space for
- * allocating chunks in some corner cases until we force to set
- * it to be readonly.
- */
- if ((sinfo->flags &
- (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
- !force)
- min_allocable_bytes = SZ_1M;
- else
- min_allocable_bytes = 0;
-
- spin_lock(&sinfo->lock);
- spin_lock(&cache->lock);
-
- if (cache->ro) {
- cache->ro++;
- ret = 0;
- goto out;
- }
-
- num_bytes = cache->key.offset - cache->reserved - cache->pinned -
- cache->bytes_super - btrfs_block_group_used(&cache->item);
- sinfo_used = btrfs_space_info_used(sinfo, true);
-
- if (sinfo_used + num_bytes + min_allocable_bytes <=
- sinfo->total_bytes) {
- sinfo->bytes_readonly += num_bytes;
- cache->ro++;
- list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
- ret = 0;
- }
-out:
- spin_unlock(&cache->lock);
- spin_unlock(&sinfo->lock);
- if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
- btrfs_info(cache->fs_info,
- "unable to make block group %llu ro",
- cache->key.objectid);
- btrfs_info(cache->fs_info,
- "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
- sinfo_used, num_bytes, min_allocable_bytes);
- btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
- }
- return ret;
-}
-
-int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
-
-{
- struct btrfs_fs_info *fs_info = cache->fs_info;
- struct btrfs_trans_handle *trans;
- u64 alloc_flags;
- int ret;
-
-again:
- trans = btrfs_join_transaction(fs_info->extent_root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- /*
- * we're not allowed to set block groups readonly after the dirty
- * block groups cache has started writing. If it already started,
- * back off and let this transaction commit
- */
- mutex_lock(&fs_info->ro_block_group_mutex);
- if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
- u64 transid = trans->transid;
-
- mutex_unlock(&fs_info->ro_block_group_mutex);
- btrfs_end_transaction(trans);
-
- ret = btrfs_wait_for_commit(fs_info, transid);
- if (ret)
- return ret;
- goto again;
- }
-
- /*
- * if we are changing raid levels, try to allocate a corresponding
- * block group with the new raid level.
- */
- alloc_flags = update_block_group_flags(fs_info, cache->flags);
- if (alloc_flags != cache->flags) {
- ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
- /*
- * ENOSPC is allowed here, we may have enough space
- * already allocated at the new raid level to
- * carry on
- */
- if (ret == -ENOSPC)
- ret = 0;
- if (ret < 0)
- goto out;
- }
-
- ret = inc_block_group_ro(cache, 0);
- if (!ret)
- goto out;
- alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
- ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
- if (ret < 0)
- goto out;
- ret = inc_block_group_ro(cache, 0);
-out:
- if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
- alloc_flags = update_block_group_flags(fs_info, cache->flags);
- mutex_lock(&fs_info->chunk_mutex);
- check_system_chunk(trans, alloc_flags);
- mutex_unlock(&fs_info->chunk_mutex);
- }
- mutex_unlock(&fs_info->ro_block_group_mutex);
-
- btrfs_end_transaction(trans);
- return ret;
-}
-
-int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
-{
- u64 alloc_flags = get_alloc_profile(trans->fs_info, type);
-
- return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
-}
-
/*
* helper to account the unused space of all the readonly block group in the
* space_info. takes mirrors into account.
*/
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
u64 free_bytes = 0;
int factor;
@@ -7496,9 +5517,8 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
}
factor = btrfs_bg_type_to_factor(block_group->flags);
- free_bytes += (block_group->key.offset -
- btrfs_block_group_used(&block_group->item)) *
- factor;
+ free_bytes += (block_group->length -
+ block_group->used) * factor;
spin_unlock(&block_group->lock);
}
@@ -7507,1356 +5527,6 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
return free_bytes;
}
-void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
-{
- struct btrfs_space_info *sinfo = cache->space_info;
- u64 num_bytes;
-
- BUG_ON(!cache->ro);
-
- spin_lock(&sinfo->lock);
- spin_lock(&cache->lock);
- if (!--cache->ro) {
- num_bytes = cache->key.offset - cache->reserved -
- cache->pinned - cache->bytes_super -
- btrfs_block_group_used(&cache->item);
- sinfo->bytes_readonly -= num_bytes;
- list_del_init(&cache->ro_list);
- }
- spin_unlock(&cache->lock);
- spin_unlock(&sinfo->lock);
-}
-
-/*
- * Checks to see if it's even possible to relocate this block group.
- *
- * @return - -1 if it's not a good idea to relocate this block group, 0 if its
- * ok to go ahead and try.
- */
-int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
-{
- struct btrfs_block_group_cache *block_group;
- struct btrfs_space_info *space_info;
- struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
- struct btrfs_device *device;
- u64 min_free;
- u64 dev_min = 1;
- u64 dev_nr = 0;
- u64 target;
- int debug;
- int index;
- int full = 0;
- int ret = 0;
-
- debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
-
- block_group = btrfs_lookup_block_group(fs_info, bytenr);
-
- /* odd, couldn't find the block group, leave it alone */
- if (!block_group) {
- if (debug)
- btrfs_warn(fs_info,
- "can't find block group for bytenr %llu",
- bytenr);
- return -1;
- }
-
- min_free = btrfs_block_group_used(&block_group->item);
-
- /* no bytes used, we're good */
- if (!min_free)
- goto out;
-
- space_info = block_group->space_info;
- spin_lock(&space_info->lock);
-
- full = space_info->full;
-
- /*
- * if this is the last block group we have in this space, we can't
- * relocate it unless we're able to allocate a new chunk below.
- *
- * Otherwise, we need to make sure we have room in the space to handle
- * all of the extents from this block group. If we can, we're good
- */
- if ((space_info->total_bytes != block_group->key.offset) &&
- (btrfs_space_info_used(space_info, false) + min_free <
- space_info->total_bytes)) {
- spin_unlock(&space_info->lock);
- goto out;
- }
- spin_unlock(&space_info->lock);
-
- /*
- * ok we don't have enough space, but maybe we have free space on our
- * devices to allocate new chunks for relocation, so loop through our
- * alloc devices and guess if we have enough space. if this block
- * group is going to be restriped, run checks against the target
- * profile instead of the current one.
- */
- ret = -1;
-
- /*
- * index:
- * 0: raid10
- * 1: raid1
- * 2: dup
- * 3: raid0
- * 4: single
- */
- target = get_restripe_target(fs_info, block_group->flags);
- if (target) {
- index = btrfs_bg_flags_to_raid_index(extended_to_chunk(target));
- } else {
- /*
- * this is just a balance, so if we were marked as full
- * we know there is no space for a new chunk
- */
- if (full) {
- if (debug)
- btrfs_warn(fs_info,
- "no space to alloc new chunk for block group %llu",
- block_group->key.objectid);
- goto out;
- }
-
- index = btrfs_bg_flags_to_raid_index(block_group->flags);
- }
-
- if (index == BTRFS_RAID_RAID10) {
- dev_min = 4;
- /* Divide by 2 */
- min_free >>= 1;
- } else if (index == BTRFS_RAID_RAID1) {
- dev_min = 2;
- } else if (index == BTRFS_RAID_DUP) {
- /* Multiply by 2 */
- min_free <<= 1;
- } else if (index == BTRFS_RAID_RAID0) {
- dev_min = fs_devices->rw_devices;
- min_free = div64_u64(min_free, dev_min);
- }
-
- mutex_lock(&fs_info->chunk_mutex);
- list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
- u64 dev_offset;
-
- /*
- * check to make sure we can actually find a chunk with enough
- * space to fit our block group in.
- */
- if (device->total_bytes > device->bytes_used + min_free &&
- !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
- ret = find_free_dev_extent(device, min_free,
- &dev_offset, NULL);
- if (!ret)
- dev_nr++;
-
- if (dev_nr >= dev_min)
- break;
-
- ret = -1;
- }
- }
- if (debug && ret == -1)
- btrfs_warn(fs_info,
- "no space to allocate a new chunk for block group %llu",
- block_group->key.objectid);
- mutex_unlock(&fs_info->chunk_mutex);
-out:
- btrfs_put_block_group(block_group);
- return ret;
-}
-
-static int find_first_block_group(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
- struct btrfs_key *key)
-{
- struct btrfs_root *root = fs_info->extent_root;
- int ret = 0;
- struct btrfs_key found_key;
- struct extent_buffer *leaf;
- struct btrfs_block_group_item bg;
- u64 flags;
- int slot;
-
- ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- while (1) {
- slot = path->slots[0];
- leaf = path->nodes[0];
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto out;
- break;
- }
- btrfs_item_key_to_cpu(leaf, &found_key, slot);
-
- if (found_key.objectid >= key->objectid &&
- found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
- struct extent_map_tree *em_tree;
- struct extent_map *em;
-
- em_tree = &root->fs_info->mapping_tree;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, found_key.objectid,
- found_key.offset);
- read_unlock(&em_tree->lock);
- if (!em) {
- btrfs_err(fs_info,
- "logical %llu len %llu found bg but no related chunk",
- found_key.objectid, found_key.offset);
- ret = -ENOENT;
- } else if (em->start != found_key.objectid ||
- em->len != found_key.offset) {
- btrfs_err(fs_info,
- "block group %llu len %llu mismatch with chunk %llu len %llu",
- found_key.objectid, found_key.offset,
- em->start, em->len);
- ret = -EUCLEAN;
- } else {
- read_extent_buffer(leaf, &bg,
- btrfs_item_ptr_offset(leaf, slot),
- sizeof(bg));
- flags = btrfs_block_group_flags(&bg) &
- BTRFS_BLOCK_GROUP_TYPE_MASK;
-
- if (flags != (em->map_lookup->type &
- BTRFS_BLOCK_GROUP_TYPE_MASK)) {
- btrfs_err(fs_info,
-"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
- found_key.objectid,
- found_key.offset, flags,
- (BTRFS_BLOCK_GROUP_TYPE_MASK &
- em->map_lookup->type));
- ret = -EUCLEAN;
- } else {
- ret = 0;
- }
- }
- free_extent_map(em);
- goto out;
- }
- path->slots[0]++;
- }
-out:
- return ret;
-}
-
-void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
-{
- struct btrfs_block_group_cache *block_group;
- u64 last = 0;
-
- while (1) {
- struct inode *inode;
-
- block_group = btrfs_lookup_first_block_group(info, last);
- while (block_group) {
- wait_block_group_cache_done(block_group);
- spin_lock(&block_group->lock);
- if (block_group->iref)
- break;
- spin_unlock(&block_group->lock);
- block_group = next_block_group(block_group);
- }
- if (!block_group) {
- if (last == 0)
- break;
- last = 0;
- continue;
- }
-
- inode = block_group->inode;
- block_group->iref = 0;
- block_group->inode = NULL;
- spin_unlock(&block_group->lock);
- ASSERT(block_group->io_ctl.inode == NULL);
- iput(inode);
- last = block_group->key.objectid + block_group->key.offset;
- btrfs_put_block_group(block_group);
- }
-}
-
-/*
- * Must be called only after stopping all workers, since we could have block
- * group caching kthreads running, and therefore they could race with us if we
- * freed the block groups before stopping them.
- */
-int btrfs_free_block_groups(struct btrfs_fs_info *info)
-{
- struct btrfs_block_group_cache *block_group;
- struct btrfs_space_info *space_info;
- struct btrfs_caching_control *caching_ctl;
- struct rb_node *n;
-
- down_write(&info->commit_root_sem);
- while (!list_empty(&info->caching_block_groups)) {
- caching_ctl = list_entry(info->caching_block_groups.next,
- struct btrfs_caching_control, list);
- list_del(&caching_ctl->list);
- put_caching_control(caching_ctl);
- }
- up_write(&info->commit_root_sem);
-
- spin_lock(&info->unused_bgs_lock);
- while (!list_empty(&info->unused_bgs)) {
- block_group = list_first_entry(&info->unused_bgs,
- struct btrfs_block_group_cache,
- bg_list);
- list_del_init(&block_group->bg_list);
- btrfs_put_block_group(block_group);
- }
- spin_unlock(&info->unused_bgs_lock);
-
- spin_lock(&info->block_group_cache_lock);
- while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
- block_group = rb_entry(n, struct btrfs_block_group_cache,
- cache_node);
- rb_erase(&block_group->cache_node,
- &info->block_group_cache_tree);
- RB_CLEAR_NODE(&block_group->cache_node);
- spin_unlock(&info->block_group_cache_lock);
-
- down_write(&block_group->space_info->groups_sem);
- list_del(&block_group->list);
- up_write(&block_group->space_info->groups_sem);
-
- /*
- * We haven't cached this block group, which means we could
- * possibly have excluded extents on this block group.
- */
- if (block_group->cached == BTRFS_CACHE_NO ||
- block_group->cached == BTRFS_CACHE_ERROR)
- free_excluded_extents(block_group);
-
- btrfs_remove_free_space_cache(block_group);
- ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
- ASSERT(list_empty(&block_group->dirty_list));
- ASSERT(list_empty(&block_group->io_list));
- ASSERT(list_empty(&block_group->bg_list));
- ASSERT(atomic_read(&block_group->count) == 1);
- btrfs_put_block_group(block_group);
-
- spin_lock(&info->block_group_cache_lock);
- }
- spin_unlock(&info->block_group_cache_lock);
-
- /* now that all the block groups are freed, go through and
- * free all the space_info structs. This is only called during
- * the final stages of unmount, and so we know nobody is
- * using them. We call synchronize_rcu() once before we start,
- * just to be on the safe side.
- */
- synchronize_rcu();
-
- btrfs_release_global_block_rsv(info);
-
- while (!list_empty(&info->space_info)) {
- int i;
-
- space_info = list_entry(info->space_info.next,
- struct btrfs_space_info,
- list);
-
- /*
- * Do not hide this behind enospc_debug, this is actually
- * important and indicates a real bug if this happens.
- */
- if (WARN_ON(space_info->bytes_pinned > 0 ||
- space_info->bytes_reserved > 0 ||
- space_info->bytes_may_use > 0))
- btrfs_dump_space_info(info, space_info, 0, 0);
- list_del(&space_info->list);
- for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
- struct kobject *kobj;
- kobj = space_info->block_group_kobjs[i];
- space_info->block_group_kobjs[i] = NULL;
- if (kobj) {
- kobject_del(kobj);
- kobject_put(kobj);
- }
- }
- kobject_del(&space_info->kobj);
- kobject_put(&space_info->kobj);
- }
- return 0;
-}
-
-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_space_info *space_info;
- struct raid_kobject *rkobj;
- LIST_HEAD(list);
- int ret = 0;
-
- spin_lock(&fs_info->pending_raid_kobjs_lock);
- list_splice_init(&fs_info->pending_raid_kobjs, &list);
- spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
- list_for_each_entry(rkobj, &list, list) {
- space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-
- ret = kobject_add(&rkobj->kobj, &space_info->kobj,
- "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
- if (ret) {
- kobject_put(&rkobj->kobj);
- break;
- }
- }
- if (ret)
- btrfs_warn(fs_info,
- "failed to add kobject for block cache, ignoring");
-}
-
-static void link_block_group(struct btrfs_block_group_cache *cache)
-{
- struct btrfs_space_info *space_info = cache->space_info;
- struct btrfs_fs_info *fs_info = cache->fs_info;
- int index = btrfs_bg_flags_to_raid_index(cache->flags);
- bool first = false;
-
- down_write(&space_info->groups_sem);
- if (list_empty(&space_info->block_groups[index]))
- first = true;
- list_add_tail(&cache->list, &space_info->block_groups[index]);
- up_write(&space_info->groups_sem);
-
- if (first) {
- struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
- if (!rkobj) {
- btrfs_warn(cache->fs_info,
- "couldn't alloc memory for raid level kobject");
- return;
- }
- rkobj->flags = cache->flags;
- kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
- spin_lock(&fs_info->pending_raid_kobjs_lock);
- list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
- spin_unlock(&fs_info->pending_raid_kobjs_lock);
- space_info->block_group_kobjs[index] = &rkobj->kobj;
- }
-}
-
-static struct btrfs_block_group_cache *
-btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
- u64 start, u64 size)
-{
- struct btrfs_block_group_cache *cache;
-
- cache = kzalloc(sizeof(*cache), GFP_NOFS);
- if (!cache)
- return NULL;
-
- cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
- GFP_NOFS);
- if (!cache->free_space_ctl) {
- kfree(cache);
- return NULL;
- }
-
- cache->key.objectid = start;
- cache->key.offset = size;
- cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
-
- cache->fs_info = fs_info;
- cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
- set_free_space_tree_thresholds(cache);
-
- atomic_set(&cache->count, 1);
- spin_lock_init(&cache->lock);
- init_rwsem(&cache->data_rwsem);
- INIT_LIST_HEAD(&cache->list);
- INIT_LIST_HEAD(&cache->cluster_list);
- INIT_LIST_HEAD(&cache->bg_list);
- INIT_LIST_HEAD(&cache->ro_list);
- INIT_LIST_HEAD(&cache->dirty_list);
- INIT_LIST_HEAD(&cache->io_list);
- btrfs_init_free_space_ctl(cache);
- atomic_set(&cache->trimming, 0);
- mutex_init(&cache->free_space_lock);
- btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
-
- return cache;
-}
-
-
-/*
- * Iterate all chunks and verify that each of them has the corresponding block
- * group
- */
-static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
-{
- struct extent_map_tree *map_tree = &fs_info->mapping_tree;
- struct extent_map *em;
- struct btrfs_block_group_cache *bg;
- u64 start = 0;
- int ret = 0;
-
- while (1) {
- read_lock(&map_tree->lock);
- /*
- * lookup_extent_mapping will return the first extent map
- * intersecting the range, so setting @len to 1 is enough to
- * get the first chunk.
- */
- em = lookup_extent_mapping(map_tree, start, 1);
- read_unlock(&map_tree->lock);
- if (!em)
- break;
-
- bg = btrfs_lookup_block_group(fs_info, em->start);
- if (!bg) {
- btrfs_err(fs_info,
- "chunk start=%llu len=%llu doesn't have corresponding block group",
- em->start, em->len);
- ret = -EUCLEAN;
- free_extent_map(em);
- break;
- }
- if (bg->key.objectid != em->start ||
- bg->key.offset != em->len ||
- (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
- (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
- btrfs_err(fs_info,
-"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
- em->start, em->len,
- em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
- bg->key.objectid, bg->key.offset,
- bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
- ret = -EUCLEAN;
- free_extent_map(em);
- btrfs_put_block_group(bg);
- break;
- }
- start = em->start + em->len;
- free_extent_map(em);
- btrfs_put_block_group(bg);
- }
- return ret;
-}
-
-int btrfs_read_block_groups(struct btrfs_fs_info *info)
-{
- struct btrfs_path *path;
- int ret;
- struct btrfs_block_group_cache *cache;
- struct btrfs_space_info *space_info;
- struct btrfs_key key;
- struct btrfs_key found_key;
- struct extent_buffer *leaf;
- int need_clear = 0;
- u64 cache_gen;
- u64 feature;
- int mixed;
-
- feature = btrfs_super_incompat_flags(info->super_copy);
- mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
-
- key.objectid = 0;
- key.offset = 0;
- key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- path->reada = READA_FORWARD;
-
- cache_gen = btrfs_super_cache_generation(info->super_copy);
- if (btrfs_test_opt(info, SPACE_CACHE) &&
- btrfs_super_generation(info->super_copy) != cache_gen)
- need_clear = 1;
- if (btrfs_test_opt(info, CLEAR_CACHE))
- need_clear = 1;
-
- while (1) {
- ret = find_first_block_group(info, path, &key);
- if (ret > 0)
- break;
- if (ret != 0)
- goto error;
-
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
- cache = btrfs_create_block_group_cache(info, found_key.objectid,
- found_key.offset);
- if (!cache) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (need_clear) {
- /*
- * When we mount with old space cache, we need to
- * set BTRFS_DC_CLEAR and set dirty flag.
- *
- * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
- * truncate the old free space cache inode and
- * setup a new one.
- * b) Setting 'dirty flag' makes sure that we flush
- * the new space cache info onto disk.
- */
- if (btrfs_test_opt(info, SPACE_CACHE))
- cache->disk_cache_state = BTRFS_DC_CLEAR;
- }
-
- read_extent_buffer(leaf, &cache->item,
- btrfs_item_ptr_offset(leaf, path->slots[0]),
- sizeof(cache->item));
- cache->flags = btrfs_block_group_flags(&cache->item);
- if (!mixed &&
- ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
- (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
- btrfs_err(info,
-"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
- cache->key.objectid);
- ret = -EINVAL;
- goto error;
- }
-
- key.objectid = found_key.objectid + found_key.offset;
- btrfs_release_path(path);
-
- /*
- * We need to exclude the super stripes now so that the space
- * info has super bytes accounted for, otherwise we'll think
- * we have more space than we actually do.
- */
- ret = exclude_super_stripes(cache);
- if (ret) {
- /*
- * We may have excluded something, so call this just in
- * case.
- */
- free_excluded_extents(cache);
- btrfs_put_block_group(cache);
- goto error;
- }
-
- /*
- * check for two cases, either we are full, and therefore
- * don't need to bother with the caching work since we won't
- * find any space, or we are empty, and we can just add all
- * the space in and be done with it. This saves us _a_lot_ of
- * time, particularly in the full case.
- */
- if (found_key.offset == btrfs_block_group_used(&cache->item)) {
- cache->last_byte_to_unpin = (u64)-1;
- cache->cached = BTRFS_CACHE_FINISHED;
- free_excluded_extents(cache);
- } else if (btrfs_block_group_used(&cache->item) == 0) {
- cache->last_byte_to_unpin = (u64)-1;
- cache->cached = BTRFS_CACHE_FINISHED;
- add_new_free_space(cache, found_key.objectid,
- found_key.objectid +
- found_key.offset);
- free_excluded_extents(cache);
- }
-
- ret = btrfs_add_block_group_cache(info, cache);
- if (ret) {
- btrfs_remove_free_space_cache(cache);
- btrfs_put_block_group(cache);
- goto error;
- }
-
- trace_btrfs_add_block_group(info, cache, 0);
- btrfs_update_space_info(info, cache->flags, found_key.offset,
- btrfs_block_group_used(&cache->item),
- cache->bytes_super, &space_info);
-
- cache->space_info = space_info;
-
- link_block_group(cache);
-
- set_avail_alloc_bits(info, cache->flags);
- if (btrfs_chunk_readonly(info, cache->key.objectid)) {
- inc_block_group_ro(cache, 1);
- } else if (btrfs_block_group_used(&cache->item) == 0) {
- ASSERT(list_empty(&cache->bg_list));
- btrfs_mark_bg_unused(cache);
- }
- }
-
- list_for_each_entry_rcu(space_info, &info->space_info, list) {
- if (!(get_alloc_profile(info, space_info->flags) &
- (BTRFS_BLOCK_GROUP_RAID10 |
- BTRFS_BLOCK_GROUP_RAID1_MASK |
- BTRFS_BLOCK_GROUP_RAID56_MASK |
- BTRFS_BLOCK_GROUP_DUP)))
- continue;
- /*
- * avoid allocating from un-mirrored block group if there are
- * mirrored block groups.
- */
- list_for_each_entry(cache,
- &space_info->block_groups[BTRFS_RAID_RAID0],
- list)
- inc_block_group_ro(cache, 1);
- list_for_each_entry(cache,
- &space_info->block_groups[BTRFS_RAID_SINGLE],
- list)
- inc_block_group_ro(cache, 1);
- }
-
- btrfs_add_raid_kobjects(info);
- btrfs_init_global_block_rsv(info);
- ret = check_chunk_block_group_mappings(info);
-error:
- btrfs_free_path(path);
- return ret;
-}
-
-void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *block_group;
- struct btrfs_root *extent_root = fs_info->extent_root;
- struct btrfs_block_group_item item;
- struct btrfs_key key;
- int ret = 0;
-
- if (!trans->can_flush_pending_bgs)
- return;
-
- while (!list_empty(&trans->new_bgs)) {
- block_group = list_first_entry(&trans->new_bgs,
- struct btrfs_block_group_cache,
- bg_list);
- if (ret)
- goto next;
-
- spin_lock(&block_group->lock);
- memcpy(&item, &block_group->item, sizeof(item));
- memcpy(&key, &block_group->key, sizeof(key));
- spin_unlock(&block_group->lock);
-
- ret = btrfs_insert_item(trans, extent_root, &key, &item,
- sizeof(item));
- if (ret)
- btrfs_abort_transaction(trans, ret);
- ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
- if (ret)
- btrfs_abort_transaction(trans, ret);
- add_block_group_free_space(trans, block_group);
- /* already aborted the transaction if it failed. */
-next:
- btrfs_delayed_refs_rsv_release(fs_info, 1);
- list_del_init(&block_group->bg_list);
- }
- btrfs_trans_release_chunk_metadata(trans);
-}
-
-int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
- u64 type, u64 chunk_offset, u64 size)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *cache;
- int ret;
-
- btrfs_set_log_full_commit(trans);
-
- cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
- if (!cache)
- return -ENOMEM;
-
- btrfs_set_block_group_used(&cache->item, bytes_used);
- btrfs_set_block_group_chunk_objectid(&cache->item,
- BTRFS_FIRST_CHUNK_TREE_OBJECTID);
- btrfs_set_block_group_flags(&cache->item, type);
-
- cache->flags = type;
- cache->last_byte_to_unpin = (u64)-1;
- cache->cached = BTRFS_CACHE_FINISHED;
- cache->needs_free_space = 1;
- ret = exclude_super_stripes(cache);
- if (ret) {
- /*
- * We may have excluded something, so call this just in
- * case.
- */
- free_excluded_extents(cache);
- btrfs_put_block_group(cache);
- return ret;
- }
-
- add_new_free_space(cache, chunk_offset, chunk_offset + size);
-
- free_excluded_extents(cache);
-
-#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(cache)) {
- u64 new_bytes_used = size - bytes_used;
-
- bytes_used += new_bytes_used >> 1;
- fragment_free_space(cache);
- }
-#endif
- /*
- * Ensure the corresponding space_info object is created and
- * assigned to our block group. We want our bg to be added to the rbtree
- * with its ->space_info set.
- */
- cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
- ASSERT(cache->space_info);
-
- ret = btrfs_add_block_group_cache(fs_info, cache);
- if (ret) {
- btrfs_remove_free_space_cache(cache);
- btrfs_put_block_group(cache);
- return ret;
- }
-
- /*
- * Now that our block group has its ->space_info set and is inserted in
- * the rbtree, update the space info's counters.
- */
- trace_btrfs_add_block_group(fs_info, cache, 1);
- btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
- cache->bytes_super, &cache->space_info);
- btrfs_update_global_block_rsv(fs_info);
-
- link_block_group(cache);
-
- list_add_tail(&cache->bg_list, &trans->new_bgs);
- trans->delayed_ref_updates++;
- btrfs_update_delayed_refs_rsv(trans);
-
- set_avail_alloc_bits(fs_info, type);
- return 0;
-}
-
-static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
-{
- u64 extra_flags = chunk_to_extended(flags) &
- BTRFS_EXTENDED_PROFILE_MASK;
-
- write_seqlock(&fs_info->profiles_lock);
- if (flags & BTRFS_BLOCK_GROUP_DATA)
- fs_info->avail_data_alloc_bits &= ~extra_flags;
- if (flags & BTRFS_BLOCK_GROUP_METADATA)
- fs_info->avail_metadata_alloc_bits &= ~extra_flags;
- if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
- fs_info->avail_system_alloc_bits &= ~extra_flags;
- write_sequnlock(&fs_info->profiles_lock);
-}
-
-/*
- * Clear incompat bits for the following feature(s):
- *
- * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
- * in the whole filesystem
- */
-static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
-{
- if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- struct list_head *head = &fs_info->space_info;
- struct btrfs_space_info *sinfo;
-
- list_for_each_entry_rcu(sinfo, head, list) {
- bool found = false;
-
- down_read(&sinfo->groups_sem);
- if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
- found = true;
- if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
- found = true;
- up_read(&sinfo->groups_sem);
-
- if (found)
- return;
- }
- btrfs_clear_fs_incompat(fs_info, RAID56);
- }
-}
-
-int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- u64 group_start, struct extent_map *em)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *root = fs_info->extent_root;
- struct btrfs_path *path;
- struct btrfs_block_group_cache *block_group;
- struct btrfs_free_cluster *cluster;
- struct btrfs_root *tree_root = fs_info->tree_root;
- struct btrfs_key key;
- struct inode *inode;
- struct kobject *kobj = NULL;
- int ret;
- int index;
- int factor;
- struct btrfs_caching_control *caching_ctl = NULL;
- bool remove_em;
- bool remove_rsv = false;
-
- block_group = btrfs_lookup_block_group(fs_info, group_start);
- BUG_ON(!block_group);
- BUG_ON(!block_group->ro);
-
- trace_btrfs_remove_block_group(block_group);
- /*
- * Free the reserved super bytes from this block group before
- * remove it.
- */
- free_excluded_extents(block_group);
- btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
- block_group->key.offset);
-
- memcpy(&key, &block_group->key, sizeof(key));
- index = btrfs_bg_flags_to_raid_index(block_group->flags);
- factor = btrfs_bg_type_to_factor(block_group->flags);
-
- /* make sure this block group isn't part of an allocation cluster */
- cluster = &fs_info->data_alloc_cluster;
- spin_lock(&cluster->refill_lock);
- btrfs_return_cluster_to_free_space(block_group, cluster);
- spin_unlock(&cluster->refill_lock);
-
- /*
- * make sure this block group isn't part of a metadata
- * allocation cluster
- */
- cluster = &fs_info->meta_alloc_cluster;
- spin_lock(&cluster->refill_lock);
- btrfs_return_cluster_to_free_space(block_group, cluster);
- spin_unlock(&cluster->refill_lock);
-
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * get the inode first so any iput calls done for the io_list
- * aren't the final iput (no unlinks allowed now)
- */
- inode = lookup_free_space_inode(block_group, path);
-
- mutex_lock(&trans->transaction->cache_write_mutex);
- /*
- * Make sure our free space cache IO is done before removing the
- * free space inode
- */
- spin_lock(&trans->transaction->dirty_bgs_lock);
- if (!list_empty(&block_group->io_list)) {
- list_del_init(&block_group->io_list);
-
- WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
-
- spin_unlock(&trans->transaction->dirty_bgs_lock);
- btrfs_wait_cache_io(trans, block_group, path);
- btrfs_put_block_group(block_group);
- spin_lock(&trans->transaction->dirty_bgs_lock);
- }
-
- if (!list_empty(&block_group->dirty_list)) {
- list_del_init(&block_group->dirty_list);
- remove_rsv = true;
- btrfs_put_block_group(block_group);
- }
- spin_unlock(&trans->transaction->dirty_bgs_lock);
- mutex_unlock(&trans->transaction->cache_write_mutex);
-
- if (!IS_ERR(inode)) {
- ret = btrfs_orphan_add(trans, BTRFS_I(inode));
- if (ret) {
- btrfs_add_delayed_iput(inode);
- goto out;
- }
- clear_nlink(inode);
- /* One for the block groups ref */
- spin_lock(&block_group->lock);
- if (block_group->iref) {
- block_group->iref = 0;
- block_group->inode = NULL;
- spin_unlock(&block_group->lock);
- iput(inode);
- } else {
- spin_unlock(&block_group->lock);
- }
- /* One for our lookup ref */
- btrfs_add_delayed_iput(inode);
- }
-
- key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
- key.type = 0;
-
- ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
- if (ret < 0)
- goto out;
- if (ret > 0)
- btrfs_release_path(path);
- if (ret == 0) {
- ret = btrfs_del_item(trans, tree_root, path);
- if (ret)
- goto out;
- btrfs_release_path(path);
- }
-
- spin_lock(&fs_info->block_group_cache_lock);
- rb_erase(&block_group->cache_node,
- &fs_info->block_group_cache_tree);
- RB_CLEAR_NODE(&block_group->cache_node);
-
- if (fs_info->first_logical_byte == block_group->key.objectid)
- fs_info->first_logical_byte = (u64)-1;
- spin_unlock(&fs_info->block_group_cache_lock);
-
- down_write(&block_group->space_info->groups_sem);
- /*
- * we must use list_del_init so people can check to see if they
- * are still on the list after taking the semaphore
- */
- list_del_init(&block_group->list);
- if (list_empty(&block_group->space_info->block_groups[index])) {
- kobj = block_group->space_info->block_group_kobjs[index];
- block_group->space_info->block_group_kobjs[index] = NULL;
- clear_avail_alloc_bits(fs_info, block_group->flags);
- }
- up_write(&block_group->space_info->groups_sem);
- clear_incompat_bg_bits(fs_info, block_group->flags);
- if (kobj) {
- kobject_del(kobj);
- kobject_put(kobj);
- }
-
- if (block_group->has_caching_ctl)
- caching_ctl = get_caching_control(block_group);
- if (block_group->cached == BTRFS_CACHE_STARTED)
- wait_block_group_cache_done(block_group);
- if (block_group->has_caching_ctl) {
- down_write(&fs_info->commit_root_sem);
- if (!caching_ctl) {
- struct btrfs_caching_control *ctl;
-
- list_for_each_entry(ctl,
- &fs_info->caching_block_groups, list)
- if (ctl->block_group == block_group) {
- caching_ctl = ctl;
- refcount_inc(&caching_ctl->count);
- break;
- }
- }
- if (caching_ctl)
- list_del_init(&caching_ctl->list);
- up_write(&fs_info->commit_root_sem);
- if (caching_ctl) {
- /* Once for the caching bgs list and once for us. */
- put_caching_control(caching_ctl);
- put_caching_control(caching_ctl);
- }
- }
-
- spin_lock(&trans->transaction->dirty_bgs_lock);
- WARN_ON(!list_empty(&block_group->dirty_list));
- WARN_ON(!list_empty(&block_group->io_list));
- spin_unlock(&trans->transaction->dirty_bgs_lock);
-
- btrfs_remove_free_space_cache(block_group);
-
- spin_lock(&block_group->space_info->lock);
- list_del_init(&block_group->ro_list);
-
- if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
- WARN_ON(block_group->space_info->total_bytes
- < block_group->key.offset);
- WARN_ON(block_group->space_info->bytes_readonly
- < block_group->key.offset);
- WARN_ON(block_group->space_info->disk_total
- < block_group->key.offset * factor);
- }
- block_group->space_info->total_bytes -= block_group->key.offset;
- block_group->space_info->bytes_readonly -= block_group->key.offset;
- block_group->space_info->disk_total -= block_group->key.offset * factor;
-
- spin_unlock(&block_group->space_info->lock);
-
- memcpy(&key, &block_group->key, sizeof(key));
-
- mutex_lock(&fs_info->chunk_mutex);
- spin_lock(&block_group->lock);
- block_group->removed = 1;
- /*
- * At this point trimming can't start on this block group, because we
- * removed the block group from the tree fs_info->block_group_cache_tree
- * so no one can't find it anymore and even if someone already got this
- * block group before we removed it from the rbtree, they have already
- * incremented block_group->trimming - if they didn't, they won't find
- * any free space entries because we already removed them all when we
- * called btrfs_remove_free_space_cache().
- *
- * And we must not remove the extent map from the fs_info->mapping_tree
- * to prevent the same logical address range and physical device space
- * ranges from being reused for a new block group. This is because our
- * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
- * completely transactionless, so while it is trimming a range the
- * currently running transaction might finish and a new one start,
- * allowing for new block groups to be created that can reuse the same
- * physical device locations unless we take this special care.
- *
- * There may also be an implicit trim operation if the file system
- * is mounted with -odiscard. The same protections must remain
- * in place until the extents have been discarded completely when
- * the transaction commit has completed.
- */
- remove_em = (atomic_read(&block_group->trimming) == 0);
- spin_unlock(&block_group->lock);
-
- mutex_unlock(&fs_info->chunk_mutex);
-
- ret = remove_block_group_free_space(trans, block_group);
- if (ret)
- goto out;
-
- btrfs_put_block_group(block_group);
- btrfs_put_block_group(block_group);
-
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret > 0)
- ret = -EIO;
- if (ret < 0)
- goto out;
-
- ret = btrfs_del_item(trans, root, path);
- if (ret)
- goto out;
-
- if (remove_em) {
- struct extent_map_tree *em_tree;
-
- em_tree = &fs_info->mapping_tree;
- write_lock(&em_tree->lock);
- remove_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
- /* once for the tree */
- free_extent_map(em);
- }
-out:
- if (remove_rsv)
- btrfs_delayed_refs_rsv_release(fs_info, 1);
- btrfs_free_path(path);
- return ret;
-}
-
-struct btrfs_trans_handle *
-btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
- const u64 chunk_offset)
-{
- struct extent_map_tree *em_tree = &fs_info->mapping_tree;
- struct extent_map *em;
- struct map_lookup *map;
- unsigned int num_items;
-
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, chunk_offset, 1);
- read_unlock(&em_tree->lock);
- ASSERT(em && em->start == chunk_offset);
-
- /*
- * We need to reserve 3 + N units from the metadata space info in order
- * to remove a block group (done at btrfs_remove_chunk() and at
- * btrfs_remove_block_group()), which are used for:
- *
- * 1 unit for adding the free space inode's orphan (located in the tree
- * of tree roots).
- * 1 unit for deleting the block group item (located in the extent
- * tree).
- * 1 unit for deleting the free space item (located in tree of tree
- * roots).
- * N units for deleting N device extent items corresponding to each
- * stripe (located in the device tree).
- *
- * In order to remove a block group we also need to reserve units in the
- * system space info in order to update the chunk tree (update one or
- * more device items and remove one chunk item), but this is done at
- * btrfs_remove_chunk() through a call to check_system_chunk().
- */
- map = em->map_lookup;
- num_items = 3 + map->num_stripes;
- free_extent_map(em);
-
- return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
- num_items, 1);
-}
-
-/*
- * Process the unused_bgs list and remove any that don't have any allocated
- * space inside of them.
- */
-void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_block_group_cache *block_group;
- struct btrfs_space_info *space_info;
- struct btrfs_trans_handle *trans;
- int ret = 0;
-
- if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
- return;
-
- spin_lock(&fs_info->unused_bgs_lock);
- while (!list_empty(&fs_info->unused_bgs)) {
- u64 start, end;
- int trimming;
-
- block_group = list_first_entry(&fs_info->unused_bgs,
- struct btrfs_block_group_cache,
- bg_list);
- list_del_init(&block_group->bg_list);
-
- space_info = block_group->space_info;
-
- if (ret || btrfs_mixed_space_info(space_info)) {
- btrfs_put_block_group(block_group);
- continue;
- }
- spin_unlock(&fs_info->unused_bgs_lock);
-
- mutex_lock(&fs_info->delete_unused_bgs_mutex);
-
- /* Don't want to race with allocators so take the groups_sem */
- down_write(&space_info->groups_sem);
- spin_lock(&block_group->lock);
- if (block_group->reserved || block_group->pinned ||
- btrfs_block_group_used(&block_group->item) ||
- block_group->ro ||
- list_is_singular(&block_group->list)) {
- /*
- * We want to bail if we made new allocations or have
- * outstanding allocations in this block group. We do
- * the ro check in case balance is currently acting on
- * this block group.
- */
- trace_btrfs_skip_unused_block_group(block_group);
- spin_unlock(&block_group->lock);
- up_write(&space_info->groups_sem);
- goto next;
- }
- spin_unlock(&block_group->lock);
-
- /* We don't want to force the issue, only flip if it's ok. */
- ret = inc_block_group_ro(block_group, 0);
- up_write(&space_info->groups_sem);
- if (ret < 0) {
- ret = 0;
- goto next;
- }
-
- /*
- * Want to do this before we do anything else so we can recover
- * properly if we fail to join the transaction.
- */
- trans = btrfs_start_trans_remove_block_group(fs_info,
- block_group->key.objectid);
- if (IS_ERR(trans)) {
- btrfs_dec_block_group_ro(block_group);
- ret = PTR_ERR(trans);
- goto next;
- }
-
- /*
- * We could have pending pinned extents for this block group,
- * just delete them, we don't care about them anymore.
- */
- start = block_group->key.objectid;
- end = start + block_group->key.offset - 1;
- /*
- * Hold the unused_bg_unpin_mutex lock to avoid racing with
- * btrfs_finish_extent_commit(). If we are at transaction N,
- * another task might be running finish_extent_commit() for the
- * previous transaction N - 1, and have seen a range belonging
- * to the block group in freed_extents[] before we were able to
- * clear the whole block group range from freed_extents[]. This
- * means that task can lookup for the block group after we
- * unpinned it from freed_extents[] and removed it, leading to
- * a BUG_ON() at btrfs_unpin_extent_range().
- */
- mutex_lock(&fs_info->unused_bg_unpin_mutex);
- ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
- EXTENT_DIRTY);
- if (ret) {
- mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- btrfs_dec_block_group_ro(block_group);
- goto end_trans;
- }
- ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
- EXTENT_DIRTY);
- if (ret) {
- mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- btrfs_dec_block_group_ro(block_group);
- goto end_trans;
- }
- mutex_unlock(&fs_info->unused_bg_unpin_mutex);
-
- /* Reset pinned so btrfs_put_block_group doesn't complain */
- spin_lock(&space_info->lock);
- spin_lock(&block_group->lock);
-
- btrfs_space_info_update_bytes_pinned(fs_info, space_info,
- -block_group->pinned);
- space_info->bytes_readonly += block_group->pinned;
- percpu_counter_add_batch(&space_info->total_bytes_pinned,
- -block_group->pinned,
- BTRFS_TOTAL_BYTES_PINNED_BATCH);
- block_group->pinned = 0;
-
- spin_unlock(&block_group->lock);
- spin_unlock(&space_info->lock);
-
- /* DISCARD can flip during remount */
- trimming = btrfs_test_opt(fs_info, DISCARD);
-
- /* Implicit trim during transaction commit. */
- if (trimming)
- btrfs_get_block_group_trimming(block_group);
-
- /*
- * Btrfs_remove_chunk will abort the transaction if things go
- * horribly wrong.
- */
- ret = btrfs_remove_chunk(trans, block_group->key.objectid);
-
- if (ret) {
- if (trimming)
- btrfs_put_block_group_trimming(block_group);
- goto end_trans;
- }
-
- /*
- * If we're not mounted with -odiscard, we can just forget
- * about this block group. Otherwise we'll need to wait
- * until transaction commit to do the actual discard.
- */
- if (trimming) {
- spin_lock(&fs_info->unused_bgs_lock);
- /*
- * A concurrent scrub might have added us to the list
- * fs_info->unused_bgs, so use a list_move operation
- * to add the block group to the deleted_bgs list.
- */
- list_move(&block_group->bg_list,
- &trans->transaction->deleted_bgs);
- spin_unlock(&fs_info->unused_bgs_lock);
- btrfs_get_block_group(block_group);
- }
-end_trans:
- btrfs_end_transaction(trans);
-next:
- mutex_unlock(&fs_info->delete_unused_bgs_mutex);
- btrfs_put_block_group(block_group);
- spin_lock(&fs_info->unused_bgs_lock);
- }
- spin_unlock(&fs_info->unused_bgs_lock);
-}
-
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end)
{
@@ -8971,10 +5641,11 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
*/
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
- struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_block_group *cache = NULL;
struct btrfs_device *device;
struct list_head *devices;
u64 group_trimmed;
+ u64 range_end = U64_MAX;
u64 start;
u64 end;
u64 trimmed = 0;
@@ -8984,26 +5655,33 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
int dev_ret = 0;
int ret = 0;
+ /*
+ * Check range overflow if range->len is set.
+ * The default range->len is U64_MAX.
+ */
+ if (range->len != U64_MAX &&
+ check_add_overflow(range->start, range->len, &range_end))
+ return -EINVAL;
+
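
The guard above uses check_add_overflow(), which is built on the compiler's __builtin_add_overflow(). A minimal userspace sketch of the same idea, assuming the fstrim default of len == U64_MAX mentioned in the comment (the values are illustrative, not from this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 4096;		/* hypothetical range->start */
	uint64_t len = UINT64_MAX;	/* userspace default range->len */
	uint64_t range_end = UINT64_MAX;

	/* Same shape as the kernel check: only add when len is bounded. */
	if (len != UINT64_MAX && __builtin_add_overflow(start, len, &range_end))
		return 1;		/* the kernel returns -EINVAL here */

	printf("trim range end: %llu\n", (unsigned long long)range_end);
	return 0;
}
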
cache = btrfs_lookup_first_block_group(fs_info, range->start);
- for (; cache; cache = next_block_group(cache)) {
- if (cache->key.objectid >= (range->start + range->len)) {
+ for (; cache; cache = btrfs_next_block_group(cache)) {
+ if (cache->start >= range_end) {
btrfs_put_block_group(cache);
break;
}
- start = max(range->start, cache->key.objectid);
- end = min(range->start + range->len,
- cache->key.objectid + cache->key.offset);
+ start = max(range->start, cache->start);
+ end = min(range_end, cache->start + cache->length);
if (end - start >= range->minlen) {
- if (!block_group_cache_done(cache)) {
- ret = cache_block_group(cache, 0);
+ if (!btrfs_block_group_done(cache)) {
+ ret = btrfs_cache_block_group(cache, 0);
if (ret) {
bg_failed++;
bg_ret = ret;
continue;
}
- ret = wait_block_group_cache_done(cache);
+ ret = btrfs_wait_block_group_cache_done(cache);
if (ret) {
bg_failed++;
bg_ret = ret;
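
The loop above is what ultimately services the FITRIM ioctl. A hedged userspace sketch of issuing that ioctl against a mounted btrfs filesystem (the mount point is illustrative; the call requires CAP_SYS_ADMIN):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */
#include <unistd.h>

int main(void)
{
	struct fstrim_range range = {
		.start = 0,
		.len = (__u64)-1,	/* default: trim the whole filesystem */
		.minlen = 0,
	};
	int fd = open("/mnt/btrfs", O_RDONLY);	/* hypothetical mount point */

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	/* On success the kernel reports the number of bytes trimmed back. */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}
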
@@ -9096,16 +5774,3 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
!atomic_read(&root->will_be_snapshotted));
}
}
-
-void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
-{
- struct btrfs_fs_info *fs_info = bg->fs_info;
-
- spin_lock(&fs_info->unused_bgs_lock);
- if (list_empty(&bg->bg_list)) {
- btrfs_get_block_group(bg);
- trace_btrfs_add_unused_block_group(bg);
- list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
- }
- spin_unlock(&fs_info->unused_bgs_lock);
-}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1ff438fd5bc2..eb8bd0258360 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -14,6 +14,7 @@
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
+#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
@@ -59,12 +60,23 @@ void btrfs_leak_debug_del(struct list_head *entry)
spin_unlock_irqrestore(&leak_lock, flags);
}
-static inline
-void btrfs_leak_debug_check(void)
+static inline void btrfs_extent_buffer_leak_debug_check(void)
{
- struct extent_state *state;
struct extent_buffer *eb;
+ while (!list_empty(&buffers)) {
+ eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+ pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
+ eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
+ list_del(&eb->leak_list);
+ kmem_cache_free(extent_buffer_cache, eb);
+ }
+}
+
+static inline void btrfs_extent_state_leak_debug_check(void)
+{
+ struct extent_state *state;
+
while (!list_empty(&states)) {
state = list_entry(states.next, struct extent_state, leak_list);
pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
@@ -74,14 +86,6 @@ void btrfs_leak_debug_check(void)
list_del(&state->leak_list);
kmem_cache_free(extent_state_cache, state);
}
-
- while (!list_empty(&buffers)) {
- eb = list_entry(buffers.next, struct extent_buffer, leak_list);
- pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
- eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
- list_del(&eb->leak_list);
- kmem_cache_free(extent_buffer_cache, eb);
- }
}
#define btrfs_debug_check_extent_io_range(tree, start, end) \
@@ -105,7 +109,8 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller,
#else
#define btrfs_leak_debug_add(new, head) do {} while (0)
#define btrfs_leak_debug_del(entry) do {} while (0)
-#define btrfs_leak_debug_check() do {} while (0)
+#define btrfs_extent_buffer_leak_debug_check() do {} while (0)
+#define btrfs_extent_state_leak_debug_check() do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
#endif
@@ -196,19 +201,23 @@ static int __must_check flush_write_bio(struct extent_page_data *epd)
return ret;
}
-int __init extent_io_init(void)
+int __init extent_state_cache_init(void)
{
extent_state_cache = kmem_cache_create("btrfs_extent_state",
sizeof(struct extent_state), 0,
SLAB_MEM_SPREAD, NULL);
if (!extent_state_cache)
return -ENOMEM;
+ return 0;
+}
+int __init extent_io_init(void)
+{
extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
sizeof(struct extent_buffer), 0,
SLAB_MEM_SPREAD, NULL);
if (!extent_buffer_cache)
- goto free_state_cache;
+ return -ENOMEM;
if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
offsetof(struct btrfs_io_bio, bio),
@@ -226,23 +235,24 @@ free_bioset:
free_buffer_cache:
kmem_cache_destroy(extent_buffer_cache);
extent_buffer_cache = NULL;
+ return -ENOMEM;
+}
-free_state_cache:
+void __cold extent_state_cache_exit(void)
+{
+ btrfs_extent_state_leak_debug_check();
kmem_cache_destroy(extent_state_cache);
- extent_state_cache = NULL;
- return -ENOMEM;
}
void __cold extent_io_exit(void)
{
- btrfs_leak_debug_check();
+ btrfs_extent_buffer_leak_debug_check();
/*
* Make sure all delayed rcu free are flushed before we
* destroy caches.
*/
rcu_barrier();
- kmem_cache_destroy(extent_state_cache);
kmem_cache_destroy(extent_buffer_cache);
bioset_exit(&btrfs_bioset);
}
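
With the state cache and extent buffer cache now set up and torn down by separate helpers, the module init path has to call both and unwind in reverse order on failure. This is only a kernel-context sketch of an assumed caller; the actual wiring is not part of the hunks shown here:

/* Hypothetical caller; only the four helpers above are real names. */
static int __init extent_caches_init_sketch(void)
{
	int ret;

	ret = extent_state_cache_init();
	if (ret)
		return ret;

	ret = extent_io_init();
	if (ret)
		extent_state_cache_exit();	/* unwind on failure */
	return ret;
}
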
@@ -1676,9 +1686,9 @@ out:
*
* true is returned if we find something, false if nothing was in the tree
*/
-static noinline bool find_delalloc_range(struct extent_io_tree *tree,
- u64 *start, u64 *end, u64 max_bytes,
- struct extent_state **cached_state)
+bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
+ u64 *end, u64 max_bytes,
+ struct extent_state **cached_state)
{
struct rb_node *node;
struct extent_state *state;
@@ -1796,8 +1806,8 @@ again:
/* step one, find a bunch of delalloc bytes starting at start */
delalloc_start = *start;
delalloc_end = 0;
- found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
- max_bytes, &cached_state);
+ found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
+ max_bytes, &cached_state);
if (!found || delalloc_end <= *start) {
*start = delalloc_start;
*end = delalloc_end;
@@ -1899,7 +1909,7 @@ static int __process_pages_contig(struct address_space *mapping,
if (page_ops & PAGE_SET_PRIVATE2)
SetPagePrivate2(pages[i]);
- if (pages[i] == locked_page) {
+ if (locked_page && pages[i] == locked_page) {
put_page(pages[i]);
pages_locked++;
continue;
@@ -1938,9 +1948,9 @@ out:
}
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
- u64 delalloc_end, struct page *locked_page,
- unsigned clear_bits,
- unsigned long page_ops)
+ struct page *locked_page,
+ unsigned clear_bits,
+ unsigned long page_ops)
{
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
NULL);
@@ -2014,8 +2024,8 @@ out:
* set the private field for a given byte offset in the tree. If there isn't
* an extent_state there already, this does nothing.
*/
-static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
- struct io_failure_record *failrec)
+int set_state_failrec(struct extent_io_tree *tree, u64 start,
+ struct io_failure_record *failrec)
{
struct rb_node *node;
struct extent_state *state;
@@ -2042,8 +2052,8 @@ out:
return ret;
}
-static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
- struct io_failure_record **failrec)
+int get_state_failrec(struct extent_io_tree *tree, u64 start,
+ struct io_failure_record **failrec)
{
struct rb_node *node;
struct extent_state *state;
@@ -2534,7 +2544,6 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
bio = btrfs_io_bio_alloc(1);
bio->bi_end_io = endio_func;
bio->bi_iter.bi_sector = failrec->logical >> 9;
- bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
bio->bi_iter.bi_size = 0;
bio->bi_private = data;
@@ -2920,7 +2929,6 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
* a contiguous page to the previous one
* @size: portion of page that we want to write
* @offset: starting offset in the page
- * @bdev: attach newly created bios to this bdev
* @bio_ret: must be valid pointer, newly allocated bio will be stored there
* @end_io_func: end_io callback for new bio
* @mirror_num: desired mirror to read/write
@@ -2931,7 +2939,6 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
struct writeback_control *wbc,
struct page *page, u64 offset,
size_t size, unsigned long pg_offset,
- struct block_device *bdev,
struct bio **bio_ret,
bio_end_io_t end_io_func,
int mirror_num,
@@ -2977,13 +2984,16 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
}
bio = btrfs_bio_alloc(offset);
- bio_set_dev(bio, bdev);
bio_add_page(bio, page, page_size, pg_offset);
bio->bi_end_io = end_io_func;
bio->bi_private = tree;
bio->bi_write_hint = page->mapping->host->i_write_hint;
bio->bi_opf = opf;
if (wbc) {
+ struct block_device *bdev;
+
+ bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
+ bio_set_dev(bio, bdev);
wbc_init_bio(wbc, bio);
wbc_account_cgroup_owner(wbc, page, page_size);
}
@@ -3065,7 +3075,6 @@ static int __do_readpage(struct extent_io_tree *tree,
u64 block_start;
u64 cur_end;
struct extent_map *em;
- struct block_device *bdev;
int ret = 0;
int nr = 0;
size_t pg_offset = 0;
@@ -3142,7 +3151,6 @@ static int __do_readpage(struct extent_io_tree *tree,
offset = em->block_start + extent_offset;
disk_io_size = iosize;
}
- bdev = em->bdev;
block_start = em->block_start;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
block_start = EXTENT_MAP_HOLE;
@@ -3232,7 +3240,7 @@ static int __do_readpage(struct extent_io_tree *tree,
ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
page, offset, disk_io_size,
- pg_offset, bdev, bio,
+ pg_offset, bio,
end_bio_extent_readpage, mirror_num,
*bio_flags,
this_bio_flag,
@@ -3409,7 +3417,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
struct extent_page_data *epd,
loff_t i_size,
unsigned long nr_written,
- unsigned int write_flags, int *nr_ret)
+ int *nr_ret)
{
struct extent_io_tree *tree = epd->tree;
u64 start = page_offset(page);
@@ -3420,11 +3428,11 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
u64 block_start;
u64 iosize;
struct extent_map *em;
- struct block_device *bdev;
size_t pg_offset = 0;
size_t blocksize;
int ret = 0;
int nr = 0;
+ const unsigned int write_flags = wbc_to_write_flags(wbc);
bool compressed;
ret = btrfs_writepage_cow_fixup(page, start, page_end);
@@ -3478,7 +3486,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
iosize = min(em_end - cur, end - cur + 1);
iosize = ALIGN(iosize, blocksize);
offset = em->block_start + extent_offset;
- bdev = em->bdev;
block_start = em->block_start;
compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
free_extent_map(em);
@@ -3520,7 +3527,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
page, offset, iosize, pg_offset,
- bdev, &epd->bio,
+ &epd->bio,
end_bio_extent_writepage,
0, 0, 0, false);
if (ret) {
@@ -3558,11 +3565,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
size_t pg_offset = 0;
loff_t i_size = i_size_read(inode);
unsigned long end_index = i_size >> PAGE_SHIFT;
- unsigned int write_flags = 0;
unsigned long nr_written = 0;
- write_flags = wbc_to_write_flags(wbc);
-
trace___extent_writepage(page, inode, wbc);
WARN_ON(!PageLocked(page));
@@ -3600,7 +3604,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
}
ret = __extent_writepage_io(inode, page, wbc, epd,
- i_size, nr_written, write_flags, &nr);
+ i_size, nr_written, &nr);
if (ret == 1)
goto done_unlocked;
@@ -3628,6 +3632,13 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
TASK_UNINTERRUPTIBLE);
}
+static void end_extent_buffer_writeback(struct extent_buffer *eb)
+{
+ clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+ smp_mb__after_atomic();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+}
+
/*
 * Lock eb pages and flush the bio if we can't get the locks
*
@@ -3699,8 +3710,11 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
if (!trylock_page(p)) {
if (!flush) {
- ret = flush_write_bio(epd);
- if (ret < 0) {
+ int err;
+
+ err = flush_write_bio(epd);
+ if (err < 0) {
+ ret = err;
failed_page_nr = i;
goto err_unlock;
}
@@ -3715,25 +3729,41 @@ err_unlock:
/* Unlock already locked pages */
for (i = 0; i < failed_page_nr; i++)
unlock_page(eb->pages[i]);
+ /*
+ * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
+ * Also set EXTENT_BUFFER_DIRTY back so that future attempts to write out
+ * this eb can be made, undoing everything done before.
+ */
+ btrfs_tree_lock(eb);
+ spin_lock(&eb->refs_lock);
+ set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
+ end_extent_buffer_writeback(eb);
+ spin_unlock(&eb->refs_lock);
+ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
+ fs_info->dirty_metadata_batch);
+ btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+ btrfs_tree_unlock(eb);
return ret;
}
-static void end_extent_buffer_writeback(struct extent_buffer *eb)
-{
- clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
- smp_mb__after_atomic();
- wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
-}
-
static void set_btree_ioerr(struct page *page)
{
struct extent_buffer *eb = (struct extent_buffer *)page->private;
+ struct btrfs_fs_info *fs_info;
SetPageError(page);
if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
return;
/*
+ * If we error out, we should add back the dirty_metadata_bytes
+ * to make it consistent.
+ */
+ fs_info = eb->fs_info;
+ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
+ eb->len, fs_info->dirty_metadata_batch);
+
+ /*
* If writeback for a btree extent that doesn't belong to a log tree
* failed, increment the counter transaction->eb_write_errors.
* We do this because while the transaction is running and before it's
@@ -3823,7 +3853,6 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct extent_page_data *epd)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- struct block_device *bdev = fs_info->fs_devices->latest_bdev;
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
u64 offset = eb->start;
u32 nritems;
@@ -3858,7 +3887,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
clear_page_dirty_for_io(p);
set_page_writeback(p);
ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
- p, offset, PAGE_SIZE, 0, bdev,
+ p, offset, PAGE_SIZE, 0,
&epd->bio,
end_bio_extent_buffer_writepage,
0, 0, 0, false);
@@ -3969,6 +3998,10 @@ retry:
if (!ret) {
free_extent_buffer(eb);
continue;
+ } else if (ret < 0) {
+ done = 1;
+ free_extent_buffer(eb);
+ break;
}
ret = write_one_eb(eb, wbc, &epd);
@@ -4091,7 +4124,7 @@ retry:
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- done_index = page->index;
+ done_index = page->index + 1;
/*
* At this point we hold neither the i_pages lock nor
* the page lock: the page may be truncated or
@@ -4126,16 +4159,6 @@ retry:
ret = __extent_writepage(page, wbc, epd);
if (ret < 0) {
- /*
- * done_index is set past this page,
- * so media errors will not choke
- * background writeout for the entire
- * file. This has consequences for
- * range_cyclic semantics (ie. it may
- * not be suitable for data integrity
- * writeout).
- */
- done_index = page->index + 1;
done = 1;
break;
}
@@ -4210,8 +4233,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
.nr_to_write = nr_pages * 2,
.range_start = start,
.range_end = end + 1,
+ /* We're called from an async helper function */
+ .punt_to_cgroup = 1,
+ .no_cgroup_owner = 1,
};
+ wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
while (start <= end) {
page = find_get_page(mapping, start >> PAGE_SHIFT);
if (clear_page_dirty_for_io(page))
@@ -4226,11 +4253,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
}
ASSERT(ret <= 0);
- if (ret < 0) {
+ if (ret == 0)
+ ret = flush_write_bio(&epd);
+ else
end_write_bio(&epd, ret);
- return ret;
- }
- ret = flush_write_bio(&epd);
+
+ wbc_detach_inode(&wbc_writepages);
return ret;
}
@@ -4322,10 +4350,8 @@ int extent_invalidatepage(struct extent_io_tree *tree,
lock_extent_bits(tree, start, end, &cached_state);
wait_on_page_writeback(page);
- clear_extent_bit(tree, start, end,
- EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING,
- 1, 1, &cached_state);
+ clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 1, 1, &cached_state);
return 0;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 401423b16976..a8551a1f56e2 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -7,35 +7,6 @@
#include <linux/refcount.h>
#include "ulist.h"
-/* bits for the extent state */
-#define EXTENT_DIRTY (1U << 0)
-#define EXTENT_UPTODATE (1U << 1)
-#define EXTENT_LOCKED (1U << 2)
-#define EXTENT_NEW (1U << 3)
-#define EXTENT_DELALLOC (1U << 4)
-#define EXTENT_DEFRAG (1U << 5)
-#define EXTENT_BOUNDARY (1U << 6)
-#define EXTENT_NODATASUM (1U << 7)
-#define EXTENT_CLEAR_META_RESV (1U << 8)
-#define EXTENT_NEED_WAIT (1U << 9)
-#define EXTENT_DAMAGED (1U << 10)
-#define EXTENT_NORESERVE (1U << 11)
-#define EXTENT_QGROUP_RESERVED (1U << 12)
-#define EXTENT_CLEAR_DATA_RESV (1U << 13)
-#define EXTENT_DELALLOC_NEW (1U << 14)
-#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
- EXTENT_CLEAR_DATA_RESV)
-#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING)
-
-/*
- * Redefined bits above which are used only in the device allocation tree,
- * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
- * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
- * manipulation functions
- */
-#define CHUNK_ALLOCATED EXTENT_DIRTY
-#define CHUNK_TRIMMED EXTENT_DEFRAG
-
/*
* flags for bio submission. The high bits indicate the compression
* type for this bio
@@ -89,12 +60,11 @@ enum {
#define BITMAP_LAST_BYTE_MASK(nbits) \
(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
-struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;
-
+struct extent_io_tree;
typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
struct bio *bio, u64 bio_offset);
@@ -111,47 +81,6 @@ struct extent_io_ops {
int mirror);
};
-enum {
- IO_TREE_FS_INFO_FREED_EXTENTS0,
- IO_TREE_FS_INFO_FREED_EXTENTS1,
- IO_TREE_INODE_IO,
- IO_TREE_INODE_IO_FAILURE,
- IO_TREE_RELOC_BLOCKS,
- IO_TREE_TRANS_DIRTY_PAGES,
- IO_TREE_ROOT_DIRTY_LOG_PAGES,
- IO_TREE_SELFTEST,
-};
-
-struct extent_io_tree {
- struct rb_root state;
- struct btrfs_fs_info *fs_info;
- void *private_data;
- u64 dirty_bytes;
- bool track_uptodate;
-
- /* Who owns this io tree, should be one of IO_TREE_* */
- u8 owner;
-
- spinlock_t lock;
- const struct extent_io_ops *ops;
-};
-
-struct extent_state {
- u64 start;
- u64 end; /* inclusive */
- struct rb_node rb_node;
-
- /* ADD NEW ELEMENTS AFTER THIS */
- wait_queue_head_t wq;
- refcount_t refs;
- unsigned state;
-
- struct io_failure_record *failrec;
-
-#ifdef CONFIG_BTRFS_DEBUG
- struct list_head leak_list;
-#endif
-};
#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
@@ -259,152 +188,11 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
u64 start, u64 len,
int create);
-void extent_io_tree_init(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *tree, unsigned int owner,
- void *private_data);
-void extent_io_tree_release(struct extent_io_tree *tree);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
-int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached);
-static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
- return lock_extent_bits(tree, start, end, NULL);
-}
-
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
get_extent_t *get_extent, int mirror_num);
-int __init extent_io_init(void);
-void __cold extent_io_exit(void);
-
-u64 count_range_bits(struct extent_io_tree *tree,
- u64 *start, u64 search_end,
- u64 max_bytes, unsigned bits, int contig);
-
-void free_extent_state(struct extent_state *state);
-int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int filled,
- struct extent_state *cached_state);
-int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, struct extent_changeset *changeset);
-int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int wake, int delete,
- struct extent_state **cached);
-int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int wake, int delete,
- struct extent_state **cached, gfp_t mask,
- struct extent_changeset *changeset);
-
-static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
-}
-
-static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
-{
- return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
- GFP_NOFS, NULL);
-}
-
-static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
- u64 start, u64 end, struct extent_state **cached)
-{
- return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
- GFP_ATOMIC, NULL);
-}
-
-static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned bits)
-{
- int wake = 0;
-
- if (bits & EXTENT_LOCKED)
- wake = 1;
-
- return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
-}
-
-int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, struct extent_changeset *changeset);
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, u64 *failed_start,
- struct extent_state **cached_state, gfp_t mask);
-int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits);
-
-static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned bits)
-{
- return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
-}
-
-static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state)
-{
- return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
- cached_state, GFP_NOFS, NULL);
-}
-
-static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
- u64 end, gfp_t mask)
-{
- return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
- NULL, mask);
-}
-
-static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
-{
- return clear_extent_bit(tree, start, end,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, cached);
-}
-
-int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, unsigned clear_bits,
- struct extent_state **cached_state);
-
-static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned int extra_bits,
- struct extent_state **cached_state)
-{
- return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
- NULL, cached_state, GFP_NOFS);
-}
-
-static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state)
-{
- return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
- NULL, cached_state, GFP_NOFS);
-}
-
-static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
- u64 end)
-{
- return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
- GFP_NOFS);
-}
-
-static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state, gfp_t mask)
-{
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
- cached_state, mask);
-}
-
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits,
- struct extent_state **cached_state);
-void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits);
-int extent_invalidatepage(struct extent_io_tree *tree,
- struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode);
@@ -442,11 +230,6 @@ static inline int num_extent_pages(const struct extent_buffer *eb)
(eb->start >> PAGE_SHIFT);
}
-static inline void extent_buffer_get(struct extent_buffer *eb)
-{
- atomic_inc(&eb->refs);
-}
-
static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -494,9 +277,9 @@ int map_private_extent_buffer(const struct extent_buffer *eb,
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
- u64 delalloc_end, struct page *locked_page,
- unsigned bits_to_clear,
- unsigned long page_ops);
+ struct page *locked_page,
+ unsigned bits_to_clear,
+ unsigned long page_ops);
struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
@@ -508,10 +291,6 @@ struct btrfs_inode;
int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
u64 length, u64 logical, struct page *page,
unsigned int pg_offset, int mirror_num);
-int clean_io_failure(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *failure_tree,
- struct extent_io_tree *io_tree, u64 start,
- struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);
@@ -535,19 +314,12 @@ struct io_failure_record {
};
-void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
- u64 end);
-int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
- struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
struct io_failure_record *failrec,
struct page *page, int pg_offset, int icsum,
bio_end_io_t *endio_func, void *data);
-int free_io_failure(struct extent_io_tree *failure_tree,
- struct extent_io_tree *io_tree,
- struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
struct page *locked_page, u64 *start,
@@ -555,5 +327,4 @@ bool find_lock_delalloc_range(struct inode *inode,
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
-
#endif
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 9558d79faf1e..6f417ff68980 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -214,9 +214,13 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
prev->block_start != EXTENT_MAP_DELALLOC);
+ if (prev->map_lookup || next->map_lookup)
+ ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
+ test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));
+
if (extent_map_end(prev) == next->start &&
prev->flags == next->flags &&
- prev->bdev == next->bdev &&
+ prev->map_lookup == next->map_lookup &&
((next->block_start == EXTENT_MAP_HOLE &&
prev->block_start == EXTENT_MAP_HOLE) ||
(next->block_start == EXTENT_MAP_INLINE &&
@@ -384,6 +388,8 @@ int add_extent_mapping(struct extent_map_tree *tree,
{
int ret = 0;
+ lockdep_assert_held_write(&tree->lock);
+
ret = tree_insert(&tree->map, em);
if (ret)
goto out;
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 473f039fcd7c..8e217337dff9 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -42,15 +42,8 @@ struct extent_map {
u64 block_len;
u64 generation;
unsigned long flags;
- union {
- struct block_device *bdev;
-
- /*
- * used for chunk mappings
- * flags & EXTENT_FLAG_FS_MAPPING must be set
- */
- struct map_lookup *map_lookup;
- };
+ /* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */
+ struct map_lookup *map_lookup;
refcount_t refs;
unsigned int compress_type;
struct list_head list;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 1a599f50837b..3270a40b0777 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -945,7 +945,6 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
u8 type = btrfs_file_extent_type(leaf, fi);
int compress_type = btrfs_file_extent_compression(leaf, fi);
- em->bdev = fs_info->fs_devices->latest_bdev;
btrfs_item_key_to_cpu(leaf, &key, slot);
extent_start = key.offset;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 58a18ed11546..0cb43b682789 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -296,7 +296,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
key.objectid = defrag->ino;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, inode_root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto cleanup;
@@ -537,8 +537,8 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
* we can set things up properly
*/
clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached);
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ 0, 0, cached);
if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
if (start_pos >= isize &&
@@ -559,7 +559,7 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
}
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
- extra_bits, cached, 0);
+ extra_bits, cached);
if (err)
return err;
@@ -667,7 +667,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
}
split->generation = gen;
- split->bdev = em->bdev;
split->flags = flags;
split->compress_type = em->compress_type;
replace_extent_mapping(em_tree, em, split, modified);
@@ -680,7 +679,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
split->start = start + len;
split->len = em->start + em->len - (start + len);
- split->bdev = em->bdev;
split->flags = flags;
split->compress_type = em->compress_type;
split->generation = gen;
@@ -1591,7 +1589,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
- struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL;
u64 release_bytes = 0;
u64 lockstart;
@@ -1611,6 +1608,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
return -ENOMEM;
while (iov_iter_count(i) > 0) {
+ struct extent_state *cached_state = NULL;
size_t offset = offset_in_page(pos);
size_t sector_offset;
size_t write_bytes = min(iov_iter_count(i),
@@ -1636,6 +1634,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
break;
}
+ only_release_metadata = false;
sector_offset = pos & (fs_info->sectorsize - 1);
reserve_bytes = round_up(write_bytes + sector_offset,
fs_info->sectorsize);
@@ -1692,7 +1691,7 @@ again:
force_page_uptodate);
if (ret) {
btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes, true);
+ reserve_bytes);
break;
}
@@ -1704,7 +1703,7 @@ again:
if (extents_locked == -EAGAIN)
goto again;
btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes, true);
+ reserve_bytes);
ret = extents_locked;
break;
}
@@ -1758,11 +1757,21 @@ again:
if (copied > 0)
ret = btrfs_dirty_pages(inode, pages, dirty_pages,
pos, copied, &cached_state);
+
+ /*
+ * If we have not locked the extent range, because the range's
+ * start offset is >= i_size, we might still have a non-NULL
+ * cached extent state, acquired while marking the extent range
+ * as delalloc through btrfs_dirty_pages(). Therefore free any
+ * possible cached extent state to avoid a memory leak.
+ */
if (extents_locked)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
lockstart, lockend, &cached_state);
- btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
- true);
+ else
+ free_extent_state(cached_state);
+
+ btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
if (ret) {
btrfs_drop_pages(pages, num_pages);
break;
@@ -1781,7 +1790,6 @@ again:
set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_NORESERVE, NULL,
NULL, GFP_NOFS);
- only_release_metadata = false;
}
btrfs_drop_pages(pages, num_pages);
@@ -1882,10 +1890,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
u64 start_pos;
u64 end_pos;
ssize_t num_written = 0;
- bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
+ const bool sync = iocb->ki_flags & IOCB_DSYNC;
ssize_t err;
loff_t pos;
- size_t count = iov_iter_count(from);
+ size_t count;
loff_t oldsize;
int clean_page = 0;
@@ -1893,9 +1901,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
(iocb->ki_flags & IOCB_NOWAIT))
return -EOPNOTSUPP;
- if (!inode_trylock(inode)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
return -EAGAIN;
+ } else {
inode_lock(inode);
}
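
The restructured locking honours IOCB_NOWAIT: if the inode lock cannot be taken immediately, the write fails with -EAGAIN instead of blocking. From userspace that flag is reached through pwritev2() with RWF_NOWAIT; a minimal sketch, assuming a glibc new enough to expose both (the file path is illustrative):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[] = "hello";
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };
	int fd = open("/mnt/btrfs/file", O_WRONLY | O_CREAT, 0644);
	ssize_t ret;

	if (fd < 0)
		return 1;
	/* RWF_NOWAIT: fail with EAGAIN rather than block on locks or I/O. */
	ret = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		fprintf(stderr, "write would block, try again later\n");
	close(fd);
	return 0;
}
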
@@ -1906,6 +1915,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
}
pos = iocb->ki_pos;
+ count = iov_iter_count(from);
if (iocb->ki_flags & IOCB_NOWAIT) {
/*
* We will allocate space in case nodatacow is not set,
@@ -2056,25 +2066,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
int ret = 0, err;
- u64 len;
-
- /*
- * If the inode needs a full sync, make sure we use a full range to
- * avoid log tree corruption, due to hole detection racing with ordered
- * extent completion for adjacent ranges, and assertion failures during
- * hole detection.
- */
- if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags)) {
- start = 0;
- end = LLONG_MAX;
- }
- /*
- * The range length can be represented by u64, we have to do the typecasts
- * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
- */
- len = (u64)end - (u64)start + 1;
trace_btrfs_sync_file(file, datasync);
btrfs_init_log_ctx(&ctx, inode);
@@ -2101,6 +2093,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
atomic_inc(&root->log_batch);
/*
+ * If the inode needs a full sync, make sure we use a full range to
+ * avoid log tree corruption, due to hole detection racing with ordered
+ * extent completion for adjacent ranges, and assertion failures during
+ * hole detection. Do this while holding the inode lock, to avoid races
+ * with other tasks.
+ */
+ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags)) {
+ start = 0;
+ end = LLONG_MAX;
+ }
+
+ /*
* Before we acquired the inode's lock, someone may have dirtied more
* pages in the target range. We need to make sure that writeback for
* any such pages does not start while we are logging the inode, because
@@ -2127,8 +2132,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
/*
* We have to do this here to avoid the priority inversion of waiting on
* IO of a lower priority task while holding a transaction open.
+ *
+ * Also, the range length can be represented by u64, so we have to do the
+ * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
*/
- ret = btrfs_wait_ordered_range(inode, start, len);
+ ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
if (ret) {
up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
@@ -2350,7 +2358,6 @@ out:
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
- hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid;
@@ -2439,27 +2446,286 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
return 0;
}
+static int btrfs_insert_clone_extent(struct btrfs_trans_handle *trans,
+ struct inode *inode,
+ struct btrfs_path *path,
+ struct btrfs_clone_extent_info *clone_info,
+ const u64 clone_len)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_file_extent_item *extent;
+ struct extent_buffer *leaf;
+ struct btrfs_key key;
+ int slot;
+ struct btrfs_ref ref = { 0 };
+ u64 ref_offset;
+ int ret;
+
+ if (clone_len == 0)
+ return 0;
+
+ if (clone_info->disk_offset == 0 &&
+ btrfs_fs_incompat(fs_info, NO_HOLES))
+ return 0;
+
+ key.objectid = btrfs_ino(BTRFS_I(inode));
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = clone_info->file_offset;
+ ret = btrfs_insert_empty_item(trans, root, path, &key,
+ clone_info->item_size);
+ if (ret)
+ return ret;
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ write_extent_buffer(leaf, clone_info->extent_buf,
+ btrfs_item_ptr_offset(leaf, slot),
+ clone_info->item_size);
+ extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+ btrfs_set_file_extent_offset(leaf, extent, clone_info->data_offset);
+ btrfs_set_file_extent_num_bytes(leaf, extent, clone_len);
+ btrfs_mark_buffer_dirty(leaf);
+ btrfs_release_path(path);
+
+ /* If it's a hole, nothing more needs to be done. */
+ if (clone_info->disk_offset == 0)
+ return 0;
+
+ inode_add_bytes(inode, clone_len);
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
+ clone_info->disk_offset,
+ clone_info->disk_len, 0);
+ ref_offset = clone_info->file_offset - clone_info->data_offset;
+ btrfs_init_data_ref(&ref, root->root_key.objectid,
+ btrfs_ino(BTRFS_I(inode)), ref_offset);
+ ret = btrfs_inc_extent_ref(trans, &ref);
+
+ return ret;
+}
+
+/*
+ * The respective range must have been previously locked, as well as the inode.
+ * The end offset is inclusive (last byte of the range).
+ * @clone_info is NULL for fallocate's hole punching and non-NULL for extent
+ * cloning.
+ * When cloning, we don't want to end up in a state where we dropped extents
+ * without inserting a new one, so we must abort the transaction to avoid a
+ * corruption.
+ */
+int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path,
+ const u64 start, const u64 end,
+ struct btrfs_clone_extent_info *clone_info,
+ struct btrfs_trans_handle **trans_out)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
+ u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_trans_handle *trans = NULL;
+ struct btrfs_block_rsv *rsv;
+ unsigned int rsv_count;
+ u64 cur_offset;
+ u64 drop_end;
+ u64 len = end - start;
+ int ret = 0;
+
+ if (end <= start)
+ return -EINVAL;
+
+ rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
+ if (!rsv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
+ rsv->failfast = 1;
+
+ /*
+ * 1 - update the inode
+ * 1 - removing the extents in the range
+ * 1 - adding the hole extent if no_holes isn't set or if we are cloning
+ * an extent
+ */
+ if (!btrfs_fs_incompat(fs_info, NO_HOLES) || clone_info)
+ rsv_count = 3;
+ else
+ rsv_count = 2;
+
+ trans = btrfs_start_transaction(root, rsv_count);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out_free;
+ }
+
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
+ min_size, false);
+ BUG_ON(ret);
+ trans->block_rsv = rsv;
+
+ cur_offset = start;
+ while (cur_offset < end) {
+ ret = __btrfs_drop_extents(trans, root, inode, path,
+ cur_offset, end + 1, &drop_end,
+ 1, 0, 0, NULL);
+ if (ret != -ENOSPC) {
+ /*
+ * When cloning we want to avoid transaction aborts when
+ * nothing was done and we are attempting to clone parts
+ * of inline extents, in such cases -EOPNOTSUPP is
+ * returned by __btrfs_drop_extents() without having
+ * changed anything in the file.
+ */
+ if (clone_info && ret && ret != -EOPNOTSUPP)
+ btrfs_abort_transaction(trans, ret);
+ break;
+ }
+
+ trans->block_rsv = &fs_info->trans_block_rsv;
+
+ if (!clone_info && cur_offset < drop_end &&
+ cur_offset < ino_size) {
+ ret = fill_holes(trans, BTRFS_I(inode), path,
+ cur_offset, drop_end);
+ if (ret) {
+ /*
+ * If we failed then we didn't insert our hole
+ * entries for the area we dropped, so now the
+ * fs is corrupted, so we must abort the
+ * transaction.
+ */
+ btrfs_abort_transaction(trans, ret);
+ break;
+ }
+ }
+
+ if (clone_info) {
+ u64 clone_len = drop_end - cur_offset;
+
+ ret = btrfs_insert_clone_extent(trans, inode, path,
+ clone_info, clone_len);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ break;
+ }
+ clone_info->data_len -= clone_len;
+ clone_info->data_offset += clone_len;
+ clone_info->file_offset += clone_len;
+ }
+
+ cur_offset = drop_end;
+
+ ret = btrfs_update_inode(trans, root, inode);
+ if (ret)
+ break;
+
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
+
+ trans = btrfs_start_transaction(root, rsv_count);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ break;
+ }
+
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
+ rsv, min_size, false);
+ BUG_ON(ret); /* shouldn't happen */
+ trans->block_rsv = rsv;
+
+ if (!clone_info) {
+ ret = find_first_non_hole(inode, &cur_offset, &len);
+ if (unlikely(ret < 0))
+ break;
+ if (ret && !len) {
+ ret = 0;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If we were cloning, force the next fsync to be a full one since we
+ * replaced (or just dropped in the case of cloning holes when
+ * NO_HOLES is enabled) extents and extent maps.
+ * This is for the sake of simplicity, and cloning into files larger
+ * than 16MiB would force the full fsync anyway (when
+ * try_release_extent_mapping() is invoked during page cache truncation).
+ */
+ if (clone_info)
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
+
+ if (ret)
+ goto out_trans;
+
+ trans->block_rsv = &fs_info->trans_block_rsv;
+ /*
+ * If we are using the NO_HOLES feature we might already have had a
+ * hole that overlaps part of the region [lockstart, lockend] and
+ * ends at (or beyond) lockend. Since we have no file extent items to
+ * represent holes, drop_end can be less than lockend and so we must
+ * make sure we have an extent map representing the existing hole (the
+ * call to __btrfs_drop_extents() might have dropped the existing extent
+ * map representing the existing hole), otherwise the fast fsync path
+ * will not record the existence of the hole region
+ * [existing_hole_start, lockend].
+ */
+ if (drop_end <= end)
+ drop_end = end + 1;
+ /*
+ * Don't insert file hole extent item if it's for a range beyond eof
+ * (because it's useless) or if it represents a 0 bytes range (when
+ * cur_offset == drop_end).
+ */
+ if (!clone_info && cur_offset < ino_size && cur_offset < drop_end) {
+ ret = fill_holes(trans, BTRFS_I(inode), path,
+ cur_offset, drop_end);
+ if (ret) {
+ /* Same comment as above. */
+ btrfs_abort_transaction(trans, ret);
+ goto out_trans;
+ }
+ }
+ if (clone_info) {
+ ret = btrfs_insert_clone_extent(trans, inode, path, clone_info,
+ clone_info->data_len);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_trans;
+ }
+ }
+
+out_trans:
+ if (!trans)
+ goto out_free;
+
+ trans->block_rsv = &fs_info->trans_block_rsv;
+ if (ret)
+ btrfs_end_transaction(trans);
+ else
+ *trans_out = trans;
+out_free:
+ btrfs_free_block_rsv(fs_info, rsv);
+out:
+ return ret;
+}
+
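
A hedged sketch of how a cloning caller might drive the new helper. The struct fields match the uses in btrfs_insert_clone_extent() above, but this caller itself is hypothetical and not part of the hunks shown here:

/* Hypothetical kernel-context caller; error handling reduced to essentials. */
static int clone_range_sketch(struct inode *dst, struct btrfs_path *path,
			      u64 dst_offset, u64 len,
			      struct btrfs_clone_extent_info *info)
{
	struct btrfs_trans_handle *trans = NULL;
	int ret;

	info->file_offset = dst_offset;
	ret = btrfs_punch_hole_range(dst, path, dst_offset,
				     dst_offset + len - 1, info, &trans);
	if (ret)
		return ret;
	/* On success the transaction handle is handed back for the caller to end. */
	return btrfs_end_transaction(trans);
}
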
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
- struct btrfs_block_rsv *rsv;
- struct btrfs_trans_handle *trans;
+ struct btrfs_trans_handle *trans = NULL;
u64 lockstart;
u64 lockend;
u64 tail_start;
u64 tail_len;
u64 orig_start = offset;
- u64 cur_offset;
- u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
- u64 drop_end;
int ret = 0;
- int err = 0;
- unsigned int rsv_count;
bool same_block;
- bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
u64 ino_size;
bool truncated_block = false;
bool updated_inode = false;
@@ -2566,145 +2832,24 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out;
}
- rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
- if (!rsv) {
- ret = -ENOMEM;
- goto out_free;
- }
- rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
- rsv->failfast = 1;
-
- /*
- * 1 - update the inode
- * 1 - removing the extents in the range
- * 1 - adding the hole extent if no_holes isn't set
- */
- rsv_count = no_holes ? 2 : 3;
- trans = btrfs_start_transaction(root, rsv_count);
- if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
- goto out_free;
- }
-
- ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
- min_size, false);
- BUG_ON(ret);
- trans->block_rsv = rsv;
-
- cur_offset = lockstart;
- len = lockend - cur_offset;
- while (cur_offset < lockend) {
- ret = __btrfs_drop_extents(trans, root, inode, path,
- cur_offset, lockend + 1,
- &drop_end, 1, 0, 0, NULL);
- if (ret != -ENOSPC)
- break;
-
- trans->block_rsv = &fs_info->trans_block_rsv;
-
- if (cur_offset < drop_end && cur_offset < ino_size) {
- ret = fill_holes(trans, BTRFS_I(inode), path,
- cur_offset, drop_end);
- if (ret) {
- /*
- * If we failed then we didn't insert our hole
- * entries for the area we dropped, so now the
- * fs is corrupted, so we must abort the
- * transaction.
- */
- btrfs_abort_transaction(trans, ret);
- err = ret;
- break;
- }
- }
-
- cur_offset = drop_end;
-
- ret = btrfs_update_inode(trans, root, inode);
- if (ret) {
- err = ret;
- break;
- }
-
- btrfs_end_transaction(trans);
- btrfs_btree_balance_dirty(fs_info);
-
- trans = btrfs_start_transaction(root, rsv_count);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- trans = NULL;
- break;
- }
-
- ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
- rsv, min_size, false);
- BUG_ON(ret); /* shouldn't happen */
- trans->block_rsv = rsv;
-
- ret = find_first_non_hole(inode, &cur_offset, &len);
- if (unlikely(ret < 0))
- break;
- if (ret && !len) {
- ret = 0;
- break;
- }
- }
-
- if (ret) {
- err = ret;
- goto out_trans;
- }
-
- trans->block_rsv = &fs_info->trans_block_rsv;
- /*
- * If we are using the NO_HOLES feature we might have had already an
- * hole that overlaps a part of the region [lockstart, lockend] and
- * ends at (or beyond) lockend. Since we have no file extent items to
- * represent holes, drop_end can be less than lockend and so we must
- * make sure we have an extent map representing the existing hole (the
- * call to __btrfs_drop_extents() might have dropped the existing extent
- * map representing the existing hole), otherwise the fast fsync path
- * will not record the existence of the hole region
- * [existing_hole_start, lockend].
- */
- if (drop_end <= lockend)
- drop_end = lockend + 1;
- /*
- * Don't insert file hole extent item if it's for a range beyond eof
- * (because it's useless) or if it represents a 0 bytes range (when
- * cur_offset == drop_end).
- */
- if (cur_offset < ino_size && cur_offset < drop_end) {
- ret = fill_holes(trans, BTRFS_I(inode), path,
- cur_offset, drop_end);
- if (ret) {
- /* Same comment as above. */
- btrfs_abort_transaction(trans, ret);
- err = ret;
- goto out_trans;
- }
- }
-
-out_trans:
- if (!trans)
- goto out_free;
+ ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, NULL,
+ &trans);
+ btrfs_free_path(path);
+ if (ret)
+ goto out;
+ ASSERT(trans != NULL);
inode_inc_iversion(inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
-
- trans->block_rsv = &fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
updated_inode = true;
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
-out_free:
- btrfs_free_path(path);
- btrfs_free_block_rsv(fs_info, rsv);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state);
out_only_mutex:
- if (!updated_inode && truncated_block && !ret && !err) {
+ if (!updated_inode && truncated_block && !ret) {
/*
* If we only end up zeroing part of a page, we still need to
* update the inode item, so that all the time fields are
@@ -2719,16 +2864,18 @@ out_only_mutex:
inode->i_ctime = now;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
} else {
- err = btrfs_update_inode(trans, root, inode);
- ret = btrfs_end_transaction(trans);
+ int ret2;
+
+ ret = btrfs_update_inode(trans, root, inode);
+ ret2 = btrfs_end_transaction(trans);
+ if (!ret)
+ ret = ret2;
}
}
inode_unlock(inode);
- if (ret && !err)
- err = ret;
- return err;
+ return ret;
}
/* Helper structure to record which range is already reserved */
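
The hole punching path above is reached from fallocate() with FALLOC_FL_PUNCH_HOLE. A minimal userspace sketch, assuming a glibc that exposes the FALLOC_FL_* flags via <fcntl.h> (the file path and offsets are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/file", O_RDWR);

	if (fd < 0)
		return 1;
	/* Punch a 1 MiB hole at offset 4 KiB; KEEP_SIZE is mandatory here. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 1024 * 1024) < 0) {
		perror("fallocate");
		return 1;
	}
	close(fd);
	return 0;
}
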
@@ -3201,29 +3348,30 @@ out:
return ret;
}
-static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
+static loff_t find_desired_extent(struct inode *inode, loff_t offset,
+ int whence)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
+ loff_t i_size = inode->i_size;
u64 lockstart;
u64 lockend;
u64 start;
u64 len;
int ret = 0;
- if (inode->i_size == 0)
+ if (i_size == 0 || offset >= i_size)
return -ENXIO;
/*
- * *offset can be negative, in this case we start finding DATA/HOLE from
+ * offset can be negative, in this case we start finding DATA/HOLE from
* the very start of the file.
*/
- start = max_t(loff_t, 0, *offset);
+ start = max_t(loff_t, 0, offset);
lockstart = round_down(start, fs_info->sectorsize);
- lockend = round_up(i_size_read(inode),
- fs_info->sectorsize);
+ lockend = round_up(i_size, fs_info->sectorsize);
if (lockend <= lockstart)
lockend = lockstart + fs_info->sectorsize;
lockend--;
@@ -3232,7 +3380,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state);
- while (start < inode->i_size) {
+ while (start < i_size) {
em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
@@ -3255,46 +3403,39 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
cond_resched();
}
free_extent_map(em);
- if (!ret) {
- if (whence == SEEK_DATA && start >= inode->i_size)
- ret = -ENXIO;
- else
- *offset = min_t(loff_t, start, inode->i_size);
- }
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state);
- return ret;
+ if (ret) {
+ offset = ret;
+ } else {
+ if (whence == SEEK_DATA && start >= i_size)
+ offset = -ENXIO;
+ else
+ offset = min_t(loff_t, start, i_size);
+ }
+
+ return offset;
}
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- int ret;
- inode_lock(inode);
switch (whence) {
- case SEEK_END:
- case SEEK_CUR:
- offset = generic_file_llseek(file, offset, whence);
- goto out;
+ default:
+ return generic_file_llseek(file, offset, whence);
case SEEK_DATA:
case SEEK_HOLE:
- if (offset >= i_size_read(inode)) {
- inode_unlock(inode);
- return -ENXIO;
- }
-
- ret = find_desired_extent(inode, &offset, whence);
- if (ret) {
- inode_unlock(inode);
- return ret;
- }
+ inode_lock_shared(inode);
+ offset = find_desired_extent(inode, offset, whence);
+ inode_unlock_shared(inode);
+ break;
}
- offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
-out:
- inode_unlock(inode);
- return offset;
+ if (offset < 0)
+ return offset;
+
+ return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
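
The switch to a shared inode lock only affects SEEK_DATA and SEEK_HOLE, which userspace drives through lseek(). A small sketch of probing the first data and hole offsets of a file (the path is illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/file", O_RDONLY);
	off_t data, hole;

	if (fd < 0)
		return 1;
	data = lseek(fd, 0, SEEK_DATA);	/* first data byte at or after 0 */
	hole = lseek(fd, 0, SEEK_HOLE);	/* first hole at or after 0 */
	if (data < 0 || hole < 0) {
		perror("lseek");	/* ENXIO: offset at or beyond EOF */
		return 1;
	}
	printf("data at %lld, hole at %lld\n", (long long)data, (long long)hole);
	close(fd);
	return 0;
}
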
static int btrfs_file_open(struct inode *inode, struct file *filp)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 062be9dde4c6..3283da419200 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -20,6 +20,7 @@
#include "volumes.h"
#include "space-info.h"
#include "delalloc-space.h"
+#include "block-group.h"
#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG SZ_32K
@@ -77,7 +78,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
* sure NOFS is set to keep us from deadlocking.
*/
nofs_flag = memalloc_nofs_save();
- inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
+ inode = btrfs_iget_path(fs_info->sb, &location, root, path);
btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
@@ -90,8 +91,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
return inode;
}
-struct inode *lookup_free_space_inode(
- struct btrfs_block_group_cache *block_group,
+struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -106,7 +106,7 @@ struct inode *lookup_free_space_inode(
return inode;
inode = __lookup_free_space_inode(fs_info->tree_root, path,
- block_group->key.objectid);
+ block_group->start);
if (IS_ERR(inode))
return inode;
@@ -189,7 +189,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
}
int create_free_space_inode(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
int ret;
@@ -200,7 +200,7 @@ int create_free_space_inode(struct btrfs_trans_handle *trans,
return ret;
return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
- ino, block_group->key.objectid);
+ ino, block_group->start);
}
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
@@ -210,8 +210,8 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
int ret;
/* 1 for slack space, 1 for updating the inode */
- needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
- btrfs_calc_trans_metadata_size(fs_info, 1);
+ needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
+ btrfs_calc_metadata_size(fs_info, 1);
spin_lock(&rsv->lock);
if (rsv->reserved < needed_bytes)
@@ -223,7 +223,7 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
}
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -384,6 +384,12 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode
if (uptodate && !PageUptodate(page)) {
btrfs_readpage(NULL, page);
lock_page(page);
+ if (page->mapping != inode->i_mapping) {
+ btrfs_err(BTRFS_I(inode)->root->fs_info,
+ "free space cache page truncated");
+ io_ctl_drop_pages(io_ctl);
+ return -EIO;
+ }
if (!PageUptodate(page)) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"error reading free space cache");
@@ -764,7 +770,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
} else {
ASSERT(num_bitmaps);
num_bitmaps--;
- e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
+ e->bitmap = kmem_cache_zalloc(
+ btrfs_free_space_bitmap_cachep, GFP_NOFS);
if (!e->bitmap) {
kmem_cache_free(
btrfs_free_space_cachep, e);
@@ -812,7 +819,7 @@ free_cache:
goto out;
}
-int load_free_space_cache(struct btrfs_block_group_cache *block_group)
+int load_free_space_cache(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -820,7 +827,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
struct btrfs_path *path;
int ret = 0;
bool matched;
- u64 used = btrfs_block_group_used(&block_group->item);
+ u64 used = block_group->used;
/*
* If this block group has been marked to be cleared for one reason or
@@ -874,13 +881,13 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
spin_unlock(&block_group->lock);
ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
- path, block_group->key.objectid);
+ path, block_group->start);
btrfs_free_path(path);
if (ret <= 0)
goto out;
spin_lock(&ctl->tree_lock);
- matched = (ctl->free_space == (block_group->key.offset - used -
+ matched = (ctl->free_space == (block_group->length - used -
block_group->bytes_super));
spin_unlock(&ctl->tree_lock);
@@ -888,7 +895,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
__btrfs_remove_free_space_cache(ctl);
btrfs_warn(fs_info,
"block group %llu has wrong amount of free space",
- block_group->key.objectid);
+ block_group->start);
ret = -1;
}
out:
@@ -901,7 +908,7 @@ out:
btrfs_warn(fs_info,
"failed to load free space cache for block group %llu, rebuilding it now",
- block_group->key.objectid);
+ block_group->start);
}
iput(inode);
@@ -911,7 +918,7 @@ out:
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
struct btrfs_free_space_ctl *ctl,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
int *entries, int *bitmaps,
struct list_head *bitmap_list)
{
@@ -1004,7 +1011,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
+ EXTENT_DELALLOC, 0, 0, NULL);
goto fail;
}
leaf = path->nodes[0];
@@ -1016,9 +1023,8 @@ update_cache_item(struct btrfs_trans_handle *trans,
if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
found_key.offset != offset) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
- inode->i_size - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
- NULL);
+ inode->i_size - 1, EXTENT_DELALLOC, 0,
+ 0, NULL);
btrfs_release_path(path);
goto fail;
}
@@ -1040,7 +1046,7 @@ fail:
}
static noinline_for_stack int write_pinned_extent_entries(
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_io_ctl *io_ctl,
int *entries)
{
@@ -1060,9 +1066,9 @@ static noinline_for_stack int write_pinned_extent_entries(
*/
unpin = block_group->fs_info->pinned_extents;
- start = block_group->key.objectid;
+ start = block_group->start;
- while (start < block_group->key.objectid + block_group->key.offset) {
+ while (start < block_group->start + block_group->length) {
ret = find_first_extent_bit(unpin, start,
&extent_start, &extent_end,
EXTENT_DIRTY, NULL);
@@ -1070,13 +1076,12 @@ static noinline_for_stack int write_pinned_extent_entries(
return 0;
/* This pinned extent is out of our range */
- if (extent_start >= block_group->key.objectid +
- block_group->key.offset)
+ if (extent_start >= block_group->start + block_group->length)
return 0;
extent_start = max(extent_start, start);
- extent_end = min(block_group->key.objectid +
- block_group->key.offset, extent_end + 1);
+ extent_end = min(block_group->start + block_group->length,
+ extent_end + 1);
len = extent_end - extent_start;
*entries += 1;
@@ -1114,7 +1119,7 @@ static int flush_dirty_cache(struct inode *inode)
ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
if (ret)
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
+ EXTENT_DELALLOC, 0, 0, NULL);
return ret;
}
@@ -1140,7 +1145,7 @@ cleanup_write_cache_enospc(struct inode *inode,
static int __btrfs_wait_cache_io(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_io_ctl *io_ctl,
struct btrfs_path *path, u64 offset)
{
@@ -1167,7 +1172,7 @@ out:
#ifdef DEBUG
btrfs_err(root->fs_info,
"failed to write free space cache for block group %llu",
- block_group->key.objectid);
+ block_group->start);
#endif
}
}
@@ -1209,12 +1214,12 @@ static int btrfs_wait_cache_io_root(struct btrfs_root *root,
}
int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
block_group, &block_group->io_ctl,
- path, block_group->key.objectid);
+ path, block_group->start);
}
/**
@@ -1230,7 +1235,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
*/
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_io_ctl *io_ctl,
struct btrfs_trans_handle *trans)
{
@@ -1368,7 +1373,7 @@ out_unlock:
}
int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -1393,7 +1398,7 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
#ifdef DEBUG
btrfs_err(fs_info,
"failed to write free space cache for block group %llu",
- block_group->key.objectid);
+ block_group->start);
#endif
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_ERROR;
@@ -1646,11 +1651,11 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
- struct btrfs_block_group_cache *block_group = ctl->private;
+ struct btrfs_block_group *block_group = ctl->private;
u64 max_bytes;
u64 bitmap_bytes;
u64 extent_bytes;
- u64 size = block_group->key.offset;
+ u64 size = block_group->length;
u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
@@ -1881,7 +1886,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info)
{
unlink_free_space(ctl, bitmap_info);
- kfree(bitmap_info->bitmap);
+ kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
ctl->total_bitmaps--;
ctl->op->recalc_thresholds(ctl);
@@ -1990,7 +1995,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
- struct btrfs_block_group_cache *block_group = ctl->private;
+ struct btrfs_block_group *block_group = ctl->private;
struct btrfs_fs_info *fs_info = block_group->fs_info;
bool forced = false;
@@ -2027,7 +2032,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* so allow those block groups to still be allowed to have a bitmap
* entry.
*/
- if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
+ if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
return false;
return true;
@@ -2042,7 +2047,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
struct btrfs_free_space *bitmap_info;
- struct btrfs_block_group_cache *block_group = NULL;
+ struct btrfs_block_group *block_group = NULL;
int added = 0;
u64 bytes, offset, bytes_added;
int ret;
@@ -2135,7 +2140,8 @@ new_bitmap:
}
/* allocate the bitmap */
- info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
+ info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
+ GFP_NOFS);
spin_lock(&ctl->tree_lock);
if (!info->bitmap) {
ret = -ENOMEM;
@@ -2146,7 +2152,9 @@ new_bitmap:
out:
if (info) {
- kfree(info->bitmap);
+ if (info->bitmap)
+ kmem_cache_free(btrfs_free_space_bitmap_cachep,
+ info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, info);
}
@@ -2376,7 +2384,15 @@ out:
return ret;
}
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_add_free_space(struct btrfs_block_group *block_group,
+ u64 bytenr, u64 size)
+{
+ return __btrfs_add_free_space(block_group->fs_info,
+ block_group->free_space_ctl,
+ bytenr, size);
+}
+
+int btrfs_remove_free_space(struct btrfs_block_group *block_group,
u64 offset, u64 bytes)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2466,7 +2482,7 @@ out:
return ret;
}
-void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
+void btrfs_dump_free_space(struct btrfs_block_group *block_group,
u64 bytes)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -2491,14 +2507,14 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
"%d blocks of free space at or bigger than bytes is", count);
}
-void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
+void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
spin_lock_init(&ctl->tree_lock);
ctl->unit = fs_info->sectorsize;
- ctl->start = block_group->key.objectid;
+ ctl->start = block_group->start;
ctl->private = block_group;
ctl->op = &free_space_op;
INIT_LIST_HEAD(&ctl->trimming_ranges);
@@ -2520,7 +2536,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
*/
static int
__btrfs_return_cluster_to_free_space(
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2586,7 +2602,7 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
spin_unlock(&ctl->tree_lock);
}
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_cluster *cluster;
@@ -2608,7 +2624,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
}
-u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
u64 offset, u64 bytes, u64 empty_size,
u64 *max_extent_size)
{
@@ -2662,7 +2678,7 @@ out:
* cluster and remove the cluster from it.
*/
int btrfs_return_cluster_to_free_space(
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster)
{
struct btrfs_free_space_ctl *ctl;
@@ -2696,7 +2712,7 @@ int btrfs_return_cluster_to_free_space(
return ret;
}
-static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
+static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
struct btrfs_free_space *entry,
u64 bytes, u64 min_start,
@@ -2729,7 +2745,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
* if it couldn't find anything suitably large, or a logical disk offset
* if things worked out
*/
-u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
+u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
u64 min_start, u64 *max_extent_size)
{
@@ -2802,7 +2818,8 @@ out:
if (entry->bytes == 0) {
ctl->free_extents--;
if (entry->bitmap) {
- kfree(entry->bitmap);
+ kmem_cache_free(btrfs_free_space_bitmap_cachep,
+ entry->bitmap);
ctl->total_bitmaps--;
ctl->op->recalc_thresholds(ctl);
}
@@ -2814,7 +2831,7 @@ out:
return ret;
}
-static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
+static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_space *entry,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes,
@@ -2896,7 +2913,7 @@ again:
* extent of cont1_bytes, and other clusters of at least min_bytes.
*/
static noinline int
-setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
struct list_head *bitmaps, u64 offset, u64 bytes,
u64 cont1_bytes, u64 min_bytes)
@@ -2987,7 +3004,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
* that we have already failed to find extents that will work.
*/
static noinline int
-setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+setup_cluster_bitmap(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
struct list_head *bitmaps, u64 offset, u64 bytes,
u64 cont1_bytes, u64 min_bytes)
@@ -3037,7 +3054,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
* returns zero and sets up cluster if things worked out, otherwise
* it returns -enospc
*/
-int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
+int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
{
@@ -3128,7 +3145,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
cluster->block_group = NULL;
}
-static int do_trimming(struct btrfs_block_group_cache *block_group,
+static int do_trimming(struct btrfs_block_group *block_group,
u64 *total_trimmed, u64 start, u64 bytes,
u64 reserved_start, u64 reserved_bytes,
struct btrfs_trim_range *trim_entry)
@@ -3173,7 +3190,7 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
return ret;
}
-static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
+static int trim_no_bitmap(struct btrfs_block_group *block_group,
u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -3258,7 +3275,7 @@ out:
return ret;
}
-static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
+static int trim_bitmaps(struct btrfs_block_group *block_group,
u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -3339,12 +3356,12 @@ next:
return ret;
}
-void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
+void btrfs_get_block_group_trimming(struct btrfs_block_group *cache)
{
atomic_inc(&cache->trimming);
}
-void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
+void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_map_tree *em_tree;
@@ -3360,7 +3377,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
mutex_lock(&fs_info->chunk_mutex);
em_tree = &fs_info->mapping_tree;
write_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, block_group->key.objectid,
+ em = lookup_extent_mapping(em_tree, block_group->start,
1);
BUG_ON(!em); /* logic error, can't happen */
remove_extent_mapping(em_tree, em);
@@ -3379,7 +3396,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
}
}
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+int btrfs_trim_block_group(struct btrfs_block_group *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen)
{
int ret;
@@ -3577,7 +3594,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
* how the free space cache loading stuff works, so you can get really weird
* configurations.
*/
-int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
+int test_add_free_space_entry(struct btrfs_block_group *cache,
u64 offset, u64 bytes, bool bitmap)
{
struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
@@ -3606,7 +3623,7 @@ again:
}
if (!map) {
- map = kzalloc(PAGE_SIZE, GFP_NOFS);
+ map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
if (!map) {
kmem_cache_free(btrfs_free_space_cachep, info);
return -ENOMEM;
@@ -3635,7 +3652,8 @@ again:
if (info)
kmem_cache_free(btrfs_free_space_cachep, info);
- kfree(map);
+ if (map)
+ kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
return 0;
}
@@ -3644,7 +3662,7 @@ again:
* just used to check the absence of space, so if there is free space in the
* range at all we will return 1.
*/
-int test_check_exists(struct btrfs_block_group_cache *cache,
+int test_check_exists(struct btrfs_block_group *cache,
u64 offset, u64 bytes)
{
struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
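
Editor's note on the free-space-cache.c hunks above: the block group range is now read as start/length rather than key.objectid/key.offset. Below is a small user-space sketch of the range clamping done in write_pinned_extent_entries() with that layout; struct block_group and pinned_len_in_group() are simplified stand-ins, not the btrfs structures.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for struct btrfs_block_group: the range is
     * described directly by start/length instead of a btrfs_key. */
    struct block_group {
            uint64_t start;
            uint64_t length;
    };

    /* Clamp a pinned extent [ext_start, ext_end) to the block group range,
     * roughly mirroring the checks in write_pinned_extent_entries(). */
    static uint64_t pinned_len_in_group(const struct block_group *bg,
                                        uint64_t ext_start, uint64_t ext_end)
    {
            uint64_t bg_end = bg->start + bg->length;

            if (ext_start >= bg_end)
                    return 0;                       /* out of our range */
            if (ext_start < bg->start)
                    ext_start = bg->start;
            if (ext_end > bg_end)
                    ext_end = bg_end;
            return ext_end - ext_start;
    }

    int main(void)
    {
            struct block_group bg = { .start = 1048576, .length = 262144 };

            printf("%llu\n", (unsigned long long)
                   pinned_len_in_group(&bg, 1052672, 2097152));
            return 0;
    }
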
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 8760acb55ffd..ba9a23241101 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -36,26 +36,37 @@ struct btrfs_free_space_op {
struct btrfs_free_space *info);
};
-struct btrfs_io_ctl;
+struct btrfs_io_ctl {
+ void *cur, *orig;
+ struct page *page;
+ struct page **pages;
+ struct btrfs_fs_info *fs_info;
+ struct inode *inode;
+ unsigned long size;
+ int index;
+ int num_pages;
+ int entries;
+ int bitmaps;
+ unsigned check_crcs:1;
+};
-struct inode *lookup_free_space_inode(
- struct btrfs_block_group_cache *block_group,
+struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
struct btrfs_path *path);
int create_free_space_inode(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct inode *inode);
-int load_free_space_cache(struct btrfs_block_group_cache *block_group);
+int load_free_space_cache(struct btrfs_block_group *block_group);
int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
struct btrfs_path *path);
@@ -69,48 +80,40 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_path *path,
struct inode *inode);
-void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
+void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group);
int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_free_space_ctl *ctl,
u64 bytenr, u64 size);
-static inline int
-btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
- u64 bytenr, u64 size)
-{
- return __btrfs_add_free_space(block_group->fs_info,
- block_group->free_space_ctl,
- bytenr, size);
-}
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_add_free_space(struct btrfs_block_group *block_group,
+ u64 bytenr, u64 size);
+int btrfs_remove_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
- *block_group);
-u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group);
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
u64 offset, u64 bytes, u64 empty_size,
u64 *max_extent_size);
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
-void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
+void btrfs_dump_free_space(struct btrfs_block_group *block_group,
u64 bytes);
-int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
+int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size);
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
-u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
+u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
u64 min_start, u64 *max_extent_size);
int btrfs_return_cluster_to_free_space(
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster);
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+int btrfs_trim_block_group(struct btrfs_block_group *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen);
/* Support functions for running our sanity tests */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
+int test_add_free_space_entry(struct btrfs_block_group *cache,
u64 offset, u64 bytes, bool bitmap);
-int test_check_exists(struct btrfs_block_group_cache *cache,
- u64 offset, u64 bytes);
+int test_check_exists(struct btrfs_block_group *cache, u64 offset, u64 bytes);
#endif
#endif
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index f5dc115ebba0..258cb3fae17a 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -10,12 +10,13 @@
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"
+#include "block-group.h"
static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
-void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
+void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
{
u32 bitmap_range;
size_t bitmap_size;
@@ -26,8 +27,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
* exceeds that required for using bitmaps.
*/
bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
- num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
- bitmap_range);
+ num_bitmaps = div_u64(cache->length + bitmap_range - 1, bitmap_range);
bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
total_bitmap_size = num_bitmaps * bitmap_size;
cache->bitmap_high_thresh = div_u64(total_bitmap_size,
@@ -44,7 +44,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
}
static int add_new_free_space_info(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_root *root = trans->fs_info->free_space_root;
@@ -53,9 +53,9 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
int ret;
- key.objectid = block_group->key.objectid;
+ key.objectid = block_group->start;
key.type = BTRFS_FREE_SPACE_INFO_KEY;
- key.offset = block_group->key.offset;
+ key.offset = block_group->length;
ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
if (ret)
@@ -77,7 +77,7 @@ out:
EXPORT_FOR_TESTS
struct btrfs_free_space_info *search_free_space_info(
struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, int cow)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -85,16 +85,16 @@ struct btrfs_free_space_info *search_free_space_info(
struct btrfs_key key;
int ret;
- key.objectid = block_group->key.objectid;
+ key.objectid = block_group->start;
key.type = BTRFS_FREE_SPACE_INFO_KEY;
- key.offset = block_group->key.offset;
+ key.offset = block_group->length;
ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
if (ret < 0)
return ERR_PTR(ret);
if (ret != 0) {
btrfs_warn(fs_info, "missing free space info for %llu",
- block_group->key.objectid);
+ block_group->start);
ASSERT(0);
return ERR_PTR(-ENOENT);
}
@@ -179,7 +179,7 @@ static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
EXPORT_FOR_TESTS
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -196,7 +196,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
int done = 0, nr;
int ret;
- bitmap_size = free_space_bitmap_size(block_group->key.offset,
+ bitmap_size = free_space_bitmap_size(block_group->length,
fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
@@ -204,8 +204,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
goto out;
}
- start = block_group->key.objectid;
- end = block_group->key.objectid + block_group->key.offset;
+ start = block_group->start;
+ end = block_group->start + block_group->length;
key.objectid = end - 1;
key.type = (u8)-1;
@@ -223,8 +223,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
- ASSERT(found_key.objectid == block_group->key.objectid);
- ASSERT(found_key.offset == block_group->key.offset);
+ ASSERT(found_key.objectid == block_group->start);
+ ASSERT(found_key.offset == block_group->length);
done = 1;
break;
} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
@@ -270,7 +270,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
- block_group->key.objectid, extent_count,
+ block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;
@@ -319,7 +319,7 @@ out:
EXPORT_FOR_TESTS
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -335,7 +335,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
int done = 0, nr;
int ret;
- bitmap_size = free_space_bitmap_size(block_group->key.offset,
+ bitmap_size = free_space_bitmap_size(block_group->length,
fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
@@ -343,8 +343,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
goto out;
}
- start = block_group->key.objectid;
- end = block_group->key.objectid + block_group->key.offset;
+ start = block_group->start;
+ end = block_group->start + block_group->length;
key.objectid = end - 1;
key.type = (u8)-1;
@@ -362,8 +362,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
- ASSERT(found_key.objectid == block_group->key.objectid);
- ASSERT(found_key.offset == block_group->key.offset);
+ ASSERT(found_key.objectid == block_group->start);
+ ASSERT(found_key.offset == block_group->length);
done = 1;
break;
} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
@@ -412,7 +412,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
- nrbits = div_u64(block_group->key.offset, block_group->fs_info->sectorsize);
+ nrbits = div_u64(block_group->length, block_group->fs_info->sectorsize);
start_bit = find_next_bit_le(bitmap, nrbits, 0);
while (start_bit < nrbits) {
@@ -436,7 +436,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
- block_group->key.objectid, extent_count,
+ block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;
@@ -452,7 +452,7 @@ out:
}
static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path,
int new_extents)
{
@@ -490,7 +490,7 @@ out:
}
EXPORT_FOR_TESTS
-int free_space_test_bit(struct btrfs_block_group_cache *block_group,
+int free_space_test_bit(struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 offset)
{
struct extent_buffer *leaf;
@@ -512,7 +512,7 @@ int free_space_test_bit(struct btrfs_block_group_cache *block_group,
return !!extent_buffer_test_bit(leaf, ptr, i);
}
-static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
+static void free_space_set_bits(struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 *start, u64 *size,
int bit)
{
@@ -580,7 +580,7 @@ static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
* the bitmap.
*/
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path,
u64 start, u64 size, int remove)
{
@@ -596,7 +596,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
* Read the bit for the block immediately before the extent of space if
* that block is within the block group.
*/
- if (start > block_group->key.objectid) {
+ if (start > block_group->start) {
u64 prev_block = start - block_group->fs_info->sectorsize;
key.objectid = prev_block;
@@ -648,7 +648,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
* Read the bit for the block immediately after the extent of space if
* that block is within the block group.
*/
- if (end < block_group->key.objectid + block_group->key.offset) {
+ if (end < block_group->start + block_group->length) {
/* The next block may be in the next bitmap. */
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (end >= key.objectid + key.offset) {
@@ -693,7 +693,7 @@ out:
}
static int remove_free_space_extent(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path,
u64 start, u64 size)
{
@@ -780,7 +780,7 @@ out:
EXPORT_FOR_TESTS
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size)
{
struct btrfs_free_space_info *info;
@@ -811,7 +811,7 @@ int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
u64 start, u64 size)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_path *path;
int ret;
@@ -845,7 +845,7 @@ out:
}
static int add_free_space_extent(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path,
u64 start, u64 size)
{
@@ -879,7 +879,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
new_key.offset = size;
/* Search for a neighbor on the left. */
- if (start == block_group->key.objectid)
+ if (start == block_group->start)
goto right;
key.objectid = start - 1;
key.type = (u8)-1;
@@ -899,8 +899,8 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
found_start = key.objectid;
found_end = key.objectid + key.offset;
- ASSERT(found_start >= block_group->key.objectid &&
- found_end > block_group->key.objectid);
+ ASSERT(found_start >= block_group->start &&
+ found_end > block_group->start);
ASSERT(found_start < start && found_end <= start);
/*
@@ -919,7 +919,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
right:
/* Search for a neighbor on the right. */
- if (end == block_group->key.objectid + block_group->key.offset)
+ if (end == block_group->start + block_group->length)
goto insert;
key.objectid = end;
key.type = (u8)-1;
@@ -939,8 +939,8 @@ right:
found_start = key.objectid;
found_end = key.objectid + key.offset;
- ASSERT(found_start >= block_group->key.objectid &&
- found_end > block_group->key.objectid);
+ ASSERT(found_start >= block_group->start &&
+ found_end > block_group->start);
ASSERT((found_start < start && found_end <= start) ||
(found_start >= end && found_end > end));
@@ -973,7 +973,7 @@ out:
EXPORT_FOR_TESTS
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size)
{
struct btrfs_free_space_info *info;
@@ -1004,7 +1004,7 @@ int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
int add_to_free_space_tree(struct btrfs_trans_handle *trans,
u64 start, u64 size)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_path *path;
int ret;
@@ -1042,7 +1042,7 @@ out:
* through the normal add/remove hooks.
*/
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
struct btrfs_root *extent_root = trans->fs_info->extent_root;
struct btrfs_path *path, *path2;
@@ -1074,7 +1074,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
* BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
* contained in.
*/
- key.objectid = block_group->key.objectid;
+ key.objectid = block_group->start;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = 0;
@@ -1083,8 +1083,8 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
goto out_locked;
ASSERT(ret == 0);
- start = block_group->key.objectid;
- end = block_group->key.objectid + block_group->key.offset;
+ start = block_group->start;
+ end = block_group->start + block_group->length;
while (1) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -1108,7 +1108,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
else
start += key.offset;
} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
- if (key.objectid != block_group->key.objectid)
+ if (key.objectid != block_group->start)
break;
}
@@ -1139,7 +1139,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
struct btrfs_trans_handle *trans;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *free_space_root;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct rb_node *node;
int ret;
@@ -1158,7 +1158,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
node = rb_first(&fs_info->block_group_cache_tree);
while (node) {
- block_group = rb_entry(node, struct btrfs_block_group_cache,
+ block_group = rb_entry(node, struct btrfs_block_group,
cache_node);
ret = populate_free_space_tree(trans, block_group);
if (ret)
@@ -1264,7 +1264,7 @@ abort:
}
static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
int ret;
@@ -1276,12 +1276,12 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
return ret;
return __add_to_free_space_tree(trans, block_group, path,
- block_group->key.objectid,
- block_group->key.offset);
+ block_group->start,
+ block_group->length);
}
int add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_path *path = NULL;
@@ -1311,7 +1311,7 @@ out:
}
int remove_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
struct btrfs_root *root = trans->fs_info->free_space_root;
struct btrfs_path *path;
@@ -1335,8 +1335,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
goto out;
}
- start = block_group->key.objectid;
- end = block_group->key.objectid + block_group->key.offset;
+ start = block_group->start;
+ end = block_group->start + block_group->length;
key.objectid = end - 1;
key.type = (u8)-1;
@@ -1354,8 +1354,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
- ASSERT(found_key.objectid == block_group->key.objectid);
- ASSERT(found_key.offset == block_group->key.offset);
+ ASSERT(found_key.objectid == block_group->start);
+ ASSERT(found_key.offset == block_group->length);
done = 1;
nr++;
path->slots[0]--;
@@ -1390,7 +1390,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
struct btrfs_path *path,
u32 expected_extent_count)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;
@@ -1406,7 +1406,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
fs_info = block_group->fs_info;
root = fs_info->free_space_root;
- end = block_group->key.objectid + block_group->key.offset;
+ end = block_group->start + block_group->length;
while (1) {
ret = btrfs_next_item(root, path);
@@ -1453,7 +1453,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
- block_group->key.objectid, extent_count,
+ block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;
@@ -1471,7 +1471,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
struct btrfs_path *path,
u32 expected_extent_count)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;
@@ -1484,7 +1484,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
fs_info = block_group->fs_info;
root = fs_info->free_space_root;
- end = block_group->key.objectid + block_group->key.offset;
+ end = block_group->start + block_group->length;
while (1) {
ret = btrfs_next_item(root, path);
@@ -1515,7 +1515,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
- block_group->key.objectid, extent_count,
+ block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;
@@ -1531,7 +1531,7 @@ out:
int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_free_space_info *info;
struct btrfs_path *path;
u32 extent_count, flags;
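
Editor's note on the free-space-tree.c hunks above: set_free_space_tree_thresholds() now sizes its bitmaps from block_group->length, and the div_u64(length + bitmap_range - 1, bitmap_range) expression is a plain ceiling division. A tiny stand-alone illustration follows; the 4 KiB sector size, 2048-bit bitmap and 1 GiB group are example values, not taken from the patch.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Ceiling division, the same shape as the div_u64() call in
     * set_free_space_tree_thresholds(). */
    static uint64_t div_round_up(uint64_t n, uint64_t d)
    {
            return (n + d - 1) / d;
    }

    int main(void)
    {
            /* Example values only: 4 KiB sectors, 2048 bits per bitmap item,
             * a 1 GiB block group. */
            uint64_t bitmap_range = 4096ULL * 2048;     /* 8 MiB covered per bitmap */
            uint64_t length = 1024ULL * 1024 * 1024;    /* block_group->length */

            printf("bitmaps needed: %" PRIu64 "\n",
                   div_round_up(length, bitmap_range)); /* prints 128 */
            return 0;
    }
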
diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h
index 22b7602bde25..dc2463e4cfe3 100644
--- a/fs/btrfs/free-space-tree.h
+++ b/fs/btrfs/free-space-tree.h
@@ -6,6 +6,8 @@
#ifndef BTRFS_FREE_SPACE_TREE_H
#define BTRFS_FREE_SPACE_TREE_H
+struct btrfs_caching_control;
+
/*
* The default size for new free space bitmap items. The last bitmap in a block
* group may be truncated, and none of the free space tree code assumes that
@@ -14,14 +16,14 @@
#define BTRFS_FREE_SPACE_BITMAP_SIZE 256
#define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE)
-void set_free_space_tree_thresholds(struct btrfs_block_group_cache *block_group);
+void set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info);
int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
int add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group);
+ struct btrfs_block_group *block_group);
int remove_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group);
+ struct btrfs_block_group *block_group);
int add_to_free_space_tree(struct btrfs_trans_handle *trans,
u64 start, u64 size);
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
@@ -30,21 +32,21 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, int cow);
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size);
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size);
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
-int free_space_test_bit(struct btrfs_block_group_cache *block_group,
+int free_space_test_bit(struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 offset);
#endif
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 30d62ef918b9..668701832845 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -8,9 +8,9 @@
#include "transaction.h"
#include "print-tree.h"
-int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot,
- const char *name,
- int name_len, struct btrfs_inode_ref **ref_ret)
+struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
+ int slot, const char *name,
+ int name_len)
{
struct btrfs_inode_ref *ref;
unsigned long ptr;
@@ -28,19 +28,15 @@ int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot,
cur_offset += len + sizeof(*ref);
if (len != name_len)
continue;
- if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) {
- if (ref_ret)
- *ref_ret = ref;
- return 1;
- }
+ if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
+ return ref;
}
- return 0;
+ return NULL;
}
-int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot,
- u64 ref_objectid,
- const char *name, int name_len,
- struct btrfs_inode_extref **extref_ret)
+struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
+ struct extent_buffer *leaf, int slot, u64 ref_objectid,
+ const char *name, int name_len)
{
struct btrfs_inode_extref *extref;
unsigned long ptr;
@@ -65,15 +61,12 @@ int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot,
if (ref_name_len == name_len &&
btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&
- (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)) {
- if (extref_ret)
- *extref_ret = extref;
- return 1;
- }
+ (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0))
+ return extref;
cur_offset += ref_name_len + sizeof(*extref);
}
- return 0;
+ return NULL;
}
/* Returns NULL if no extref found */
@@ -87,7 +80,6 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
{
int ret;
struct btrfs_key key;
- struct btrfs_inode_extref *extref;
key.objectid = inode_objectid;
key.type = BTRFS_INODE_EXTREF_KEY;
@@ -98,11 +90,9 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
return ERR_PTR(ret);
if (ret > 0)
return NULL;
- if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
- ref_objectid, name, name_len,
- &extref))
- return NULL;
- return extref;
+ return btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
+ ref_objectid, name, name_len);
+
}
static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
@@ -142,9 +132,9 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
* This should always succeed so error here will make the FS
* readonly.
*/
- if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
- ref_objectid,
- name, name_len, &extref)) {
+ extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
+ ref_objectid, name, name_len);
+ if (!extref) {
btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL);
ret = -EROFS;
goto out;
@@ -213,8 +203,10 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
} else if (ret < 0) {
goto out;
}
- if (!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
- name, name_len, &ref)) {
+
+ ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name,
+ name_len);
+ if (!ref) {
ret = -ENOENT;
search_ext_refs = 1;
goto out;
@@ -285,7 +277,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
if (btrfs_find_name_in_ext_backref(path->nodes[0],
path->slots[0],
ref_objectid,
- name, name_len, NULL))
+ name, name_len))
goto out;
btrfs_extend_item(path, ins_len);
@@ -341,9 +333,9 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
ins_len);
if (ret == -EEXIST) {
u32 old_size;
-
- if (btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
- name, name_len, &ref))
+ ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
+ name, name_len);
+ if (ref)
goto out;
old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
@@ -359,7 +351,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
if (ret == -EOVERFLOW) {
if (btrfs_find_name_in_backref(path->nodes[0],
path->slots[0],
- name, name_len, &ref))
+ name, name_len))
ret = -EEXIST;
else
ret = -EMLINK;
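
Editor's note on the inode-item.c hunks above: btrfs_find_name_in_backref() and btrfs_find_name_in_ext_backref() now return the matching item pointer (or NULL) instead of a 0/1 result plus an out parameter. The sketch below shows only the caller-side shape of that convention in plain C; struct inode_ref and find_name() are hypothetical stand-ins.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct inode_ref {                      /* stand-in for btrfs_inode_ref */
            const char *name;
    };

    /* After the change: the lookup returns the match directly, or NULL,
     * instead of returning 1 and filling an out parameter. */
    static struct inode_ref *find_name(struct inode_ref *refs, size_t n,
                                       const char *name)
    {
            for (size_t i = 0; i < n; i++)
                    if (strcmp(refs[i].name, name) == 0)
                            return &refs[i];
            return NULL;
    }

    int main(void)
    {
            struct inode_ref refs[] = { { "a" }, { "b" } };
            struct inode_ref *ref = find_name(refs, 2, "b");

            if (!ref)                       /* what used to be the "ret == 0" branch */
                    return 1;
            printf("found %s\n", ref->name);
            return 0;
    }
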
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 2e8bb402050b..37345fb6191d 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -13,6 +13,19 @@
#include "transaction.h"
#include "delalloc-space.h"
+static void fail_caching_thread(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ btrfs_warn(fs_info, "failed to start inode caching task");
+ btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
+ "disabling inode map caching");
+ spin_lock(&root->ino_cache_lock);
+ root->ino_cache_state = BTRFS_CACHE_ERROR;
+ spin_unlock(&root->ino_cache_lock);
+ wake_up(&root->ino_cache_wait);
+}
+
static int caching_kthread(void *data)
{
struct btrfs_root *root = data;
@@ -29,8 +42,10 @@ static int caching_kthread(void *data)
return 0;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ fail_caching_thread(root);
return -ENOMEM;
+ }
/* Since the commit root is read-only, we can safely skip locking. */
path->skip_locking = 1;
@@ -146,6 +161,7 @@ static void start_caching(struct btrfs_root *root)
spin_lock(&root->ino_cache_lock);
root->ino_cache_state = BTRFS_CACHE_FINISHED;
spin_unlock(&root->ino_cache_lock);
+ wake_up(&root->ino_cache_wait);
return;
}
@@ -160,15 +176,13 @@ static void start_caching(struct btrfs_root *root)
if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
__btrfs_add_free_space(fs_info, ctl, objectid,
BTRFS_LAST_FREE_OBJECTID - objectid + 1);
+ wake_up(&root->ino_cache_wait);
}
tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
root->root_key.objectid);
- if (IS_ERR(tsk)) {
- btrfs_warn(fs_info, "failed to start inode caching task");
- btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
- "disabling inode map caching");
- }
+ if (IS_ERR(tsk))
+ fail_caching_thread(root);
}
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
@@ -186,11 +200,14 @@ again:
wait_event(root->ino_cache_wait,
root->ino_cache_state == BTRFS_CACHE_FINISHED ||
+ root->ino_cache_state == BTRFS_CACHE_ERROR ||
root->free_ino_ctl->free_space > 0);
if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
root->free_ino_ctl->free_space == 0)
return -ENOSPC;
+ else if (root->ino_cache_state == BTRFS_CACHE_ERROR)
+ return btrfs_find_free_objectid(root, objectid);
else
goto again;
}
@@ -419,7 +436,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
* 1 item for free space object
* 3 items for pre-allocation
*/
- trans->bytes_reserved = btrfs_calc_trans_metadata_size(fs_info, 10);
+ trans->bytes_reserved = btrfs_calc_insert_metadata_size(fs_info, 10);
ret = btrfs_block_rsv_add(root, trans->block_rsv,
trans->bytes_reserved,
BTRFS_RESERVE_NO_FLUSH);
@@ -484,12 +501,13 @@ again:
ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
prealloc, prealloc, &alloc_hint);
if (ret) {
- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
+ btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
goto out_put;
}
ret = btrfs_write_out_ino_cache(root, trans, path, inode);
- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
out_put:
iput(inode);
out_release:
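
Editor's note on the inode-map.c hunks above: an error state is added so waiters in btrfs_find_free_ino() are woken when the caching kthread cannot start, and on BTRFS_CACHE_ERROR the caller falls back to btrfs_find_free_objectid(). The sketch below compresses just that post-wait decision into plain C; the enum, helpers and objectid values are illustrative stand-ins.

    #include <errno.h>
    #include <stdio.h>

    enum cache_state { CACHE_STARTED, CACHE_FINISHED, CACHE_ERROR };

    static int find_free_objectid(unsigned long long *objectid)
    {
            *objectid = 256;                /* placeholder slow-path allocation */
            return 0;
    }

    /* Mirrors the decision after wait_event() in btrfs_find_free_ino():
     * a finished but empty cache means ENOSPC, an error state falls back
     * to the slow objectid search. */
    static int pick_ino(enum cache_state state, unsigned long long free_space,
                        unsigned long long *objectid)
    {
            if (state == CACHE_FINISHED && free_space == 0)
                    return -ENOSPC;
            if (state == CACHE_ERROR)
                    return find_free_objectid(objectid);
            *objectid = 257;                /* take one from the in-memory cache */
            return 0;
    }

    int main(void)
    {
            unsigned long long ino = 0;
            int ret = pick_ino(CACHE_ERROR, 0, &ino);

            printf("ret=%d ino=%llu\n", ret, ino);
            return 0;
    }
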
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1af069a9a0c7..56032c518b26 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -30,6 +30,7 @@
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
+#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -46,8 +47,8 @@
#include "backref.h"
#include "props.h"
#include "qgroup.h"
-#include "dedupe.h"
#include "delalloc-space.h"
+#include "block-group.h"
struct btrfs_iget_args {
struct btrfs_key *location;
@@ -74,15 +75,15 @@ static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
+struct kmem_cache *btrfs_free_space_bitmap_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
- u64 start, u64 end, u64 delalloc_end,
- int *page_started, unsigned long *nr_written,
- int unlock, struct btrfs_dedupe_hash *hash);
+ u64 start, u64 end, int *page_started,
+ unsigned long *nr_written, int unlock);
static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
u64 orig_start, u64 block_start,
u64 block_len, u64 orig_block_len,
@@ -178,6 +179,9 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
size_t cur_size = size;
unsigned long offset;
+ ASSERT((compressed_size > 0 && compressed_pages) ||
+ (compressed_size == 0 && !compressed_pages));
+
if (compressed_size && compressed_pages)
cur_size = compressed_size;
@@ -364,6 +368,7 @@ struct async_chunk {
u64 end;
unsigned int write_flags;
struct list_head extents;
+ struct cgroup_subsys_state *blkcg_css;
struct btrfs_work work;
atomic_t *pending;
};
@@ -395,10 +400,31 @@ static noinline int add_async_extent(struct async_chunk *cow,
return 0;
}
+/*
+ * Check if the inode has flags compatible with compression
+ */
+static inline bool inode_can_compress(struct inode *inode)
+{
+ if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
+ BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
+ return false;
+ return true;
+}
+
+/*
+ * Check if the inode needs to be submitted to compression, based on mount
+ * options, defragmentation, properties or heuristics.
+ */
static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ if (!inode_can_compress(inode)) {
+ WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
+ KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
+ btrfs_ino(BTRFS_I(inode)));
+ return 0;
+ }
/* force compress */
if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
return 1;
@@ -441,8 +467,7 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
* are written in the same order that the flusher thread sent them
* down.
*/
-static noinline void compress_file_range(struct async_chunk *async_chunk,
- int *num_added)
+static noinline int compress_file_range(struct async_chunk *async_chunk)
{
struct inode *inode = async_chunk->inode;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -450,6 +475,7 @@ static noinline void compress_file_range(struct async_chunk *async_chunk,
u64 start = async_chunk->start;
u64 end = async_chunk->end;
u64 actual_end;
+ u64 i_size;
int ret = 0;
struct page **pages = NULL;
unsigned long nr_pages;
@@ -458,12 +484,25 @@ static noinline void compress_file_range(struct async_chunk *async_chunk,
int i;
int will_compress;
int compress_type = fs_info->compress_type;
+ int compressed_extents = 0;
int redirty = 0;
inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
SZ_16K);
- actual_end = min_t(u64, i_size_read(inode), end + 1);
+ /*
+ * We need to save i_size before now because it could change in between
+ * us evaluating the size and assigning it. This is because we lock and
+ * unlock the page in truncate and fallocate, and then modify the i_size
+ * later on.
+ *
+ * The barriers are to emulate READ_ONCE, remove that once i_size_read
+ * does that for us.
+ */
+ barrier();
+ i_size = i_size_read(inode);
+ barrier();
+ actual_end = min_t(u64, i_size, end + 1);
again:
will_compress = 0;
nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
@@ -594,14 +633,21 @@ cont:
* our outstanding extent for clearing delalloc for this
* range.
*/
- extent_clear_unlock_delalloc(inode, start, end, end,
- NULL, clear_flags,
+ extent_clear_unlock_delalloc(inode, start, end, NULL,
+ clear_flags,
PAGE_UNLOCK |
PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK |
page_error_op |
PAGE_END_WRITEBACK);
- goto free_pages_out;
+
+ for (i = 0; i < nr_pages; i++) {
+ WARN_ON(pages[i]->mapping);
+ put_page(pages[i]);
+ }
+ kfree(pages);
+
+ return 0;
}
}
@@ -620,7 +666,7 @@ cont:
*/
total_in = ALIGN(total_in, PAGE_SIZE);
if (total_compressed + blocksize <= total_in) {
- *num_added += 1;
+ compressed_extents++;
/*
* The async work queues will take care of doing actual
@@ -637,7 +683,7 @@ cont:
cond_resched();
goto again;
}
- return;
+ return compressed_extents;
}
}
if (pages) {
@@ -667,25 +713,20 @@ cleanup_and_bail_uncompressed:
* to our extent and set things up for the async work queue to run
* cow_file_range to do the normal delalloc dance.
*/
- if (page_offset(async_chunk->locked_page) >= start &&
- page_offset(async_chunk->locked_page) <= end)
+ if (async_chunk->locked_page &&
+     page_offset(async_chunk->locked_page) >= start &&
+     page_offset(async_chunk->locked_page) <= end) {
__set_page_dirty_nobuffers(async_chunk->locked_page);
/* unlocked later on in the async handlers */
+ }
if (redirty)
extent_range_redirty_for_io(inode, start, end);
add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
BTRFS_COMPRESS_NONE);
- *num_added += 1;
-
- return;
+ compressed_extents++;
-free_pages_out:
- for (i = 0; i < nr_pages; i++) {
- WARN_ON(pages[i]->mapping);
- put_page(pages[i]);
- }
- kfree(pages);
+ return compressed_extents;
}
static void free_async_extent_pages(struct async_extent *async_extent)
@@ -741,10 +782,7 @@ retry:
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
- async_extent->start +
- async_extent->ram_size - 1,
- &page_started, &nr_written, 0,
- NULL);
+ &page_started, &nr_written, 0);
/* JDM XXX */
@@ -760,7 +798,7 @@ retry:
async_extent->start +
async_extent->ram_size - 1,
WB_SYNC_ALL);
- else if (ret)
+ else if (ret && async_chunk->locked_page)
unlock_page(async_chunk->locked_page);
kfree(async_extent);
cond_resched();
@@ -834,8 +872,6 @@ retry:
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
- async_extent->start +
- async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK);
@@ -845,7 +881,8 @@ retry:
ins.objectid,
ins.offset, async_extent->pages,
async_extent->nr_pages,
- async_chunk->write_flags)) {
+ async_chunk->write_flags,
+ async_chunk->blkcg_css)) {
struct page *p = async_extent->pages[0];
const u64 start = async_extent->start;
const u64 end = start + async_extent->ram_size - 1;
@@ -854,7 +891,7 @@ retry:
btrfs_writepage_endio_finish_ordered(p, start, end, 0);
p->mapping = NULL;
- extent_clear_unlock_delalloc(inode, start, end, end,
+ extent_clear_unlock_delalloc(inode, start, end,
NULL, 0,
PAGE_END_WRITEBACK |
PAGE_SET_ERROR);
@@ -872,8 +909,6 @@ out_free:
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
- async_extent->start +
- async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
@@ -932,9 +967,8 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
*/
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
- u64 start, u64 end, u64 delalloc_end,
- int *page_started, unsigned long *nr_written,
- int unlock, struct btrfs_dedupe_hash *hash)
+ u64 start, u64 end, int *page_started,
+ unsigned long *nr_written, int unlock)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -973,8 +1007,7 @@ static noinline int cow_file_range(struct inode *inode,
* our outstanding extent for clearing delalloc for this
* range.
*/
- extent_clear_unlock_delalloc(inode, start, end,
- delalloc_end, NULL,
+ extent_clear_unlock_delalloc(inode, start, end, NULL,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
@@ -1057,7 +1090,7 @@ static noinline int cow_file_range(struct inode *inode,
extent_clear_unlock_delalloc(inode, start,
start + ram_size - 1,
- delalloc_end, locked_page,
+ locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC,
page_ops);
if (num_bytes < cur_alloc_size)
@@ -1102,7 +1135,6 @@ out_unlock:
if (extent_reserved) {
extent_clear_unlock_delalloc(inode, start,
start + cur_alloc_size,
- start + cur_alloc_size,
locked_page,
clear_bits,
page_ops);
@@ -1110,8 +1142,7 @@ out_unlock:
if (start >= end)
goto out;
}
- extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
- locked_page,
+ extent_clear_unlock_delalloc(inode, start, end, locked_page,
clear_bits | EXTENT_CLEAR_DATA_RESV,
page_ops);
goto out;
@@ -1123,12 +1154,12 @@ out_unlock:
static noinline void async_cow_start(struct btrfs_work *work)
{
struct async_chunk *async_chunk;
- int num_added = 0;
+ int compressed_extents;
async_chunk = container_of(work, struct async_chunk, work);
- compress_file_range(async_chunk, &num_added);
- if (num_added == 0) {
+ compressed_extents = compress_file_range(async_chunk);
+ if (compressed_extents == 0) {
btrfs_add_delayed_iput(async_chunk->inode);
async_chunk->inode = NULL;
}
@@ -1169,6 +1200,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
async_chunk = container_of(work, struct async_chunk, work);
if (async_chunk->inode)
btrfs_add_delayed_iput(async_chunk->inode);
+ if (async_chunk->blkcg_css)
+ css_put(async_chunk->blkcg_css);
/*
* Since the pointer to 'pending' is at the beginning of the array of
* async_chunk's, freeing it ensures the whole array has been freed.
@@ -1177,12 +1210,14 @@ static noinline void async_cow_free(struct btrfs_work *work)
kvfree(async_chunk->pending);
}
-static int cow_file_range_async(struct inode *inode, struct page *locked_page,
+static int cow_file_range_async(struct inode *inode,
+ struct writeback_control *wbc,
+ struct page *locked_page,
u64 start, u64 end, int *page_started,
- unsigned long *nr_written,
- unsigned int write_flags)
+ unsigned long *nr_written)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
struct async_cow *ctx;
struct async_chunk *async_chunk;
unsigned long nr_pages;
@@ -1191,6 +1226,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
int i;
bool should_compress;
unsigned nofs_flag;
+ const unsigned int write_flags = wbc_to_write_flags(wbc);
unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
@@ -1214,7 +1250,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
PAGE_SET_ERROR;
- extent_clear_unlock_delalloc(inode, start, end, 0, locked_page,
+ extent_clear_unlock_delalloc(inode, start, end, locked_page,
clear_bits, page_ops);
return -ENOMEM;
}
@@ -1237,14 +1273,45 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_chunk[i].inode = inode;
async_chunk[i].start = start;
async_chunk[i].end = cur_end;
- async_chunk[i].locked_page = locked_page;
async_chunk[i].write_flags = write_flags;
INIT_LIST_HEAD(&async_chunk[i].extents);
- btrfs_init_work(&async_chunk[i].work,
- btrfs_delalloc_helper,
- async_cow_start, async_cow_submit,
- async_cow_free);
+ /*
+		 * The locked_page comes all the way from writepage and it's
+		 * the original page we were actually given.  As we spread
+		 * this large delalloc region across multiple async_chunk
+		 * structs, only the first struct needs a pointer to locked_page.
+		 *
+		 * This way we don't need racy decisions about who is supposed
+ * to unlock it.
+ */
+ if (locked_page) {
+ /*
+ * Depending on the compressibility, the pages might or
+ * might not go through async. We want all of them to
+ * be accounted against wbc once. Let's do it here
+ * before the paths diverge. wbc accounting is used
+ * only for foreign writeback detection and doesn't
+ * need full accuracy. Just account the whole thing
+ * against the first page.
+ */
+ wbc_account_cgroup_owner(wbc, locked_page,
+ cur_end - start);
+ async_chunk[i].locked_page = locked_page;
+ locked_page = NULL;
+ } else {
+ async_chunk[i].locked_page = NULL;
+ }
+
+ if (blkcg_css != blkcg_root_css) {
+ css_get(blkcg_css);
+ async_chunk[i].blkcg_css = blkcg_css;
+ } else {
+ async_chunk[i].blkcg_css = NULL;
+ }
+
+ btrfs_init_work(&async_chunk[i].work, async_cow_start,
+ async_cow_submit, async_cow_free);
nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
atomic_add(nr_pages, &fs_info->async_delalloc_pages);
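A schematic of the per-chunk state this loop produces, with a hypothetical three-chunk split chosen purely for illustration:

	/*
	 * Hypothetical three-chunk split, for illustration only:
	 *
	 *   async_chunk[0].locked_page = locked_page;   first chunk owns the page
	 *   async_chunk[1].locked_page = NULL;
	 *   async_chunk[2].locked_page = NULL;
	 *
	 * Exactly one worker is therefore responsible for dirtying/unlocking
	 * the page, and the wbc accounting above is done only once, against it.
	 */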
@@ -1289,36 +1356,25 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
*/
static noinline int run_delalloc_nocow(struct inode *inode,
struct page *locked_page,
- u64 start, u64 end, int *page_started, int force,
- unsigned long *nr_written)
+ const u64 start, const u64 end,
+ int *page_started, int force,
+ unsigned long *nr_written)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_buffer *leaf;
struct btrfs_path *path;
- struct btrfs_file_extent_item *fi;
- struct btrfs_key found_key;
- struct extent_map *em;
- u64 cow_start;
- u64 cur_offset;
- u64 extent_end;
- u64 extent_offset;
- u64 disk_bytenr;
- u64 num_bytes;
- u64 disk_num_bytes;
- u64 ram_bytes;
- int extent_type;
+ u64 cow_start = (u64)-1;
+ u64 cur_offset = start;
int ret;
- int type;
- int nocow;
- int check_prev = 1;
- bool nolock;
+ bool check_prev = true;
+ const bool freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode));
u64 ino = btrfs_ino(BTRFS_I(inode));
+ bool nocow = false;
+ u64 disk_bytenr = 0;
path = btrfs_alloc_path();
if (!path) {
- extent_clear_unlock_delalloc(inode, start, end, end,
- locked_page,
+ extent_clear_unlock_delalloc(inode, start, end, locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, PAGE_UNLOCK |
@@ -1328,15 +1384,29 @@ static noinline int run_delalloc_nocow(struct inode *inode,
return -ENOMEM;
}
- nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
-
- cow_start = (u64)-1;
- cur_offset = start;
while (1) {
+ struct btrfs_key found_key;
+ struct btrfs_file_extent_item *fi;
+ struct extent_buffer *leaf;
+ u64 extent_end;
+ u64 extent_offset;
+ u64 num_bytes = 0;
+ u64 disk_num_bytes;
+ u64 ram_bytes;
+ int extent_type;
+
+ nocow = false;
+
ret = btrfs_lookup_file_extent(NULL, root, path, ino,
cur_offset, 0);
if (ret < 0)
goto error;
+
+ /*
+ * If there is no extent for our range when doing the initial
+ * search, then go back to the previous slot as it will be the
+ * one containing the search offset
+ */
if (ret > 0 && path->slots[0] > 0 && check_prev) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1345,8 +1415,9 @@ static noinline int run_delalloc_nocow(struct inode *inode,
found_key.type == BTRFS_EXTENT_DATA_KEY)
path->slots[0]--;
}
- check_prev = 0;
+ check_prev = false;
next_slot:
+ /* Go to next leaf if we have exhausted the current one */
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
@@ -1360,28 +1431,40 @@ next_slot:
leaf = path->nodes[0];
}
- nocow = 0;
- disk_bytenr = 0;
- num_bytes = 0;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ /* Didn't find anything for our INO */
if (found_key.objectid > ino)
break;
+ /*
+ * Keep searching until we find an EXTENT_ITEM or there are no
+ * more extents for this inode
+ */
if (WARN_ON_ONCE(found_key.objectid < ino) ||
found_key.type < BTRFS_EXTENT_DATA_KEY) {
path->slots[0]++;
goto next_slot;
}
+
+ /* Found key is not EXTENT_DATA_KEY or starts after req range */
if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
found_key.offset > end)
break;
+ /*
+ * If the found extent starts after requested offset, then
+ * adjust extent_end to be right before this extent begins
+ */
if (found_key.offset > cur_offset) {
extent_end = found_key.offset;
extent_type = 0;
goto out_check;
}
+ /*
+		 * Found extent which begins before our range and potentially
+		 * intersects it
+ */
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
@@ -1395,26 +1478,36 @@ next_slot:
btrfs_file_extent_num_bytes(leaf, fi);
disk_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf, fi);
+ /*
+ * If extent we got ends before our range starts, skip
+ * to next extent
+ */
if (extent_end <= start) {
path->slots[0]++;
goto next_slot;
}
+ /* Skip holes */
if (disk_bytenr == 0)
goto out_check;
+ /* Skip compressed/encrypted/encoded extents */
if (btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
goto out_check;
/*
- * Do the same check as in btrfs_cross_ref_exist but
- * without the unnecessary search.
+			 * If the extent was created before the volume's last
+			 * snapshot, it is shared and we can't do nocow.
+			 * This is the same check as in
+ * btrfs_cross_ref_exist but without calling
+ * btrfs_search_slot.
*/
- if (!nolock &&
+ if (!freespace_inode &&
btrfs_file_extent_generation(leaf, fi) <=
btrfs_root_last_snapshot(&root->root_item))
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
goto out_check;
+ /* If extent is RO, we must COW it */
if (btrfs_extent_readonly(fs_info, disk_bytenr))
goto out_check;
ret = btrfs_cross_ref_exist(root, ino,
@@ -1431,17 +1524,17 @@ next_slot:
goto error;
}
- WARN_ON_ONCE(nolock);
+ WARN_ON_ONCE(freespace_inode);
goto out_check;
}
disk_bytenr += extent_offset;
disk_bytenr += cur_offset - found_key.offset;
num_bytes = min(end + 1, extent_end) - cur_offset;
/*
- * if there are pending snapshots for this root,
- * we fall into common COW way.
+ * If there are pending snapshots for this root, we
+ * fall into common COW way
*/
- if (!nolock && atomic_read(&root->snapshot_force_cow))
+ if (!freespace_inode && atomic_read(&root->snapshot_force_cow))
goto out_check;
/*
* force cow if csum exists in the range.
@@ -1460,27 +1553,29 @@ next_slot:
cur_offset = cow_start;
goto error;
}
- WARN_ON_ONCE(nolock);
+ WARN_ON_ONCE(freespace_inode);
goto out_check;
}
if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
goto out_check;
- nocow = 1;
+ nocow = true;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- extent_end = found_key.offset +
- btrfs_file_extent_ram_bytes(leaf, fi);
- extent_end = ALIGN(extent_end,
- fs_info->sectorsize);
+ extent_end = found_key.offset + ram_bytes;
+ extent_end = ALIGN(extent_end, fs_info->sectorsize);
+ /* Skip extents outside of our requested range */
+ if (extent_end <= start) {
+ path->slots[0]++;
+ goto next_slot;
+ }
} else {
+			/* If this triggers then we have memory corruption */
BUG();
}
out_check:
- if (extent_end <= start) {
- path->slots[0]++;
- if (nocow)
- btrfs_dec_nocow_writers(fs_info, disk_bytenr);
- goto next_slot;
- }
+ /*
+ * If nocow is false then record the beginning of the range
+ * that needs to be COWed
+ */
if (!nocow) {
if (cow_start == (u64)-1)
cow_start = cur_offset;
@@ -1492,11 +1587,16 @@ out_check:
}
btrfs_release_path(path);
+
+ /*
+	 * COW the range from cow_start to found_key.offset - 1, as the key
+	 * contains the start of the first extent that can be NOCOWed,
+	 * which follows the range that needs to be COWed.
+ */
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page,
cow_start, found_key.offset - 1,
- end, page_started, nr_written, 1,
- NULL);
+ page_started, nr_written, 1);
if (ret) {
if (nocow)
btrfs_dec_nocow_writers(fs_info,
@@ -1508,6 +1608,7 @@ out_check:
if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
u64 orig_start = found_key.offset - extent_offset;
+ struct extent_map *em;
em = create_io_em(inode, cur_offset, num_bytes,
orig_start,
@@ -1524,19 +1625,29 @@ out_check:
goto error;
}
free_extent_map(em);
- }
-
- if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
- type = BTRFS_ORDERED_PREALLOC;
+ ret = btrfs_add_ordered_extent(inode, cur_offset,
+ disk_bytenr, num_bytes,
+ num_bytes,
+ BTRFS_ORDERED_PREALLOC);
+ if (ret) {
+ btrfs_drop_extent_cache(BTRFS_I(inode),
+ cur_offset,
+ cur_offset + num_bytes - 1,
+ 0);
+ goto error;
+ }
} else {
- type = BTRFS_ORDERED_NOCOW;
+ ret = btrfs_add_ordered_extent(inode, cur_offset,
+ disk_bytenr, num_bytes,
+ num_bytes,
+ BTRFS_ORDERED_NOCOW);
+ if (ret)
+ goto error;
}
- ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
- num_bytes, num_bytes, type);
if (nocow)
btrfs_dec_nocow_writers(fs_info, disk_bytenr);
- BUG_ON(ret); /* -ENOMEM */
+ nocow = false;
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID)
@@ -1549,7 +1660,7 @@ out_check:
num_bytes);
extent_clear_unlock_delalloc(inode, cur_offset,
- cur_offset + num_bytes - 1, end,
+ cur_offset + num_bytes - 1,
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC |
EXTENT_CLEAR_DATA_RESV,
@@ -1574,15 +1685,18 @@ out_check:
if (cow_start != (u64)-1) {
cur_offset = end;
- ret = cow_file_range(inode, locked_page, cow_start, end, end,
- page_started, nr_written, 1, NULL);
+ ret = cow_file_range(inode, locked_page, cow_start, end,
+ page_started, nr_written, 1);
if (ret)
goto error;
}
error:
+ if (nocow)
+ btrfs_dec_nocow_writers(fs_info, disk_bytenr);
+
if (ret && cur_offset < end)
- extent_clear_unlock_delalloc(inode, cur_offset, end, end,
+ extent_clear_unlock_delalloc(inode, cur_offset, end,
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
@@ -1623,7 +1737,6 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
{
int ret;
int force_cow = need_force_cow(inode, start, end);
- unsigned int write_flags = wbc_to_write_flags(wbc);
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
@@ -1631,15 +1744,15 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
- } else if (!inode_need_compress(inode, start, end)) {
- ret = cow_file_range(inode, locked_page, start, end, end,
- page_started, nr_written, 1, NULL);
+ } else if (!inode_can_compress(inode) ||
+ !inode_need_compress(inode, start, end)) {
+ ret = cow_file_range(inode, locked_page, start, end,
+ page_started, nr_written, 1);
} else {
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags);
- ret = cow_file_range_async(inode, locked_page, start, end,
- page_started, nr_written,
- write_flags);
+ ret = cow_file_range_async(inode, wbc, locked_page, start, end,
+ page_started, nr_written);
}
if (ret)
btrfs_cleanup_ordered_extents(inode, locked_page, start,
@@ -2035,7 +2148,7 @@ static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
}
mapit:
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
out:
if (ret) {
@@ -2068,7 +2181,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
unsigned int extra_bits,
- struct extent_state **cached_state, int dedupe)
+ struct extent_state **cached_state)
{
WARN_ON(PAGE_ALIGNED(end));
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
@@ -2134,17 +2247,21 @@ again:
}
ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
- &cached_state, 0);
+ &cached_state);
if (ret) {
mapping_set_error(page->mapping, ret);
end_extent_writepage(page, ret, page_start, page_end);
ClearPageChecked(page);
- goto out;
+ goto out_reserved;
}
ClearPageChecked(page);
set_page_dirty(page);
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false);
+out_reserved:
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
+ if (ret)
+ btrfs_delalloc_release_space(inode, data_reserved, page_start,
+ PAGE_SIZE, true);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state);
@@ -2185,8 +2302,7 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
SetPageChecked(page);
get_page(page);
- btrfs_init_work(&fixup->work, btrfs_fixup_helper,
- btrfs_writepage_fixup_worker, NULL, NULL);
+ btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
fixup->page = page;
btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
return -EBUSY;
@@ -2600,7 +2716,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
if (IS_ERR(inode)) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
return 0;
@@ -2924,7 +3040,7 @@ out_kfree:
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
u64 start, u64 len)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
cache = btrfs_lookup_block_group(fs_info, start);
ASSERT(cache);
@@ -2952,7 +3068,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
int compress_type = 0;
int ret = 0;
u64 logical_len = ordered_extent->len;
- bool nolock;
+ bool freespace_inode;
bool truncated = false;
bool range_locked = false;
bool clear_new_delalloc_bytes = false;
@@ -2963,7 +3079,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
!test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
clear_new_delalloc_bytes = true;
- nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
+ freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode));
if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
ret = -EIO;
@@ -2994,8 +3110,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
ordered_extent->len);
btrfs_ordered_update_i_size(inode, 0, ordered_extent);
- if (nolock)
- trans = btrfs_join_transaction_nolock(root);
+ if (freespace_inode)
+ trans = btrfs_join_transaction_spacecache(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
@@ -3029,8 +3145,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
EXTENT_DEFRAG, 0, 0, &cached_state);
}
- if (nolock)
- trans = btrfs_join_transaction_nolock(root);
+ if (freespace_inode)
+ trans = btrfs_join_transaction_spacecache(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
@@ -3179,7 +3295,6 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered_extent = NULL;
struct btrfs_workqueue *wq;
- btrfs_work_func_t func;
trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
@@ -3188,16 +3303,12 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
end - start + 1, uptodate))
return;
- if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
+ if (btrfs_is_free_space_inode(BTRFS_I(inode)))
wq = fs_info->endio_freespace_worker;
- func = btrfs_freespace_write_helper;
- } else {
+ else
wq = fs_info->endio_write_workers;
- func = btrfs_endio_write_helper;
- }
- btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
- NULL);
+ btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
btrfs_queue_work(wq, &ordered_extent->work);
}
@@ -3456,7 +3567,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &found_key, root);
ret = PTR_ERR_OR_ZERO(inode);
if (ret && ret != -ENOENT)
goto out;
@@ -3828,7 +3939,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
{
struct btrfs_map_token token;
- btrfs_init_map_token(&token);
+ btrfs_init_map_token(&token, leaf);
btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
@@ -4889,7 +5000,7 @@ again:
if (!page) {
btrfs_delalloc_release_space(inode, data_reserved,
block_start, blocksize, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
ret = -ENOMEM;
goto out;
}
@@ -4924,12 +5035,11 @@ again:
}
clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, &cached_state);
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ 0, 0, &cached_state);
ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
- &cached_state, 0);
+ &cached_state);
if (ret) {
unlock_extent_cached(io_tree, block_start, block_end,
&cached_state);
@@ -4957,7 +5067,7 @@ out_unlock:
if (ret)
btrfs_delalloc_release_space(inode, data_reserved, block_start,
blocksize, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0));
+ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
unlock_page(page);
put_page(page);
out:
@@ -5079,7 +5189,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
hole_em->ram_bytes = hole_size;
- hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = fs_info->generation;
@@ -5310,9 +5419,9 @@ static void evict_inode_truncate_pages(struct inode *inode)
btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
clear_extent_bit(io_tree, start, end,
- EXTENT_LOCKED | EXTENT_DIRTY |
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 1, 1, &cached_state);
+ EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
+ &cached_state);
cond_resched();
spin_lock(&io_tree->lock);
@@ -5325,59 +5434,50 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
- u64 delayed_refs_extra = btrfs_calc_trans_metadata_size(fs_info, 1);
- int failures = 0;
-
- for (;;) {
- struct btrfs_trans_handle *trans;
- int ret;
-
- ret = btrfs_block_rsv_refill(root, rsv,
- rsv->size + delayed_refs_extra,
- BTRFS_RESERVE_FLUSH_LIMIT);
-
- if (ret && ++failures > 2) {
- btrfs_warn(fs_info,
- "could not allocate space for a delete; will truncate on mount");
- return ERR_PTR(-ENOSPC);
- }
-
- /*
- * Evict can generate a large amount of delayed refs without
- * having a way to add space back since we exhaust our temporary
- * block rsv. We aren't allowed to do FLUSH_ALL in this case
- * because we could deadlock with so many things in the flushing
- * code, so we have to try and hold some extra space to
- * compensate for our delayed ref generation. If we can't get
- * that space then we need see if we can steal our minimum from
- * the global reserve. We will be ratelimited by the amount of
- * space we have for the delayed refs rsv, so we'll end up
- * committing and trying again.
- */
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans) || !ret) {
- if (!IS_ERR(trans)) {
- trans->block_rsv = &fs_info->trans_block_rsv;
- trans->bytes_reserved = delayed_refs_extra;
- btrfs_block_rsv_migrate(rsv, trans->block_rsv,
- delayed_refs_extra, 1);
- }
- return trans;
- }
+ struct btrfs_trans_handle *trans;
+ u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
+ int ret;
+ /*
+	 * Eviction should be taking place somewhere safe because of our
+ * delayed iputs. However the normal flushing code will run delayed
+ * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
+ *
+ * We reserve the delayed_refs_extra here again because we can't use
+ * btrfs_start_transaction(root, 0) for the same deadlocky reason as
+ * above. We reserve our extra bit here because we generate a ton of
+ * delayed refs activity by truncating.
+ *
+ * If we cannot make our reservation we'll attempt to steal from the
+ * global reserve, because we really want to be able to free up space.
+ */
+ ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra,
+ BTRFS_RESERVE_FLUSH_EVICT);
+ if (ret) {
/*
* Try to steal from the global reserve if there is space for
* it.
*/
- if (!btrfs_check_space_for_delayed_refs(fs_info) &&
- !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0))
- return trans;
+ if (btrfs_check_space_for_delayed_refs(fs_info) ||
+ btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) {
+ btrfs_warn(fs_info,
+ "could not allocate space for delete; will truncate on mount");
+ return ERR_PTR(-ENOSPC);
+ }
+ delayed_refs_extra = 0;
+ }
- /* If not, commit and try again. */
- ret = btrfs_commit_transaction(trans);
- if (ret)
- return ERR_PTR(ret);
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans))
+ return trans;
+
+ if (delayed_refs_extra) {
+ trans->block_rsv = &fs_info->trans_block_rsv;
+ trans->bytes_reserved = delayed_refs_extra;
+ btrfs_block_rsv_migrate(rsv, trans->block_rsv,
+ delayed_refs_extra, 1);
}
+ return trans;
}
void btrfs_evict_inode(struct inode *inode)
@@ -5424,7 +5524,7 @@ void btrfs_evict_inode(struct inode *inode)
rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv)
goto no_delete;
- rsv->size = btrfs_calc_trunc_metadata_size(fs_info, 1);
+ rsv->size = btrfs_calc_metadata_size(fs_info, 1);
rsv->failfast = 1;
btrfs_i_size_write(BTRFS_I(inode), 0);
@@ -5685,12 +5785,14 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
return inode;
}
-/* Get an inode object given its location and corresponding root.
- * Returns in *is_new if the inode was read from disk
+/*
+ * Get an inode object given its location and corresponding root.
+ * The path can be preallocated to prevent recursing back into iget through
+ * the allocator. NULL is also valid but may require an additional
+ * allocation later.
*/
struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *new,
- struct btrfs_path *path)
+ struct btrfs_root *root, struct btrfs_path *path)
{
struct inode *inode;
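A hedged usage sketch for the updated helpers; the caller shown is hypothetical and only illustrates the two ways of supplying the path:

	/* sb, location and root come from the hypothetical caller's context */
	struct btrfs_path *path = btrfs_alloc_path();
	struct inode *inode;

	/* With a preallocated path the lookup avoids allocating one itself,
	 * which prevents recursing back into iget through the allocator. */
	inode = btrfs_iget_path(sb, &location, root, path);

	/* Passing no path is still valid; btrfs_iget() forwards NULL and the
	 * lookup allocates a path on demand. */
	inode = btrfs_iget(sb, &location, root);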
@@ -5705,8 +5807,6 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
if (!ret) {
inode_tree_add(inode);
unlock_new_inode(inode);
- if (new)
- *new = 1;
} else {
iget_failed(inode);
/*
@@ -5724,9 +5824,9 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
}
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *new)
+ struct btrfs_root *root)
{
- return btrfs_iget_path(s, location, root, new, NULL);
+ return btrfs_iget_path(s, location, root, NULL);
}
static struct inode *new_simple_dir(struct super_block *s,
@@ -5792,7 +5892,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
return ERR_PTR(ret);
if (location.type == BTRFS_INODE_ITEM_KEY) {
- inode = btrfs_iget(dir->i_sb, &location, root, NULL);
+ inode = btrfs_iget(dir->i_sb, &location, root);
if (IS_ERR(inode))
return inode;
@@ -5817,7 +5917,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
else
inode = new_simple_dir(dir->i_sb, &location, sub_root);
} else {
- inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
+ inode = btrfs_iget(dir->i_sb, &location, sub_root);
}
srcu_read_unlock(&fs_info->subvol_srcu, index);
@@ -6253,13 +6353,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
u32 sizes[2];
int nitems = name ? 2 : 1;
unsigned long ptr;
+ unsigned int nofs_flag;
int ret;
path = btrfs_alloc_path();
if (!path)
return ERR_PTR(-ENOMEM);
+ nofs_flag = memalloc_nofs_save();
inode = new_inode(fs_info->sb);
+ memalloc_nofs_restore(nofs_flag);
if (!inode) {
btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
@@ -6863,8 +6966,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
- if (em)
- em->bdev = fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
if (em) {
@@ -6880,7 +6981,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
err = -ENOMEM;
goto out;
}
- em->bdev = fs_info->fs_devices->latest_bdev;
em->start = EXTENT_MAP_HOLE;
em->orig_start = EXTENT_MAP_HOLE;
em->len = (u64)-1;
@@ -7139,7 +7239,6 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
err = -ENOMEM;
goto out;
}
- em->bdev = NULL;
ASSERT(hole_em);
/*
@@ -7499,7 +7598,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
{
struct extent_map_tree *em_tree;
struct extent_map *em;
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
ASSERT(type == BTRFS_ORDERED_PREALLOC ||
@@ -7517,7 +7615,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
em->len = len;
em->block_len = block_len;
em->block_start = block_start;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
em->orig_block_len = orig_block_len;
em->ram_bytes = ram_bytes;
em->generation = -1;
@@ -7556,6 +7653,8 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em,
struct inode *inode,
u64 start, u64 len)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
if (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
return -ENOENT;
@@ -7565,7 +7664,7 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em,
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
- bh_result->b_bdev = em->bdev;
+ bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
set_buffer_mapped(bh_result);
return 0;
@@ -7648,7 +7747,7 @@ skip_cow:
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
- bh_result->b_bdev = em->bdev;
+ bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
set_buffer_mapped(bh_result);
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
@@ -7679,12 +7778,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
u64 start = iblock << inode->i_blkbits;
u64 lockstart, lockend;
u64 len = bh_result->b_size;
- int unlock_bits = EXTENT_LOCKED;
int ret = 0;
- if (create)
- unlock_bits |= EXTENT_DIRTY;
- else
+ if (!create)
len = min_t(u64, len, fs_info->sectorsize);
lockstart = start;
@@ -7743,9 +7839,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (ret < 0)
goto unlock_err;
- /* clear and unlock the entire range */
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- unlock_bits, 1, 0, &cached_state);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state);
} else {
ret = btrfs_get_blocks_direct_read(em, bh_result, inode,
start, len);
@@ -7761,9 +7856,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
*/
lockstart = start + bh_result->b_size;
if (lockstart < lockend) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, unlock_bits, 1, 0,
- &cached_state);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ lockstart, lockend, &cached_state);
} else {
free_extent_state(cached_state);
}
@@ -7774,8 +7868,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
return 0;
unlock_err:
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- unlock_bits, 1, 0, &cached_state);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
err:
if (dio_data)
current->journal_info = dio_data;
@@ -7795,7 +7889,7 @@ static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
if (ret)
return ret;
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
return ret;
}
@@ -8148,18 +8242,14 @@ static void __endio_write_update_ordered(struct inode *inode,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered = NULL;
struct btrfs_workqueue *wq;
- btrfs_work_func_t func;
u64 ordered_offset = offset;
u64 ordered_bytes = bytes;
u64 last_offset;
- if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
+ if (btrfs_is_free_space_inode(BTRFS_I(inode)))
wq = fs_info->endio_freespace_worker;
- func = btrfs_freespace_write_helper;
- } else {
+ else
wq = fs_info->endio_write_workers;
- func = btrfs_endio_write_helper;
- }
while (ordered_offset < offset + bytes) {
last_offset = ordered_offset;
@@ -8167,9 +8257,8 @@ static void __endio_write_update_ordered(struct inode *inode,
&ordered_offset,
ordered_bytes,
uptodate)) {
- btrfs_init_work(&ordered->work, func,
- finish_ordered_fn,
- NULL, NULL);
+ btrfs_init_work(&ordered->work, finish_ordered_fn, NULL,
+ NULL);
btrfs_queue_work(wq, &ordered->work);
}
/*
@@ -8326,7 +8415,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
goto err;
}
map:
- ret = btrfs_map_bio(fs_info, bio, 0, 0);
+ ret = btrfs_map_bio(fs_info, bio, 0);
err:
return ret;
}
@@ -8659,7 +8748,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
} else if (ret >= 0 && (size_t)ret < count)
btrfs_delalloc_release_space(inode, data_reserved,
offset, count - (size_t)ret, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), count, false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), count);
}
out:
if (wakeup)
@@ -8790,8 +8879,7 @@ again:
*/
if (!inode_evicting)
clear_extent_bit(tree, start, end,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DELALLOC_NEW |
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 1, 0, &cached_state);
/*
@@ -8846,8 +8934,7 @@ again:
if (PageDirty(page))
btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
if (!inode_evicting) {
- clear_extent_bit(tree, page_start, page_end,
- EXTENT_LOCKED | EXTENT_DIRTY |
+ clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
&cached_state);
@@ -8975,12 +9062,11 @@ again:
* reserve data&meta space before lock_page() (see above comments).
*/
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, &cached_state);
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, 0, 0, &cached_state);
ret2 = btrfs_set_extent_delalloc(inode, page_start, end, 0,
- &cached_state, 0);
+ &cached_state);
if (ret2) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state);
@@ -9012,7 +9098,7 @@ again:
unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
if (!ret2) {
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
sb_end_pagefault(inode->i_sb);
extent_changeset_free(data_reserved);
return VM_FAULT_LOCKED;
@@ -9021,7 +9107,7 @@ again:
out_unlock:
unlock_page(page);
out:
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
btrfs_delalloc_release_space(inode, data_reserved, page_start,
reserved_space, (ret != 0));
out_noreserve:
@@ -9038,7 +9124,7 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
int ret;
struct btrfs_trans_handle *trans;
u64 mask = fs_info->sectorsize - 1;
- u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
+ u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
if (!skip_writeback) {
ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
@@ -9261,7 +9347,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->io_failure_tree.track_uptodate = true;
atomic_set(&ei->sync_writers, 0);
mutex_init(&ei->log_mutex);
- mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->delayed_iput);
@@ -9358,6 +9443,7 @@ void __cold btrfs_destroy_cachep(void)
kmem_cache_destroy(btrfs_trans_handle_cachep);
kmem_cache_destroy(btrfs_path_cachep);
kmem_cache_destroy(btrfs_free_space_cachep);
+ kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
}
int __init btrfs_init_cachep(void)
@@ -9387,6 +9473,12 @@ int __init btrfs_init_cachep(void)
if (!btrfs_free_space_cachep)
goto fail;
+ btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
+ PAGE_SIZE, PAGE_SIZE,
+ SLAB_RED_ZONE, NULL);
+ if (!btrfs_free_space_bitmap_cachep)
+ goto fail;
+
return 0;
fail:
btrfs_destroy_cachep();
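A minimal usage sketch for the new bitmap cache; the call site below is an assumption, only the cache creation above comes from the patch:

	/* Allocation/free pattern for the new cache (illustrative only): */
	unsigned long *bitmap;

	bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
	if (!bitmap)
		return -ENOMEM;
	/* ... use the PAGE_SIZE-sized, PAGE_SIZE-aligned bitmap ... */
	kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap);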
@@ -9483,6 +9575,9 @@ static int btrfs_rename_exchange(struct inode *old_dir,
goto out_notrans;
}
+ if (dest != root)
+ btrfs_record_root_in_trans(trans, dest);
+
/*
* We need to find a free sequence number both in the source and
* in the destination directory for the exchange.
@@ -9677,6 +9772,18 @@ out_fail:
commit_transaction = true;
}
if (commit_transaction) {
+ /*
+ * We may have set commit_transaction when logging the new name
+ * in the destination root, in which case we left the source
+		 * root context in the list of log contexts. So make sure we
+ * remove it to avoid invalid memory accesses, since the context
+ * was allocated in our stack frame.
+ */
+ if (sync_log_root) {
+ mutex_lock(&root->log_mutex);
+ list_del_init(&ctx_root.list);
+ mutex_unlock(&root->log_mutex);
+ }
ret = btrfs_commit_transaction(trans);
} else {
int ret2;
@@ -9690,6 +9797,9 @@ out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
+ ASSERT(list_empty(&ctx_root.list));
+ ASSERT(list_empty(&ctx_dest.list));
+
return ret;
}
@@ -10034,8 +10144,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
init_completion(&work->completion);
INIT_LIST_HEAD(&work->list);
work->inode = inode;
- btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
- btrfs_run_delalloc_work, NULL, NULL);
+ btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
return work;
}
@@ -10368,7 +10477,6 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
em->ram_bytes = ins.offset;
- em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
em->generation = trans->transid;
@@ -10724,7 +10832,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
start = 0;
while (start < isize) {
u64 logical_block_start, physical_block_start;
- struct btrfs_block_group_cache *bg;
+ struct btrfs_block_group *bg;
u64 len = isize - start;
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 818f7ec8bb0e..a1ee0b775e65 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -45,6 +45,7 @@
#include "compression.h"
#include "space-info.h"
#include "delalloc-space.h"
+#include "block-group.h"
#ifdef CONFIG_64BIT
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
@@ -478,10 +479,9 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
return put_user(inode->i_generation, arg);
}
-static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
+static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_device *device;
struct request_queue *q;
struct fstrim_range range;
@@ -540,7 +540,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return 0;
}
-int btrfs_is_empty_uuid(u8 *uuid)
+int __pure btrfs_is_empty_uuid(u8 *uuid)
{
int i;
@@ -1332,9 +1332,8 @@ again:
lock_extent_bits(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state);
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
- page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
- &cached_state);
+ page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, 0, 0, &cached_state);
if (i_done != page_cnt) {
spin_lock(&BTRFS_I(inode)->lock);
@@ -1360,8 +1359,7 @@ again:
unlock_page(pages[i]);
put_page(pages[i]);
}
- btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
- false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return i_done;
out:
@@ -1372,8 +1370,7 @@ out:
btrfs_delalloc_release_space(inode, data_reserved,
start_index << PAGE_SHIFT,
page_cnt << PAGE_SHIFT, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
- true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return ret;
@@ -1411,7 +1408,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
return -EINVAL;
if (do_compress) {
- if (range->compress_type > BTRFS_COMPRESS_TYPES)
+ if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
return -EINVAL;
if (range->compress_type)
compress_type = range->compress_type;
@@ -1840,8 +1837,15 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
goto free_args;
}
- if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
+ if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) {
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
+ btrfs_warn(fs_info,
+"SNAP_CREATE_V2 ioctl with CREATE_ASYNC is deprecated and will be removed in kernel 5.7");
+
ptr = &transid;
+ }
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
@@ -2457,7 +2461,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode,
goto out;
}
- temp_inode = btrfs_iget(sb, &key2, root, NULL);
+ temp_inode = btrfs_iget(sb, &key2, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
goto out;
@@ -3324,61 +3328,6 @@ out:
return ret;
}
-static void clone_update_extent_map(struct btrfs_inode *inode,
- const struct btrfs_trans_handle *trans,
- const struct btrfs_path *path,
- const u64 hole_offset,
- const u64 hole_len)
-{
- struct extent_map_tree *em_tree = &inode->extent_tree;
- struct extent_map *em;
- int ret;
-
- em = alloc_extent_map();
- if (!em) {
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
- return;
- }
-
- if (path) {
- struct btrfs_file_extent_item *fi;
-
- fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
- em->generation = -1;
- if (btrfs_file_extent_type(path->nodes[0], fi) ==
- BTRFS_FILE_EXTENT_INLINE)
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &inode->runtime_flags);
- } else {
- em->start = hole_offset;
- em->len = hole_len;
- em->ram_bytes = em->len;
- em->orig_start = hole_offset;
- em->block_start = EXTENT_MAP_HOLE;
- em->block_len = 0;
- em->orig_block_len = 0;
- em->compress_type = BTRFS_COMPRESS_NONE;
- em->generation = trans->transid;
- }
-
- while (1) {
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 1);
- write_unlock(&em_tree->lock);
- if (ret != -EEXIST) {
- free_extent_map(em);
- break;
- }
- btrfs_drop_extent_cache(inode, em->start,
- em->start + em->len - 1, 0);
- }
-
- if (ret)
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
-}
-
/*
* Make sure we do not end up inserting an inline extent into a file that has
* already other (non-inline) extents. If a file has an inline extent it can
@@ -3519,6 +3468,7 @@ copy_inline_extent:
path->slots[0]),
size);
inode_add_bytes(dst, datal);
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
return 0;
}
@@ -3570,6 +3520,14 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
while (1) {
u64 next_key_min_offset = key.offset + 1;
+ struct btrfs_file_extent_item *extent;
+ int type;
+ u32 size;
+ struct btrfs_key new_key;
+ u64 disko = 0, diskl = 0;
+ u64 datao = 0, datal = 0;
+ u8 comp;
+ u64 drop_start;
/*
* note the key will change type as we walk through the
@@ -3610,75 +3568,115 @@ process_slot:
key.objectid != btrfs_ino(BTRFS_I(src)))
break;
- if (key.type == BTRFS_EXTENT_DATA_KEY) {
- struct btrfs_file_extent_item *extent;
- int type;
- u32 size;
- struct btrfs_key new_key;
- u64 disko = 0, diskl = 0;
- u64 datao = 0, datal = 0;
- u8 comp;
- u64 drop_start;
-
- extent = btrfs_item_ptr(leaf, slot,
- struct btrfs_file_extent_item);
- comp = btrfs_file_extent_compression(leaf, extent);
- type = btrfs_file_extent_type(leaf, extent);
- if (type == BTRFS_FILE_EXTENT_REG ||
- type == BTRFS_FILE_EXTENT_PREALLOC) {
- disko = btrfs_file_extent_disk_bytenr(leaf,
- extent);
- diskl = btrfs_file_extent_disk_num_bytes(leaf,
- extent);
- datao = btrfs_file_extent_offset(leaf, extent);
- datal = btrfs_file_extent_num_bytes(leaf,
- extent);
- } else if (type == BTRFS_FILE_EXTENT_INLINE) {
- /* take upper bound, may be compressed */
- datal = btrfs_file_extent_ram_bytes(leaf,
- extent);
- }
+ ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
+
+ extent = btrfs_item_ptr(leaf, slot,
+ struct btrfs_file_extent_item);
+ comp = btrfs_file_extent_compression(leaf, extent);
+ type = btrfs_file_extent_type(leaf, extent);
+ if (type == BTRFS_FILE_EXTENT_REG ||
+ type == BTRFS_FILE_EXTENT_PREALLOC) {
+ disko = btrfs_file_extent_disk_bytenr(leaf, extent);
+ diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
+ datao = btrfs_file_extent_offset(leaf, extent);
+ datal = btrfs_file_extent_num_bytes(leaf, extent);
+ } else if (type == BTRFS_FILE_EXTENT_INLINE) {
+ /* Take upper bound, may be compressed */
+ datal = btrfs_file_extent_ram_bytes(leaf, extent);
+ }
+
+ /*
+ * The first search might have left us at an extent item that
+		 * ends before our target range's start; this can happen if we
+		 * have holes and the NO_HOLES feature enabled.
+ */
+ if (key.offset + datal <= off) {
+ path->slots[0]++;
+ goto process_slot;
+ } else if (key.offset >= off + len) {
+ break;
+ }
+ next_key_min_offset = key.offset + datal;
+ size = btrfs_item_size_nr(leaf, slot);
+ read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
+ size);
+
+ btrfs_release_path(path);
+ path->leave_spinning = 0;
+
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.objectid = btrfs_ino(BTRFS_I(inode));
+ if (off <= key.offset)
+ new_key.offset = key.offset + destoff - off;
+ else
+ new_key.offset = destoff;
+
+ /*
+ * Deal with a hole that doesn't have an extent item that
+ * represents it (NO_HOLES feature enabled).
+ * This hole is either in the middle of the cloning range or at
+ * the beginning (fully overlaps it or partially overlaps it).
+ */
+ if (new_key.offset != last_dest_end)
+ drop_start = last_dest_end;
+ else
+ drop_start = new_key.offset;
+
+ if (type == BTRFS_FILE_EXTENT_REG ||
+ type == BTRFS_FILE_EXTENT_PREALLOC) {
+ struct btrfs_clone_extent_info clone_info;
/*
- * The first search might have left us at an extent
- * item that ends before our target range's start, can
- * happen if we have holes and NO_HOLES feature enabled.
+ * a | --- range to clone ---| b
+ * | ------------- extent ------------- |
*/
- if (key.offset + datal <= off) {
- path->slots[0]++;
- goto process_slot;
- } else if (key.offset >= off + len) {
- break;
+
+ /* Subtract range b */
+ if (key.offset + datal > off + len)
+ datal = off + len - key.offset;
+
+ /* Subtract range a */
+ if (off > key.offset) {
+ datao += off - key.offset;
+ datal -= off - key.offset;
}
- next_key_min_offset = key.offset + datal;
- size = btrfs_item_size_nr(leaf, slot);
- read_extent_buffer(leaf, buf,
- btrfs_item_ptr_offset(leaf, slot),
- size);
- btrfs_release_path(path);
- path->leave_spinning = 0;
+ clone_info.disk_offset = disko;
+ clone_info.disk_len = diskl;
+ clone_info.data_offset = datao;
+ clone_info.data_len = datal;
+ clone_info.file_offset = new_key.offset;
+ clone_info.extent_buf = buf;
+ clone_info.item_size = size;
+ ret = btrfs_punch_hole_range(inode, path,
+ drop_start,
+ new_key.offset + datal - 1,
+ &clone_info, &trans);
+ if (ret)
+ goto out;
+ } else if (type == BTRFS_FILE_EXTENT_INLINE) {
+ u64 skip = 0;
+ u64 trim = 0;
- memcpy(&new_key, &key, sizeof(new_key));
- new_key.objectid = btrfs_ino(BTRFS_I(inode));
- if (off <= key.offset)
- new_key.offset = key.offset + destoff - off;
- else
- new_key.offset = destoff;
+ if (off > key.offset) {
+ skip = off - key.offset;
+ new_key.offset += skip;
+ }
- /*
- * Deal with a hole that doesn't have an extent item
- * that represents it (NO_HOLES feature enabled).
- * This hole is either in the middle of the cloning
- * range or at the beginning (fully overlaps it or
- * partially overlaps it).
- */
- if (new_key.offset != last_dest_end)
- drop_start = last_dest_end;
- else
- drop_start = new_key.offset;
+ if (key.offset + datal > off + len)
+ trim = key.offset + datal - (off + len);
+
+ if (comp && (skip || trim)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ size -= skip + trim;
+ datal -= skip + trim;
/*
+ * If our extent is inline, we know we will drop or
+ * adjust at most 1 extent item in the destination root.
+ *
* 1 - adjusting old extent (we may have to split it)
* 1 - add new extent
* 1 - inode update
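A hypothetical worked example of the range clamping in the REG/PREALLOC branch above, with numbers chosen purely for illustration:

		/*
		 * Hypothetical values, for illustration only:
		 *   off = 8192, len = 4096           range to clone: [8192, 12288)
		 *   key.offset = 4096, datal = 16384  extent covers:  [4096, 20480)
		 *
		 * Subtract range b: key.offset + datal (20480) > off + len (12288)
		 *   -> datal = 12288 - 4096 = 8192
		 * Subtract range a: off (8192) > key.offset (4096)
		 *   -> datao += 4096, datal = 8192 - 4096 = 4096
		 *
		 * What gets cloned is exactly the 4096 bytes of the extent that
		 * overlap the requested range, starting 4096 bytes into its data.
		 */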
@@ -3689,140 +3687,28 @@ process_slot:
goto out;
}
- if (type == BTRFS_FILE_EXTENT_REG ||
- type == BTRFS_FILE_EXTENT_PREALLOC) {
- /*
- * a | --- range to clone ---| b
- * | ------------- extent ------------- |
- */
-
- /* subtract range b */
- if (key.offset + datal > off + len)
- datal = off + len - key.offset;
-
- /* subtract range a */
- if (off > key.offset) {
- datao += off - key.offset;
- datal -= off - key.offset;
- }
-
- ret = btrfs_drop_extents(trans, root, inode,
- drop_start,
- new_key.offset + datal,
- 1);
- if (ret) {
- if (ret != -EOPNOTSUPP)
- btrfs_abort_transaction(trans,
- ret);
- btrfs_end_transaction(trans);
- goto out;
- }
-
- ret = btrfs_insert_empty_item(trans, root, path,
- &new_key, size);
- if (ret) {
+ ret = clone_copy_inline_extent(inode, trans, path,
+ &new_key, drop_start,
+ datal, skip, size, buf);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans);
- goto out;
- }
-
- leaf = path->nodes[0];
- slot = path->slots[0];
- write_extent_buffer(leaf, buf,
- btrfs_item_ptr_offset(leaf, slot),
- size);
-
- extent = btrfs_item_ptr(leaf, slot,
- struct btrfs_file_extent_item);
-
- /* disko == 0 means it's a hole */
- if (!disko)
- datao = 0;
-
- btrfs_set_file_extent_offset(leaf, extent,
- datao);
- btrfs_set_file_extent_num_bytes(leaf, extent,
- datal);
-
- if (disko) {
- struct btrfs_ref ref = { 0 };
- inode_add_bytes(inode, datal);
- btrfs_init_generic_ref(&ref,
- BTRFS_ADD_DELAYED_REF, disko,
- diskl, 0);
- btrfs_init_data_ref(&ref,
- root->root_key.objectid,
- btrfs_ino(BTRFS_I(inode)),
- new_key.offset - datao);
- ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
- btrfs_abort_transaction(trans,
- ret);
- btrfs_end_transaction(trans);
- goto out;
-
- }
- }
- } else if (type == BTRFS_FILE_EXTENT_INLINE) {
- u64 skip = 0;
- u64 trim = 0;
-
- if (off > key.offset) {
- skip = off - key.offset;
- new_key.offset += skip;
- }
-
- if (key.offset + datal > off + len)
- trim = key.offset + datal - (off + len);
-
- if (comp && (skip || trim)) {
- ret = -EINVAL;
- btrfs_end_transaction(trans);
- goto out;
- }
- size -= skip + trim;
- datal -= skip + trim;
-
- ret = clone_copy_inline_extent(inode,
- trans, path,
- &new_key,
- drop_start,
- datal,
- skip, size, buf);
- if (ret) {
- if (ret != -EOPNOTSUPP)
- btrfs_abort_transaction(trans,
- ret);
- btrfs_end_transaction(trans);
- goto out;
- }
- leaf = path->nodes[0];
- slot = path->slots[0];
+ btrfs_end_transaction(trans);
+ goto out;
}
+ }
- /* If we have an implicit hole (NO_HOLES feature). */
- if (drop_start < new_key.offset)
- clone_update_extent_map(BTRFS_I(inode), trans,
- NULL, drop_start,
- new_key.offset - drop_start);
-
- clone_update_extent_map(BTRFS_I(inode), trans,
- path, 0, 0);
+ btrfs_release_path(path);
- btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(path);
+ last_dest_end = ALIGN(new_key.offset + datal,
+ fs_info->sectorsize);
+ ret = clone_finish_inode_update(trans, inode, last_dest_end,
+ destoff, olen, no_time_update);
+ if (ret)
+ goto out;
+ if (new_key.offset + datal >= destoff + len)
+ break;
- last_dest_end = ALIGN(new_key.offset + datal,
- fs_info->sectorsize);
- ret = clone_finish_inode_update(trans, inode,
- last_dest_end,
- destoff, olen,
- no_time_update);
- if (ret)
- goto out;
- if (new_key.offset + datal >= destoff + len)
- break;
- }
btrfs_release_path(path);
key.offset = next_key_min_offset;
@@ -3834,32 +3720,27 @@ process_slot:
ret = 0;
if (last_dest_end < destoff + len) {
+ struct btrfs_clone_extent_info clone_info = { 0 };
/*
* We have an implicit hole (NO_HOLES feature is enabled) that
* fully or partially overlaps our cloning range at its end.
*/
btrfs_release_path(path);
+ path->leave_spinning = 0;
/*
- * 1 - remove extent(s)
- * 1 - inode update
+ * We are dealing with a hole and our clone_info already has a
+		 * disk_offset of 0; we only need to fill the data length and
+ * file offset.
*/
- trans = btrfs_start_transaction(root, 2);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- goto out;
- }
- ret = btrfs_drop_extents(trans, root, inode,
- last_dest_end, destoff + len, 1);
- if (ret) {
- if (ret != -EOPNOTSUPP)
- btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans);
+ clone_info.data_len = destoff + len - last_dest_end;
+ clone_info.file_offset = last_dest_end;
+ ret = btrfs_punch_hole_range(inode, path,
+ last_dest_end, destoff + len - 1,
+ &clone_info, &trans);
+ if (ret)
goto out;
- }
- clone_update_extent_map(BTRFS_I(inode), trans, NULL,
- last_dest_end,
- destoff + len - last_dest_end);
+
ret = clone_finish_inode_update(trans, inode, destoff + len,
destoff, olen, no_time_update);
}
@@ -4150,16 +4031,15 @@ out:
static void get_block_group_info(struct list_head *groups_list,
struct btrfs_ioctl_space_info *space)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
space->total_bytes = 0;
space->used_bytes = 0;
space->flags = 0;
list_for_each_entry(block_group, groups_list, list) {
space->flags = block_group->flags;
- space->total_bytes += block_group->key.offset;
- space->used_bytes +=
- btrfs_block_group_used(&block_group->item);
+ space->total_bytes += block_group->length;
+ space->used_bytes += block_group->used;
}
}
@@ -5070,10 +4950,9 @@ drop_write:
return ret;
}
-static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
+static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_quota_rescan_args *qsa;
int ret = 0;
@@ -5096,11 +4975,9 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
return ret;
}
-static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
+static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -5272,10 +5149,9 @@ out:
return ret;
}
-static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
+static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
size_t len;
int ret;
char label[BTRFS_LABEL_SIZE];
@@ -5359,10 +5235,9 @@ int btrfs_ioctl_get_supported_features(void __user *arg)
return 0;
}
-static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
+static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_super_block *super_block = fs_info->super_copy;
struct btrfs_ioctl_feature_flags features;
@@ -5381,7 +5256,7 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info,
u64 change_mask, u64 flags, u64 supported_flags,
u64 safe_set, u64 safe_clear)
{
- const char *type = btrfs_feature_set_names[set];
+ const char *type = btrfs_feature_set_name(set);
char *names;
u64 disallowed, unsupported;
u64 set_mask = flags & change_mask;
@@ -5562,8 +5437,12 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_setflags(file, argp);
case FS_IOC_GETVERSION:
return btrfs_ioctl_getversion(file, argp);
+ case FS_IOC_GETFSLABEL:
+ return btrfs_ioctl_get_fslabel(fs_info, argp);
+ case FS_IOC_SETFSLABEL:
+ return btrfs_ioctl_set_fslabel(file, argp);
case FITRIM:
- return btrfs_ioctl_fitrim(file, argp);
+ return btrfs_ioctl_fitrim(fs_info, argp);
case BTRFS_IOC_SNAP_CREATE:
return btrfs_ioctl_snap_create(file, argp, 0);
case BTRFS_IOC_SNAP_CREATE_V2:
@@ -5668,19 +5547,15 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_QUOTA_RESCAN:
return btrfs_ioctl_quota_rescan(file, argp);
case BTRFS_IOC_QUOTA_RESCAN_STATUS:
- return btrfs_ioctl_quota_rescan_status(file, argp);
+ return btrfs_ioctl_quota_rescan_status(fs_info, argp);
case BTRFS_IOC_QUOTA_RESCAN_WAIT:
- return btrfs_ioctl_quota_rescan_wait(file, argp);
+ return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(fs_info, argp);
- case BTRFS_IOC_GET_FSLABEL:
- return btrfs_ioctl_get_fslabel(file, argp);
- case BTRFS_IOC_SET_FSLABEL:
- return btrfs_ioctl_set_fslabel(file, argp);
case BTRFS_IOC_GET_SUPPORTED_FEATURES:
return btrfs_ioctl_get_supported_features(argp);
case BTRFS_IOC_GET_FEATURES:
- return btrfs_ioctl_get_features(file, argp);
+ return btrfs_ioctl_get_features(fs_info, argp);
case BTRFS_IOC_SET_FEATURES:
return btrfs_ioctl_set_features(file, argp);
case FS_IOC_FSGETXATTR:
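
Several handlers above only ever touch the fs_info, so the dispatcher now passes it directly instead of each handler re-deriving it from the file. A condensed sketch of the pattern, assuming fs_info is obtained once at the top of btrfs_ioctl() as in the existing code:

long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFSLABEL:
		/* needs only the superblock: take fs_info directly */
		return btrfs_ioctl_get_fslabel(fs_info, argp);
	case BTRFS_IOC_SNAP_CREATE:
		/* handlers that operate on the file keep the old signature */
		return btrfs_ioctl_snap_create(file, argp, 0);
	default:
		return -ENOTTY;
	}
}
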
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 98fccce4208c..571c4826c428 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -8,69 +8,169 @@
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
+#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
+/*
+ * Extent buffer locking
+ * =====================
+ *
+ * The locks use a custom scheme that allows more operations than are
+ * available from the current locking primitives. The building blocks are still
+ * rwlock and wait queues.
+ *
+ * Required semantics:
+ *
+ * - reader/writer exclusion
+ * - writer/writer exclusion
+ * - reader/reader sharing
+ * - spinning lock semantics
+ * - blocking lock semantics
+ * - try-lock semantics for readers and writers
+ * - one level nesting, allowing read lock to be taken by the same thread that
+ * already has write lock
+ *
+ * The extent buffer locks (also called tree locks) manage access to eb data
+ * related to the storage in the b-tree (keys, items, but not the individual
+ * members of eb).
+ * We want concurrency of many readers and safe updates. The underlying locking
+ * is done by read-write spinlock and the blocking part is implemented using
+ * counters and wait queues.
+ *
+ * spinning semantics - the low-level rwlock is held so all other threads that
+ * want to take it are spinning on it.
+ *
+ * blocking semantics - the low-level rwlock is not held but the counter
+ * denotes how many times the blocking lock was held;
+ * sleeping is possible
+ *
+ * Write lock always allows only one thread to access the data.
+ *
+ *
+ * Debugging
+ * ---------
+ *
+ * There are additional state counters that are asserted in various contexts
+ * and removed from non-debug builds to reduce the size of extent_buffer and
+ * for performance reasons.
+ *
+ *
+ * Lock nesting
+ * ------------
+ *
+ * A write operation on a tree might indirectly start a lookup on the same
+ * tree. This can happen when btrfs_cow_block locks the tree and needs to
+ * look up free extents.
+ *
+ * btrfs_cow_block
+ * ..
+ * alloc_tree_block_no_bg_flush
+ * btrfs_alloc_tree_block
+ * btrfs_reserve_extent
+ * ..
+ * load_free_space_cache
+ * ..
+ * btrfs_lookup_file_extent
+ * btrfs_search_slot
+ *
+ *
+ * Locking pattern - spinning
+ * --------------------------
+ *
+ * The simple locking scenario, the +--+ denotes the spinning section.
+ *
+ * +- btrfs_tree_lock
+ * | - extent_buffer::rwlock is held
+ * | - no heavy operations should happen, eg. IO, memory allocations, large
+ * | structure traversals
+ * +- btrfs_tree_unlock
+ *
+ *
+ * Locking pattern - blocking
+ * --------------------------
+ *
+ * The blocking write uses the following scheme. The +--+ denotes the spinning
+ * section.
+ *
+ * +- btrfs_tree_lock
+ * |
+ * +- btrfs_set_lock_blocking_write
+ *
+ * - allowed: IO, memory allocations, etc.
+ *
+ * -- btrfs_tree_unlock - note, no explicit unblocking necessary
+ *
+ *
+ * Blocking read is similar.
+ *
+ * +- btrfs_tree_read_lock
+ * |
+ * +- btrfs_set_lock_blocking_read
+ *
+ * - heavy operations allowed
+ *
+ * +- btrfs_tree_read_unlock_blocking
+ * |
+ * +- btrfs_tree_read_unlock
+ *
+ */
+
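
A minimal usage sketch of the two patterns documented above; the wrapper function is hypothetical, the locking helpers are the ones defined in this file:

static void example_modify_eb(struct extent_buffer *eb)
{
	/* Spinning pattern: short critical section, no sleeping */
	btrfs_tree_lock(eb);
	/* ... lightweight key/item updates ... */
	btrfs_tree_unlock(eb);

	/* Blocking pattern: switch to blocking before heavy work */
	btrfs_tree_lock(eb);
	btrfs_set_lock_blocking_write(eb);
	/* ... IO, memory allocation, large traversals ... */
	btrfs_tree_unlock(eb);	/* no explicit unblocking needed */
}
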
#ifdef CONFIG_BTRFS_DEBUG
-static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
+static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
WARN_ON(eb->spinning_writers);
eb->spinning_writers++;
}
-static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
+static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
WARN_ON(eb->spinning_writers != 1);
eb->spinning_writers--;
}
-static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
+static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
WARN_ON(eb->spinning_writers);
}
-static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
+static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
atomic_inc(&eb->spinning_readers);
}
-static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
+static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
WARN_ON(atomic_read(&eb->spinning_readers) == 0);
atomic_dec(&eb->spinning_readers);
}
-static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
atomic_inc(&eb->read_locks);
}
-static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
atomic_dec(&eb->read_locks);
}
-static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
BUG_ON(!atomic_read(&eb->read_locks));
}
-static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
eb->write_locks++;
}
-static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
eb->write_locks--;
}
-void btrfs_assert_tree_locked(struct extent_buffer *eb)
-{
- BUG_ON(!eb->write_locks);
-}
-
#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
@@ -80,11 +180,19 @@ static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
-void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif
+/*
+ * Mark already held read lock as blocking. Can be nested in write lock by the
+ * same thread.
+ *
+ * Use when there are potentially long operations ahead so other threads
+ * waiting on the lock will not actively spin but sleep instead.
+ *
+ * The rwlock is released and the blocking reader counter is increased.
+ */
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
trace_btrfs_set_lock_blocking_read(eb);
@@ -101,6 +209,14 @@ void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
read_unlock(&eb->lock);
}
+/*
+ * Mark already held write lock as blocking.
+ *
+ * Use when there are potentially long operations ahead so other threads
+ * waiting on the lock will not actively spin but sleep instead.
+ *
+ * The rwlock is released and the blocking writers counter is set.
+ */
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
trace_btrfs_set_lock_blocking_write(eb);
@@ -114,50 +230,19 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
if (eb->blocking_writers == 0) {
btrfs_assert_spinning_writers_put(eb);
btrfs_assert_tree_locked(eb);
- eb->blocking_writers++;
+ WRITE_ONCE(eb->blocking_writers, 1);
write_unlock(&eb->lock);
}
}
-void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
-{
- trace_btrfs_clear_lock_blocking_read(eb);
- /*
- * No lock is required. The lock owner may change if we have a read
- * lock, but it won't change to or away from us. If we have the write
- * lock, we are the owner and it'll never change.
- */
- if (eb->lock_nested && current->pid == eb->lock_owner)
- return;
- BUG_ON(atomic_read(&eb->blocking_readers) == 0);
- read_lock(&eb->lock);
- btrfs_assert_spinning_readers_get(eb);
- /* atomic_dec_and_test implies a barrier */
- if (atomic_dec_and_test(&eb->blocking_readers))
- cond_wake_up_nomb(&eb->read_lock_wq);
-}
-
-void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
-{
- trace_btrfs_clear_lock_blocking_write(eb);
- /*
- * no lock is required. The lock owner may change if
- * we have a read lock, but it won't change to or away
- * from us. If we have the write lock, we are the owner
- * and it'll never change.
- */
- if (eb->lock_nested && current->pid == eb->lock_owner)
- return;
- write_lock(&eb->lock);
- BUG_ON(eb->blocking_writers != 1);
- btrfs_assert_spinning_writers_get(eb);
- if (--eb->blocking_writers == 0)
- cond_wake_up(&eb->write_lock_wq);
-}
-
/*
- * take a spinning read lock. This will wait for any blocking
- * writers
+ * Lock the extent buffer for read. Wait for any writers (spinning or blocking).
+ * Can be nested in write lock by the same thread.
+ *
+ * Use when the locked section does only lightweight actions and busy waiting
+ * would be cheaper than making other threads do the wait/wake loop.
+ *
+ * The rwlock is held upon exit.
*/
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
@@ -169,23 +254,24 @@ again:
read_lock(&eb->lock);
BUG_ON(eb->blocking_writers == 0 &&
current->pid == eb->lock_owner);
- if (eb->blocking_writers && current->pid == eb->lock_owner) {
- /*
- * This extent is already write-locked by our thread. We allow
- * an additional read lock to be added because it's for the same
- * thread. btrfs_find_all_roots() depends on this as it may be
- * called on a partly (write-)locked tree.
- */
- BUG_ON(eb->lock_nested);
- eb->lock_nested = true;
- read_unlock(&eb->lock);
- trace_btrfs_tree_read_lock(eb, start_ns);
- return;
- }
if (eb->blocking_writers) {
+ if (current->pid == eb->lock_owner) {
+ /*
+ * This extent is already write-locked by our thread.
+ * We allow an additional read lock to be added because
+ * it's for the same thread. btrfs_find_all_roots()
+ * depends on this as it may be called on a partly
+ * (write-)locked tree.
+ */
+ BUG_ON(eb->lock_nested);
+ eb->lock_nested = true;
+ read_unlock(&eb->lock);
+ trace_btrfs_tree_read_lock(eb, start_ns);
+ return;
+ }
read_unlock(&eb->lock);
wait_event(eb->write_lock_wq,
- eb->blocking_writers == 0);
+ READ_ONCE(eb->blocking_writers) == 0);
goto again;
}
btrfs_assert_tree_read_locks_get(eb);
@@ -194,17 +280,19 @@ again:
}
/*
- * take a spinning read lock.
- * returns 1 if we get the read lock and 0 if we don't
- * this won't wait for blocking writers
+ * Lock extent buffer for read, optimistically expecting that there are no
+ * contending blocking writers. If there are, don't wait.
+ *
+ * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
- if (eb->blocking_writers)
+ if (READ_ONCE(eb->blocking_writers))
return 0;
read_lock(&eb->lock);
- if (eb->blocking_writers) {
+ /* Refetch value after lock */
+ if (READ_ONCE(eb->blocking_writers)) {
read_unlock(&eb->lock);
return 0;
}
@@ -215,18 +303,20 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
}
/*
- * returns 1 if we get the read lock and 0 if we don't
- * this won't wait for blocking writers
+ * Try-lock for read. Don't block or wait for contending writers.
+ *
+ * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
- if (eb->blocking_writers)
+ if (READ_ONCE(eb->blocking_writers))
return 0;
if (!read_trylock(&eb->lock))
return 0;
- if (eb->blocking_writers) {
+ /* Refetch value after lock */
+ if (READ_ONCE(eb->blocking_writers)) {
read_unlock(&eb->lock);
return 0;
}
@@ -237,16 +327,19 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
}
/*
- * returns 1 if we get the read lock and 0 if we don't
- * this won't wait for blocking writers or readers
+ * Try-lock for write. May block until the lock is uncontended, but does not
+ * wait until it is free.
+ *
+ * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
- if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
+ if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers))
return 0;
write_lock(&eb->lock);
- if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
+ /* Refetch value after lock */
+ if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) {
write_unlock(&eb->lock);
return 0;
}
@@ -258,7 +351,10 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
}
/*
- * drop a spinning read lock
+ * Release read lock. Must be used only if the lock is in spinning mode. If
+ * the read lock is nested, must pair with read lock before the write unlock.
+ *
+ * The rwlock is not held upon exit.
*/
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
@@ -280,7 +376,11 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
}
/*
- * drop a blocking read lock
+ * Release read lock, previously set to blocking by a pairing call to
+ * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
+ * thread.
+ *
+ * State of rwlock is unchanged, last reader wakes waiting threads.
*/
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
@@ -304,8 +404,10 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
}
/*
- * take a spinning write lock. This will wait for both
- * blocking readers or writers
+ * Lock for write. Wait for all blocking and spinning readers and writers. This
+ * starts context where reader lock could be nested by the same thread.
+ *
+ * The rwlock is held for write upon exit.
*/
void btrfs_tree_lock(struct extent_buffer *eb)
{
@@ -317,9 +419,11 @@ void btrfs_tree_lock(struct extent_buffer *eb)
WARN_ON(eb->lock_owner == current->pid);
again:
wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
- wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
+ wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0);
write_lock(&eb->lock);
- if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
+ /* Refetch value after lock */
+ if (atomic_read(&eb->blocking_readers) ||
+ READ_ONCE(eb->blocking_writers)) {
write_unlock(&eb->lock);
goto again;
}
@@ -330,10 +434,19 @@ again:
}
/*
- * drop a spinning or a blocking write lock.
+ * Release the write lock, either blocking or spinning (ie. there's no need
+ * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
+ * This also ends the context for nesting, the read lock must have been
+ * released already.
+ *
+ * Tasks blocked and waiting are woken, rwlock is not held upon exit.
*/
void btrfs_tree_unlock(struct extent_buffer *eb)
{
+ /*
+ * This is read both locked and unlocked but always by the same thread
+ * that already owns the lock so we don't need to use READ_ONCE
+ */
int blockers = eb->blocking_writers;
BUG_ON(blockers > 1);
@@ -345,12 +458,68 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
if (blockers) {
btrfs_assert_no_spinning_writers(eb);
- eb->blocking_writers--;
- /* Use the lighter barrier after atomic */
- smp_mb__after_atomic();
- cond_wake_up_nomb(&eb->write_lock_wq);
+ /* Unlocked write */
+ WRITE_ONCE(eb->blocking_writers, 0);
+ /*
+ * We need to order modifying blocking_writers above with
+ * actually waking up the sleepers to ensure they see the
+ * updated value of blocking_writers
+ */
+ cond_wake_up(&eb->write_lock_wq);
} else {
btrfs_assert_spinning_writers_put(eb);
write_unlock(&eb->lock);
}
}
+
+/*
+ * Set all locked nodes in the path to blocking locks. This should be done
+ * before scheduling
+ */
+void btrfs_set_path_blocking(struct btrfs_path *p)
+{
+ int i;
+
+ for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+ if (!p->nodes[i] || !p->locks[i])
+ continue;
+ /*
+ * If we currently have a spinning reader or writer lock this
+ * will bump the count of blocking holders and drop the
+ * spinlock.
+ */
+ if (p->locks[i] == BTRFS_READ_LOCK) {
+ btrfs_set_lock_blocking_read(p->nodes[i]);
+ p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
+ } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
+ btrfs_set_lock_blocking_write(p->nodes[i]);
+ p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
+ }
+ }
+}
+
+/*
+ * This releases any locks held in the path starting at level and going all the
+ * way up to the root.
+ *
+ * btrfs_search_slot will keep the lock held on higher nodes in a few corner
+ * cases, such as COW of the block at slot zero in the node. This ignores
+ * those rules, and it should only be called when there are no more updates to
+ * be done higher up in the tree.
+ */
+void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
+{
+ int i;
+
+ if (path->keep_locks)
+ return;
+
+ for (i = level; i < BTRFS_MAX_LEVEL; i++) {
+ if (!path->nodes[i])
+ continue;
+ if (!path->locks[i])
+ continue;
+ btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
+ path->locks[i] = 0;
+ }
+}
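
As a hedged sketch of how callers are expected to combine these two path helpers (the search function around them is illustrative only):

static int example_walk(struct btrfs_root *root, const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* About to do something that may sleep: make held locks blocking */
	btrfs_set_path_blocking(path);
	/* ... long operation on the leaf ... */

	/* No more changes above the leaf, drop the upper level locks early */
	btrfs_unlock_up_safe(path, 1);
out:
	btrfs_free_path(path);
	return ret;
}
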
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 595014f64830..21a285883e89 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -6,6 +6,8 @@
#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H
+#include "extent_io.h"
+
#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2
#define BTRFS_WRITE_LOCK_BLOCKING 3
@@ -19,13 +21,20 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb);
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
-void btrfs_clear_lock_blocking_read(struct extent_buffer *eb);
-void btrfs_clear_lock_blocking_write(struct extent_buffer *eb);
-void btrfs_assert_tree_locked(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+#ifdef CONFIG_BTRFS_DEBUG
+static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
+ BUG_ON(!eb->write_locks);
+}
+#else
+static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
+#endif
+
+void btrfs_set_path_blocking(struct btrfs_path *p);
+void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 579d53ae256f..aa9cd11f4b78 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -63,27 +63,7 @@ struct workspace {
static struct workspace_manager wsm;
-static void lzo_init_workspace_manager(void)
-{
- btrfs_init_workspace_manager(&wsm, &btrfs_lzo_compress);
-}
-
-static void lzo_cleanup_workspace_manager(void)
-{
- btrfs_cleanup_workspace_manager(&wsm);
-}
-
-static struct list_head *lzo_get_workspace(unsigned int level)
-{
- return btrfs_get_workspace(&wsm, level);
-}
-
-static void lzo_put_workspace(struct list_head *ws)
-{
- btrfs_put_workspace(&wsm, ws);
-}
-
-static void lzo_free_workspace(struct list_head *ws)
+void lzo_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -93,7 +73,7 @@ static void lzo_free_workspace(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *lzo_alloc_workspace(unsigned int level)
+struct list_head *lzo_alloc_workspace(unsigned int level)
{
struct workspace *workspace;
@@ -131,13 +111,9 @@ static inline size_t read_compress_length(const char *buf)
return le32_to_cpu(dlen);
}
-static int lzo_compress_pages(struct list_head *ws,
- struct address_space *mapping,
- u64 start,
- struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out)
+int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0;
@@ -303,7 +279,7 @@ out:
return ret;
}
-static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
+int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0, ret2;
@@ -444,10 +420,9 @@ done:
return ret;
}
-static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page,
- unsigned long start_byte,
- size_t srclen, size_t destlen)
+int lzo_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
size_t in_len;
@@ -507,20 +482,8 @@ out:
return ret;
}
-static unsigned int lzo_set_level(unsigned int level)
-{
- return 0;
-}
-
const struct btrfs_compress_op btrfs_lzo_compress = {
- .init_workspace_manager = lzo_init_workspace_manager,
- .cleanup_workspace_manager = lzo_cleanup_workspace_manager,
- .get_workspace = lzo_get_workspace,
- .put_workspace = lzo_put_workspace,
- .alloc_workspace = lzo_alloc_workspace,
- .free_workspace = lzo_free_workspace,
- .compress_pages = lzo_compress_pages,
- .decompress_bio = lzo_decompress_bio,
- .decompress = lzo_decompress,
- .set_level = lzo_set_level,
+ .workspace_manager = &wsm,
+ .max_level = 1,
+ .default_level = 1,
};
diff --git a/fs/btrfs/math.h b/fs/btrfs/math.h
deleted file mode 100644
index 75246f2f56ba..000000000000
--- a/fs/btrfs/math.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Fujitsu. All rights reserved.
- * Written by Miao Xie <miaox@cn.fujitsu.com>
- */
-
-#ifndef BTRFS_MATH_H
-#define BTRFS_MATH_H
-
-#include <asm/div64.h>
-
-static inline u64 div_factor(u64 num, int factor)
-{
- if (factor == 10)
- return num;
- num *= factor;
- return div_u64(num, 10);
-}
-
-static inline u64 div_factor_fine(u64 num, int factor)
-{
- if (factor == 100)
- return num;
- num *= factor;
- return div_u64(num, 100);
-}
-
-#endif
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
new file mode 100644
index 000000000000..72bab64ecf60
--- /dev/null
+++ b/fs/btrfs/misc.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_MISC_H
+#define BTRFS_MISC_H
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <asm/div64.h>
+
+#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
+
+static inline void cond_wake_up(struct wait_queue_head *wq)
+{
+ /*
+ * This implies a full smp_mb barrier, see comments for
+ * waitqueue_active why.
+ */
+ if (wq_has_sleeper(wq))
+ wake_up(wq);
+}
+
+static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
+{
+ /*
+ * Special case for conditional wakeup where the barrier required for
+ * waitqueue_active is implied by some of the preceding code. Eg. one
+	 * of such atomic operations (atomic_dec_return, ...), or an
+	 * unlock/lock sequence, etc.
+ */
+ if (waitqueue_active(wq))
+ wake_up(wq);
+}
+
+static inline u64 div_factor(u64 num, int factor)
+{
+ if (factor == 10)
+ return num;
+ num *= factor;
+ return div_u64(num, 10);
+}
+
+static inline u64 div_factor_fine(u64 num, int factor)
+{
+ if (factor == 100)
+ return num;
+ num *= factor;
+ return div_u64(num, 100);
+}
+
+/* Copy of is_power_of_two that is 64bit safe */
+static inline bool is_power_of_two_u64(u64 n)
+{
+ return n != 0 && (n & (n - 1)) == 0;
+}
+
+static inline bool has_single_bit_set(u64 n)
+{
+ return is_power_of_two_u64(n);
+}
+
+#endif
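
A small sketch of the helpers collected here, assuming the block group members introduced earlier in this series (the wrapper functions are hypothetical):

static bool example_mostly_full(const struct btrfs_block_group *bg)
{
	/* div_factor(x, 7) is 70% of x, eg. a reclaim threshold */
	return bg->used >= div_factor(bg->length, 7);
}

static bool example_single_profile(u64 flags)
{
	/* true when exactly one RAID profile bit is set in the flags */
	return has_single_bit_set(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);
}
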
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 1744ba8b2754..fb09bc2f8e4d 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -7,6 +7,7 @@
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
+#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
@@ -546,7 +547,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
spin_unlock(&root->ordered_extent_lock);
btrfs_init_work(&ordered->flush_work,
- btrfs_flush_delalloc_helper,
btrfs_run_ordered_extent_work, NULL, NULL);
list_add_tail(&ordered->work_list, &works);
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
@@ -572,12 +572,11 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
return count;
}
-u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
+void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len)
{
struct btrfs_root *root;
struct list_head splice;
- u64 total_done = 0;
u64 done;
INIT_LIST_HEAD(&splice);
@@ -597,7 +596,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
done = btrfs_wait_ordered_extents(root, nr,
range_start, range_len);
btrfs_put_fs_root(root);
- total_done += done;
spin_lock(&fs_info->ordered_root_lock);
if (nr != U64_MAX) {
@@ -607,8 +605,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
list_splice_tail(&splice, &fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
mutex_unlock(&fs_info->ordered_operations_mutex);
-
- return total_done;
}
/*
@@ -985,13 +981,14 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
struct extent_state **cached_state)
{
struct btrfs_ordered_extent *ordered;
- struct extent_state *cachedp = NULL;
+ struct extent_state *cache = NULL;
+ struct extent_state **cachedp = &cache;
if (cached_state)
- cachedp = *cached_state;
+ cachedp = cached_state;
while (1) {
- lock_extent_bits(tree, start, end, &cachedp);
+ lock_extent_bits(tree, start, end, cachedp);
ordered = btrfs_lookup_ordered_range(inode, start,
end - start + 1);
if (!ordered) {
@@ -1001,10 +998,10 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
* aren't exposing it outside of this function
*/
if (!cached_state)
- refcount_dec(&cachedp->refs);
+ refcount_dec(&cache->refs);
break;
}
- unlock_extent_cached(tree, start, end, &cachedp);
+ unlock_extent_cached(tree, start, end, cachedp);
btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
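
With the fix above the caller-provided cached state is filled in properly; a minimal sketch of the intended calling pattern (the surrounding function is hypothetical):

static void example_locked_region(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	/* Lock the range and wait out any ordered extents covering it */
	btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, start, end,
					   &cached);
	/* ... operate on the now quiescent range ... */
	unlock_extent_cached(&inode->io_tree, start, end, &cached);
}
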
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 5204171ea962..4eb0319a86d7 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -186,7 +186,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
u8 *sum, int len);
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len);
-u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
+void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len);
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
struct btrfs_inode *inode, u64 start,
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 9cb50577d982..873b6b694107 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -266,9 +266,9 @@ void btrfs_print_leaf(struct extent_buffer *l)
struct btrfs_block_group_item);
pr_info(
"\t\tblock group used %llu chunk_objectid %llu flags %llu\n",
- btrfs_disk_block_group_used(l, bi),
- btrfs_disk_block_group_chunk_objectid(l, bi),
- btrfs_disk_block_group_flags(l, bi));
+ btrfs_block_group_used(l, bi),
+ btrfs_block_group_chunk_objectid(l, bi),
+ btrfs_block_group_flags(l, bi));
break;
case BTRFS_CHUNK_ITEM_KEY:
print_chunk(l, btrfs_item_ptr(l, i,
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index e0469816c678..deb59e7cfcac 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -362,7 +362,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
* reservations if we do add more properties in the future.
*/
if (need_reserve) {
- num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+ num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
ret = btrfs_block_rsv_add(root, trans->block_rsv,
num_bytes, BTRFS_RESERVE_NO_FLUSH);
if (ret)
@@ -416,11 +416,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- parent_inode = btrfs_iget(sb, &key, parent_root, NULL);
+ parent_inode = btrfs_iget(sb, &key, parent_root);
if (IS_ERR(parent_inode))
return PTR_ERR(parent_inode);
- child_inode = btrfs_iget(sb, &key, root, NULL);
+ child_inode = btrfs_iget(sb, &key, root);
if (IS_ERR(child_inode)) {
iput(parent_inode);
return PTR_ERR(child_inode);
@@ -437,8 +437,6 @@ void __init btrfs_props_init(void)
{
int i;
- hash_init(prop_handlers_ht);
-
for (i = 0; i < ARRAY_SIZE(prop_handlers); i++) {
struct prop_handler *p = &prop_handlers[i];
u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name));
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index f8a3c1b0a15a..93aeb2e539a4 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -21,7 +21,7 @@
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
-
+#include "block-group.h"
/* TODO XXX FIXME
* - subvol delete -> delete when ref goes to 0? delete limits also?
@@ -1312,8 +1312,9 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
struct ulist *tmp;
+ bool found = false;
int ret = 0;
- int err;
+ int ret2;
tmp = ulist_alloc(GFP_KERNEL);
if (!tmp)
@@ -1327,28 +1328,39 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
member = find_qgroup_rb(fs_info, src);
parent = find_qgroup_rb(fs_info, dst);
- if (!member || !parent) {
- ret = -EINVAL;
- goto out;
- }
+ /*
+	 * If the parent/member pair doesn't exist, just try to delete the dead
+	 * relation items.
+ */
+ if (!member || !parent)
+ goto delete_item;
/* check if such qgroup relation exist firstly */
list_for_each_entry(list, &member->groups, next_group) {
- if (list->group == parent)
- goto exist;
+ if (list->group == parent) {
+ found = true;
+ break;
+ }
}
- ret = -ENOENT;
- goto out;
-exist:
+
+delete_item:
ret = del_qgroup_relation_item(trans, src, dst);
- err = del_qgroup_relation_item(trans, dst, src);
- if (err && !ret)
- ret = err;
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
+ ret2 = del_qgroup_relation_item(trans, dst, src);
+ if (ret2 < 0 && ret2 != -ENOENT)
+ goto out;
- spin_lock(&fs_info->qgroup_lock);
- del_relation_rb(fs_info, src, dst);
- ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
- spin_unlock(&fs_info->qgroup_lock);
+ /* At least one deletion succeeded, return 0 */
+ if (!ret || !ret2)
+ ret = 0;
+
+ if (found) {
+ spin_lock(&fs_info->qgroup_lock);
+ del_relation_rb(fs_info, src, dst);
+ ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
+ spin_unlock(&fs_info->qgroup_lock);
+ }
out:
ulist_free(tmp);
return ret;
@@ -1799,7 +1811,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
/* For src_path */
- extent_buffer_get(src_eb);
+ atomic_inc(&src_eb->refs);
src_path->nodes[root_level] = src_eb;
src_path->slots[root_level] = dst_path->slots[root_level];
src_path->locks[root_level] = 0;
@@ -2055,7 +2067,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
goto out;
}
/* For dst_path */
- extent_buffer_get(dst_eb);
+ atomic_inc(&dst_eb->refs);
dst_path->nodes[level] = dst_eb;
dst_path->slots[level] = 0;
dst_path->locks[level] = 0;
@@ -2114,7 +2126,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
* walk back up the tree (adjusting slot pointers as we go)
* and restart the search process.
*/
- extent_buffer_get(root_eb); /* For path */
+ atomic_inc(&root_eb->refs); /* For path */
path->nodes[root_level] = root_eb;
path->slots[root_level] = 0;
path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
@@ -3154,9 +3166,6 @@ out:
btrfs_free_path(path);
mutex_lock(&fs_info->qgroup_rescan_lock);
- if (!btrfs_fs_closing(fs_info))
- fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
-
if (err > 0 &&
fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -3172,16 +3181,30 @@ out:
trans = btrfs_start_transaction(fs_info->quota_root, 1);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
+ trans = NULL;
btrfs_err(fs_info,
"fail to start transaction for status update: %d",
err);
- goto done;
}
- ret = update_qgroup_status_item(trans);
- if (ret < 0) {
- err = ret;
- btrfs_err(fs_info, "fail to update qgroup status: %d", err);
+
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ if (!btrfs_fs_closing(fs_info))
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+ if (trans) {
+ ret = update_qgroup_status_item(trans);
+ if (ret < 0) {
+ err = ret;
+ btrfs_err(fs_info, "fail to update qgroup status: %d",
+ err);
+ }
}
+ fs_info->qgroup_rescan_running = false;
+ complete_all(&fs_info->qgroup_rescan_completion);
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+ if (!trans)
+ return;
+
btrfs_end_transaction(trans);
if (btrfs_fs_closing(fs_info)) {
@@ -3192,12 +3215,6 @@ out:
} else {
btrfs_err(fs_info, "qgroup scan failed with %d", err);
}
-
-done:
- mutex_lock(&fs_info->qgroup_rescan_lock);
- fs_info->qgroup_rescan_running = false;
- mutex_unlock(&fs_info->qgroup_rescan_lock);
- complete_all(&fs_info->qgroup_rescan_completion);
}
/*
@@ -3260,10 +3277,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
spin_unlock(&fs_info->qgroup_lock);
mutex_unlock(&fs_info->qgroup_rescan_lock);
- memset(&fs_info->qgroup_rescan_work, 0,
- sizeof(fs_info->qgroup_rescan_work));
btrfs_init_work(&fs_info->qgroup_rescan_work,
- btrfs_qgroup_rescan_helper,
btrfs_qgroup_rescan_worker, NULL, NULL);
return 0;
}
@@ -3425,6 +3439,9 @@ cleanup:
while ((unode = ulist_next(&reserved->range_changed, &uiter)))
clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
+	/* Also free the data bytes of the already reserved ranges */
+ btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
+ orig_reserved, BTRFS_QGROUP_RSV_DATA);
extent_changeset_release(reserved);
return ret;
}
@@ -3469,7 +3486,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
* EXTENT_QGROUP_RESERVED, we won't double free.
* So not need to rush.
*/
- ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
+ ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
free_start, free_start + free_len - 1,
EXTENT_QGROUP_RESERVED, &changeset);
if (ret < 0)
@@ -3609,7 +3626,7 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
return 0;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
+ trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
ret = qgroup_reserve(root, num_bytes, enforce, type);
if (ret < 0)
return ret;
@@ -3656,7 +3673,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
*/
num_bytes = sub_root_meta_rsv(root, num_bytes, type);
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
+ trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
num_bytes, type);
}
@@ -3806,7 +3823,7 @@ out:
*/
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *subvol_root,
- struct btrfs_block_group_cache *bg,
+ struct btrfs_block_group *bg,
struct extent_buffer *subvol_parent, int subvol_slot,
struct extent_buffer *reloc_parent, int reloc_slot,
u64 last_snapshot)
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 46ba7bd2961c..236f12224d52 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -408,7 +408,7 @@ void btrfs_qgroup_init_swapped_blocks(
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *subvol_root,
- struct btrfs_block_group_cache *bg,
+ struct btrfs_block_group *bg,
struct extent_buffer *subvol_parent, int subvol_slot,
struct extent_buffer *reloc_parent, int reloc_slot,
u64 last_snapshot);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index f3d0576dd327..a8e53c8e7b01 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -35,6 +35,22 @@
#define RBIO_CACHE_SIZE 1024
+#define BTRFS_STRIPE_HASH_TABLE_BITS 11
+
+/* Used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash {
+ struct list_head hash_list;
+ spinlock_t lock;
+};
+
+/* Used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash_table {
+ struct list_head stripe_cache;
+ spinlock_t cache_lock;
+ int cache_size;
+ struct btrfs_stripe_hash table[];
+};
+
enum btrfs_rbio_ops {
BTRFS_RBIO_WRITE,
BTRFS_RBIO_READ_REBUILD,
@@ -174,7 +190,7 @@ static void scrub_parity_work(struct btrfs_work *work);
static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
- btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
+ btrfs_init_work(&rbio->work, work_func, NULL, NULL);
btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
@@ -655,8 +671,7 @@ static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
*/
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
- int bucket = rbio_bucket(rbio);
- struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
+ struct btrfs_stripe_hash *h;
struct btrfs_raid_bio *cur;
struct btrfs_raid_bio *pending;
unsigned long flags;
@@ -664,64 +679,63 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
struct btrfs_raid_bio *cache_drop = NULL;
int ret = 0;
+ h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
+
spin_lock_irqsave(&h->lock, flags);
list_for_each_entry(cur, &h->hash_list, hash_list) {
- if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
- spin_lock(&cur->bio_list_lock);
-
- /* can we steal this cached rbio's pages? */
- if (bio_list_empty(&cur->bio_list) &&
- list_empty(&cur->plug_list) &&
- test_bit(RBIO_CACHE_BIT, &cur->flags) &&
- !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
- list_del_init(&cur->hash_list);
- refcount_dec(&cur->refs);
-
- steal_rbio(cur, rbio);
- cache_drop = cur;
- spin_unlock(&cur->bio_list_lock);
+ if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
+ continue;
- goto lockit;
- }
+ spin_lock(&cur->bio_list_lock);
- /* can we merge into the lock owner? */
- if (rbio_can_merge(cur, rbio)) {
- merge_rbio(cur, rbio);
- spin_unlock(&cur->bio_list_lock);
- freeit = rbio;
- ret = 1;
- goto out;
- }
+ /* Can we steal this cached rbio's pages? */
+ if (bio_list_empty(&cur->bio_list) &&
+ list_empty(&cur->plug_list) &&
+ test_bit(RBIO_CACHE_BIT, &cur->flags) &&
+ !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
+ list_del_init(&cur->hash_list);
+ refcount_dec(&cur->refs);
+ steal_rbio(cur, rbio);
+ cache_drop = cur;
+ spin_unlock(&cur->bio_list_lock);
- /*
- * we couldn't merge with the running
- * rbio, see if we can merge with the
- * pending ones. We don't have to
- * check for rmw_locked because there
- * is no way they are inside finish_rmw
- * right now
- */
- list_for_each_entry(pending, &cur->plug_list,
- plug_list) {
- if (rbio_can_merge(pending, rbio)) {
- merge_rbio(pending, rbio);
- spin_unlock(&cur->bio_list_lock);
- freeit = rbio;
- ret = 1;
- goto out;
- }
- }
+ goto lockit;
+ }
- /* no merging, put us on the tail of the plug list,
- * our rbio will be started with the currently
- * running rbio unlocks
- */
- list_add_tail(&rbio->plug_list, &cur->plug_list);
+ /* Can we merge into the lock owner? */
+ if (rbio_can_merge(cur, rbio)) {
+ merge_rbio(cur, rbio);
spin_unlock(&cur->bio_list_lock);
+ freeit = rbio;
ret = 1;
goto out;
}
+
+
+ /*
+ * We couldn't merge with the running rbio, see if we can merge
+ * with the pending ones. We don't have to check for rmw_locked
+ * because there is no way they are inside finish_rmw right now
+ */
+ list_for_each_entry(pending, &cur->plug_list, plug_list) {
+ if (rbio_can_merge(pending, rbio)) {
+ merge_rbio(pending, rbio);
+ spin_unlock(&cur->bio_list_lock);
+ freeit = rbio;
+ ret = 1;
+ goto out;
+ }
+ }
+
+ /*
+		 * No merging, put us on the tail of the plug list, our rbio
+		 * will be started when the currently running rbio unlocks
+ */
+ list_add_tail(&rbio->plug_list, &cur->plug_list);
+ spin_unlock(&cur->bio_list_lock);
+ ret = 1;
+ goto out;
}
lockit:
refcount_inc(&rbio->refs);
@@ -1727,8 +1741,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
plug = container_of(cb, struct btrfs_plug_cb, cb);
if (from_schedule) {
- btrfs_init_work(&plug->work, btrfs_rmw_helper,
- unplug_work, NULL, NULL);
+ btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
btrfs_queue_work(plug->info->rmw_workers,
&plug->work);
return;
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index bb5bd49573b4..243a2e44526e 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -14,6 +14,7 @@
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"
+#include "block-group.h"
#undef DEBUG
@@ -226,7 +227,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
struct btrfs_fs_info *fs_info = dev->fs_info;
int ret;
struct reada_zone *zone;
- struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_block_group *cache = NULL;
u64 start;
u64 end;
int i;
@@ -247,8 +248,8 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
if (!cache)
return NULL;
- start = cache->key.objectid;
- end = start + cache->key.offset - 1;
+ start = cache->start;
+ end = start + cache->length - 1;
btrfs_put_block_group(cache);
zone = kzalloc(sizeof(*zone), GFP_KERNEL);
@@ -638,6 +639,35 @@ static int reada_pick_zone(struct btrfs_device *dev)
return 1;
}
+static int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
+ int mirror_num, struct extent_buffer **eb)
+{
+ struct extent_buffer *buf = NULL;
+ int ret;
+
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
+ if (IS_ERR(buf))
+ return 0;
+
+ set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
+
+ ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num);
+ if (ret) {
+ free_extent_buffer_stale(buf);
+ return ret;
+ }
+
+ if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
+ free_extent_buffer_stale(buf);
+ return -EIO;
+ } else if (extent_buffer_uptodate(buf)) {
+ *eb = buf;
+ } else {
+ free_extent_buffer(buf);
+ }
+ return 0;
+}
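
A hedged sketch of how the now file-local helper above is consumed (the caller shown is illustrative; in this file the real user is the readahead state machine):

static void example_readahead_one(struct btrfs_fs_info *fs_info, u64 logical,
				  int mirror_num)
{
	struct extent_buffer *eb = NULL;

	if (reada_tree_block_flagged(fs_info, logical, mirror_num, &eb))
		return;
	/* eb is only set when the read completed and the buffer is uptodate */
	if (eb) {
		/* ... queue further readahead based on the tree block ... */
		free_extent_buffer(eb);
	}
}
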
+
static int reada_start_machine_dev(struct btrfs_device *dev)
{
struct btrfs_fs_info *fs_info = dev->fs_info;
@@ -722,21 +752,19 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
static void reada_start_machine_worker(struct btrfs_work *work)
{
struct reada_machine_work *rmw;
- struct btrfs_fs_info *fs_info;
int old_ioprio;
rmw = container_of(work, struct reada_machine_work, work);
- fs_info = rmw->fs_info;
-
- kfree(rmw);
old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
task_nice_ioprio(current));
set_task_ioprio(current, BTRFS_IOPRIO_READA);
- __reada_start_machine(fs_info);
+ __reada_start_machine(rmw->fs_info);
set_task_ioprio(current, old_ioprio);
- atomic_dec(&fs_info->reada_works_cnt);
+ atomic_dec(&rmw->fs_info->reada_works_cnt);
+
+ kfree(rmw);
}
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
@@ -791,8 +819,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
/* FIXME we cannot handle this properly right now */
BUG();
}
- btrfs_init_work(&rmw->work, btrfs_readahead_helper,
- reada_start_machine_worker, NULL, NULL);
+ btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
rmw->fs_info = fs_info;
btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index e87cbdad02a3..b57f3618e58e 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -500,7 +500,7 @@ static int process_leaf(struct btrfs_root *root,
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
u32 count;
- int i = 0, tree_block_level = 0, ret;
+ int i = 0, tree_block_level = 0, ret = 0;
struct btrfs_key key;
int nritems = btrfs_header_nritems(leaf);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 7f219851fa23..d897a8e5e430 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -21,6 +21,7 @@
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
+#include "block-group.h"
/*
* backref_node, mapping_node and tree_block start with this
@@ -146,7 +147,7 @@ struct file_extent_cluster {
struct reloc_control {
/* block group to relocate */
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
/* extent tree */
struct btrfs_root *extent_root;
/* inode for moving data */
@@ -1434,6 +1435,13 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
int clear_rsv = 0;
int ret;
+ /*
+ * The subvolume has reloc tree but the swap is finished, no need to
+ * create/update the dead reloc tree
+ */
+ if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
+ return 0;
+
if (root->reloc_root) {
reloc_root = root->reloc_root;
reloc_root->last_trans = trans->transid;
@@ -1552,11 +1560,10 @@ again:
return NULL;
}
-static int in_block_group(u64 bytenr,
- struct btrfs_block_group_cache *block_group)
+static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group)
{
- if (bytenr >= block_group->key.objectid &&
- bytenr < block_group->key.objectid + block_group->key.offset)
+ if (bytenr >= block_group->start &&
+ bytenr < block_group->start + block_group->length)
return 1;
return 0;
}
@@ -2186,7 +2193,6 @@ static int clean_dirty_subvols(struct reloc_control *rc)
/* Merged subvolume, cleanup its reloc root */
struct btrfs_root *reloc_root = root->reloc_root;
- clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
list_del_init(&root->reloc_dirty_list);
root->reloc_root = NULL;
if (reloc_root) {
@@ -2195,6 +2201,7 @@ static int clean_dirty_subvols(struct reloc_control *rc)
if (ret2 < 0 && !ret)
ret = ret2;
}
+ clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
btrfs_put_fs_root(root);
} else {
/* Orphan reloc tree, just clean it up */
@@ -2238,7 +2245,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_root_level(root_item);
- extent_buffer_get(reloc_root->node);
+ atomic_inc(&reloc_root->node->refs);
path->nodes[level] = reloc_root->node;
path->slots[level] = 0;
} else {
@@ -3187,7 +3194,6 @@ static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
u64 block_start)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
int ret = 0;
@@ -3200,7 +3206,6 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
em->len = end + 1 - start;
em->block_len = em->len;
em->block_start = block_start;
- em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
@@ -3269,6 +3274,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
if (!page) {
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode),
+ PAGE_SIZE);
ret = -ENOMEM;
goto out;
}
@@ -3289,7 +3296,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE, true);
btrfs_delalloc_release_extents(BTRFS_I(inode),
- PAGE_SIZE, true);
+ PAGE_SIZE);
ret = -EIO;
goto out;
}
@@ -3311,14 +3318,14 @@ static int relocate_file_extent_cluster(struct inode *inode,
}
ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
- NULL, 0);
+ NULL);
if (ret) {
unlock_page(page);
put_page(page);
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE, true);
btrfs_delalloc_release_extents(BTRFS_I(inode),
- PAGE_SIZE, true);
+ PAGE_SIZE);
clear_extent_bits(&BTRFS_I(inode)->io_tree,
page_start, page_end,
@@ -3334,8 +3341,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
put_page(page);
index++;
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE,
- false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
balance_dirty_pages_ratelimited(inode->i_mapping);
btrfs_throttle(fs_info);
}
@@ -3535,7 +3541,7 @@ static int block_use_full_backref(struct reloc_control *rc,
}
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct inode *inode,
u64 ino)
{
@@ -3551,7 +3557,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
if (IS_ERR(inode))
return -ENOENT;
@@ -3854,7 +3860,7 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
u64 start, end, last;
int ret;
- last = rc->block_group->key.objectid + rc->block_group->key.offset;
+ last = rc->block_group->start + rc->block_group->length;
while (1) {
cond_resched();
if (rc->search_start >= last) {
@@ -3971,7 +3977,7 @@ int prepare_to_relocate(struct reloc_control *rc)
return -ENOMEM;
memset(&rc->cluster, 0, sizeof(rc->cluster));
- rc->search_start = rc->block_group->key.objectid;
+ rc->search_start = rc->block_group->start;
rc->extents_found = 0;
rc->nodes_relocated = 0;
rc->merging_rsv_size = 0;
@@ -4210,7 +4216,7 @@ out:
*/
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *group)
+ struct btrfs_block_group *group)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
@@ -4237,9 +4243,9 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
key.objectid = objectid;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
BUG_ON(IS_ERR(inode));
- BTRFS_I(inode)->index_cnt = group->key.objectid;
+ BTRFS_I(inode)->index_cnt = group->start;
err = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
@@ -4274,7 +4280,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
* Print the block group being relocated
*/
static void describe_relocation(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
char buf[128] = {'\0'};
@@ -4282,7 +4288,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
btrfs_info(fs_info,
"relocating block group %llu flags %s",
- block_group->key.objectid, buf);
+ block_group->start, buf);
}
/*
@@ -4290,7 +4296,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
*/
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
- struct btrfs_block_group_cache *bg;
+ struct btrfs_block_group *bg;
struct btrfs_root *extent_root = fs_info->extent_root;
struct reloc_control *rc;
struct inode *inode;
@@ -4317,7 +4323,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
rc->extent_root = extent_root;
rc->block_group = bg;
- ret = btrfs_inc_block_group_ro(rc->block_group);
+ ret = btrfs_inc_block_group_ro(rc->block_group, true);
if (ret) {
err = ret;
goto out;
@@ -4355,8 +4361,8 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group);
btrfs_wait_ordered_roots(fs_info, U64_MAX,
- rc->block_group->key.objectid,
- rc->block_group->key.offset);
+ rc->block_group->start,
+ rc->block_group->length);
while (1) {
mutex_lock(&fs_info->cleaner_mutex);
@@ -4396,7 +4402,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
WARN_ON(rc->block_group->pinned > 0);
WARN_ON(rc->block_group->reserved > 0);
- WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
+ WARN_ON(rc->block_group->used > 0);
out:
if (err && rw)
btrfs_dec_block_group_ro(rc->block_group);
@@ -4679,7 +4685,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
node->new_bytenr != buf->start);
drop_node_buffer(node);
- extent_buffer_get(cow);
+ atomic_inc(&cow->refs);
node->eb = cow;
node->new_bytenr = cow->start;
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 47733fb55df7..3b17b647d002 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -533,7 +533,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
return ret;
}
- num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
+ num_bytes = btrfs_calc_insert_metadata_size(fs_info, items);
rsv->space_info = btrfs_find_space_info(fs_info,
BTRFS_BLOCK_GROUP_METADATA);
ret = btrfs_block_rsv_add(root, rsv, num_bytes,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 0c99cf9fb595..21de630b0730 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -18,6 +18,7 @@
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
+#include "block-group.h"
/*
* This is only the first step towards a full-features scrub. It reads all
@@ -388,8 +389,7 @@ static struct full_stripe_lock *search_full_stripe_lock(
*
* Caller must ensure @cache is a RAID56 block group.
*/
-static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
- u64 bytenr)
+static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
u64 ret;
@@ -403,8 +403,8 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
* round_down() can only handle power of 2, while RAID56 full
* stripe length can be 64KiB * n, so we need to manually round down.
*/
- ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
- cache->full_stripe_len + cache->key.objectid;
+ ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
+ cache->full_stripe_len + cache->start;
return ret;
}
@@ -422,7 +422,7 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
bool *locked_ret)
{
- struct btrfs_block_group_cache *bg_cache;
+ struct btrfs_block_group *bg_cache;
struct btrfs_full_stripe_locks_tree *locks_root;
struct full_stripe_lock *existing;
u64 fstripe_start;
@@ -469,7 +469,7 @@ out:
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
bool locked)
{
- struct btrfs_block_group_cache *bg_cache;
+ struct btrfs_block_group *bg_cache;
struct btrfs_full_stripe_locks_tree *locks_root;
struct full_stripe_lock *fstripe_lock;
u64 fstripe_start;
@@ -597,8 +597,8 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
sbio->index = i;
sbio->sctx = sctx;
sbio->page_count = 0;
- btrfs_init_work(&sbio->work, btrfs_scrub_helper,
- scrub_bio_end_io_worker, NULL, NULL);
+ btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
+ NULL);
if (i != SCRUB_BIOS_PER_SCTX - 1)
sctx->bios[i]->next_free = i + 1;
@@ -1719,8 +1719,7 @@ static void scrub_wr_bio_end_io(struct bio *bio)
sbio->status = bio->bi_status;
sbio->bio = bio;
- btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
- scrub_wr_bio_end_io_worker, NULL, NULL);
+ btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}
@@ -2148,14 +2147,13 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
scrub_write_block_to_dev_replace(sblock);
}
- scrub_block_put(sblock);
-
if (sctx->is_dev_replace && sctx->flush_all_writes) {
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
}
+ scrub_block_put(sblock);
scrub_pending_bio_dec(sctx);
}
@@ -2203,8 +2201,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
raid56_add_scrub_pages(rbio, spage->page, spage->logical);
}
- btrfs_init_work(&sblock->work, btrfs_scrub_helper,
- scrub_missing_raid56_worker, NULL, NULL);
+ btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
scrub_block_get(sblock);
scrub_pending_bio_inc(sctx);
raid56_submit_missing_rbio(rbio);
@@ -2742,8 +2739,8 @@ static void scrub_parity_bio_endio(struct bio *bio)
bio_put(bio);
- btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
- scrub_parity_bio_endio_worker, NULL, NULL);
+ btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
+ NULL);
btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}
@@ -3419,7 +3416,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev,
u64 chunk_offset, u64 length,
u64 dev_offset,
- struct btrfs_block_group_cache *cache)
+ struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct extent_map_tree *map_tree = &fs_info->mapping_tree;
@@ -3483,7 +3480,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
struct extent_buffer *l;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
path = btrfs_alloc_path();
@@ -3562,46 +3559,26 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* -> btrfs_scrub_pause()
*/
scrub_pause_on(fs_info);
- ret = btrfs_inc_block_group_ro(cache);
- if (!ret && sctx->is_dev_replace) {
- /*
- * If we are doing a device replace wait for any tasks
- * that started delalloc right before we set the block
- * group to RO mode, as they might have just allocated
- * an extent from it or decided they could do a nocow
- * write. And if any such tasks did that, wait for their
- * ordered extents to complete and then commit the
- * current transaction, so that we can later see the new
- * extent items in the extent tree - the ordered extents
- * create delayed data references (for cow writes) when
- * they complete, which will be run and insert the
- * corresponding extent items into the extent tree when
- * we commit the transaction they used when running
- * inode.c:btrfs_finish_ordered_io(). We later use
- * the commit root of the extent tree to find extents
- * to copy from the srcdev into the tgtdev, and we don't
- * want to miss any new extents.
- */
- btrfs_wait_block_group_reservations(cache);
- btrfs_wait_nocow_writers(cache);
- ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
- cache->key.objectid,
- cache->key.offset);
- if (ret > 0) {
- struct btrfs_trans_handle *trans;
-
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- ret = PTR_ERR(trans);
- else
- ret = btrfs_commit_transaction(trans);
- if (ret) {
- scrub_pause_off(fs_info);
- btrfs_put_block_group(cache);
- break;
- }
- }
- }
+
+ /*
+ * Don't do chunk preallocation for scrub.
+ *
+ * This is especially important for SYSTEM bgs, or we can hit
+ * -EFBIG from btrfs_finish_chunk_alloc() like:
+ * 1. The only SYSTEM bg is marked RO.
+ * Since SYSTEM bg is small, that's pretty common.
+ * 2. New SYSTEM bg will be allocated
+ * Since the regular code path will allocate a new chunk.
+ * 3. New SYSTEM bg is empty and will get cleaned up
+ * Before cleanup really happens, it's marked RO again.
+ * 4. Empty SYSTEM bg gets scrubbed
+ * We go back to 2.
+ *
+ * This can easily boost the number of SYSTEM chunks if the cleaner
+ * thread can't be triggered fast enough, and use up all the space in
+ * btrfs_super_block::sys_chunk_array.
+ */
+ ret = btrfs_inc_block_group_ro(cache, false);
scrub_pause_off(fs_info);
if (ret == 0) {
@@ -3622,7 +3599,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
break;
}
- down_write(&fs_info->dev_replace.rwsem);
+ down_write(&dev_replace->rwsem);
dev_replace->cursor_right = found_key.offset + length;
dev_replace->cursor_left = found_key.offset;
dev_replace->item_needs_writeback = 1;
@@ -3663,10 +3640,10 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
scrub_pause_off(fs_info);
- down_write(&fs_info->dev_replace.rwsem);
+ down_write(&dev_replace->rwsem);
dev_replace->cursor_left = dev_replace->cursor_right;
dev_replace->item_needs_writeback = 1;
- up_write(&fs_info->dev_replace.rwsem);
+ up_write(&dev_replace->rwsem);
if (ro_set)
btrfs_dec_block_group_ro(cache);
@@ -3680,7 +3657,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/
spin_lock(&cache->lock);
if (!cache->removed && !cache->ro && cache->reserved == 0 &&
- btrfs_block_group_used(&cache->item) == 0) {
+ cache->used == 0) {
spin_unlock(&cache->lock);
btrfs_mark_bg_unused(cache);
} else {
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 69b59bf75882..ae2db5eb1549 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -25,6 +25,14 @@
#include "compression.h"
/*
+ * Maximum number of references an extent can have in order for us to attempt to
+ * issue clone operations instead of write operations. This currently exists to
+ * avoid hitting limitations of the backreference walking code (taking a lot of
+ * time and using too much memory for extents with a large number of references).
+ */
+#define SEND_MAX_EXTENT_REFS 64
+
+/*
* A fs_path is a helper to dynamically build path names with unknown size.
* It reallocates the internal buffer on demand.
* It allows fast adding of path elements on the right side (normal path) and
@@ -260,6 +268,21 @@ struct name_cache_entry {
char name[];
};
+#define ADVANCE 1
+#define ADVANCE_ONLY_NEXT -1
+
+enum btrfs_compare_tree_result {
+ BTRFS_COMPARE_TREE_NEW,
+ BTRFS_COMPARE_TREE_DELETED,
+ BTRFS_COMPARE_TREE_CHANGED,
+ BTRFS_COMPARE_TREE_SAME,
+};
+typedef int (*btrfs_changed_cb_t)(struct btrfs_path *left_path,
+ struct btrfs_path *right_path,
+ struct btrfs_key *key,
+ enum btrfs_compare_tree_result result,
+ void *ctx);
+
__cold
static void inconsistent_snapshot_error(struct send_ctx *sctx,
enum btrfs_compare_tree_result result,
@@ -1233,12 +1256,20 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
*/
if (found->root == bctx->sctx->send_root) {
/*
- * TODO for the moment we don't accept clones from the inode
- * that is currently send. We may change this when
- * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
- * file.
+ * If the source inode was not yet processed we can't issue a
+ * clone operation, as the source extent does not exist yet at
+ * the destination of the stream.
*/
- if (ino >= bctx->cur_objectid)
+ if (ino > bctx->cur_objectid)
+ return 0;
+ /*
+ * We clone from the inode currently being sent as long as the
+ * source extent is already processed, otherwise we could try
+ * to clone from an extent that does not exist yet at the
+ * destination of the stream.
+ */
+ if (ino == bctx->cur_objectid &&
+ offset >= bctx->sctx->cur_inode_next_write_offset)
return 0;
}
@@ -1287,6 +1318,7 @@ static int find_extent_clone(struct send_ctx *sctx,
struct clone_root *cur_clone_root;
struct btrfs_key found_key;
struct btrfs_path *tmp_path;
+ struct btrfs_extent_item *ei;
int compressed;
u32 i;
@@ -1334,7 +1366,6 @@ static int find_extent_clone(struct send_ctx *sctx,
ret = extent_from_logical(fs_info, disk_byte, tmp_path,
&found_key, &flags);
up_read(&fs_info->commit_root_sem);
- btrfs_release_path(tmp_path);
if (ret < 0)
goto out;
@@ -1343,6 +1374,21 @@ static int find_extent_clone(struct send_ctx *sctx,
goto out;
}
+ ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
+ struct btrfs_extent_item);
+ /*
+ * Backreference walking (iterate_extent_inodes() below) is currently
+ * too expensive when an extent has a large number of references, both
+ * in time spent and used memory. So for now just fall back to write
+ * operations instead of clone operations when an extent has more than
+ * a certain number of references.
+ */
+ if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
+ ret = -ENOENT;
+ goto out;
+ }
+ btrfs_release_path(tmp_path);
+
/*
* Setup the clone roots.
*/
@@ -4764,7 +4810,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -5070,7 +5116,7 @@ static int clone_range(struct send_ctx *sctx,
struct btrfs_path *path;
struct btrfs_key key;
int ret;
- u64 clone_src_i_size;
+ u64 clone_src_i_size = 0;
/*
* Prevent cloning from a zero offset with a length matching the sector
@@ -6322,68 +6368,21 @@ static int changed_extent(struct send_ctx *sctx,
{
int ret = 0;
- if (sctx->cur_ino != sctx->cmp_key->objectid) {
-
- if (result == BTRFS_COMPARE_TREE_CHANGED) {
- struct extent_buffer *leaf_l;
- struct extent_buffer *leaf_r;
- struct btrfs_file_extent_item *ei_l;
- struct btrfs_file_extent_item *ei_r;
-
- leaf_l = sctx->left_path->nodes[0];
- leaf_r = sctx->right_path->nodes[0];
- ei_l = btrfs_item_ptr(leaf_l,
- sctx->left_path->slots[0],
- struct btrfs_file_extent_item);
- ei_r = btrfs_item_ptr(leaf_r,
- sctx->right_path->slots[0],
- struct btrfs_file_extent_item);
-
- /*
- * We may have found an extent item that has changed
- * only its disk_bytenr field and the corresponding
- * inode item was not updated. This case happens due to
- * very specific timings during relocation when a leaf
- * that contains file extent items is COWed while
- * relocation is ongoing and its in the stage where it
- * updates data pointers. So when this happens we can
- * safely ignore it since we know it's the same extent,
- * but just at different logical and physical locations
- * (when an extent is fully replaced with a new one, we
- * know the generation number must have changed too,
- * since snapshot creation implies committing the current
- * transaction, and the inode item must have been updated
- * as well).
- * This replacement of the disk_bytenr happens at
- * relocation.c:replace_file_extents() through
- * relocation.c:btrfs_reloc_cow_block().
- */
- if (btrfs_file_extent_generation(leaf_l, ei_l) ==
- btrfs_file_extent_generation(leaf_r, ei_r) &&
- btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_compression(leaf_l, ei_l) ==
- btrfs_file_extent_compression(leaf_r, ei_r) &&
- btrfs_file_extent_encryption(leaf_l, ei_l) ==
- btrfs_file_extent_encryption(leaf_r, ei_r) &&
- btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
- btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
- btrfs_file_extent_type(leaf_l, ei_l) ==
- btrfs_file_extent_type(leaf_r, ei_r) &&
- btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
- btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
- btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_offset(leaf_l, ei_l) ==
- btrfs_file_extent_offset(leaf_r, ei_r) &&
- btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_num_bytes(leaf_r, ei_r))
- return 0;
- }
-
- inconsistent_snapshot_error(sctx, result, "extent");
- return -EIO;
- }
+ /*
+ * We have found an extent item that changed without the inode item
+ * having changed. This can happen either after relocation (where the
+ * disk_bytenr of an extent item is replaced at
+ * relocation.c:replace_file_extents()) or after deduplication into a
+ * file in both the parent and send snapshots (where an extent item can
+ * get modified or replaced with a new one). Note that deduplication
+ * updates the inode item, but it only changes the iversion (sequence
+ * field in the inode item) of the inode, so if a file is deduplicated
+ * the same number of times in both the parent and send snapshots, its
+ * iversion becomes the same in both snapshots, hence the inode item is
+ * the same on both snapshots.
+ */
+ if (sctx->cur_ino != sctx->cmp_key->objectid)
+ return 0;
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result != BTRFS_COMPARE_TREE_DELETED)
@@ -6561,6 +6560,366 @@ out:
return ret;
}
+static int tree_move_down(struct btrfs_path *path, int *level)
+{
+ struct extent_buffer *eb;
+
+ BUG_ON(*level == 0);
+ eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]);
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
+
+ path->nodes[*level - 1] = eb;
+ path->slots[*level - 1] = 0;
+ (*level)--;
+ return 0;
+}
+
+static int tree_move_next_or_upnext(struct btrfs_path *path,
+ int *level, int root_level)
+{
+ int ret = 0;
+ int nritems;
+ nritems = btrfs_header_nritems(path->nodes[*level]);
+
+ path->slots[*level]++;
+
+ while (path->slots[*level] >= nritems) {
+ if (*level == root_level)
+ return -1;
+
+ /* move upnext */
+ path->slots[*level] = 0;
+ free_extent_buffer(path->nodes[*level]);
+ path->nodes[*level] = NULL;
+ (*level)++;
+ path->slots[*level]++;
+
+ nritems = btrfs_header_nritems(path->nodes[*level]);
+ ret = 1;
+ }
+ return ret;
+}
+
+/*
+ * Returns 1 if it had to move up and next. 0 is returned if it moved only next
+ * or down.
+ */
+static int tree_advance(struct btrfs_path *path,
+ int *level, int root_level,
+ int allow_down,
+ struct btrfs_key *key)
+{
+ int ret;
+
+ if (*level == 0 || !allow_down) {
+ ret = tree_move_next_or_upnext(path, level, root_level);
+ } else {
+ ret = tree_move_down(path, level);
+ }
+ if (ret >= 0) {
+ if (*level == 0)
+ btrfs_item_key_to_cpu(path->nodes[*level], key,
+ path->slots[*level]);
+ else
+ btrfs_node_key_to_cpu(path->nodes[*level], key,
+ path->slots[*level]);
+ }
+ return ret;
+}
+
+static int tree_compare_item(struct btrfs_path *left_path,
+ struct btrfs_path *right_path,
+ char *tmp_buf)
+{
+ int cmp;
+ int len1, len2;
+ unsigned long off1, off2;
+
+ len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
+ len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
+ if (len1 != len2)
+ return 1;
+
+ off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
+ off2 = btrfs_item_ptr_offset(right_path->nodes[0],
+ right_path->slots[0]);
+
+ read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
+
+ cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
+ if (cmp)
+ return 1;
+ return 0;
+}
+
+/*
+ * This function compares two trees and calls the provided callback for
+ * every changed/new/deleted item it finds.
+ * If shared tree blocks are encountered, whole subtrees are skipped, making
+ * the compare pretty fast on snapshotted subvolumes.
+ *
+ * This currently works on commit roots only. As commit roots are read only,
+ * we don't do any locking. The commit roots are protected with transactions.
+ * Transactions are ended and rejoined when a commit is tried in between.
+ *
+ * This function checks for modifications done to the trees while comparing.
+ * If it detects a change, it aborts immediately.
+ */
+static int btrfs_compare_trees(struct btrfs_root *left_root,
+ struct btrfs_root *right_root,
+ btrfs_changed_cb_t changed_cb, void *ctx)
+{
+ struct btrfs_fs_info *fs_info = left_root->fs_info;
+ int ret;
+ int cmp;
+ struct btrfs_path *left_path = NULL;
+ struct btrfs_path *right_path = NULL;
+ struct btrfs_key left_key;
+ struct btrfs_key right_key;
+ char *tmp_buf = NULL;
+ int left_root_level;
+ int right_root_level;
+ int left_level;
+ int right_level;
+ int left_end_reached;
+ int right_end_reached;
+ int advance_left;
+ int advance_right;
+ u64 left_blockptr;
+ u64 right_blockptr;
+ u64 left_gen;
+ u64 right_gen;
+
+ left_path = btrfs_alloc_path();
+ if (!left_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ right_path = btrfs_alloc_path();
+ if (!right_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ left_path->search_commit_root = 1;
+ left_path->skip_locking = 1;
+ right_path->search_commit_root = 1;
+ right_path->skip_locking = 1;
+
+ /*
+ * Strategy: Go to the first items of both trees. Then do
+ *
+ * If both trees are at level 0
+ * Compare keys of current items
+ * If left < right treat left item as new, advance left tree
+ * and repeat
+ * If left > right treat right item as deleted, advance right tree
+ * and repeat
+ * If left == right do deep compare of items, treat as changed if
+ * needed, advance both trees and repeat
+ * If both trees are at the same level but not at level 0
+ * Compare keys of current nodes/leafs
+ * If left < right advance left tree and repeat
+ * If left > right advance right tree and repeat
+ * If left == right compare blockptrs of the next nodes/leafs
+ * If they match advance both trees but stay at the same level
+ * and repeat
+ * If they don't match advance both trees while allowing to go
+ * deeper and repeat
+ * If tree levels are different
+ * Advance the tree that needs it and repeat
+ *
+ * Advancing a tree means:
+ * If we are at level 0, try to go to the next slot. If that's not
+ * possible, go one level up and repeat. Stop when we find a level
+ * where we could go to the next slot. We may at this point be on a
+ * node or a leaf.
+ *
+ * If we are not at level 0 and not on shared tree blocks, go one
+ * level deeper.
+ *
+ * If we are not at level 0 and on shared tree blocks, go one slot to
+ * the right if possible or go up and right.
+ */
+
+ down_read(&fs_info->commit_root_sem);
+ left_level = btrfs_header_level(left_root->commit_root);
+ left_root_level = left_level;
+ left_path->nodes[left_level] =
+ btrfs_clone_extent_buffer(left_root->commit_root);
+ if (!left_path->nodes[left_level]) {
+ up_read(&fs_info->commit_root_sem);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ right_level = btrfs_header_level(right_root->commit_root);
+ right_root_level = right_level;
+ right_path->nodes[right_level] =
+ btrfs_clone_extent_buffer(right_root->commit_root);
+ if (!right_path->nodes[right_level]) {
+ up_read(&fs_info->commit_root_sem);
+ ret = -ENOMEM;
+ goto out;
+ }
+ up_read(&fs_info->commit_root_sem);
+
+ if (left_level == 0)
+ btrfs_item_key_to_cpu(left_path->nodes[left_level],
+ &left_key, left_path->slots[left_level]);
+ else
+ btrfs_node_key_to_cpu(left_path->nodes[left_level],
+ &left_key, left_path->slots[left_level]);
+ if (right_level == 0)
+ btrfs_item_key_to_cpu(right_path->nodes[right_level],
+ &right_key, right_path->slots[right_level]);
+ else
+ btrfs_node_key_to_cpu(right_path->nodes[right_level],
+ &right_key, right_path->slots[right_level]);
+
+ left_end_reached = right_end_reached = 0;
+ advance_left = advance_right = 0;
+
+ while (1) {
+ cond_resched();
+ if (advance_left && !left_end_reached) {
+ ret = tree_advance(left_path, &left_level,
+ left_root_level,
+ advance_left != ADVANCE_ONLY_NEXT,
+ &left_key);
+ if (ret == -1)
+ left_end_reached = ADVANCE;
+ else if (ret < 0)
+ goto out;
+ advance_left = 0;
+ }
+ if (advance_right && !right_end_reached) {
+ ret = tree_advance(right_path, &right_level,
+ right_root_level,
+ advance_right != ADVANCE_ONLY_NEXT,
+ &right_key);
+ if (ret == -1)
+ right_end_reached = ADVANCE;
+ else if (ret < 0)
+ goto out;
+ advance_right = 0;
+ }
+
+ if (left_end_reached && right_end_reached) {
+ ret = 0;
+ goto out;
+ } else if (left_end_reached) {
+ if (right_level == 0) {
+ ret = changed_cb(left_path, right_path,
+ &right_key,
+ BTRFS_COMPARE_TREE_DELETED,
+ ctx);
+ if (ret < 0)
+ goto out;
+ }
+ advance_right = ADVANCE;
+ continue;
+ } else if (right_end_reached) {
+ if (left_level == 0) {
+ ret = changed_cb(left_path, right_path,
+ &left_key,
+ BTRFS_COMPARE_TREE_NEW,
+ ctx);
+ if (ret < 0)
+ goto out;
+ }
+ advance_left = ADVANCE;
+ continue;
+ }
+
+ if (left_level == 0 && right_level == 0) {
+ cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
+ if (cmp < 0) {
+ ret = changed_cb(left_path, right_path,
+ &left_key,
+ BTRFS_COMPARE_TREE_NEW,
+ ctx);
+ if (ret < 0)
+ goto out;
+ advance_left = ADVANCE;
+ } else if (cmp > 0) {
+ ret = changed_cb(left_path, right_path,
+ &right_key,
+ BTRFS_COMPARE_TREE_DELETED,
+ ctx);
+ if (ret < 0)
+ goto out;
+ advance_right = ADVANCE;
+ } else {
+ enum btrfs_compare_tree_result result;
+
+ WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
+ ret = tree_compare_item(left_path, right_path,
+ tmp_buf);
+ if (ret)
+ result = BTRFS_COMPARE_TREE_CHANGED;
+ else
+ result = BTRFS_COMPARE_TREE_SAME;
+ ret = changed_cb(left_path, right_path,
+ &left_key, result, ctx);
+ if (ret < 0)
+ goto out;
+ advance_left = ADVANCE;
+ advance_right = ADVANCE;
+ }
+ } else if (left_level == right_level) {
+ cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
+ if (cmp < 0) {
+ advance_left = ADVANCE;
+ } else if (cmp > 0) {
+ advance_right = ADVANCE;
+ } else {
+ left_blockptr = btrfs_node_blockptr(
+ left_path->nodes[left_level],
+ left_path->slots[left_level]);
+ right_blockptr = btrfs_node_blockptr(
+ right_path->nodes[right_level],
+ right_path->slots[right_level]);
+ left_gen = btrfs_node_ptr_generation(
+ left_path->nodes[left_level],
+ left_path->slots[left_level]);
+ right_gen = btrfs_node_ptr_generation(
+ right_path->nodes[right_level],
+ right_path->slots[right_level]);
+ if (left_blockptr == right_blockptr &&
+ left_gen == right_gen) {
+ /*
+ * As we're on a shared block, don't
+ * allow to go deeper.
+ */
+ advance_left = ADVANCE_ONLY_NEXT;
+ advance_right = ADVANCE_ONLY_NEXT;
+ } else {
+ advance_left = ADVANCE;
+ advance_right = ADVANCE;
+ }
+ }
+ } else if (left_level < right_level) {
+ advance_right = ADVANCE;
+ } else {
+ advance_left = ADVANCE;
+ }
+ }
+
+out:
+ btrfs_free_path(left_path);
+ btrfs_free_path(right_path);
+ kvfree(tmp_buf);
+ return ret;
+}
+
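For illustration, a minimal sketch of how this now send.c-private compare API is driven (hypothetical callback and variable names; send_subvol() uses its own changed_cb()):

	static int my_changed_cb(struct btrfs_path *left_path,
				 struct btrfs_path *right_path,
				 struct btrfs_key *key,
				 enum btrfs_compare_tree_result result,
				 void *ctx)
	{
		/* Handle a NEW/DELETED/CHANGED/SAME item; return < 0 to abort. */
		return 0;
	}

	/* Compare the commit roots of the two snapshots: */
	ret = btrfs_compare_trees(send_root, parent_root, my_changed_cb, sctx);
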
static int send_subvol(struct send_ctx *sctx)
{
int ret;
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index ab7b9ec4c240..f09aa6ee9113 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
@@ -7,9 +8,9 @@
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
-#include "math.h"
+#include "block-group.h"
-u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
+u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
bool may_use_included)
{
ASSERT(s_info);
@@ -33,23 +34,6 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
rcu_read_unlock();
}
-static const char *alloc_name(u64 flags)
-{
- switch (flags) {
- case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
- return "mixed";
- case BTRFS_BLOCK_GROUP_METADATA:
- return "metadata";
- case BTRFS_BLOCK_GROUP_DATA:
- return "data";
- case BTRFS_BLOCK_GROUP_SYSTEM:
- return "system";
- default:
- WARN_ON(1);
- return "invalid-combination";
- };
-}
-
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
@@ -74,18 +58,13 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
spin_lock_init(&space_info->lock);
space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
- init_waitqueue_head(&space_info->wait);
INIT_LIST_HEAD(&space_info->ro_bgs);
INIT_LIST_HEAD(&space_info->tickets);
INIT_LIST_HEAD(&space_info->priority_tickets);
- ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
- info->space_info_kobj, "%s",
- alloc_name(space_info->flags));
- if (ret) {
- kobject_put(&space_info->kobj);
+ ret = btrfs_sysfs_add_space_info_type(info, space_info);
+ if (ret)
return ret;
- }
list_add_rcu(&space_info->list, &info->space_info);
if (flags & BTRFS_BLOCK_GROUP_DATA)
@@ -151,9 +130,7 @@ void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
found->bytes_readonly += bytes_readonly;
if (total_bytes > 0)
found->full = 0;
- btrfs_space_info_add_new_bytes(info, found,
- total_bytes - bytes_used -
- bytes_readonly);
+ btrfs_try_granting_tickets(info, found);
spin_unlock(&found->lock);
*space_info = found;
}
@@ -187,9 +164,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush,
bool system_chunk)
{
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
u64 profile;
- u64 space_size;
u64 avail;
u64 used;
int factor;
@@ -203,22 +178,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,
else
profile = btrfs_metadata_alloc_profile(fs_info);
- used = btrfs_space_info_used(space_info, false);
-
- /*
- * We only want to allow over committing if we have lots of actual space
- * free, but if we don't have enough space to handle the global reserve
- * space then we could end up having a real enospc problem when trying
- * to allocate a chunk or some other such important allocation.
- */
- spin_lock(&global_rsv->lock);
- space_size = calc_global_rsv_need_space(global_rsv);
- spin_unlock(&global_rsv->lock);
- if (used + space_size >= space_info->total_bytes)
- return 0;
-
- used += space_info->bytes_may_use;
-
+ used = btrfs_space_info_used(space_info, true);
avail = atomic64_read(&fs_info->free_chunk_space);
/*
@@ -249,103 +209,41 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,
* This is for space we already have accounted in space_info->bytes_may_use, so
* basically when we're returning space from block_rsv's.
*/
-void btrfs_space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- u64 num_bytes)
+void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info)
{
- struct reserve_ticket *ticket;
struct list_head *head;
- u64 used;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
- bool check_overcommit = false;
- spin_lock(&space_info->lock);
- head = &space_info->priority_tickets;
+ lockdep_assert_held(&space_info->lock);
- /*
- * If we are over our limit then we need to check and see if we can
- * overcommit, and if we can't then we just need to free up our space
- * and not satisfy any requests.
- */
- used = btrfs_space_info_used(space_info, true);
- if (used - num_bytes >= space_info->total_bytes)
- check_overcommit = true;
+ head = &space_info->priority_tickets;
again:
- while (!list_empty(head) && num_bytes) {
- ticket = list_first_entry(head, struct reserve_ticket,
- list);
- /*
- * We use 0 bytes because this space is already reserved, so
- * adding the ticket space would be a double count.
- */
- if (check_overcommit &&
- !can_overcommit(fs_info, space_info, 0, flush, false))
- break;
- if (num_bytes >= ticket->bytes) {
- list_del_init(&ticket->list);
- num_bytes -= ticket->bytes;
- ticket->bytes = 0;
- space_info->tickets_id++;
- wake_up(&ticket->wait);
- } else {
- ticket->bytes -= num_bytes;
- num_bytes = 0;
- }
- }
-
- if (num_bytes && head == &space_info->priority_tickets) {
- head = &space_info->tickets;
- flush = BTRFS_RESERVE_FLUSH_ALL;
- goto again;
- }
- btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
- trace_btrfs_space_reservation(fs_info, "space_info",
- space_info->flags, num_bytes, 0);
- spin_unlock(&space_info->lock);
-}
+ while (!list_empty(head)) {
+ struct reserve_ticket *ticket;
+ u64 used = btrfs_space_info_used(space_info, true);
-/*
- * This is for newly allocated space that isn't accounted in
- * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
- * we use this helper.
- */
-void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- u64 num_bytes)
-{
- struct reserve_ticket *ticket;
- struct list_head *head = &space_info->priority_tickets;
+ ticket = list_first_entry(head, struct reserve_ticket, list);
-again:
- while (!list_empty(head) && num_bytes) {
- ticket = list_first_entry(head, struct reserve_ticket,
- list);
- if (num_bytes >= ticket->bytes) {
- trace_btrfs_space_reservation(fs_info, "space_info",
- space_info->flags,
- ticket->bytes, 1);
- list_del_init(&ticket->list);
- num_bytes -= ticket->bytes;
+ /* Check and see if our ticket can be satisfied now. */
+ if ((used + ticket->bytes <= space_info->total_bytes) ||
+ can_overcommit(fs_info, space_info, ticket->bytes, flush,
+ false)) {
btrfs_space_info_update_bytes_may_use(fs_info,
space_info,
ticket->bytes);
+ list_del_init(&ticket->list);
ticket->bytes = 0;
space_info->tickets_id++;
wake_up(&ticket->wait);
} else {
- trace_btrfs_space_reservation(fs_info, "space_info",
- space_info->flags,
- num_bytes, 1);
- btrfs_space_info_update_bytes_may_use(fs_info,
- space_info,
- num_bytes);
- ticket->bytes -= num_bytes;
- num_bytes = 0;
+ break;
}
}
- if (num_bytes && head == &space_info->priority_tickets) {
+ if (head == &space_info->priority_tickets) {
head = &space_info->tickets;
+ flush = BTRFS_RESERVE_FLUSH_ALL;
goto again;
}
}
@@ -359,14 +257,11 @@ do { \
spin_unlock(&__rsv->lock); \
} while (0)
-void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *info, u64 bytes,
- int dump_block_groups)
+static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *info)
{
- struct btrfs_block_group_cache *cache;
- int index = 0;
+ lockdep_assert_held(&info->lock);
- spin_lock(&info->lock);
btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
info->flags,
info->total_bytes - btrfs_space_info_used(info, true),
@@ -376,7 +271,6 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
info->total_bytes, info->bytes_used, info->bytes_pinned,
info->bytes_reserved, info->bytes_may_use,
info->bytes_readonly);
- spin_unlock(&info->lock);
DUMP_BLOCK_RSV(fs_info, global_block_rsv);
DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
@@ -384,6 +278,19 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
+}
+
+void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *info, u64 bytes,
+ int dump_block_groups)
+{
+ struct btrfs_block_group *cache;
+ int index = 0;
+
+ spin_lock(&info->lock);
+ __btrfs_dump_space_info(fs_info, info);
+ spin_unlock(&info->lock);
+
if (!dump_block_groups)
return;
@@ -393,8 +300,7 @@ again:
spin_lock(&cache->lock);
btrfs_info(fs_info,
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
- cache->key.objectid, cache->key.offset,
- btrfs_block_group_used(&cache->item), cache->pinned,
+ cache->start, cache->length, cache->used, cache->pinned,
cache->reserved, cache->ro ? "[readonly]" : "");
btrfs_dump_free_space(cache, bytes);
spin_unlock(&cache->lock);
@@ -432,7 +338,7 @@ static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
u64 bytes;
u64 nr;
- bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+ bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
nr = div64_u64(to_reclaim, bytes);
if (!nr)
nr = 1;
@@ -557,12 +463,19 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans;
u64 bytes_needed;
u64 reclaim_bytes = 0;
+ u64 cur_free_bytes = 0;
trans = (struct btrfs_trans_handle *)current->journal_info;
if (trans)
return -EAGAIN;
spin_lock(&space_info->lock);
+ cur_free_bytes = btrfs_space_info_used(space_info, true);
+ if (cur_free_bytes < space_info->total_bytes)
+ cur_free_bytes = space_info->total_bytes - cur_free_bytes;
+ else
+ cur_free_bytes = 0;
+
if (!list_empty(&space_info->priority_tickets))
ticket = list_first_entry(&space_info->priority_tickets,
struct reserve_ticket, list);
@@ -570,6 +483,11 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
ticket = list_first_entry(&space_info->tickets,
struct reserve_ticket, list);
bytes_needed = (ticket) ? ticket->bytes : 0;
+
+ if (bytes_needed > cur_free_bytes)
+ bytes_needed -= cur_free_bytes;
+ else
+ bytes_needed = 0;
spin_unlock(&space_info->lock);
if (!bytes_needed)
@@ -684,7 +602,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
if (ret > 0 || ret == -ENOSPC)
ret = 0;
break;
- case COMMIT_TRANS:
+ case RUN_DELAYED_IPUTS:
/*
* If we have pending delayed iputs then we could free up a
* bunch of pinned space, so make sure we run the iputs before
@@ -692,7 +610,8 @@ static void flush_space(struct btrfs_fs_info *fs_info,
*/
btrfs_run_delayed_iputs(fs_info);
btrfs_wait_on_delayed_iputs(fs_info);
-
+ break;
+ case COMMIT_TRANS:
ret = may_commit_transaction(fs_info, space_info);
break;
default:
@@ -762,19 +681,70 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
-static bool wake_all_tickets(struct list_head *head)
+/*
+ * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
+ * @fs_info - fs_info for this fs
+ * @space_info - the space info we were flushing
+ *
+ * We call this when we've exhausted our flushing ability and haven't made
+ * progress in satisfying tickets. The reservation code handles tickets in
+ * order, so if there is a large ticket first and then smaller ones we could
+ * very well satisfy the smaller tickets. This will attempt to wake up any
+ * tickets in the list to catch this case.
+ *
+ * This function returns true if it was able to make progress by clearing out
+ * other tickets, or if it stumbles across a ticket that was smaller than the
+ * first ticket.
+ */
+static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info)
{
struct reserve_ticket *ticket;
+ u64 tickets_id = space_info->tickets_id;
+ u64 first_ticket_bytes = 0;
+
+ if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
+ btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
+ __btrfs_dump_space_info(fs_info, space_info);
+ }
+
+ while (!list_empty(&space_info->tickets) &&
+ tickets_id == space_info->tickets_id) {
+ ticket = list_first_entry(&space_info->tickets,
+ struct reserve_ticket, list);
+
+ /*
+ * may_commit_transaction will avoid committing the transaction
+ * if it doesn't feel like the space reclaimed by the commit
+ * would result in the ticket succeeding. However if we have a
+ * smaller ticket in the queue it may be small enough to be
+ * satisified by committing the transaction, so if any
+ * subsequent ticket is smaller than the first ticket go ahead
+ * and send us back for another loop through the enospc flushing
+ * code.
+ */
+ if (first_ticket_bytes == 0)
+ first_ticket_bytes = ticket->bytes;
+ else if (first_ticket_bytes > ticket->bytes)
+ return true;
+
+ if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
+ btrfs_info(fs_info, "failing ticket with %llu bytes",
+ ticket->bytes);
- while (!list_empty(head)) {
- ticket = list_first_entry(head, struct reserve_ticket, list);
list_del_init(&ticket->list);
ticket->error = -ENOSPC;
wake_up(&ticket->wait);
- if (ticket->bytes != ticket->orig_bytes)
- return true;
+
+ /*
+ * We're just throwing tickets away, so more flushing may not
+ * trip over btrfs_try_granting_tickets, so we need to call it
+ * here to see if we can make progress with the next ticket in
+ * the list.
+ */
+ btrfs_try_granting_tickets(fs_info, space_info);
}
- return false;
+ return (tickets_id != space_info->tickets_id);
}
/*
@@ -842,7 +812,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
if (flush_state > COMMIT_TRANS) {
commit_cycles++;
if (commit_cycles > 2) {
- if (wake_all_tickets(&space_info->tickets)) {
+ if (maybe_fail_all_tickets(fs_info, space_info)) {
flush_state = FLUSH_DELAYED_ITEMS_NR;
commit_cycles--;
} else {
@@ -867,9 +837,22 @@ static const enum btrfs_flush_state priority_flush_states[] = {
ALLOC_CHUNK,
};
+static const enum btrfs_flush_state evict_flush_states[] = {
+ FLUSH_DELAYED_ITEMS_NR,
+ FLUSH_DELAYED_ITEMS,
+ FLUSH_DELAYED_REFS_NR,
+ FLUSH_DELAYED_REFS,
+ FLUSH_DELALLOC,
+ FLUSH_DELALLOC_WAIT,
+ ALLOC_CHUNK,
+ COMMIT_TRANS,
+};
+
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- struct reserve_ticket *ticket)
+ struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket,
+ const enum btrfs_flush_state *states,
+ int states_nr)
{
u64 to_reclaim;
int flush_state;
@@ -885,8 +868,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
flush_state = 0;
do {
- flush_space(fs_info, space_info, to_reclaim,
- priority_flush_states[flush_state]);
+ flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
flush_state++;
spin_lock(&space_info->lock);
if (ticket->bytes == 0) {
@@ -894,23 +876,31 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
return;
}
spin_unlock(&space_info->lock);
- } while (flush_state < ARRAY_SIZE(priority_flush_states));
+ } while (flush_state < states_nr);
}
-static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- struct reserve_ticket *ticket)
+static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket)
{
DEFINE_WAIT(wait);
- u64 reclaim_bytes = 0;
int ret = 0;
spin_lock(&space_info->lock);
while (ticket->bytes > 0 && ticket->error == 0) {
ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
if (ret) {
- ret = -EINTR;
+ /*
+ * Delete us from the list. After we unlock the space
+ * info, we don't want the async reclaim job to reserve
+ * space for this ticket. If that would happen, then the
+ * ticket's task would not know that space was reserved
+ * despite getting an error, resulting in a space leak
+ * (bytes_may_use counter of our space_info).
+ */
+ list_del_init(&ticket->list);
+ ticket->error = -EINTR;
break;
}
spin_unlock(&space_info->lock);
@@ -920,17 +910,66 @@ static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
finish_wait(&ticket->wait, &wait);
spin_lock(&space_info->lock);
}
- if (!ret)
- ret = ticket->error;
- if (!list_empty(&ticket->list))
- list_del_init(&ticket->list);
- if (ticket->bytes && ticket->bytes < ticket->orig_bytes)
- reclaim_bytes = ticket->orig_bytes - ticket->bytes;
spin_unlock(&space_info->lock);
+}
+
+/**
+ * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
+ * @fs_info - the fs
+ * @space_info - the space_info for the reservation
+ * @ticket - the ticket for the reservation
+ * @flush - how much we can flush
+ *
+ * This does the work of figuring out how to flush for the ticket, waiting for
+ * the reservation, and returning the appropriate error if there is one.
+ */
+static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket,
+ enum btrfs_reserve_flush_enum flush)
+{
+ int ret;
+
+ switch (flush) {
+ case BTRFS_RESERVE_FLUSH_ALL:
+ wait_reserve_ticket(fs_info, space_info, ticket);
+ break;
+ case BTRFS_RESERVE_FLUSH_LIMIT:
+ priority_reclaim_metadata_space(fs_info, space_info, ticket,
+ priority_flush_states,
+ ARRAY_SIZE(priority_flush_states));
+ break;
+ case BTRFS_RESERVE_FLUSH_EVICT:
+ priority_reclaim_metadata_space(fs_info, space_info, ticket,
+ evict_flush_states,
+ ARRAY_SIZE(evict_flush_states));
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
- if (reclaim_bytes)
- btrfs_space_info_add_old_bytes(fs_info, space_info,
- reclaim_bytes);
+ spin_lock(&space_info->lock);
+ ret = ticket->error;
+ if (ticket->bytes || ticket->error) {
+ /*
+ * Need to delete here for priority tickets. For regular tickets
+ * either the async reclaim job deletes the ticket from the list
+ * or we delete it ourselves at wait_reserve_ticket().
+ */
+ list_del_init(&ticket->list);
+ if (!ret)
+ ret = -ENOSPC;
+ }
+ spin_unlock(&space_info->lock);
+ ASSERT(list_empty(&ticket->list));
+ /*
+ * Check that we can't have an error set if the reservation succeeded,
+ * as that would confuse tasks and lead them to error out without
+ * releasing reserved space (if an error happens the expectation is that
+ * space wasn't reserved at all).
+ */
+ ASSERT(!(ticket->bytes == 0 && ticket->error));
return ret;
}
@@ -956,8 +995,8 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
{
struct reserve_ticket ticket;
u64 used;
- u64 reclaim_bytes = 0;
int ret = 0;
+ bool pending_tickets;
ASSERT(orig_bytes);
ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
@@ -965,18 +1004,19 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock);
ret = -ENOSPC;
used = btrfs_space_info_used(space_info, true);
+ pending_tickets = !list_empty(&space_info->tickets) ||
+ !list_empty(&space_info->priority_tickets);
/*
* Carry on if we have enough space (short-circuit) OR call
* can_overcommit() to ensure we can overcommit to continue.
*/
- if ((used + orig_bytes <= space_info->total_bytes) ||
- can_overcommit(fs_info, space_info, orig_bytes, flush,
- system_chunk)) {
+ if (!pending_tickets &&
+ ((used + orig_bytes <= space_info->total_bytes) ||
+ can_overcommit(fs_info, space_info, orig_bytes, flush,
+ system_chunk))) {
btrfs_space_info_update_bytes_may_use(fs_info, space_info,
orig_bytes);
- trace_btrfs_space_reservation(fs_info, "space_info",
- space_info->flags, orig_bytes, 1);
ret = 0;
}
@@ -988,7 +1028,6 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
* the list and we will do our own flushing further down.
*/
if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
- ticket.orig_bytes = orig_bytes;
ticket.bytes = orig_bytes;
ticket.error = 0;
init_waitqueue_head(&ticket.wait);
@@ -1028,25 +1067,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
return ret;
- if (flush == BTRFS_RESERVE_FLUSH_ALL)
- return wait_reserve_ticket(fs_info, space_info, &ticket);
-
- ret = 0;
- priority_reclaim_metadata_space(fs_info, space_info, &ticket);
- spin_lock(&space_info->lock);
- if (ticket.bytes) {
- if (ticket.bytes < orig_bytes)
- reclaim_bytes = orig_bytes - ticket.bytes;
- list_del_init(&ticket.list);
- ret = -ENOSPC;
- }
- spin_unlock(&space_info->lock);
-
- if (reclaim_bytes)
- btrfs_space_info_add_old_bytes(fs_info, space_info,
- reclaim_bytes);
- ASSERT(list_empty(&ticket.list));
- return ret;
+ return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
}
/**
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index c2b54b8e1a14..1a349e3f9cc1 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -63,14 +63,12 @@ struct btrfs_space_info {
struct rw_semaphore groups_sem;
/* for block groups in our same type */
struct list_head block_groups[BTRFS_NR_RAID_TYPES];
- wait_queue_head_t wait;
struct kobject kobj;
struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
};
struct reserve_ticket {
- u64 orig_bytes;
u64 bytes;
int error;
struct list_head list;
@@ -87,14 +85,18 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
*
* Declare a helper function to detect underflow of various space info members
*/
-#define DECLARE_SPACE_INFO_UPDATE(name) \
+#define DECLARE_SPACE_INFO_UPDATE(name, trace_name) \
static inline void \
btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
struct btrfs_space_info *sinfo, \
s64 bytes) \
{ \
+ const u64 abs_bytes = (bytes < 0) ? -bytes : bytes; \
lockdep_assert_held(&sinfo->lock); \
trace_update_##name(fs_info, sinfo, sinfo->name, bytes); \
+ trace_btrfs_space_reservation(fs_info, trace_name, \
+ sinfo->flags, abs_bytes, \
+ bytes > 0); \
if (bytes < 0 && sinfo->name < -bytes) { \
WARN_ON(1); \
sinfo->name = 0; \
@@ -103,15 +105,9 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
sinfo->name += bytes; \
}
-DECLARE_SPACE_INFO_UPDATE(bytes_may_use);
-DECLARE_SPACE_INFO_UPDATE(bytes_pinned);
+DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
+DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
-void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- u64 num_bytes);
-void btrfs_space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- u64 num_bytes);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
@@ -119,7 +115,7 @@ void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
struct btrfs_space_info **space_info);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
u64 flags);
-u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
+u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
bool may_use_included);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
@@ -129,5 +125,18 @@ int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush);
+void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info);
+
+static inline void btrfs_space_info_free_bytes_may_use(
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ u64 num_bytes)
+{
+ spin_lock(&space_info->lock);
+ btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
+ btrfs_try_granting_tickets(fs_info, space_info);
+ spin_unlock(&space_info->lock);
+}
#endif /* BTRFS_SPACE_INFO_H */
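
As a usage sketch of the new inline helper (illustrative only; fs_info, sinfo and num_bytes are assumed to describe a reservation being returned):

	/* Give back an unused metadata reservation and retry waiting tickets. */
	btrfs_space_info_free_bytes_may_use(fs_info, sinfo, num_bytes);

	/* which is shorthand for, under the space_info lock: */
	spin_lock(&sinfo->lock);
	btrfs_space_info_update_bytes_may_use(fs_info, sinfo, -num_bytes);
	btrfs_try_granting_tickets(fs_info, sinfo);
	spin_unlock(&sinfo->lock);
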
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index 4c13b737f568..73f7987143df 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -33,6 +33,8 @@ static inline void put_unaligned_le8(u8 val, void *p)
*
* The extent buffer api is used to do the page spanning work required to
* have a metadata blocksize different from the page size.
+ *
+ * There are 2 variants defined, one with a token pointer and one without.
*/
#define DEFINE_BTRFS_SETGET_BITS(bits) \
@@ -50,8 +52,10 @@ u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
int size = sizeof(u##bits); \
u##bits res; \
\
- if (token && token->kaddr && token->offset <= offset && \
- token->eb == eb && \
+ ASSERT(token); \
+ ASSERT(token->eb == eb); \
+ \
+ if (token->kaddr && token->offset <= offset && \
(token->offset + PAGE_SIZE >= offset + size)) { \
kaddr = token->kaddr; \
p = kaddr + part_offset - token->offset; \
@@ -68,11 +72,33 @@ u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
} \
p = kaddr + part_offset - map_start; \
res = get_unaligned_le##bits(p + off); \
- if (token) { \
- token->kaddr = kaddr; \
- token->offset = map_start; \
- token->eb = eb; \
+ token->kaddr = kaddr; \
+ token->offset = map_start; \
+ return res; \
+} \
+u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
+ const void *ptr, unsigned long off) \
+{ \
+ unsigned long part_offset = (unsigned long)ptr; \
+ unsigned long offset = part_offset + off; \
+ void *p; \
+ int err; \
+ char *kaddr; \
+ unsigned long map_start; \
+ unsigned long map_len; \
+ int size = sizeof(u##bits); \
+ u##bits res; \
+ \
+ err = map_private_extent_buffer(eb, offset, size, \
+ &kaddr, &map_start, &map_len); \
+ if (err) { \
+ __le##bits leres; \
+ \
+ read_extent_buffer(eb, &leres, offset, size); \
+ return le##bits##_to_cpu(leres); \
} \
+ p = kaddr + part_offset - map_start; \
+ res = get_unaligned_le##bits(p + off); \
return res; \
} \
void btrfs_set_token_##bits(struct extent_buffer *eb, \
@@ -89,8 +115,10 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \
unsigned long map_len; \
int size = sizeof(u##bits); \
\
- if (token && token->kaddr && token->offset <= offset && \
- token->eb == eb && \
+ ASSERT(token); \
+ ASSERT(token->eb == eb); \
+ \
+ if (token->kaddr && token->offset <= offset && \
(token->offset + PAGE_SIZE >= offset + size)) { \
kaddr = token->kaddr; \
p = kaddr + part_offset - token->offset; \
@@ -108,11 +136,32 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \
} \
p = kaddr + part_offset - map_start; \
put_unaligned_le##bits(val, p + off); \
- if (token) { \
- token->kaddr = kaddr; \
- token->offset = map_start; \
- token->eb = eb; \
+ token->kaddr = kaddr; \
+ token->offset = map_start; \
+} \
+void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, u##bits val) \
+{ \
+ unsigned long part_offset = (unsigned long)ptr; \
+ unsigned long offset = part_offset + off; \
+ void *p; \
+ int err; \
+ char *kaddr; \
+ unsigned long map_start; \
+ unsigned long map_len; \
+ int size = sizeof(u##bits); \
+ \
+ err = map_private_extent_buffer(eb, offset, size, \
+ &kaddr, &map_start, &map_len); \
+ if (err) { \
+ __le##bits val2; \
+ \
+ val2 = cpu_to_le##bits(val); \
+ write_extent_buffer(eb, &val2, offset, size); \
+ return; \
} \
+ p = kaddr + part_offset - map_start; \
+ put_unaligned_le##bits(val, p + off); \
}
DEFINE_BTRFS_SETGET_BITS(8)
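
For illustration (hypothetical, not part of the patch), the non-token variants generated above can be called directly; eb and ei are assumed to point at a leaf and a btrfs_extent_item within it:

	/* Read and update a 64-bit on-disk field without a mapping token. */
	u64 refs = btrfs_get_64(eb, ei, offsetof(struct btrfs_extent_item, refs));
	btrfs_set_64(eb, ei, offsetof(struct btrfs_extent_item, refs), refs + 1);
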
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 78de9d5d80c6..f452a94abdc3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -43,7 +43,9 @@
#include "free-space-cache.h"
#include "backref.h"
#include "space-info.h"
+#include "sysfs.h"
#include "tests/btrfs-tests.h"
+#include "block-group.h"
#include "qgroup.h"
#define CREATE_TRACE_POINTS
@@ -64,7 +66,7 @@ static struct file_system_type btrfs_root_fs_type;
static int btrfs_remount(struct super_block *sb, int *flags, char *data);
-const char *btrfs_decode_error(int errno)
+const char * __attribute_const__ btrfs_decode_error(int errno)
{
char *errstr = "unknown";
@@ -185,7 +187,7 @@ static struct ratelimit_state printk_limits[] = {
RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
};
-void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
struct va_format vaf;
@@ -1217,7 +1219,7 @@ static int btrfs_fill_super(struct super_block *sb,
key.objectid = BTRFS_FIRST_FREE_OBJECTID;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL);
+ inode = btrfs_iget(sb, &key, fs_info->fs_root);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto fail_close;
@@ -1667,7 +1669,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
- btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
@@ -1899,11 +1900,10 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
struct btrfs_device_info *devices_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
- u64 skip_space;
u64 type;
u64 avail_space;
u64 min_stripe_size;
- int min_stripes, num_stripes = 1;
+ int num_stripes = 1;
int i = 0, nr_devices;
const struct btrfs_raid_attr *rattr;
@@ -1930,12 +1930,15 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
/* calc min stripe number for data space allocation */
type = btrfs_data_alloc_profile(fs_info);
rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];
- min_stripes = rattr->devs_min;
if (type & BTRFS_BLOCK_GROUP_RAID0)
num_stripes = nr_devices;
else if (type & BTRFS_BLOCK_GROUP_RAID1)
num_stripes = 2;
+ else if (type & BTRFS_BLOCK_GROUP_RAID1C3)
+ num_stripes = 3;
+ else if (type & BTRFS_BLOCK_GROUP_RAID1C4)
+ num_stripes = 4;
else if (type & BTRFS_BLOCK_GROUP_RAID10)
num_stripes = 4;
@@ -1956,28 +1959,21 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
avail_space = device->total_bytes - device->bytes_used;
/* align with stripe_len */
- avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN);
- avail_space *= BTRFS_STRIPE_LEN;
+ avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
/*
* In order to avoid overwriting the superblock on the drive,
* btrfs starts at an offset of at least 1MB when doing chunk
* allocation.
+ *
+ * This ensures we have at least min_stripe_size free space
+ * after excluding 1MB.
*/
- skip_space = SZ_1M;
-
- /*
- * we can use the free space in [0, skip_space - 1], subtract
- * it from the total.
- */
- if (avail_space && avail_space >= skip_space)
- avail_space -= skip_space;
- else
- avail_space = 0;
-
- if (avail_space < min_stripe_size)
+ if (avail_space <= SZ_1M + min_stripe_size)
continue;
+ avail_space -= SZ_1M;
+
devices_info[i].dev = device;
devices_info[i].max_avail = avail_space;
@@ -1991,9 +1987,8 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
i = nr_devices - 1;
avail_space = 0;
- while (nr_devices >= min_stripes) {
- if (num_stripes > nr_devices)
- num_stripes = nr_devices;
+ while (nr_devices >= rattr->devs_min) {
+ num_stripes = min(num_stripes, nr_devices);
if (devices_info[i].max_avail >= min_stripe_size) {
int j;
@@ -2030,7 +2025,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
struct btrfs_super_block *disk_super = fs_info->super_copy;
- struct list_head *head = &fs_info->space_info;
struct btrfs_space_info *found;
u64 total_used = 0;
u64 total_free_data = 0;
@@ -2044,7 +2038,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
int mixed = 0;
rcu_read_lock();
- list_for_each_entry_rcu(found, head, list) {
+ list_for_each_entry_rcu(found, &fs_info->space_info, list) {
if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
int i;
@@ -2305,7 +2299,7 @@ static const struct super_operations btrfs_super_ops = {
static const struct file_operations btrfs_ctl_fops = {
.open = btrfs_control_open,
.unlocked_ioctl = btrfs_control_ioctl,
- .compat_ioctl = btrfs_control_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
@@ -2368,10 +2362,14 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_cachep;
- err = extent_map_init();
+ err = extent_state_cache_init();
if (err)
goto free_extent_io;
+ err = extent_map_init();
+ if (err)
+ goto free_extent_state_cache;
+
err = ordered_data_init();
if (err)
goto free_extent_map;
@@ -2430,6 +2428,8 @@ free_ordered_data:
ordered_data_exit();
free_extent_map:
extent_map_exit();
+free_extent_state_cache:
+ extent_state_cache_exit();
free_extent_io:
extent_io_exit();
free_cachep:
@@ -2450,6 +2450,7 @@ static void __exit exit_btrfs_fs(void)
btrfs_prelim_ref_exit();
ordered_data_exit();
extent_map_exit();
+ extent_state_cache_exit();
extent_io_exit();
btrfs_interface_exit();
btrfs_end_io_wq_exit();
@@ -2464,3 +2465,6 @@ module_exit(exit_btrfs_fs)
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
+MODULE_SOFTDEP("pre: xxhash64");
+MODULE_SOFTDEP("pre: sha256");
+MODULE_SOFTDEP("pre: blake2b-256");
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 9539f8143b7a..5ebbe8a5ee76 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -4,12 +4,12 @@
*/
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
-#include <linux/kobject.h>
#include <linux/bug.h>
-#include <linux/debugfs.h>
+#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
@@ -17,10 +17,75 @@
#include "sysfs.h"
#include "volumes.h"
#include "space-info.h"
+#include "block-group.h"
+
+struct btrfs_feature_attr {
+ struct kobj_attribute kobj_attr;
+ enum btrfs_feature_set feature_set;
+ u64 feature_bit;
+};
+
+/* For raid type sysfs entries */
+struct raid_kobject {
+ u64 flags;
+ struct kobject kobj;
+};
+
+#define __INIT_KOBJ_ATTR(_name, _mode, _show, _store) \
+{ \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+}
+
+#define BTRFS_ATTR_RW(_prefix, _name, _show, _store) \
+ static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
+ __INIT_KOBJ_ATTR(_name, 0644, _show, _store)
+
+#define BTRFS_ATTR(_prefix, _name, _show) \
+ static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
+ __INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
+
+#define BTRFS_ATTR_PTR(_prefix, _name) \
+ (&btrfs_attr_##_prefix##_##_name.attr)
+
+#define BTRFS_FEAT_ATTR(_name, _feature_set, _feature_prefix, _feature_bit) \
+static struct btrfs_feature_attr btrfs_attr_features_##_name = { \
+ .kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO, \
+ btrfs_feature_attr_show, \
+ btrfs_feature_attr_store), \
+ .feature_set = _feature_set, \
+ .feature_bit = _feature_prefix ##_## _feature_bit, \
+}
+#define BTRFS_FEAT_ATTR_PTR(_name) \
+ (&btrfs_attr_features_##_name.kobj_attr.attr)
+
+#define BTRFS_FEAT_ATTR_COMPAT(name, feature) \
+ BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature)
+#define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \
+ BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT_RO, feature)
+#define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \
+ BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature)
static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj);
static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj);
+static struct btrfs_feature_attr *to_btrfs_feature_attr(struct kobj_attribute *a)
+{
+ return container_of(a, struct btrfs_feature_attr, kobj_attr);
+}
+
+static struct kobj_attribute *attr_to_btrfs_attr(struct attribute *attr)
+{
+ return container_of(attr, struct kobj_attribute, attr);
+}
+
+static struct btrfs_feature_attr *attr_to_btrfs_feature_attr(
+ struct attribute *attr)
+{
+ return to_btrfs_feature_attr(attr_to_btrfs_attr(attr));
+}
+
static u64 get_features(struct btrfs_fs_info *fs_info,
enum btrfs_feature_set set)
{
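To make the macro block above easier to follow, here is roughly what one invocation used later in this file, BTRFS_ATTR(static_feature, rmdir_subvol, rmdir_subvol_show), expands to (a sketch for illustration, not part of the patch):

    static struct kobj_attribute btrfs_attr_static_feature_rmdir_subvol = {
            .attr  = { .name = "rmdir_subvol", .mode = 0444 },
            .show  = rmdir_subvol_show,
            .store = NULL,
    };

BTRFS_ATTR_PTR(static_feature, rmdir_subvol) then simply yields &btrfs_attr_static_feature_rmdir_subvol.attr for use in the attribute arrays below.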
@@ -194,6 +259,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
+BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(mixed_backref),
@@ -208,6 +274,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(no_holes),
BTRFS_FEAT_ATTR_PTR(metadata_uuid),
BTRFS_FEAT_ATTR_PTR(free_space_tree),
+ BTRFS_FEAT_ATTR_PTR(raid1c34),
NULL
};
@@ -231,8 +298,30 @@ static ssize_t rmdir_subvol_show(struct kobject *kobj,
}
BTRFS_ATTR(static_feature, rmdir_subvol, rmdir_subvol_show);
+static ssize_t supported_checksums_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ ssize_t ret = 0;
+ int i;
+
+ for (i = 0; i < btrfs_get_num_csums(); i++) {
+ /*
+ * This "trick" only works as long as 'enum btrfs_csum_type' has
+ * no holes in it
+ */
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
+ (i == 0 ? "" : " "), btrfs_super_csum_name(i));
+
+ }
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ return ret;
+}
+BTRFS_ATTR(static_feature, supported_checksums, supported_checksums_show);
+
static struct attribute *btrfs_supported_static_feature_attrs[] = {
BTRFS_ATTR_PTR(static_feature, rmdir_subvol),
+ BTRFS_ATTR_PTR(static_feature, supported_checksums),
NULL
};
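The comment in supported_checksums_show() points out that the loop index doubles as the csum type value, which only works while the enum is dense. A minimal user-space sketch of the same accumulate-into-one-buffer pattern, with stand-in names (crc32c/xxhash64/sha256/blake2b are illustrative here, not read from the kernel):

    #include <stdio.h>

    /* Stand-ins for btrfs_get_num_csums()/btrfs_super_csum_name(); the
     * index-as-value trick requires these entries to be dense 0..N-1. */
    static const char * const csum_names[] = {
            "crc32c", "xxhash64", "sha256", "blake2b"
    };
    #define NUM_CSUMS (sizeof(csum_names) / sizeof(csum_names[0]))

    int main(void)
    {
            char buf[256];
            int ret = 0;
            unsigned int i;

            for (i = 0; i < NUM_CSUMS; i++)
                    ret += snprintf(buf + ret, sizeof(buf) - ret, "%s%s",
                                    i == 0 ? "" : " ", csum_names[i]);
            snprintf(buf + ret, sizeof(buf) - ret, "\n");
            printf("%s", buf);
            return 0;
    }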
@@ -247,6 +336,25 @@ static const struct attribute_group btrfs_static_feature_attr_group = {
.attrs = btrfs_supported_static_feature_attrs,
};
+#ifdef CONFIG_BTRFS_DEBUG
+
+/*
+ * Runtime debugging exported via sysfs
+ *
+ * /sys/fs/btrfs/debug - applies to module or all filesystems
+ * /sys/fs/btrfs/UUID - applies only to the given filesystem
+ */
+static struct attribute *btrfs_debug_feature_attrs[] = {
+ NULL
+};
+
+static const struct attribute_group btrfs_debug_feature_attr_group = {
+ .name = "debug",
+ .attrs = btrfs_debug_feature_attrs,
+};
+
+#endif
+
static ssize_t btrfs_show_u64(u64 *value_ptr, spinlock_t *lock, char *buf)
{
u64 val;
@@ -289,16 +397,16 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
{
struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
int index = btrfs_bg_flags_to_raid_index(to_raid_kobj(kobj)->flags);
u64 val = 0;
down_read(&sinfo->groups_sem);
list_for_each_entry(block_group, &sinfo->block_groups[index], list) {
if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes))
- val += block_group->key.offset;
+ val += block_group->length;
else
- val += btrfs_block_group_used(&block_group->item);
+ val += block_group->used;
}
up_read(&sinfo->groups_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", val);
@@ -316,7 +424,7 @@ static void release_raid_kobj(struct kobject *kobj)
kfree(to_raid_kobj(kobj));
}
-struct kobj_type btrfs_raid_ktype = {
+static struct kobj_type btrfs_raid_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = release_raid_kobj,
.default_groups = raid_groups,
@@ -375,7 +483,7 @@ static void space_info_release(struct kobject *kobj)
kfree(sinfo);
}
-struct kobj_type space_info_ktype = {
+static struct kobj_type space_info_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = space_info_release,
.default_groups = space_info_groups,
@@ -521,6 +629,19 @@ static ssize_t btrfs_metadata_uuid_show(struct kobject *kobj,
BTRFS_ATTR(, metadata_uuid, btrfs_metadata_uuid_show);
+static ssize_t btrfs_checksum_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ u16 csum_type = btrfs_super_csum_type(fs_info->super_copy);
+
+ return snprintf(buf, PAGE_SIZE, "%s (%s)\n",
+ btrfs_super_csum_name(csum_type),
+ crypto_shash_driver_name(fs_info->csum_shash));
+}
+
+BTRFS_ATTR(, checksum, btrfs_checksum_show);
+
static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, label),
BTRFS_ATTR_PTR(, nodesize),
@@ -528,6 +649,7 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, clone_alignment),
BTRFS_ATTR_PTR(, quota_override),
BTRFS_ATTR_PTR(, metadata_uuid),
+ BTRFS_ATTR_PTR(, checksum),
NULL,
};
@@ -655,12 +777,17 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info)
btrfs_sysfs_rm_device_link(fs_info->fs_devices, NULL);
}
-const char * const btrfs_feature_set_names[FEAT_MAX] = {
+static const char * const btrfs_feature_set_names[FEAT_MAX] = {
[FEAT_COMPAT] = "compat",
[FEAT_COMPAT_RO] = "compat_ro",
[FEAT_INCOMPAT] = "incompat",
};
+const char * const btrfs_feature_set_name(enum btrfs_feature_set set)
+{
+ return btrfs_feature_set_names[set];
+}
+
char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags)
{
size_t bufsize = 4096; /* safe max, 64 names * 64 bytes */
@@ -730,6 +857,110 @@ static void init_feature_attrs(void)
}
}
+/*
+ * Create a sysfs entry for a given block group type at path
+ * /sys/fs/btrfs/UUID/allocation/data/TYPE
+ */
+void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_space_info *space_info = cache->space_info;
+ struct raid_kobject *rkobj;
+ const int index = btrfs_bg_flags_to_raid_index(cache->flags);
+ unsigned int nofs_flag;
+ int ret;
+
+ /*
+ * Setup a NOFS context because kobject_add(), deep in its call chain,
+ * does GFP_KERNEL allocations, and we are often called in a context
+ * where if reclaim is triggered we can deadlock (we are either holding
+ * a transaction handle or some lock required for a transaction
+ * commit).
+ */
+ nofs_flag = memalloc_nofs_save();
+
+ rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+ if (!rkobj) {
+ memalloc_nofs_restore(nofs_flag);
+ btrfs_warn(cache->fs_info,
+ "couldn't alloc memory for raid level kobject");
+ return;
+ }
+
+ rkobj->flags = cache->flags;
+ kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
+ ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+ btrfs_bg_type_to_raid_name(rkobj->flags));
+ memalloc_nofs_restore(nofs_flag);
+ if (ret) {
+ kobject_put(&rkobj->kobj);
+ btrfs_warn(fs_info,
+ "failed to add kobject for block cache, ignoring");
+ return;
+ }
+
+ space_info->block_group_kobjs[index] = &rkobj->kobj;
+}
+
+/*
+ * Remove sysfs directories for all block group types of a given space info and
+ * the space info as well
+ */
+void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info)
+{
+ int i;
+
+ for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
+ struct kobject *kobj;
+
+ kobj = space_info->block_group_kobjs[i];
+ space_info->block_group_kobjs[i] = NULL;
+ if (kobj) {
+ kobject_del(kobj);
+ kobject_put(kobj);
+ }
+ }
+ kobject_del(&space_info->kobj);
+ kobject_put(&space_info->kobj);
+}
+
+static const char *alloc_name(u64 flags)
+{
+ switch (flags) {
+ case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
+ return "mixed";
+ case BTRFS_BLOCK_GROUP_METADATA:
+ return "metadata";
+ case BTRFS_BLOCK_GROUP_DATA:
+ return "data";
+ case BTRFS_BLOCK_GROUP_SYSTEM:
+ return "system";
+ default:
+ WARN_ON(1);
+ return "invalid-combination";
+ };
+}
+
+/*
+ * Create a sysfs entry for a space info type at path
+ * /sys/fs/btrfs/UUID/allocation/TYPE
+ */
+int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
+ fs_info->space_info_kobj, "%s",
+ alloc_name(space_info->flags));
+ if (ret) {
+ kobject_put(&space_info->kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
/* when one_device is NULL, it removes all device links */
int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices,
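Together, btrfs_sysfs_add_space_info_type() and btrfs_sysfs_add_block_group_type() create the per-filesystem directories /sys/fs/btrfs/<UUID>/allocation/<data|metadata|system|mixed>/<raid-profile>/. A small user-space sketch (hypothetical paths, assuming such a file exists on the running system) that prints one exported value such as total_bytes:

    #include <stdio.h>

    int main(int argc, char **argv)
    {
            /* e.g. ./read-sysfs /sys/fs/btrfs/<UUID>/allocation/data/raid1/total_bytes */
            char buf[64];
            FILE *f;

            if (argc < 2) {
                    fprintf(stderr, "usage: %s <sysfs file>\n", argv[0]);
                    return 1;
            }
            f = fopen(argv[1], "r");
            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("%s", buf);
            fclose(f);
            return 0;
    }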
@@ -806,14 +1037,34 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
return error;
}
-/* /sys/fs/btrfs/ entry */
-static struct kset *btrfs_kset;
+void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action)
+{
+ int ret;
-/* /sys/kernel/debug/btrfs */
-static struct dentry *btrfs_debugfs_root_dentry;
+ ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
+ if (ret)
+ pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
+ action, kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
+ &disk_to_dev(bdev->bd_disk)->kobj);
+}
+
+void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices,
+ const u8 *fsid)
+{
+ char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
+
+ /*
+ * Sprouting changes the fsid of the mounted filesystem, so rename the
+ * fsid directory to match

+ */
+ snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", fsid);
+ if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
+ btrfs_warn(fs_devices->fs_info,
+ "sysfs: failed to create fsid for sprout");
+}
-/* Debugging tunables and exported data */
-u64 btrfs_debugfs_test;
+/* /sys/fs/btrfs/ entry */
+static struct kset *btrfs_kset;
/*
* Can be called by the device discovery thread.
@@ -859,6 +1110,13 @@ int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
if (error)
goto failure;
+#ifdef CONFIG_BTRFS_DEBUG
+ error = sysfs_create_group(fsid_kobj,
+ &btrfs_debug_feature_attr_group);
+ if (error)
+ goto failure;
+#endif
+
error = addrm_unknown_feature_attrs(fs_info, true);
if (error)
goto failure;
@@ -913,25 +1171,6 @@ void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group);
}
-static void btrfs_init_debugfs(void)
-{
-#ifdef CONFIG_DEBUG_FS
- btrfs_debugfs_root_dentry = debugfs_create_dir("btrfs", NULL);
-
- /*
- * Example code, how to export data through debugfs.
- *
- * file: /sys/kernel/debug/btrfs/test
- * contents of: btrfs_debugfs_test
- */
-#ifdef CONFIG_BTRFS_DEBUG
- debugfs_create_u64("test", S_IRUGO | S_IWUSR, btrfs_debugfs_root_dentry,
- &btrfs_debugfs_test);
-#endif
-
-#endif
-}
-
int __init btrfs_init_sysfs(void)
{
int ret;
@@ -940,8 +1179,6 @@ int __init btrfs_init_sysfs(void)
if (!btrfs_kset)
return -ENOMEM;
- btrfs_init_debugfs();
-
init_feature_attrs();
ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group);
if (ret)
@@ -951,12 +1188,17 @@ int __init btrfs_init_sysfs(void)
if (ret)
goto out_remove_group;
+#ifdef CONFIG_BTRFS_DEBUG
+ ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group);
+ if (ret)
+ goto out2;
+#endif
+
return 0;
out_remove_group:
sysfs_remove_group(&btrfs_kset->kobj, &btrfs_feature_attr_group);
out2:
- debugfs_remove_recursive(btrfs_debugfs_root_dentry);
kset_unregister(btrfs_kset);
return ret;
@@ -968,6 +1210,5 @@ void __cold btrfs_exit_sysfs(void)
&btrfs_static_feature_attr_group);
sysfs_remove_group(&btrfs_kset->kobj, &btrfs_feature_attr_group);
kset_unregister(btrfs_kset);
- debugfs_remove_recursive(btrfs_debugfs_root_dentry);
}
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index 40716b357c1d..e10c3adfc30f 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -3,10 +3,7 @@
#ifndef BTRFS_SYSFS_H
#define BTRFS_SYSFS_H
-/*
- * Data exported through sysfs
- */
-extern u64 btrfs_debugfs_test;
+#include <linux/kobject.h>
enum btrfs_feature_set {
FEAT_COMPAT,
@@ -15,71 +12,8 @@ enum btrfs_feature_set {
FEAT_MAX
};
-#define __INIT_KOBJ_ATTR(_name, _mode, _show, _store) \
-{ \
- .attr = { .name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-}
-
-#define BTRFS_ATTR_RW(_prefix, _name, _show, _store) \
- static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
- __INIT_KOBJ_ATTR(_name, 0644, _show, _store)
-
-#define BTRFS_ATTR(_prefix, _name, _show) \
- static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
- __INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
-
-#define BTRFS_ATTR_PTR(_prefix, _name) \
- (&btrfs_attr_##_prefix##_##_name.attr)
-
-
-struct btrfs_feature_attr {
- struct kobj_attribute kobj_attr;
- enum btrfs_feature_set feature_set;
- u64 feature_bit;
-};
-
-#define BTRFS_FEAT_ATTR(_name, _feature_set, _feature_prefix, _feature_bit) \
-static struct btrfs_feature_attr btrfs_attr_features_##_name = { \
- .kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO, \
- btrfs_feature_attr_show, \
- btrfs_feature_attr_store), \
- .feature_set = _feature_set, \
- .feature_bit = _feature_prefix ##_## _feature_bit, \
-}
-#define BTRFS_FEAT_ATTR_PTR(_name) \
- (&btrfs_attr_features_##_name.kobj_attr.attr)
-
-#define BTRFS_FEAT_ATTR_COMPAT(name, feature) \
- BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature)
-#define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \
- BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT_RO, feature)
-#define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \
- BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature)
-
-/* convert from attribute */
-static inline struct btrfs_feature_attr *
-to_btrfs_feature_attr(struct kobj_attribute *a)
-{
- return container_of(a, struct btrfs_feature_attr, kobj_attr);
-}
-
-static inline struct kobj_attribute *attr_to_btrfs_attr(struct attribute *attr)
-{
- return container_of(attr, struct kobj_attribute, attr);
-}
-
-static inline struct btrfs_feature_attr *
-attr_to_btrfs_feature_attr(struct attribute *attr)
-{
- return to_btrfs_feature_attr(attr_to_btrfs_attr(attr));
-}
-
char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags);
-extern const char * const btrfs_feature_set_names[FEAT_MAX];
-extern struct kobj_type space_info_ktype;
-extern struct kobj_type btrfs_raid_ktype;
+const char * const btrfs_feature_set_name(enum btrfs_feature_set set);
int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *one_device);
int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices,
@@ -88,7 +22,19 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
struct kobject *parent);
int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs);
void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs);
+void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices,
+ const u8 *fsid);
void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
u64 bit, enum btrfs_feature_set set);
+void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action);
+
+int __init btrfs_init_sysfs(void);
+void __cold btrfs_exit_sysfs(void);
+int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
+void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
+void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache);
+int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info);
+void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info);
#endif
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 1e3ba4949399..a7aca4141788 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -15,6 +15,7 @@
#include "../volumes.h"
#include "../disk-io.h"
#include "../qgroup.h"
+#include "../block-group.h"
static struct vfsmount *test_mnt = NULL;
@@ -51,7 +52,13 @@ static struct file_system_type test_type = {
struct inode *btrfs_new_test_inode(void)
{
- return new_inode(test_mnt->mnt_sb);
+ struct inode *inode;
+
+ inode = new_inode(test_mnt->mnt_sb);
+ if (inode)
+ inode_init_owner(inode, NULL, S_IFREG);
+
+ return inode;
}
static int btrfs_init_test_fs(void)
@@ -195,11 +202,11 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
kfree(root);
}
-struct btrfs_block_group_cache *
+struct btrfs_block_group *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
unsigned long length)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (!cache)
@@ -211,9 +218,8 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
return NULL;
}
- cache->key.objectid = 0;
- cache->key.offset = length;
- cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ cache->start = 0;
+ cache->length = length;
cache->full_stripe_len = fs_info->sectorsize;
cache->fs_info = fs_info;
@@ -226,7 +232,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
return cache;
}
-void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
+void btrfs_free_dummy_block_group(struct btrfs_block_group *cache)
{
if (!cache)
return;
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index ee277bbd939b..9e52527357d8 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -41,9 +41,9 @@ struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
void btrfs_free_dummy_root(struct btrfs_root *root);
-struct btrfs_block_group_cache *
+struct btrfs_block_group *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length);
-void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
+void btrfs_free_dummy_block_group(struct btrfs_block_group *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
#else
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 1bf6b5a79191..123d9a614357 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -438,6 +438,7 @@ static int test_find_first_clear_extent_bit(void)
{
struct extent_io_tree tree;
u64 start, end;
+ int ret = -EINVAL;
test_msg("running find_first_clear_extent_bit test");
extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST, NULL);
@@ -452,9 +453,11 @@ static int test_find_first_clear_extent_bit(void)
find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
CHUNK_TRIMMED | CHUNK_ALLOCATED);
- if (start != 0 || end != SZ_1M -1)
+ if (start != 0 || end != SZ_1M - 1) {
test_err("error finding beginning range: start %llu end %llu",
start, end);
+ goto out;
+ }
/* Now add 32M-64M so that we have a hole between 4M-32M */
set_extent_bits(&tree, SZ_32M, SZ_64M - 1,
@@ -466,9 +469,11 @@ static int test_find_first_clear_extent_bit(void)
find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
CHUNK_TRIMMED | CHUNK_ALLOCATED);
- if (start != SZ_4M || end != SZ_32M - 1)
+ if (start != SZ_4M || end != SZ_32M - 1) {
test_err("error finding trimmed range: start %llu end %llu",
start, end);
+ goto out;
+ }
/*
* Search in the middle of allocated range, should get the next one
@@ -477,9 +482,11 @@ static int test_find_first_clear_extent_bit(void)
find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
CHUNK_TRIMMED | CHUNK_ALLOCATED);
- if (start != SZ_4M || end != SZ_32M -1)
+ if (start != SZ_4M || end != SZ_32M - 1) {
test_err("error finding next unalloc range: start %llu end %llu",
start, end);
+ goto out;
+ }
/*
* Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag
@@ -489,9 +496,11 @@ static int test_find_first_clear_extent_bit(void)
find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
CHUNK_TRIMMED);
- if (start != SZ_64M || end != SZ_64M + SZ_8M - 1)
+ if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
test_err("error finding exact range: start %llu end %llu",
start, end);
+ goto out;
+ }
find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
CHUNK_TRIMMED);
@@ -500,21 +509,29 @@ static int test_find_first_clear_extent_bit(void)
* Search in the middle of set range whose immediate neighbour doesn't
* have the bits set so it must be returned
*/
- if (start != SZ_64M || end != SZ_64M + SZ_8M - 1)
+ if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
test_err("error finding next alloc range: start %llu end %llu",
start, end);
+ goto out;
+ }
/*
* Search beyond any known range, shall return after last known range
* and end should be -1
*/
find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
- if (start != SZ_64M + SZ_8M || end != -1)
+ if (start != SZ_64M + SZ_8M || end != -1) {
test_err(
"error handling beyond end of range search: start %llu end %llu",
start, end);
+ goto out;
+ }
- return 0;
+ ret = 0;
+out:
+ clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);
+
+ return ret;
}
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index af89f66f9e63..aebdf23f0cdd 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -8,6 +8,7 @@
#include "../ctree.h"
#include "../disk-io.h"
#include "../free-space-cache.h"
+#include "../block-group.h"
#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
@@ -16,7 +17,7 @@
* entry and remove space from either end and the middle, and make sure we can
* remove space that covers adjacent extent entries.
*/
-static int test_extents(struct btrfs_block_group_cache *cache)
+static int test_extents(struct btrfs_block_group *cache)
{
int ret = 0;
@@ -86,8 +87,7 @@ static int test_extents(struct btrfs_block_group_cache *cache)
return 0;
}
-static int test_bitmaps(struct btrfs_block_group_cache *cache,
- u32 sectorsize)
+static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize)
{
u64 next_bitmap_offset;
int ret;
@@ -155,7 +155,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache,
}
/* This is the high grade jackassery */
-static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
+static int test_bitmaps_and_extents(struct btrfs_block_group *cache,
u32 sectorsize)
{
u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
@@ -330,7 +330,7 @@ static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl,
/* Used by test_steal_space_from_bitmap_to_extent(). */
static int
-check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache,
+check_num_extents_and_bitmaps(const struct btrfs_block_group *cache,
const int num_extents,
const int num_bitmaps)
{
@@ -350,7 +350,7 @@ check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache,
}
/* Used by test_steal_space_from_bitmap_to_extent(). */
-static int check_cache_empty(struct btrfs_block_group_cache *cache)
+static int check_cache_empty(struct btrfs_block_group *cache)
{
u64 offset;
u64 max_extent_size;
@@ -392,7 +392,7 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
* requests.
*/
static int
-test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache,
u32 sectorsize)
{
int ret;
@@ -828,7 +828,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
struct btrfs_fs_info *fs_info;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
struct btrfs_root *root = NULL;
int ret = -ENOMEM;
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index a90dad166971..1a846bf6e197 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -9,6 +9,7 @@
#include "../disk-io.h"
#include "../free-space-tree.h"
#include "../transaction.h"
+#include "../block-group.h"
struct free_space_extent {
u64 start;
@@ -17,7 +18,7 @@ struct free_space_extent {
static int __check_free_space_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
const struct free_space_extent * const extents,
unsigned int num_extents)
@@ -47,7 +48,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans,
if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
if (path->slots[0] != 0)
goto invalid;
- end = cache->key.objectid + cache->key.offset;
+ end = cache->start + cache->length;
i = 0;
while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -106,7 +107,7 @@ invalid:
static int check_free_space_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
const struct free_space_extent * const extents,
unsigned int num_extents)
@@ -149,12 +150,12 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans,
static int test_empty_block_group(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, cache->key.offset},
+ {cache->start, cache->length},
};
return check_free_space_extents(trans, fs_info, cache, path,
@@ -163,7 +164,7 @@ static int test_empty_block_group(struct btrfs_trans_handle *trans,
static int test_remove_all(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
@@ -171,8 +172,8 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start,
+ cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -184,18 +185,17 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
static int test_remove_beginning(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid + alignment,
- cache->key.offset - alignment},
+ {cache->start + alignment, cache->length - alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid, alignment);
+ cache->start, alignment);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -208,19 +208,18 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans,
static int test_remove_end(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, cache->key.offset - alignment},
+ {cache->start, cache->length - alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid +
- cache->key.offset - alignment,
- alignment);
+ cache->start + cache->length - alignment,
+ alignment);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -232,19 +231,18 @@ static int test_remove_end(struct btrfs_trans_handle *trans,
static int test_remove_middle(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, alignment},
- {cache->key.objectid + 2 * alignment,
- cache->key.offset - 2 * alignment},
+ {cache->start, alignment},
+ {cache->start + 2 * alignment, cache->length - 2 * alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid + alignment,
+ cache->start + alignment,
alignment);
if (ret) {
test_err("could not remove free space");
@@ -257,24 +255,23 @@ static int test_remove_middle(struct btrfs_trans_handle *trans,
static int test_merge_left(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, 2 * alignment},
+ {cache->start, 2 * alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+ ret = __add_to_free_space_tree(trans, cache, path, cache->start,
alignment);
if (ret) {
test_err("could not add free space");
@@ -282,7 +279,7 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + alignment,
+ cache->start + alignment,
alignment);
if (ret) {
test_err("could not add free space");
@@ -295,25 +292,24 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
static int test_merge_right(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid + alignment, 2 * alignment},
+ {cache->start + alignment, 2 * alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + 2 * alignment,
+ cache->start + 2 * alignment,
alignment);
if (ret) {
test_err("could not add free space");
@@ -321,7 +317,7 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + alignment,
+ cache->start + alignment,
alignment);
if (ret) {
test_err("could not add free space");
@@ -334,24 +330,23 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
static int test_merge_both(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, 3 * alignment},
+ {cache->start, 3 * alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+ ret = __add_to_free_space_tree(trans, cache, path, cache->start,
alignment);
if (ret) {
test_err("could not add free space");
@@ -359,16 +354,14 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + 2 * alignment,
- alignment);
+ cache->start + 2 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + alignment,
- alignment);
+ cache->start + alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -380,26 +373,25 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
static int test_merge_none(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, alignment},
- {cache->key.objectid + 2 * alignment, alignment},
- {cache->key.objectid + 4 * alignment, alignment},
+ {cache->start, alignment},
+ {cache->start + 2 * alignment, alignment},
+ {cache->start + 4 * alignment, alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+ ret = __add_to_free_space_tree(trans, cache, path, cache->start,
alignment);
if (ret) {
test_err("could not add free space");
@@ -407,16 +399,14 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + 4 * alignment,
- alignment);
+ cache->start + 4 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + 2 * alignment,
- alignment);
+ cache->start + 2 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -428,7 +418,7 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
typedef int (*test_func_t)(struct btrfs_trans_handle *,
struct btrfs_fs_info *,
- struct btrfs_block_group_cache *,
+ struct btrfs_block_group *,
struct btrfs_path *,
u32 alignment);
@@ -437,7 +427,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
{
struct btrfs_fs_info *fs_info;
struct btrfs_root *root = NULL;
- struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_block_group *cache = NULL;
struct btrfs_trans_handle trans;
struct btrfs_path *path = NULL;
int ret;
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index bc6dbd1b42fd..09ecf7dc7b08 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -957,7 +957,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/* [BTRFS_MAX_EXTENT_SIZE] */
ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, 0,
- NULL, 0);
+ NULL);
if (ret) {
test_err("btrfs_set_extent_delalloc returned %d", ret);
goto out;
@@ -972,7 +972,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
- 0, NULL, 0);
+ 0, NULL);
if (ret) {
test_err("btrfs_set_extent_delalloc returned %d", ret);
goto out;
@@ -988,8 +988,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
- EXTENT_DELALLOC | EXTENT_DIRTY |
- EXTENT_UPTODATE, 0, 0, NULL);
+ EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1005,7 +1004,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1)
+ sectorsize - 1,
- 0, NULL, 0);
+ 0, NULL);
if (ret) {
test_err("btrfs_set_extent_delalloc returned %d", ret);
goto out;
@@ -1023,7 +1022,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
(BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
- 0, NULL, 0);
+ 0, NULL);
if (ret) {
test_err("btrfs_set_extent_delalloc returned %d", ret);
goto out;
@@ -1040,7 +1039,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
*/
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL);
if (ret) {
test_err("btrfs_set_extent_delalloc returned %d", ret);
goto out;
@@ -1056,8 +1055,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_UPTODATE, 0, 0, NULL);
+ EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1075,7 +1073,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
*/
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL);
if (ret) {
test_err("btrfs_set_extent_delalloc returned %d", ret);
goto out;
@@ -1089,8 +1087,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/* Empty */
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_UPTODATE, 0, 0, NULL);
+ EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1105,8 +1102,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
out:
if (ret)
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_UPTODATE, 0, 0, NULL);
+ EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3b8ae1a8f02d..cfc08ef9b876 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -10,6 +10,7 @@
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
+#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -19,24 +20,98 @@
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
+#include "block-group.h"
#define BTRFS_ROOT_TRANS_TAG 0
+/*
+ * Transaction states and transitions
+ *
+ * No running transaction (fs tree blocks are not modified)
+ * |
+ * | To next stage:
+ * | Call start_transaction() variants. Except btrfs_join_transaction_nostart().
+ * V
+ * Transaction N [[TRANS_STATE_RUNNING]]
+ * |
+ * | New trans handles can be attached to transaction N by calling all
+ * | start_transaction() variants.
+ * |
+ * | To next stage:
+ * | Call btrfs_commit_transaction() on any trans handle attached to
+ * | transaction N
+ * V
+ * Transaction N [[TRANS_STATE_COMMIT_START]]
+ * |
+ * | Will wait for previous running transaction to completely finish if there
+ * | is one
+ * |
+ * | Then one of the following happens:
+ * | - Wait for all other trans handle holders to release.
+ * | The btrfs_commit_transaction() caller will do the commit work.
+ * | - Wait for current transaction to be committed by others.
+ * | Other btrfs_commit_transaction() caller will do the commit work.
+ * |
+ * | At this stage, only btrfs_join_transaction*() variants can attach
+ * | to this running transaction.
+ * | All other variants will wait for current one to finish and attach to
+ * | transaction N+1.
+ * |
+ * | To next stage:
+ * | Caller is chosen to commit transaction N, and all other trans handles
+ * | have been released.
+ * V
+ * Transaction N [[TRANS_STATE_COMMIT_DOING]]
+ * |
+ * | The heavy lifting transaction work is started.
+ * | From running delayed refs (modifying extent tree) to creating pending
+ * | snapshots, running qgroups.
+ * | In short, modify supporting trees to reflect modifications of subvolume
+ * | trees.
+ * |
+ * | At this stage, all start_transaction() calls will wait for this
+ * | transaction to finish and attach to transaction N+1.
+ * |
+ * | To next stage:
+ * | Until all supporting trees are updated.
+ * V
+ * Transaction N [[TRANS_STATE_UNBLOCKED]]
+ * | Transaction N+1
+ * | All needed trees are modified, thus we only [[TRANS_STATE_RUNNING]]
+ * | need to write them back to disk and update |
+ * | super blocks. |
+ * | |
+ * | At this stage, new transaction is allowed to |
+ * | start. |
+ * | All new start_transaction() calls will be |
+ * | attached to transid N+1. |
+ * | |
+ * | To next stage: |
+ * | Until all tree blocks and super blocks are |
+ * | written to block devices |
+ * V |
+ * Transaction N [[TRANS_STATE_COMPLETED]] V
+ * All tree blocks and super blocks are written. Transaction N+1
+ * This transaction is finished and all its [[TRANS_STATE_COMMIT_START]]
+ * data structures will be cleaned up. | Life goes on
+ */
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_RUNNING] = 0U,
- [TRANS_STATE_BLOCKED] = __TRANS_START,
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
__TRANS_ATTACH |
- __TRANS_JOIN),
+ __TRANS_JOIN |
+ __TRANS_JOIN_NOSTART),
[TRANS_STATE_UNBLOCKED] = (__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
- __TRANS_JOIN_NOLOCK),
+ __TRANS_JOIN_NOLOCK |
+ __TRANS_JOIN_NOSTART),
[TRANS_STATE_COMPLETED] = (__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
- __TRANS_JOIN_NOLOCK),
+ __TRANS_JOIN_NOLOCK |
+ __TRANS_JOIN_NOSTART),
};
void btrfs_put_transaction(struct btrfs_transaction *transaction)
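As a minimal sketch of the handle lifecycle the state diagram above describes (illustrative only, not part of the patch), a caller attaches a handle to the running transaction and either releases it or drives the commit through the states shown:

    /* Illustrative only; assumes the usual btrfs headers (ctree.h, transaction.h). */
    static int example_transaction_user(struct btrfs_root *root)
    {
            struct btrfs_trans_handle *trans;

            /* Attach to transaction N, starting one if none is running. */
            trans = btrfs_start_transaction(root, 1);
            if (IS_ERR(trans))
                    return PTR_ERR(trans);

            /* ... modify subvolume trees under this handle ... */

            /*
             * Wait for the other handle holders, then drive N through
             * COMMIT_START -> COMMIT_DOING -> UNBLOCKED -> COMPLETED.
             */
            return btrfs_commit_transaction(trans);
    }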
@@ -58,10 +133,10 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
* discard the physical locations of the block groups.
*/
while (!list_empty(&transaction->deleted_bgs)) {
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
cache = list_first_entry(&transaction->deleted_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
bg_list);
list_del_init(&cache->bg_list);
btrfs_put_block_group_trimming(cache);
@@ -378,7 +453,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
- return (trans->state >= TRANS_STATE_BLOCKED &&
+ return (trans->state >= TRANS_STATE_COMMIT_START &&
trans->state < TRANS_STATE_UNBLOCKED &&
!trans->aborted);
}
@@ -481,7 +556,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
* worth of delayed refs updates in this trans handle, and
* refill that amount for whatever is missing in the reserve.
*/
- num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
+ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
if (delayed_refs_rsv->full == 0) {
delayed_refs_bytes = num_bytes;
num_bytes <<= 1;
@@ -543,7 +618,8 @@ again:
ret = join_transaction(fs_info, type);
if (ret == -EBUSY) {
wait_current_trans(fs_info);
- if (unlikely(type == TRANS_ATTACH))
+ if (unlikely(type == TRANS_ATTACH ||
+ type == TRANS_JOIN_NOSTART))
ret = -ENOENT;
}
} while (ret == -EBUSY);
@@ -564,7 +640,7 @@ again:
INIT_LIST_HEAD(&h->new_bgs);
smp_mb();
- if (cur_trans->state >= TRANS_STATE_BLOCKED &&
+ if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
may_wait_transaction(fs_info, type)) {
current->journal_info = h;
btrfs_commit_transaction(h);
@@ -631,7 +707,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
if (IS_ERR(trans))
return trans;
- num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
+ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
num_bytes, min_factor);
if (ret) {
@@ -653,13 +729,23 @@ struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
true);
}
-struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
+struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
BTRFS_RESERVE_NO_FLUSH, true);
}
/*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+ return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+ BTRFS_RESERVE_NO_FLUSH, true);
+}
+
+/*
* btrfs_attach_transaction() - catch the running transaction
*
* It is used when we want to commit the current the transaction, but
@@ -782,7 +868,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
struct btrfs_transaction *cur_trans = trans->transaction;
smp_mb();
- if (cur_trans->state >= TRANS_STATE_BLOCKED ||
+ if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
cur_trans->delayed_refs.flushing)
return 1;
@@ -815,7 +901,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
- int lock = (trans->type != TRANS_JOIN_NOLOCK);
int err = 0;
if (refcount_read(&trans->use_count) > 1) {
@@ -831,13 +916,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_trans_release_chunk_metadata(trans);
- if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
- if (throttle)
- return btrfs_commit_transaction(trans);
- else
- wake_up_process(info->transaction_kthread);
- }
-
if (trans->type & __TRANS_FREEZABLE)
sb_end_intwrite(info->sb);
@@ -974,7 +1052,7 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
return werr;
}
-int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
+static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages)
{
bool errors = false;
@@ -1859,7 +1937,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *block_group, *tmp;
+ struct btrfs_block_group *block_group, *tmp;
list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
btrfs_delayed_refs_rsv_release(fs_info, 1);
@@ -1933,6 +2011,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
struct btrfs_transaction *prev_trans = NULL;
int ret;
+ ASSERT(refcount_read(&trans->use_count) == 1);
+
/* Stop the commit early if ->aborted is set */
if (unlikely(READ_ONCE(cur_trans->aborted))) {
ret = cur_trans->aborted;
@@ -2037,6 +2117,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
}
} else {
spin_unlock(&fs_info->trans_lock);
+ /*
+ * The previous transaction was aborted and was already removed
+ * from the list of transactions at fs_info->trans_list. So we
+ * abort to prevent writing a new superblock that reflects a
+ * corrupt state (pointing to trees with unwritten nodes/leafs).
+ */
+ if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
+ ret = -EROFS;
+ goto cleanup_transaction;
+ }
}
extwriter_counter_dec(cur_trans, trans->type);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 527ea94b57d9..49f7196368f5 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -13,7 +13,6 @@
enum btrfs_trans_state {
TRANS_STATE_RUNNING,
- TRANS_STATE_BLOCKED,
TRANS_STATE_COMMIT_START,
TRANS_STATE_COMMIT_DOING,
TRANS_STATE_UNBLOCKED,
@@ -94,11 +93,13 @@ struct btrfs_transaction {
#define __TRANS_JOIN (1U << 11)
#define __TRANS_JOIN_NOLOCK (1U << 12)
#define __TRANS_DUMMY (1U << 13)
+#define __TRANS_JOIN_NOSTART (1U << 14)
#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH (__TRANS_ATTACH)
#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
@@ -182,7 +183,8 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
unsigned int num_items,
int min_factor);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
-struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
@@ -215,8 +217,6 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark);
-int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *dirty_pages);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index ccd5706199d7..493d4d9e0f79 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -23,6 +23,7 @@
#include "disk-io.h"
#include "compression.h"
#include "volumes.h"
+#include "misc.h"
/*
* Error message should follow the following format:
@@ -124,6 +125,74 @@ static u64 file_extent_end(struct extent_buffer *leaf,
return end;
}
+/*
+ * Customized report for dir_item; the only important new information is
+ * key->objectid, which represents the inode number
+ */
+__printf(3, 4)
+__cold
+static void dir_item_err(const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ const struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
+ key.objectid, &vaf);
+ va_end(args);
+}
+
+/*
+ * This function checks prev_key->objectid to ensure that the current key and
+ * prev_key share the same objectid, i.e. the same inode number.
+ *
+ * This is to detect missing INODE_ITEM in subvolume trees.
+ *
+ * Return true if everything is OK or we don't need to check.
+ * Return false if anything is wrong.
+ */
+static bool check_prev_ino(struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot,
+ struct btrfs_key *prev_key)
+{
+ /* No prev key, skip check */
+ if (slot == 0)
+ return true;
+
+ /* Only these key->types need to be checked */
+ ASSERT(key->type == BTRFS_XATTR_ITEM_KEY ||
+ key->type == BTRFS_INODE_REF_KEY ||
+ key->type == BTRFS_DIR_INDEX_KEY ||
+ key->type == BTRFS_DIR_ITEM_KEY ||
+ key->type == BTRFS_EXTENT_DATA_KEY);
+
+ /*
+ * Only subvolume trees along with their reloc trees need this check.
+ * Things like the log tree don't follow this ino requirement.
+ */
+ if (!is_fstree(btrfs_header_owner(leaf)))
+ return true;
+
+ if (key->objectid == prev_key->objectid)
+ return true;
+
+ /* Error found */
+ dir_item_err(leaf, slot,
+ "invalid previous key objectid, have %llu expect %llu",
+ prev_key->objectid, key->objectid);
+ return false;
+}
static int check_extent_data_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot,
struct btrfs_key *prev_key)
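A concrete (hypothetical) leaf layout shows what check_prev_ino() catches: with inode 257's INODE_ITEM missing, the leaf might read

    slot 0: key (256 DIR_INDEX 2)
    slot 1: key (257 EXTENT_DATA 0)

and when slot 1 is checked, prev_key->objectid 256 does not match 257, so the missing INODE_ITEM is reported instead of being silently accepted.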
@@ -141,13 +210,33 @@ static int check_extent_data_item(struct extent_buffer *leaf,
return -EUCLEAN;
}
+ /*
+ * Previous key must have the same key->objectid (ino).
+ * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA.
+ * But if objectids mismatch, it means we have a missing
+ * INODE_ITEM.
+ */
+ if (!check_prev_ino(leaf, key, slot, prev_key))
+ return -EUCLEAN;
+
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
+ /*
+ * Make sure the item contains at least the inline header, so the file
+ * extent type is not garbage.
+ */
+ if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) {
+ file_extent_err(leaf, slot,
+ "invalid item size, have %u expect [%lu, %u)",
+ item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START,
+ SZ_4K);
+ return -EUCLEAN;
+ }
+ if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) {
file_extent_err(leaf, slot,
"invalid type for file extent, have %u expect range [0, %u]",
btrfs_file_extent_type(leaf, fi),
- BTRFS_FILE_EXTENT_TYPES);
+ BTRFS_NR_FILE_EXTENT_TYPES - 1);
return -EUCLEAN;
}
@@ -155,11 +244,11 @@ static int check_extent_data_item(struct extent_buffer *leaf,
* Support for new compression/encryption must introduce incompat flag,
* and must be caught in open_ctree().
*/
- if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
+ if (btrfs_file_extent_compression(leaf, fi) >= BTRFS_NR_COMPRESS_TYPES) {
file_extent_err(leaf, slot,
"invalid compression for file extent, have %u expect range [0, %u]",
btrfs_file_extent_compression(leaf, fi),
- BTRFS_COMPRESS_TYPES);
+ BTRFS_NR_COMPRESS_TYPES - 1);
return -EUCLEAN;
}
if (btrfs_file_extent_encryption(leaf, fi)) {
@@ -270,42 +359,17 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
return 0;
}
-/*
- * Customized reported for dir_item, only important new info is key->objectid,
- * which represents inode number
- */
-__printf(3, 4)
-__cold
-static void dir_item_err(const struct extent_buffer *eb, int slot,
- const char *fmt, ...)
-{
- const struct btrfs_fs_info *fs_info = eb->fs_info;
- struct btrfs_key key;
- struct va_format vaf;
- va_list args;
-
- btrfs_item_key_to_cpu(eb, &key, slot);
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- btrfs_crit(fs_info,
- "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
- btrfs_header_level(eb) == 0 ? "leaf" : "node",
- btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
- key.objectid, &vaf);
- va_end(args);
-}
-
static int check_dir_item(struct extent_buffer *leaf,
- struct btrfs_key *key, int slot)
+ struct btrfs_key *key, struct btrfs_key *prev_key,
+ int slot)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_dir_item *di;
u32 item_size = btrfs_item_size_nr(leaf, slot);
u32 cur = 0;
+ if (!check_prev_ino(leaf, key, slot, prev_key))
+ return -EUCLEAN;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
while (cur < item_size) {
u32 name_len;
@@ -459,23 +523,23 @@ static int check_block_group_item(struct extent_buffer *leaf,
read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
sizeof(bgi));
- if (btrfs_block_group_chunk_objectid(&bgi) !=
+ if (btrfs_stack_block_group_chunk_objectid(&bgi) !=
BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
block_group_err(leaf, slot,
"invalid block group chunk objectid, have %llu expect %llu",
- btrfs_block_group_chunk_objectid(&bgi),
+ btrfs_stack_block_group_chunk_objectid(&bgi),
BTRFS_FIRST_CHUNK_TREE_OBJECTID);
return -EUCLEAN;
}
- if (btrfs_block_group_used(&bgi) > key->offset) {
+ if (btrfs_stack_block_group_used(&bgi) > key->offset) {
block_group_err(leaf, slot,
"invalid block group used, have %llu expect [0, %llu)",
- btrfs_block_group_used(&bgi), key->offset);
+ btrfs_stack_block_group_used(&bgi), key->offset);
return -EUCLEAN;
}
- flags = btrfs_block_group_flags(&bgi);
+ flags = btrfs_stack_block_group_flags(&bgi);
if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
block_group_err(leaf, slot,
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
@@ -609,7 +673,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
return -EUCLEAN;
}
- if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+ if (!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
chunk_err(leaf, chunk, logical,
"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
@@ -686,9 +750,7 @@ static void dev_item_err(const struct extent_buffer *eb, int slot,
static int check_dev_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot)
{
- struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_dev_item *ditem;
- u64 max_devid = max(BTRFS_MAX_DEVS(fs_info), BTRFS_MAX_DEVS_SYS_CHUNK);
if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
dev_item_err(leaf, slot,
@@ -696,12 +758,6 @@ static int check_dev_item(struct extent_buffer *leaf,
key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
return -EUCLEAN;
}
- if (key->offset > max_devid) {
- dev_item_err(leaf, slot,
- "invalid devid: has=%llu expect=[0, %llu]",
- key->offset, max_devid);
- return -EUCLEAN;
- }
ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
if (btrfs_device_id(leaf, ditem) != key->offset) {
dev_item_err(leaf, slot,
@@ -793,11 +849,11 @@ static int check_inode_item(struct extent_buffer *leaf,
}
/*
- * S_IFMT is not bit mapped so we can't completely rely on is_power_of_2,
- * but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG.
- * Only needs to check BLK, LNK and SOCKS
+ * S_IFMT is not bit mapped so we can't completely rely on
+ * is_power_of_2/has_single_bit_set, but it can save us from checking
+ * FIFO/CHR/DIR/REG. Only needs to check BLK, LNK and SOCKS
*/
- if (!is_power_of_2(mode & S_IFMT)) {
+ if (!has_single_bit_set(mode & S_IFMT)) {
if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
inode_item_err(fs_info, leaf, slot,
"invalid mode: has 0%o expect valid S_IF* bit(s)",
@@ -821,6 +877,469 @@ static int check_inode_item(struct extent_buffer *leaf,
return 0;
}
+static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
+ int slot)
+{
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
+ struct btrfs_root_item ri;
+ const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY |
+ BTRFS_ROOT_SUBVOL_DEAD;
+
+ /* No such tree id */
+ if (key->objectid == 0) {
+ generic_err(leaf, slot, "invalid root id 0");
+ return -EUCLEAN;
+ }
+
+ /*
+	 * Some older kernels may create ROOT_ITEM with non-zero offset, so here
+ * we only check offset for reloc tree whose key->offset must be a
+ * valid tree.
+ */
+ if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) {
+ generic_err(leaf, slot, "invalid root id 0 for reloc tree");
+ return -EUCLEAN;
+ }
+
+ if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) {
+ generic_err(leaf, slot,
+ "invalid root item size, have %u expect %zu",
+ btrfs_item_size_nr(leaf, slot), sizeof(ri));
+ }
+
+ read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
+ sizeof(ri));
+
+ /* Generation related */
+ if (btrfs_root_generation(&ri) >
+ btrfs_super_generation(fs_info->super_copy) + 1) {
+ generic_err(leaf, slot,
+ "invalid root generation, have %llu expect (0, %llu]",
+ btrfs_root_generation(&ri),
+ btrfs_super_generation(fs_info->super_copy) + 1);
+ return -EUCLEAN;
+ }
+ if (btrfs_root_generation_v2(&ri) >
+ btrfs_super_generation(fs_info->super_copy) + 1) {
+ generic_err(leaf, slot,
+ "invalid root v2 generation, have %llu expect (0, %llu]",
+ btrfs_root_generation_v2(&ri),
+ btrfs_super_generation(fs_info->super_copy) + 1);
+ return -EUCLEAN;
+ }
+ if (btrfs_root_last_snapshot(&ri) >
+ btrfs_super_generation(fs_info->super_copy) + 1) {
+ generic_err(leaf, slot,
+ "invalid root last_snapshot, have %llu expect (0, %llu]",
+ btrfs_root_last_snapshot(&ri),
+ btrfs_super_generation(fs_info->super_copy) + 1);
+ return -EUCLEAN;
+ }
+
+ /* Alignment and level check */
+ if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) {
+ generic_err(leaf, slot,
+ "invalid root bytenr, have %llu expect to be aligned to %u",
+ btrfs_root_bytenr(&ri), fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) {
+ generic_err(leaf, slot,
+ "invalid root level, have %u expect [0, %u]",
+ btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1);
+ return -EUCLEAN;
+ }
+ if (ri.drop_level >= BTRFS_MAX_LEVEL) {
+ generic_err(leaf, slot,
+ "invalid root level, have %u expect [0, %u]",
+ ri.drop_level, BTRFS_MAX_LEVEL - 1);
+ return -EUCLEAN;
+ }
+
+ /* Flags check */
+ if (btrfs_root_flags(&ri) & ~valid_root_flags) {
+ generic_err(leaf, slot,
+ "invalid root flags, have 0x%llx expect mask 0x%llx",
+ btrfs_root_flags(&ri), valid_root_flags);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
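A minimal user-space sketch (not part of the patch) of the flag check used in check_root_item() above: any bit outside the advertised mask fails validation. The flag values below are placeholders; the real definitions are BTRFS_ROOT_SUBVOL_RDONLY and BTRFS_ROOT_SUBVOL_DEAD.

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag values only -- the real ones live in the btrfs headers. */
#define SUBVOL_RDONLY	(1ULL << 0)
#define SUBVOL_DEAD	(1ULL << 48)

int main(void)
{
	const uint64_t valid = SUBVOL_RDONLY | SUBVOL_DEAD;
	uint64_t have = SUBVOL_RDONLY | (1ULL << 7);	/* one unknown bit set */

	/* the "reject any bit outside the advertised mask" idiom */
	puts((have & ~valid) ? "reject (-EUCLEAN)" : "accept");
	return 0;
}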
+__printf(3,4)
+__cold
+static void extent_err(const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+ u64 bytenr;
+ u64 len;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ bytenr = key.objectid;
+ if (key.type == BTRFS_METADATA_ITEM_KEY ||
+ key.type == BTRFS_TREE_BLOCK_REF_KEY ||
+ key.type == BTRFS_SHARED_BLOCK_REF_KEY)
+ len = eb->fs_info->nodesize;
+ else
+ len = key.offset;
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(eb->fs_info,
+ "corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ eb->start, slot, bytenr, len, &vaf);
+ va_end(args);
+}
+
+static int check_extent_item(struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
+ struct btrfs_extent_item *ei;
+ bool is_tree_block = false;
+ unsigned long ptr; /* Current pointer inside inline refs */
+ unsigned long end; /* Extent item end */
+ const u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u64 flags;
+ u64 generation;
+ u64 total_refs; /* Total refs in btrfs_extent_item */
+ u64 inline_refs = 0; /* found total inline refs */
+
+ if (key->type == BTRFS_METADATA_ITEM_KEY &&
+ !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
+ generic_err(leaf, slot,
+"invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled");
+ return -EUCLEAN;
+ }
+ /* key->objectid is the bytenr for both key types */
+ if (!IS_ALIGNED(key->objectid, fs_info->sectorsize)) {
+ generic_err(leaf, slot,
+ "invalid key objectid, have %llu expect to be aligned to %u",
+ key->objectid, fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+
+ /* key->offset is tree level for METADATA_ITEM_KEY */
+ if (key->type == BTRFS_METADATA_ITEM_KEY &&
+ key->offset >= BTRFS_MAX_LEVEL) {
+ extent_err(leaf, slot,
+ "invalid tree level, have %llu expect [0, %u]",
+ key->offset, BTRFS_MAX_LEVEL - 1);
+ return -EUCLEAN;
+ }
+
+ /*
+ * EXTENT/METADATA_ITEM consists of:
+ * 1) One btrfs_extent_item
+ * Records the total refs, type and generation of the extent.
+ *
+ * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only)
+ * Records the first key and level of the tree block.
+ *
+	 * 3) Zero or more btrfs_extent_inline_ref(s)
+	 *    Each inline ref has one btrfs_extent_inline_ref that shows:
+	 *    3.1) The ref type, one of the 4
+	 *         TREE_BLOCK_REF	Tree block only
+	 *         SHARED_BLOCK_REF	Tree block only
+	 *         EXTENT_DATA_REF	Data only
+	 *         SHARED_DATA_REF	Data only
+	 *    3.2) Ref type specific data
+ * Either using btrfs_extent_inline_ref::offset, or specific
+ * data structure.
+ */
+ if (item_size < sizeof(*ei)) {
+ extent_err(leaf, slot,
+ "invalid item size, have %u expect [%zu, %u)",
+ item_size, sizeof(*ei),
+ BTRFS_LEAF_DATA_SIZE(fs_info));
+ return -EUCLEAN;
+ }
+ end = item_size + btrfs_item_ptr_offset(leaf, slot);
+
+ /* Checks against extent_item */
+ ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+ flags = btrfs_extent_flags(leaf, ei);
+ total_refs = btrfs_extent_refs(leaf, ei);
+ generation = btrfs_extent_generation(leaf, ei);
+ if (generation > btrfs_super_generation(fs_info->super_copy) + 1) {
+ extent_err(leaf, slot,
+ "invalid generation, have %llu expect (0, %llu]",
+ generation,
+ btrfs_super_generation(fs_info->super_copy) + 1);
+ return -EUCLEAN;
+ }
+ if (!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA |
+ BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
+ extent_err(leaf, slot,
+ "invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
+ flags, BTRFS_EXTENT_FLAG_DATA |
+ BTRFS_EXTENT_FLAG_TREE_BLOCK);
+ return -EUCLEAN;
+ }
+ is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
+ if (is_tree_block) {
+ if (key->type == BTRFS_EXTENT_ITEM_KEY &&
+ key->offset != fs_info->nodesize) {
+ extent_err(leaf, slot,
+ "invalid extent length, have %llu expect %u",
+ key->offset, fs_info->nodesize);
+ return -EUCLEAN;
+ }
+ } else {
+ if (key->type != BTRFS_EXTENT_ITEM_KEY) {
+ extent_err(leaf, slot,
+ "invalid key type, have %u expect %u for data backref",
+ key->type, BTRFS_EXTENT_ITEM_KEY);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(key->offset, fs_info->sectorsize)) {
+ extent_err(leaf, slot,
+ "invalid extent length, have %llu expect aligned to %u",
+ key->offset, fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ }
+ ptr = (unsigned long)(struct btrfs_extent_item *)(ei + 1);
+
+ /* Check the special case of btrfs_tree_block_info */
+ if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) {
+ struct btrfs_tree_block_info *info;
+
+ info = (struct btrfs_tree_block_info *)ptr;
+ if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) {
+ extent_err(leaf, slot,
+ "invalid tree block info level, have %u expect [0, %u]",
+ btrfs_tree_block_level(leaf, info),
+ BTRFS_MAX_LEVEL - 1);
+ return -EUCLEAN;
+ }
+ ptr = (unsigned long)(struct btrfs_tree_block_info *)(info + 1);
+ }
+
+ /* Check inline refs */
+ while (ptr < end) {
+ struct btrfs_extent_inline_ref *iref;
+ struct btrfs_extent_data_ref *dref;
+ struct btrfs_shared_data_ref *sref;
+ u64 dref_offset;
+ u64 inline_offset;
+ u8 inline_type;
+
+ if (ptr + sizeof(*iref) > end) {
+ extent_err(leaf, slot,
+"inline ref item overflows extent item, ptr %lu iref size %zu end %lu",
+ ptr, sizeof(*iref), end);
+ return -EUCLEAN;
+ }
+ iref = (struct btrfs_extent_inline_ref *)ptr;
+ inline_type = btrfs_extent_inline_ref_type(leaf, iref);
+ inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
+ if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) {
+ extent_err(leaf, slot,
+"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
+				   ptr, btrfs_extent_inline_ref_size(inline_type), end);
+ return -EUCLEAN;
+ }
+
+ switch (inline_type) {
+ /* inline_offset is subvolid of the owner, no need to check */
+ case BTRFS_TREE_BLOCK_REF_KEY:
+ inline_refs++;
+ break;
+ /* Contains parent bytenr */
+ case BTRFS_SHARED_BLOCK_REF_KEY:
+ if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
+ extent_err(leaf, slot,
+ "invalid tree parent bytenr, have %llu expect aligned to %u",
+ inline_offset, fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ inline_refs++;
+ break;
+ /*
+ * Contains owner subvolid, owner key objectid, adjusted offset.
+		 * The only obvious corruption that can happen is in the offset.
+ */
+ case BTRFS_EXTENT_DATA_REF_KEY:
+ dref = (struct btrfs_extent_data_ref *)(&iref->offset);
+ dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
+ if (!IS_ALIGNED(dref_offset, fs_info->sectorsize)) {
+ extent_err(leaf, slot,
+ "invalid data ref offset, have %llu expect aligned to %u",
+ dref_offset, fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ inline_refs += btrfs_extent_data_ref_count(leaf, dref);
+ break;
+ /* Contains parent bytenr and ref count */
+ case BTRFS_SHARED_DATA_REF_KEY:
+ sref = (struct btrfs_shared_data_ref *)(iref + 1);
+ if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
+ extent_err(leaf, slot,
+ "invalid data parent bytenr, have %llu expect aligned to %u",
+ inline_offset, fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ inline_refs += btrfs_shared_data_ref_count(leaf, sref);
+ break;
+ default:
+ extent_err(leaf, slot, "unknown inline ref type: %u",
+ inline_type);
+ return -EUCLEAN;
+ }
+ ptr += btrfs_extent_inline_ref_size(inline_type);
+ }
+ /* No padding is allowed */
+ if (ptr != end) {
+ extent_err(leaf, slot,
+ "invalid extent item size, padding bytes found");
+ return -EUCLEAN;
+ }
+
+ /* Finally, check the inline refs against total refs */
+ if (inline_refs > total_refs) {
+ extent_err(leaf, slot,
+ "invalid extent refs, have %llu expect >= inline %llu",
+ total_refs, inline_refs);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
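A hedged sketch (not part of the patch) of the two invariants the inline ref walk above enforces: every record must fit inside the item, and the walk must consume the item exactly with no trailing padding. The record sizes below are invented for the example.

#include <stdio.h>

/* Stand-in for btrfs_extent_inline_ref_size(); the sizes are made up. */
static unsigned int ref_size(unsigned int type)
{
	return type == 0 ? 9 : 13;
}

static int walk(unsigned int item_size, const unsigned int *types, int nr)
{
	unsigned int ptr = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (ptr + ref_size(types[i]) > item_size)
			return -1;	/* inline ref overflows the item */
		ptr += ref_size(types[i]);
	}
	/* no padding allowed: the walk must land exactly on the item end */
	return ptr == item_size ? 0 : -1;
}

int main(void)
{
	unsigned int good[] = { 0, 1 };	/* 9 + 13 == 22 bytes */
	unsigned int bad[] = { 1, 1 };	/* 13 + 13 overflows a 22 byte item */

	printf("%d %d\n", walk(22, good, 2), walk(22, bad, 2));	/* 0 -1 */
	return 0;
}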
+static int check_simple_keyed_refs(struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ u32 expect_item_size = 0;
+
+ if (key->type == BTRFS_SHARED_DATA_REF_KEY)
+ expect_item_size = sizeof(struct btrfs_shared_data_ref);
+
+ if (btrfs_item_size_nr(leaf, slot) != expect_item_size) {
+ generic_err(leaf, slot,
+ "invalid item size, have %u expect %u for key type %u",
+ btrfs_item_size_nr(leaf, slot),
+ expect_item_size, key->type);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
+ generic_err(leaf, slot,
+"invalid key objectid for shared block ref, have %llu expect aligned to %u",
+ key->objectid, leaf->fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ if (key->type != BTRFS_TREE_BLOCK_REF_KEY &&
+ !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize)) {
+ extent_err(leaf, slot,
+ "invalid tree parent bytenr, have %llu expect aligned to %u",
+ key->offset, leaf->fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
+static int check_extent_data_ref(struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_extent_data_ref *dref;
+ unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
+ const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot);
+
+ if (btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0) {
+ generic_err(leaf, slot,
+ "invalid item size, have %u expect aligned to %zu for key type %u",
+ btrfs_item_size_nr(leaf, slot),
+ sizeof(*dref), key->type);
+		return -EUCLEAN;
+	}
+ if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
+ generic_err(leaf, slot,
+"invalid key objectid for shared block ref, have %llu expect aligned to %u",
+ key->objectid, leaf->fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ for (; ptr < end; ptr += sizeof(*dref)) {
+ u64 root_objectid;
+ u64 owner;
+ u64 offset;
+ u64 hash;
+
+ dref = (struct btrfs_extent_data_ref *)ptr;
+ root_objectid = btrfs_extent_data_ref_root(leaf, dref);
+ owner = btrfs_extent_data_ref_objectid(leaf, dref);
+ offset = btrfs_extent_data_ref_offset(leaf, dref);
+ hash = hash_extent_data_ref(root_objectid, owner, offset);
+ if (hash != key->offset) {
+ extent_err(leaf, slot,
+ "invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
+ hash, key->offset);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref offset, have %llu expect aligned to %u",
+ offset, leaf->fs_info->sectorsize);
+			return -EUCLEAN;
+		}
+ }
+ return 0;
+}
+
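/*
 * Illustrative aside (not part of the patch): an EXTENT_DATA_REF key's offset
 * is a hash of (root, objectid, offset), so references that collide on that
 * hash are packed back to back inside one item.  That is why the item size
 * must be a multiple of sizeof(struct btrfs_extent_data_ref) and why every
 * packed record is re-hashed and compared against key->offset in the loop
 * above.
 */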
+#define inode_ref_err(fs_info, eb, slot, fmt, args...) \
+ inode_item_err(fs_info, eb, slot, fmt, ##args)
+static int check_inode_ref(struct extent_buffer *leaf,
+ struct btrfs_key *key, struct btrfs_key *prev_key,
+ int slot)
+{
+ struct btrfs_inode_ref *iref;
+ unsigned long ptr;
+ unsigned long end;
+
+ if (!check_prev_ino(leaf, key, slot, prev_key))
+ return -EUCLEAN;
+ /* namelen can't be 0, so item_size == sizeof() is also invalid */
+ if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) {
+ inode_ref_err(fs_info, leaf, slot,
+ "invalid item size, have %u expect (%zu, %u)",
+ btrfs_item_size_nr(leaf, slot),
+ sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
+ return -EUCLEAN;
+ }
+
+ ptr = btrfs_item_ptr_offset(leaf, slot);
+ end = ptr + btrfs_item_size_nr(leaf, slot);
+ while (ptr < end) {
+ u16 namelen;
+
+		if (ptr + sizeof(*iref) > end) {
+			inode_ref_err(fs_info, leaf, slot,
+			"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
+				ptr, end, sizeof(*iref));
+ return -EUCLEAN;
+ }
+
+ iref = (struct btrfs_inode_ref *)ptr;
+ namelen = btrfs_inode_ref_name_len(leaf, iref);
+ if (ptr + sizeof(*iref) + namelen > end) {
+ inode_ref_err(fs_info, leaf, slot,
+ "inode ref overflow, ptr %lu end %lu namelen %u",
+ ptr, end, namelen);
+ return -EUCLEAN;
+ }
+
+ /*
+ * NOTE: In theory we should record all found index numbers
+ * to find any duplicated indexes, but that will be too time
+ * consuming for inodes with too many hard links.
+ */
+ ptr += sizeof(*iref) + namelen;
+ }
+ return 0;
+}
+
/*
* Common point to switch the item-specific validation.
*/
@@ -841,7 +1360,10 @@ static int check_leaf_item(struct extent_buffer *leaf,
case BTRFS_DIR_ITEM_KEY:
case BTRFS_DIR_INDEX_KEY:
case BTRFS_XATTR_ITEM_KEY:
- ret = check_dir_item(leaf, key, slot);
+ ret = check_dir_item(leaf, key, prev_key, slot);
+ break;
+ case BTRFS_INODE_REF_KEY:
+ ret = check_inode_ref(leaf, key, prev_key, slot);
break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
ret = check_block_group_item(leaf, key, slot);
@@ -856,6 +1378,21 @@ static int check_leaf_item(struct extent_buffer *leaf,
case BTRFS_INODE_ITEM_KEY:
ret = check_inode_item(leaf, key, slot);
break;
+ case BTRFS_ROOT_ITEM_KEY:
+ ret = check_root_item(leaf, key, slot);
+ break;
+ case BTRFS_EXTENT_ITEM_KEY:
+ case BTRFS_METADATA_ITEM_KEY:
+ ret = check_extent_item(leaf, key, slot);
+ break;
+ case BTRFS_TREE_BLOCK_REF_KEY:
+ case BTRFS_SHARED_DATA_REF_KEY:
+ case BTRFS_SHARED_BLOCK_REF_KEY:
+ ret = check_simple_keyed_refs(leaf, key, slot);
+ break;
+ case BTRFS_EXTENT_DATA_REF_KEY:
+ ret = check_extent_data_ref(leaf, key, slot);
+ break;
}
return ret;
}
@@ -899,6 +1436,12 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
owner);
return -EUCLEAN;
}
+ /* Unknown tree */
+ if (owner == 0) {
+ generic_err(leaf, 0,
+ "invalid owner, root 0 is not defined");
+ return -EUCLEAN;
+ }
return 0;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 6c8297bcfeb7..6f757361db53 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -8,6 +8,7 @@
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
+#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
@@ -24,10 +25,12 @@
* LOG_INODE_EXISTS means to log just enough to recreate the inode
* during log replay
*/
-#define LOG_INODE_ALL 0
-#define LOG_INODE_EXISTS 1
-#define LOG_OTHER_INODE 2
-#define LOG_OTHER_INODE_ALL 3
+enum {
+ LOG_INODE_ALL,
+ LOG_INODE_EXISTS,
+ LOG_OTHER_INODE,
+ LOG_OTHER_INODE_ALL,
+};
/*
* directory trouble cases
@@ -81,10 +84,12 @@
* The last stage is to deal with directories and links and extents
* and all the other fun semantics
*/
-#define LOG_WALK_PIN_ONLY 0
-#define LOG_WALK_REPLAY_INODES 1
-#define LOG_WALK_REPLAY_DIR_INDEX 2
-#define LOG_WALK_REPLAY_ALL 3
+enum {
+ LOG_WALK_PIN_ONLY,
+ LOG_WALK_REPLAY_INODES,
+ LOG_WALK_REPLAY_DIR_INDEX,
+ LOG_WALK_REPLAY_ALL,
+};
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_inode *inode,
@@ -188,10 +193,6 @@ static int join_running_log_trans(struct btrfs_root *root)
{
int ret = -ENOENT;
- smp_mb();
- if (!root->log_root)
- return -ENOENT;
-
mutex_lock(&root->log_mutex);
if (root->log_root) {
ret = 0;
@@ -505,7 +506,7 @@ insert:
ino_size != 0) {
struct btrfs_map_token token;
- btrfs_init_map_token(&token);
+ btrfs_init_map_token(&token, dst_eb);
btrfs_set_token_inode_size(dst_eb, dst_item,
ino_size, &token);
}
@@ -558,7 +559,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
key.objectid = objectid;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(root->fs_info->sb, &key, root);
if (IS_ERR(inode))
inode = NULL;
return inode;
@@ -944,54 +945,32 @@ static noinline int backref_in_log(struct btrfs_root *log,
const char *name, int namelen)
{
struct btrfs_path *path;
- struct btrfs_inode_ref *ref;
- unsigned long ptr;
- unsigned long ptr_end;
- unsigned long name_ptr;
- int found_name_len;
- int item_size;
int ret;
- int match = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
- if (ret != 0)
+ if (ret < 0) {
goto out;
-
- ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
-
- if (key->type == BTRFS_INODE_EXTREF_KEY) {
- if (btrfs_find_name_in_ext_backref(path->nodes[0],
- path->slots[0],
- ref_objectid,
- name, namelen, NULL))
- match = 1;
-
+ } else if (ret == 1) {
+ ret = 0;
goto out;
}
- item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
- ptr_end = ptr + item_size;
- while (ptr < ptr_end) {
- ref = (struct btrfs_inode_ref *)ptr;
- found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
- if (found_name_len == namelen) {
- name_ptr = (unsigned long)(ref + 1);
- ret = memcmp_extent_buffer(path->nodes[0], name,
- name_ptr, namelen);
- if (ret == 0) {
- match = 1;
- goto out;
- }
- }
- ptr = (unsigned long)(ref + 1) + found_name_len;
- }
+ if (key->type == BTRFS_INODE_EXTREF_KEY)
+ ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
+ path->slots[0],
+ ref_objectid,
+ name, namelen);
+ else
+ ret = !!btrfs_find_name_in_backref(path->nodes[0],
+ path->slots[0],
+ name, namelen);
out:
btrfs_free_path(path);
- return match;
+ return ret;
}
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
@@ -1049,10 +1028,13 @@ again:
(unsigned long)(victim_ref + 1),
victim_name_len);
- if (!backref_in_log(log_root, &search_key,
- parent_objectid,
- victim_name,
- victim_name_len)) {
+ ret = backref_in_log(log_root, &search_key,
+ parent_objectid, victim_name,
+ victim_name_len);
+ if (ret < 0) {
+ kfree(victim_name);
+ return ret;
+ } else if (!ret) {
inc_nlink(&inode->vfs_inode);
btrfs_release_path(path);
@@ -1114,10 +1096,12 @@ again:
search_key.offset = btrfs_extref_hash(parent_objectid,
victim_name,
victim_name_len);
- ret = 0;
- if (!backref_in_log(log_root, &search_key,
- parent_objectid, victim_name,
- victim_name_len)) {
+ ret = backref_in_log(log_root, &search_key,
+ parent_objectid, victim_name,
+ victim_name_len);
+ if (ret < 0) {
+ return ret;
+ } else if (!ret) {
ret = -ENOENT;
victim_parent = read_one_inode(root,
parent_objectid);
@@ -1266,12 +1250,12 @@ again:
goto out;
if (key->type == BTRFS_INODE_EXTREF_KEY)
- ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
- parent_id, name,
- namelen, NULL);
+ ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
+ parent_id, name,
+ namelen);
else
- ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
- namelen, NULL);
+ ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
+ name, namelen);
if (!ret) {
struct inode *dir;
@@ -1333,12 +1317,11 @@ static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
goto out;
}
if (key.type == BTRFS_INODE_EXTREF_KEY)
- ret = btrfs_find_name_in_ext_backref(path->nodes[0],
- path->slots[0], parent_id,
- name, namelen, NULL);
+ ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
+ path->slots[0], parent_id, name, namelen);
else
- ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
- name, namelen, NULL);
+ ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
+ name, namelen);
out:
btrfs_free_path(path);
@@ -1885,30 +1868,6 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
}
/*
- * Return true if an inode reference exists in the log for the given name,
- * inode and parent inode.
- */
-static bool name_in_log_ref(struct btrfs_root *log_root,
- const char *name, const int name_len,
- const u64 dirid, const u64 ino)
-{
- struct btrfs_key search_key;
-
- search_key.objectid = ino;
- search_key.type = BTRFS_INODE_REF_KEY;
- search_key.offset = dirid;
- if (backref_in_log(log_root, &search_key, dirid, name, name_len))
- return true;
-
- search_key.type = BTRFS_INODE_EXTREF_KEY;
- search_key.offset = btrfs_extref_hash(dirid, name, name_len);
- if (backref_in_log(log_root, &search_key, dirid, name, name_len))
- return true;
-
- return false;
-}
-
-/*
* take a single entry in a log directory item and replay it into
* the subvolume.
*
@@ -2024,8 +1983,31 @@ out:
return ret;
insert:
- if (name_in_log_ref(root->log_root, name, name_len,
- key->objectid, log_key.objectid)) {
+ /*
+ * Check if the inode reference exists in the log for the given name,
+ * inode and parent inode
+ */
+ found_key.objectid = log_key.objectid;
+ found_key.type = BTRFS_INODE_REF_KEY;
+ found_key.offset = key->objectid;
+ ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
+ /* The dentry will be added later. */
+ ret = 0;
+ update_size = false;
+ goto out;
+ }
+
+ found_key.objectid = log_key.objectid;
+ found_key.type = BTRFS_INODE_EXTREF_KEY;
+ found_key.offset = key->objectid;
+ ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
+ name_len);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
/* The dentry will be added later. */
ret = 0;
update_size = false;
@@ -2869,7 +2851,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
level = btrfs_header_level(log->node);
orig_level = level;
path->nodes[level] = log->node;
- extent_buffer_get(log->node);
+ atomic_inc(&log->node->refs);
path->slots[level] = 0;
while (1) {
@@ -2932,7 +2914,8 @@ out:
* in the tree of log roots
*/
static int update_log_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *log)
+ struct btrfs_root *log,
+ struct btrfs_root_item *root_item)
{
struct btrfs_fs_info *fs_info = log->fs_info;
int ret;
@@ -2940,10 +2923,10 @@ static int update_log_root(struct btrfs_trans_handle *trans,
if (log->log_transid == 1) {
/* insert root item on the first sync */
ret = btrfs_insert_root(trans, fs_info->log_root_tree,
- &log->root_key, &log->root_item);
+ &log->root_key, root_item);
} else {
ret = btrfs_update_root(trans, fs_info->log_root_tree,
- &log->root_key, &log->root_item);
+ &log->root_key, root_item);
}
return ret;
}
@@ -3041,6 +3024,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root;
struct btrfs_root *log_root_tree = fs_info->log_root_tree;
+ struct btrfs_root_item new_root_item;
int log_transid = 0;
struct btrfs_log_ctx root_log_ctx;
struct blk_plug plug;
@@ -3104,18 +3088,26 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out;
}
+ /*
+ * We _must_ update under the root->log_mutex in order to make sure we
+ * have a consistent view of the log root we are trying to commit at
+ * this moment.
+ *
+	 * We _must_ make a local copy, because we are not holding the
+ * log_root_tree->log_mutex yet. This is important because when we
+ * commit the log_root_tree we must have a consistent view of the
+ * log_root_tree when we update the super block to point at the
+ * log_root_tree bytenr. If we update the log_root_tree here we'll race
+ * with the commit and possibly point at the new block which we may not
+ * have written out.
+ */
btrfs_set_root_node(&log->root_item, log->node);
+ memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
root->log_transid++;
log->log_transid = root->log_transid;
root->log_start_pid = 0;
/*
- * Update or create log root item under the root's log_mutex to prevent
- * races with concurrent log syncs that can lead to failure to update
- * log root item because it was not created yet.
- */
- ret = update_log_root(trans, log);
- /*
* IO has been started, blocks of the log tree have WRITTEN flag set
* in their headers. new modifications of the log will be written to
* new positions. so it's safe to allow log writers to go in.
@@ -3135,6 +3127,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&log_root_tree->log_mutex);
mutex_lock(&log_root_tree->log_mutex);
+
+ /*
+ * Now we are safe to update the log_root_tree because we're under the
+ * log_mutex, and we're a current writer so we're holding the commit
+ * open until we drop the log_mutex.
+ */
+ ret = update_log_root(trans, log, &new_root_item);
+
if (atomic_dec_and_test(&log_root_tree->log_writers)) {
/* atomic_dec_and_test implies a barrier */
cond_wake_up_nomb(&log_root_tree->log_writer_wait);
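The comments and the moved update_log_root() call above implement a copy-then-publish pattern: snapshot the root item while holding root->log_mutex, then write the snapshot out later under log_root_tree->log_mutex. A minimal pthread sketch of the same idea, assuming nothing btrfs-specific:

#include <pthread.h>
#include <stdio.h>

struct root_item { unsigned long long bytenr, generation; };

static pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t log_root_tree_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct root_item live_item;	/* updated by concurrent log writers */
static struct root_item on_disk_item;	/* what the superblock will point at */

static void sync_log(void)
{
	struct root_item snapshot;

	pthread_mutex_lock(&log_mutex);
	snapshot = live_item;		/* consistent view taken here */
	pthread_mutex_unlock(&log_mutex);

	/* other writers may keep changing live_item here */

	pthread_mutex_lock(&log_root_tree_mutex);
	on_disk_item = snapshot;	/* never a half-updated view */
	pthread_mutex_unlock(&log_root_tree_mutex);
}

int main(void)
{
	live_item = (struct root_item){ .bytenr = 4096, .generation = 7 };
	sync_log();
	printf("%llu %llu\n", on_disk_item.bytenr, on_disk_item.generation);
	return 0;
}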
@@ -3842,7 +3842,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
{
struct btrfs_map_token token;
- btrfs_init_map_token(&token);
+ btrfs_init_map_token(&token, leaf);
if (log_inode_only) {
/* set the generation to zero so the recover code
@@ -4302,8 +4302,6 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- btrfs_init_map_token(&token);
-
ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
em->start + em->len, NULL, 0, 1,
sizeof(*fi), &extent_inserted);
@@ -4321,6 +4319,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
return ret;
}
leaf = path->nodes[0];
+ btrfs_init_map_token(&token, leaf);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -4966,7 +4965,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
key.objectid = ino;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
/*
* If the other inode that had a conflicting dir entry was
* deleted in the current transaction, we need to log its parent
@@ -4976,8 +4975,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
ret = PTR_ERR(inode);
if (ret == -ENOENT) {
key.objectid = parent;
- inode = btrfs_iget(fs_info->sb, &key, root,
- NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
} else {
@@ -4985,7 +4983,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
BTRFS_I(inode),
LOG_OTHER_INODE_ALL,
0, LLONG_MAX, ctx);
- iput(inode);
+ btrfs_add_delayed_iput(inode);
}
}
continue;
@@ -5000,7 +4998,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
if (ret) {
- iput(inode);
+ btrfs_add_delayed_iput(inode);
continue;
}
@@ -5009,7 +5007,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
key.offset = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
- iput(inode);
+ btrfs_add_delayed_iput(inode);
continue;
}
@@ -5056,7 +5054,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
}
path->slots[0]++;
}
- iput(inode);
+ btrfs_add_delayed_iput(inode);
}
return ret;
@@ -5682,14 +5680,14 @@ process_leaf:
continue;
btrfs_release_path(path);
- di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
+ di_inode = btrfs_iget(fs_info->sb, &di_key, root);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
goto next_dir_inode;
}
if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
break;
}
@@ -5701,7 +5699,7 @@ process_leaf:
if (!ret &&
btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
ret = 1;
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
if (ret)
goto next_dir_inode;
if (ctx->log_new_dentries) {
@@ -5808,8 +5806,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
cur_offset = item_size;
}
- dir_inode = btrfs_iget(fs_info->sb, &inode_key,
- root, NULL);
+ dir_inode = btrfs_iget(fs_info->sb, &inode_key, root);
/*
* If the parent inode was deleted, return an error to
* fallback to a transaction commit. This is to prevent
@@ -5848,7 +5845,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
if (!ret && ctx && ctx->log_new_dentries)
ret = log_new_dir_dentries(trans, root,
BTRFS_I(dir_inode), ctx);
- iput(dir_inode);
+ btrfs_add_delayed_iput(dir_inode);
if (ret)
goto out;
}
@@ -5883,7 +5880,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
search_key.objectid = found_key.offset;
search_key.type = BTRFS_INODE_ITEM_KEY;
search_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &search_key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &search_key, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -5891,7 +5888,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
LOG_INODE_EXISTS,
0, LLONG_MAX, ctx);
- iput(inode);
+ btrfs_add_delayed_iput(inode);
if (ret)
return ret;
@@ -6233,7 +6230,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
struct walk_control wc = {
.process_func = process_one_buffer,
- .stage = 0,
+ .stage = LOG_WALK_PIN_ONLY,
};
path = btrfs_alloc_path();
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a13ddba1ebc3..d8e5560db285 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -14,6 +14,7 @@
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
+#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
@@ -24,11 +25,11 @@
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
-#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
+#include "block-group.h"
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = {
@@ -57,6 +58,30 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
},
+ [BTRFS_RAID_RAID1C3] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 3,
+ .tolerated_failures = 2,
+ .devs_increment = 3,
+ .ncopies = 3,
+ .raid_name = "raid1c3",
+ .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
+ .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
+ },
+ [BTRFS_RAID_RAID1C4] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 4,
+ .tolerated_failures = 3,
+ .devs_increment = 4,
+ .ncopies = 4,
+ .raid_name = "raid1c4",
+ .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
+ .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
+ },
[BTRFS_RAID_DUP] = {
.sub_stripes = 1,
.dev_stripes = 2,
@@ -190,7 +215,6 @@ out_overflow:;
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
-static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
@@ -297,7 +321,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
-struct list_head *btrfs_get_fs_uuids(void)
+struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
return &fs_uuids;
}
@@ -358,19 +382,6 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
kfree(fs_devices);
}
-static void btrfs_kobject_uevent(struct block_device *bdev,
- enum kobject_action action)
-{
- int ret;
-
- ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
- if (ret)
- pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
- action,
- kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
- &disk_to_dev(bdev->bd_disk)->kobj);
-}
-
void __exit btrfs_cleanup_fs_uuids(void)
{
struct btrfs_fs_devices *fs_devices;
@@ -410,8 +421,6 @@ static struct btrfs_device *__alloc_device(void)
INIT_LIST_HEAD(&dev->dev_alloc_list);
INIT_LIST_HEAD(&dev->post_commit_list);
- spin_lock_init(&dev->io_lock);
-
atomic_set(&dev->reada_in_flight, 0);
atomic_set(&dev->dev_stats_ccnt, 0);
btrfs_device_data_ordered_init(dev);
@@ -514,212 +523,6 @@ error:
return ret;
}
-static void requeue_list(struct btrfs_pending_bios *pending_bios,
- struct bio *head, struct bio *tail)
-{
-
- struct bio *old_head;
-
- old_head = pending_bios->head;
- pending_bios->head = head;
- if (pending_bios->tail)
- tail->bi_next = old_head;
- else
- pending_bios->tail = tail;
-}
-
-/*
- * we try to collect pending bios for a device so we don't get a large
- * number of procs sending bios down to the same device. This greatly
- * improves the schedulers ability to collect and merge the bios.
- *
- * But, it also turns into a long list of bios to process and that is sure
- * to eventually make the worker thread block. The solution here is to
- * make some progress and then put this work struct back at the end of
- * the list if the block device is congested. This way, multiple devices
- * can make progress from a single worker thread.
- */
-static noinline void run_scheduled_bios(struct btrfs_device *device)
-{
- struct btrfs_fs_info *fs_info = device->fs_info;
- struct bio *pending;
- struct backing_dev_info *bdi;
- struct btrfs_pending_bios *pending_bios;
- struct bio *tail;
- struct bio *cur;
- int again = 0;
- unsigned long num_run;
- unsigned long batch_run = 0;
- unsigned long last_waited = 0;
- int force_reg = 0;
- int sync_pending = 0;
- struct blk_plug plug;
-
- /*
- * this function runs all the bios we've collected for
- * a particular device. We don't want to wander off to
- * another device without first sending all of these down.
- * So, setup a plug here and finish it off before we return
- */
- blk_start_plug(&plug);
-
- bdi = device->bdev->bd_bdi;
-
-loop:
- spin_lock(&device->io_lock);
-
-loop_lock:
- num_run = 0;
-
- /* take all the bios off the list at once and process them
- * later on (without the lock held). But, remember the
- * tail and other pointers so the bios can be properly reinserted
- * into the list if we hit congestion
- */
- if (!force_reg && device->pending_sync_bios.head) {
- pending_bios = &device->pending_sync_bios;
- force_reg = 1;
- } else {
- pending_bios = &device->pending_bios;
- force_reg = 0;
- }
-
- pending = pending_bios->head;
- tail = pending_bios->tail;
- WARN_ON(pending && !tail);
-
- /*
- * if pending was null this time around, no bios need processing
- * at all and we can stop. Otherwise it'll loop back up again
- * and do an additional check so no bios are missed.
- *
- * device->running_pending is used to synchronize with the
- * schedule_bio code.
- */
- if (device->pending_sync_bios.head == NULL &&
- device->pending_bios.head == NULL) {
- again = 0;
- device->running_pending = 0;
- } else {
- again = 1;
- device->running_pending = 1;
- }
-
- pending_bios->head = NULL;
- pending_bios->tail = NULL;
-
- spin_unlock(&device->io_lock);
-
- while (pending) {
-
- rmb();
- /* we want to work on both lists, but do more bios on the
- * sync list than the regular list
- */
- if ((num_run > 32 &&
- pending_bios != &device->pending_sync_bios &&
- device->pending_sync_bios.head) ||
- (num_run > 64 && pending_bios == &device->pending_sync_bios &&
- device->pending_bios.head)) {
- spin_lock(&device->io_lock);
- requeue_list(pending_bios, pending, tail);
- goto loop_lock;
- }
-
- cur = pending;
- pending = pending->bi_next;
- cur->bi_next = NULL;
-
- BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
-
- /*
- * if we're doing the sync list, record that our
- * plug has some sync requests on it
- *
- * If we're doing the regular list and there are
- * sync requests sitting around, unplug before
- * we add more
- */
- if (pending_bios == &device->pending_sync_bios) {
- sync_pending = 1;
- } else if (sync_pending) {
- blk_finish_plug(&plug);
- blk_start_plug(&plug);
- sync_pending = 0;
- }
-
- btrfsic_submit_bio(cur);
- num_run++;
- batch_run++;
-
- cond_resched();
-
- /*
- * we made progress, there is more work to do and the bdi
- * is now congested. Back off and let other work structs
- * run instead
- */
- if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
- fs_info->fs_devices->open_devices > 1) {
- struct io_context *ioc;
-
- ioc = current->io_context;
-
- /*
- * the main goal here is that we don't want to
- * block if we're going to be able to submit
- * more requests without blocking.
- *
- * This code does two great things, it pokes into
- * the elevator code from a filesystem _and_
- * it makes assumptions about how batching works.
- */
- if (ioc && ioc->nr_batch_requests > 0 &&
- time_before(jiffies, ioc->last_waited + HZ/50UL) &&
- (last_waited == 0 ||
- ioc->last_waited == last_waited)) {
- /*
- * we want to go through our batch of
- * requests and stop. So, we copy out
- * the ioc->last_waited time and test
- * against it before looping
- */
- last_waited = ioc->last_waited;
- cond_resched();
- continue;
- }
- spin_lock(&device->io_lock);
- requeue_list(pending_bios, pending, tail);
- device->running_pending = 1;
-
- spin_unlock(&device->io_lock);
- btrfs_queue_work(fs_info->submit_workers,
- &device->work);
- goto done;
- }
- }
-
- cond_resched();
- if (again)
- goto loop;
-
- spin_lock(&device->io_lock);
- if (device->pending_bios.head || device->pending_sync_bios.head)
- goto loop_lock;
- spin_unlock(&device->io_lock);
-
-done:
- blk_finish_plug(&plug);
-}
-
-static void pending_bios_fn(struct btrfs_work *work)
-{
- struct btrfs_device *device;
-
- device = container_of(work, struct btrfs_device, work);
- run_scheduled_bios(device);
-}
-
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
int found;
@@ -831,7 +634,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
}
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
- fs_devices->seeding = 1;
+ fs_devices->seeding = true;
} else {
if (bdev_read_only(bdev))
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
@@ -841,7 +644,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
q = bdev_get_queue(bdev);
if (!blk_queue_nonrot(q))
- fs_devices->rotating = 1;
+ fs_devices->rotating = true;
device->bdev = bdev;
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
@@ -1018,11 +821,15 @@ static noinline struct btrfs_device *device_list_add(const char *path,
*new_device_added = true;
if (disk_super->label[0])
- pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
- disk_super->label, devid, found_transid, path);
+ pr_info(
+ "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
+ disk_super->label, devid, found_transid, path,
+ current->comm, task_pid_nr(current));
else
- pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
- disk_super->fsid, devid, found_transid, path);
+ pr_info(
+ "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
+ disk_super->fsid, devid, found_transid, path,
+ current->comm, task_pid_nr(current));
} else if (!device->name || strcmp(device->name->str, path)) {
/*
@@ -1128,6 +935,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
struct btrfs_fs_devices *fs_devices;
struct btrfs_device *device;
struct btrfs_device *orig_dev;
+ int ret = 0;
fs_devices = alloc_fs_devices(orig->fsid, NULL);
if (IS_ERR(fs_devices))
@@ -1141,8 +949,10 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
device = btrfs_alloc_device(NULL, &orig_dev->devid,
orig_dev->uuid);
- if (IS_ERR(device))
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
goto error;
+ }
/*
* This is ok to do without rcu read locked because we hold the
@@ -1153,6 +963,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
GFP_KERNEL);
if (!name) {
btrfs_free_device(device);
+ ret = -ENOMEM;
goto error;
}
rcu_assign_pointer(device->name, name);
@@ -1167,7 +978,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
error:
mutex_unlock(&orig->device_list_mutex);
free_fs_devices(fs_devices);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
/*
@@ -1304,7 +1115,7 @@ static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
WARN_ON(fs_devices->open_devices);
WARN_ON(fs_devices->rw_devices);
fs_devices->opened = 0;
- fs_devices->seeding = 0;
+ fs_devices->seeding = false;
return 0;
}
@@ -1551,9 +1362,16 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
* @len is used to store the size of the free space that we find.
* But if we don't find suitable free space, it is used to store the size of
* the max free space.
+ *
+ * NOTE: This function will search *commit* root of device tree, and does extra
+ * check to ensure dev extents are not double allocated.
+ * This makes the function safe for allocating dev extents, but it may not
+ * report the correct usable device space, as a device extent freed in the
+ * current transaction is not reported as available.
*/
-int find_free_dev_extent_start(struct btrfs_device *device, u64 num_bytes,
- u64 search_start, u64 *start, u64 *len)
+static int find_free_dev_extent_start(struct btrfs_device *device,
+ u64 num_bytes, u64 search_start, u64 *start,
+ u64 *len)
{
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_root *root = fs_info->dev_root;
@@ -1855,7 +1673,12 @@ static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
if (ret < 0)
goto error;
- BUG_ON(ret == 0); /* Corruption */
+ if (ret == 0) {
+ /* Corruption */
+ btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
+ ret = -EUCLEAN;
+ goto error;
+ }
ret = btrfs_previous_item(fs_info->chunk_root, path,
BTRFS_DEV_ITEMS_OBJECTID,
@@ -2045,7 +1868,7 @@ static struct btrfs_device * btrfs_find_next_active_device(
 * where this function is called, there should always be another device (or
* this_dev) which is active.
*/
-void btrfs_assign_next_active_device(struct btrfs_device *device,
+void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
struct btrfs_device *this_dev)
{
struct btrfs_fs_info *fs_info = device->fs_info;
@@ -2447,11 +2270,11 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
mutex_unlock(&fs_info->chunk_mutex);
- fs_devices->seeding = 0;
+ fs_devices->seeding = false;
fs_devices->num_devices = 0;
fs_devices->open_devices = 0;
fs_devices->missing_devices = 0;
- fs_devices->rotating = 0;
+ fs_devices->rotating = false;
fs_devices->seed = seed_devices;
generate_random_uuid(fs_devices->fsid);
@@ -2646,7 +2469,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
if (!blk_queue_nonrot(q))
- fs_devices->rotating = 1;
+ fs_devices->rotating = true;
orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
btrfs_set_super_total_bytes(fs_info->super_copy,
@@ -2686,22 +2509,14 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
if (seeding_dev) {
- char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
-
ret = btrfs_finish_sprout(trans);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
}
- /* Sprouting would change fsid of the mounted root,
- * so rename the fsid on the sysfs
- */
- snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
- fs_info->fs_devices->fsid);
- if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
- btrfs_warn(fs_info,
- "sysfs: failed to create fsid for sprout");
+ btrfs_sysfs_update_sprout_fsid(fs_devices,
+ fs_info->fs_devices->fsid);
}
ret = btrfs_commit_transaction(trans);
@@ -3076,10 +2891,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
*/
lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
- ret = btrfs_can_relocate(fs_info, chunk_offset);
- if (ret)
- return -ENOSPC;
-
/* step one, relocate all the extents inside this chunk */
btrfs_scrub_pause(fs_info);
ret = btrfs_relocate_block_group(fs_info, chunk_offset);
@@ -3087,16 +2898,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
if (ret)
return ret;
- /*
- * We add the kobjects here (and after forcing data chunk creation)
- * since relocation is the only place we'll create chunks of a new
- * type at runtime. The only place where we'll remove the last
- * chunk of a type is the call immediately below this one. Even
- * so, we're protected against races with the cleaner thread since
- * we're covered by the delete_unused_bgs_mutex.
- */
- btrfs_add_raid_kobjects(fs_info);
-
trans = btrfs_start_trans_remove_block_group(root->fs_info,
chunk_offset);
if (IS_ERR(trans)) {
@@ -3196,7 +2997,7 @@ error:
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
u64 chunk_offset)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
u64 bytes_used;
u64 chunk_type;
@@ -3205,30 +3006,28 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
chunk_type = cache->flags;
btrfs_put_block_group(cache);
- if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
- spin_lock(&fs_info->data_sinfo->lock);
- bytes_used = fs_info->data_sinfo->bytes_used;
- spin_unlock(&fs_info->data_sinfo->lock);
-
- if (!bytes_used) {
- struct btrfs_trans_handle *trans;
- int ret;
+ if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
+ return 0;
- trans = btrfs_join_transaction(fs_info->tree_root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ spin_lock(&fs_info->data_sinfo->lock);
+ bytes_used = fs_info->data_sinfo->bytes_used;
+ spin_unlock(&fs_info->data_sinfo->lock);
- ret = btrfs_force_chunk_alloc(trans,
- BTRFS_BLOCK_GROUP_DATA);
- btrfs_end_transaction(trans);
- if (ret < 0)
- return ret;
+ if (!bytes_used) {
+ struct btrfs_trans_handle *trans;
+ int ret;
- btrfs_add_raid_kobjects(fs_info);
+ trans = btrfs_join_transaction(fs_info->tree_root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
- return 1;
- }
+ ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
+ btrfs_end_transaction(trans);
+ if (ret < 0)
+ return ret;
+ return 1;
}
+
return 0;
}
@@ -3407,28 +3206,28 @@ static int chunk_profiles_filter(u64 chunk_type,
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
struct btrfs_balance_args *bargs)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
u64 chunk_used;
u64 user_thresh_min;
u64 user_thresh_max;
int ret = 1;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
- chunk_used = btrfs_block_group_used(&cache->item);
+ chunk_used = cache->used;
if (bargs->usage_min == 0)
user_thresh_min = 0;
else
- user_thresh_min = div_factor_fine(cache->key.offset,
- bargs->usage_min);
+ user_thresh_min = div_factor_fine(cache->length,
+ bargs->usage_min);
if (bargs->usage_max == 0)
user_thresh_max = 1;
else if (bargs->usage_max > 100)
- user_thresh_max = cache->key.offset;
+ user_thresh_max = cache->length;
else
- user_thresh_max = div_factor_fine(cache->key.offset,
- bargs->usage_max);
+ user_thresh_max = div_factor_fine(cache->length,
+ bargs->usage_max);
if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
ret = 0;
@@ -3440,20 +3239,19 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
u64 chunk_offset, struct btrfs_balance_args *bargs)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
u64 chunk_used, user_thresh;
int ret = 1;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
- chunk_used = btrfs_block_group_used(&cache->item);
+ chunk_used = cache->used;
if (bargs->usage_min == 0)
user_thresh = 1;
else if (bargs->usage > 100)
- user_thresh = cache->key.offset;
+ user_thresh = cache->length;
else
- user_thresh = div_factor_fine(cache->key.offset,
- bargs->usage);
+ user_thresh = div_factor_fine(cache->length, bargs->usage);
if (chunk_used < user_thresh)
ret = 0;
@@ -3866,8 +3664,7 @@ static int alloc_profile_is_valid(u64 flags, int extended)
if (flags == 0)
return !extended; /* "0" is valid for usual profiles */
- /* true if exactly one bit set */
- return is_power_of_2(flags);
+ return has_single_bit_set(flags);
}
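has_single_bit_set() from the new misc.h is assumed here to keep the old "exactly one bit set" semantics that the removed comment described; a user-space sketch of the property being checked on profile masks:

#include <stdio.h>
#include <stdint.h>

/* assumed equivalent of the misc.h helper: true iff exactly one bit is set */
static int has_single_bit_set(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       has_single_bit_set(0),		/* 0: no profile bit at all */
	       has_single_bit_set(1ULL << 3),	/* 1: a single profile bit */
	       has_single_bit_set(0x18));	/* 0: two profile bits at once */
	return 0;
}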
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
@@ -4054,7 +3851,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
int ret;
u64 num_devices;
unsigned seq;
- bool reducing_integrity;
+ bool reducing_redundancy;
int i;
if (btrfs_fs_closing(fs_info) ||
@@ -4085,7 +3882,13 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
}
num_devices = btrfs_num_devices(fs_info);
- allowed = 0;
+
+ /*
+ * SINGLE profile on-disk has no profile bit, but in-memory we have a
+ * special bit for it, to make it easier to distinguish. Thus we need
+ * to set it manually, or balance would refuse the profile.
+ */
+ allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
if (num_devices >= btrfs_raid_array[i].devs_min)
allowed |= btrfs_raid_array[i].bg_flag;
@@ -4131,9 +3934,9 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
(fs_info->avail_metadata_alloc_bits & allowed) &&
!(bctl->meta.target & allowed)))
- reducing_integrity = true;
+ reducing_redundancy = true;
else
- reducing_integrity = false;
+ reducing_redundancy = false;
/* if we're not converting, the target field is uninitialized */
meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
@@ -4142,13 +3945,13 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
bctl->data.target : fs_info->avail_data_alloc_bits;
} while (read_seqretry(&fs_info->profiles_lock, seq));
- if (reducing_integrity) {
+ if (reducing_redundancy) {
if (bctl->flags & BTRFS_BALANCE_FORCE) {
btrfs_info(fs_info,
- "balance: force reducing metadata integrity");
+ "balance: force reducing metadata redundancy");
} else {
btrfs_err(fs_info,
- "balance: reduces metadata integrity, use --force if you want this");
+ "balance: reduces metadata redundancy, use --force if you want this");
ret = -EINVAL;
goto out;
}
@@ -4914,6 +4717,14 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
btrfs_set_fs_incompat(info, RAID56);
}
+static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
+{
+ if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
+ return;
+
+ btrfs_set_fs_incompat(info, RAID1C34);
+}
+
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
u64 start, u64 type)
{
@@ -4979,6 +4790,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
max_stripe_size = SZ_32M;
max_chunk_size = 2 * max_stripe_size;
+ devs_max = min_t(int, devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
} else {
btrfs_err(info, "invalid chunk type 0x%llx requested",
type);
@@ -5059,8 +4871,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
btrfs_cmp_device_info, NULL);
- /* round down to number of usable stripes */
- ndevs = round_down(ndevs, devs_increment);
+ /*
+ * Round down to number of usable stripes, devs_increment can be any
+ * number so we can't use round_down()
+ */
+ ndevs -= ndevs % devs_increment;
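	/*
	 * Illustrative aside (not part of the patch): round_down() masks with
	 * (devs_increment - 1), which is only correct for power-of-two
	 * increments.  With ndevs = 7 and devs_increment = 3 (e.g. raid1c3),
	 * 7 - 7 % 3 = 6 is the wanted result, while 7 & ~(3 - 1) = 5 is not
	 * even a multiple of 3.
	 */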
if (ndevs < devs_min) {
ret = -ENOSPC;
@@ -5176,6 +4991,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
free_extent_map(em);
check_raid56_incompat_flag(info, type);
+ check_raid1c34_incompat_flag(info, type);
kfree(devices_info);
return 0;
@@ -5594,12 +5410,13 @@ void btrfs_put_bbio(struct btrfs_bio *bbio)
* replace.
*/
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length,
+ u64 logical, u64 *length_ret,
struct btrfs_bio **bbio_ret)
{
struct extent_map *em;
struct map_lookup *map;
struct btrfs_bio *bbio;
+ u64 length = *length_ret;
u64 offset;
u64 stripe_nr;
u64 stripe_nr_end;
@@ -5632,7 +5449,8 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
}
offset = logical - em->start;
- length = min_t(u64, em->len - offset, length);
+ length = min_t(u64, em->start + em->len - logical, length);
+ *length_ret = length;
stripe_len = map->stripe_len;
/*
@@ -5941,6 +5759,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 stripe_len;
u64 raid56_full_stripe_start = (u64)-1;
int data_stripes;
+ int ret = 0;
ASSERT(op != BTRFS_MAP_DISCARD);
@@ -5961,8 +5780,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
stripe_offset, offset, em->start, logical, stripe_len);
- free_extent_map(em);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* stripe_offset is the offset of this block in its stripe */
@@ -6009,7 +5828,10 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
io_geom->stripe_offset = stripe_offset;
io_geom->raid56_stripe_offset = raid56_full_stripe_start;
- return 0;
+out:
+ /* once for us */
+ free_extent_map(em);
+ return ret;
}
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
@@ -6020,7 +5842,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
{
struct extent_map *em;
struct map_lookup *map;
- u64 offset;
u64 stripe_offset;
u64 stripe_nr;
u64 stripe_len;
@@ -6044,18 +5865,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
if (op == BTRFS_MAP_DISCARD)
return __btrfs_map_block_for_discard(fs_info, logical,
- *length, bbio_ret);
+ length, bbio_ret);
ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
if (ret < 0)
return ret;
em = btrfs_get_chunk_map(fs_info, logical, *length);
- ASSERT(em);
+ ASSERT(!IS_ERR(em));
map = em->map_lookup;
*length = geom.len;
- offset = geom.offset;
stripe_len = geom.stripe_len;
stripe_nr = geom.stripe_nr;
stripe_offset = geom.stripe_offset;
@@ -6425,52 +6245,8 @@ static void btrfs_end_bio(struct bio *bio)
}
}
-/*
- * see run_scheduled_bios for a description of why bios are collected for
- * async submit.
- *
- * This will add one bio to the pending list for a device and make sure
- * the work struct is scheduled.
- */
-static noinline void btrfs_schedule_bio(struct btrfs_device *device,
- struct bio *bio)
-{
- struct btrfs_fs_info *fs_info = device->fs_info;
- int should_queue = 1;
- struct btrfs_pending_bios *pending_bios;
-
- /* don't bother with additional async steps for reads, right now */
- if (bio_op(bio) == REQ_OP_READ) {
- btrfsic_submit_bio(bio);
- return;
- }
-
- WARN_ON(bio->bi_next);
- bio->bi_next = NULL;
-
- spin_lock(&device->io_lock);
- if (op_is_sync(bio->bi_opf))
- pending_bios = &device->pending_sync_bios;
- else
- pending_bios = &device->pending_bios;
-
- if (pending_bios->tail)
- pending_bios->tail->bi_next = bio;
-
- pending_bios->tail = bio;
- if (!pending_bios->head)
- pending_bios->head = bio;
- if (device->running_pending)
- should_queue = 0;
-
- spin_unlock(&device->io_lock);
-
- if (should_queue)
- btrfs_queue_work(fs_info->submit_workers, &device->work);
-}
-
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
- u64 physical, int dev_nr, int async)
+ u64 physical, int dev_nr)
{
struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
struct btrfs_fs_info *fs_info = bbio->fs_info;
@@ -6488,10 +6264,7 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
btrfs_bio_counter_inc_noblocked(fs_info);
- if (async)
- btrfs_schedule_bio(dev, bio);
- else
- btrfsic_submit_bio(bio);
+ btrfsic_submit_bio(bio);
}
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
@@ -6512,7 +6285,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
}
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
- int mirror_num, int async_submit)
+ int mirror_num)
{
struct btrfs_device *dev;
struct bio *first_bio = bio;
@@ -6581,7 +6354,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio = first_bio;
submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
- dev_nr, async_submit);
+ dev_nr);
}
btrfs_bio_counter_dec(fs_info);
return BLK_STS_OK;
@@ -6685,9 +6458,6 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
else
generate_random_uuid(dev->uuid);
- btrfs_init_work(&dev->work, btrfs_submit_helper,
- pending_bios_fn, NULL, NULL);
-
return dev;
}
@@ -6884,7 +6654,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
if (IS_ERR(fs_devices))
return fs_devices;
- fs_devices->seeding = 1;
+ fs_devices->seeding = true;
fs_devices->opened = 1;
return fs_devices;
}
@@ -7073,48 +6843,49 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
sb_array_offset += len;
cur_offset += len;
- if (key.type == BTRFS_CHUNK_ITEM_KEY) {
- chunk = (struct btrfs_chunk *)sb_array_offset;
- /*
- * At least one btrfs_chunk with one stripe must be
- * present, exact stripe count check comes afterwards
- */
- len = btrfs_chunk_item_size(1);
- if (cur_offset + len > array_size)
- goto out_short_read;
-
- num_stripes = btrfs_chunk_num_stripes(sb, chunk);
- if (!num_stripes) {
- btrfs_err(fs_info,
- "invalid number of stripes %u in sys_array at offset %u",
- num_stripes, cur_offset);
- ret = -EIO;
- break;
- }
+ if (key.type != BTRFS_CHUNK_ITEM_KEY) {
+ btrfs_err(fs_info,
+ "unexpected item type %u in sys_array at offset %u",
+ (u32)key.type, cur_offset);
+ ret = -EIO;
+ break;
+ }
- type = btrfs_chunk_type(sb, chunk);
- if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
- btrfs_err(fs_info,
- "invalid chunk type %llu in sys_array at offset %u",
- type, cur_offset);
- ret = -EIO;
- break;
- }
+ chunk = (struct btrfs_chunk *)sb_array_offset;
+ /*
+ * At least one btrfs_chunk with one stripe must be present,
+ * exact stripe count check comes afterwards
+ */
+ len = btrfs_chunk_item_size(1);
+ if (cur_offset + len > array_size)
+ goto out_short_read;
- len = btrfs_chunk_item_size(num_stripes);
- if (cur_offset + len > array_size)
- goto out_short_read;
+ num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+ if (!num_stripes) {
+ btrfs_err(fs_info,
+ "invalid number of stripes %u in sys_array at offset %u",
+ num_stripes, cur_offset);
+ ret = -EIO;
+ break;
+ }
- ret = read_one_chunk(&key, sb, chunk);
- if (ret)
- break;
- } else {
+ type = btrfs_chunk_type(sb, chunk);
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
btrfs_err(fs_info,
- "unexpected item type %u in sys_array at offset %u",
- (u32)key.type, cur_offset);
+ "invalid chunk type %llu in sys_array at offset %u",
+ type, cur_offset);
ret = -EIO;
break;
}
+
+ len = btrfs_chunk_item_size(num_stripes);
+ if (cur_offset + len > array_size)
+ goto out_short_read;
+
+ ret = read_one_chunk(&key, sb, chunk);
+ if (ret)
+ break;
+
array_ptr += len;
sb_array_offset += len;
cur_offset += len;
@@ -7305,18 +7076,32 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
}
}
-static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
+static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
+ const struct btrfs_dev_stats_item *ptr,
+ int index)
{
- int i;
+ u64 val;
- for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
- btrfs_dev_stat_reset(dev, i);
+ read_extent_buffer(eb, &val,
+ offsetof(struct btrfs_dev_stats_item, values) +
+ ((unsigned long)ptr) + (index * sizeof(u64)),
+ sizeof(val));
+ return val;
+}
+
+static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
+ struct btrfs_dev_stats_item *ptr,
+ int index, u64 val)
+{
+ write_extent_buffer(eb, &val,
+ offsetof(struct btrfs_dev_stats_item, values) +
+ ((unsigned long)ptr) + (index * sizeof(u64)),
+ sizeof(val));
}
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
struct btrfs_key key;
- struct btrfs_key found_key;
struct btrfs_root *dev_root = fs_info->dev_root;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct extent_buffer *eb;
@@ -7327,10 +7112,8 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
int i;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
@@ -7342,14 +7125,14 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
key.offset = device->devid;
ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
if (ret) {
- __btrfs_reset_dev_stats(device);
+ for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+ btrfs_dev_stat_set(device, i, 0);
device->dev_stats_valid = 1;
btrfs_release_path(path);
continue;
}
slot = path->slots[0];
eb = path->nodes[0];
- btrfs_item_key_to_cpu(eb, &found_key, slot);
item_size = btrfs_item_size_nr(eb, slot);
ptr = btrfs_item_ptr(eb, slot,
@@ -7360,7 +7143,7 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
btrfs_dev_stat_set(device, i,
btrfs_dev_stats_value(eb, ptr, i));
else
- btrfs_dev_stat_reset(device, i);
+ btrfs_dev_stat_set(device, i, 0);
}
device->dev_stats_valid = 1;
@@ -7369,7 +7152,6 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
}
mutex_unlock(&fs_devices->device_list_mutex);
-out:
btrfs_free_path(path);
return ret < 0 ? ret : 0;
}
@@ -7543,7 +7325,7 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
stats->values[i] =
btrfs_dev_stat_read_and_reset(dev, i);
else
- btrfs_dev_stat_reset(dev, i);
+ btrfs_dev_stat_set(dev, i, 0);
}
} else {
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
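
The two helpers added above, btrfs_dev_stats_value() and btrfs_set_dev_stats_value(), locate the i-th counter of the on-disk dev-stats item purely by byte offset: the item's position in the extent buffer, plus offsetof(..., values), plus index * sizeof(u64). A minimal userspace sketch of the same offset arithmetic; the one-field struct and memcpy() are illustrative stand-ins for the real item layout and read_extent_buffer(), not kernel code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* illustrative stand-in for struct btrfs_dev_stats_item */
struct dev_stats_item {
	uint64_t values[5];
};

/* same offset arithmetic as btrfs_dev_stats_value(), on a plain buffer */
static uint64_t stats_value(const unsigned char *buf, size_t item_off, int index)
{
	uint64_t val;

	memcpy(&val, buf + item_off +
		     offsetof(struct dev_stats_item, values) +
		     index * sizeof(uint64_t), sizeof(val));
	return val;
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct dev_stats_item item = { .values = { 10, 11, 12, 13, 14 } };

	memcpy(buf + 8, &item, sizeof(item));	/* pretend the item sits at offset 8 */
	printf("value[3] = %llu\n", (unsigned long long)stats_value(buf, 8, 3));
	return 0;
}
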
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 7f6aa1816409..fc1b564b9cfe 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -18,10 +18,6 @@ extern struct mutex uuid_mutex;
#define BTRFS_STRIPE_LEN SZ_64K
struct buffer_head;
-struct btrfs_pending_bios {
- struct bio *head;
- struct bio *tail;
-};
struct btrfs_io_geometry {
/* remaining bytes before crossing a stripe */
@@ -68,13 +64,6 @@ struct btrfs_device {
u64 generation;
- spinlock_t io_lock ____cacheline_aligned;
- int running_pending;
- /* regular prio bios */
- struct btrfs_pending_bios pending_bios;
- /* sync bios */
- struct btrfs_pending_bios pending_sync_bios;
-
struct block_device *bdev;
/* the mode sent to blkdev_get */
@@ -82,7 +71,6 @@ struct btrfs_device {
unsigned long dev_state;
blk_status_t last_flush_error;
- int flush_bio_sent;
#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
seqcount_t data_seqcount;
@@ -255,14 +243,14 @@ struct btrfs_fs_devices {
struct list_head alloc_list;
struct btrfs_fs_devices *seed;
- int seeding;
+ bool seeding;
int opened;
/* set when we find or add a device that doesn't have the
* nonrot flag set
*/
- int rotating;
+ bool rotating;
struct btrfs_fs_info *fs_info;
/* sysfs kobjects */
@@ -331,7 +319,6 @@ struct btrfs_bio {
u64 map_type; /* get from map_lookup->type */
bio_end_io_t *end_io;
struct bio *orig_bio;
- unsigned long flags;
void *private;
atomic_t error;
int max_errors;
@@ -437,7 +424,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
- int mirror_num, int async_submit);
+ int mirror_num);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
@@ -475,8 +462,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
-int find_free_dev_extent_start(struct btrfs_device *device, u64 num_bytes,
- u64 search_start, u64 *start, u64 *max_avail);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
@@ -550,12 +535,6 @@ static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
atomic_inc(&dev->dev_stats_ccnt);
}
-static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
- int index)
-{
- btrfs_dev_stat_set(dev, index, 0);
-}
-
/*
* Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
* can be used as index to access btrfs_raid_array[].
@@ -566,6 +545,10 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
return BTRFS_RAID_RAID10;
else if (flags & BTRFS_BLOCK_GROUP_RAID1)
return BTRFS_RAID_RAID1;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
+ return BTRFS_RAID_RAID1C3;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
+ return BTRFS_RAID_RAID1C4;
else if (flags & BTRFS_BLOCK_GROUP_DUP)
return BTRFS_RAID_DUP;
else if (flags & BTRFS_BLOCK_GROUP_RAID0)
@@ -580,7 +563,7 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
void btrfs_commit_device_sizes(struct btrfs_transaction *trans);
-struct list_head *btrfs_get_fs_uuids(void);
+struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
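
One header change above marks btrfs_get_fs_uuids() with __attribute_const__. As a hedged aside, that is the GCC/Clang const function attribute: the compiler may assume the return value depends only on the arguments and that the call has no side effects, so repeated calls can be folded. A tiny standalone illustration (not kernel code):

#include <stdio.h>

/* the optimizer may evaluate this once and reuse the result */
__attribute__((const)) static int square(int x)
{
	return x * x;
}

int main(void)
{
	printf("%d %d\n", square(7), square(7));
	return 0;
}
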
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index b86b7ad6b900..a6c90a003c12 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -29,19 +29,9 @@ struct workspace {
static struct workspace_manager wsm;
-static void zlib_init_workspace_manager(void)
+struct list_head *zlib_get_workspace(unsigned int level)
{
- btrfs_init_workspace_manager(&wsm, &btrfs_zlib_compress);
-}
-
-static void zlib_cleanup_workspace_manager(void)
-{
- btrfs_cleanup_workspace_manager(&wsm);
-}
-
-static struct list_head *zlib_get_workspace(unsigned int level)
-{
- struct list_head *ws = btrfs_get_workspace(&wsm, level);
+ struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level);
struct workspace *workspace = list_entry(ws, struct workspace, list);
workspace->level = level;
@@ -49,12 +39,7 @@ static struct list_head *zlib_get_workspace(unsigned int level)
return ws;
}
-static void zlib_put_workspace(struct list_head *ws)
-{
- btrfs_put_workspace(&wsm, ws);
-}
-
-static void zlib_free_workspace(struct list_head *ws)
+void zlib_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -63,7 +48,7 @@ static void zlib_free_workspace(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *zlib_alloc_workspace(unsigned int level)
+struct list_head *zlib_alloc_workspace(unsigned int level)
{
struct workspace *workspace;
int workspacesize;
@@ -88,13 +73,9 @@ fail:
return ERR_PTR(-ENOMEM);
}
-static int zlib_compress_pages(struct list_head *ws,
- struct address_space *mapping,
- u64 start,
- struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out)
+int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret;
@@ -228,7 +209,7 @@ out:
return ret;
}
-static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
+int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0, ret2;
@@ -319,10 +300,9 @@ done:
return ret;
}
-static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page,
- unsigned long start_byte,
- size_t srclen, size_t destlen)
+int zlib_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0;
@@ -418,23 +398,8 @@ next:
return ret;
}
-static unsigned int zlib_set_level(unsigned int level)
-{
- if (!level)
- return BTRFS_ZLIB_DEFAULT_LEVEL;
-
- return min_t(unsigned int, level, 9);
-}
-
const struct btrfs_compress_op btrfs_zlib_compress = {
- .init_workspace_manager = zlib_init_workspace_manager,
- .cleanup_workspace_manager = zlib_cleanup_workspace_manager,
- .get_workspace = zlib_get_workspace,
- .put_workspace = zlib_put_workspace,
- .alloc_workspace = zlib_alloc_workspace,
- .free_workspace = zlib_free_workspace,
- .compress_pages = zlib_compress_pages,
- .decompress_bio = zlib_decompress_bio,
- .decompress = zlib_decompress,
- .set_level = zlib_set_level,
+ .workspace_manager = &wsm,
+ .max_level = 9,
+ .default_level = BTRFS_ZLIB_DEFAULT_LEVEL,
};
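
With zlib_set_level() gone, btrfs_zlib_compress only exports max_level and default_level; the level selection is presumably done once by the generic compression code. A sketch of that logic under that assumption — pick_level() and the cut-down struct are illustrative names, not the kernel API:

#include <stdio.h>

struct compress_op {		/* cut-down stand-in for btrfs_compress_op */
	unsigned int max_level;
	unsigned int default_level;
};

static unsigned int pick_level(const struct compress_op *op, unsigned int req)
{
	if (!req)
		return op->default_level;	/* 0 means "use the default" */
	return req < op->max_level ? req : op->max_level;
}

int main(void)
{
	struct compress_op zlib = { .max_level = 9, .default_level = 3 };

	printf("%u %u %u\n",
	       pick_level(&zlib, 0),	/* 3 */
	       pick_level(&zlib, 6),	/* 6 */
	       pick_level(&zlib, 15));	/* 9 */
	return 0;
}
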
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 3837ca180d52..9a4871636c6c 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/zstd.h>
+#include "misc.h"
#include "compression.h"
#include "ctree.h"
@@ -90,9 +91,8 @@ static inline struct workspace *list_to_workspace(struct list_head *list)
return container_of(list, struct workspace, list);
}
-static void zstd_free_workspace(struct list_head *ws);
-static struct list_head *zstd_alloc_workspace(unsigned int level);
-
+void zstd_free_workspace(struct list_head *ws);
+struct list_head *zstd_alloc_workspace(unsigned int level);
/*
* zstd_reclaim_timer_fn - reclaim timer
* @t: timer
@@ -167,7 +167,7 @@ static void zstd_calc_ws_mem_sizes(void)
}
}
-static void zstd_init_workspace_manager(void)
+void zstd_init_workspace_manager(void)
{
struct list_head *ws;
int i;
@@ -193,7 +193,7 @@ static void zstd_init_workspace_manager(void)
}
}
-static void zstd_cleanup_workspace_manager(void)
+void zstd_cleanup_workspace_manager(void)
{
struct workspace *workspace;
int i;
@@ -260,7 +260,7 @@ static struct list_head *zstd_find_workspace(unsigned int level)
* attempt to allocate a new workspace. If we fail to allocate one due to
* memory pressure, go to sleep waiting for the max level workspace to free up.
*/
-static struct list_head *zstd_get_workspace(unsigned int level)
+struct list_head *zstd_get_workspace(unsigned int level)
{
struct list_head *ws;
unsigned int nofs_flag;
@@ -301,7 +301,7 @@ again:
* isn't set, it is also set here. Only the max level workspace tries and wakes
* up waiting workspaces.
*/
-static void zstd_put_workspace(struct list_head *ws)
+void zstd_put_workspace(struct list_head *ws)
{
struct workspace *workspace = list_to_workspace(ws);
@@ -331,7 +331,7 @@ static void zstd_put_workspace(struct list_head *ws)
cond_wake_up(&wsm.wait);
}
-static void zstd_free_workspace(struct list_head *ws)
+void zstd_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -340,7 +340,7 @@ static void zstd_free_workspace(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *zstd_alloc_workspace(unsigned int level)
+struct list_head *zstd_alloc_workspace(unsigned int level)
{
struct workspace *workspace;
@@ -366,13 +366,9 @@ fail:
return ERR_PTR(-ENOMEM);
}
-static int zstd_compress_pages(struct list_head *ws,
- struct address_space *mapping,
- u64 start,
- struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out)
+int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
ZSTD_CStream *stream;
@@ -547,7 +543,7 @@ out:
return ret;
}
-static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
+int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
struct page **pages_in = cb->compressed_pages;
@@ -625,10 +621,9 @@ done:
return ret;
}
-static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page,
- unsigned long start_byte,
- size_t srclen, size_t destlen)
+int zstd_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
ZSTD_DStream *stream;
@@ -710,23 +705,9 @@ finish:
return ret;
}
-static unsigned int zstd_set_level(unsigned int level)
-{
- if (!level)
- return ZSTD_BTRFS_DEFAULT_LEVEL;
-
- return min_t(unsigned int, level, ZSTD_BTRFS_MAX_LEVEL);
-}
-
const struct btrfs_compress_op btrfs_zstd_compress = {
- .init_workspace_manager = zstd_init_workspace_manager,
- .cleanup_workspace_manager = zstd_cleanup_workspace_manager,
- .get_workspace = zstd_get_workspace,
- .put_workspace = zstd_put_workspace,
- .alloc_workspace = zstd_alloc_workspace,
- .free_workspace = zstd_free_workspace,
- .compress_pages = zstd_compress_pages,
- .decompress_bio = zstd_decompress_bio,
- .decompress = zstd_decompress,
- .set_level = zstd_set_level,
+ /* ZSTD uses own workspace manager */
+ .workspace_manager = NULL,
+ .max_level = ZSTD_BTRFS_MAX_LEVEL,
+ .default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
};
diff --git a/fs/buffer.c b/fs/buffer.c
index 86a38b979323..d8c7242426bb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -47,6 +47,9 @@
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
+#include <linux/fscrypt.h>
+
+#include "internal.h"
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
@@ -246,10 +249,6 @@ out:
return ret;
}
-/*
- * I/O completion handler for block_read_full_page() - pages
- * which come unlocked at the end of I/O.
- */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
unsigned long flags;
@@ -307,6 +306,47 @@ still_busy:
return;
}
+struct decrypt_bh_ctx {
+ struct work_struct work;
+ struct buffer_head *bh;
+};
+
+static void decrypt_bh(struct work_struct *work)
+{
+ struct decrypt_bh_ctx *ctx =
+ container_of(work, struct decrypt_bh_ctx, work);
+ struct buffer_head *bh = ctx->bh;
+ int err;
+
+ err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
+ bh_offset(bh));
+ end_buffer_async_read(bh, err == 0);
+ kfree(ctx);
+}
+
+/*
+ * I/O completion handler for block_read_full_page() - pages
+ * which come unlocked at the end of I/O.
+ */
+static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
+{
+ /* Decrypt if needed */
+ if (uptodate && IS_ENABLED(CONFIG_FS_ENCRYPTION) &&
+ IS_ENCRYPTED(bh->b_page->mapping->host) &&
+ S_ISREG(bh->b_page->mapping->host->i_mode)) {
+ struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+
+ if (ctx) {
+ INIT_WORK(&ctx->work, decrypt_bh);
+ ctx->bh = bh;
+ fscrypt_enqueue_decrypt_work(&ctx->work);
+ return;
+ }
+ uptodate = 0;
+ }
+ end_buffer_async_read(bh, uptodate);
+}
+
/*
* Completion handler for block_write_full_page() - pages which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion.
@@ -379,7 +419,7 @@ EXPORT_SYMBOL(end_buffer_async_write);
*/
static void mark_buffer_async_read(struct buffer_head *bh)
{
- bh->b_end_io = end_buffer_async_read;
+ bh->b_end_io = end_buffer_async_read_io;
set_buffer_async_read(bh);
}
@@ -1385,10 +1425,10 @@ static bool has_bh_in_lru(int cpu, void *dummy)
for (i = 0; i < BH_LRU_SIZE; i++) {
if (b->bhs[i])
- return 1;
+ return true;
}
- return 0;
+ return false;
}
void invalidate_bh_lrus(void)
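
The buffer.c hunk moves fscrypt decryption out of the read-completion path: end_buffer_async_read_io() allocates a small context with GFP_ATOMIC, queues decrypt_bh() on the fscrypt workqueue, and falls back to marking the buffer not up to date if the allocation fails. A userspace sketch of that "allocate a context, hand the heavy work to a worker" pattern; the immediate run_work() call stands in for fscrypt_enqueue_decrypt_work() and is not the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct work {
	void (*fn)(struct work *);
};

struct decrypt_ctx {
	struct work work;	/* must stay first so the cast below is valid */
	int block_nr;		/* stand-in for the buffer_head */
};

static void run_work(struct work *w)	/* stand-in for the workqueue */
{
	w->fn(w);
}

static void decrypt_worker(struct work *w)
{
	struct decrypt_ctx *ctx = (struct decrypt_ctx *)w;

	printf("decrypting block %d outside the completion path\n",
	       ctx->block_nr);
	free(ctx);		/* ctx is freed by the worker, as in decrypt_bh() */
}

static void io_complete(int block_nr)
{
	struct decrypt_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx) {
		/* no memory: fail the read, mirroring uptodate = 0 */
		fprintf(stderr, "block %d: no memory, marking !uptodate\n",
			block_nr);
		return;
	}
	ctx->work.fn = decrypt_worker;
	ctx->block_nr = block_nr;
	run_work(&ctx->work);
}

int main(void)
{
	io_complete(42);
	return 0;
}
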
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index a699e320393f..c1da294418d1 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_CEPH_FS) += ceph.o
ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
- export.o caps.o snap.o xattr.o quota.o \
+ export.o caps.o snap.o xattr.o quota.o io.o \
mds_client.o mdsmap.o strings.o ceph_frag.o \
debugfs.o
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e078cc55b989..7ab616601141 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -189,8 +189,7 @@ static int ceph_do_readpage(struct file *filp, struct page *page)
{
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc =
- &ceph_inode_to_client(inode)->client->osdc;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
int err = 0;
u64 off = page_offset(page);
u64 len = PAGE_SIZE;
@@ -219,8 +218,8 @@ static int ceph_do_readpage(struct file *filp, struct page *page)
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
- err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
- off, &len,
+ err = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
+ &ci->i_layout, off, &len,
ci->i_truncate_seq, ci->i_truncate_size,
&page, 1, 0);
if (err == -ENOENT)
@@ -228,6 +227,8 @@ static int ceph_do_readpage(struct file *filp, struct page *page)
if (err < 0) {
SetPageError(page);
ceph_fscache_readpage_cancel(inode, page);
+ if (err == -EBLACKLISTED)
+ fsc->blacklisted = true;
goto out;
}
if (err < PAGE_SIZE)
@@ -266,6 +267,8 @@ static void finish_read(struct ceph_osd_request *req)
int i;
dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
+ if (rc == -EBLACKLISTED)
+ ceph_inode_to_client(inode)->blacklisted = true;
/* unlock all pages, zeroing any data we didn't read */
osd_data = osd_req_op_extent_osd_data(req, 0);
@@ -323,7 +326,8 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
/* caller of readpages does not hold buffer and read caps
* (fadvise, madvise and readahead cases) */
int want = CEPH_CAP_FILE_CACHE;
- ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, true, &got);
+ ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want,
+ true, &got);
if (ret < 0) {
dout("start_read %p, error getting cap\n", inode);
} else if (!(got & want)) {
@@ -569,7 +573,7 @@ static u64 get_writepages_data_length(struct inode *inode,
/*
* Write a single page, but leave the page locked.
*
- * If we get a write error, set the page error bit, but still adjust the
+ * If we get a write error, mark the mapping for error, but still adjust the
* dirty page accounting (i.e., page is no longer dirty).
*/
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
@@ -640,9 +644,10 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
end_page_writeback(page);
return err;
}
+ if (err == -EBLACKLISTED)
+ fsc->blacklisted = true;
dout("writepage setting page/mapping error %d %p\n",
err, page);
- SetPageError(page);
mapping_set_error(&inode->i_data, err);
wbc->pages_skipped++;
} else {
@@ -680,23 +685,6 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
}
/*
- * lame release_pages helper. release_pages() isn't exported to
- * modules.
- */
-static void ceph_release_pages(struct page **pages, int num)
-{
- struct pagevec pvec;
- int i;
-
- pagevec_init(&pvec);
- for (i = 0; i < num; i++) {
- if (pagevec_add(&pvec, pages[i]) == 0)
- pagevec_release(&pvec);
- }
- pagevec_release(&pvec);
-}
-
-/*
* async writeback completion handler.
*
* If we get an error, set the mapping error bit, but not the individual
@@ -720,6 +708,8 @@ static void writepages_finish(struct ceph_osd_request *req)
if (rc < 0) {
mapping_set_error(mapping, rc);
ceph_set_error_write(ci);
+ if (rc == -EBLACKLISTED)
+ fsc->blacklisted = true;
} else {
ceph_clear_error_write(ci);
}
@@ -769,7 +759,7 @@ static void writepages_finish(struct ceph_osd_request *req)
dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
inode, osd_data->length, rc >= 0 ? num_pages : 0);
- ceph_release_pages(osd_data->pages, num_pages);
+ release_pages(osd_data->pages, num_pages);
}
ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);
@@ -913,8 +903,9 @@ get_more_pages:
if (page_offset(page) >= ceph_wbc.i_size) {
dout("%p page eof %llu\n",
page, ceph_wbc.i_size);
- if (ceph_wbc.size_stable ||
- page_offset(page) >= i_size_read(inode))
+ if ((ceph_wbc.size_stable ||
+ page_offset(page) >= i_size_read(inode)) &&
+ clear_page_dirty_for_io(page))
mapping->a_ops->invalidatepage(page,
0, PAGE_SIZE);
unlock_page(page);
@@ -1451,7 +1442,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
want = CEPH_CAP_FILE_CACHE;
got = 0;
- err = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
+ err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1,
+ &got, &pinned_page);
if (err < 0)
goto out_restore;
@@ -1539,6 +1531,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
if (!prealloc_cf)
return VM_FAULT_OOM;
+ sb_start_pagefault(inode->i_sb);
ceph_block_sigs(&oldset);
if (ci->i_inline_version != CEPH_INLINE_NONE) {
@@ -1567,7 +1560,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
want = CEPH_CAP_FILE_BUFFER;
got = 0;
- err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
+ err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len,
&got, NULL);
if (err < 0)
goto out_free;
@@ -1613,6 +1606,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
ceph_put_cap_refs(ci, got);
out_free:
ceph_restore_sigs(&oldset);
+ sb_end_pagefault(inode->i_sb);
ceph_free_cap_flush(prealloc_cf);
if (err < 0)
ret = vmf_error(err);
@@ -1945,12 +1939,17 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
if (err >= 0 || err == -ENOENT)
have |= POOL_READ;
- else if (err != -EPERM)
+ else if (err != -EPERM) {
+ if (err == -EBLACKLISTED)
+ fsc->blacklisted = true;
goto out_unlock;
+ }
if (err2 == 0 || err2 == -EEXIST)
have |= POOL_WRITE;
else if (err2 != -EPERM) {
+ if (err2 == -EBLACKLISTED)
+ fsc->blacklisted = true;
err = err2;
goto out_unlock;
}
@@ -1988,10 +1987,11 @@ out:
return err;
}
-int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
+int ceph_pool_perm_check(struct inode *inode, int need)
{
- s64 pool;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_string *pool_ns;
+ s64 pool;
int ret, flags;
if (ci->i_vino.snap != CEPH_NOSNAP) {
@@ -2003,7 +2003,7 @@ int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
return 0;
}
- if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
+ if (ceph_test_mount_opt(ceph_inode_to_client(inode),
NOPOOLPERM))
return 0;
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index bc90cf6ad7ed..73f24f307a4a 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -6,6 +6,9 @@
* Written by Milosz Tanski (milosz@adfin.com)
*/
+#include <linux/ceph/ceph_debug.h>
+
+#include <linux/fs_context.h>
#include "super.h"
#include "cache.h"
@@ -47,7 +50,7 @@ void ceph_fscache_unregister(void)
fscache_unregister_netfs(&ceph_cache_netfs);
}
-int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
+int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
{
const struct ceph_fsid *fsid = &fsc->client->fsid;
const char *fscache_uniq = fsc->mount_options->fscache_uniq;
@@ -64,8 +67,8 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
continue;
- pr_err("fscache cookie already registered for fsid %pU\n", fsid);
- pr_err(" use fsc=%%s mount option to specify a uniquifier\n");
+ errorf(fc, "ceph: fscache cookie already registered for fsid %pU, use fsc=<uniquifier> option",
+ fsid);
err = -EBUSY;
goto out_unlock;
}
@@ -93,7 +96,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
list_add_tail(&ent->list, &ceph_fscache_list);
} else {
kfree(ent);
- pr_err("unable to register fscache cookie for fsid %pU\n",
+ errorf(fc, "ceph: unable to register fscache cookie for fsid %pU",
fsid);
/* all other fs ignore this error */
}
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index e486fac3434d..89dbdd1eb14a 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -16,7 +16,7 @@ extern struct fscache_netfs ceph_cache_netfs;
int ceph_fscache_register(void);
void ceph_fscache_unregister(void);
-int ceph_fscache_register_fs(struct ceph_fs_client* fsc);
+int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc);
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);
void ceph_fscache_register_inode_cookie(struct inode *inode);
@@ -88,7 +88,8 @@ static inline void ceph_fscache_unregister(void)
{
}
-static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
+static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc,
+ struct fs_context *fc)
{
return 0;
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index d98dcd976c80..f5a38910a82b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -458,37 +458,6 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
}
/*
- * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
- */
-static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
-{
- struct ceph_cap *cap;
- int mds = -1;
- struct rb_node *p;
-
- /* prefer mds with WR|BUFFER|EXCL caps */
- for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
- cap = rb_entry(p, struct ceph_cap, ci_node);
- mds = cap->mds;
- if (cap->issued & (CEPH_CAP_FILE_WR |
- CEPH_CAP_FILE_BUFFER |
- CEPH_CAP_FILE_EXCL))
- break;
- }
- return mds;
-}
-
-int ceph_get_cap_mds(struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- int mds;
- spin_lock(&ci->i_ceph_lock);
- mds = __ceph_get_cap_mds(ceph_inode(inode));
- spin_unlock(&ci->i_ceph_lock);
- return mds;
-}
-
-/*
* Called under i_ceph_lock.
*/
static void __insert_cap_node(struct ceph_inode_info *ci,
@@ -628,7 +597,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
/*
* Add a capability under the given MDS session.
*
- * Caller should hold session snap_rwsem (read) and s_mutex.
+ * Caller should hold session snap_rwsem (read) and ci->i_ceph_lock
*
* @fmode is the open file mode, if we are opening a file, otherwise
* it is < 0. (This is so we can atomically add the cap and add an
@@ -645,6 +614,9 @@ void ceph_add_cap(struct inode *inode,
struct ceph_cap *cap;
int mds = session->s_mds;
int actual_wanted;
+ u32 gen;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
session->s_mds, cap_id, ceph_cap_string(issued), seq);
@@ -656,6 +628,10 @@ void ceph_add_cap(struct inode *inode,
if (fmode >= 0)
wanted |= ceph_caps_for_mode(fmode);
+ spin_lock(&session->s_gen_ttl_lock);
+ gen = session->s_cap_gen;
+ spin_unlock(&session->s_gen_ttl_lock);
+
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
cap = *new_cap;
@@ -681,7 +657,7 @@ void ceph_add_cap(struct inode *inode,
list_move_tail(&cap->session_caps, &session->s_caps);
spin_unlock(&session->s_cap_lock);
- if (cap->cap_gen < session->s_cap_gen)
+ if (cap->cap_gen < gen)
cap->issued = cap->implemented = CEPH_CAP_PIN;
/*
@@ -775,7 +751,7 @@ void ceph_add_cap(struct inode *inode,
cap->seq = seq;
cap->issue_seq = seq;
cap->mseq = mseq;
- cap->cap_gen = session->s_cap_gen;
+ cap->cap_gen = gen;
if (fmode >= 0)
__ceph_get_fmode(ci, fmode);
@@ -1082,6 +1058,11 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
+ /* remove from inode's cap rbtree, and clear auth cap */
+ rb_erase(&cap->ci_node, &ci->i_caps);
+ if (ci->i_auth_cap == cap)
+ ci->i_auth_cap = NULL;
+
/* remove from session list */
spin_lock(&session->s_cap_lock);
if (session->s_cap_iterator == cap) {
@@ -1115,11 +1096,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
spin_unlock(&session->s_cap_lock);
- /* remove from inode list */
- rb_erase(&cap->ci_node, &ci->i_caps);
- if (ci->i_auth_cap == cap)
- ci->i_auth_cap = NULL;
-
if (removed)
ceph_put_cap(mdsc, cap);
@@ -1284,10 +1260,6 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
* Make note of max_size reported/requested from mds, revoked caps
* that have now been implemented.
*
- * Make half-hearted attempt ot to invalidate page cache if we are
- * dropping RDCACHE. Note that this will leave behind locked pages
- * that we'll then need to deal with elsewhere.
- *
* Return non-zero if delayed release, or we experienced an error
* such that the caller should requeue + retry later.
*
@@ -1301,6 +1273,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
+ struct ceph_buffer *old_blob = NULL;
struct cap_msg_args arg;
int held, revoking;
int wake = 0;
@@ -1365,7 +1338,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
ci->i_requested_max_size = arg.max_size;
if (flushing & CEPH_CAP_XATTR_EXCL) {
- __ceph_build_xattrs_blob(ci);
+ old_blob = __ceph_build_xattrs_blob(ci);
arg.xattr_version = ci->i_xattrs.version;
arg.xattr_buf = ci->i_xattrs.blob;
} else {
@@ -1409,6 +1382,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
+
ret = send_cap_msg(&arg);
if (ret < 0) {
dout("error sending cap msg, must requeue %p\n", inode);
@@ -1743,11 +1718,11 @@ static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
* Add dirty inode to the flushing list. Assigned a seq number so we
* can wait for caps to flush without starving.
*
- * Called under i_ceph_lock.
+ * Called under i_ceph_lock. Returns the flush tid.
*/
-static int __mark_caps_flushing(struct inode *inode,
+static u64 __mark_caps_flushing(struct inode *inode,
struct ceph_mds_session *session, bool wake,
- u64 *flush_tid, u64 *oldest_flush_tid)
+ u64 *oldest_flush_tid)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -1786,8 +1761,7 @@ static int __mark_caps_flushing(struct inode *inode,
list_add_tail(&cf->i_list, &ci->i_cap_flush_list);
- *flush_tid = cf->tid;
- return flushing;
+ return cf->tid;
}
/*
@@ -2025,11 +1999,6 @@ retry_locked:
}
ack:
- if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
- dout(" skipping %p I_NOFLUSH set\n", inode);
- continue;
- }
-
if (session && session != cap->session) {
dout("oops, wrong session %p mutex\n", session);
mutex_unlock(&session->s_mutex);
@@ -2077,9 +2046,9 @@ ack:
}
if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
- flushing = __mark_caps_flushing(inode, session, false,
- &flush_tid,
- &oldest_flush_tid);
+ flushing = ci->i_dirty_caps;
+ flush_tid = __mark_caps_flushing(inode, session, false,
+ &oldest_flush_tid);
} else {
flushing = 0;
flush_tid = 0;
@@ -2127,16 +2096,11 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
retry:
spin_lock(&ci->i_ceph_lock);
retry_locked:
- if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
- spin_unlock(&ci->i_ceph_lock);
- dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
- goto out;
- }
if (ci->i_dirty_caps && ci->i_auth_cap) {
struct ceph_cap *cap = ci->i_auth_cap;
int delayed;
- if (!session || session != cap->session) {
+ if (session != cap->session) {
spin_unlock(&ci->i_ceph_lock);
if (session)
mutex_unlock(&session->s_mutex);
@@ -2158,8 +2122,9 @@ retry_locked:
goto retry_locked;
}
- flushing = __mark_caps_flushing(inode, session, true,
- &flush_tid, &oldest_flush_tid);
+ flushing = ci->i_dirty_caps;
+ flush_tid = __mark_caps_flushing(inode, session, true,
+ &oldest_flush_tid);
/* __send_cap drops i_ceph_lock */
delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
@@ -2258,35 +2223,45 @@ static int unsafe_request_wait(struct inode *inode)
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
+ struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 flush_tid;
- int ret;
+ int ret, err;
int dirty;
dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
ret = file_write_and_wait_range(file, start, end);
- if (ret < 0)
- goto out;
-
if (datasync)
goto out;
dirty = try_flush_caps(inode, &flush_tid);
dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
- ret = unsafe_request_wait(inode);
+ err = unsafe_request_wait(inode);
/*
* only wait on non-file metadata writeback (the mds
* can recover size and mtime, so we don't need to
* wait for that)
*/
- if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
- ret = wait_event_interruptible(ci->i_cap_wq,
+ if (!err && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
+ err = wait_event_interruptible(ci->i_cap_wq,
caps_are_flushed(inode, flush_tid));
}
+
+ if (err < 0)
+ ret = err;
+
+ if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
+ spin_lock(&file->f_lock);
+ err = errseq_check_and_advance(&ci->i_meta_err,
+ &fi->meta_err);
+ spin_unlock(&file->f_lock);
+ if (err < 0)
+ ret = err;
+ }
out:
dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
return ret;
@@ -2557,10 +2532,15 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got,
*
* FIXME: how does a 0 return differ from -EAGAIN?
*/
-static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
- loff_t endoff, bool nonblock, int *got)
+enum {
+ NON_BLOCKING = 1,
+ CHECK_FILELOCK = 2,
+};
+
+static int try_get_cap_refs(struct inode *inode, int need, int want,
+ loff_t endoff, int flags, int *got)
{
- struct inode *inode = &ci->vfs_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
int ret = 0;
int have, implemented;
@@ -2573,6 +2553,13 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
again:
spin_lock(&ci->i_ceph_lock);
+ if ((flags & CHECK_FILELOCK) &&
+ (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) {
+ dout("try_get_cap_refs %p error filelock\n", inode);
+ ret = -EIO;
+ goto out_unlock;
+ }
+
/* make sure file is actually open */
file_wanted = __ceph_caps_file_wanted(ci);
if ((file_wanted & need) != need) {
@@ -2634,7 +2621,7 @@ again:
* we can not call down_read() when
* task isn't in TASK_RUNNING state
*/
- if (nonblock) {
+ if (flags & NON_BLOCKING) {
ret = -EAGAIN;
goto out_unlock;
}
@@ -2728,18 +2715,19 @@ static void check_max_size(struct inode *inode, loff_t endoff)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}
-int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want,
+int ceph_try_get_caps(struct inode *inode, int need, int want,
bool nonblock, int *got)
{
int ret;
BUG_ON(need & ~CEPH_CAP_FILE_RD);
BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO|CEPH_CAP_FILE_SHARED));
- ret = ceph_pool_perm_check(ci, need);
+ ret = ceph_pool_perm_check(inode, need);
if (ret < 0)
return ret;
- ret = try_get_cap_refs(ci, need, want, 0, nonblock, got);
+ ret = try_get_cap_refs(inode, need, want, 0,
+ (nonblock ? NON_BLOCKING : 0), got);
return ret == -EAGAIN ? 0 : ret;
}
@@ -2748,30 +2736,40 @@ int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want,
* due to a small max_size, make sure we check_max_size (and possibly
* ask the mds) so we don't get hung up indefinitely.
*/
-int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
+int ceph_get_caps(struct file *filp, int need, int want,
loff_t endoff, int *got, struct page **pinned_page)
{
- int _got, ret;
+ struct ceph_file_info *fi = filp->private_data;
+ struct inode *inode = file_inode(filp);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ int ret, _got, flags;
- ret = ceph_pool_perm_check(ci, need);
+ ret = ceph_pool_perm_check(inode, need);
if (ret < 0)
return ret;
+ if ((fi->fmode & CEPH_FILE_MODE_WR) &&
+ fi->filp_gen != READ_ONCE(fsc->filp_gen))
+ return -EBADF;
+
while (true) {
if (endoff > 0)
- check_max_size(&ci->vfs_inode, endoff);
+ check_max_size(inode, endoff);
+ flags = atomic_read(&fi->num_locks) ? CHECK_FILELOCK : 0;
_got = 0;
- ret = try_get_cap_refs(ci, need, want, endoff,
- false, &_got);
+ ret = try_get_cap_refs(inode, need, want, endoff,
+ flags, &_got);
if (ret == -EAGAIN)
continue;
if (!ret) {
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&ci->i_cap_wq, &wait);
- while (!(ret = try_get_cap_refs(ci, need, want, endoff,
- true, &_got))) {
+ flags |= NON_BLOCKING;
+ while (!(ret = try_get_cap_refs(inode, need, want,
+ endoff, flags, &_got))) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -2783,10 +2781,18 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
if (ret == -EAGAIN)
continue;
}
+
+ if ((fi->fmode & CEPH_FILE_MODE_WR) &&
+ fi->filp_gen != READ_ONCE(fsc->filp_gen)) {
+ if (ret >= 0 && _got)
+ ceph_put_cap_refs(ci, _got);
+ return -EBADF;
+ }
+
if (ret < 0) {
if (ret == -ESTALE) {
/* session was killed, try renew caps */
- ret = ceph_renew_caps(&ci->vfs_inode);
+ ret = ceph_renew_caps(inode);
if (ret == 0)
continue;
}
@@ -2795,9 +2801,9 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
if (ci->i_inline_version != CEPH_INLINE_NONE &&
(_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
- i_size_read(&ci->vfs_inode) > 0) {
+ i_size_read(inode) > 0) {
struct page *page =
- find_get_page(ci->vfs_inode.i_mapping, 0);
+ find_get_page(inode->i_mapping, 0);
if (page) {
if (PageUptodate(page)) {
*pinned_page = page;
@@ -2816,7 +2822,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
* getattr request will bring inline data into
* page cache
*/
- ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
+ ret = __ceph_do_getattr(inode, NULL,
CEPH_STAT_CAP_INLINE_DATA,
true);
if (ret < 0)
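
ceph_fsync() above starts checking ci->i_meta_err against the fi->meta_err cursor sampled at open time, so a metadata writeback error is reported once per open file description. A simplified userspace sketch of that errseq-style idea; the real errseq_t packs a sequence counter and a SEEN bit, this keeps only a counter and is purely an illustration:

#include <stdio.h>

struct inode_err  { unsigned long seq; int last_err; };
struct file_state { unsigned long sampled_seq; };

static void record_error(struct inode_err *ie, int err)
{
	ie->seq++;
	ie->last_err = err;
}

static void open_file(struct file_state *f, const struct inode_err *ie)
{
	f->sampled_seq = ie->seq;	/* like errseq_sample() at open time */
}

static int fsync_check(struct file_state *f, const struct inode_err *ie)
{
	if (f->sampled_seq == ie->seq)
		return 0;		/* nothing new since we sampled */
	f->sampled_seq = ie->seq;	/* advance, so the error reports once */
	return ie->last_err;
}

int main(void)
{
	struct inode_err ie = { 0, 0 };
	struct file_state f;

	open_file(&f, &ie);
	record_error(&ie, -5);		/* e.g. -EIO during writeback */
	printf("first fsync:  %d\n", fsync_check(&f, &ie));	/* -5 */
	printf("second fsync: %d\n", fsync_check(&f, &ie));	/* 0 */
	return 0;
}
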
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 2eb88ed22993..facb387c2735 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -294,7 +294,6 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
- return 0;
}
void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 4ca0b8ff9a72..2e4764fd1872 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1553,36 +1553,37 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
int valid = 0;
struct dentry *parent;
- struct inode *dir;
+ struct inode *dir, *inode;
if (flags & LOOKUP_RCU) {
parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
+ inode = d_inode_rcu(dentry);
} else {
parent = dget_parent(dentry);
dir = d_inode(parent);
+ inode = d_inode(dentry);
}
dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
- dentry, d_inode(dentry), ceph_dentry(dentry)->offset);
+ dentry, inode, ceph_dentry(dentry)->offset);
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
- dentry, d_inode(dentry));
+ dentry, inode);
valid = 1;
- } else if (d_really_is_positive(dentry) &&
- ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
+ } else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
valid = 1;
} else {
valid = dentry_lease_is_valid(dentry, flags);
if (valid == -ECHILD)
return valid;
if (valid || dir_lease_is_valid(dir, dentry)) {
- if (d_really_is_positive(dentry))
- valid = ceph_is_any_caps(d_inode(dentry));
+ if (inode)
+ valid = ceph_is_any_caps(inode);
else
valid = 1;
}
@@ -1808,6 +1809,7 @@ const struct file_operations ceph_dir_fops = {
.open = ceph_open,
.release = ceph_release,
.unlocked_ioctl = ceph_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.fsync = ceph_fsync,
.lock = ceph_lock,
.flock = ceph_flock,
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 15ff1b09cfa2..b6bfa94332c3 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -35,7 +35,7 @@ struct ceph_nfs_snapfh {
static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
- const static int snap_handle_length =
+ static const int snap_handle_length =
sizeof(struct ceph_nfs_snapfh) >> 2;
struct ceph_nfs_snapfh *sfh = (void *)rawfh;
u64 snapid = ceph_snap(inode);
@@ -85,9 +85,9 @@ out:
static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
- const static int handle_length =
+ static const int handle_length =
sizeof(struct ceph_nfs_fh) >> 2;
- const static int connected_handle_length =
+ static const int connected_handle_length =
sizeof(struct ceph_nfs_confh) >> 2;
int type;
@@ -458,33 +458,33 @@ static int __get_snap_name(struct dentry *parent, char *name,
if (err < 0)
goto out;
- rinfo = &req->r_reply_info;
- for (i = 0; i < rinfo->dir_nr; i++) {
- rde = rinfo->dir_entries + i;
- BUG_ON(!rde->inode.in);
- if (ceph_snap(inode) ==
- le64_to_cpu(rde->inode.in->snapid)) {
- memcpy(name, rde->name, rde->name_len);
- name[rde->name_len] = '\0';
- err = 0;
- goto out;
- }
- }
-
- if (rinfo->dir_end)
- break;
-
- BUG_ON(rinfo->dir_nr <= 0);
- rde = rinfo->dir_entries + (rinfo->dir_nr - 1);
- next_offset += rinfo->dir_nr;
- last_name = kstrndup(rde->name, rde->name_len, GFP_KERNEL);
- if (!last_name) {
- err = -ENOMEM;
- goto out;
- }
-
- ceph_mdsc_put_request(req);
- req = NULL;
+ rinfo = &req->r_reply_info;
+ for (i = 0; i < rinfo->dir_nr; i++) {
+ rde = rinfo->dir_entries + i;
+ BUG_ON(!rde->inode.in);
+ if (ceph_snap(inode) ==
+ le64_to_cpu(rde->inode.in->snapid)) {
+ memcpy(name, rde->name, rde->name_len);
+ name[rde->name_len] = '\0';
+ err = 0;
+ goto out;
+ }
+ }
+
+ if (rinfo->dir_end)
+ break;
+
+ BUG_ON(rinfo->dir_nr <= 0);
+ rde = rinfo->dir_entries + (rinfo->dir_nr - 1);
+ next_offset += rinfo->dir_nr;
+ last_name = kstrndup(rde->name, rde->name_len, GFP_KERNEL);
+ if (!last_name) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ceph_mdsc_put_request(req);
+ req = NULL;
}
err = -ENOENT;
out:
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 685a03cc4b77..11929d2bb594 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -15,6 +15,7 @@
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "io.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
@@ -201,6 +202,7 @@ out:
static int ceph_init_file_info(struct inode *inode, struct file *file,
int fmode, bool isdir)
{
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi;
dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
@@ -211,7 +213,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
struct ceph_dir_file_info *dfi =
kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
if (!dfi) {
- ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
+ ceph_put_fmode(ci, fmode); /* clean up */
return -ENOMEM;
}
@@ -222,7 +224,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
} else {
fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
if (!fi) {
- ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
+ ceph_put_fmode(ci, fmode); /* clean up */
return -ENOMEM;
}
@@ -232,6 +234,8 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
fi->fmode = fmode;
spin_lock_init(&fi->rw_contexts_lock);
INIT_LIST_HEAD(&fi->rw_contexts);
+ fi->meta_err = errseq_sample(&ci->i_meta_err);
+ fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
return 0;
}
@@ -458,6 +462,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
err = ceph_security_init_secctx(dentry, mode, &as_ctx);
if (err < 0)
goto out_ctx;
+ } else if (!d_in_lookup(dentry)) {
+ /* If it's not being looked up, it's negative */
+ return -ENOENT;
}
/* do the open */
@@ -695,7 +702,13 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
ceph_release_page_vector(pages, num_pages);
}
- if (ret <= 0 || off >= i_size || !more)
+ if (ret < 0) {
+ if (ret == -EBLACKLISTED)
+ fsc->blacklisted = true;
+ break;
+ }
+
+ if (off >= i_size || !more)
break;
}
@@ -740,6 +753,9 @@ static void ceph_aio_complete(struct inode *inode,
if (!atomic_dec_and_test(&aio_req->pending_reqs))
return;
+ if (aio_req->iocb->ki_flags & IOCB_DIRECT)
+ inode_dio_end(inode);
+
ret = aio_req->error;
if (!ret)
ret = aio_req->total_len;
@@ -921,7 +937,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
struct ceph_aio_request *aio_req = NULL;
int num_pages = 0;
int flags;
- int ret;
+ int ret = 0;
struct timespec64 mtime = current_time(inode);
size_t count = iov_iter_count(iter);
loff_t pos = iocb->ki_pos;
@@ -935,11 +951,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
(write ? "write" : "read"), file, pos, (unsigned)count,
snapc, snapc ? snapc->seq : 0);
- ret = filemap_write_and_wait_range(inode->i_mapping,
- pos, pos + count - 1);
- if (ret < 0)
- return ret;
-
if (write) {
int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_SHIFT,
@@ -1083,6 +1094,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
CEPH_CAP_FILE_RD);
list_splice(&aio_req->osd_reqs, &osd_reqs);
+ inode_dio_begin(inode);
while (!list_empty(&osd_reqs)) {
req = list_first_entry(&osd_reqs,
struct ceph_osd_request,
@@ -1256,13 +1268,24 @@ again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_start_io_direct(inode);
+ else
+ ceph_start_io_read(inode);
+
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
want = CEPH_CAP_FILE_CACHE;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
- if (ret < 0)
+ ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
+ &got, &pinned_page);
+ if (ret < 0) {
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_read(inode);
return ret;
+ }
if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_flags & IOCB_DIRECT) ||
@@ -1293,6 +1316,7 @@ again:
ret = generic_file_read_iter(iocb, to);
ceph_del_rw_context(fi, &rw_ctx);
}
+
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
if (pinned_page) {
@@ -1300,6 +1324,12 @@ again:
pinned_page = NULL;
}
ceph_put_cap_refs(ci, got);
+
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_read(inode);
+
if (retry_op > HAVE_RETRIED && ret >= 0) {
int statret;
struct page *page = NULL;
@@ -1399,7 +1429,10 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
return -ENOMEM;
retry_snap:
- inode_lock(inode);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_start_io_direct(inode);
+ else
+ ceph_start_io_write(inode);
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
@@ -1457,7 +1490,7 @@ retry_snap:
else
want = CEPH_CAP_FILE_BUFFER;
got = 0;
- err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
+ err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count,
&got, NULL);
if (err < 0)
goto out;
@@ -1470,7 +1503,6 @@ retry_snap:
(ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
struct ceph_snap_context *snapc;
struct iov_iter data;
- inode_unlock(inode);
spin_lock(&ci->i_ceph_lock);
if (__ceph_have_pending_cap_snap(ci)) {
@@ -1487,11 +1519,14 @@ retry_snap:
/* we might need to revert back to that point */
data = *from;
- if (iocb->ki_flags & IOCB_DIRECT)
+ if (iocb->ki_flags & IOCB_DIRECT) {
written = ceph_direct_read_write(iocb, &data, snapc,
&prealloc_cf);
- else
+ ceph_end_io_direct(inode);
+ } else {
written = ceph_sync_write(iocb, &data, pos, snapc);
+ ceph_end_io_write(inode);
+ }
if (written > 0)
iov_iter_advance(from, written);
ceph_put_snap_context(snapc);
@@ -1506,7 +1541,7 @@ retry_snap:
written = generic_perform_write(file, from, pos);
if (likely(written >= 0))
iocb->ki_pos = pos + written;
- inode_unlock(inode);
+ ceph_end_io_write(inode);
}
if (written >= 0) {
@@ -1541,9 +1576,11 @@ retry_snap:
}
goto out_unlocked;
-
out:
- inode_unlock(inode);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_write(inode);
out_unlocked:
ceph_free_cap_flush(prealloc_cf);
current->backing_dev_info = NULL;
@@ -1781,7 +1818,7 @@ static long ceph_fallocate(struct file *file, int mode,
else
want = CEPH_CAP_FILE_BUFFER;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
+ ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
if (ret < 0)
goto unlock;
@@ -1810,16 +1847,15 @@ unlock:
 * src_ci. Two attempts are made to obtain both caps, and an error is returned if
* this fails; zero is returned on success.
*/
-static int get_rd_wr_caps(struct ceph_inode_info *src_ci,
- loff_t src_endoff, int *src_got,
- struct ceph_inode_info *dst_ci,
+static int get_rd_wr_caps(struct file *src_filp, int *src_got,
+ struct file *dst_filp,
loff_t dst_endoff, int *dst_got)
{
int ret = 0;
bool retrying = false;
retry_caps:
- ret = ceph_get_caps(dst_ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
+ ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
dst_endoff, dst_got, NULL);
if (ret < 0)
return ret;
@@ -1829,24 +1865,24 @@ retry_caps:
* we would risk a deadlock by using ceph_get_caps. Thus, we'll do some
* retry dance instead to try to get both capabilities.
*/
- ret = ceph_try_get_caps(src_ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
+ ret = ceph_try_get_caps(file_inode(src_filp),
+ CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
false, src_got);
if (ret <= 0) {
/* Start by dropping dst_ci caps and getting src_ci caps */
- ceph_put_cap_refs(dst_ci, *dst_got);
+ ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
if (retrying) {
if (!ret)
/* ceph_try_get_caps masks EAGAIN */
ret = -EAGAIN;
return ret;
}
- ret = ceph_get_caps(src_ci, CEPH_CAP_FILE_RD,
- CEPH_CAP_FILE_SHARED, src_endoff,
- src_got, NULL);
+ ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
+ CEPH_CAP_FILE_SHARED, -1, src_got, NULL);
if (ret < 0)
return ret;
/*... drop src_ci caps too, and retry */
- ceph_put_cap_refs(src_ci, *src_got);
+ ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
retrying = true;
goto retry_caps;
}
@@ -1904,6 +1940,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
struct ceph_inode_info *src_ci = ceph_inode(src_inode);
struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
struct ceph_cap_flush *prealloc_cf;
+ struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
struct ceph_object_locator src_oloc, dst_oloc;
struct ceph_object_id src_oid, dst_oid;
loff_t endoff = 0, size;
@@ -1913,10 +1950,16 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
int src_got = 0, dst_got = 0, err, dirty;
bool do_final_copy = false;
- if (src_inode == dst_inode)
- return -EINVAL;
- if (src_inode->i_sb != dst_inode->i_sb)
- return -EXDEV;
+ if (src_inode->i_sb != dst_inode->i_sb) {
+ struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
+
+ if (ceph_fsid_compare(&src_fsc->client->fsid,
+ &dst_fsc->client->fsid)) {
+ dout("Copying files across clusters: src: %pU dst: %pU\n",
+ &src_fsc->client->fsid, &dst_fsc->client->fsid);
+ return -EXDEV;
+ }
+ }
if (ceph_snap(dst_inode) != CEPH_NOSNAP)
return -EROFS;
@@ -1928,13 +1971,21 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
* efficient).
*/
- if (ceph_test_mount_opt(ceph_inode_to_client(src_inode), NOCOPYFROM))
+ if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
return -EOPNOTSUPP;
+ /*
+ * Striped file layouts require that we copy partial objects, but the
+ * OSD copy-from operation only supports full-object copies. Limit
+ * this to non-striped file layouts for now.
+ */
if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
- (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) ||
- (src_ci->i_layout.object_size != dst_ci->i_layout.object_size))
+ (src_ci->i_layout.stripe_count != 1) ||
+ (dst_ci->i_layout.stripe_count != 1) ||
+ (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
+ dout("Invalid src/dst files layout\n");
return -EOPNOTSUPP;
+ }
if (len < src_ci->i_layout.object_size)
return -EOPNOTSUPP; /* no remote copy will be done */
@@ -1960,8 +2011,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
* clients may have dirty data in their caches. And OSDs know nothing
* about caps, so they can't safely do the remote object copies.
*/
- err = get_rd_wr_caps(src_ci, (src_off + len), &src_got,
- dst_ci, (dst_off + len), &dst_got);
+ err = get_rd_wr_caps(src_file, &src_got,
+ dst_file, (dst_off + len), &dst_got);
if (err < 0) {
dout("get_rd_wr_caps returned %d\n", err);
ret = -EOPNOTSUPP;
@@ -2018,9 +2069,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
goto out;
}
len -= ret;
- err = get_rd_wr_caps(src_ci, (src_off + len),
- &src_got, dst_ci,
- (dst_off + len), &dst_got);
+ err = get_rd_wr_caps(src_file, &src_got,
+ dst_file, (dst_off + len), &dst_got);
if (err < 0)
goto out;
err = is_file_size_ok(src_inode, dst_inode,
@@ -2044,7 +2094,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
dst_ci->i_vino.ino, dst_objnum);
/* Do an object remote copy */
err = ceph_osdc_copy_from(
- &ceph_inode_to_client(src_inode)->client->osdc,
+ &src_fsc->client->osdc,
src_ci->i_vino.snap, 0,
&src_oid, &src_oloc,
CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
@@ -2138,7 +2188,7 @@ const struct file_operations ceph_file_fops = {
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = ceph_ioctl,
- .compat_ioctl = ceph_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.fallocate = ceph_fallocate,
.copy_file_range = ceph_copy_file_range,
};
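
The cap-acquisition change above follows a classic two-lock back-off: take the destination caps, try the source caps without blocking, and on failure drop everything, wait for the contended side, then start over so both are reacquired in the same order. A minimal user-space sketch of the same pattern, with pthread mutexes standing in for the caps (illustrative only, not ceph code):

#include <pthread.h>

/* Acquire a and b together without risking an ABBA deadlock. */
static void lock_both(pthread_mutex_t *a, pthread_mutex_t *b)
{
	for (;;) {
		pthread_mutex_lock(a);
		if (pthread_mutex_trylock(b) == 0)
			return;			/* got both */
		pthread_mutex_unlock(a);	/* back off completely */
		pthread_mutex_lock(b);		/* wait until b is free */
		pthread_mutex_unlock(b);	/* then retry, taking a first again */
	}
}
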
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 791f84a13bb8..c07407586ce8 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -515,6 +515,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ceph_fscache_inode_init(ci);
+ ci->i_meta_err = 0;
+
return &ci->vfs_inode;
}
@@ -736,6 +738,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
int issued, new_issued, info_caps;
struct timespec64 mtime, atime, ctime;
struct ceph_buffer *xattr_blob = NULL;
+ struct ceph_buffer *old_blob = NULL;
struct ceph_string *pool_ns = NULL;
struct ceph_cap *new_cap = NULL;
int err = 0;
@@ -800,7 +803,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
/* update inode */
inode->i_rdev = le32_to_cpu(info->rdev);
- inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+ /* directories have fl_stripe_unit set to zero */
+ if (le32_to_cpu(info->layout.fl_stripe_unit))
+ inode->i_blkbits =
+ fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+ else
+ inode->i_blkbits = CEPH_BLOCK_SHIFT;
__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
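
Worked example for the hunk above, assuming the default 4 MiB stripe unit: fls(0x400000) is 23, so i_blkbits becomes 22 (a 4 MiB block size as reported via st_blksize). Directories carry fl_stripe_unit == 0, for which fls(0) - 1 would yield -1, so they now fall back to CEPH_BLOCK_SHIFT (also 22).
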
@@ -881,7 +889,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
if (ci->i_xattrs.blob)
- ceph_buffer_put(ci->i_xattrs.blob);
+ old_blob = ci->i_xattrs.blob;
ci->i_xattrs.blob = xattr_blob;
if (xattr_blob)
memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1022,8 +1030,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
out:
if (new_cap)
ceph_put_cap(mdsc, new_cap);
- if (xattr_blob)
- ceph_buffer_put(xattr_blob);
+ ceph_buffer_put(old_blob);
+ ceph_buffer_put(xattr_blob);
ceph_put_string(pool_ns);
return err;
}
@@ -1426,6 +1434,7 @@ retry_lookup:
dout(" final dn %p\n", dn);
} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
req->r_op == CEPH_MDS_OP_MKSNAP) &&
+ test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
struct inode *dir = req->r_parent;
@@ -1981,7 +1990,7 @@ static const struct inode_operations ceph_symlink_iops = {
int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- const unsigned int ia_valid = attr->ia_valid;
+ unsigned int ia_valid = attr->ia_valid;
struct ceph_mds_request *req;
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf;
@@ -2086,6 +2095,26 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
}
}
+ if (ia_valid & ATTR_SIZE) {
+ dout("setattr %p size %lld -> %lld\n", inode,
+ inode->i_size, attr->ia_size);
+ if ((issued & CEPH_CAP_FILE_EXCL) &&
+ attr->ia_size > inode->i_size) {
+ i_size_write(inode, attr->ia_size);
+ inode->i_blocks = calc_inode_blocks(attr->ia_size);
+ ci->i_reported_size = attr->ia_size;
+ dirtied |= CEPH_CAP_FILE_EXCL;
+ ia_valid |= ATTR_MTIME;
+ } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
+ attr->ia_size != inode->i_size) {
+ req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
+ req->r_args.setattr.old_size =
+ cpu_to_le64(inode->i_size);
+ mask |= CEPH_SETATTR_SIZE;
+ release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
+ }
+ }
if (ia_valid & ATTR_MTIME) {
dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
@@ -2108,25 +2137,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
}
}
- if (ia_valid & ATTR_SIZE) {
- dout("setattr %p size %lld -> %lld\n", inode,
- inode->i_size, attr->ia_size);
- if ((issued & CEPH_CAP_FILE_EXCL) &&
- attr->ia_size > inode->i_size) {
- i_size_write(inode, attr->ia_size);
- inode->i_blocks = calc_inode_blocks(attr->ia_size);
- ci->i_reported_size = attr->ia_size;
- dirtied |= CEPH_CAP_FILE_EXCL;
- } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
- attr->ia_size != inode->i_size) {
- req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
- req->r_args.setattr.old_size =
- cpu_to_le64(inode->i_size);
- mask |= CEPH_SETATTR_SIZE;
- release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
- CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
- }
- }
/* these do nothing */
if (ia_valid & ATTR_CTIME) {
diff --git a/fs/ceph/io.c b/fs/ceph/io.c
new file mode 100644
index 000000000000..97602ea92ff4
--- /dev/null
+++ b/fs/ceph/io.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 Trond Myklebust
+ * Copyright (c) 2019 Jeff Layton
+ *
+ * I/O and data path helper functionality.
+ *
+ * Heavily borrowed from equivalent code in fs/nfs/io.c
+ */
+
+#include <linux/ceph/ceph_debug.h>
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/rwsem.h>
+#include <linux/fs.h>
+
+#include "super.h"
+#include "io.h"
+
+/* Call with exclusively locked inode->i_rwsem */
+static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
+{
+ lockdep_assert_held_write(&inode->i_rwsem);
+
+ if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT) {
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags &= ~CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ inode_dio_wait(inode);
+ }
+}
+
+/**
+ * ceph_start_io_read - declare the file is being used for buffered reads
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is about to start, and ensure
+ * that we block all direct I/O.
+ * On exit, the function ensures that the CEPH_I_ODIRECT flag is unset,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that buffered read operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas direct I/O
+ * operations need to wait to grab an exclusive lock in order to set
+ * CEPH_I_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
+ */
+void
+ceph_start_io_read(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ /* Be an optimist! */
+ down_read(&inode->i_rwsem);
+ if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT))
+ return;
+ up_read(&inode->i_rwsem);
+ /* Slow path.... */
+ down_write(&inode->i_rwsem);
+ ceph_block_o_direct(ci, inode);
+ downgrade_write(&inode->i_rwsem);
+}
+
+/**
+ * ceph_end_io_read - declare that the buffered read operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void
+ceph_end_io_read(struct inode *inode)
+{
+ up_read(&inode->i_rwsem);
+}
+
+/**
+ * ceph_start_io_write - declare the file is being used for buffered writes
+ * @inode: file inode
+ *
+ * Declare that a buffered write operation is about to start, and ensure
+ * that we block all direct I/O.
+ */
+void
+ceph_start_io_write(struct inode *inode)
+{
+ down_write(&inode->i_rwsem);
+ ceph_block_o_direct(ceph_inode(inode), inode);
+}
+
+/**
+ * ceph_end_io_write - declare that the buffered write operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered write operation is done, and release the
+ * lock on inode->i_rwsem.
+ */
+void
+ceph_end_io_write(struct inode *inode)
+{
+ up_write(&inode->i_rwsem);
+}
+
+/* Call with exclusively locked inode->i_rwsem */
+static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
+{
+ lockdep_assert_held_write(&inode->i_rwsem);
+
+ if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)) {
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags |= CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ /* FIXME: unmap_mapping_range? */
+ filemap_write_and_wait(inode->i_mapping);
+ }
+}
+
+/**
+ * ceph_start_io_direct - declare the file is being used for direct i/o
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is about to start, and ensure
+ * that we block all buffered I/O.
+ * On exit, the function ensures that the CEPH_I_ODIRECT flag is set,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that direct I/O operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas buffered I/O
+ * operations need to wait to grab an exclusive lock in order to clear
+ * CEPH_I_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
+ */
+void
+ceph_start_io_direct(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ /* Be an optimist! */
+ down_read(&inode->i_rwsem);
+ if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)
+ return;
+ up_read(&inode->i_rwsem);
+ /* Slow path.... */
+ down_write(&inode->i_rwsem);
+ ceph_block_buffered(ci, inode);
+ downgrade_write(&inode->i_rwsem);
+}
+
+/**
+ * ceph_end_io_direct - declare that the direct i/o operation is done
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void
+ceph_end_io_direct(struct inode *inode)
+{
+ up_read(&inode->i_rwsem);
+}
diff --git a/fs/ceph/io.h b/fs/ceph/io.h
new file mode 100644
index 000000000000..fa594cd77348
--- /dev/null
+++ b/fs/ceph/io.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FS_CEPH_IO_H
+#define _FS_CEPH_IO_H
+
+void ceph_start_io_read(struct inode *inode);
+void ceph_end_io_read(struct inode *inode);
+void ceph_start_io_write(struct inode *inode);
+void ceph_end_io_write(struct inode *inode);
+void ceph_start_io_direct(struct inode *inode);
+void ceph_end_io_direct(struct inode *inode);
+
+#endif /* _FS_CEPH_IO_H */
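
A minimal sketch of how a read path is expected to pair these helpers; do_direct_read() and do_buffered_read() are placeholders, not real functions:

static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		/* excludes buffered I/O, runs in parallel with other O_DIRECT */
		ceph_start_io_direct(inode);
		ret = do_direct_read(iocb, to);		/* placeholder */
		ceph_end_io_direct(inode);
	} else {
		/* shared lock: buffered readers run in parallel */
		ceph_start_io_read(inode);
		ret = do_buffered_read(iocb, to);	/* placeholder */
		ceph_end_io_read(inode);
	}
	return ret;
}
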
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ac9b53b89365..544e9e85b120 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -32,14 +32,18 @@ void __init ceph_flock_init(void)
static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
- struct inode *inode = file_inode(src->fl_file);
+ struct ceph_file_info *fi = dst->fl_file->private_data;
+ struct inode *inode = file_inode(dst->fl_file);
atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+ atomic_inc(&fi->num_locks);
}
static void ceph_fl_release_lock(struct file_lock *fl)
{
+ struct ceph_file_info *fi = fl->fl_file->private_data;
struct inode *inode = file_inode(fl->fl_file);
struct ceph_inode_info *ci = ceph_inode(inode);
+ atomic_dec(&fi->num_locks);
if (atomic_dec_and_test(&ci->i_filelock_ref)) {
/* clear error when all locks are released */
spin_lock(&ci->i_ceph_lock);
@@ -73,7 +77,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
* window. Caller function will decrease the counter.
*/
fl->fl_ops = &ceph_fl_lock_ops;
- atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+ fl->fl_ops->fl_copy_lock(fl, NULL);
}
if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
@@ -111,8 +115,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
req->r_wait_for_completion = ceph_lock_wait_for_completion;
err = ceph_mdsc_do_request(mdsc, inode, req);
-
- if (operation == CEPH_MDS_OP_GETFILELOCK) {
+ if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
fl->fl_type = F_RDLCK;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 920e9f048bd8..068b029cf073 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -384,8 +384,8 @@ static int parse_reply_info_readdir(void **p, void *end,
}
done:
- if (*p != end)
- goto bad;
+ /* Skip over any unrecognized fields */
+ *p = end;
return 0;
bad:
@@ -406,12 +406,10 @@ static int parse_reply_info_filelock(void **p, void *end,
goto bad;
info->filelock_reply = *p;
- *p += sizeof(*info->filelock_reply);
- if (unlikely(*p != end))
- goto bad;
+ /* Skip over any unrecognized fields */
+ *p = end;
return 0;
-
bad:
return -EIO;
}
@@ -425,18 +423,21 @@ static int parse_reply_info_create(void **p, void *end,
{
if (features == (u64)-1 ||
(features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
+ /* Malformed reply? */
if (*p == end) {
info->has_create_ino = false;
} else {
info->has_create_ino = true;
- info->ino = ceph_decode_64(p);
+ ceph_decode_64_safe(p, end, info->ino, bad);
}
+ } else {
+ if (*p != end)
+ goto bad;
}
- if (unlikely(*p != end))
- goto bad;
+ /* Skip over any unrecognized fields */
+ *p = end;
return 0;
-
bad:
return -EIO;
}
@@ -639,7 +640,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
s->s_renew_seq = 0;
INIT_LIST_HEAD(&s->s_caps);
s->s_nr_caps = 0;
- s->s_trim_caps = 0;
refcount_set(&s->s_ref, 1);
INIT_LIST_HEAD(&s->s_waiting);
INIT_LIST_HEAD(&s->s_unsafe);
@@ -1270,6 +1270,7 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
{
struct ceph_mds_request *req;
struct rb_node *p;
+ struct ceph_inode_info *ci;
dout("cleanup_session_requests mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
@@ -1278,6 +1279,16 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_request, r_unsafe_item);
pr_warn_ratelimited(" dropping unsafe request %llu\n",
req->r_tid);
+ if (req->r_target_inode) {
+ /* dropping unsafe change of inode's attributes */
+ ci = ceph_inode(req->r_target_inode);
+ errseq_set(&ci->i_meta_err, -EIO);
+ }
+ if (req->r_unsafe_dir) {
+ /* dropping unsafe directory operation */
+ ci = ceph_inode(req->r_unsafe_dir);
+ errseq_set(&ci->i_meta_err, -EIO);
+ }
__unregister_request(mdsc, req);
}
/* zero r_attempts, so kick_requests() will re-send requests */
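
These errseq_set() calls feed the new i_meta_err stream added to ceph_inode_info; the meta_err field added to ceph_file_info further down is the per-open cursor into that stream. A sketch of the usual errseq_t consumption pattern this enables (illustrative; the open/fsync wiring is not part of this hunk):

/* at open time: remember the current position in the error stream */
fi->meta_err = errseq_sample(&ci->i_meta_err);

/* at fsync time: report any metadata error raised since the file was opened */
err = errseq_check_and_advance(&ci->i_meta_err, &fi->meta_err);
if (err)
	return err;	/* e.g. -EIO after unsafe requests were dropped */
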
@@ -1370,7 +1381,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
struct ceph_inode_info *ci = ceph_inode(inode);
LIST_HEAD(to_remove);
- bool drop = false;
+ bool dirty_dropped = false;
bool invalidate = false;
dout("removing cap %p, ci is %p, inode is %p\n",
@@ -1383,9 +1394,12 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_cap_flush *cf;
struct ceph_mds_client *mdsc = fsc->mdsc;
- if (ci->i_wrbuffer_ref > 0 &&
- READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
- invalidate = true;
+ if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+ if (inode->i_data.nrpages > 0)
+ invalidate = true;
+ if (ci->i_wrbuffer_ref > 0)
+ mapping_set_error(&inode->i_data, -EIO);
+ }
while (!list_empty(&ci->i_cap_flush_list)) {
cf = list_first_entry(&ci->i_cap_flush_list,
@@ -1405,7 +1419,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
inode, ceph_ino(inode));
ci->i_dirty_caps = 0;
list_del_init(&ci->i_dirty_item);
- drop = true;
+ dirty_dropped = true;
}
if (!list_empty(&ci->i_flushing_item)) {
pr_warn_ratelimited(
@@ -1415,10 +1429,22 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
ci->i_flushing_caps = 0;
list_del_init(&ci->i_flushing_item);
mdsc->num_cap_flushing--;
- drop = true;
+ dirty_dropped = true;
}
spin_unlock(&mdsc->cap_dirty_lock);
+ if (dirty_dropped) {
+ errseq_set(&ci->i_meta_err, -EIO);
+
+ if (ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ }
+ }
+
if (atomic_read(&ci->i_filelock_ref) > 0) {
/* make further file lock syscall return -EIO */
ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
@@ -1430,15 +1456,6 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
ci->i_prealloc_cap_flush = NULL;
}
-
- if (drop &&
- ci->i_wrbuffer_ref_head == 0 &&
- ci->i_wr_ref == 0 &&
- ci->i_dirty_caps == 0 &&
- ci->i_flushing_caps == 0) {
- ceph_put_snap_context(ci->i_head_snapc);
- ci->i_head_snapc = NULL;
- }
}
spin_unlock(&ci->i_ceph_lock);
while (!list_empty(&to_remove)) {
@@ -1452,7 +1469,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
wake_up_all(&ci->i_cap_wq);
if (invalidate)
ceph_queue_invalidate(inode);
- if (drop)
+ if (dirty_dropped)
iput(inode);
return 0;
}
@@ -1705,11 +1722,11 @@ out:
*/
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
- struct ceph_mds_session *session = arg;
+ int *remaining = arg;
struct ceph_inode_info *ci = ceph_inode(inode);
int used, wanted, oissued, mine;
- if (session->s_trim_caps <= 0)
+ if (*remaining <= 0)
return -1;
spin_lock(&ci->i_ceph_lock);
@@ -1746,7 +1763,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
if (oissued) {
/* we aren't the only cap.. just remove us */
__ceph_remove_cap(cap, true);
- session->s_trim_caps--;
+ (*remaining)--;
} else {
struct dentry *dentry;
/* try dropping referring dentries */
@@ -1758,7 +1775,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
d_prune_aliases(inode);
count = atomic_read(&inode->i_count);
if (count == 1)
- session->s_trim_caps--;
+ (*remaining)--;
dout("trim_caps_cb %p cap %p pruned, count now %d\n",
inode, cap, count);
} else {
@@ -1784,12 +1801,12 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
dout("trim_caps mds%d start: %d / %d, trim %d\n",
session->s_mds, session->s_nr_caps, max_caps, trim_caps);
if (trim_caps > 0) {
- session->s_trim_caps = trim_caps;
- ceph_iterate_session_caps(session, trim_caps_cb, session);
+ int remaining = trim_caps;
+
+ ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
session->s_mds, session->s_nr_caps, max_caps,
- trim_caps - session->s_trim_caps);
- session->s_trim_caps = 0;
+ trim_caps - remaining);
}
ceph_flush_cap_releases(mdsc, session);
@@ -2165,13 +2182,17 @@ retry:
}
base = ceph_ino(d_inode(temp));
rcu_read_unlock();
- if (pos < 0 || read_seqretry(&rename_lock, seq)) {
- pr_err("build_path did not end path lookup where "
- "expected, pos is %d\n", pos);
- /* presumably this is only possible if racing with a
- rename of one of the parent directories (we can not
- lock the dentries above us to prevent this, but
- retrying should be harmless) */
+
+ if (read_seqretry(&rename_lock, seq))
+ goto retry;
+
+ if (pos < 0) {
+ /*
+ * A rename didn't occur, but somehow we didn't end up where
+ * we thought we would. Throw a warning and try again.
+ */
+ pr_warn("build_path did not end path lookup where "
+ "expected, pos is %d\n", pos);
goto retry;
}
@@ -2328,6 +2349,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
head->op = cpu_to_le32(req->r_op);
head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
+ head->ino = 0;
head->args = req->r_args;
ceph_encode_filepath(&p, end, ino1, path1);
@@ -3015,18 +3037,23 @@ bad:
pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
-static int __decode_and_drop_session_metadata(void **p, void *end)
+static int __decode_session_metadata(void **p, void *end,
+ bool *blacklisted)
{
/* map<string,string> */
u32 n;
+ bool err_str;
ceph_decode_32_safe(p, end, n, bad);
while (n-- > 0) {
u32 len;
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_need(p, end, len, bad);
+ err_str = !strncmp(*p, "error_string", len);
*p += len;
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_need(p, end, len, bad);
+ if (err_str && strnstr(*p, "blacklisted", len))
+ *blacklisted = true;
*p += len;
}
return 0;
@@ -3050,6 +3077,7 @@ static void handle_session(struct ceph_mds_session *session,
u64 seq;
unsigned long features = 0;
int wake = 0;
+ bool blacklisted = false;
/* decode */
ceph_decode_need(&p, end, sizeof(*h), bad);
@@ -3062,7 +3090,7 @@ static void handle_session(struct ceph_mds_session *session,
if (msg_version >= 3) {
u32 len;
/* version >= 2, metadata */
- if (__decode_and_drop_session_metadata(&p, end) < 0)
+ if (__decode_session_metadata(&p, end, &blacklisted) < 0)
goto bad;
/* version >= 3, feature bits */
ceph_decode_32_safe(&p, end, len, bad);
@@ -3149,6 +3177,8 @@ static void handle_session(struct ceph_mds_session *session,
session->s_state = CEPH_MDS_SESSION_REJECTED;
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
+ if (blacklisted)
+ mdsc->fsc->blacklisted = true;
wake = 2; /* for good measure */
break;
@@ -3998,7 +4028,27 @@ static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
mutex_unlock(&mdsc->mutex);
}
+static void maybe_recover_session(struct ceph_mds_client *mdsc)
+{
+ struct ceph_fs_client *fsc = mdsc->fsc;
+
+ if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
+ return;
+
+ if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
+ return;
+
+ if (!READ_ONCE(fsc->blacklisted))
+ return;
+
+ if (fsc->last_auto_reconnect &&
+ time_before(jiffies, fsc->last_auto_reconnect + HZ * 60 * 30))
+ return;
+ pr_info("auto reconnect after blacklisted\n");
+ fsc->last_auto_reconnect = jiffies;
+ ceph_force_reconnect(fsc->sb);
+}
/*
* delayed work -- periodically trim expired leases, renew caps with mds
@@ -4044,7 +4094,9 @@ static void delayed_work(struct work_struct *work)
pr_info("mds%d hung\n", s->s_mds);
}
}
- if (s->s_state < CEPH_MDS_SESSION_OPEN) {
+ if (s->s_state == CEPH_MDS_SESSION_NEW ||
+ s->s_state == CEPH_MDS_SESSION_RESTARTING ||
+ s->s_state == CEPH_MDS_SESSION_REJECTED) {
/* this mds is failed or recovering, just wait */
ceph_put_mds_session(s);
continue;
@@ -4072,6 +4124,8 @@ static void delayed_work(struct work_struct *work)
ceph_trim_snapid_map(mdsc);
+ maybe_recover_session(mdsc);
+
schedule_delayed(mdsc);
}
@@ -4355,7 +4409,12 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
session = __ceph_lookup_mds_session(mdsc, mds);
if (!session)
continue;
+
+ if (session->s_state == CEPH_MDS_SESSION_REJECTED)
+ __unregister_session(mdsc, session);
+ __wake_requests(mdsc, &session->s_waiting);
mutex_unlock(&mdsc->mutex);
+
mutex_lock(&session->s_mutex);
__close_session(mdsc, session);
if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
@@ -4364,6 +4423,7 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
}
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
+
mutex_lock(&mdsc->mutex);
kick_requests(mdsc, mds);
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index f7c8603484fe..5cd131b41d84 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -148,9 +148,9 @@ enum {
CEPH_MDS_SESSION_OPENING = 2,
CEPH_MDS_SESSION_OPEN = 3,
CEPH_MDS_SESSION_HUNG = 4,
- CEPH_MDS_SESSION_CLOSING = 5,
- CEPH_MDS_SESSION_RESTARTING = 6,
- CEPH_MDS_SESSION_RECONNECTING = 7,
+ CEPH_MDS_SESSION_RESTARTING = 5,
+ CEPH_MDS_SESSION_RECONNECTING = 6,
+ CEPH_MDS_SESSION_CLOSING = 7,
CEPH_MDS_SESSION_REJECTED = 8,
};
@@ -176,7 +176,7 @@ struct ceph_mds_session {
spinlock_t s_cap_lock;
struct list_head s_caps; /* all caps issued by this session */
struct ceph_cap *s_cap_iterator;
- int s_nr_caps, s_trim_caps;
+ int s_nr_caps;
int s_num_cap_releases;
int s_cap_reconnect;
int s_readonly;
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index ce2d00da5096..aeec1d6e3769 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -20,7 +20,7 @@
int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
{
int n = 0;
- int i;
+ int i, j;
/* special case for one mds */
if (1 == m->m_num_mds && m->m_info[0].state > 0)
@@ -35,9 +35,12 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
/* pick */
n = prandom_u32() % n;
- for (i = 0; n > 0; i++, n--)
- while (m->m_info[i].state <= 0)
- i++;
+ for (j = 0, i = 0; i < m->m_num_mds; i++) {
+ if (m->m_info[i].state > 0)
+ j++;
+ if (j > n)
+ break;
+ }
return i;
}
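
The rewritten loop picks the (n+1)-th entry whose state is positive, where n is uniform over the count of active entries, so it can no longer stop on an inactive slot. A small user-space model of the fixed selection (illustrative only):

/* Return the index of the (rnd % active)-th entry with state > 0, or -1. */
static int pick_random_active(const int *state, int num, unsigned int rnd)
{
	int i, j, n = 0;

	for (i = 0; i < num; i++)
		if (state[i] > 0)
			n++;
	if (n == 0)
		return -1;
	n = rnd % n;
	for (j = 0, i = 0; i < num; i++) {
		if (state[i] > 0)
			j++;
		if (j > n)
			break;
	}
	return i;
}
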
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4c6494eb02b5..ccfcc66aaf44 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -465,6 +465,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
struct inode *inode = &ci->vfs_inode;
struct ceph_cap_snap *capsnap;
struct ceph_snap_context *old_snapc, *new_snapc;
+ struct ceph_buffer *old_blob = NULL;
int used, dirty;
capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -541,7 +542,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
capsnap->gid = inode->i_gid;
if (dirty & CEPH_CAP_XATTR_EXCL) {
- __ceph_build_xattrs_blob(ci);
+ old_blob = __ceph_build_xattrs_blob(ci);
capsnap->xattr_blob =
ceph_buffer_get(ci->i_xattrs.blob);
capsnap->xattr_version = ci->i_xattrs.version;
@@ -584,6 +585,7 @@ update_snapc:
}
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
kfree(capsnap);
ceph_put_snap_context(old_snapc);
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index ab4868c7308e..9c9a7c68eea3 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -9,7 +9,8 @@
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -138,258 +139,308 @@ enum {
Opt_readdir_max_entries,
Opt_readdir_max_bytes,
Opt_congestion_kb,
- Opt_last_int,
/* int args above */
Opt_snapdirname,
Opt_mds_namespace,
- Opt_fscache_uniq,
- Opt_last_string,
+ Opt_recover_session,
+ Opt_source,
/* string args above */
Opt_dirstat,
- Opt_nodirstat,
Opt_rbytes,
- Opt_norbytes,
Opt_asyncreaddir,
- Opt_noasyncreaddir,
Opt_dcache,
- Opt_nodcache,
Opt_ino32,
- Opt_noino32,
Opt_fscache,
- Opt_nofscache,
Opt_poolperm,
- Opt_nopoolperm,
Opt_require_active_mds,
- Opt_norequire_active_mds,
-#ifdef CONFIG_CEPH_FS_POSIX_ACL
Opt_acl,
-#endif
- Opt_noacl,
Opt_quotadf,
- Opt_noquotadf,
Opt_copyfrom,
- Opt_nocopyfrom,
};
-static match_table_t fsopt_tokens = {
- {Opt_wsize, "wsize=%d"},
- {Opt_rsize, "rsize=%d"},
- {Opt_rasize, "rasize=%d"},
- {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
- {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
- {Opt_caps_max, "caps_max=%d"},
- {Opt_readdir_max_entries, "readdir_max_entries=%d"},
- {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
- {Opt_congestion_kb, "write_congestion_kb=%d"},
- /* int args above */
- {Opt_snapdirname, "snapdirname=%s"},
- {Opt_mds_namespace, "mds_namespace=%s"},
- {Opt_fscache_uniq, "fsc=%s"},
- /* string args above */
- {Opt_dirstat, "dirstat"},
- {Opt_nodirstat, "nodirstat"},
- {Opt_rbytes, "rbytes"},
- {Opt_norbytes, "norbytes"},
- {Opt_asyncreaddir, "asyncreaddir"},
- {Opt_noasyncreaddir, "noasyncreaddir"},
- {Opt_dcache, "dcache"},
- {Opt_nodcache, "nodcache"},
- {Opt_ino32, "ino32"},
- {Opt_noino32, "noino32"},
- {Opt_fscache, "fsc"},
- {Opt_nofscache, "nofsc"},
- {Opt_poolperm, "poolperm"},
- {Opt_nopoolperm, "nopoolperm"},
- {Opt_require_active_mds, "require_active_mds"},
- {Opt_norequire_active_mds, "norequire_active_mds"},
-#ifdef CONFIG_CEPH_FS_POSIX_ACL
- {Opt_acl, "acl"},
-#endif
- {Opt_noacl, "noacl"},
- {Opt_quotadf, "quotadf"},
- {Opt_noquotadf, "noquotadf"},
- {Opt_copyfrom, "copyfrom"},
- {Opt_nocopyfrom, "nocopyfrom"},
- {-1, NULL}
+enum ceph_recover_session_mode {
+ ceph_recover_session_no,
+ ceph_recover_session_clean
};
-static int parse_fsopt_token(char *c, void *private)
+static const struct fs_parameter_enum ceph_mount_param_enums[] = {
+ { Opt_recover_session, "no", ceph_recover_session_no },
+ { Opt_recover_session, "clean", ceph_recover_session_clean },
+ {}
+};
+
+static const struct fs_parameter_spec ceph_mount_param_specs[] = {
+ fsparam_flag_no ("acl", Opt_acl),
+ fsparam_flag_no ("asyncreaddir", Opt_asyncreaddir),
+ fsparam_u32 ("caps_max", Opt_caps_max),
+ fsparam_u32 ("caps_wanted_delay_max", Opt_caps_wanted_delay_max),
+ fsparam_u32 ("caps_wanted_delay_min", Opt_caps_wanted_delay_min),
+ fsparam_s32 ("write_congestion_kb", Opt_congestion_kb),
+ fsparam_flag_no ("copyfrom", Opt_copyfrom),
+ fsparam_flag_no ("dcache", Opt_dcache),
+ fsparam_flag_no ("dirstat", Opt_dirstat),
+ __fsparam (fs_param_is_string, "fsc", Opt_fscache,
+ fs_param_neg_with_no | fs_param_v_optional),
+ fsparam_flag_no ("ino32", Opt_ino32),
+ fsparam_string ("mds_namespace", Opt_mds_namespace),
+ fsparam_flag_no ("poolperm", Opt_poolperm),
+ fsparam_flag_no ("quotadf", Opt_quotadf),
+ fsparam_u32 ("rasize", Opt_rasize),
+ fsparam_flag_no ("rbytes", Opt_rbytes),
+ fsparam_s32 ("readdir_max_bytes", Opt_readdir_max_bytes),
+ fsparam_s32 ("readdir_max_entries", Opt_readdir_max_entries),
+ fsparam_enum ("recover_session", Opt_recover_session),
+ fsparam_flag_no ("require_active_mds", Opt_require_active_mds),
+ fsparam_u32 ("rsize", Opt_rsize),
+ fsparam_string ("snapdirname", Opt_snapdirname),
+ fsparam_string ("source", Opt_source),
+ fsparam_u32 ("wsize", Opt_wsize),
+ {}
+};
+
+static const struct fs_parameter_description ceph_mount_parameters = {
+ .name = "ceph",
+ .specs = ceph_mount_param_specs,
+ .enums = ceph_mount_param_enums,
+};
+
+struct ceph_parse_opts_ctx {
+ struct ceph_options *copts;
+ struct ceph_mount_options *opts;
+};
+
+/*
+ * Parse the source parameter. Distinguish the server list from the path.
+ * Internally we do not include the leading '/' in the path.
+ *
+ * The source will look like:
+ * <server_spec>[,<server_spec>...]:[<path>]
+ * where
+ * <server_spec> is <ip>[:<port>]
+ * <path> is optional, but if present must begin with '/'
+ */
+static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
{
- struct ceph_mount_options *fsopt = private;
- substring_t argstr[MAX_OPT_ARGS];
- int token, intval, ret;
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
+ char *dev_name = param->string, *dev_name_end;
+ int ret;
- token = match_token((char *)c, fsopt_tokens, argstr);
- if (token < 0)
- return -EINVAL;
+ dout("%s '%s'\n", __func__, dev_name);
+ if (!dev_name || !*dev_name)
+ return invalf(fc, "ceph: Empty source");
- if (token < Opt_last_int) {
- ret = match_int(&argstr[0], &intval);
- if (ret < 0) {
- pr_err("bad option arg (not int) at '%s'\n", c);
- return ret;
+ dev_name_end = strchr(dev_name, '/');
+ if (dev_name_end) {
+ if (strlen(dev_name_end) > 1) {
+ kfree(fsopt->server_path);
+ fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+ if (!fsopt->server_path)
+ return -ENOMEM;
}
- dout("got int token %d val %d\n", token, intval);
- } else if (token > Opt_last_int && token < Opt_last_string) {
- dout("got string token %d val %s\n", token,
- argstr[0].from);
} else {
- dout("got token %d\n", token);
+ dev_name_end = dev_name + strlen(dev_name);
}
+ dev_name_end--; /* back up to ':' separator */
+ if (dev_name_end < dev_name || *dev_name_end != ':')
+ return invalf(fc, "ceph: No path or : separator in source");
+
+ dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
+ if (fsopt->server_path)
+ dout("server path '%s'\n", fsopt->server_path);
+
+ ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
+ pctx->copts, fc);
+ if (ret)
+ return ret;
+
+ fc->source = param->string;
+ param->string = NULL;
+ return 0;
+}
+
+static int ceph_parse_mount_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
+ struct fs_parse_result result;
+ unsigned int mode;
+ int token, ret;
+
+ ret = ceph_parse_param(param, pctx->copts, fc);
+ if (ret != -ENOPARAM)
+ return ret;
+
+ token = fs_parse(fc, &ceph_mount_parameters, param, &result);
+ dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
+ if (token < 0)
+ return token;
+
switch (token) {
case Opt_snapdirname:
kfree(fsopt->snapdir_name);
- fsopt->snapdir_name = kstrndup(argstr[0].from,
- argstr[0].to-argstr[0].from,
- GFP_KERNEL);
- if (!fsopt->snapdir_name)
- return -ENOMEM;
+ fsopt->snapdir_name = param->string;
+ param->string = NULL;
break;
case Opt_mds_namespace:
kfree(fsopt->mds_namespace);
- fsopt->mds_namespace = kstrndup(argstr[0].from,
- argstr[0].to-argstr[0].from,
- GFP_KERNEL);
- if (!fsopt->mds_namespace)
- return -ENOMEM;
+ fsopt->mds_namespace = param->string;
+ param->string = NULL;
break;
- case Opt_fscache_uniq:
- kfree(fsopt->fscache_uniq);
- fsopt->fscache_uniq = kstrndup(argstr[0].from,
- argstr[0].to-argstr[0].from,
- GFP_KERNEL);
- if (!fsopt->fscache_uniq)
- return -ENOMEM;
- fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
+ case Opt_recover_session:
+ mode = result.uint_32;
+ if (mode == ceph_recover_session_no)
+ fsopt->flags &= ~CEPH_MOUNT_OPT_CLEANRECOVER;
+ else if (mode == ceph_recover_session_clean)
+ fsopt->flags |= CEPH_MOUNT_OPT_CLEANRECOVER;
+ else
+ BUG();
break;
- /* misc */
+ case Opt_source:
+ if (fc->source)
+ return invalf(fc, "ceph: Multiple sources specified");
+ return ceph_parse_source(param, fc);
case Opt_wsize:
- if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
- return -EINVAL;
- fsopt->wsize = ALIGN(intval, PAGE_SIZE);
+ if (result.uint_32 < PAGE_SIZE ||
+ result.uint_32 > CEPH_MAX_WRITE_SIZE)
+ goto out_of_range;
+ fsopt->wsize = ALIGN(result.uint_32, PAGE_SIZE);
break;
case Opt_rsize:
- if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_READ_SIZE)
- return -EINVAL;
- fsopt->rsize = ALIGN(intval, PAGE_SIZE);
+ if (result.uint_32 < PAGE_SIZE ||
+ result.uint_32 > CEPH_MAX_READ_SIZE)
+ goto out_of_range;
+ fsopt->rsize = ALIGN(result.uint_32, PAGE_SIZE);
break;
case Opt_rasize:
- if (intval < 0)
- return -EINVAL;
- fsopt->rasize = ALIGN(intval, PAGE_SIZE);
+ fsopt->rasize = ALIGN(result.uint_32, PAGE_SIZE);
break;
case Opt_caps_wanted_delay_min:
- if (intval < 1)
- return -EINVAL;
- fsopt->caps_wanted_delay_min = intval;
+ if (result.uint_32 < 1)
+ goto out_of_range;
+ fsopt->caps_wanted_delay_min = result.uint_32;
break;
case Opt_caps_wanted_delay_max:
- if (intval < 1)
- return -EINVAL;
- fsopt->caps_wanted_delay_max = intval;
+ if (result.uint_32 < 1)
+ goto out_of_range;
+ fsopt->caps_wanted_delay_max = result.uint_32;
break;
case Opt_caps_max:
- if (intval < 0)
- return -EINVAL;
- fsopt->caps_max = intval;
+ fsopt->caps_max = result.uint_32;
break;
case Opt_readdir_max_entries:
- if (intval < 1)
- return -EINVAL;
- fsopt->max_readdir = intval;
+ if (result.uint_32 < 1)
+ goto out_of_range;
+ fsopt->max_readdir = result.uint_32;
break;
case Opt_readdir_max_bytes:
- if (intval < (int)PAGE_SIZE && intval != 0)
- return -EINVAL;
- fsopt->max_readdir_bytes = intval;
+ if (result.uint_32 < PAGE_SIZE && result.uint_32 != 0)
+ goto out_of_range;
+ fsopt->max_readdir_bytes = result.uint_32;
break;
case Opt_congestion_kb:
- if (intval < 1024) /* at least 1M */
- return -EINVAL;
- fsopt->congestion_kb = intval;
+ if (result.uint_32 < 1024) /* at least 1M */
+ goto out_of_range;
+ fsopt->congestion_kb = result.uint_32;
break;
case Opt_dirstat:
- fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
- break;
- case Opt_nodirstat:
- fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
+ if (!result.negated)
+ fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
+ else
+ fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
break;
case Opt_rbytes:
- fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
- break;
- case Opt_norbytes:
- fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
+ if (!result.negated)
+ fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
+ else
+ fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
break;
case Opt_asyncreaddir:
- fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
- break;
- case Opt_noasyncreaddir:
- fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
+ if (!result.negated)
+ fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
+ else
+ fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
break;
case Opt_dcache:
- fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
- break;
- case Opt_nodcache:
- fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
+ if (!result.negated)
+ fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
+ else
+ fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
break;
case Opt_ino32:
- fsopt->flags |= CEPH_MOUNT_OPT_INO32;
- break;
- case Opt_noino32:
- fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
+ if (!result.negated)
+ fsopt->flags |= CEPH_MOUNT_OPT_INO32;
+ else
+ fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
break;
+
case Opt_fscache:
- fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
- kfree(fsopt->fscache_uniq);
- fsopt->fscache_uniq = NULL;
- break;
- case Opt_nofscache:
- fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
+#ifdef CONFIG_CEPH_FSCACHE
kfree(fsopt->fscache_uniq);
fsopt->fscache_uniq = NULL;
+ if (result.negated) {
+ fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
+ } else {
+ fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
+ fsopt->fscache_uniq = param->string;
+ param->string = NULL;
+ }
break;
+#else
+ return invalf(fc, "ceph: fscache support is disabled");
+#endif
case Opt_poolperm:
- fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
- break;
- case Opt_nopoolperm:
- fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
+ if (!result.negated)
+ fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
+ else
+ fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
break;
case Opt_require_active_mds:
- fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
- break;
- case Opt_norequire_active_mds:
- fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
+ if (!result.negated)
+ fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
+ else
+ fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
break;
case Opt_quotadf:
- fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
- break;
- case Opt_noquotadf:
- fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
+ if (!result.negated)
+ fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
+ else
+ fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
break;
case Opt_copyfrom:
- fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
- break;
- case Opt_nocopyfrom:
- fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
+ if (!result.negated)
+ fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
+ else
+ fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
break;
-#ifdef CONFIG_CEPH_FS_POSIX_ACL
case Opt_acl:
- fsopt->sb_flags |= SB_POSIXACL;
- break;
+ if (!result.negated) {
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ fc->sb_flags |= SB_POSIXACL;
+#else
+ return invalf(fc, "ceph: POSIX ACL support is disabled");
#endif
- case Opt_noacl:
- fsopt->sb_flags &= ~SB_POSIXACL;
+ } else {
+ fc->sb_flags &= ~SB_POSIXACL;
+ }
break;
default:
- BUG_ON(token);
+ BUG();
}
return 0;
+
+out_of_range:
+ return invalf(fc, "ceph: %s out of range", param->key);
}
static void destroy_mount_options(struct ceph_mount_options *args)
{
dout("destroy_mount_options %p\n", args);
+ if (!args)
+ return;
+
kfree(args->snapdir_name);
kfree(args->mds_namespace);
kfree(args->server_path);
@@ -437,91 +488,6 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
return ceph_compare_options(new_opt, fsc->client);
}
-static int parse_mount_options(struct ceph_mount_options **pfsopt,
- struct ceph_options **popt,
- int flags, char *options,
- const char *dev_name)
-{
- struct ceph_mount_options *fsopt;
- const char *dev_name_end;
- int err;
-
- if (!dev_name || !*dev_name)
- return -EINVAL;
-
- fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
- if (!fsopt)
- return -ENOMEM;
-
- dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
-
- fsopt->sb_flags = flags;
- fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
-
- fsopt->wsize = CEPH_MAX_WRITE_SIZE;
- fsopt->rsize = CEPH_MAX_READ_SIZE;
- fsopt->rasize = CEPH_RASIZE_DEFAULT;
- fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
- if (!fsopt->snapdir_name) {
- err = -ENOMEM;
- goto out;
- }
-
- fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
- fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
- fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
- fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
- fsopt->congestion_kb = default_congestion_kb();
-
- /*
- * Distinguish the server list from the path in "dev_name".
- * Internally we do not include the leading '/' in the path.
- *
- * "dev_name" will look like:
- * <server_spec>[,<server_spec>...]:[<path>]
- * where
- * <server_spec> is <ip>[:<port>]
- * <path> is optional, but if present must begin with '/'
- */
- dev_name_end = strchr(dev_name, '/');
- if (dev_name_end) {
- if (strlen(dev_name_end) > 1) {
- fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
- if (!fsopt->server_path) {
- err = -ENOMEM;
- goto out;
- }
- }
- } else {
- dev_name_end = dev_name + strlen(dev_name);
- }
- err = -EINVAL;
- dev_name_end--; /* back up to ':' separator */
- if (dev_name_end < dev_name || *dev_name_end != ':') {
- pr_err("device name is missing path (no : separator in %s)\n",
- dev_name);
- goto out;
- }
- dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
- if (fsopt->server_path)
- dout("server path '%s'\n", fsopt->server_path);
-
- *popt = ceph_parse_options(options, dev_name, dev_name_end,
- parse_fsopt_token, (void *)fsopt);
- if (IS_ERR(*popt)) {
- err = PTR_ERR(*popt);
- goto out;
- }
-
- /* success */
- *pfsopt = fsopt;
- return 0;
-
-out:
- destroy_mount_options(fsopt);
- return err;
-}
-
/**
* ceph_show_options - Show mount options in /proc/mounts
* @m: seq_file to write to
@@ -565,7 +531,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",noquotadf");
#ifdef CONFIG_CEPH_FS_POSIX_ACL
- if (fsopt->sb_flags & SB_POSIXACL)
+ if (root->d_sb->s_flags & SB_POSIXACL)
seq_puts(m, ",acl");
else
seq_puts(m, ",noacl");
@@ -576,6 +542,10 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
if (fsopt->mds_namespace)
seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
+
+ if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
+ seq_show_option(m, "recover_session", "clean");
+
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
seq_printf(m, ",wsize=%d", fsopt->wsize);
if (fsopt->rsize != CEPH_MAX_READ_SIZE)
@@ -664,6 +634,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
fsc->sb = NULL;
fsc->mount_state = CEPH_MOUNT_MOUNTING;
+ fsc->filp_gen = 1;
atomic_long_set(&fsc->writeback_count, 0);
@@ -713,6 +684,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
{
dout("destroy_fs_client %p\n", fsc);
+ ceph_mdsc_destroy(fsc);
destroy_workqueue(fsc->inode_wq);
destroy_workqueue(fsc->cap_wq);
@@ -829,13 +801,7 @@ static void ceph_umount_begin(struct super_block *sb)
fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
ceph_mdsc_force_umount(fsc->mdsc);
- return;
-}
-
-static int ceph_remount(struct super_block *sb, int *flags, char *data)
-{
- sync_filesystem(sb);
- return 0;
+ fsc->filp_gen++; /* invalidate open files */
}
static const struct super_operations ceph_super_ops = {
@@ -846,7 +812,6 @@ static const struct super_operations ceph_super_ops = {
.evict_inode = ceph_evict_inode,
.sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
- .remount_fs = ceph_remount,
.show_options = ceph_show_options,
.statfs = ceph_statfs,
.umount_begin = ceph_umount_begin,
@@ -907,7 +872,8 @@ out:
/*
* mount: join the ceph cluster, and open root directory.
*/
-static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
+static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
+ struct fs_context *fc)
{
int err;
unsigned long started = jiffies; /* note the start time */
@@ -924,7 +890,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
/* setup fscache */
if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
- err = ceph_fscache_register_fs(fsc);
+ err = ceph_fscache_register_fs(fsc, fc);
if (err < 0)
goto out;
}
@@ -959,18 +925,16 @@ out:
return ERR_PTR(err);
}
-static int ceph_set_super(struct super_block *s, void *data)
+static int ceph_set_super(struct super_block *s, struct fs_context *fc)
{
- struct ceph_fs_client *fsc = data;
+ struct ceph_fs_client *fsc = s->s_fs_info;
int ret;
- dout("set_super %p data %p\n", s, data);
+ dout("set_super %p\n", s);
- s->s_flags = fsc->mount_options->sb_flags;
s->s_maxbytes = MAX_LFS_FILESIZE;
s->s_xattr = ceph_xattr_handlers;
- s->s_fs_info = fsc;
fsc->sb = s;
fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */
@@ -979,25 +943,21 @@ static int ceph_set_super(struct super_block *s, void *data)
s->s_export_op = &ceph_export_ops;
s->s_time_gran = 1;
+ s->s_time_min = 0;
+ s->s_time_max = U32_MAX;
- ret = set_anon_super(s, NULL); /* what is that second arg for? */
+ ret = set_anon_super_fc(s, fc);
if (ret != 0)
- goto fail;
-
- return ret;
-
-fail:
- s->s_fs_info = NULL;
- fsc->sb = NULL;
+ fsc->sb = NULL;
return ret;
}
/*
* share superblock if same fs AND options
*/
-static int ceph_compare_super(struct super_block *sb, void *data)
+static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
{
- struct ceph_fs_client *new = data;
+ struct ceph_fs_client *new = fc->s_fs_info;
struct ceph_mount_options *fsopt = new->mount_options;
struct ceph_options *opt = new->client->options;
struct ceph_fs_client *other = ceph_sb_to_client(sb);
@@ -1013,7 +973,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
dout("fsid doesn't match\n");
return 0;
}
- if (fsopt->sb_flags != other->mount_options->sb_flags) {
+ if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
dout("flags differ\n");
return 0;
}
@@ -1043,69 +1003,69 @@ static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
return 0;
}
-static struct dentry *ceph_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ceph_get_tree(struct fs_context *fc)
{
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
struct super_block *sb;
struct ceph_fs_client *fsc;
struct dentry *res;
+ int (*compare_super)(struct super_block *, struct fs_context *) =
+ ceph_compare_super;
int err;
- int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
- struct ceph_mount_options *fsopt = NULL;
- struct ceph_options *opt = NULL;
- dout("ceph_mount\n");
+ dout("ceph_get_tree\n");
+
+ if (!fc->source)
+ return invalf(fc, "ceph: No source");
#ifdef CONFIG_CEPH_FS_POSIX_ACL
- flags |= SB_POSIXACL;
+ fc->sb_flags |= SB_POSIXACL;
#endif
- err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
- if (err < 0) {
- res = ERR_PTR(err);
- goto out_final;
- }
/* create client (which we may/may not use) */
- fsc = create_fs_client(fsopt, opt);
+ fsc = create_fs_client(pctx->opts, pctx->copts);
+ pctx->opts = NULL;
+ pctx->copts = NULL;
if (IS_ERR(fsc)) {
- res = ERR_CAST(fsc);
+ err = PTR_ERR(fsc);
goto out_final;
}
err = ceph_mdsc_init(fsc);
- if (err < 0) {
- res = ERR_PTR(err);
+ if (err < 0)
goto out;
- }
if (ceph_test_opt(fsc->client, NOSHARE))
compare_super = NULL;
- sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
+
+ fc->s_fs_info = fsc;
+ sb = sget_fc(fc, compare_super, ceph_set_super);
+ fc->s_fs_info = NULL;
if (IS_ERR(sb)) {
- res = ERR_CAST(sb);
+ err = PTR_ERR(sb);
goto out;
}
if (ceph_sb_to_client(sb) != fsc) {
- ceph_mdsc_destroy(fsc);
destroy_fs_client(fsc);
fsc = ceph_sb_to_client(sb);
dout("get_sb got existing client %p\n", fsc);
} else {
dout("get_sb using new client %p\n", fsc);
err = ceph_setup_bdi(sb, fsc);
- if (err < 0) {
- res = ERR_PTR(err);
+ if (err < 0)
goto out_splat;
- }
}
- res = ceph_real_mount(fsc);
- if (IS_ERR(res))
+ res = ceph_real_mount(fsc, fc);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
goto out_splat;
+ }
dout("root %p inode %p ino %llx.%llx\n", res,
d_inode(res), ceph_vinop(d_inode(res)));
- return res;
+ fc->root = fsc->sb->s_root;
+ return 0;
out_splat:
ceph_mdsc_close_sessions(fsc->mdsc);
@@ -1113,11 +1073,81 @@ out_splat:
goto out_final;
out:
- ceph_mdsc_destroy(fsc);
destroy_fs_client(fsc);
out_final:
- dout("ceph_mount fail %ld\n", PTR_ERR(res));
- return res;
+ dout("ceph_get_tree fail %d\n", err);
+ return err;
+}
+
+static void ceph_free_fc(struct fs_context *fc)
+{
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+
+ if (pctx) {
+ destroy_mount_options(pctx->opts);
+ ceph_destroy_options(pctx->copts);
+ kfree(pctx);
+ }
+}
+
+static int ceph_reconfigure_fc(struct fs_context *fc)
+{
+ sync_filesystem(fc->root->d_sb);
+ return 0;
+}
+
+static const struct fs_context_operations ceph_context_ops = {
+ .free = ceph_free_fc,
+ .parse_param = ceph_parse_mount_param,
+ .get_tree = ceph_get_tree,
+ .reconfigure = ceph_reconfigure_fc,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int ceph_init_fs_context(struct fs_context *fc)
+{
+ struct ceph_parse_opts_ctx *pctx;
+ struct ceph_mount_options *fsopt;
+
+ pctx = kzalloc(sizeof(*pctx), GFP_KERNEL);
+ if (!pctx)
+ return -ENOMEM;
+
+ pctx->copts = ceph_alloc_options();
+ if (!pctx->copts)
+ goto nomem;
+
+ pctx->opts = kzalloc(sizeof(*pctx->opts), GFP_KERNEL);
+ if (!pctx->opts)
+ goto nomem;
+
+ fsopt = pctx->opts;
+ fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
+
+ fsopt->wsize = CEPH_MAX_WRITE_SIZE;
+ fsopt->rsize = CEPH_MAX_READ_SIZE;
+ fsopt->rasize = CEPH_RASIZE_DEFAULT;
+ fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
+ if (!fsopt->snapdir_name)
+ goto nomem;
+
+ fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
+ fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
+ fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
+ fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
+ fsopt->congestion_kb = default_congestion_kb();
+
+ fc->fs_private = pctx;
+ fc->ops = &ceph_context_ops;
+ return 0;
+
+nomem:
+ destroy_mount_options(pctx->opts);
+ ceph_destroy_options(pctx->copts);
+ kfree(pctx);
+ return -ENOMEM;
}
static void ceph_kill_sb(struct super_block *s)
@@ -1137,8 +1167,6 @@ static void ceph_kill_sb(struct super_block *s)
ceph_fscache_unregister_fs(fsc);
- ceph_mdsc_destroy(fsc);
-
destroy_fs_client(fsc);
free_anon_bdev(dev);
}
@@ -1146,12 +1174,39 @@ static void ceph_kill_sb(struct super_block *s)
static struct file_system_type ceph_fs_type = {
.owner = THIS_MODULE,
.name = "ceph",
- .mount = ceph_mount,
+ .init_fs_context = ceph_init_fs_context,
.kill_sb = ceph_kill_sb,
.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");
+int ceph_force_reconnect(struct super_block *sb)
+{
+ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ int err = 0;
+
+ ceph_umount_begin(sb);
+
+ /* Make sure all page caches get invalidated.
+ * see remove_session_caps_cb() */
+ flush_workqueue(fsc->inode_wq);
+
+ /* In case we were blacklisted. This also resets
+ * all mon/osd connections */
+ ceph_reset_client_addr(fsc->client);
+
+ ceph_osdc_clear_abort_err(&fsc->client->osdc);
+
+ fsc->blacklisted = false;
+ fsc->mount_state = CEPH_MOUNT_MOUNTED;
+
+ if (sb->s_root) {
+ err = __ceph_do_getattr(d_inode(sb->s_root), NULL,
+ CEPH_STAT_CAP_INODE, true);
+ }
+ return err;
+}
+
static int __init init_ceph(void)
{
int ret = init_caches();
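
With the option table converted to the new mount API, a mount that opts into clean session recovery might look like this (monitor addresses, credentials and sizes are placeholders):

mount -t ceph 192.168.0.1:6789,192.168.0.2:6789:/ /mnt/cephfs \
	-o name=admin,recover_session=clean,wsize=16777216

recover_session=clean sets CEPH_MOUNT_OPT_CLEANRECOVER; once the client notices it has been blacklisted, maybe_recover_session() calls ceph_force_reconnect() at most once every 30 minutes (HZ * 60 * 30 jiffies).
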
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d2352fd95dbc..f0f9cb7447ac 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/posix_acl.h>
#include <linux/refcount.h>
+#include <linux/security.h>
#include <linux/ceph/libceph.h>
@@ -31,6 +32,7 @@
#define CEPH_BLOCK_SHIFT 22 /* 4 MB */
#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
+#define CEPH_MOUNT_OPT_CLEANRECOVER (1<<1) /* auto reconnect (clean mode) after being blacklisted */
#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
#define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
@@ -72,7 +74,6 @@
struct ceph_mount_options {
int flags;
- int sb_flags;
int wsize; /* max write size */
int rsize; /* max read size */
@@ -101,6 +102,11 @@ struct ceph_fs_client {
struct ceph_client *client;
unsigned long mount_state;
+
+ unsigned long last_auto_reconnect;
+ bool blacklisted;
+
+ u32 filp_gen;
loff_t max_file_size;
struct ceph_mds_client *mdsc;
@@ -395,25 +401,31 @@ struct ceph_inode_info {
struct fscache_cookie *fscache;
u32 i_fscache_gen;
#endif
+ errseq_t i_meta_err;
+
struct inode vfs_inode; /* at end */
};
-static inline struct ceph_inode_info *ceph_inode(struct inode *inode)
+static inline struct ceph_inode_info *
+ceph_inode(const struct inode *inode)
{
return container_of(inode, struct ceph_inode_info, vfs_inode);
}
-static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
+static inline struct ceph_fs_client *
+ceph_inode_to_client(const struct inode *inode)
{
return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
}
-static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
+static inline struct ceph_fs_client *
+ceph_sb_to_client(const struct super_block *sb)
{
return (struct ceph_fs_client *)sb->s_fs_info;
}
-static inline struct ceph_vino ceph_vino(struct inode *inode)
+static inline struct ceph_vino
+ceph_vino(const struct inode *inode)
{
return ceph_inode(inode)->i_vino;
}
@@ -499,17 +511,16 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_DIR_ORDERED (1 << 0) /* dentries in dir are ordered */
#define CEPH_I_NODELAY (1 << 1) /* do not delay cap release */
#define CEPH_I_FLUSH (1 << 2) /* do not delay flush of dirty metadata */
-#define CEPH_I_NOFLUSH (1 << 3) /* do not flush dirty caps */
-#define CEPH_I_POOL_PERM (1 << 4) /* pool rd/wr bits are valid */
-#define CEPH_I_POOL_RD (1 << 5) /* can read from pool */
-#define CEPH_I_POOL_WR (1 << 6) /* can write to pool */
-#define CEPH_I_SEC_INITED (1 << 7) /* security initialized */
-#define CEPH_I_CAP_DROPPED (1 << 8) /* caps were forcibly dropped */
-#define CEPH_I_KICK_FLUSH (1 << 9) /* kick flushing caps */
-#define CEPH_I_FLUSH_SNAPS (1 << 10) /* need flush snapss */
-#define CEPH_I_ERROR_WRITE (1 << 11) /* have seen write errors */
-#define CEPH_I_ERROR_FILELOCK (1 << 12) /* have seen file lock errors */
-
+#define CEPH_I_POOL_PERM (1 << 3) /* pool rd/wr bits are valid */
+#define CEPH_I_POOL_RD (1 << 4) /* can read from pool */
+#define CEPH_I_POOL_WR (1 << 5) /* can write to pool */
+#define CEPH_I_SEC_INITED (1 << 6) /* security initialized */
+#define CEPH_I_CAP_DROPPED (1 << 7) /* caps were forcibly dropped */
+#define CEPH_I_KICK_FLUSH (1 << 8) /* kick flushing caps */
+#define CEPH_I_FLUSH_SNAPS (1 << 9) /* need flush snaps */
+#define CEPH_I_ERROR_WRITE (1 << 10) /* have seen write errors */
+#define CEPH_I_ERROR_FILELOCK (1 << 11) /* have seen file lock errors */
+#define CEPH_I_ODIRECT (1 << 12) /* inode in direct I/O mode */
/*
* Masks of ceph inode work.
@@ -703,6 +714,10 @@ struct ceph_file_info {
spinlock_t rw_contexts_lock;
struct list_head rw_contexts;
+
+ errseq_t meta_err;
+ u32 filp_gen;
+ atomic_t num_locks;
};
struct ceph_dir_file_info {
@@ -842,7 +857,8 @@ static inline int default_congestion_kb(void)
}
-
+/* super.c */
+extern int ceph_force_reconnect(struct super_block *sb);
/* snap.c */
struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
u64 ino);
@@ -926,7 +942,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
extern const struct xattr_handler *ceph_xattr_handlers[];
@@ -959,7 +975,10 @@ static inline bool ceph_security_xattr_wanted(struct inode *in)
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
extern int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
struct ceph_acl_sec_ctx *ctx);
-extern void ceph_security_invalidate_secctx(struct inode *inode);
+static inline void ceph_security_invalidate_secctx(struct inode *inode)
+{
+ security_inode_invalidate_secctx(inode);
+}
#else
static inline int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
struct ceph_acl_sec_ctx *ctx)
@@ -1039,7 +1058,6 @@ extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
int mds);
-extern int ceph_get_cap_mds(struct inode *inode);
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
@@ -1058,9 +1076,9 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
struct inode *dir,
int mds, int drop, int unless);
-extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
+extern int ceph_get_caps(struct file *filp, int need, int want,
loff_t endoff, int *got, struct page **pinned_page);
-extern int ceph_try_get_caps(struct ceph_inode_info *ci,
+extern int ceph_try_get_caps(struct inode *inode,
int need, int want, bool nonblock, int *got);
/* for counting open files by mode */
@@ -1071,7 +1089,7 @@ extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode);
extern const struct address_space_operations ceph_aops;
extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
-extern int ceph_pool_perm_check(struct ceph_inode_info *ci, int need);
+extern int ceph_pool_perm_check(struct inode *inode, int need);
extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
/* file.c */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 37b458a9af3a..cb18ee637cb7 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -20,7 +20,8 @@ static int __remove_xattr(struct ceph_inode_info *ci,
static bool ceph_is_valid_xattr(const char *name)
{
- return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
+ return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) ||
+ !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
@@ -754,12 +755,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
/*
* If there are dirty xattrs, reencode xattrs into the prealloc_blob
- * and swap into place.
+ * and swap into place. The old i_xattrs.blob (or NULL) is returned so
+ * that the caller can free it after dropping i_ceph_lock, since the
+ * lock is likely to be held while this runs.
*/
-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
struct rb_node *p;
struct ceph_inode_xattr *xattr = NULL;
+ struct ceph_buffer *old_blob = NULL;
void *dest;
dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -790,12 +794,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
if (ci->i_xattrs.blob)
- ceph_buffer_put(ci->i_xattrs.blob);
+ old_blob = ci->i_xattrs.blob;
ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
ci->i_xattrs.prealloc_blob = NULL;
ci->i_xattrs.dirty = false;
ci->i_xattrs.version++;
}
+
+ return old_blob;
}
static inline int __get_request_mask(struct inode *in) {
@@ -887,7 +893,8 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
memcpy(value, xattr->val, xattr->val_len);
if (current->journal_info &&
- !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
+ !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+ security_ismaclabel(name + XATTR_SECURITY_PREFIX_LEN))
ci->i_ceph_flags |= CEPH_I_SEC_INITED;
out:
spin_unlock(&ci->i_ceph_lock);
@@ -898,11 +905,9 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
struct inode *inode = d_inode(dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
bool len_only = (size == 0);
u32 namelen;
int err;
- int i;
spin_lock(&ci->i_ceph_lock);
dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
@@ -931,33 +936,6 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
names = __copy_xattr_names(ci, names);
size -= namelen;
}
-
-
- /* virtual xattr names, too */
- if (vxattrs) {
- for (i = 0; vxattrs[i].name; i++) {
- size_t this_len;
-
- if (vxattrs[i].flags & VXATTR_FLAG_HIDDEN)
- continue;
- if (vxattrs[i].exists_cb && !vxattrs[i].exists_cb(ci))
- continue;
-
- this_len = strlen(vxattrs[i].name) + 1;
- namelen += this_len;
- if (len_only)
- continue;
-
- if (this_len > size) {
- err = -ERANGE;
- goto out;
- }
-
- memcpy(names, vxattrs[i].name, this_len);
- names += this_len;
- size -= this_len;
- }
- }
err = namelen;
out:
spin_unlock(&ci->i_ceph_lock);
@@ -1036,6 +1014,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf = NULL;
+ struct ceph_buffer *old_blob = NULL;
int issued;
int err;
int dirty = 0;
@@ -1109,13 +1088,15 @@ retry:
struct ceph_buffer *blob;
spin_unlock(&ci->i_ceph_lock);
- dout(" preaallocating new blob size=%d\n", required_blob_size);
+ ceph_buffer_put(old_blob); /* Shouldn't be required */
+ dout(" pre-allocating new blob size=%d\n", required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto do_sync_unlocked;
spin_lock(&ci->i_ceph_lock);
+ /* prealloc_blob can't be released while holding i_ceph_lock */
if (ci->i_xattrs.prealloc_blob)
- ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+ old_blob = ci->i_xattrs.prealloc_blob;
ci->i_xattrs.prealloc_blob = blob;
goto retry;
}
@@ -1131,6 +1112,7 @@ retry:
}
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
if (lock_snap_rwsem)
up_read(&mdsc->snap_rwsem);
if (dirty)
@@ -1284,42 +1266,8 @@ out:
ceph_pagelist_release(pagelist);
return err;
}
-
-void ceph_security_invalidate_secctx(struct inode *inode)
-{
- security_inode_invalidate_secctx(inode);
-}
-
-static int ceph_xattr_set_security_label(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *key, const void *buf,
- size_t buflen, int flags)
-{
- if (security_ismaclabel(key)) {
- const char *name = xattr_full_name(handler, key);
- return __ceph_setxattr(inode, name, buf, buflen, flags);
- }
- return -EOPNOTSUPP;
-}
-
-static int ceph_xattr_get_security_label(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *key, void *buf, size_t buflen)
-{
- if (security_ismaclabel(key)) {
- const char *name = xattr_full_name(handler, key);
- return __ceph_getxattr(inode, name, buf, buflen);
- }
- return -EOPNOTSUPP;
-}
-
-static const struct xattr_handler ceph_security_label_handler = {
- .prefix = XATTR_SECURITY_PREFIX,
- .get = ceph_xattr_get_security_label,
- .set = ceph_xattr_set_security_label,
-};
-#endif
-#endif
+#endif /* CONFIG_CEPH_FS_SECURITY_LABEL */
+#endif /* CONFIG_SECURITY */
void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
{
@@ -1343,9 +1291,6 @@ const struct xattr_handler *ceph_xattr_handlers[] = {
&posix_acl_access_xattr_handler,
&posix_acl_default_xattr_handler,
#endif
-#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
- &ceph_security_label_handler,
-#endif
&ceph_other_xattr_handler,
NULL,
};
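
The __ceph_build_xattrs_blob() hunks above hand the displaced xattr blob back to the caller instead of dropping it under i_ceph_lock, since ceph_buffer_put() may free memory and must not run under the spinlock. A minimal sketch of the intended caller pattern, assuming (as the __ceph_setxattr() hunk already relies on) that ceph_buffer_put() accepts a NULL argument:

	struct ceph_buffer *old_blob;

	spin_lock(&ci->i_ceph_lock);
	old_blob = __ceph_build_xattrs_blob(ci);	/* displaced blob or NULL */
	spin_unlock(&ci->i_ceph_lock);
	ceph_buffer_put(old_blob);			/* safe once the lock is dropped */
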
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index b16219e5dac9..22cf04fb32d3 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -16,7 +16,7 @@ config CIFS
select CRYPTO_GCM
select CRYPTO_ECB
select CRYPTO_AES
- select CRYPTO_DES
+ select CRYPTO_LIB_DES
select KEYS
help
This is the client VFS module for the SMB3 family of NAS protocols,
@@ -211,3 +211,11 @@ config CIFS_FSCACHE
Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
to be cached locally on disk through the general filesystem cache
manager. If unsure, say N.
+
+config CIFS_ROOT
+ bool "SMB root file system (Experimental)"
+ depends on CIFS=y && IP_PNP
+ help
+ Enables root file system support over SMB protocol.
+
+ Most people say N here.
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 41332f20055b..51bae9340842 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -21,3 +21,5 @@ cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o dfs_cache.o
cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
+
+cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index a38d796f5ffe..19f6e592b941 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -122,6 +122,27 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
}
static void
+cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan)
+{
+ struct TCP_Server_Info *server = chan->server;
+
+ seq_printf(m, "\t\tChannel %d Number of credits: %d Dialect 0x%x "
+ "TCP status: %d Instance: %d Local Users To Server: %d "
+ "SecMode: 0x%x Req On Wire: %d In Send: %d "
+ "In MaxReq Wait: %d\n",
+ i+1,
+ server->credits,
+ server->dialect,
+ server->tcpStatus,
+ server->reconnect_instance,
+ server->srv_count,
+ server->sec_mode,
+ in_flight(server),
+ atomic_read(&server->in_send),
+ atomic_read(&server->num_waiters));
+}
+
+static void
cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
{
struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
@@ -256,6 +277,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
if (!server->rdma)
goto skip_rdma;
+ if (!server->smbd_conn) {
+ seq_printf(m, "\nSMBDirect transport not available");
+ goto skip_rdma;
+ }
+
seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
"transport status: %x",
server->smbd_conn->protocol,
@@ -360,11 +386,10 @@ skip_rdma:
server->srv_count,
server->sec_mode, in_flight(server));
-#ifdef CONFIG_CIFS_STATS2
seq_printf(m, " In Send: %d In MaxReq Wait: %d",
atomic_read(&server->in_send),
atomic_read(&server->num_waiters));
-#endif
+
/* dump session id helpful for use with network trace */
seq_printf(m, " SessionId: 0x%llx", ses->Suid);
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
@@ -372,6 +397,13 @@ skip_rdma:
if (ses->sign)
seq_puts(m, " signed");
+ if (ses->chan_count > 1) {
+ seq_printf(m, "\n\n\tExtra Channels: %zu\n",
+ ses->chan_count-1);
+ for (j = 1; j < ses->chan_count; j++)
+ cifs_dump_channel(m, j, &ses->chans[j]);
+ }
+
seq_puts(m, "\n\tShares:");
j = 0;
@@ -410,8 +442,13 @@ skip_rdma:
seq_printf(m, "\n\tServer interfaces: %zu\n",
ses->iface_count);
for (j = 0; j < ses->iface_count; j++) {
+ struct cifs_server_iface *iface;
+
+ iface = &ses->iface_list[j];
seq_printf(m, "\t%d)", j);
- cifs_dump_iface(m, &ses->iface_list[j]);
+ cifs_dump_iface(m, iface);
+ if (is_ses_using_iface(ses, iface))
+ seq_puts(m, "\t\t[CONNECTED]\n");
}
spin_unlock(&ses->iface_lock);
}
@@ -452,6 +489,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+ server->max_in_flight = 0;
#ifdef CONFIG_CIFS_STATS2
for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
atomic_set(&server->num_cmds[i], 0);
@@ -526,6 +564,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+ seq_printf(m, "\nMax requests in flight: %d", server->max_in_flight);
#ifdef CONFIG_CIFS_STATS2
seq_puts(m, "\nTotal time spent processing by command. Time ");
seq_printf(m, "units are jiffies (%d per second)\n", HZ);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 3d392620a2f4..100b0056a369 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -80,6 +80,60 @@ do { \
type, fmt, ##__VA_ARGS__); \
} while (0)
+#define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
+do { \
+ const char *sn = ""; \
+ if (server && server->hostname) \
+ sn = server->hostname; \
+ if ((type) & FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
+ __FILE__, sn, ##__VA_ARGS__); \
+ } else if ((type) & VFS) { \
+ pr_err_ ## ratefunc("CIFS VFS: \\\\%s " fmt, \
+ sn, ##__VA_ARGS__); \
+ } else if ((type) & NOISY && (NOISY != 0)) { \
+ pr_debug_ ## ratefunc("\\\\%s " fmt, \
+ sn, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define cifs_server_dbg(type, fmt, ...) \
+do { \
+ if ((type) & ONCE) \
+ cifs_server_dbg_func(once, \
+ type, fmt, ##__VA_ARGS__); \
+ else \
+ cifs_server_dbg_func(ratelimited, \
+ type, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define cifs_tcon_dbg_func(ratefunc, type, fmt, ...) \
+do { \
+ const char *tn = ""; \
+ if (tcon && tcon->treeName) \
+ tn = tcon->treeName; \
+ if ((type) & FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ ## ratefunc("%s: %s " fmt, \
+ __FILE__, tn, ##__VA_ARGS__); \
+ } else if ((type) & VFS) { \
+ pr_err_ ## ratefunc("CIFS VFS: %s " fmt, \
+ tn, ##__VA_ARGS__); \
+ } else if ((type) & NOISY && (NOISY != 0)) { \
+ pr_debug_ ## ratefunc("%s " fmt, \
+ tn, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define cifs_tcon_dbg(type, fmt, ...) \
+do { \
+ if ((type) & ONCE) \
+ cifs_tcon_dbg_func(once, \
+ type, fmt, ##__VA_ARGS__); \
+ else \
+ cifs_tcon_dbg_func(ratelimited, \
+ type, fmt, ##__VA_ARGS__); \
+} while (0)
+
/*
* debug OFF
* ---------
@@ -91,6 +145,19 @@ do { \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
+#define cifs_server_dbg(type, fmt, ...) \
+do { \
+ if (0) \
+ pr_debug("\\\\%s " fmt, \
+ server->hostname, ##__VA_ARGS__); \
+} while (0)
+
+#define cifs_tcon_dbg(type, fmt, ...) \
+do { \
+ if (0) \
+ pr_debug("%s " fmt, tcon->treeName, ##__VA_ARGS__); \
+} while (0)
+
#define cifs_info(fmt, ...) \
do { \
pr_info("CIFS: "fmt, ##__VA_ARGS__); \
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index b326d2ca3765..6e7c4427369d 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -53,6 +53,8 @@
#define CIFS_MOUNT_NO_HANDLE_CACHE 0x4000000 /* disable caching dir handles */
#define CIFS_MOUNT_NO_DFS 0x8000000 /* disable DFS resolving */
#define CIFS_MOUNT_MODE_FROM_SID 0x10000000 /* retrieve mode from special ACE */
+#define CIFS_MOUNT_RO_CACHE 0x20000000 /* assumes share will not change */
+#define CIFS_MOUNT_RW_CACHE 0x40000000 /* assumes only client accessing */
struct cifs_sb_info {
struct rb_root tlink_tree;
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index 086ddc5108af..0f0dc1c1fe41 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -46,6 +46,7 @@ struct smb_snapshot_array {
/* query_info flags */
#define PASSTHRU_QUERY_INFO 0x00000000
#define PASSTHRU_FSCTL 0x00000001
+#define PASSTHRU_SET_INFO 0x00000002
struct smb_query_info {
__u32 info_type;
__u32 file_info_class;
@@ -56,9 +57,18 @@ struct smb_query_info {
/* char buffer[]; */
} __packed;
+struct smb3_key_debug_info {
+ __u64 Suid;
+ __u16 cipher_type;
+ __u8 auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */
+ __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
+ __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
+} __packed;
+
#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
#define CIFS_IOC_SET_INTEGRITY _IO(CIFS_IOCTL_MAGIC, 4)
#define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
#define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)
#define CIFS_QUERY_INFO _IOWR(CIFS_IOCTL_MAGIC, 7, struct smb_query_info)
+#define CIFS_DUMP_KEY _IOWR(CIFS_IOCTL_MAGIC, 8, struct smb3_key_debug_info)
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 7f01c6e60791..7b9b876b513b 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -98,7 +98,7 @@ struct key_type cifs_spnego_key_type = {
struct key *
cifs_get_spnego_key(struct cifs_ses *sesInfo)
{
- struct TCP_Server_Info *server = sesInfo->server;
+ struct TCP_Server_Info *server = cifs_ses_server(sesInfo);
struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
char *description, *dp;
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 1d377b7f2860..06ffe52bdcfa 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -39,8 +39,6 @@ static const struct cifs_sid sid_everyone = {
/* security id for Authenticated Users system group */
static const struct cifs_sid sid_authusers = {
1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} };
-/* group users */
-static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
/* S-1-22-1 Unmapped Unix users */
static const struct cifs_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
@@ -701,10 +699,9 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
}
#endif
-
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
- struct cifs_fattr *fattr)
+ struct cifs_fattr *fattr, bool mode_from_special_sid)
{
int i;
int num_aces = 0;
@@ -757,22 +754,34 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
#ifdef CONFIG_CIFS_DEBUG2
dump_ace(ppace[i], end_of_acl);
#endif
- if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
+ if (mode_from_special_sid &&
+ (compare_sids(&(ppace[i]->sid),
+ &sid_unix_NFS_mode) == 0)) {
+ /*
+ * Full permissions are:
+ * 07777 = S_ISUID | S_ISGID | S_ISVTX |
+ * S_IRWXU | S_IRWXG | S_IRWXO
+ */
+ fattr->cf_mode &= ~07777;
+ fattr->cf_mode |=
+ le32_to_cpu(ppace[i]->sid.sub_auth[2]);
+ break;
+ } else if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
access_flags_to_mode(ppace[i]->access_req,
ppace[i]->type,
&fattr->cf_mode,
&user_mask);
- if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
+ else if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
access_flags_to_mode(ppace[i]->access_req,
ppace[i]->type,
&fattr->cf_mode,
&group_mask);
- if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
+ else if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
access_flags_to_mode(ppace[i]->access_req,
ppace[i]->type,
&fattr->cf_mode,
&other_mask);
- if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
+ else if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
access_flags_to_mode(ppace[i]->access_req,
ppace[i]->type,
&fattr->cf_mode,
@@ -795,22 +804,49 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
- struct cifs_sid *pgrpsid, __u64 nmode)
+ struct cifs_sid *pgrpsid, __u64 nmode, bool modefromsid)
{
u16 size = 0;
+ u32 num_aces = 0;
struct cifs_acl *pnndacl;
pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));
+ if (modefromsid) {
+ struct cifs_ace *pntace =
+ (struct cifs_ace *)((char *)pnndacl + size);
+ int i;
+
+ pntace->type = ACCESS_ALLOWED;
+ pntace->flags = 0x0;
+ pntace->access_req = 0;
+ pntace->sid.num_subauth = 3;
+ pntace->sid.revision = 1;
+ for (i = 0; i < NUM_AUTHS; i++)
+ pntace->sid.authority[i] =
+ sid_unix_NFS_mode.authority[i];
+ pntace->sid.sub_auth[0] = sid_unix_NFS_mode.sub_auth[0];
+ pntace->sid.sub_auth[1] = sid_unix_NFS_mode.sub_auth[1];
+ pntace->sid.sub_auth[2] = cpu_to_le32(nmode & 07777);
+
+ /* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */
+ pntace->size = cpu_to_le16(28);
+ size += 28;
+ num_aces++;
+ }
+
size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
pownersid, nmode, S_IRWXU);
+ num_aces++;
size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
pgrpsid, nmode, S_IRWXG);
+ num_aces++;
size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
&sid_everyone, nmode, S_IRWXO);
+ num_aces++;
+ pndacl->num_aces = cpu_to_le32(num_aces);
pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
- pndacl->num_aces = cpu_to_le32(3);
return 0;
}
@@ -851,7 +887,8 @@ static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
/* Convert CIFS ACL to POSIX form */
static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
- struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
+ struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr,
+ bool get_mode_from_special_sid)
{
int rc = 0;
struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
@@ -900,7 +937,7 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
if (dacloffset)
parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
- group_sid_ptr, fattr);
+ group_sid_ptr, fattr, get_mode_from_special_sid);
else
cifs_dbg(FYI, "no ACL\n"); /* BB grant all or default perms? */
@@ -909,7 +946,8 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
- __u32 secdesclen, __u64 nmode, kuid_t uid, kgid_t gid, int *aclflag)
+ __u32 secdesclen, __u64 nmode, kuid_t uid, kgid_t gid,
+ bool mode_from_sid, int *aclflag)
{
int rc = 0;
__u32 dacloffset;
@@ -934,7 +972,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
ndacl_ptr->num_aces = 0;
rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
- nmode);
+ nmode, mode_from_sid);
sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
/* copy sec desc control portion & owner and group sids */
copy_sec_desc(pntsd, pnntsd, sidsoffset);
@@ -1128,8 +1166,8 @@ out:
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
- struct inode *inode, const char *path,
- const struct cifs_fid *pfid)
+ struct inode *inode, bool mode_from_special_sid,
+ const char *path, const struct cifs_fid *pfid)
{
struct cifs_ntsd *pntsd = NULL;
u32 acllen = 0;
@@ -1156,8 +1194,11 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
if (IS_ERR(pntsd)) {
rc = PTR_ERR(pntsd);
cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
+ } else if (mode_from_special_sid) {
+ rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true);
} else {
- rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
+ /* get approximated mode from ACL */
+ rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false);
kfree(pntsd);
if (rc)
cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc);
@@ -1181,6 +1222,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
struct smb_version_operations *ops;
+ bool mode_from_sid;
if (IS_ERR(tlink))
return PTR_ERR(tlink);
@@ -1218,8 +1260,13 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
return -ENOMEM;
}
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
+ mode_from_sid = true;
+ else
+ mode_from_sid = false;
+
rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
- &aclflag);
+ mode_from_sid, &aclflag);
cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index dd95a6fa24bf..439b99cefeb0 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -45,7 +45,7 @@
*/
#define DEFAULT_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + \
sizeof(struct cifs_acl) + \
- (sizeof(struct cifs_ace) * 3))
+ (sizeof(struct cifs_ace) * 4))
/*
* Maximum size of a string representation of a SID:
@@ -90,8 +90,39 @@ struct cifs_acl {
__le32 num_aces;
} __attribute__((packed));
+/* ACE types - see MS-DTYP 2.4.4.1 */
+#define ACCESS_ALLOWED_ACE_TYPE 0x00
+#define ACCESS_DENIED_ACE_TYPE 0x01
+#define SYSTEM_AUDIT_ACE_TYPE 0x02
+#define SYSTEM_ALARM_ACE_TYPE 0x03
+#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
+#define ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
+#define ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
+#define SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
+#define SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
+#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
+#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
+#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
+#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE 0x0C
+#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE 0x0D
+#define SYSTEM_ALARM_CALLBACK_ACE_TYPE 0x0E /* Reserved */
+#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
+#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
+#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
+#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
+#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
+
+/* ACE flags */
+#define OBJECT_INHERIT_ACE 0x01
+#define CONTAINER_INHERIT_ACE 0x02
+#define NO_PROPAGATE_INHERIT_ACE 0x04
+#define INHERIT_ONLY_ACE 0x08
+#define INHERITED_ACE 0x10
+#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
+#define FAILED_ACCESS_ACE_FLAG 0x80
+
struct cifs_ace {
- __u8 type;
+ __u8 type; /* see above and MS-DTYP 2.4.4.1 */
__u8 flags;
__le16 size;
__le32 access_req;
@@ -99,6 +130,54 @@ struct cifs_ace {
} __attribute__((packed));
/*
+ * The current SMB3 form of security descriptor is similar to what was used for
+ * cifs (see above) but some fields are split, and the fields in the struct below
+ * match the names of fields in the spec, MS-DTYP (see sections 2.4.5 and
+ * 2.4.6). Note that "CamelCase" fields are used in this struct in order to
+ * match the MS-DTYP and MS-SMB2 specs which define the wire format.
+ */
+struct smb3_sd {
+ __u8 Revision; /* revision level, MUST be one */
+ __u8 Sbz1; /* only meaningful if 'RM' flag set below */
+ __le16 Control;
+ __le32 OffsetOwner;
+ __le32 OffsetGroup;
+ __le32 OffsetSacl;
+ __le32 OffsetDacl;
+} __packed;
+
+/* Meaning of 'Control' field flags */
+#define ACL_CONTROL_SR 0x0001 /* Self relative */
+#define ACL_CONTROL_RM 0x0002 /* Resource manager control bits */
+#define ACL_CONTROL_PS 0x0004 /* SACL protected from inherits */
+#define ACL_CONTROL_PD 0x0008 /* DACL protected from inherits */
+#define ACL_CONTROL_SI 0x0010 /* SACL Auto-Inherited */
+#define ACL_CONTROL_DI 0x0020 /* DACL Auto-Inherited */
+#define ACL_CONTROL_SC 0x0040 /* SACL computed through inheritance */
+#define ACL_CONTROL_DC 0x0080 /* DACL computed through inheritance */
+#define ACL_CONTROL_SS 0x0100 /* Create server ACL */
+#define ACL_CONTROL_DT 0x0200 /* DACL provided by trusted source */
+#define ACL_CONTROL_SD 0x0400 /* SACL defaulted */
+#define ACL_CONTROL_SP 0x0800 /* SACL is present on object */
+#define ACL_CONTROL_DD 0x1000 /* DACL defaulted */
+#define ACL_CONTROL_DP 0x2000 /* DACL is present on object */
+#define ACL_CONTROL_GD 0x4000 /* Group was defaulted */
+#define ACL_CONTROL_OD 0x8000 /* User was defaulted */
+
+/* Meaning of AclRevision flags */
+#define ACL_REVISION 0x02 /* See section 2.4.4.1 of MS-DTYP */
+#define ACL_REVISION_DS 0x04 /* Additional AceTypes allowed */
+
+struct smb3_acl {
+ u8 AclRevision; /* revision level */
+ u8 Sbz1; /* MBZ */
+ __le16 AclSize;
+ __le16 AceCount;
+ __le16 Sbz2; /* MBZ */
+} __packed;
+
+
+/*
* Minimum security identifier can be one for system defined Users
* and Groups such as NULL SID and World or Built-in accounts such
* as Administrator and Guest and consists of
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3289b566463f..1d1051d31513 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -56,6 +56,15 @@
#include "dfs_cache.h"
#endif
+/*
+ * DOS dates from 1980/1/1 through 2107/12/31
+ * Protocol specifications indicate the year range should only go to 119,
+ * which would limit the maximum year to 2099, but this has not been verified.
+ */
+#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
+#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
+#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
+
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
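
The limits above use the packed DOS format: the date word stores the year offset from 1980 in bits 15-9, the month in bits 8-5 and the day in bits 4-0, while the time word stores hours in bits 15-11, minutes in bits 10-5 and two-second units in bits 4-0. Worked decode of the maxima defined above:

	SMB_DATE_MAX = (127 << 9) | (12 << 5) | 31	/* 1980 + 127 => 2107-12-31 */
	SMB_TIME_MAX = (23 << 11) | (59 << 5) | 29	/* 23:59 and 29 * 2 s => 23:59:58 */
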
@@ -109,6 +118,8 @@ extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
struct workqueue_struct *cifsiod_wq;
+struct workqueue_struct *decrypt_wq;
+struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
__u32 cifs_lock_secret;
@@ -142,6 +153,7 @@ cifs_read_super(struct super_block *sb)
struct inode *inode;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
+ struct timespec64 ts;
int rc = 0;
cifs_sb = CIFS_SB(sb);
@@ -158,8 +170,34 @@ cifs_read_super(struct super_block *sb)
else
sb->s_maxbytes = MAX_NON_LFS;
- /* BB FIXME fix time_gran to be larger for LANMAN sessions */
- sb->s_time_gran = 100;
+ /*
+ * Some very old servers like DOS and OS/2 used 2 second granularity
+ * (while all current servers use 100ns granularity - see MS-DTYP)
+ * but 1 second is the maximum allowed granularity for the VFS
+ * so for old servers set time granularity to 1 second while for
+ * everything else (current servers) set it to 100ns.
+ */
+ if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
+ ((tcon->ses->capabilities &
+ tcon->ses->server->vals->cap_nt_find) == 0) &&
+ !tcon->unix_ext) {
+ sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
+ ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
+ sb->s_time_min = ts.tv_sec;
+ ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
+ cpu_to_le16(SMB_TIME_MAX), 0);
+ sb->s_time_max = ts.tv_sec;
+ } else {
+ /*
+ * Almost every server, including all SMB2+, uses DCE TIME
+ * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
+ */
+ sb->s_time_gran = 100;
+ ts = cifs_NTtimeToUnix(0);
+ sb->s_time_min = ts.tv_sec;
+ ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
+ sb->s_time_max = ts.tv_sec;
+ }
sb->s_magic = CIFS_MAGIC_NUMBER;
sb->s_op = &cifs_super_ops;
@@ -400,6 +438,10 @@ cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
seq_puts(s, "strict");
else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
seq_puts(s, "none");
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
+ seq_puts(s, "singleclient"); /* assume only one client access */
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
+ seq_puts(s, "ro"); /* read only caching assumed */
else
seq_puts(s, "loose");
}
@@ -433,6 +475,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
cifs_show_security(s, tcon->ses);
cifs_show_cache_flavor(s, cifs_sb);
+ if (tcon->no_lease)
+ seq_puts(s, ",nolease");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
seq_puts(s, ",multiuser");
else if (tcon->ses->user_name)
@@ -554,6 +598,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",rsize=%u", cifs_sb->rsize);
seq_printf(s, ",wsize=%u", cifs_sb->wsize);
seq_printf(s, ",bsize=%u", cifs_sb->bsize);
+ if (tcon->ses->server->min_offload)
+ seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
seq_printf(s, ",echo_interval=%lu",
tcon->ses->server->echo_interval / HZ);
@@ -568,6 +614,10 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
/* convert actimeo and display it in seconds */
seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
+ if (tcon->ses->chan_max > 1)
+ seq_printf(s, ",multichannel,max_channel=%zu",
+ tcon->ses->chan_max);
+
return 0;
}
@@ -1174,6 +1224,7 @@ const struct file_operations cifs_file_ops = {
.open = cifs_open,
.release = cifs_close,
.lock = cifs_lock,
+ .flock = cifs_flock,
.fsync = cifs_fsync,
.flush = cifs_flush,
.mmap = cifs_file_mmap,
@@ -1193,6 +1244,7 @@ const struct file_operations cifs_file_strict_ops = {
.open = cifs_open,
.release = cifs_close,
.lock = cifs_lock,
+ .flock = cifs_flock,
.fsync = cifs_strict_fsync,
.flush = cifs_flush,
.mmap = cifs_file_strict_mmap,
@@ -1212,6 +1264,7 @@ const struct file_operations cifs_file_direct_ops = {
.open = cifs_open,
.release = cifs_close,
.lock = cifs_lock,
+ .flock = cifs_flock,
.fsync = cifs_fsync,
.flush = cifs_flush,
.mmap = cifs_file_mmap,
@@ -1495,11 +1548,32 @@ init_cifs(void)
goto out_clean_proc;
}
+ /*
+ * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
+ * so that we don't launch too many worker threads but
+ * Documentation/core-api/workqueue.rst recommends setting it to 0
+ */
+
+ /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
+ decrypt_wq = alloc_workqueue("smb3decryptd",
+ WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ if (!decrypt_wq) {
+ rc = -ENOMEM;
+ goto out_destroy_cifsiod_wq;
+ }
+
+ fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
+ WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ if (!fileinfo_put_wq) {
+ rc = -ENOMEM;
+ goto out_destroy_decrypt_wq;
+ }
+
cifsoplockd_wq = alloc_workqueue("cifsoplockd",
WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
if (!cifsoplockd_wq) {
rc = -ENOMEM;
- goto out_destroy_cifsiod_wq;
+ goto out_destroy_fileinfo_put_wq;
}
rc = cifs_fscache_register();
@@ -1565,6 +1639,10 @@ out_unreg_fscache:
cifs_fscache_unregister();
out_destroy_cifsoplockd_wq:
destroy_workqueue(cifsoplockd_wq);
+out_destroy_fileinfo_put_wq:
+ destroy_workqueue(fileinfo_put_wq);
+out_destroy_decrypt_wq:
+ destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
destroy_workqueue(cifsiod_wq);
out_clean_proc:
@@ -1591,6 +1669,8 @@ exit_cifs(void)
cifs_destroy_inodecache();
cifs_fscache_unregister();
destroy_workqueue(cifsoplockd_wq);
+ destroy_workqueue(decrypt_wq);
+ destroy_workqueue(fileinfo_put_wq);
destroy_workqueue(cifsiod_wq);
cifs_proc_clean();
}
@@ -1601,18 +1681,17 @@ MODULE_DESCRIPTION
("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
-MODULE_SOFTDEP("pre: des");
-MODULE_SOFTDEP("pre: ecb");
-MODULE_SOFTDEP("pre: hmac");
-MODULE_SOFTDEP("pre: md4");
-MODULE_SOFTDEP("pre: md5");
-MODULE_SOFTDEP("pre: nls");
-MODULE_SOFTDEP("pre: aes");
-MODULE_SOFTDEP("pre: cmac");
-MODULE_SOFTDEP("pre: sha256");
-MODULE_SOFTDEP("pre: sha512");
-MODULE_SOFTDEP("pre: aead2");
-MODULE_SOFTDEP("pre: ccm");
-MODULE_SOFTDEP("pre: gcm");
+MODULE_SOFTDEP("ecb");
+MODULE_SOFTDEP("hmac");
+MODULE_SOFTDEP("md4");
+MODULE_SOFTDEP("md5");
+MODULE_SOFTDEP("nls");
+MODULE_SOFTDEP("aes");
+MODULE_SOFTDEP("cmac");
+MODULE_SOFTDEP("sha256");
+MODULE_SOFTDEP("sha512");
+MODULE_SOFTDEP("aead2");
+MODULE_SOFTDEP("ccm");
+MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 4b21a90015a9..b59dc7478130 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -108,6 +108,7 @@ extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from);
extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from);
extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
+extern int cifs_flock(struct file *pfile, int cmd, struct file_lock *plock);
extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, loff_t, loff_t, int);
extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
@@ -152,5 +153,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.21"
+#define CIFS_VERSION "2.24"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index fe610e7e3670..d34a4ed8c57d 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -230,7 +230,8 @@ struct smb_version_operations {
bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
/* setup request: allocate mid, sign message */
struct mid_q_entry *(*setup_request)(struct cifs_ses *,
- struct smb_rqst *);
+ struct TCP_Server_Info *,
+ struct smb_rqst *);
/* setup async request: allocate mid, sign message */
struct mid_q_entry *(*setup_async_request)(struct TCP_Server_Info *,
struct smb_rqst *);
@@ -268,8 +269,9 @@ struct smb_version_operations {
int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
- void (*downgrade_oplock)(struct TCP_Server_Info *,
- struct cifsInodeInfo *, bool);
+ void (*downgrade_oplock)(struct TCP_Server_Info *server,
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache);
/* process transaction2 response */
bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
char *, int);
@@ -331,8 +333,9 @@ struct smb_version_operations {
umode_t mode, struct cifs_tcon *tcon,
const char *full_path,
struct cifs_sb_info *cifs_sb);
- int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
- struct cifs_sb_info *);
+ int (*mkdir)(const unsigned int xid, struct inode *inode, umode_t mode,
+ struct cifs_tcon *tcon, const char *name,
+ struct cifs_sb_info *sb);
/* set info on created directory */
void (*mkdir_setinfo)(struct inode *, const char *,
struct cifs_sb_info *, struct cifs_tcon *,
@@ -542,6 +545,7 @@ struct smb_vol {
umode_t dir_mode;
enum securityEnum sectype; /* sectype requested via mnt opts */
bool sign; /* was signing requested via mnt opts? */
+ bool ignore_signature:1;
bool retry:1;
bool intr:1;
bool setuids:1;
@@ -559,6 +563,8 @@ struct smb_vol {
bool server_ino:1; /* use inode numbers from server ie UniqueId */
bool direct_io:1;
bool strict_io:1; /* strict cache behavior */
+ bool cache_ro:1;
+ bool cache_rw:1;
bool remap:1; /* set to remap seven reserved chars in filenames */
bool sfu_remap:1; /* remap seven reserved chars ala SFU */
bool posix_paths:1; /* unset to not ask for posix pathnames. */
@@ -576,6 +582,7 @@ struct smb_vol {
bool noblocksnd:1;
bool noautotune:1;
bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
+ bool no_lease:1; /* disable requesting leases */
bool fsc:1; /* enable fscache */
bool mfsymlinks:1; /* use Minshall+French Symlinks */
bool multiuser:1;
@@ -586,9 +593,14 @@ struct smb_vol {
bool resilient:1; /* noresilient not required since not fored for CA */
bool domainauto:1;
bool rdma:1;
+ bool multichannel:1;
+ bool use_client_guid:1;
+ /* reuse existing guid for multichannel */
+ u8 client_guid[SMB2_CLIENT_GUID_SIZE];
unsigned int bsize;
unsigned int rsize;
unsigned int wsize;
+ unsigned int min_offload;
bool sockopt_tcp_nodelay:1;
unsigned long actimeo; /* attribute cache timeout (jiffies) */
struct smb_version_operations *ops;
@@ -601,7 +613,9 @@ struct smb_vol {
__u64 snapshot_time; /* needed for timewarp tokens */
__u32 handle_timeout; /* persistent and durable handle timeout in ms */
unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
+ unsigned int max_channels;
__u16 compression; /* compression algorithm 0xFFFF default 0=disabled */
+ bool rootfs:1; /* if it's a SMB root file system */
};
/**
@@ -620,7 +634,8 @@ struct smb_vol {
CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID | \
CIFS_MOUNT_UID_FROM_ACL | CIFS_MOUNT_NO_HANDLE_CACHE | \
- CIFS_MOUNT_NO_DFS | CIFS_MOUNT_MODE_FROM_SID)
+ CIFS_MOUNT_NO_DFS | CIFS_MOUNT_MODE_FROM_SID | \
+ CIFS_MOUNT_RO_CACHE | CIFS_MOUNT_RW_CACHE)
/**
* Generic VFS superblock mount flags (s_flags) to consider when
@@ -672,12 +687,14 @@ struct TCP_Server_Info {
unsigned int credits; /* send no more requests at once */
unsigned int max_credits; /* can override large 32000 default at mnt */
unsigned int in_flight; /* number of requests on the wire to server */
+ unsigned int max_in_flight; /* max number of requests that were on wire */
spinlock_t req_lock; /* protect the two values above */
struct mutex srv_mutex;
struct task_struct *tsk;
char server_GUID[16];
__u16 sec_mode;
bool sign; /* is signing enabled on this connection? */
+ bool ignore_signature:1; /* skip validation of signatures in SMB2/3 rsp */
bool session_estab; /* mark when very first sess is established */
int echo_credits; /* echo reserved slots */
int oplock_credits; /* oplock break reserved slots */
@@ -726,12 +743,12 @@ struct TCP_Server_Info {
/* Total size of this PDU. Only valid from cifs_demultiplex_thread */
unsigned int pdu_size;
unsigned int total_read; /* total amount of data read in this pass */
+ atomic_t in_send; /* requests trying to send */
+ atomic_t num_waiters; /* blocked waiting to get in sendrecv */
#ifdef CONFIG_CIFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
#endif
#ifdef CONFIG_CIFS_STATS2
- atomic_t in_send; /* requests trying to send */
- atomic_t num_waiters; /* blocked waiting to get in sendrecv */
atomic_t num_cmds[NUMBER_OF_SMB2_COMMANDS]; /* total requests by cmd */
atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */
__u64 time_per_cmd[NUMBER_OF_SMB2_COMMANDS]; /* total time per cmd */
@@ -740,6 +757,7 @@ struct TCP_Server_Info {
#endif /* STATS2 */
unsigned int max_read;
unsigned int max_write;
+ unsigned int min_offload;
__le16 compress_algorithm;
__le16 cipher_type;
/* save initital negprot hash */
@@ -755,6 +773,7 @@ struct TCP_Server_Info {
* reconnect.
*/
int nr_targets;
+ bool noblockcnt; /* use non-blocking connect() */
};
struct cifs_credits {
@@ -941,6 +960,11 @@ struct cifs_server_iface {
struct sockaddr_storage sockaddr;
};
+struct cifs_chan {
+ struct TCP_Server_Info *server;
+ __u8 signkey[SMB3_SIGN_KEY_SIZE];
+};
+
/*
* Session structure. One of these for each uid session with a particular host
*/
@@ -971,12 +995,15 @@ struct cifs_ses {
bool sign; /* is signing required? */
bool need_reconnect:1; /* connection reset, uid now invalid */
bool domainAuto:1;
+ bool binding:1; /* are we binding the session? */
__u16 session_flags;
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+ __u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+
/*
* Network interfaces available on the server this session is
* connected to.
@@ -990,8 +1017,37 @@ struct cifs_ses {
struct cifs_server_iface *iface_list;
size_t iface_count;
unsigned long iface_last_update; /* jiffies */
+
+#define CIFS_MAX_CHANNELS 16
+ struct cifs_chan chans[CIFS_MAX_CHANNELS];
+ size_t chan_count;
+ size_t chan_max;
+ atomic_t chan_seq; /* round robin state */
};
+/*
+ * When binding a new channel, we need to access the channel which isn't fully
+ * established yet (one past the established count)
+ */
+
+static inline
+struct cifs_chan *cifs_ses_binding_channel(struct cifs_ses *ses)
+{
+ if (ses->binding)
+ return &ses->chans[ses->chan_count];
+ else
+ return NULL;
+}
+
+static inline
+struct TCP_Server_Info *cifs_ses_server(struct cifs_ses *ses)
+{
+ if (ses->binding)
+ return ses->chans[ses->chan_count].server;
+ else
+ return ses->server;
+}
+
static inline bool
cap_unix(struct cifs_ses *ses)
{
@@ -1082,6 +1138,7 @@ struct cifs_tcon {
bool need_reopen_files:1; /* need to reopen tcon file handles */
bool use_resilient:1; /* use resilient instead of durable handles */
bool use_persistent:1; /* use persistent instead of durable handles */
+ bool no_lease:1; /* Do not request leases on files or directories */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
@@ -1197,6 +1254,7 @@ struct cifs_search_info {
bool smallBuf:1; /* so we know which buf_release function to call */
};
+#define ACL_NO_MODE ((umode_t)(-1))
struct cifs_open_parms {
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
@@ -1246,11 +1304,14 @@ struct cifsFileInfo {
unsigned int f_flags;
bool invalidHandle:1; /* file closed via session abend */
bool oplock_break_cancelled:1;
+ unsigned int oplock_epoch; /* epoch from the lease break */
+ __u32 oplock_level; /* oplock/lease level from the lease break */
int count;
spinlock_t file_info_lock; /* protects four flag/count fields above */
struct mutex fh_mutex; /* prevents reopen race after dead ses*/
struct cifs_search_info srch_inf;
struct work_struct oplock_break; /* work for oplock breaks */
+ struct work_struct put; /* work for the final part of _put */
};
struct cifs_io_parms {
@@ -1356,7 +1417,8 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
}
struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
-void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr,
+ bool offload);
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
#define CIFS_CACHE_READ_FLG 1
@@ -1366,9 +1428,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
#define CIFS_CACHE_RW_FLG (CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG)
#define CIFS_CACHE_RHW_FLG (CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG)
-#define CIFS_CACHE_READ(cinode) (cinode->oplock & CIFS_CACHE_READ_FLG)
+#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE))
#define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG)
-#define CIFS_CACHE_WRITE(cinode) (cinode->oplock & CIFS_CACHE_WRITE_FLG)
+#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE))
/*
* One of these for each file inode
@@ -1377,6 +1439,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
struct cifsInodeInfo {
bool can_cache_brlcks;
struct list_head llist; /* locks helb by this inode */
+ /*
+ * NOTE: Some code paths call down_read(lock_sem) twice, so
+ * we must always use use cifs_down_write() instead of down_write()
+ * for this semaphore to avoid deadlocks.
+ */
struct rw_semaphore lock_sem; /* protect the fields above */
/* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;
@@ -1386,7 +1453,7 @@ struct cifsInodeInfo {
unsigned int epoch; /* used to track lease state changes */
#define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
#define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
-#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */
+#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */
#define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
@@ -1505,6 +1572,7 @@ struct mid_q_entry {
struct TCP_Server_Info *server; /* server corresponding to this mid */
__u64 mid; /* multiplex id */
__u16 credits; /* number of credits consumed by this mid */
+ __u16 credits_received; /* number of credits from the response */
__u32 pid; /* process id */
__u32 sequence_number; /* for CIFS signing */
unsigned long when_alloc; /* when mid was created */
@@ -1532,12 +1600,12 @@ struct close_cancelled_open {
struct cifs_fid fid;
struct cifs_tcon *tcon;
struct work_struct work;
+ __u64 mid;
+ __u16 cmd;
};
/* Make code in transport.c a little cleaner by moving
update of optional stats into function below */
-#ifdef CONFIG_CIFS_STATS2
-
static inline void cifs_in_send_inc(struct TCP_Server_Info *server)
{
atomic_inc(&server->in_send);
@@ -1558,26 +1626,12 @@ static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server)
atomic_dec(&server->num_waiters);
}
+#ifdef CONFIG_CIFS_STATS2
static inline void cifs_save_when_sent(struct mid_q_entry *mid)
{
mid->when_sent = jiffies;
}
#else
-static inline void cifs_in_send_inc(struct TCP_Server_Info *server)
-{
-}
-static inline void cifs_in_send_dec(struct TCP_Server_Info *server)
-{
-}
-
-static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server)
-{
-}
-
-static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server)
-{
-}
-
static inline void cifs_save_when_sent(struct mid_q_entry *mid)
{
}
@@ -1887,6 +1941,8 @@ void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
+extern struct workqueue_struct *decrypt_wq;
+extern struct workqueue_struct *fileinfo_put_wq;
extern struct workqueue_struct *cifsoplockd_wq;
extern __u32 cifs_lock_secret;
@@ -1917,4 +1973,10 @@ extern struct smb_version_values smb302_values;
#define ALT_SMB311_VERSION_STRING "3.11"
extern struct smb_version_operations smb311_operations;
extern struct smb_version_values smb311_values;
+
+static inline bool is_smb1_server(struct TCP_Server_Info *server)
+{
+ return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0;
+}
+
#endif /* _CIFS_GLOB_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e23234207fc2..1ed695336f62 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -109,6 +109,7 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
char *in_buf, int flags);
extern struct mid_q_entry *cifs_setup_request(struct cifs_ses *,
+ struct TCP_Server_Info *,
struct smb_rqst *);
extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
struct smb_rqst *);
@@ -137,7 +138,11 @@ extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
bool fsuid_only,
struct cifsFileInfo **ret_file);
+extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+ struct cifsFileInfo **ret_file);
extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
+extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
+ struct cifsFileInfo **ret_file);
extern unsigned int smbCalcSize(void *buf, struct TCP_Server_Info *server);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
struct TCP_Server_Info *server);
@@ -166,6 +171,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile,
struct file_lock *flock, const unsigned int xid);
extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
+extern void cifs_down_write(struct rw_semaphore *sem);
extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid,
struct file *file,
struct tcon_link *tlink,
@@ -197,6 +203,7 @@ extern int cifs_rename_pending_delete(const char *full_path,
const unsigned int xid);
extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr, struct inode *inode,
+ bool get_mode_from_special_sid,
const char *path, const struct cifs_fid *pfid);
extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
kuid_t, kgid_t);
@@ -236,6 +243,7 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
struct tcon_link *tlink,
struct cifs_pending_open *open);
extern void cifs_del_pending_open(struct cifs_pending_open *open);
+extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb_vol *vol);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
extern void cifs_put_tcon(struct cifs_tcon *tcon);
@@ -367,7 +375,8 @@ extern int CIFSSMBUnixSetPathInfo(const unsigned int xid,
const struct nls_table *nls_codepage,
int remap);
-extern int CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBMkDir(const unsigned int xid, struct inode *inode,
+ umode_t mode, struct cifs_tcon *tcon,
const char *name, struct cifs_sb_info *cifs_sb);
extern int CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon,
const char *name, struct cifs_sb_info *cifs_sb);
@@ -577,8 +586,15 @@ void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
unsigned int *len, unsigned int *offset);
+int cifs_try_adding_channels(struct cifs_ses *ses);
+int cifs_ses_add_channel(struct cifs_ses *ses,
+ struct cifs_server_iface *iface);
+bool is_server_using_iface(struct TCP_Server_Info *server,
+ struct cifs_server_iface *iface);
+bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
void extract_unc_hostname(const char *unc, const char **h, size_t *len);
+int copy_path_name(char *dst, const char *src);
#ifdef CONFIG_CIFS_DFS_UPCALL
static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
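
copy_path_name(), declared above and used throughout the cifssmb.c hunks below, replaces the repeated strnlen()/strncpy() pattern. Its implementation is not part of this diff; judging by how the callers use its return value, a sketch of the assumed semantics (a bounded copy that returns the copied length including the trailing NUL):

	int copy_path_name(char *dst, const char *src)
	{
		/* PATH_MAX includes room for the terminating NUL */
		int name_len = strnlen(src, PATH_MAX - 1);

		memcpy(dst, src, name_len);
		dst[name_len] = '\0';
		return name_len + 1;	/* length including the NUL, as callers expect */
	}
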
diff --git a/fs/cifs/cifsroot.c b/fs/cifs/cifsroot.c
new file mode 100644
index 000000000000..37edbfb8e096
--- /dev/null
+++ b/fs/cifs/cifsroot.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SMB root file system support
+ *
+ * Copyright (c) 2019 Paulo Alcantara <palcantara@suse.de>
+ */
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/root_dev.h>
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <net/ipconfig.h>
+
+#define DEFAULT_MNT_OPTS \
+ "vers=1.0,cifsacl,mfsymlinks,rsize=1048576,wsize=65536,uid=0,gid=0," \
+ "hard,rootfs"
+
+static char root_dev[2048] __initdata = "";
+static char root_opts[1024] __initdata = DEFAULT_MNT_OPTS;
+
+static __be32 __init parse_srvaddr(char *start, char *end)
+{
+ /* TODO: ipv6 support */
+ char addr[sizeof("aaa.bbb.ccc.ddd")];
+ int i = 0;
+
+ while (start < end && i < sizeof(addr) - 1) {
+ if (isdigit(*start) || *start == '.')
+ addr[i++] = *start;
+ start++;
+ }
+ addr[i] = '\0';
+ return in_aton(addr);
+}
+
+/* cifsroot=//<server-ip>/<share>[,options] */
+static int __init cifs_root_setup(char *line)
+{
+ char *s;
+ int len;
+ __be32 srvaddr = htonl(INADDR_NONE);
+
+ ROOT_DEV = Root_CIFS;
+
+ if (strlen(line) > 3 && line[0] == '/' && line[1] == '/') {
+ s = strchr(&line[2], '/');
+ if (!s || s[1] == '\0')
+ return 1;
+
+ /* make s point to ',' or '\0' at end of line */
+ s = strchrnul(s, ',');
+ /* len is strlen(unc) + '\0' */
+ len = s - line + 1;
+ if (len > sizeof(root_dev)) {
+ printk(KERN_ERR "Root-CIFS: UNC path too long\n");
+ return 1;
+ }
+ strlcpy(root_dev, line, len);
+ srvaddr = parse_srvaddr(&line[2], s);
+ if (*s) {
+ int n = snprintf(root_opts,
+ sizeof(root_opts), "%s,%s",
+ DEFAULT_MNT_OPTS, s + 1);
+ if (n >= sizeof(root_opts)) {
+ printk(KERN_ERR "Root-CIFS: mount options string too long\n");
+ root_opts[sizeof(root_opts)-1] = '\0';
+ return 1;
+ }
+ }
+ }
+
+ root_server_addr = srvaddr;
+
+ return 1;
+}
+
+__setup("cifsroot=", cifs_root_setup);
+
+int __init cifs_root_data(char **dev, char **opts)
+{
+ if (!root_dev[0] || root_server_addr == htonl(INADDR_NONE)) {
+ printk(KERN_ERR "Root-CIFS: no SMB server address\n");
+ return -1;
+ }
+
+ *dev = root_dev;
+ *opts = root_opts;
+
+ return 0;
+}
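
Per the comment above cifs_root_setup(), the boot parameter takes the UNC path of the root share, optionally followed by extra mount options that are appended to DEFAULT_MNT_OPTS; since CIFS_ROOT depends on IP_PNP, the client address normally comes from ip= autoconfiguration. A hypothetical command line (the address, share and credentials are made up for illustration):

	cifsroot=//192.168.1.10/rootshare,username=pxe,password=pxe ip=dhcp
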
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e2f95965065d..4f554f019a98 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -942,10 +942,8 @@ PsxDelete:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB add path length overrun check */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
@@ -1015,10 +1013,8 @@ DelFileRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->fileName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->fileName, name);
}
pSMB->SearchAttributes =
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
@@ -1062,10 +1058,8 @@ RmDirRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->DirName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->DirName, name);
}
pSMB->BufferFormat = 0x04;
@@ -1084,7 +1078,8 @@ RmDirRetry:
}
int
-CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+CIFSSMBMkDir(const unsigned int xid, struct inode *inode, umode_t mode,
+ struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
int rc = 0;
@@ -1107,10 +1102,8 @@ MkDirRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->DirName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->DirName, name);
}
pSMB->BufferFormat = 0x04;
@@ -1157,10 +1150,8 @@ PsxCreat:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, name);
}
params = 6 + name_len;
@@ -1324,11 +1315,9 @@ OldOpenRetry:
fileName, PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
+ } else {
count = 0; /* no pad */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->fileName, fileName, name_len);
+ name_len = copy_path_name(pSMB->fileName, fileName);
}
if (*pOplock & REQ_OPLOCK)
pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
@@ -1405,7 +1394,7 @@ int
CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock,
FILE_ALL_INFO *buf)
{
- int rc = -EACCES;
+ int rc;
OPEN_REQ *req = NULL;
OPEN_RSP *rsp = NULL;
int bytes_returned;
@@ -1442,11 +1431,8 @@ openRetry:
/* BB improve check for buffer overruns BB */
/* no pad */
count = 0;
- name_len = strnlen(path, PATH_MAX);
- /* trailing null */
- name_len++;
+ name_len = copy_path_name(req->fileName, path);
req->NameLength = cpu_to_le16(name_len);
- strncpy(req->fileName, path, name_len);
}
if (*oplock & REQ_OPLOCK)
@@ -2812,15 +2798,10 @@ renameRetry:
remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(from_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->OldFileName, from_name, name_len);
- name_len2 = strnlen(to_name, PATH_MAX);
- name_len2++; /* trailing null */
+ } else {
+ name_len = copy_path_name(pSMB->OldFileName, from_name);
+ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
- strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
- name_len2++; /* trailing null */
name_len2++; /* signature byte */
}
@@ -2962,15 +2943,10 @@ copyRetry:
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fromName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->OldFileName, fromName, name_len);
- name_len2 = strnlen(toName, PATH_MAX);
- name_len2++; /* trailing null */
+ } else {
+ name_len = copy_path_name(pSMB->OldFileName, fromName);
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
- strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
- name_len2++; /* trailing null */
+ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
name_len2++; /* signature byte */
}
@@ -3021,10 +2997,8 @@ createSymLinkRetry:
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fromName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fromName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fromName);
}
params = 6 + name_len;
pSMB->MaxSetupCount = 0;
@@ -3044,10 +3018,8 @@ createSymLinkRetry:
PATH_MAX, nls_codepage, remap);
name_len_target++; /* trailing null */
name_len_target *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len_target = strnlen(toName, PATH_MAX);
- name_len_target++; /* trailing null */
- strncpy(data_offset, toName, name_len_target);
+ } else {
+ name_len_target = copy_path_name(data_offset, toName);
}
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3109,10 +3081,8 @@ createHardLinkRetry:
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(toName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, toName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, toName);
}
params = 6 + name_len;
pSMB->MaxSetupCount = 0;
@@ -3131,10 +3101,8 @@ createHardLinkRetry:
PATH_MAX, nls_codepage, remap);
name_len_target++; /* trailing null */
name_len_target *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len_target = strnlen(fromName, PATH_MAX);
- name_len_target++; /* trailing null */
- strncpy(data_offset, fromName, name_len_target);
+ } else {
+ name_len_target = copy_path_name(data_offset, fromName);
}
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3213,15 +3181,10 @@ winCreateHardLinkRetry:
remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(from_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->OldFileName, from_name, name_len);
- name_len2 = strnlen(to_name, PATH_MAX);
- name_len2++; /* trailing null */
+ } else {
+ name_len = copy_path_name(pSMB->OldFileName, from_name);
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
- strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
- name_len2++; /* trailing null */
+ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
name_len2++; /* signature byte */
}
@@ -3271,10 +3234,8 @@ querySymLinkRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3691,10 +3652,8 @@ queryAclRetry:
name_len *= 2;
pSMB->FileName[name_len] = 0;
pSMB->FileName[name_len+1] = 0;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3776,10 +3735,8 @@ setAclRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4184,9 +4141,7 @@ QInfRetry:
name_len++; /* trailing null */
name_len *= 2;
} else {
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, search_name, name_len);
+ name_len = copy_path_name(pSMB->FileName, search_name);
}
pSMB->BufferFormat = 0x04;
name_len++; /* account for buffer type byte */
@@ -4321,10 +4276,8 @@ QPathInfoRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, search_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, search_name);
}
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4490,10 +4443,8 @@ UnixQPathInfoRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4593,17 +4544,16 @@ findFirstRetry:
pSMB->FileName[name_len+1] = 0;
name_len += 2;
}
- } else { /* BB add check for overrun of SMB buf BB */
- name_len = strnlen(searchName, PATH_MAX);
-/* BB fix here and in unicode clause above ie
- if (name_len > buffersize-header)
- free buffer exit; BB */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
if (msearch) {
- pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb);
- pSMB->FileName[name_len+1] = '*';
- pSMB->FileName[name_len+2] = 0;
- name_len += 3;
+ if (WARN_ON_ONCE(name_len > PATH_MAX-2))
+ name_len = PATH_MAX-2;
+ /* overwrite nul byte */
+ pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
+ pSMB->FileName[name_len] = '*';
+ pSMB->FileName[name_len+1] = 0;
+ name_len += 2;
}
}
@@ -4898,10 +4848,8 @@ GetInodeNumberRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, search_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, search_name);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -5008,9 +4956,7 @@ getDFSRetry:
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->RequestFileName, search_name, name_len);
+ name_len = copy_path_name(pSMB->RequestFileName, search_name);
}
if (ses->server->sign)
@@ -5663,10 +5609,8 @@ SetEOFRetry:
PATH_MAX, cifs_sb->local_nls, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(file_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, file_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, file_name);
}
params = 6 + name_len;
data_count = sizeof(struct file_end_of_file_info);
@@ -5959,10 +5903,8 @@ SetTimesRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
@@ -6040,10 +5982,8 @@ SetAttrLgcyRetry:
PATH_MAX, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->fileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->fileName, fileName);
}
pSMB->attr = cpu_to_le16(dos_attrs);
pSMB->BufferFormat = 0x04;
@@ -6203,10 +6143,8 @@ setPermsRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(file_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, file_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, file_name);
}
params = 6 + name_len;
@@ -6298,10 +6236,8 @@ QAllEAsRetry:
PATH_MAX, nls_codepage, remap);
list_len++; /* trailing null */
list_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- list_len = strnlen(searchName, PATH_MAX);
- list_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, list_len);
+ } else {
+ list_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
@@ -6480,10 +6416,8 @@ SetEARetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a4830ced0f98..86d1baedf21c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -74,7 +74,7 @@ enum {
Opt_user_xattr, Opt_nouser_xattr,
Opt_forceuid, Opt_noforceuid,
Opt_forcegid, Opt_noforcegid,
- Opt_noblocksend, Opt_noautotune,
+ Opt_noblocksend, Opt_noautotune, Opt_nolease,
Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
Opt_mapposix, Opt_nomapposix,
Opt_mapchars, Opt_nomapchars, Opt_sfu,
@@ -91,21 +91,23 @@ enum {
Opt_serverino, Opt_noserverino,
Opt_rwpidforward, Opt_cifsacl, Opt_nocifsacl,
Opt_acl, Opt_noacl, Opt_locallease,
- Opt_sign, Opt_seal, Opt_noac,
+ Opt_sign, Opt_ignore_signature, Opt_seal, Opt_noac,
Opt_fsc, Opt_mfsymlinks,
Opt_multiuser, Opt_sloppy, Opt_nosharesock,
Opt_persistent, Opt_nopersistent,
Opt_resilient, Opt_noresilient,
- Opt_domainauto, Opt_rdma, Opt_modesid,
+ Opt_domainauto, Opt_rdma, Opt_modesid, Opt_rootfs,
+ Opt_multichannel, Opt_nomultichannel,
Opt_compress,
/* Mount options which take numeric value */
Opt_backupuid, Opt_backupgid, Opt_uid,
Opt_cruid, Opt_gid, Opt_file_mode,
Opt_dirmode, Opt_port,
+ Opt_min_enc_offload,
Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
- Opt_snapshot,
+ Opt_snapshot, Opt_max_channels,
/* Mount options which take string value */
Opt_user, Opt_pass, Opt_ip,
@@ -134,6 +136,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_noforcegid, "noforcegid" },
{ Opt_noblocksend, "noblocksend" },
{ Opt_noautotune, "noautotune" },
+ { Opt_nolease, "nolease" },
{ Opt_hard, "hard" },
{ Opt_soft, "soft" },
{ Opt_perm, "perm" },
@@ -183,6 +186,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_noacl, "noacl" },
{ Opt_locallease, "locallease" },
{ Opt_sign, "sign" },
+ { Opt_ignore_signature, "signloosely" },
{ Opt_seal, "seal" },
{ Opt_noac, "noac" },
{ Opt_fsc, "fsc" },
@@ -196,6 +200,8 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_noresilient, "noresilienthandles"},
{ Opt_domainauto, "domainauto"},
{ Opt_rdma, "rdma"},
+ { Opt_multichannel, "multichannel" },
+ { Opt_nomultichannel, "nomultichannel" },
{ Opt_backupuid, "backupuid=%s" },
{ Opt_backupgid, "backupgid=%s" },
@@ -206,6 +212,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_dirmode, "dirmode=%s" },
{ Opt_dirmode, "dir_mode=%s" },
{ Opt_port, "port=%s" },
+ { Opt_min_enc_offload, "esize=%s" },
{ Opt_blocksize, "bsize=%s" },
{ Opt_rsize, "rsize=%s" },
{ Opt_wsize, "wsize=%s" },
@@ -214,6 +221,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_echo_interval, "echo_interval=%s" },
{ Opt_max_credits, "max_credits=%s" },
{ Opt_snapshot, "snapshot=%s" },
+ { Opt_max_channels, "max_channels=%s" },
{ Opt_compress, "compress=%s" },
{ Opt_blank_user, "user=" },
@@ -262,6 +270,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_ignore, "nomand" },
{ Opt_ignore, "relatime" },
{ Opt_ignore, "_netdev" },
+ { Opt_rootfs, "rootfs" },
{ Opt_err, NULL }
};
@@ -298,6 +307,8 @@ enum {
Opt_cache_loose,
Opt_cache_strict,
Opt_cache_none,
+ Opt_cache_ro,
+ Opt_cache_rw,
Opt_cache_err
};
@@ -305,6 +316,8 @@ static const match_table_t cifs_cacheflavor_tokens = {
{ Opt_cache_loose, "loose" },
{ Opt_cache_strict, "strict" },
{ Opt_cache_none, "none" },
+ { Opt_cache_ro, "ro" },
+ { Opt_cache_rw, "singleclient" },
{ Opt_cache_err, NULL }
};
@@ -378,7 +391,7 @@ static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
#ifdef CONFIG_CIFS_DFS_UPCALL
struct super_cb_data {
struct TCP_Server_Info *server;
- struct cifs_sb_info *cifs_sb;
+ struct super_block *sb;
};
/* These functions must be called with server->srv_mutex held */
@@ -389,25 +402,39 @@ static void super_cb(struct super_block *sb, void *arg)
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
- if (d->cifs_sb)
+ if (d->sb)
return;
cifs_sb = CIFS_SB(sb);
tcon = cifs_sb_master_tcon(cifs_sb);
if (tcon->ses->server == d->server)
- d->cifs_sb = cifs_sb;
+ d->sb = sb;
}
-static inline struct cifs_sb_info *
-find_super_by_tcp(struct TCP_Server_Info *server)
+static struct super_block *get_tcp_super(struct TCP_Server_Info *server)
{
struct super_cb_data d = {
.server = server,
- .cifs_sb = NULL,
+ .sb = NULL,
};
iterate_supers_type(&cifs_fs_type, super_cb, &d);
- return d.cifs_sb ? d.cifs_sb : ERR_PTR(-ENOENT);
+
+ if (unlikely(!d.sb))
+ return ERR_PTR(-ENOENT);
+ /*
+ * Grab an active reference in order to prevent automounts (DFS links)
+ * from expiring and then freeing up our cifs superblock pointer while
+ * we're doing failover.
+ */
+ cifs_sb_active(d.sb);
+ return d.sb;
+}
+
+static inline void put_tcp_super(struct super_block *sb)
+{
+ if (!IS_ERR_OR_NULL(sb))
+ cifs_sb_deactive(sb);
}
static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
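get_tcp_super() replaces find_super_by_tcp() and, unlike its predecessor, pins the superblock with an active reference. A sketch of the call pattern the reconnect path below is expected to follow (abridged, error paths trimmed):

	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;

	sb = get_tcp_super(server);	/* takes an active sb reference */
	if (IS_ERR(sb))
		return;			/* nothing mounted off this server */

	cifs_sb = CIFS_SB(sb);
	/* ... set up and walk DFS failover targets via cifs_sb ... */

	put_tcp_super(sb);		/* drop the reference when done */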
@@ -471,6 +498,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
struct mid_q_entry *mid_entry;
struct list_head retry_list;
#ifdef CONFIG_CIFS_DFS_UPCALL
+ struct super_block *sb = NULL;
struct cifs_sb_info *cifs_sb = NULL;
struct dfs_cache_tgt_list tgt_list = {0};
struct dfs_cache_tgt_iterator *tgt_it = NULL;
@@ -480,16 +508,18 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->nr_targets = 1;
#ifdef CONFIG_CIFS_DFS_UPCALL
spin_unlock(&GlobalMid_Lock);
- cifs_sb = find_super_by_tcp(server);
- if (IS_ERR(cifs_sb)) {
- rc = PTR_ERR(cifs_sb);
+ sb = get_tcp_super(server);
+ if (IS_ERR(sb)) {
+ rc = PTR_ERR(sb);
cifs_dbg(FYI, "%s: will not do DFS failover: rc = %d\n",
__func__, rc);
- cifs_sb = NULL;
+ sb = NULL;
} else {
+ cifs_sb = CIFS_SB(sb);
+
rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list, &tgt_it);
if (rc && (rc != -EOPNOTSUPP)) {
- cifs_dbg(VFS, "%s: no target servers for DFS failover\n",
+ cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
__func__);
} else {
server->nr_targets = dfs_cache_get_nr_tgts(&tgt_list);
@@ -503,6 +533,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* the demux thread will exit normally
next time through the loop */
spin_unlock(&GlobalMid_Lock);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ dfs_cache_free_tgts(&tgt_list);
+ put_tcp_super(sb);
+#endif
return rc;
} else
server->tcpStatus = CifsNeedReconnect;
@@ -555,9 +589,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ kref_get(&mid_entry->refcount);
if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
mid_entry->mid_state = MID_RETRY_NEEDED;
list_move(&mid_entry->qhead, &retry_list);
+ mid_entry->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
mutex_unlock(&server->srv_mutex);
@@ -567,6 +603,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
+ cifs_mid_q_entry_release(mid_entry);
}
if (cifs_rdma_enabled(server)) {
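The two hunks above establish one ownership rule for mids pulled off pending_mid_q during reconnect: take a reference, mark the entry deleted, move it to the private retry list, and release the reference only after its callback has run. A condensed sketch of that pattern (illustrative; locking as in the hunks, variable declarations omitted):

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry_safe(mid, tmp, &server->pending_mid_q, qhead) {
		kref_get(&mid->refcount);	/* keep the mid alive off-queue */
		if (mid->mid_state == MID_REQUEST_SUBMITTED)
			mid->mid_state = MID_RETRY_NEEDED;
		list_move(&mid->qhead, &retry_list);
		mid->mid_flags |= MID_DELETED;	/* no longer on pending_mid_q */
	}
	spin_unlock(&GlobalMid_Lock);

	list_for_each_entry_safe(mid, tmp, &retry_list, qhead) {
		list_del_init(&mid->qhead);
		mid->callback(mid);		/* complete or retry the request */
		cifs_mid_q_entry_release(mid);	/* drop the reference taken above */
	}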
@@ -617,16 +654,19 @@ cifs_reconnect(struct TCP_Server_Info *server)
rc = dfs_cache_noreq_update_tgthint(cifs_sb->origin_fullpath + 1,
tgt_it);
if (rc) {
- cifs_dbg(VFS, "%s: failed to update DFS target hint: rc = %d\n",
+ cifs_server_dbg(VFS, "%s: failed to update DFS target hint: rc = %d\n",
__func__, rc);
}
rc = dfs_cache_update_vol(cifs_sb->origin_fullpath, server);
if (rc) {
- cifs_dbg(VFS, "%s: failed to update vol info in DFS cache: rc = %d\n",
+ cifs_server_dbg(VFS, "%s: failed to update vol info in DFS cache: rc = %d\n",
__func__, rc);
}
dfs_cache_free_tgts(&tgt_list);
+
}
+
+ put_tcp_super(sb);
#endif
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
@@ -678,7 +718,7 @@ allocate_buffers(struct TCP_Server_Info *server)
if (!server->bigbuf) {
server->bigbuf = (char *)cifs_buf_get();
if (!server->bigbuf) {
- cifs_dbg(VFS, "No memory for large SMB response\n");
+ cifs_server_dbg(VFS, "No memory for large SMB response\n");
msleep(3000);
/* retry will check if exiting */
return false;
@@ -691,7 +731,7 @@ allocate_buffers(struct TCP_Server_Info *server)
if (!server->smallbuf) {
server->smallbuf = (char *)cifs_small_buf_get();
if (!server->smallbuf) {
- cifs_dbg(VFS, "No memory for SMB response\n");
+ cifs_server_dbg(VFS, "No memory for SMB response\n");
msleep(1000);
/* retry will check if exiting */
return false;
@@ -712,7 +752,7 @@ server_unresponsive(struct TCP_Server_Info *server)
* We need to wait 3 echo intervals to make sure we handle such
* situations right:
* 1s client sends a normal SMB request
- * 3s client gets a response
+ * 2s client gets a response
* 30s echo workqueue job pops, and decides we got a response recently
* and don't need to send another
* ...
@@ -722,8 +762,8 @@ server_unresponsive(struct TCP_Server_Info *server)
if ((server->tcpStatus == CifsGood ||
server->tcpStatus == CifsNeedNegotiate) &&
time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
- cifs_dbg(VFS, "Server %s has not responded in %lu seconds. Reconnecting...\n",
- server->hostname, (3 * server->echo_interval) / HZ);
+ cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
+ (3 * server->echo_interval) / HZ);
cifs_reconnect(server);
wake_up(&server->response_q);
return true;
@@ -861,7 +901,7 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
wake_up(&server->response_q);
break;
default:
- cifs_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
+ cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
cifs_reconnect(server);
}
@@ -886,11 +926,27 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
if (mid->mid_flags & MID_DELETED)
printk_once(KERN_WARNING
"trying to dequeue a deleted mid\n");
- else
+ else {
list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
+ }
spin_unlock(&GlobalMid_Lock);
}
+static unsigned int
+smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
+{
+ struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buffer;
+
+ /*
+ * SMB1 does not use credits.
+ */
+ if (server->vals->header_preamble_size)
+ return 0;
+
+ return le16_to_cpu(shdr->CreditRequest);
+}
+
static void
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
char *buf, int malformed)
@@ -898,6 +954,7 @@ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
if (server->ops->check_trans2 &&
server->ops->check_trans2(mid, server, buf, malformed))
return;
+ mid->credits_received = smb2_get_credits_from_hdr(buf, server);
mid->resp_buf = buf;
mid->large_buf = server->large_buf;
/* Was previous buf put in mpx struct for multi-rsp? */
@@ -957,8 +1014,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid);
+ kref_get(&mid_entry->refcount);
mid_entry->mid_state = MID_SHUTDOWN;
list_move(&mid_entry->qhead, &dispose_list);
+ mid_entry->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
@@ -968,6 +1027,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
+ cifs_mid_q_entry_release(mid_entry);
}
/* 1/8th of sec is more than enough time for them to exit */
msleep(125);
@@ -1008,7 +1068,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
/* make sure this will fit in a large buffer */
if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
server->vals->header_preamble_size) {
- cifs_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
+ cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
cifs_reconnect(server);
wake_up(&server->response_q);
return -ECONNABORTED;
@@ -1113,6 +1173,7 @@ cifs_demultiplex_thread(void *p)
mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
set_freezable();
+ allow_kernel_signal(SIGKILL);
while (server->tcpStatus != CifsExiting) {
if (try_to_freeze())
continue;
@@ -1148,7 +1209,7 @@ next_pdu:
/* make sure we have enough to get to the MID */
if (server->pdu_size < HEADER_SIZE(server) - 1 -
server->vals->header_preamble_size) {
- cifs_dbg(VFS, "SMB response too short (%u bytes)\n",
+ cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
server->pdu_size);
cifs_reconnect(server);
wake_up(&server->response_q);
@@ -1204,12 +1265,6 @@ next_pdu:
for (i = 0; i < num_mids; i++) {
if (mids[i] != NULL) {
mids[i]->resp_buf_size = server->pdu_size;
- if ((mids[i]->mid_flags & MID_WAIT_CANCELLED) &&
- mids[i]->mid_state == MID_RESPONSE_RECEIVED &&
- server->ops->handle_cancelled_mid)
- server->ops->handle_cancelled_mid(
- mids[i]->resp_buf,
- server);
if (!mids[i]->multiRsp || mids[i]->multiEnd)
mids[i]->callback(mids[i]);
@@ -1221,7 +1276,7 @@ next_pdu:
smb2_add_credits_from_hdr(bufs[i], server);
cifs_dbg(FYI, "Received oplock break\n");
} else {
- cifs_dbg(VFS, "No task to wake, unknown frame "
+ cifs_server_dbg(VFS, "No task to wake, unknown frame "
"received! NumMids %d\n",
atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", bufs[i],
@@ -1417,14 +1472,32 @@ cifs_parse_cache_flavor(char *value, struct smb_vol *vol)
case Opt_cache_loose:
vol->direct_io = false;
vol->strict_io = false;
+ vol->cache_ro = false;
+ vol->cache_rw = false;
break;
case Opt_cache_strict:
vol->direct_io = false;
vol->strict_io = true;
+ vol->cache_ro = false;
+ vol->cache_rw = false;
break;
case Opt_cache_none:
vol->direct_io = true;
vol->strict_io = false;
+ vol->cache_ro = false;
+ vol->cache_rw = false;
+ break;
+ case Opt_cache_ro:
+ vol->direct_io = false;
+ vol->strict_io = false;
+ vol->cache_ro = true;
+ vol->cache_rw = false;
+ break;
+ case Opt_cache_rw:
+ vol->direct_io = false;
+ vol->strict_io = false;
+ vol->cache_ro = false;
+ vol->cache_rw = true;
break;
default:
cifs_dbg(VFS, "bad cache= option: %s\n", value);
@@ -1636,6 +1709,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
+ /* default to no multichannel (single server connection) */
+ vol->multichannel = false;
+ vol->max_channels = 1;
+
if (!mountdata)
goto cifs_parse_mount_err;
@@ -1712,6 +1789,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_noautotune:
vol->noautotune = 1;
break;
+ case Opt_nolease:
+ vol->no_lease = 1;
+ break;
case Opt_hard:
vol->retry = 1;
break;
@@ -1747,6 +1827,11 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_nodfs:
vol->nodfs = 1;
break;
+ case Opt_rootfs:
+#ifdef CONFIG_CIFS_ROOT
+ vol->rootfs = true;
+#endif
+ break;
case Opt_posixpaths:
vol->posix_paths = 1;
break;
@@ -1854,6 +1939,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_sign:
vol->sign = true;
break;
+ case Opt_ignore_signature:
+ vol->sign = true;
+ vol->ignore_signature = true;
+ break;
case Opt_seal:
/* we do not do the following in secFlags because seal
* is a per tree connection (mount) not a per socket
@@ -1917,6 +2006,12 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_rdma:
vol->rdma = true;
break;
+ case Opt_multichannel:
+ vol->multichannel = true;
+ break;
+ case Opt_nomultichannel:
+ vol->multichannel = false;
+ break;
case Opt_compress:
vol->compression = UNKNOWN_TYPE;
cifs_dbg(VFS,
@@ -1988,6 +2083,13 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
port = (unsigned short)option;
break;
+ case Opt_min_enc_offload:
+ if (get_option_ul(args, &option)) {
+ cifs_dbg(VFS, "Invalid minimum encrypted read offload size (esize)\n");
+ goto cifs_parse_mount_err;
+ }
+ vol->min_offload = option;
+ break;
case Opt_blocksize:
if (get_option_ul(args, &option)) {
cifs_dbg(VFS, "%s: Invalid blocksize value\n",
@@ -2073,6 +2175,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
vol->max_credits = option;
break;
+ case Opt_max_channels:
+ if (get_option_ul(args, &option) || option < 1 ||
+ option > CIFS_MAX_CHANNELS) {
+ cifs_dbg(VFS, "%s: Invalid max_channels value, needs to be 1-%d\n",
+ __func__, CIFS_MAX_CHANNELS);
+ goto cifs_parse_mount_err;
+ }
+ vol->max_channels = option;
+ break;
/* String Arguments */
@@ -2585,6 +2696,12 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
if (server->rdma != vol->rdma)
return 0;
+ if (server->ignore_signature != vol->ignore_signature)
+ return 0;
+
+ if (server->min_offload != vol->min_offload)
+ return 0;
+
return 1;
}
@@ -2652,7 +2769,7 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
send_sig(SIGKILL, task, 1);
}
-static struct TCP_Server_Info *
+struct TCP_Server_Info *
cifs_get_tcp_session(struct smb_vol *volume_info)
{
struct TCP_Server_Info *tcp_ses = NULL;
@@ -2680,11 +2797,13 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
goto out_err_crypto_release;
}
- tcp_ses->noblocksnd = volume_info->noblocksnd;
+ tcp_ses->noblockcnt = volume_info->rootfs;
+ tcp_ses->noblocksnd = volume_info->noblocksnd || volume_info->rootfs;
tcp_ses->noautotune = volume_info->noautotune;
tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
tcp_ses->rdma = volume_info->rdma;
tcp_ses->in_flight = 0;
+ tcp_ses->max_in_flight = 0;
tcp_ses->credits = 1;
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
@@ -2709,7 +2828,11 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
sizeof(tcp_ses->srcaddr));
memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
sizeof(tcp_ses->dstaddr));
- generate_random_uuid(tcp_ses->client_guid);
+ if (volume_info->use_client_guid)
+ memcpy(tcp_ses->client_guid, volume_info->client_guid,
+ SMB2_CLIENT_GUID_SIZE);
+ else
+ generate_random_uuid(tcp_ses->client_guid);
/*
* at this point we are the only ones with the pointer
* to the struct since the kernel thread not created yet
@@ -2759,10 +2882,11 @@ smbd_connected:
module_put(THIS_MODULE);
goto out_err_crypto_release;
}
+ tcp_ses->min_offload = volume_info->min_offload;
tcp_ses->tcpStatus = CifsNeedNegotiate;
tcp_ses->nr_targets = 1;
-
+ tcp_ses->ignore_signature = volume_info->ignore_signature;
/* thread spawned, put it on the list */
spin_lock(&cifs_tcp_ses_lock);
list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
@@ -2797,6 +2921,13 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
vol->sectype != ses->sectype)
return 0;
+ /*
+ * If an existing session is limited to less channels than
+ * requested, it should not be reused
+ */
+ if (ses->chan_max < vol->max_channels)
+ return 0;
+
switch (ses->sectype) {
case Kerberos:
if (!uid_eq(vol->cred_uid, ses->cred_uid))
@@ -2839,16 +2970,17 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
struct nls_table *nls_codepage;
char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
bool seal = false;
+ struct TCP_Server_Info *server = ses->server;
/*
* If the mount request that resulted in the creation of the
* session requires encryption, force IPC to be encrypted too.
*/
if (volume_info->seal) {
- if (ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
+ if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
seal = true;
else {
- cifs_dbg(VFS,
+ cifs_server_dbg(VFS,
"IPC: server doesn't support encryption\n");
return -EOPNOTSUPP;
}
@@ -2858,7 +2990,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
if (tcon == NULL)
return -ENOMEM;
- scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
+ scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
/* cannot fail */
nls_codepage = load_nls_default();
@@ -2867,11 +2999,11 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
tcon->ses = ses;
tcon->ipc = true;
tcon->seal = seal;
- rc = ses->server->ops->tree_connect(xid, ses, unc, tcon, nls_codepage);
+ rc = server->ops->tree_connect(xid, ses, unc, tcon, nls_codepage);
free_xid(xid);
if (rc) {
- cifs_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
+ cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
tconInfoFree(tcon);
goto out;
}
@@ -2957,7 +3089,7 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
xid = get_xid();
rc = server->ops->logoff(xid, ses);
if (rc)
- cifs_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
+ cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
__func__, rc);
_free_xid(xid);
}
@@ -2966,6 +3098,14 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
list_del_init(&ses->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
+ /* close any extra channels */
+ if (ses->chan_count > 1) {
+ int i;
+
+ for (i = 1; i < ses->chan_count; i++)
+ cifs_put_tcp_session(ses->chans[i].server, 0);
+ }
+
sesInfoFree(ses);
cifs_put_tcp_session(server, 0);
}
@@ -2980,6 +3120,7 @@ static int
cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
{
int rc = 0;
+ int is_domain = 0;
const char *delim, *payload;
char *desc;
ssize_t len;
@@ -3027,6 +3168,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
rc = PTR_ERR(key);
goto out_err;
}
+ is_domain = 1;
}
down_read(&key->sem);
@@ -3084,6 +3226,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
goto out_key_put;
}
+ /*
+ * If we have a domain key then we must set the domainName in the
+ * volume info for the request.
+ */
+ if (is_domain && ses->domainName) {
+ vol->domainname = kstrndup(ses->domainName,
+ strlen(ses->domainName),
+ GFP_KERNEL);
+ if (!vol->domainname) {
+ cifs_dbg(FYI, "Unable to allocate %zd bytes for "
+ "domain\n", len);
+ rc = -ENOMEM;
+ kfree(vol->username);
+ vol->username = NULL;
+ kzfree(vol->password);
+ vol->password = NULL;
+ goto out_key_put;
+ }
+ }
+
out_key_put:
up_read(&key->sem);
key_put(key);
@@ -3189,16 +3351,26 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
ses->sectype = volume_info->sectype;
ses->sign = volume_info->sign;
-
mutex_lock(&ses->session_mutex);
+
+ /* add server as first channel */
+ ses->chans[0].server = server;
+ ses->chan_count = 1;
+ ses->chan_max = volume_info->multichannel ? volume_info->max_channels:1;
+
rc = cifs_negotiate_protocol(xid, ses);
if (!rc)
rc = cifs_setup_session(xid, ses, volume_info->local_nls);
+
+ /* each channel uses a different signing key */
+ memcpy(ses->chans[0].signkey, ses->smb3signingkey,
+ sizeof(ses->smb3signingkey));
+
mutex_unlock(&ses->session_mutex);
if (rc)
goto get_ses_fail;
- /* success, put it on the list */
+ /* success, put it on the list and add it as first channel */
spin_lock(&cifs_tcp_ses_lock);
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -3227,6 +3399,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
return 0;
if (tcon->handle_timeout != volume_info->handle_timeout)
return 0;
+ if (tcon->no_lease != volume_info->no_lease)
+ return 0;
return 1;
}
@@ -3432,6 +3606,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
tcon->use_resilient = true;
}
+ /* If the user really knows what they are doing they can override */
+ if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
+ if (volume_info->cache_ro)
+ cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
+ else if (volume_info->cache_rw)
+ cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
+ }
+
/*
* We can have only one retry value for a connection to a share so for
* resources mounted more than once to the same server share the last
@@ -3441,6 +3623,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
tcon->nocase = volume_info->nocase;
tcon->nohandlecache = volume_info->nohandlecache;
tcon->local_lease = volume_info->local_lease;
+ tcon->no_lease = volume_info->no_lease;
INIT_LIST_HEAD(&tcon->pending_opens);
spin_lock(&cifs_tcp_ses_lock);
@@ -3636,10 +3819,10 @@ bind_socket(struct TCP_Server_Info *server)
saddr4 = (struct sockaddr_in *)&server->srcaddr;
saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
if (saddr6->sin6_family == AF_INET6)
- cifs_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
+ cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
&saddr6->sin6_addr, rc);
else
- cifs_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
+ cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
&saddr4->sin_addr.s_addr, rc);
}
}
@@ -3743,7 +3926,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
IPPROTO_TCP, &socket, 1);
if (rc < 0) {
- cifs_dbg(VFS, "Error %d creating socket\n", rc);
+ cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
server->ssocket = NULL;
return rc;
}
@@ -3791,7 +3974,15 @@ generic_ip_connect(struct TCP_Server_Info *server)
socket->sk->sk_sndbuf,
socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
- rc = socket->ops->connect(socket, saddr, slen, 0);
+ rc = socket->ops->connect(socket, saddr, slen,
+ server->noblockcnt ? O_NONBLOCK : 0);
+ /*
+ * When mounting SMB root file systems, we do not want to block in
+ * connect. Otherwise bail out and then let cifs_reconnect() perform
+ * reconnect failover - if possible.
+ */
+ if (server->noblockcnt && rc == -EINPROGRESS)
+ rc = 0;
if (rc < 0) {
cifs_dbg(FYI, "Error %d connecting to server\n", rc);
sock_release(socket);
@@ -4017,6 +4208,14 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_dbg(FYI, "mounting share using direct i/o\n");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
}
+ if (pvolume_info->cache_ro) {
+ cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
+ } else if (pvolume_info->cache_rw) {
+ cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
+ cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
+ CIFS_MOUNT_RW_CACHE);
+ }
if (pvolume_info->mfsymlinks) {
if (pvolume_info->sfu_emul) {
/*
@@ -4127,7 +4326,7 @@ static int mount_get_conns(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
if ((vol->persistent == true) && (!(ses->server->capabilities &
SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
- cifs_dbg(VFS, "persistent handles not supported by server\n");
+ cifs_server_dbg(VFS, "persistent handles not supported by server\n");
return -EOPNOTSUPP;
}
@@ -4159,8 +4358,18 @@ static int mount_get_conns(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
tcon->unix_ext = 0; /* server does not support them */
/* do not care if a following call succeed - informational */
- if (!tcon->pipe && server->ops->qfs_tcon)
+ if (!tcon->pipe && server->ops->qfs_tcon) {
server->ops->qfs_tcon(*xid, tcon);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
+ if (tcon->fsDevInfo.DeviceCharacteristics &
+ cpu_to_le32(FILE_READ_ONLY_DEVICE))
+ cifs_dbg(VFS, "mounted to read only share\n");
+ else if ((cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_RW_CACHE) == 0)
+ cifs_dbg(VFS, "read only mount of RW share\n");
+ /* no need to log a RW mount of a typical RW share */
+ }
+ }
cifs_sb->wsize = server->ops->negotiate_wsize(tcon, vol);
cifs_sb->rsize = server->ops->negotiate_rsize(tcon, vol);
@@ -4208,16 +4417,19 @@ build_unc_path_to_root(const struct smb_vol *vol,
strlen(vol->prepath) + 1 : 0;
unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
+ if (unc_len > MAX_TREE_SIZE)
+ return ERR_PTR(-EINVAL);
+
full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
- strncpy(full_path, vol->UNC, unc_len);
+ memcpy(full_path, vol->UNC, unc_len);
pos = full_path + unc_len;
if (pplen) {
*pos = CIFS_DIR_SEP(cifs_sb);
- strncpy(pos + 1, vol->prepath, pplen);
+ memcpy(pos + 1, vol->prepath, pplen);
pos += pplen;
}
@@ -4331,7 +4543,7 @@ static int setup_dfs_tgt_conn(const char *path,
int rc;
struct dfs_info3_param ref = {0};
char *mdata = NULL, *fake_devname = NULL;
- struct smb_vol fake_vol = {0};
+ struct smb_vol fake_vol = {NULL};
cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
@@ -4562,7 +4774,7 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb_vol *vol,
rc = cifs_are_all_path_components_accessible(server, xid, tcon,
cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
if (rc != 0) {
- cifs_dbg(VFS, "cannot query dirs between root and final path, "
+ cifs_server_dbg(VFS, "cannot query dirs between root and final path, "
"enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
rc = 0;
@@ -4574,6 +4786,17 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb_vol *vol,
}
#ifdef CONFIG_CIFS_DFS_UPCALL
+static inline void set_root_tcon(struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon,
+ struct cifs_tcon **root)
+{
+ spin_lock(&cifs_tcp_ses_lock);
+ tcon->tc_count++;
+ tcon->remap = cifs_remap(cifs_sb);
+ spin_unlock(&cifs_tcp_ses_lock);
+ *root = tcon;
+}
+
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
{
int rc = 0;
@@ -4675,18 +4898,10 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
/* Cache out resolved root server */
(void)dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
root_path + 1, NULL, NULL);
- /*
- * Save root tcon for additional DFS requests to update or create a new
- * DFS cache entry, or even perform DFS failover.
- */
- spin_lock(&cifs_tcp_ses_lock);
- tcon->tc_count++;
- tcon->dfs_path = root_path;
+ kfree(root_path);
root_path = NULL;
- tcon->remap = cifs_remap(cifs_sb);
- spin_unlock(&cifs_tcp_ses_lock);
- root_tcon = tcon;
+ set_root_tcon(cifs_sb, tcon, &root_tcon);
for (count = 1; ;) {
if (!rc && tcon) {
@@ -4723,6 +4938,15 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
mount_put_conns(cifs_sb, xid, server, ses, tcon);
rc = mount_get_conns(vol, cifs_sb, &xid, &server, &ses,
&tcon);
+ /*
+ * Ensure that DFS referrals go through new root server.
+ */
+ if (!rc && tcon &&
+ (tcon->share_flags & (SHI1005_FLAGS_DFS |
+ SHI1005_FLAGS_DFS_ROOT))) {
+ cifs_put_tcon(root_tcon);
+ set_root_tcon(cifs_sb, tcon, &root_tcon);
+ }
}
if (rc) {
if (rc == -EACCES || rc == -EOPNOTSUPP)
@@ -4771,6 +4995,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
cifs_autodisable_serverino(cifs_sb);
out:
free_xid(xid);
+ cifs_try_adding_channels(ses);
return mount_setup_tlink(cifs_sb, ses, tcon);
error:
@@ -5016,7 +5241,7 @@ int
cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses)
{
int rc = 0;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
if (!server->ops->need_neg || !server->ops->negotiate)
return -ENOSYS;
@@ -5043,28 +5268,30 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
struct nls_table *nls_info)
{
int rc = -ENOSYS;
- struct TCP_Server_Info *server = ses->server;
-
- ses->capabilities = server->capabilities;
- if (linuxExtEnabled == 0)
- ses->capabilities &= (~server->vals->cap_unix);
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
+
+ if (!ses->binding) {
+ ses->capabilities = server->capabilities;
+ if (linuxExtEnabled == 0)
+ ses->capabilities &= (~server->vals->cap_unix);
+
+ if (ses->auth_key.response) {
+ cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
+ ses->auth_key.response);
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+ ses->auth_key.len = 0;
+ }
+ }
cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
server->sec_mode, server->capabilities, server->timeAdj);
- if (ses->auth_key.response) {
- cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
- ses->auth_key.response);
- kfree(ses->auth_key.response);
- ses->auth_key.response = NULL;
- ses->auth_key.len = 0;
- }
-
if (server->ops->sess_setup)
rc = server->ops->sess_setup(xid, ses, nls_info);
if (rc)
- cifs_dbg(VFS, "Send error in SessSetup = %d\n", rc);
+ cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
return rc;
}
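cifs_negotiate_protocol() and cifs_setup_session() now go through cifs_ses_server() rather than dereferencing ses->server directly. The helper is defined in the headers, not in this hunk; a hedged sketch of its assumed semantics (the channel field name below is an assumption):

static inline struct TCP_Server_Info *cifs_ses_server(struct cifs_ses *ses)
{
	/* while binding an extra channel, operate on that channel's server */
	if (ses->binding)
		return ses->binding_chan->server;
	return ses->server;	/* otherwise the primary connection */
}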
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 1692c0c6c23a..2faa05860a48 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -1317,7 +1317,6 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
int rc;
struct dfs_info3_param ref = {0};
char *mdata = NULL, *devname = NULL;
- bool is_smb3 = tcon->ses->server->vals->header_preamble_size == 0;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct smb_vol vol;
@@ -1344,7 +1343,7 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
goto out;
}
- rc = cifs_setup_volume_info(&vol, mdata, devname, is_smb3);
+ rc = cifs_setup_volume_info(&vol, mdata, devname, false);
kfree(devname);
if (rc) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f26a48dd2e39..f3b79012ff29 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -69,11 +69,10 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
return full_path;
if (dfsplen)
- strncpy(full_path, tcon->treeName, dfsplen);
+ memcpy(full_path, tcon->treeName, dfsplen);
full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
- strncpy(full_path + dfsplen + 1, vol->prepath, pplen);
+ memcpy(full_path + dfsplen + 1, vol->prepath, pplen);
convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
- full_path[dfsplen + pplen] = 0; /* add trailing null */
return full_path;
}
@@ -126,7 +125,7 @@ cifs_bp_rename_retry:
}
rcu_read_unlock();
- full_path = kmalloc(namelen+1, GFP_KERNEL);
+ full_path = kmalloc(namelen+1, GFP_ATOMIC);
if (full_path == NULL)
return full_path;
full_path[namelen] = 0; /* trailing null */
@@ -245,10 +244,8 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
*oplock = REQ_OPLOCK;
full_path = build_path_from_dentry(direntry);
- if (full_path == NULL) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!full_path)
+ return -ENOMEM;
if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
@@ -739,10 +736,16 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
static int
cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
{
+ struct inode *inode;
+
if (flags & LOOKUP_RCU)
return -ECHILD;
if (d_really_is_positive(direntry)) {
+ inode = d_inode(direntry);
+ if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+ CIFS_I(inode)->time = 0; /* force reval */
+
if (cifs_revalidate_dentry(direntry))
return 0;
else {
@@ -753,7 +756,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
* attributes will have been updated by
* cifs_revalidate_dentry().
*/
- if (IS_AUTOMOUNT(d_inode(direntry)) &&
+ if (IS_AUTOMOUNT(inode) &&
!(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
spin_lock(&direntry->d_lock);
direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index ce8b7f677c58..eb0bb8ca8e63 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -24,7 +24,7 @@
*/
/*
- * See Documentation/filesystems/nfs/Exporting
+ * See Documentation/filesystems/nfs/exporting.rst
* and examples in fs/exportfs
*
* Since cifs is a network file system, an "fsid" must be included for
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 97090693d182..f1fe9c44d298 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -253,6 +253,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
xid, fid);
+ if (rc) {
+ server->ops->close(xid, tcon, fid);
+ if (rc == -ESTALE)
+ rc = -EOPENSTALE;
+ }
+
out:
kfree(buf);
return rc;
@@ -275,6 +281,15 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode)
return has_locks;
}
+void
+cifs_down_write(struct rw_semaphore *sem)
+{
+ while (!down_write_trylock(sem))
+ msleep(10);
+}
+
+static void cifsFileInfo_put_work(struct work_struct *work);
+
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct tcon_link *tlink, __u32 oplock)
@@ -300,7 +315,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
INIT_LIST_HEAD(&fdlocks->locks);
fdlocks->cfile = cfile;
cfile->llist = fdlocks;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_add(&fdlocks->llist, &cinode->llist);
up_write(&cinode->lock_sem);
@@ -312,6 +327,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->invalidHandle = false;
cfile->tlink = cifs_get_tlink(tlink);
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
+ INIT_WORK(&cfile->put, cifsFileInfo_put_work);
mutex_init(&cfile->fh_mutex);
spin_lock_init(&cfile->file_info_lock);
@@ -362,6 +378,41 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
return cifs_file;
}
+static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
+{
+ struct inode *inode = d_inode(cifs_file->dentry);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifsLockInfo *li, *tmp;
+ struct super_block *sb = inode->i_sb;
+
+ /*
+ * Delete any outstanding lock records. We'll lose them when the file
+ * is closed anyway.
+ */
+ cifs_down_write(&cifsi->lock_sem);
+ list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
+ list_del(&li->llist);
+ cifs_del_lock_waiters(li);
+ kfree(li);
+ }
+ list_del(&cifs_file->llist->llist);
+ kfree(cifs_file->llist);
+ up_write(&cifsi->lock_sem);
+
+ cifs_put_tlink(cifs_file->tlink);
+ dput(cifs_file->dentry);
+ cifs_sb_deactive(sb);
+ kfree(cifs_file);
+}
+
+static void cifsFileInfo_put_work(struct work_struct *work)
+{
+ struct cifsFileInfo *cifs_file = container_of(work,
+ struct cifsFileInfo, put);
+
+ cifsFileInfo_put_final(cifs_file);
+}
+
/**
* cifsFileInfo_put - release a reference of file priv data
*
@@ -369,15 +420,15 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
*/
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
- _cifsFileInfo_put(cifs_file, true);
+ _cifsFileInfo_put(cifs_file, true, true);
}
/**
* _cifsFileInfo_put - release a reference of file priv data
*
* This may involve closing the filehandle @cifs_file out on the
- * server. Must be called without holding tcon->open_file_lock and
- * cifs_file->file_info_lock.
+ * server. Must be called without holding tcon->open_file_lock,
+ * cinode->open_file_lock and cifs_file->file_info_lock.
*
* If @wait_for_oplock_handler is true and we are releasing the last
* reference, wait for any running oplock break handler of the file
@@ -385,7 +436,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
* oplock break handler, you need to pass false.
*
*/
-void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
+ bool wait_oplock_handler, bool offload)
{
struct inode *inode = d_inode(cifs_file->dentry);
struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
@@ -393,16 +445,16 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct super_block *sb = inode->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsLockInfo *li, *tmp;
struct cifs_fid fid;
struct cifs_pending_open open;
bool oplock_break_cancelled;
spin_lock(&tcon->open_file_lock);
-
+ spin_lock(&cifsi->open_file_lock);
spin_lock(&cifs_file->file_info_lock);
if (--cifs_file->count > 0) {
spin_unlock(&cifs_file->file_info_lock);
+ spin_unlock(&cifsi->open_file_lock);
spin_unlock(&tcon->open_file_lock);
return;
}
@@ -415,9 +467,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
/* remove it from the lists */
- spin_lock(&cifsi->open_file_lock);
list_del(&cifs_file->flist);
- spin_unlock(&cifsi->open_file_lock);
list_del(&cifs_file->tlist);
atomic_dec(&tcon->num_local_opens);
@@ -434,6 +484,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
cifs_set_oplock_level(cifsi, 0);
}
+ spin_unlock(&cifsi->open_file_lock);
spin_unlock(&tcon->open_file_lock);
oplock_break_cancelled = wait_oplock_handler ?
@@ -454,24 +505,10 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
cifs_del_pending_open(&open);
- /*
- * Delete any outstanding lock records. We'll lose them when the file
- * is closed anyway.
- */
- down_write(&cifsi->lock_sem);
- list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
- list_del(&li->llist);
- cifs_del_lock_waiters(li);
- kfree(li);
- }
- list_del(&cifs_file->llist->llist);
- kfree(cifs_file->llist);
- up_write(&cifsi->lock_sem);
-
- cifs_put_tlink(cifs_file->tlink);
- dput(cifs_file->dentry);
- cifs_sb_deactive(sb);
- kfree(cifs_file);
+ if (offload)
+ queue_work(fileinfo_put_wq, &cifs_file->put);
+ else
+ cifsFileInfo_put_final(cifs_file);
}
int cifs_open(struct inode *inode, struct file *file)
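The put path is now split: the final reference drop either runs cifsFileInfo_put_final() inline or defers it to a work item, so callers that cannot block on the teardown can offload it. Sketch of the two call sites involved (fileinfo_put_wq is assumed to be created at module init elsewhere in the patch):

	/* at fileinfo creation time */
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);

	/* on the final reference drop */
	if (offload)
		queue_work(fileinfo_put_wq, &cfile->put);	/* asynchronous teardown */
	else
		cifsFileInfo_put_final(cfile);			/* synchronous teardown */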
@@ -715,6 +752,13 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
+ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ if (cfile->f_flags & O_SYNC)
+ create_options |= CREATE_WRITE_THROUGH;
+
+ if (cfile->f_flags & O_DIRECT)
+ create_options |= CREATE_NO_BUFFER;
+
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &cfile->fid);
@@ -795,7 +839,7 @@ reopen_error_exit:
int cifs_close(struct inode *inode, struct file *file)
{
if (file->private_data != NULL) {
- cifsFileInfo_put(file->private_data);
+ _cifsFileInfo_put(file->private_data, true, false);
file->private_data = NULL;
}
@@ -1021,7 +1065,7 @@ static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_add_tail(&lock->llist, &cfile->llist->locks);
up_write(&cinode->lock_sem);
}
@@ -1043,7 +1087,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
try_again:
exist = false;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
lock->type, lock->flags, &conf_lock,
@@ -1066,7 +1110,7 @@ try_again:
(lock->blist.next == &lock->blist));
if (!rc)
goto try_again;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_del_init(&lock->blist);
}
@@ -1119,7 +1163,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
return rc;
try_again:
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
@@ -1325,7 +1369,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
int rc = 0;
/* we are going to update can_cache_brlcks here - need a write access */
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
@@ -1516,7 +1560,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
if (!buf)
return -ENOMEM;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
@@ -1668,7 +1712,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
rc = server->ops->mand_unlock_range(cfile, flock, xid);
out:
- if (flock->fl_flags & FL_POSIX) {
+ if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
/*
* If this is a request to remove all locks because we
* are closing the file, it doesn't matter if the
@@ -1685,6 +1729,52 @@ out:
return rc;
}
+int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
+{
+ int rc, xid;
+ int lock = 0, unlock = 0;
+ bool wait_flag = false;
+ bool posix_lck = false;
+ struct cifs_sb_info *cifs_sb;
+ struct cifs_tcon *tcon;
+ struct cifsFileInfo *cfile;
+ __u32 type;
+
+ rc = -EACCES;
+ xid = get_xid();
+
+ if (!(fl->fl_flags & FL_FLOCK))
+ return -ENOLCK;
+
+ cfile = (struct cifsFileInfo *)file->private_data;
+ tcon = tlink_tcon(cfile->tlink);
+
+ cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
+ tcon->ses->server);
+ cifs_sb = CIFS_FILE_SB(file);
+
+ if (cap_unix(tcon->ses) &&
+ (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+ ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+ posix_lck = true;
+
+ if (!lock && !unlock) {
+ /*
+ * if no lock or unlock then nothing to do since we do not
+ * know what it is
+ */
+ free_xid(xid);
+ return -EOPNOTSUPP;
+ }
+
+ rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
+ xid);
+ free_xid(xid);
+ return rc;
+
+
+}
+
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
int rc, xid;
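cifs_flock() parallels cifs_lock() but only accepts FL_FLOCK requests. It is presumably wired up as the .flock method of the cifs file_operations tables elsewhere in the patch; a sketch of that wiring (illustrative):

const struct file_operations cifs_file_ops_sketch = {
	/* ... existing methods ... */
	.lock	= cifs_lock,	/* POSIX byte-range locks */
	.flock	= cifs_flock,	/* whole-file flock() requests */
};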
@@ -1693,9 +1783,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
bool posix_lck = false;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
- struct cifsInodeInfo *cinode;
struct cifsFileInfo *cfile;
- __u16 netfid;
__u32 type;
rc = -EACCES;
@@ -1711,8 +1799,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
cifs_sb = CIFS_FILE_SB(file);
- netfid = cfile->fid.netfid;
- cinode = CIFS_I(file_inode(file));
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
@@ -1764,7 +1850,6 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
int rc = 0;
unsigned int bytes_written = 0;
unsigned int total_written;
- struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
unsigned int xid;
@@ -1772,8 +1857,6 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
struct cifs_io_parms io_parms;
- cifs_sb = CIFS_SB(dentry->d_sb);
-
cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
write_size, *offset, dentry);
@@ -1847,13 +1930,12 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
{
struct cifsFileInfo *open_file = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
- spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifs_inode->open_file_lock);
/* we could simply get the first_list_entry since write-only entries
are always at the end of the list but since the first entry might
have a close pending, we go through the whole list */
@@ -1865,7 +1947,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
/* found a good file */
/* lock it so it will not be closed on us */
cifsFileInfo_get(open_file);
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return open_file;
} /* else might as well continue, and look for
another, or simply have the caller reopen it
@@ -1873,7 +1955,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
} else /* write only file */
break; /* write only files are last so must be done */
}
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return NULL;
}
@@ -1884,7 +1966,6 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
{
struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
bool any_available = false;
int rc = -EBADF;
unsigned int refind = 0;
@@ -1904,16 +1985,15 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
}
cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
- tcon = cifs_sb_master_tcon(cifs_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
- spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifs_inode->open_file_lock);
refind_writable:
if (refind > MAX_REOPEN_ATT) {
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return rc;
}
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1925,7 +2005,7 @@ refind_writable:
if (!open_file->invalidHandle) {
/* found a good writable file */
cifsFileInfo_get(open_file);
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
*ret_file = open_file;
return 0;
} else {
@@ -1945,7 +2025,7 @@ refind_writable:
cifsFileInfo_get(inv_file);
}
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
if (inv_file) {
rc = cifs_reopen_file(inv_file, false);
@@ -1960,7 +2040,7 @@ refind_writable:
cifsFileInfo_put(inv_file);
++refind;
inv_file = NULL;
- spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifs_inode->open_file_lock);
goto refind_writable;
}
@@ -1980,6 +2060,77 @@ find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
return cfile;
}
+int
+cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+ struct cifsFileInfo **ret_file)
+{
+ struct list_head *tmp;
+ struct cifsFileInfo *cfile;
+ struct cifsInodeInfo *cinode;
+ char *full_path;
+
+ *ret_file = NULL;
+
+ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp, &tcon->openFileList) {
+ cfile = list_entry(tmp, struct cifsFileInfo,
+ tlist);
+ full_path = build_path_from_dentry(cfile->dentry);
+ if (full_path == NULL) {
+ spin_unlock(&tcon->open_file_lock);
+ return -ENOMEM;
+ }
+ if (strcmp(full_path, name)) {
+ kfree(full_path);
+ continue;
+ }
+
+ kfree(full_path);
+ cinode = CIFS_I(d_inode(cfile->dentry));
+ spin_unlock(&tcon->open_file_lock);
+ return cifs_get_writable_file(cinode, 0, ret_file);
+ }
+
+ spin_unlock(&tcon->open_file_lock);
+ return -ENOENT;
+}
+
+int
+cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
+ struct cifsFileInfo **ret_file)
+{
+ struct list_head *tmp;
+ struct cifsFileInfo *cfile;
+ struct cifsInodeInfo *cinode;
+ char *full_path;
+
+ *ret_file = NULL;
+
+ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp, &tcon->openFileList) {
+ cfile = list_entry(tmp, struct cifsFileInfo,
+ tlist);
+ full_path = build_path_from_dentry(cfile->dentry);
+ if (full_path == NULL) {
+ spin_unlock(&tcon->open_file_lock);
+ return -ENOMEM;
+ }
+ if (strcmp(full_path, name)) {
+ kfree(full_path);
+ continue;
+ }
+
+ kfree(full_path);
+ cinode = CIFS_I(d_inode(cfile->dentry));
+ spin_unlock(&tcon->open_file_lock);
+ *ret_file = find_readable_file(cinode, 0);
+ return *ret_file ? 0 : -ENOENT;
+ }
+
+ spin_unlock(&tcon->open_file_lock);
+ return -ENOENT;
+}
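/*
 * Illustrative sketch, not part of this patch: typical use of the two path
 * lookup helpers above.  A caller prefers reusing an already-open handle for
 * the given name and falls back to a path-based request when none is cached.
 * do_path_based_query() is a hypothetical placeholder for such a fallback.
 */
static int example_query_by_path(struct cifs_tcon *tcon, const char *name)
{
	struct cifsFileInfo *cfile = NULL;
	int rc;

	rc = cifs_get_readable_path(tcon, name, &cfile);
	if (rc == 0) {
		/* reuse cfile->fid in a compounded request, then drop the ref */
		cifsFileInfo_put(cfile);
		return 0;
	}
	/* -ENOENT (no cached handle) or -ENOMEM: fall back to a path open */
	return do_path_based_query(tcon, name);
}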
+
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
struct address_space *mapping = page->mapping;
@@ -2683,9 +2834,17 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
if (!rc) {
if (wdata->cfile->invalidHandle)
rc = -EAGAIN;
- else
+ else {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (wdata->mr) {
+ wdata->mr->need_invalidate = true;
+ smbd_deregister_mr(wdata->mr);
+ wdata->mr = NULL;
+ }
+#endif
rc = server->ops->async_writev(wdata,
cifs_uncached_writedata_release);
+ }
}
/* If the write was successfully sent, we are done */
@@ -3408,8 +3567,16 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
if (!rc) {
if (rdata->cfile->invalidHandle)
rc = -EAGAIN;
- else
+ else {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (rdata->mr) {
+ rdata->mr->need_invalidate = true;
+ smbd_deregister_mr(rdata->mr);
+ rdata->mr = NULL;
+ }
+#endif
rc = server->ops->async_readv(rdata);
+ }
}
/* If the read was successfully sent, we are done */
@@ -3577,10 +3744,8 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
struct cifs_readdata *rdata, *tmp;
struct iov_iter *to = &ctx->iter;
struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
int rc;
- tcon = tlink_tcon(ctx->cfile->tlink);
cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
mutex_lock(&ctx->aio_mutex);
@@ -4399,17 +4564,15 @@ static int cifs_readpage(struct file *file, struct page *page)
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
struct cifsFileInfo *open_file;
- struct cifs_tcon *tcon =
- cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
- spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifs_inode->open_file_lock);
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return 1;
}
}
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return 0;
}
@@ -4567,12 +4730,13 @@ void cifs_oplock_break(struct work_struct *work)
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
int rc = 0;
+ bool purge_cache = false;
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
- server->ops->downgrade_oplock(server, cinode,
- test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
+ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+ cfile->oplock_epoch, &purge_cache);
if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
cifs_has_mand_locks(cinode)) {
@@ -4587,18 +4751,21 @@ void cifs_oplock_break(struct work_struct *work)
else
break_lease(inode, O_WRONLY);
rc = filemap_fdatawrite(inode->i_mapping);
- if (!CIFS_CACHE_READ(cinode)) {
+ if (!CIFS_CACHE_READ(cinode) || purge_cache) {
rc = filemap_fdatawait(inode->i_mapping);
mapping_set_error(inode->i_mapping, rc);
cifs_zap_mapping(inode);
}
cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
+ if (CIFS_CACHE_WRITE(cinode))
+ goto oplock_break_ack;
}
rc = cifs_push_locks(cfile);
if (rc)
cifs_dbg(VFS, "Push locks rc = %d\n", rc);
+oplock_break_ack:
/*
* releasing stale oplock after recent reconnect of smb session using
* a now incorrect file handle is not a data integrity issue but do
@@ -4610,7 +4777,7 @@ void cifs_oplock_break(struct work_struct *work)
cinode);
cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
}
- _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
+ _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
cifs_done_oplock_break(cinode);
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 56ca4b8ccaba..8a76195e8a69 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -414,6 +414,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* if uniqueid is different, return error */
if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+ CIFS_I(*pinode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgiiu_exit;
}
@@ -421,6 +422,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* if filetype is different, return error */
if (unlikely(((*pinode)->i_mode & S_IFMT) !=
(fattr.cf_mode & S_IFMT))) {
+ CIFS_I(*pinode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgiiu_exit;
}
@@ -725,22 +727,138 @@ static __u64 simple_hashstr(const char *str)
return hash;
}
+/**
+ * cifs_backup_query_path_info - SMB1 fallback code to get ino
+ *
+ * Fallback code to get file metadata when we don't have access to
+ * @full_path (EACCES) and backup credentials are available.
+ *
+ * @data will be set to search info result buffer
+ * @resp_buf will be set to cifs resp buf and needs to be freed with
+ * cifs_buf_release() when done with @data.
+ */
+static int
+cifs_backup_query_path_info(int xid,
+ struct cifs_tcon *tcon,
+ struct super_block *sb,
+ const char *full_path,
+ void **resp_buf,
+ FILE_ALL_INFO **data)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifs_search_info info = {0};
+ u16 flags;
+ int rc;
+
+ *resp_buf = NULL;
+ info.endOfSearch = false;
+ if (tcon->unix_ext)
+ info.info_level = SMB_FIND_FILE_UNIX;
+ else if ((tcon->ses->capabilities &
+ tcon->ses->server->vals->cap_nt_find) == 0)
+ info.info_level = SMB_FIND_FILE_INFO_STANDARD;
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ info.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+	else /* no server inode numbers; useful as a fallback for some NetApp servers */
+ info.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+
+ flags = CIFS_SEARCH_CLOSE_ALWAYS |
+ CIFS_SEARCH_CLOSE_AT_END |
+ CIFS_SEARCH_BACKUP_SEARCH;
+
+ rc = CIFSFindFirst(xid, tcon, full_path,
+ cifs_sb, NULL, flags, &info, false);
+ if (rc)
+ return rc;
+
+ *resp_buf = (void *)info.ntwrk_buf_start;
+ *data = (FILE_ALL_INFO *)info.srch_entries_start;
+ return 0;
+}
+
+static void
+cifs_set_fattr_ino(int xid,
+ struct cifs_tcon *tcon,
+ struct super_block *sb,
+ struct inode **inode,
+ const char *full_path,
+ FILE_ALL_INFO *data,
+ struct cifs_fattr *fattr)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct TCP_Server_Info *server = tcon->ses->server;
+ int rc;
+
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+ if (*inode)
+ fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
+ else
+ fattr->cf_uniqueid = iunique(sb, ROOT_I);
+ return;
+ }
+
+ /*
+	 * If we have an inode, pass a NULL tcon to ensure we don't
+	 * make a round trip to the server. This only works for SMB2+.
+ */
+ rc = server->ops->get_srv_inum(xid,
+ *inode ? NULL : tcon,
+ cifs_sb, full_path,
+ &fattr->cf_uniqueid,
+ data);
+ if (rc) {
+ /*
+		 * If that fails, reuse the existing ino or generate one,
+		 * and disable server inode numbers
+ */
+ if (*inode)
+ fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
+ else {
+ fattr->cf_uniqueid = iunique(sb, ROOT_I);
+ cifs_autodisable_serverino(cifs_sb);
+ }
+ return;
+ }
+
+ /* If no errors, check for zero root inode (invalid) */
+ if (fattr->cf_uniqueid == 0 && strlen(full_path) == 0) {
+ cifs_dbg(FYI, "Invalid (0) inodenum\n");
+ if (*inode) {
+ /* reuse */
+ fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
+ } else {
+ /* make an ino by hashing the UNC */
+ fattr->cf_flags |= CIFS_FATTR_FAKE_ROOT_INO;
+ fattr->cf_uniqueid = simple_hashstr(tcon->treeName);
+ }
+ }
+}
+
+static inline bool is_inode_cache_good(struct inode *ino)
+{
+ return ino && CIFS_CACHE_READ(CIFS_I(ino)) && CIFS_I(ino)->time != 0;
+}
+
int
-cifs_get_inode_info(struct inode **inode, const char *full_path,
- FILE_ALL_INFO *data, struct super_block *sb, int xid,
+cifs_get_inode_info(struct inode **inode,
+ const char *full_path,
+ FILE_ALL_INFO *in_data,
+ struct super_block *sb, int xid,
const struct cifs_fid *fid)
{
- __u16 srchflgs;
- int rc = 0, tmprc = ENOSYS;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- char *buf = NULL;
bool adjust_tz = false;
- struct cifs_fattr fattr;
- struct cifs_search_info *srchinf = NULL;
+ struct cifs_fattr fattr = {0};
bool symlink = false;
+ FILE_ALL_INFO *data = in_data;
+ FILE_ALL_INFO *tmp_data = NULL;
+ void *smb1_backup_rsp_buf = NULL;
+ int rc = 0;
+ int tmprc = 0;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
@@ -748,142 +866,88 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
tcon = tlink_tcon(tlink);
server = tcon->ses->server;
- cifs_dbg(FYI, "Getting info on %s\n", full_path);
+ /*
+ * 1. Fetch file metadata if not provided (data)
+ */
- if ((data == NULL) && (*inode != NULL)) {
- if (CIFS_CACHE_READ(CIFS_I(*inode)) &&
- CIFS_I(*inode)->time != 0) {
+ if (!data) {
+ if (is_inode_cache_good(*inode)) {
cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
- goto cgii_exit;
- }
- }
-
- /* if inode info is not passed, get it from server */
- if (data == NULL) {
- if (!server->ops->query_path_info) {
- rc = -ENOSYS;
- goto cgii_exit;
+ goto out;
}
- buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (buf == NULL) {
+ tmp_data = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ if (!tmp_data) {
rc = -ENOMEM;
- goto cgii_exit;
+ goto out;
}
- data = (FILE_ALL_INFO *)buf;
- rc = server->ops->query_path_info(xid, tcon, cifs_sb, full_path,
- data, &adjust_tz, &symlink);
+ rc = server->ops->query_path_info(xid, tcon, cifs_sb,
+ full_path, tmp_data,
+ &adjust_tz, &symlink);
+ data = tmp_data;
}
- if (!rc) {
- cifs_all_info_to_fattr(&fattr, data, sb, adjust_tz,
- symlink);
- } else if (rc == -EREMOTE) {
+ /*
+ * 2. Convert it to internal cifs metadata (fattr)
+ */
+
+ switch (rc) {
+ case 0:
+ cifs_all_info_to_fattr(&fattr, data, sb, adjust_tz, symlink);
+ break;
+ case -EREMOTE:
+ /* DFS link, no metadata available on this server */
cifs_create_dfs_fattr(&fattr, sb);
rc = 0;
- } else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
- (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
- == 0)) {
+ break;
+ case -EACCES:
/*
- * For SMB2 and later the backup intent flag is already
- * sent if needed on open and there is no path based
- * FindFirst operation to use to retry with
+		 * permission error: try again with backup flags if possible
+ *
+ * For SMB2 and later the backup intent flag
+ * is already sent if needed on open and there
+ * is no path based FindFirst operation to use
+ * to retry with
*/
+ if (backup_cred(cifs_sb) && is_smb1_server(server)) {
+ /* for easier reading */
+ FILE_DIRECTORY_INFO *fdi;
+ SEARCH_ID_FULL_DIR_INFO *si;
+
+ rc = cifs_backup_query_path_info(xid, tcon, sb,
+ full_path,
+ &smb1_backup_rsp_buf,
+ &data);
+ if (rc)
+ goto out;
- srchinf = kzalloc(sizeof(struct cifs_search_info),
- GFP_KERNEL);
- if (srchinf == NULL) {
- rc = -ENOMEM;
- goto cgii_exit;
+ fdi = (FILE_DIRECTORY_INFO *)data;
+ si = (SEARCH_ID_FULL_DIR_INFO *)data;
+
+ cifs_dir_info_to_fattr(&fattr, fdi, cifs_sb);
+ fattr.cf_uniqueid = le64_to_cpu(si->UniqueId);
+ /* uniqueid set, skip get inum step */
+ goto handle_mnt_opt;
+ } else {
+ /* nothing we can do, bail out */
+ goto out;
}
+ break;
+ default:
+ cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc);
+ goto out;
+ }
- srchinf->endOfSearch = false;
- if (tcon->unix_ext)
- srchinf->info_level = SMB_FIND_FILE_UNIX;
- else if ((tcon->ses->capabilities &
- tcon->ses->server->vals->cap_nt_find) == 0)
- srchinf->info_level = SMB_FIND_FILE_INFO_STANDARD;
- else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
- srchinf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
- else /* no srvino useful for fallback to some netapp */
- srchinf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
-
- srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
- CIFS_SEARCH_CLOSE_AT_END |
- CIFS_SEARCH_BACKUP_SEARCH;
-
- rc = CIFSFindFirst(xid, tcon, full_path,
- cifs_sb, NULL, srchflgs, srchinf, false);
- if (!rc) {
- data = (FILE_ALL_INFO *)srchinf->srch_entries_start;
+ /*
+ * 3. Get or update inode number (fattr.cf_uniqueid)
+ */
- cifs_dir_info_to_fattr(&fattr,
- (FILE_DIRECTORY_INFO *)data, cifs_sb);
- fattr.cf_uniqueid = le64_to_cpu(
- ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
-
- cifs_buf_release(srchinf->ntwrk_buf_start);
- }
- kfree(srchinf);
- if (rc)
- goto cgii_exit;
- } else
- goto cgii_exit;
+ cifs_set_fattr_ino(xid, tcon, sb, inode, full_path, data, &fattr);
/*
- * If an inode wasn't passed in, then get the inode number
- *
- * Is an i_ino of zero legal? Can we use that to check if the server
- * supports returning inode numbers? Are there other sanity checks we
- * can use to ensure that the server is really filling in that field?
+ * 4. Tweak fattr based on mount options
*/
- if (*inode == NULL) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
- if (server->ops->get_srv_inum)
- tmprc = server->ops->get_srv_inum(xid,
- tcon, cifs_sb, full_path,
- &fattr.cf_uniqueid, data);
- if (tmprc) {
- cifs_dbg(FYI, "GetSrvInodeNum rc %d\n",
- tmprc);
- fattr.cf_uniqueid = iunique(sb, ROOT_I);
- cifs_autodisable_serverino(cifs_sb);
- } else if ((fattr.cf_uniqueid == 0) &&
- strlen(full_path) == 0) {
- /* some servers ret bad root ino ie 0 */
- cifs_dbg(FYI, "Invalid (0) inodenum\n");
- fattr.cf_flags |=
- CIFS_FATTR_FAKE_ROOT_INO;
- fattr.cf_uniqueid =
- simple_hashstr(tcon->treeName);
- }
- } else
- fattr.cf_uniqueid = iunique(sb, ROOT_I);
- } else {
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
- && server->ops->get_srv_inum) {
- /*
- * Pass a NULL tcon to ensure we don't make a round
- * trip to the server. This only works for SMB2+.
- */
- tmprc = server->ops->get_srv_inum(xid,
- NULL, cifs_sb, full_path,
- &fattr.cf_uniqueid, data);
- if (tmprc)
- fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
- else if ((fattr.cf_uniqueid == 0) &&
- strlen(full_path) == 0) {
- /*
- * Reuse existing root inode num since
- * inum zero for root causes ls of . and .. to
- * not be returned
- */
- cifs_dbg(FYI, "Srv ret 0 inode num for root\n");
- fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
- }
- } else
- fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
- }
+handle_mnt_opt:
/* query for SFU type info if supported and needed */
if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
@@ -893,12 +957,21 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
}
/* fill in 0777 bits from ACL */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
- rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, full_path, fid);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) {
+ rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true,
+ full_path, fid);
+ if (rc) {
+ cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+ rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false,
+ full_path, fid);
if (rc) {
cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n",
__func__, rc);
- goto cgii_exit;
+ goto out;
}
}
@@ -914,6 +987,10 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
}
+ /*
+ * 5. Update inode with final fattr data
+ */
+
if (!*inode) {
*inode = cifs_iget(sb, &fattr);
if (!*inode)
@@ -924,26 +1001,25 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
/* if uniqueid is different, return error */
if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+ CIFS_I(*inode)->time = 0; /* force reval */
rc = -ESTALE;
- goto cgii_exit;
+ goto out;
}
/* if filetype is different, return error */
if (unlikely(((*inode)->i_mode & S_IFMT) !=
(fattr.cf_mode & S_IFMT))) {
+ CIFS_I(*inode)->time = 0; /* force reval */
rc = -ESTALE;
- goto cgii_exit;
+ goto out;
}
cifs_fattr_to_inode(*inode, &fattr);
}
-
-cgii_exit:
- if ((*inode) && ((*inode)->i_ino == 0))
- cifs_dbg(FYI, "inode number of zero returned\n");
-
- kfree(buf);
+out:
+ cifs_buf_release(smb1_backup_rsp_buf);
cifs_put_tlink(tlink);
+ kfree(tmp_data);
return rc;
}
@@ -1613,13 +1689,14 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
}
/* BB add setting the equivalent of mode via CreateX w/ACLs */
- rc = server->ops->mkdir(xid, tcon, full_path, cifs_sb);
+ rc = server->ops->mkdir(xid, inode, mode, tcon, full_path, cifs_sb);
if (rc) {
cifs_dbg(FYI, "cifs_mkdir returned 0x%x\n", rc);
d_drop(direntry);
goto mkdir_out;
}
+ /* TODO: skip this for smb2/smb3 */
rc = cifs_mkdir_qinfo(inode, direntry, mode, full_path, cifs_sb, tcon,
xid);
mkdir_out:
@@ -2461,9 +2538,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
cifsFileInfo_put(wfile);
if (rc)
- return rc;
+ goto cifs_setattr_exit;
} else if (rc != -EBADF)
- return rc;
+ goto cifs_setattr_exit;
else
rc = 0;
}
@@ -2480,7 +2557,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
if (attrs->ia_valid & ATTR_GID)
gid = attrs->ia_gid;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
+ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
if (uid_valid(uid) || gid_valid(gid)) {
rc = id_mode_to_cifs_acl(inode, full_path, NO_CHANGE_64,
uid, gid);
@@ -2501,7 +2579,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
if (attrs->ia_valid & ATTR_MODE) {
mode = attrs->ia_mode;
rc = 0;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
+ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
rc = id_mode_to_cifs_acl(inode, full_path, mode,
INVALID_UID, INVALID_GID);
if (rc) {
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 76ddd98b6298..1a01e108d75e 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -164,6 +164,7 @@ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
{
struct inode *inode = file_inode(filep);
+ struct smb3_key_debug_info pkey_inf;
int rc = -ENOTTY; /* strange error - but the precedent */
unsigned int xid;
struct cifsFileInfo *pSMBFile = filep->private_data;
@@ -270,6 +271,34 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
else
rc = -EOPNOTSUPP;
break;
+ case CIFS_DUMP_KEY:
+ if (pSMBFile == NULL)
+ break;
+ if (!capable(CAP_SYS_ADMIN)) {
+ rc = -EACCES;
+ break;
+ }
+
+ tcon = tlink_tcon(pSMBFile->tlink);
+ if (!smb3_encryption_required(tcon)) {
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ pkey_inf.cipher_type =
+ le16_to_cpu(tcon->ses->server->cipher_type);
+ pkey_inf.Suid = tcon->ses->Suid;
+ memcpy(pkey_inf.auth_key, tcon->ses->auth_key.response,
+ 16 /* SMB2_NTLMV2_SESSKEY_SIZE */);
+ memcpy(pkey_inf.smb3decryptionkey,
+ tcon->ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
+ memcpy(pkey_inf.smb3encryptionkey,
+ tcon->ses->smb3encryptionkey, SMB3_SIGN_KEY_SIZE);
+ if (copy_to_user((void __user *)arg, &pkey_inf,
+ sizeof(struct smb3_key_debug_info)))
+ rc = -EFAULT;
+ else
+ rc = 0;
+ break;
default:
cifs_dbg(FYI, "unsupported ioctl\n");
break;
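/*
 * Illustrative sketch, not part of this patch: a privileged userspace helper
 * dumping the per-session keys via the new CIFS_DUMP_KEY ioctl (e.g. to
 * decrypt a network capture).  The ioctl request code and the layout of
 * struct smb3_key_debug_info are assumed to come from the cifs uapi header;
 * treat both as assumptions here.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int dump_keys(const char *path)
{
	struct smb3_key_debug_info info;	/* layout assumed from cifs uapi */
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, CIFS_DUMP_KEY, &info) == 0)
		printf("session id 0x%llx cipher %u\n",
		       (unsigned long long)info.Suid,
		       (unsigned int)info.cipher_type);
	close(fd);
	return 0;
}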
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index f383877a6511..40ca394fd5de 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -488,21 +488,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
&pCifsInode->flags);
- /*
- * Set flag if the server downgrades the oplock
- * to L2 else clear.
- */
- if (pSMB->OplockLevel)
- set_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &pCifsInode->flags);
- else
- clear_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &pCifsInode->flags);
-
- cifs_queue_oplock_break(netfile);
+ netfile->oplock_epoch = 0;
+ netfile->oplock_level = pSMB->OplockLevel;
netfile->oplock_break_cancelled = false;
+ cifs_queue_oplock_break(netfile);
spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
@@ -1011,3 +1000,25 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
*h = unc;
*len = end - unc;
}
+
+/**
+ * copy_path_name - copy the src path to dst, truncating if necessary
+ *
+ * Returns the number of bytes written, including the trailing nul.
+ */
+int copy_path_name(char *dst, const char *src)
+{
+ int name_len;
+
+ /*
+ * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
+ * will truncate and strlen(dst) will be PATH_MAX-1
+ */
+ name_len = strscpy(dst, src, PATH_MAX);
+ if (WARN_ON_ONCE(name_len < 0))
+ name_len = PATH_MAX-1;
+
+ /* we count the trailing nul */
+ name_len++;
+ return name_len;
+}
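/*
 * Illustrative sketch, not part of this patch: copy_path_name() always
 * reports the number of bytes stored including the trailing nul, so callers
 * can use the return value directly when sizing SMB path fields.
 */
static int example_fill_path(char *dst /* at least PATH_MAX bytes */,
			     const char *src)
{
	int name_len = copy_path_name(dst, src);

	/* name_len == strlen(dst) + 1 and never exceeds PATH_MAX */
	return name_len;
}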
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index ed92958e842d..9b41436fb8db 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -117,10 +117,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
{0, 0}
};
-static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
- {0, 0}
-};
-
/*
* Convert a string containing text IPv4 or IPv6 address to binary form.
*
@@ -949,8 +945,8 @@ static const int total_days_of_prev_months[] = {
struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
{
struct timespec64 ts;
- time64_t sec;
- int min, days, month, year;
+ time64_t sec, days;
+ int min, day, month, year;
u16 date = le16_to_cpu(le_date);
u16 time = le16_to_cpu(le_time);
SMB_TIME *st = (SMB_TIME *)&time;
@@ -966,15 +962,15 @@ struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
sec += 60 * 60 * st->Hours;
if (st->Hours > 24)
cifs_dbg(VFS, "illegal hours %d\n", st->Hours);
- days = sd->Day;
+ day = sd->Day;
month = sd->Month;
- if (days < 1 || days > 31 || month < 1 || month > 12) {
- cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, days);
- days = clamp(days, 1, 31);
+ if (day < 1 || day > 31 || month < 1 || month > 12) {
+ cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, day);
+ day = clamp(day, 1, 31);
month = clamp(month, 1, 12);
}
month -= 1;
- days += total_days_of_prev_months[month];
+ days = day + total_days_of_prev_months[month];
days += 3652; /* account for difference in days between 1980 and 1970 */
year = sd->Year;
days += year * 365;
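/*
 * Illustrative sketch, not part of this patch: with 'days' widened to
 * time64_t, the day-of-year arithmetic above is carried out in 64 bits.
 * Assuming the usual layout of total_days_of_prev_months[] ({0, 31, 59, ...}),
 * a DOS date of March 5th contributes:
 *
 *	day = 5, month = 3;
 *	month -= 1;					// month == 2
 *	days = day + total_days_of_prev_months[month];	// 5 + 59 == 64
 */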
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index dcd49ad60c83..fb3bdc44775c 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -31,6 +31,231 @@
#include <linux/utsname.h>
#include <linux/slab.h>
#include "cifs_spnego.h"
+#include "smb2proto.h"
+
+bool
+is_server_using_iface(struct TCP_Server_Info *server,
+ struct cifs_server_iface *iface)
+{
+ struct sockaddr_in *i4 = (struct sockaddr_in *)&iface->sockaddr;
+ struct sockaddr_in6 *i6 = (struct sockaddr_in6 *)&iface->sockaddr;
+ struct sockaddr_in *s4 = (struct sockaddr_in *)&server->dstaddr;
+ struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&server->dstaddr;
+
+ if (server->dstaddr.ss_family != iface->sockaddr.ss_family)
+ return false;
+ if (server->dstaddr.ss_family == AF_INET) {
+ if (s4->sin_addr.s_addr != i4->sin_addr.s_addr)
+ return false;
+ } else if (server->dstaddr.ss_family == AF_INET6) {
+ if (memcmp(&s6->sin6_addr, &i6->sin6_addr,
+ sizeof(i6->sin6_addr)) != 0)
+ return false;
+ } else {
+ /* unknown family.. */
+ return false;
+ }
+ return true;
+}
+
+bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
+{
+ int i;
+
+ for (i = 0; i < ses->chan_count; i++) {
+ if (is_server_using_iface(ses->chans[i].server, iface))
+ return true;
+ }
+ return false;
+}
+
+/* returns number of channels added */
+int cifs_try_adding_channels(struct cifs_ses *ses)
+{
+ int old_chan_count = ses->chan_count;
+ int left = ses->chan_max - ses->chan_count;
+ int i = 0;
+ int rc = 0;
+ int tries = 0;
+
+ if (left <= 0) {
+ cifs_dbg(FYI,
+ "ses already at max_channels (%zu), nothing to open\n",
+ ses->chan_max);
+ return 0;
+ }
+
+ if (ses->server->dialect < SMB30_PROT_ID) {
+ cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ return 0;
+ }
+
+ /*
+	 * Keep connecting to the same, fastest iface for all channels as
+	 * long as it is RSS-capable. Try the next fastest one if it is not
+	 * RSS-capable or if channel creation fails.
+ */
+ while (left > 0) {
+ struct cifs_server_iface *iface;
+
+ tries++;
+ if (tries > 3*ses->chan_max) {
+			cifs_dbg(FYI, "too many attempts at opening channels (%d channels left to open)\n",
+ left);
+ break;
+ }
+
+ iface = &ses->iface_list[i];
+ if (is_ses_using_iface(ses, iface) && !iface->rss_capable) {
+ i = (i+1) % ses->iface_count;
+ continue;
+ }
+
+ rc = cifs_ses_add_channel(ses, iface);
+ if (rc) {
+ cifs_dbg(FYI, "failed to open extra channel on iface#%d rc=%d\n",
+ i, rc);
+ i = (i+1) % ses->iface_count;
+ continue;
+ }
+
+ cifs_dbg(FYI, "successfully opened new channel on iface#%d\n",
+ i);
+ left--;
+ }
+
+ return ses->chan_count - old_chan_count;
+}
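/*
 * Illustrative sketch, not part of this patch: cifs_try_adding_channels() is
 * intended to be called once the interface list for the session is known
 * (e.g. after mount or reconnect).  The call site below is an assumption for
 * illustration; only the number of newly opened channels is reported back.
 */
static void example_open_channels(struct cifs_ses *ses)
{
	int added = cifs_try_adding_channels(ses);

	cifs_dbg(FYI, "opened %d extra channel(s)\n", added);
}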
+
+int
+cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
+{
+ struct cifs_chan *chan;
+ struct smb_vol vol = {NULL};
+ static const char unc_fmt[] = "\\%s\\foo";
+ char unc[sizeof(unc_fmt)+SERVER_NAME_LEN_WITH_NULL] = {0};
+ struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+ struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+ int rc;
+ unsigned int xid = get_xid();
+
+ cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ",
+ ses, iface->speed, iface->rdma_capable ? "yes" : "no");
+ if (iface->sockaddr.ss_family == AF_INET)
+ cifs_dbg(FYI, "ip:%pI4)\n", &ipv4->sin_addr);
+ else
+ cifs_dbg(FYI, "ip:%pI6)\n", &ipv6->sin6_addr);
+
+ /*
+	 * Set up an smb_vol with mostly the same info as the existing
+	 * session and overwrite it with the requested iface data.
+	 *
+	 * We need to set up at least the fields used for negprot and
+	 * session setup.
+ *
+ * We only need the volume here, so we can reuse memory from
+ * the session and server without caring about memory
+ * management.
+ */
+
+ /* Always make new connection for now (TODO?) */
+ vol.nosharesock = true;
+
+ /* Auth */
+ vol.domainauto = ses->domainAuto;
+ vol.domainname = ses->domainName;
+ vol.username = ses->user_name;
+ vol.password = ses->password;
+ vol.sectype = ses->sectype;
+ vol.sign = ses->sign;
+
+ /* UNC and paths */
+ /* XXX: Use ses->server->hostname? */
+ sprintf(unc, unc_fmt, ses->serverName);
+ vol.UNC = unc;
+ vol.prepath = "";
+
+ /* Re-use same version as master connection */
+ vol.vals = ses->server->vals;
+ vol.ops = ses->server->ops;
+
+ vol.noblocksnd = ses->server->noblocksnd;
+ vol.noautotune = ses->server->noautotune;
+ vol.sockopt_tcp_nodelay = ses->server->tcp_nodelay;
+ vol.echo_interval = ses->server->echo_interval / HZ;
+
+ /*
+ * This will be used for encoding/decoding user/domain/pw
+ * during sess setup auth.
+ *
+ * XXX: We use the default for simplicity but the proper way
+ * would be to use the one that ses used, which is not
+ * stored. This might break when dealing with non-ascii
+ * strings.
+ */
+ vol.local_nls = load_nls_default();
+
+ /* Use RDMA if possible */
+ vol.rdma = iface->rdma_capable;
+ memcpy(&vol.dstaddr, &iface->sockaddr, sizeof(struct sockaddr_storage));
+
+ /* reuse master con client guid */
+ memcpy(&vol.client_guid, ses->server->client_guid,
+ SMB2_CLIENT_GUID_SIZE);
+ vol.use_client_guid = true;
+
+ mutex_lock(&ses->session_mutex);
+
+ chan = &ses->chans[ses->chan_count];
+ chan->server = cifs_get_tcp_session(&vol);
+ if (IS_ERR(chan->server)) {
+ rc = PTR_ERR(chan->server);
+ chan->server = NULL;
+ goto out;
+ }
+
+ /*
+ * We need to allocate the server crypto now as we will need
+ * to sign packets before we generate the channel signing key
+ * (we sign with the session key)
+ */
+ rc = smb311_crypto_shash_allocate(chan->server);
+ if (rc) {
+ cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
+ goto out;
+ }
+
+ ses->binding = true;
+ rc = cifs_negotiate_protocol(xid, ses);
+ if (rc)
+ goto out;
+
+ rc = cifs_setup_session(xid, ses, vol.local_nls);
+ if (rc)
+ goto out;
+
+	/* success, put it on the list
+	 * XXX: sharing a ses between 2 tcp servers is not possible, the
+	 * way "internal" linked lists work in linux means an element can
+	 * only belong to one list
+	 *
+	 * the binding session is already established, so the rest of
+	 * the code should be able to look it up; no need to add the
+	 * ses to the new server.
+	 */
+
+ ses->chan_count++;
+ atomic_set(&ses->chan_seq, 0);
+out:
+ ses->binding = false;
+ mutex_unlock(&ses->session_mutex);
+
+ if (rc && chan->server)
+ cifs_put_tcp_session(chan->server, 0);
+ unload_nls(vol.local_nls);
+
+ return rc;
+}
static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
{
@@ -159,13 +384,16 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
char *bcc_ptr = *pbcc_area;
+ int len;
/* copy user */
/* BB what about null user mounts - check that we do this BB */
/* copy user */
if (ses->user_name != NULL) {
- strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
- bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+ len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_USERNAME_LEN - 1;
+ bcc_ptr += len;
}
/* else null user mount */
*bcc_ptr = 0;
@@ -173,8 +401,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
/* copy domain */
if (ses->domainName != NULL) {
- strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
- bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_DOMAINNAME_LEN - 1;
+ bcc_ptr += len;
} /* else we will send a null domain name
so the server will default to its own domain */
*bcc_ptr = 0;
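/*
 * Illustrative sketch, not part of this patch: strscpy() returns the number
 * of characters copied (not counting the nul) or -E2BIG when the source did
 * not fit, which is exactly what the WARN_ON_ONCE(len < 0) checks above catch.
 */
static void example_strscpy_semantics(void)
{
	char buf[8];
	ssize_t n;

	n = strscpy(buf, "short", sizeof(buf));		/* n == 5, buf == "short" */
	n = strscpy(buf, "far too long", sizeof(buf));	/* n == -E2BIG, buf truncated */
	(void)n;
}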
@@ -242,9 +472,10 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
kfree(ses->serverOS);
- ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
+ ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
if (ses->serverOS) {
- strncpy(ses->serverOS, bcc_ptr, len);
+ memcpy(ses->serverOS, bcc_ptr, len);
+ ses->serverOS[len] = 0;
if (strncmp(ses->serverOS, "OS/2", 4) == 0)
cifs_dbg(FYI, "OS/2 server\n");
}
@@ -258,9 +489,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(len + 1, GFP_KERNEL);
- if (ses->serverNOS)
- strncpy(ses->serverNOS, bcc_ptr, len);
+ ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
+ if (ses->serverNOS) {
+ memcpy(ses->serverNOS, bcc_ptr, len);
+ ses->serverNOS[len] = 0;
+ }
bcc_ptr += len + 1;
bleft -= len + 1;
@@ -334,6 +567,7 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
struct cifs_ses *ses)
{
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
__u32 flags;
@@ -346,9 +580,9 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
NTLMSSP_NEGOTIATE_SEAL;
- if (ses->server->sign)
+ if (server->sign)
flags |= NTLMSSP_NEGOTIATE_SIGN;
- if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
+ if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -690,7 +924,6 @@ sess_auth_lanman(struct sess_data *sess_data)
char *bcc_ptr;
struct cifs_ses *ses = sess_data->ses;
char lnm_session_key[CIFS_AUTH_RESP_SIZE];
- __u32 capabilities;
__u16 bytes_remaining;
/* lanman 2 style sessionsetup */
@@ -701,7 +934,7 @@ sess_auth_lanman(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
bcc_ptr = sess_data->iov[2].iov_base;
- capabilities = cifs_ssetup_hdr(ses, pSMB);
+ (void)cifs_ssetup_hdr(ses, pSMB);
pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index b7421a096319..d70a2bb062df 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -171,6 +171,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
/* we do not want to loop forever */
last_mid = cur_mid;
cur_mid++;
+ /* avoid 0xFFFF MID */
+ if (cur_mid == 0xffff)
+ cur_mid++;
/*
* This nested loop looks more expensive than it is.
@@ -366,12 +369,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
static void
cifs_downgrade_oplock(struct TCP_Server_Info *server,
- struct cifsInodeInfo *cinode, bool set_level2)
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
{
- if (set_level2)
- cifs_set_oplock_level(cinode, OPLOCK_READ);
- else
- cifs_set_oplock_level(cinode, 0);
+ cifs_set_oplock_level(cinode, oplock);
}
static bool
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index e6a1fc72018f..8b0b512c5792 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -145,7 +145,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
cur = buf;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
if (flock->fl_start > li->offset ||
(flock->fl_start + length) <
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index d8d9cdfa30b6..4121ac1163ca 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -51,7 +51,8 @@ static int
smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path,
__u32 desired_access, __u32 create_disposition,
- __u32 create_options, void *ptr, int command)
+ __u32 create_options, umode_t mode, void *ptr, int command,
+ struct cifsFileInfo *cfile)
{
int rc;
__le16 *utf16_path = NULL;
@@ -83,10 +84,16 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
+ /* We already have a handle so we can skip the open */
+ if (cfile)
+ goto after_open;
+
/* Open */
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
- if (!utf16_path)
- return -ENOMEM;
+ if (!utf16_path) {
+ rc = -ENOMEM;
+ goto finished;
+ }
oparms.tcon = tcon;
oparms.desired_access = desired_access;
@@ -96,6 +103,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
oparms.fid = &fid;
oparms.reconnect = false;
+ oparms.mode = mode;
memset(&open_iov, 0, sizeof(open_iov));
rqst[num_rqst].rq_iov = open_iov;
@@ -106,7 +114,10 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
if (rc)
goto finished;
- smb2_set_next_command(tcon, &rqst[num_rqst++]);
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ after_open:
+ num_rqst++;
+ rc = 0;
/* Operation */
switch (command) {
@@ -115,15 +126,31 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
rqst[num_rqst].rq_iov = qi_iov;
rqst[num_rqst].rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
- COMPOUND_FID, FILE_ALL_INFORMATION,
+ if (cfile)
+ rc = SMB2_query_info_init(tcon, &rqst[num_rqst],
+ cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid,
+ FILE_ALL_INFORMATION,
+ SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb2_file_all_info) +
+ PATH_MAX * 2, 0, NULL);
+ else {
+ rc = SMB2_query_info_init(tcon, &rqst[num_rqst],
+ COMPOUND_FID,
+ COMPOUND_FID,
+ FILE_ALL_INFORMATION,
SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_all_info) +
PATH_MAX * 2, 0, NULL);
+ if (!rc) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
+ }
+ }
+
if (rc)
goto finished;
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst++]);
+ num_rqst++;
trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid,
full_path);
break;
@@ -182,14 +209,27 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
size[0] = sizeof(FILE_BASIC_INFO);
data[0] = ptr;
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
- COMPOUND_FID, current->tgid,
- FILE_BASIC_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
+ if (cfile)
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, current->tgid,
+ FILE_BASIC_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ else {
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_BASIC_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ if (!rc) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
+ }
+ }
+
if (rc)
goto finished;
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst++]);
+ num_rqst++;
trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid,
full_path);
break;
@@ -210,14 +250,25 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
size[1] = len + 2 /* null */;
data[1] = (__le16 *)ptr;
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
- COMPOUND_FID, current->tgid,
- FILE_RENAME_INFORMATION,
+ if (cfile)
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid,
+ current->tgid, FILE_RENAME_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ else {
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ COMPOUND_FID, COMPOUND_FID,
+ current->tgid, FILE_RENAME_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (!rc) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
+ }
+ }
if (rc)
goto finished;
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst++]);
+ num_rqst++;
trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_HARDLINK:
@@ -254,21 +305,43 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
if (rc)
goto finished;
+ /* We already have a handle so we can skip the close */
+ if (cfile)
+ goto after_close;
/* Close */
memset(&close_iov, 0, sizeof(close_iov));
rqst[num_rqst].rq_iov = close_iov;
rqst[num_rqst].rq_nvec = 1;
rc = SMB2_close_init(tcon, &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID);
- smb2_set_related(&rqst[num_rqst++]);
+ smb2_set_related(&rqst[num_rqst]);
if (rc)
goto finished;
-
- rc = compound_send_recv(xid, ses, flags, num_rqst, rqst,
- resp_buftype, rsp_iov);
+ after_close:
+ num_rqst++;
+
+ if (cfile) {
+ cifsFileInfo_put(cfile);
+ cfile = NULL;
+ rc = compound_send_recv(xid, ses, flags, num_rqst - 2,
+ &rqst[1], &resp_buftype[1],
+ &rsp_iov[1]);
+ } else
+ rc = compound_send_recv(xid, ses, flags, num_rqst,
+ rqst, resp_buftype,
+ rsp_iov);
finished:
+ if (cfile)
+ cifsFileInfo_put(cfile);
+
SMB2_open_free(&rqst[0]);
+ if (rc == -EREMCHG) {
+ printk_once(KERN_WARNING "server share %s deleted\n",
+ tcon->treeName);
+ tcon->need_reconnect = true;
+ }
+
switch (command) {
case SMB2_OP_QUERY_INFO:
if (rc == 0) {
@@ -371,6 +444,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
__u32 create_options = 0;
struct cifs_fid fid;
bool no_cached_open = tcon->nohandlecache;
+ struct cifsFileInfo *cfile;
*adjust_tz = false;
*symlink = false;
@@ -402,9 +476,10 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
+ cifs_get_readable_path(tcon, full_path, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
- smb2_data, SMB2_OP_QUERY_INFO);
+ ACL_NO_MODE, smb2_data, SMB2_OP_QUERY_INFO, cfile);
if (rc == -EOPNOTSUPP) {
*symlink = true;
create_options |= OPEN_REPARSE_POINT;
@@ -412,8 +487,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
/* Failed on a symbolic link - query a reparse point info */
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN,
- create_options, smb2_data,
- SMB2_OP_QUERY_INFO);
+ create_options, ACL_NO_MODE,
+ smb2_data, SMB2_OP_QUERY_INFO, NULL);
}
if (rc)
goto out;
@@ -425,12 +500,14 @@ out:
}
int
-smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
+ struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
return smb2_compound_op(xid, tcon, cifs_sb, name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
- CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR);
+ CREATE_NOT_FILE, mode, NULL, SMB2_OP_MKDIR,
+ NULL);
}
void
@@ -440,6 +517,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
{
FILE_BASIC_INFO data;
struct cifsInodeInfo *cifs_i;
+ struct cifsFileInfo *cfile;
u32 dosattrs;
int tmprc;
@@ -447,9 +525,11 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
cifs_i = CIFS_I(inode);
dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
data.Attributes = cpu_to_le32(dosattrs);
+ cifs_get_writable_path(tcon, name, &cfile);
tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
- CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO);
+ CREATE_NOT_FILE, ACL_NO_MODE,
+ &data, SMB2_OP_SET_INFO, cfile);
if (tmprc == 0)
cifs_i->cifsAttrs = dosattrs;
}
@@ -459,8 +539,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
- CREATE_NOT_FILE,
- NULL, SMB2_OP_RMDIR);
+ CREATE_NOT_FILE, ACL_NO_MODE,
+ NULL, SMB2_OP_RMDIR, NULL);
}
int
@@ -469,13 +549,14 @@ smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
{
return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
- NULL, SMB2_OP_DELETE);
+ ACL_NO_MODE, NULL, SMB2_OP_DELETE, NULL);
}
static int
smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
const char *from_name, const char *to_name,
- struct cifs_sb_info *cifs_sb, __u32 access, int command)
+ struct cifs_sb_info *cifs_sb, __u32 access, int command,
+ struct cifsFileInfo *cfile)
{
__le16 *smb2_to_name = NULL;
int rc;
@@ -486,7 +567,8 @@ smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
goto smb2_rename_path;
}
rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
- FILE_OPEN, 0, smb2_to_name, command);
+ FILE_OPEN, 0, ACL_NO_MODE, smb2_to_name,
+ command, cfile);
smb2_rename_path:
kfree(smb2_to_name);
return rc;
@@ -497,8 +579,12 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
const char *from_name, const char *to_name,
struct cifs_sb_info *cifs_sb)
{
- return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
- DELETE, SMB2_OP_RENAME);
+ struct cifsFileInfo *cfile;
+
+ cifs_get_writable_path(tcon, from_name, &cfile);
+
+ return smb2_set_path_attr(xid, tcon, from_name, to_name,
+ cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
}
int
@@ -507,7 +593,8 @@ smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb)
{
return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
- FILE_READ_ATTRIBUTES, SMB2_OP_HARDLINK);
+ FILE_READ_ATTRIBUTES, SMB2_OP_HARDLINK,
+ NULL);
}
int
@@ -518,8 +605,8 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
__le64 eof = cpu_to_le64(size);
return smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_WRITE_DATA, FILE_OPEN, 0, &eof,
- SMB2_OP_SET_EOF);
+ FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE,
+ &eof, SMB2_OP_SET_EOF, NULL);
}
int
@@ -540,8 +627,8 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
return PTR_ERR(tlink);
rc = smb2_compound_op(xid, tlink_tcon(tlink), cifs_sb, full_path,
- FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
- SMB2_OP_SET_INFO);
+ FILE_WRITE_ATTRIBUTES, FILE_OPEN,
+ 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, NULL);
cifs_put_tlink(tlink);
return rc;
}
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 82ade16c9501..7fde3775cb57 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -511,7 +511,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_PRINT_QUEUE_FULL, -EIO, "STATUS_PRINT_QUEUE_FULL"},
{STATUS_NO_SPOOL_SPACE, -EIO, "STATUS_NO_SPOOL_SPACE"},
{STATUS_PRINT_CANCELLED, -EIO, "STATUS_PRINT_CANCELLED"},
- {STATUS_NETWORK_NAME_DELETED, -EIO, "STATUS_NETWORK_NAME_DELETED"},
+ {STATUS_NETWORK_NAME_DELETED, -EREMCHG, "STATUS_NETWORK_NAME_DELETED"},
{STATUS_NETWORK_ACCESS_DENIED, -EACCES, "STATUS_NETWORK_ACCESS_DENIED"},
{STATUS_BAD_DEVICE_TYPE, -EIO, "STATUS_BAD_DEVICE_TYPE"},
{STATUS_BAD_NETWORK_NAME, -ENOENT, "STATUS_BAD_NETWORK_NAME"},
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index e311f58dc1c8..0516fc482d43 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -29,6 +29,7 @@
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
+#include "nterr.h"
static int
check_smb2_hdr(struct smb2_sync_hdr *shdr, __u64 mid)
@@ -249,16 +250,10 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
* of junk. Other servers match RFC1001 len to actual
* SMB2/SMB3 frame length (header + smb2 response specific data)
* Some windows servers also pad up to 8 bytes when compounding.
- * If pad is longer than eight bytes, log the server behavior
- * (once), since may indicate a problem but allow it and continue
- * since the frame is parseable.
*/
- if (clc_len < len) {
- pr_warn_once(
- "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
- len, clc_len, command, mid);
+ if (clc_len < len)
return 0;
- }
+
pr_warn_once(
"srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
len, clc_len, command, mid);
@@ -534,7 +529,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
cifs_dbg(FYI, "found in the open list\n");
cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
- le32_to_cpu(rsp->NewLeaseState));
+ lease_state);
if (ack_req)
cfile->oplock_break_cancelled = false;
@@ -543,17 +538,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
- /*
- * Set or clear flags depending on the lease state being READ.
- * HANDLE caching flag should be added when the client starts
- * to defer closing remote file handles with HANDLE leases.
- */
- if (lease_state & SMB2_LEASE_READ_CACHING_HE)
- set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
- else
- clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
+ cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
+ cfile->oplock_level = lease_state;
cifs_queue_oplock_break(cfile);
kfree(lw);
@@ -576,7 +562,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
cifs_dbg(FYI, "found in the pending open list\n");
cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
- le32_to_cpu(rsp->NewLeaseState));
+ lease_state);
open->oplock = lease_state;
}
@@ -673,10 +659,10 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+
list_for_each(tmp1, &ses->tcon_list) {
tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
- cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
spin_lock(&tcon->open_file_lock);
list_for_each(tmp2, &tcon->openFileList) {
cfile = list_entry(tmp2, struct cifsFileInfo,
@@ -688,6 +674,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
continue;
cifs_dbg(FYI, "file id match, oplock break\n");
+ cifs_stats_inc(
+ &tcon->stats.cifs_stats.num_oplock_brks);
cinode = CIFS_I(d_inode(cfile->dentry));
spin_lock(&cfile->file_info_lock);
if (!CIFS_CACHE_WRITE(cinode) &&
@@ -699,18 +687,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
&cinode->flags);
- /*
- * Set flag if the server downgrades the oplock
- * to L2 else clear.
- */
- if (rsp->OplockLevel)
- set_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
- else
- clear_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
+ cfile->oplock_epoch = 0;
+ cfile->oplock_level = rsp->OplockLevel;
+
spin_unlock(&cfile->file_info_lock);
cifs_queue_oplock_break(cfile);
@@ -720,9 +699,6 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
return true;
}
spin_unlock(&tcon->open_file_lock);
- spin_unlock(&cifs_tcp_ses_lock);
- cifs_dbg(FYI, "No matching file for oplock break\n");
- return true;
}
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -735,45 +711,98 @@ smb2_cancelled_close_fid(struct work_struct *work)
{
struct close_cancelled_open *cancelled = container_of(work,
struct close_cancelled_open, work);
+ struct cifs_tcon *tcon = cancelled->tcon;
+ int rc;
- cifs_dbg(VFS, "Close unmatched open\n");
+ if (cancelled->mid)
+ cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llx\n",
+ cancelled->mid);
+ else
+ cifs_tcon_dbg(VFS, "Close interrupted close\n");
- SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
- cancelled->fid.volatile_fid);
- cifs_put_tcon(cancelled->tcon);
+ rc = SMB2_close(0, tcon, cancelled->fid.persistent_fid,
+ cancelled->fid.volatile_fid);
+ if (rc)
+ cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);
+
+ cifs_put_tcon(tcon);
kfree(cancelled);
}
+/*
+ * Caller should already hold an extra reference to @tcon.
+ * This function is used to queue work to close a handle to prevent leaks
+ * on the server.
+ * We handle two cases: an open interrupted after we sent the SMB2_CREATE
+ * to the server but before we processed the reply, and a close interrupted
+ * before we sent the SMB2_CLOSE to the server.
+ */
+static int
+__smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
+ __u64 persistent_fid, __u64 volatile_fid)
+{
+ struct close_cancelled_open *cancelled;
+
+ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ if (!cancelled)
+ return -ENOMEM;
+
+ cancelled->fid.persistent_fid = persistent_fid;
+ cancelled->fid.volatile_fid = volatile_fid;
+ cancelled->tcon = tcon;
+ cancelled->cmd = cmd;
+ cancelled->mid = mid;
+ INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+ WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);
+
+ return 0;
+}
+
+int
+smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
+ __u64 volatile_fid)
+{
+ int rc;
+
+ cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
+ spin_lock(&cifs_tcp_ses_lock);
+ tcon->tc_count++;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
+ persistent_fid, volatile_fid);
+ if (rc)
+ cifs_put_tcon(tcon);
+
+ return rc;
+}
+
int
smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
{
struct smb2_sync_hdr *sync_hdr = (struct smb2_sync_hdr *)buffer;
struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
struct cifs_tcon *tcon;
- struct close_cancelled_open *cancelled;
+ int rc;
if (sync_hdr->Command != SMB2_CREATE ||
sync_hdr->Status != STATUS_SUCCESS)
return 0;
- cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
- if (!cancelled)
- return -ENOMEM;
-
tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
sync_hdr->TreeId);
- if (!tcon) {
- kfree(cancelled);
+ if (!tcon)
return -ENOENT;
- }
- cancelled->fid.persistent_fid = rsp->PersistentFileId;
- cancelled->fid.volatile_fid = rsp->VolatileFileId;
- cancelled->tcon = tcon;
- INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
- queue_work(cifsiod_wq, &cancelled->work);
+ rc = __smb2_handle_cancelled_cmd(tcon,
+ le16_to_cpu(sync_hdr->Command),
+ le64_to_cpu(sync_hdr->MessageId),
+ rsp->PersistentFileId,
+ rsp->VolatileFileId);
+ if (rc)
+ cifs_put_tcon(tcon);
- return 0;
+ return rc;
}
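/*
 * Illustrative sketch, not part of this patch: one plausible caller of
 * smb2_handle_cancelled_close() is a close path interrupted by a signal
 * before the SMB2_CLOSE made it onto the wire.  The error codes checked and
 * the surrounding function are assumptions for illustration only.
 */
static void example_close(struct cifs_tcon *tcon, u64 persistent_fid,
			  u64 volatile_fid, unsigned int xid)
{
	int rc = SMB2_close(xid, tcon, persistent_fid, volatile_fid);

	if (rc == -EINTR || rc == -ERESTARTSYS)
		smb2_handle_cancelled_close(tcon, persistent_fid, volatile_fid);
}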
/**
@@ -788,23 +817,37 @@ smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec)
int i, rc;
struct sdesc *d;
struct smb2_sync_hdr *hdr;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
- if (ses->server->tcpStatus == CifsGood) {
- /* skip non smb311 connections */
- if (ses->server->dialect != SMB311_PROT_ID)
- return 0;
+ hdr = (struct smb2_sync_hdr *)iov[0].iov_base;
+	/* negprot requests are always included in the hash */
+ if (hdr->Command == SMB2_NEGOTIATE)
+ goto ok;
- /* skip last sess setup response */
- hdr = (struct smb2_sync_hdr *)iov[0].iov_base;
- if (hdr->Flags & SMB2_FLAGS_SIGNED)
- return 0;
- }
+ /*
+	 * If we process a command which wasn't a negprot, it means the
+	 * negprot was already done, so the server dialect has been set
+	 * and we can test it. Preauth requires 3.1.1 for now.
+ */
+ if (server->dialect != SMB311_PROT_ID)
+ return 0;
+
+ if (hdr->Command != SMB2_SESSION_SETUP)
+ return 0;
+
+ /* skip last sess setup response */
+ if ((hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
+ && (hdr->Status == NT_STATUS_OK
+ || (hdr->Status !=
+ cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
+ return 0;
- rc = smb311_crypto_shash_allocate(ses->server);
+ok:
+ rc = smb311_crypto_shash_allocate(server);
if (rc)
return rc;
- d = ses->server->secmech.sdescsha512;
+ d = server->secmech.sdescsha512;
rc = crypto_shash_init(&d->shash);
if (rc) {
cifs_dbg(VFS, "%s: could not init sha512 shash\n", __func__);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index a5bc1b671c12..a7f328f79c6f 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -10,6 +10,7 @@
#include <linux/falloc.h>
#include <linux/scatterlist.h>
#include <linux/uuid.h>
+#include <linux/sort.h>
#include <crypto/aead.h>
#include "cifsglob.h"
#include "smb2pdu.h"
@@ -109,10 +110,10 @@ smb2_add_credits(struct TCP_Server_Info *server,
/* change_conf hasn't been executed */
break;
case 0:
- cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
+ cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
break;
case 1:
- cifs_dbg(VFS, "disabling echoes and oplocks\n");
+ cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
break;
case 2:
cifs_dbg(FYI, "disabling oplocks\n");
@@ -151,13 +152,7 @@ smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
- struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
-
- if (mid->mid_state == MID_RESPONSE_RECEIVED
- || mid->mid_state == MID_RESPONSE_MALFORMED)
- return le16_to_cpu(shdr->CreditRequest);
-
- return 0;
+ return mid->credits_received;
}
static int
@@ -203,6 +198,8 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
credits->instance = server->reconnect_instance;
server->credits -= credits->value;
server->in_flight++;
+ if (server->in_flight > server->max_in_flight)
+ server->max_in_flight = server->in_flight;
break;
}
}
@@ -230,7 +227,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
if (server->reconnect_instance != credits->instance) {
spin_unlock(&server->req_lock);
- cifs_dbg(VFS, "trying to return %d credits to old session\n",
+ cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
credits->value - new_val);
return -EAGAIN;
}
@@ -270,7 +267,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
__u64 wire_mid = le64_to_cpu(shdr->MessageId);
if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
- cifs_dbg(VFS, "Encrypted frame parsing not supported yet\n");
+ cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
return NULL;
}
@@ -294,10 +291,10 @@ smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
#ifdef CONFIG_CIFS_DEBUG2
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
- cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
+ cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
shdr->ProcessId);
- cifs_dbg(VFS, "smb buf %p len %u\n", buf,
+ cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
server->ops->calc_smb_size(buf, server));
#endif
}
@@ -313,7 +310,7 @@ smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
int rc;
- ses->server->CurrentMid = 0;
+ cifs_ses_server(ses)->CurrentMid = 0;
rc = SMB2_negotiate(xid, ses);
/* BB we probably don't need to retry with modern servers */
if (rc == -EAGAIN)
@@ -556,6 +553,13 @@ out:
return rc;
}
+static int compare_iface(const void *ia, const void *ib)
+{
+ const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
+ const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
+
+ return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
+}
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
@@ -576,7 +580,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
"server does not support query network interfaces\n");
goto out;
} else if (rc != 0) {
- cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
+ cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
goto out;
}
@@ -585,6 +589,9 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
if (rc)
goto out;
+ /* sort interfaces from fastest to slowest */
+ sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);
+
spin_lock(&ses->iface_lock);
kfree(ses->iface_list);
ses->iface_list = iface_list;
@@ -656,6 +663,15 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
return 0;
}
+ /*
+ * We do not hold the lock for the open because in case
+ * SMB2_open needs to reconnect, it will end up calling
+ * cifs_mark_open_files_invalid() which takes the lock again
+ * thus causing a deadlock
+ */
+
+ mutex_unlock(&tcon->crfid.fid_mutex);
+
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -677,7 +693,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
if (rc)
- goto oshr_exit;
+ goto oshr_free;
smb2_set_next_command(tcon, &rqst[0]);
memset(&qi_iov, 0, sizeof(qi_iov));
@@ -690,18 +706,10 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
sizeof(struct smb2_file_all_info) +
PATH_MAX * 2, 0, NULL);
if (rc)
- goto oshr_exit;
+ goto oshr_free;
smb2_set_related(&rqst[1]);
- /*
- * We do not hold the lock for the open because in case
- * SMB2_open needs to reconnect, it will end up calling
- * cifs_mark_open_files_invalid() which takes the lock again
- * thus causing a deadlock
- */
-
- mutex_unlock(&tcon->crfid.fid_mutex);
rc = compound_send_recv(xid, ses, flags, 2, rqst,
resp_buftype, rsp_iov);
mutex_lock(&tcon->crfid.fid_mutex);
@@ -739,8 +747,16 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
/* Cached root is still invalid, continue normally */
- if (rc)
+ if (rc) {
+ if (rc == -EREMCHG) {
+ tcon->need_reconnect = true;
+ printk_once(KERN_WARNING "server share %s deleted\n",
+ tcon->treeName);
+ }
goto oshr_exit;
+ }
+
+ atomic_inc(&tcon->num_remote_opens);
o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
oparms.fid->persistent_fid = o_rsp->PersistentFileId;
@@ -1167,6 +1183,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
rc = compound_send_recv(xid, ses, flags, 3, rqst,
resp_buftype, rsp_iov);
+ /* no need to bump num_remote_opens because the handle is immediately closed */
sea_exit:
kfree(ea);
@@ -1330,11 +1347,11 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
(char **)&res_key, &ret_data_len);
if (rc) {
- cifs_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
+ cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
goto req_res_key_exit;
}
if (ret_data_len < sizeof(struct resume_key_req)) {
- cifs_dbg(VFS, "Invalid refcopy resume key length\n");
+ cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
rc = -EINVAL;
goto req_res_key_exit;
}
@@ -1369,7 +1386,10 @@ smb2_ioctl_query_info(const unsigned int xid,
struct cifs_fid fid;
struct kvec qi_iov[1];
struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
+ struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
struct kvec close_iov[1];
+ unsigned int size[2];
+ void *data[2];
memset(rqst, 0, sizeof(rqst));
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
@@ -1387,15 +1407,10 @@ smb2_ioctl_query_info(const unsigned int xid,
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
- if (buffer == NULL)
- return -ENOMEM;
-
- if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
- qi.output_buffer_length)) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
+ buffer = memdup_user(arg + sizeof(struct smb_query_info),
+ qi.output_buffer_length);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
/* Open */
memset(&open_iov, 0, sizeof(open_iov));
@@ -1404,7 +1419,6 @@ smb2_ioctl_query_info(const unsigned int xid,
memset(&oparms, 0, sizeof(oparms));
oparms.tcon = tcon;
- oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
oparms.disposition = FILE_OPEN;
if (is_dir)
oparms.create_options = CREATE_NOT_FILE;
@@ -1413,9 +1427,6 @@ smb2_ioctl_query_info(const unsigned int xid,
oparms.fid = &fid;
oparms.reconnect = false;
- /*
- * FSCTL codes encode the special access they need in the fsctl code.
- */
if (qi.flags & PASSTHRU_FSCTL) {
switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
@@ -1431,6 +1442,10 @@ smb2_ioctl_query_info(const unsigned int xid,
oparms.desired_access = GENERIC_WRITE;
break;
}
+ } else if (qi.flags & PASSTHRU_SET_INFO) {
+ oparms.desired_access = GENERIC_WRITE;
+ } else {
+ oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
}
rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
@@ -1454,6 +1469,24 @@ smb2_ioctl_query_info(const unsigned int xid,
qi.output_buffer_length,
CIFSMaxBufSize);
}
+ } else if (qi.flags == PASSTHRU_SET_INFO) {
+ /* Can eventually relax perm check since server enforces too */
+ if (!capable(CAP_SYS_ADMIN))
+ rc = -EPERM;
+ else {
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[1].rq_iov = si_iov;
+ rqst[1].rq_nvec = 1;
+
+ size[0] = 8;
+ data[0] = buffer;
+
+ rc = SMB2_set_info_init(tcon, &rqst[1],
+ COMPOUND_FID, COMPOUND_FID,
+ current->tgid,
+ FILE_END_OF_FILE_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ }
} else if (qi.flags == PASSTHRU_QUERY_INFO) {
memset(&qi_iov, 0, sizeof(qi_iov));
rqst[1].rq_iov = qi_iov;
@@ -1465,7 +1498,7 @@ smb2_ioctl_query_info(const unsigned int xid,
qi.input_buffer_length,
qi.output_buffer_length, buffer);
} else { /* unknown flags */
- cifs_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
+ cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
rc = -EINVAL;
}
@@ -1488,41 +1521,40 @@ smb2_ioctl_query_info(const unsigned int xid,
resp_buftype, rsp_iov);
if (rc)
goto iqinf_exit;
+
+ /* No need to bump num_remote_opens since the handle is immediately closed */
if (qi.flags & PASSTHRU_FSCTL) {
pqi = (struct smb_query_info __user *)arg;
io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
if (qi.input_buffer_length > 0 &&
- le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
- if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
- sizeof(qi.input_buffer_length))) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
+ le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
+ > rsp_iov[1].iov_len)
+ goto e_fault;
+
+ if (copy_to_user(&pqi->input_buffer_length,
+ &qi.input_buffer_length,
+ sizeof(qi.input_buffer_length)))
+ goto e_fault;
+
if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
(const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
- qi.input_buffer_length)) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
+ qi.input_buffer_length))
+ goto e_fault;
} else {
pqi = (struct smb_query_info __user *)arg;
qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
- if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
- sizeof(qi.input_buffer_length))) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
- if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
+ if (copy_to_user(&pqi->input_buffer_length,
+ &qi.input_buffer_length,
+ sizeof(qi.input_buffer_length)))
+ goto e_fault;
+
+ if (copy_to_user(pqi + 1, qi_rsp->Buffer,
+ qi.input_buffer_length))
+ goto e_fault;
}
iqinf_exit:
@@ -1538,6 +1570,10 @@ smb2_ioctl_query_info(const unsigned int xid,
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
return rc;
+
+e_fault:
+ rc = -EFAULT;
+ goto iqinf_exit;
}
static ssize_t
@@ -1592,7 +1628,7 @@ smb2_copychunk_range(const unsigned int xid,
if (rc == 0) {
if (ret_data_len !=
sizeof(struct copychunk_ioctl_rsp)) {
- cifs_dbg(VFS, "invalid cchunk response size\n");
+ cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
rc = -EIO;
goto cchunk_out;
}
@@ -1606,12 +1642,12 @@ smb2_copychunk_range(const unsigned int xid,
*/
if (le32_to_cpu(retbuf->TotalBytesWritten) >
le32_to_cpu(pcchunk->Length)) {
- cifs_dbg(VFS, "invalid copy chunk response\n");
+ cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
rc = -EIO;
goto cchunk_out;
}
if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
- cifs_dbg(VFS, "invalid num chunks written\n");
+ cifs_tcon_dbg(VFS, "invalid num chunks written\n");
rc = -EIO;
goto cchunk_out;
}
@@ -2214,6 +2250,11 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
resp_buftype, rsp_iov);
if (rc) {
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ if (rc == -EREMCHG) {
+ tcon->need_reconnect = true;
+ printk_once(KERN_WARNING "server share %s deleted\n",
+ tcon->treeName);
+ }
goto qic_exit;
}
*rsp = rsp_iov[1];
@@ -2401,7 +2442,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
if (rc) {
if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
- cifs_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
+ cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
goto out;
}
@@ -2410,7 +2451,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
nls_codepage, remap, search_name,
true /* is_unicode */);
if (rc) {
- cifs_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
+ cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
goto out;
}
@@ -2640,7 +2681,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
rsp_iov[1].iov_len) {
- cifs_dbg(VFS, "srv returned invalid ioctl len: %d\n",
+ cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
plen);
rc = -EIO;
goto querty_exit;
@@ -2939,7 +2980,6 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
loff_t offset, loff_t len)
{
struct inode *inode;
- struct cifsInodeInfo *cifsi;
struct cifsFileInfo *cfile = file->private_data;
struct file_zero_data_information fsctl_buf;
long rc;
@@ -2949,7 +2989,6 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
xid = get_xid();
inode = d_inode(cfile->dentry);
- cifsi = CIFS_I(inode);
/* Need to make file sparse, if not already, before freeing range. */
/* Consider adding equivalent for compressed since it could also work */
@@ -3243,22 +3282,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
- struct cifsInodeInfo *cinode, bool set_level2)
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
{
- if (set_level2)
- server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
- 0, NULL);
- else
- server->ops->set_oplock_level(cinode, 0, 0, NULL);
+ server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
static void
-smb21_downgrade_oplock(struct TCP_Server_Info *server,
- struct cifsInodeInfo *cinode, bool set_level2)
+smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache);
+
+static void
+smb3_downgrade_oplock(struct TCP_Server_Info *server,
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
{
- server->ops->set_oplock_level(cinode,
- set_level2 ? SMB2_LEASE_READ_CACHING_HE :
- 0, 0, NULL);
+ unsigned int old_state = cinode->oplock;
+ unsigned int old_epoch = cinode->epoch;
+ unsigned int new_state;
+
+ if (epoch > old_epoch) {
+ smb21_set_oplock_level(cinode, oplock, 0, NULL);
+ cinode->epoch = epoch;
+ }
+
+ new_state = cinode->oplock;
+ *purge_cache = false;
+
+ if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
+ (new_state & CIFS_CACHE_READ_FLG) == 0)
+ *purge_cache = true;
+ else if (old_state == new_state && (epoch - old_epoch > 1))
+ *purge_cache = true;
}
static void
@@ -3295,6 +3350,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
+ /* Check if the server granted an oplock rather than a lease */
+ if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+ return smb2_set_oplock_level(cinode, oplock, epoch,
+ purge_cache);
+
if (oplock & SMB2_LEASE_READ_CACHING_HE) {
new_oplock |= CIFS_CACHE_READ_FLG;
strcat(message, "R");
@@ -3489,7 +3549,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+ void *addr;
+ /*
+ * VMAP_STACK (at least) puts stack into the vmalloc address space
+ */
+ if (is_vmalloc_addr(buf))
+ addr = vmalloc_to_page(buf);
+ else
+ addr = virt_to_page(buf);
+ sg_set_page(sg, addr, buflen, offset_in_page(buf));
}
/* Assumes the first rqst has a transform header as the first iov.
@@ -3547,14 +3615,16 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
u8 *ses_enc_key;
spin_lock(&cifs_tcp_ses_lock);
- list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
- if (ses->Suid != ses_id)
- continue;
- ses_enc_key = enc ? ses->smb3encryptionkey :
- ses->smb3decryptionkey;
- memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
- spin_unlock(&cifs_tcp_ses_lock);
- return 0;
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (ses->Suid == ses_id) {
+ ses_enc_key = enc ? ses->smb3encryptionkey :
+ ses->smb3decryptionkey;
+ memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return 0;
+ }
+ }
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -3587,14 +3657,14 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
if (rc) {
- cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
+ cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
enc ? "en" : "de");
return 0;
}
rc = smb3_crypto_aead_allocate(server);
if (rc) {
- cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
+ cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
return rc;
}
@@ -3602,19 +3672,19 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
server->secmech.ccmaesdecrypt;
rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
+ cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
return rc;
}
rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
+ cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
return rc;
}
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
- cifs_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
+ cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
return -ENOMEM;
}
@@ -3625,7 +3695,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
sg = init_sg(num_rqst, rqst, sign);
if (!sg) {
- cifs_dbg(VFS, "%s: Failed to init sg\n", __func__);
+ cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
rc = -ENOMEM;
goto free_req;
}
@@ -3633,7 +3703,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
iv_len = crypto_aead_ivsize(tfm);
iv = kzalloc(iv_len, GFP_KERNEL);
if (!iv) {
- cifs_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
+ cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
rc = -ENOMEM;
goto free_sg;
}
@@ -3875,7 +3945,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
bool use_rdma_mr = false;
if (shdr->Command != SMB2_READ) {
- cifs_dbg(VFS, "only big read responses are supported\n");
+ cifs_server_dbg(VFS, "only big read responses are supported\n");
return -ENOTSUPP;
}
@@ -3990,8 +4060,56 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
return length;
}
+struct smb2_decrypt_work {
+ struct work_struct decrypt;
+ struct TCP_Server_Info *server;
+ struct page **ppages;
+ char *buf;
+ unsigned int npages;
+ unsigned int len;
+};
+
+
+static void smb2_decrypt_offload(struct work_struct *work)
+{
+ struct smb2_decrypt_work *dw = container_of(work,
+ struct smb2_decrypt_work, decrypt);
+ int i, rc;
+ struct mid_q_entry *mid;
+
+ rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
+ dw->ppages, dw->npages, dw->len);
+ if (rc) {
+ cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
+ goto free_pages;
+ }
+
+ dw->server->lstrp = jiffies;
+ mid = smb2_find_mid(dw->server, dw->buf);
+ if (mid == NULL)
+ cifs_dbg(FYI, "mid not found\n");
+ else {
+ mid->decrypted = true;
+ rc = handle_read_data(dw->server, mid, dw->buf,
+ dw->server->vals->read_rsp_size,
+ dw->ppages, dw->npages, dw->len);
+ mid->callback(mid);
+ cifs_mid_q_entry_release(mid);
+ }
+
+free_pages:
+ for (i = dw->npages-1; i >= 0; i--)
+ put_page(dw->ppages[i]);
+
+ kfree(dw->ppages);
+ cifs_small_buf_release(dw->buf);
+ kfree(dw);
+}
+
+
static int
-receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
+receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
+ int *num_mids)
{
char *buf = server->smallbuf;
struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
@@ -4001,7 +4119,9 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
unsigned int buflen = server->pdu_size;
int rc;
int i = 0;
+ struct smb2_decrypt_work *dw;
+ *num_mids = 1;
len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
@@ -4037,6 +4157,32 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
if (rc)
goto free_pages;
+ /*
+ * For large reads, offload decryption to a different thread for better
+ * performance, since decryption is expensive and this lets us use more cores
+ */
+
+ if ((server->min_offload) && (server->in_flight > 1) &&
+ (server->pdu_size >= server->min_offload)) {
+ dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
+ if (dw == NULL)
+ goto non_offloaded_decrypt;
+
+ dw->buf = server->smallbuf;
+ server->smallbuf = (char *)cifs_small_buf_get();
+
+ INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
+
+ dw->npages = npages;
+ dw->server = server;
+ dw->ppages = pages;
+ dw->len = len;
+ queue_work(decrypt_wq, &dw->decrypt);
+ *num_mids = 0; /* worker thread takes care of finding mid */
+ return -1;
+ }
+
+non_offloaded_decrypt:
rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
pages, npages, len);
if (rc)
@@ -4070,7 +4216,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
{
int ret, length;
char *buf = server->smallbuf;
- char *tmpbuf;
struct smb2_sync_hdr *shdr;
unsigned int pdu_length = server->pdu_size;
unsigned int buf_size;
@@ -4100,18 +4245,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
return length;
next_is_large = server->large_buf;
- one_more:
+one_more:
shdr = (struct smb2_sync_hdr *)buf;
if (shdr->NextCommand) {
- if (next_is_large) {
- tmpbuf = server->bigbuf;
+ if (next_is_large)
next_buffer = (char *)cifs_buf_get();
- } else {
- tmpbuf = server->smallbuf;
+ else
next_buffer = (char *)cifs_small_buf_get();
- }
memcpy(next_buffer,
- tmpbuf + le32_to_cpu(shdr->NextCommand),
+ buf + le32_to_cpu(shdr->NextCommand),
pdu_length - le32_to_cpu(shdr->NextCommand));
}
@@ -4125,7 +4267,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
}
if (*num_mids >= MAX_COMPOUND) {
- cifs_dbg(VFS, "too many PDUs in compound\n");
+ cifs_server_dbg(VFS, "too many PDUs in compound\n");
return -1;
}
bufs[*num_mids] = buf;
@@ -4140,12 +4282,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
pdu_length -= le32_to_cpu(shdr->NextCommand);
server->large_buf = next_is_large;
if (next_is_large)
- server->bigbuf = next_buffer;
+ server->bigbuf = buf = next_buffer;
else
- server->smallbuf = next_buffer;
-
- buf += le32_to_cpu(shdr->NextCommand);
+ server->smallbuf = buf = next_buffer;
goto one_more;
+ } else if (ret != 0) {
+ /*
+ * ret != 0 here means that we didn't get to handle_mid() thus
+ * server->smallbuf and server->bigbuf are still valid. We need
+ * to free next_buffer because it is not going to be used
+ * anywhere.
+ */
+ if (next_is_large)
+ free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
+ else
+ free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
}
return ret;
@@ -4162,7 +4313,7 @@ smb3_receive_transform(struct TCP_Server_Info *server,
if (pdu_length < sizeof(struct smb2_transform_hdr) +
sizeof(struct smb2_sync_hdr)) {
- cifs_dbg(VFS, "Transform message is too small (%u)\n",
+ cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
pdu_length);
cifs_reconnect(server);
wake_up(&server->response_q);
@@ -4170,7 +4321,7 @@ smb3_receive_transform(struct TCP_Server_Info *server,
}
if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
- cifs_dbg(VFS, "Transform message is broken\n");
+ cifs_server_dbg(VFS, "Transform message is broken\n");
cifs_reconnect(server);
wake_up(&server->response_q);
return -ECONNABORTED;
@@ -4178,8 +4329,7 @@ smb3_receive_transform(struct TCP_Server_Info *server,
/* TODO: add support for compounds containing READ. */
if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
- *num_mids = 1;
- return receive_encrypted_read(server, &mids[0]);
+ return receive_encrypted_read(server, &mids[0], num_mids);
}
return receive_encrypted_standard(server, mids, bufs, num_mids);
@@ -4425,7 +4575,7 @@ struct smb_version_operations smb21_operations = {
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb21_downgrade_oplock,
+ .downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb2_negotiate_wsize,
@@ -4525,7 +4675,7 @@ struct smb_version_operations smb30_operations = {
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb21_downgrade_oplock,
+ .downgrade_oplock = smb3_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb3_negotiate_wsize,
@@ -4633,7 +4783,7 @@ struct smb_version_operations smb311_operations = {
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb21_downgrade_oplock,
+ .downgrade_oplock = smb3_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb3_negotiate_wsize,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c8cd7b6cdda2..ed77f94dbf1d 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -426,16 +426,9 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
* SMB information in the SMB header. If the return code is zero, this
* function must have filled in request_buf pointer.
*/
-static int
-smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
- void **request_buf, unsigned int *total_len)
+static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
{
- int rc;
-
- rc = smb2_reconnect(smb2_command, tcon);
- if (rc)
- return rc;
-
/* BB eventually switch this to SMB2 specific small buf size */
if (smb2_command == SMB2_SET_INFO)
*request_buf = cifs_buf_get();
@@ -456,7 +449,31 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
cifs_stats_inc(&tcon->num_smbs_sent);
}
- return rc;
+ return 0;
+}
+
+static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
+{
+ int rc;
+
+ rc = smb2_reconnect(smb2_command, tcon);
+ if (rc)
+ return rc;
+
+ return __smb2_plain_req_init(smb2_command, tcon, request_buf,
+ total_len);
+}
+
+static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
+{
+ /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
+ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
+ return __smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf,
+ total_len);
+ }
+ return smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, total_len);
}
/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
@@ -503,8 +520,7 @@ build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;
/* copy up to max of first 100 bytes of server name to NetName field */
- pneg_ctxt->DataLength = cpu_to_le16(2 +
- (2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp)));
+ pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
/* context size is DataLength + minimal smb2_neg_context */
return DIV_ROUND_UP(le16_to_cpu(pneg_ctxt->DataLength) +
sizeof(struct smb2_neg_context), 8) * 8;
@@ -543,7 +559,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
if (*total_len > 200) {
/* In case the length is corrupted, don't overrun the smb buffer */
- cifs_dbg(VFS, "Bad frame length assembling neg contexts\n");
+ cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
return;
}
@@ -661,7 +677,7 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
if (len_of_smb <= offset) {
- cifs_dbg(VFS, "Invalid response: negotiate context offset\n");
+ cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
return -EINVAL;
}
@@ -693,7 +709,7 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
server->posix_ext_supported = true;
else
- cifs_dbg(VFS, "unknown negcontext of type %d ignored\n",
+ cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
le16_to_cpu(pctx->ContextType));
if (rc)
@@ -752,6 +768,8 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
unsigned int num = *num_iovec;
iov[num].iov_base = create_posix_buf(mode);
+ if (mode == ACL_NO_MODE)
+ cifs_dbg(FYI, "illegal mode\n");
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_posix);
@@ -790,7 +808,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
struct kvec rsp_iov;
int rc = 0;
int resp_buftype;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
int blob_offset, blob_length;
char *security_blob;
int flags = CIFS_NEG_OP;
@@ -812,13 +830,13 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
- if (strcmp(ses->server->vals->version_string,
+ if (strcmp(server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
req->DialectCount = cpu_to_le16(2);
total_len += 4;
- } else if (strcmp(ses->server->vals->version_string,
+ } else if (strcmp(server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
@@ -828,7 +846,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
total_len += 8;
} else {
/* otherwise send specific dialect */
- req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
+ req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
req->DialectCount = cpu_to_le16(1);
total_len += 2;
}
@@ -841,16 +859,16 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
else
req->SecurityMode = 0;
- req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
+ req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
/* ClientGUID must be zero for SMB2.02 dialect */
- if (ses->server->vals->protocol_id == SMB20_PROT_ID)
+ if (server->vals->protocol_id == SMB20_PROT_ID)
memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
else {
memcpy(req->ClientGUID, server->client_guid,
SMB2_CLIENT_GUID_SIZE);
- if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
- (strcmp(ses->server->vals->version_string,
+ if ((server->vals->protocol_id == SMB311_PROT_ID) ||
+ (strcmp(server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0))
assemble_neg_contexts(req, server, &total_len);
}
@@ -869,42 +887,42 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
* cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
*/
if (rc == -EOPNOTSUPP) {
- cifs_dbg(VFS, "Dialect not supported by server. Consider "
+ cifs_server_dbg(VFS, "Dialect not supported by server. Consider "
"specifying vers=1.0 or vers=2.0 on mount for accessing"
" older servers\n");
goto neg_exit;
} else if (rc != 0)
goto neg_exit;
- if (strcmp(ses->server->vals->version_string,
+ if (strcmp(server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
- cifs_dbg(VFS,
+ cifs_server_dbg(VFS,
"SMB2 dialect returned but not requested\n");
return -EIO;
} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
- cifs_dbg(VFS,
+ cifs_server_dbg(VFS,
"SMB2.1 dialect returned but not requested\n");
return -EIO;
}
- } else if (strcmp(ses->server->vals->version_string,
+ } else if (strcmp(server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
- cifs_dbg(VFS,
+ cifs_server_dbg(VFS,
"SMB2 dialect returned but not requested\n");
return -EIO;
} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
/* ops set to 3.0 by default, so update for 2.1 */
- ses->server->ops = &smb21_operations;
- ses->server->vals = &smb21_values;
+ server->ops = &smb21_operations;
+ server->vals = &smb21_values;
} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
- ses->server->ops = &smb311_operations;
- ses->server->vals = &smb311_values;
+ server->ops = &smb311_operations;
+ server->vals = &smb311_values;
}
} else if (le16_to_cpu(rsp->DialectRevision) !=
- ses->server->vals->protocol_id) {
+ server->vals->protocol_id) {
/* if requested single dialect ensure returned dialect matched */
- cifs_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
+ cifs_server_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
le16_to_cpu(rsp->DialectRevision));
return -EIO;
}
@@ -922,7 +940,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
else {
- cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
+ cifs_server_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
le16_to_cpu(rsp->DialectRevision));
rc = -EIO;
goto neg_exit;
@@ -982,7 +1000,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
rc = smb311_decode_neg_context(rsp, server,
rsp_iov.iov_len);
else
- cifs_dbg(VFS, "Missing expected negotiate contexts\n");
+ cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
}
neg_exit:
free_rsp_buf(resp_buftype, rsp);
@@ -996,11 +1014,12 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
struct validate_negotiate_info_rsp *pneg_rsp = NULL;
u32 rsplen;
u32 inbuflen; /* max of 4 dialects */
+ struct TCP_Server_Info *server = tcon->ses->server;
cifs_dbg(FYI, "validate negotiate\n");
/* In SMB3.11 preauth integrity supersedes validate negotiate */
- if (tcon->ses->server->dialect == SMB311_PROT_ID)
+ if (server->dialect == SMB311_PROT_ID)
return 0;
/*
@@ -1019,15 +1038,15 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
}
if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
- cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
+ cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
if (!pneg_inbuf)
return -ENOMEM;
pneg_inbuf->Capabilities =
- cpu_to_le32(tcon->ses->server->vals->req_capabilities);
- memcpy(pneg_inbuf->Guid, tcon->ses->server->client_guid,
+ cpu_to_le32(server->vals->req_capabilities);
+ memcpy(pneg_inbuf->Guid, server->client_guid,
SMB2_CLIENT_GUID_SIZE);
if (tcon->ses->sign)
@@ -1040,7 +1059,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
pneg_inbuf->SecurityMode = 0;
- if (strcmp(tcon->ses->server->vals->version_string,
+ if (strcmp(server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
@@ -1048,7 +1067,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
/* structure is big enough for 3 dialects, sending only 2 */
inbuflen = sizeof(*pneg_inbuf) -
(2 * sizeof(pneg_inbuf->Dialects[0]));
- } else if (strcmp(tcon->ses->server->vals->version_string,
+ } else if (strcmp(server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
@@ -1060,7 +1079,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
} else {
/* otherwise specific dialect was requested */
pneg_inbuf->Dialects[0] =
- cpu_to_le16(tcon->ses->server->vals->protocol_id);
+ cpu_to_le16(server->vals->protocol_id);
pneg_inbuf->DialectCount = cpu_to_le16(1);
/* structure is big enough for 3 dialects, sending only 1 */
inbuflen = sizeof(*pneg_inbuf) -
@@ -1076,18 +1095,18 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
* Old Windows versions or Netapp SMB server can return
* not supported error. Client should accept it.
*/
- cifs_dbg(VFS, "Server does not support validate negotiate\n");
+ cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
rc = 0;
goto out_free_inbuf;
} else if (rc != 0) {
- cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
+ cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
rc = -EIO;
goto out_free_inbuf;
}
rc = -EIO;
if (rsplen != sizeof(*pneg_rsp)) {
- cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
+ cifs_tcon_dbg(VFS, "invalid protocol negotiate response size: %d\n",
rsplen);
/* relax check since Mac returns max bufsize allowed on ioctl */
@@ -1096,16 +1115,16 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
}
/* check validate negotiate info response matches what we got earlier */
- if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
+ if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
goto vneg_out;
- if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
+ if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
goto vneg_out;
/* do not validate server guid because not saved at negprot time yet */
if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
- SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
+ SMB2_LARGE_FILES) != server->capabilities)
goto vneg_out;
/* validate negotiate successful */
@@ -1114,7 +1133,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
goto out_free_rsp;
vneg_out:
- cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
+ cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
out_free_rsp:
kfree(pneg_rsp);
out_free_inbuf:
@@ -1169,7 +1188,7 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
int rc;
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_req *req;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
unsigned int total_len;
rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req,
@@ -1177,13 +1196,21 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
if (rc)
return rc;
- /* First session, not a reauthenticate */
- req->sync_hdr.SessionId = 0;
-
- /* if reconnect, we need to send previous sess id, otherwise it is 0 */
- req->PreviousSessionId = sess_data->previous_session;
-
- req->Flags = 0; /* MBZ */
+ if (sess_data->ses->binding) {
+ req->sync_hdr.SessionId = sess_data->ses->Suid;
+ req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+ req->PreviousSessionId = 0;
+ req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
+ } else {
+ /* First session, not a reauthenticate */
+ req->sync_hdr.SessionId = 0;
+ /*
+ * if reconnect, we need to send previous sess id
+ * otherwise it is 0
+ */
+ req->PreviousSessionId = sess_data->previous_session;
+ req->Flags = 0; /* MBZ */
+ }
/* enough to enable echos and oplocks and one max size write */
req->sync_hdr.CreditRequest = cpu_to_le16(130);
@@ -1196,7 +1223,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
else
req->SecurityMode = 0;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+#else
req->Capabilities = 0;
+#endif /* DFS_UPCALL */
+
req->Channel = 0; /* MBZ */
sess_data->iov[0].iov_base = (char *)req;
@@ -1251,28 +1283,33 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
{
int rc = 0;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
- mutex_lock(&ses->server->srv_mutex);
- if (ses->server->ops->generate_signingkey) {
- rc = ses->server->ops->generate_signingkey(ses);
+ mutex_lock(&server->srv_mutex);
+ if (server->ops->generate_signingkey) {
+ rc = server->ops->generate_signingkey(ses);
if (rc) {
cifs_dbg(FYI,
"SMB3 session key generation failed\n");
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
return rc;
}
}
- if (!ses->server->session_estab) {
- ses->server->sequence_number = 0x2;
- ses->server->session_estab = true;
+ if (!server->session_estab) {
+ server->sequence_number = 0x2;
+ server->session_estab = true;
}
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "SMB2/3 session established successfully\n");
- spin_lock(&GlobalMid_Lock);
- ses->status = CifsGood;
- ses->need_reconnect = false;
- spin_unlock(&GlobalMid_Lock);
+ /* keep existing ses state if binding */
+ if (!ses->binding) {
+ spin_lock(&GlobalMid_Lock);
+ ses->status = CifsGood;
+ ses->need_reconnect = false;
+ spin_unlock(&GlobalMid_Lock);
+ }
+
return rc;
}
@@ -1310,16 +1347,19 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
goto out_put_spnego_key;
}
- ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
- GFP_KERNEL);
- if (!ses->auth_key.response) {
- cifs_dbg(VFS,
- "Kerberos can't allocate (%u bytes) memory",
- msg->sesskey_len);
- rc = -ENOMEM;
- goto out_put_spnego_key;
+ /* keep session key if binding */
+ if (!ses->binding) {
+ ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+ GFP_KERNEL);
+ if (!ses->auth_key.response) {
+ cifs_dbg(VFS,
+ "Kerberos can't allocate (%u bytes) memory",
+ msg->sesskey_len);
+ rc = -ENOMEM;
+ goto out_put_spnego_key;
+ }
+ ses->auth_key.len = msg->sesskey_len;
}
- ses->auth_key.len = msg->sesskey_len;
sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
sess_data->iov[1].iov_len = msg->secblob_len;
@@ -1329,9 +1369,11 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
goto out_put_spnego_key;
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
- ses->Suid = rsp->sync_hdr.SessionId;
-
- ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ /* keep session id and flags if binding */
+ if (!ses->binding) {
+ ses->Suid = rsp->sync_hdr.SessionId;
+ ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ }
rc = SMB2_sess_establish_session(sess_data);
out_put_spnego_key:
@@ -1425,9 +1467,11 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
-
- ses->Suid = rsp->sync_hdr.SessionId;
- ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ /* keep existing ses id and flags if binding */
+ if (!ses->binding) {
+ ses->Suid = rsp->sync_hdr.SessionId;
+ ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ }
out:
kfree(ntlmssp_blob);
@@ -1484,8 +1528,11 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
- ses->Suid = rsp->sync_hdr.SessionId;
- ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ /* keep existing ses id and flags if binding */
+ if (!ses->binding) {
+ ses->Suid = rsp->sync_hdr.SessionId;
+ ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ }
rc = SMB2_sess_establish_session(sess_data);
out:
@@ -1502,7 +1549,7 @@ SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
{
int type;
- type = smb2_select_sectype(ses->server, ses->sectype);
+ type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) {
cifs_dbg(VFS,
@@ -1530,7 +1577,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc = 0;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
struct SMB2_sess_data *sess_data;
cifs_dbg(FYI, "Session Setup\n");
@@ -1556,14 +1603,14 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
/*
* Initialize the session hash with the server one.
*/
- memcpy(ses->preauth_sha_hash, ses->server->preauth_sha_hash,
+ memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
while (sess_data->func)
sess_data->func(sess_data);
if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
- cifs_dbg(VFS, "signing requested but authenticated as guest\n");
+ cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
rc = sess_data->result;
out:
kfree(sess_data);
@@ -1656,10 +1703,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
__le16 *unc_path = NULL;
int flags = 0;
unsigned int total_len;
+ struct TCP_Server_Info *server = ses->server;
cifs_dbg(FYI, "TCON\n");
- if (!(ses->server) || !tree)
+ if (!server || !tree)
return -EIO;
unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
@@ -1702,7 +1750,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
* unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
* (Samba servers don't always set the flag so also check if null user)
*/
- if ((ses->server->dialect == SMB311_PROT_ID) &&
+ if ((server->dialect == SMB311_PROT_ID) &&
!smb3_encryption_required(tcon) &&
!(ses->session_flags &
(SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
@@ -1741,7 +1789,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
cifs_dbg(FYI, "connection to printer\n");
break;
default:
- cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
+ cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
rc = -EOPNOTSUPP;
goto tcon_error_exit;
}
@@ -1756,15 +1804,15 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
- cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
+ cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
if (tcon->seal &&
- !(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
- cifs_dbg(VFS, "Encryption is requested but not supported\n");
+ !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
init_copy_chunk_defaults(tcon);
- if (tcon->ses->server->ops->validate_negotiate)
- rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
+ if (server->ops->validate_negotiate)
+ rc = server->ops->validate_negotiate(xid, tcon);
tcon_exit:
free_rsp_buf(resp_buftype, rsp);
@@ -1773,7 +1821,7 @@ tcon_exit:
tcon_error_exit:
if (rsp && rsp->sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
- cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
+ cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
}
goto tcon_exit;
}
@@ -1799,6 +1847,8 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
+ close_shroot(&tcon->crfid);
+
rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
&total_len);
if (rc)
@@ -2346,6 +2396,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
rqst.rq_iov = iov;
rqst.rq_nvec = n_iov;
+ /* no need to inc num_remote_opens because we close it just below */
trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
FILE_WRITE_ATTRIBUTES);
/* resource #4: response buffer */
@@ -2410,6 +2461,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
/* File attributes ignored on open (used in create though) */
req->FileAttributes = cpu_to_le32(file_attributes);
req->ShareAccess = FILE_SHARE_ALL_LE;
+
req->CreateDisposition = cpu_to_le32(oparms->disposition);
req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
@@ -2453,7 +2505,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
iov[1].iov_len = uni_path_len;
iov[1].iov_base = path;
- if (!server->oplocks)
+ if ((!server->oplocks) || (tcon->no_lease))
*oplock = SMB2_OPLOCK_LEVEL_NONE;
if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
@@ -2511,6 +2563,20 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
return rc;
}
+ if ((oparms->disposition == FILE_CREATE) &&
+ (oparms->mode != ACL_NO_MODE)) {
+ if (n_iov > 2) {
+ struct create_context *ccontext =
+ (struct create_context *)iov[n_iov-1].iov_base;
+ ccontext->Next =
+ cpu_to_le32(iov[n_iov-1].iov_len);
+ }
+
+ /* rc = add_sd_context(iov, &n_iov, oparms->mode); */
+ if (rc)
+ return rc;
+ }
+
if (n_iov > 2) {
struct create_context *ccontext =
(struct create_context *)iov[n_iov-1].iov_base;
@@ -2589,6 +2655,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
}
trace_smb3_open_err(xid, tcon->tid, ses->Suid,
oparms->create_options, oparms->desired_access, rc);
+ if (rc == -EREMCHG) {
+ printk_once(KERN_WARNING "server share %s deleted\n",
+ tcon->treeName);
+ tcon->need_reconnect = true;
+ }
goto creat_exit;
} else
trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid,
@@ -2632,7 +2703,7 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
int rc;
char *in_data_buf;
- rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len);
+ rc = smb2_ioctl_req_init(opcode, tcon, (void **) &req, &total_len);
if (rc)
return rc;
@@ -2737,6 +2808,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
+ struct TCP_Server_Info *server;
cifs_dbg(FYI, "SMB2 IOCTL\n");
@@ -2752,7 +2824,10 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
else
return -EIO;
- if (!ses || !(ses->server))
+ if (!ses)
+ return -EIO;
+ server = ses->server;
+ if (!server)
return -EIO;
if (smb3_encryption_required(tcon))
@@ -2802,14 +2877,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (*plen == 0)
goto ioctl_exit; /* server returned no data */
else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
- cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
+ cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
*plen = 0;
rc = -EIO;
goto ioctl_exit;
}
if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
- cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
+ cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
le32_to_cpu(rsp->OutputOffset));
*plen = 0;
rc = -EIO;
@@ -2908,6 +2983,7 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
+ trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
rc = SMB2_close_init(tcon, &rqst, persistent_fid, volatile_fid);
if (rc)
goto close_exit;
@@ -2920,7 +2996,9 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
rc);
goto close_exit;
- }
+ } else
+ trace_smb3_close_done(xid, persistent_fid, tcon->tid,
+ ses->Suid);
atomic_dec(&tcon->num_remote_opens);
@@ -2936,7 +3014,21 @@ int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid)
{
- return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
+ int rc;
+ int tmp_rc;
+
+ rc = SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
+
+ /* retry close in a worker thread if this one is interrupted */
+ if (rc == -EINTR) {
+ tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
+ volatile_fid);
+ if (tmp_rc)
+ cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
+ persistent_fid, tmp_rc);
+ }
+
+ return rc;
}
int
@@ -3050,12 +3142,16 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype = CIFS_NO_BUFFER;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server;
int flags = 0;
bool allocated = false;
cifs_dbg(FYI, "Query Info\n");
- if (!ses || !(ses->server))
+ if (!ses)
+ return -EIO;
+ server = ses->server;
+ if (!server)
return -EIO;
if (smb3_encryption_required(tcon))
@@ -3093,7 +3189,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
if (!*data) {
*data = kmalloc(*dlen, GFP_KERNEL);
if (!*data) {
- cifs_dbg(VFS,
+ cifs_tcon_dbg(VFS,
"Error %d allocating memory for acl\n",
rc);
*dlen = 0;
@@ -3154,6 +3250,92 @@ SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
}
/*
+ * A CHANGE_NOTIFY request is sent to get notifications of changes to a directory;
+ * see MS-SMB2 2.2.35 and 2.2.36
+ */
+
+static int
+SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
+ struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid,
+ u32 completion_filter, bool watch_tree)
+{
+ struct smb2_change_notify_req *req;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned int total_len;
+ int rc;
+
+ rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, (void **) &req, &total_len);
+ if (rc)
+ return rc;
+
+ req->PersistentFileId = persistent_fid;
+ req->VolatileFileId = volatile_fid;
+ req->OutputBufferLength =
+ cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
+ req->CompletionFilter = cpu_to_le32(completion_filter);
+ if (watch_tree)
+ req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
+ else
+ req->Flags = 0;
+
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+
+ return 0;
+}
+
+int
+SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, bool watch_tree,
+ u32 completion_filter)
+{
+ struct cifs_ses *ses = tcon->ses;
+ struct smb_rqst rqst;
+ struct kvec iov[1];
+ struct kvec rsp_iov = {NULL, 0};
+ int resp_buftype = CIFS_NO_BUFFER;
+ int flags = 0;
+ int rc = 0;
+
+ cifs_dbg(FYI, "change notify\n");
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = SMB2_notify_init(xid, &rqst, tcon, persistent_fid, volatile_fid,
+ completion_filter, watch_tree);
+ if (rc)
+ goto cnotify_exit;
+
+ trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
+ (u8)watch_tree, completion_filter);
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+
+ if (rc != 0) {
+ cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
+ trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
+ (u8)watch_tree, completion_filter, rc);
+ } else
+ trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
+ ses->Suid, (u8)watch_tree, completion_filter);
+
+ cnotify_exit:
+ if (rqst.rq_iov)
+ cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+ return rc;
+}
+
+
+
+/*
* This is a no-op for now. We're not really interested in the reply, but
* rather in the fact that the server sent one and that server->lstrp
* gets updated.
@@ -3282,51 +3464,76 @@ SMB2_echo(struct TCP_Server_Info *server)
return rc;
}
+void
+SMB2_flush_free(struct smb_rqst *rqst)
+{
+ if (rqst && rqst->rq_iov)
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+}
+
int
-SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
- u64 volatile_fid)
+SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
+ struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid)
{
- struct smb_rqst rqst;
struct smb2_flush_req *req;
- struct cifs_ses *ses = tcon->ses;
- struct kvec iov[1];
- struct kvec rsp_iov;
- int resp_buftype;
- int rc = 0;
- int flags = 0;
+ struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
-
- cifs_dbg(FYI, "Flush\n");
-
- if (!ses || !(ses->server))
- return -EIO;
+ int rc;
rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len);
if (rc)
return rc;
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
-
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
+ return 0;
+}
+
+int
+SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ u64 volatile_fid)
+{
+ struct cifs_ses *ses = tcon->ses;
+ struct smb_rqst rqst;
+ struct kvec iov[1];
+ struct kvec rsp_iov = {NULL, 0};
+ int resp_buftype = CIFS_NO_BUFFER;
+ int flags = 0;
+ int rc = 0;
+
+ cifs_dbg(FYI, "flush\n");
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
+ rc = SMB2_flush_init(xid, &rqst, tcon, persistent_fid, volatile_fid);
+ if (rc)
+ goto flush_exit;
+
+ trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(req);
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
rc);
- }
+ } else
+ trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
+ ses->Suid);
+ flush_exit:
+ SMB2_flush_free(&rqst);
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
return rc;
}
@@ -3441,8 +3648,8 @@ smb2_readv_callback(struct mid_q_entry *mid)
struct smb2_sync_hdr *shdr =
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
struct cifs_credits credits = { .value = 0, .instance = 0 };
- struct smb_rqst rqst = { .rq_iov = rdata->iov,
- .rq_nvec = 2,
+ struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
+ .rq_nvec = 1,
.rq_pages = rdata->pages,
.rq_offset = rdata->page_offset,
.rq_npages = rdata->nr_pages,
@@ -3463,7 +3670,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
rc = smb2_verify_signature(&rqst, server);
if (rc)
- cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
+ cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
rc);
}
/* FIXME: should this be counted toward the initiating task? */
@@ -3590,7 +3797,7 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, char **buf, int *buf_type)
{
struct smb_rqst rqst;
- int resp_buftype, rc = -EACCES;
+ int resp_buftype, rc;
struct smb2_read_plain_req *req = NULL;
struct smb2_read_rsp *rsp = NULL;
struct kvec iov[1];
@@ -4053,7 +4260,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
- cifs_dbg(VFS, "info level %u isn't supported\n",
+ cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
rc = -EINVAL;
goto qdir_exit;
@@ -4144,7 +4351,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
else if (resp_buftype == CIFS_SMALL_BUFFER)
srch_inf->smallBuf = true;
else
- cifs_dbg(VFS, "illegal search buffer type\n");
+ cifs_tcon_dbg(VFS, "illegal search buffer type\n");
trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, srch_inf->entries_in_buffer);
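
A minimal caller sketch for the new CHANGE_NOTIFY plumbing above. It is not part of the patch: the helper name is hypothetical, the directory is assumed to be already opened with its identifiers in a struct cifs_fid, and FILE_NOTIFY_CHANGE_NAME is assumed to be one of the standard MS-SMB2 completion-filter bits from the existing CIFS headers.

/*
 * Illustrative sketch only (not part of this patch): issue a blocking
 * CHANGE_NOTIFY on an already-opened directory handle. watch_tree = false
 * limits notifications to the directory itself rather than its subtree.
 */
static int example_wait_for_dir_change(const unsigned int xid,
				       struct cifs_tcon *tcon,
				       struct cifs_fid *fid)
{
	return SMB2_change_notify(xid, tcon, fid->persistent_fid,
				  fid->volatile_fid, false /* watch_tree */,
				  FILE_NOTIFY_CHANGE_NAME);
}

The wrapper simply blocks in cifs_send_recv() until the server posts a change or the request is cancelled, mirroring what SMB2_change_notify() itself does above.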
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 747de9317659..f264e1d36fe1 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -143,7 +143,9 @@ struct smb2_transform_hdr {
#define SMB2_FLAGS_ASYNC_COMMAND cpu_to_le32(0x00000002)
#define SMB2_FLAGS_RELATED_OPERATIONS cpu_to_le32(0x00000004)
#define SMB2_FLAGS_SIGNED cpu_to_le32(0x00000008)
+#define SMB2_FLAGS_PRIORITY_MASK cpu_to_le32(0x00000070) /* SMB3.1.1 */
#define SMB2_FLAGS_DFS_OPERATIONS cpu_to_le32(0x10000000)
+#define SMB2_FLAGS_REPLAY_OPERATION cpu_to_le32(0x20000000) /* SMB3 & up */
/*
* Definitions for SMB2 Protocol Data Units (network frames)
@@ -836,6 +838,7 @@ struct create_durable_handle_reconnect_v2 {
struct create_context ccontext;
__u8 Name[8];
struct durable_reconnect_context_v2 dcontext;
+ __u8 Pad[4];
} __packed;
/* See MS-SMB2 2.2.13.2.5 */
@@ -1383,7 +1386,7 @@ struct smb2_oplock_break {
struct smb2_lease_break {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 44 */
- __le16 Reserved;
+ __le16 Epoch;
__le32 Flags;
__u8 LeaseKey[16];
__le32 CurrentLeaseState;
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 07ca72486cfa..d21a5fcc8d06 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -46,7 +46,8 @@ extern int smb2_verify_signature(struct smb_rqst *, struct TCP_Server_Info *);
extern int smb2_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
- struct smb_rqst *rqst);
+ struct TCP_Server_Info *,
+ struct smb_rqst *rqst);
extern struct mid_q_entry *smb2_setup_async_request(
struct TCP_Server_Info *server, struct smb_rqst *rqst);
extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
@@ -84,7 +85,8 @@ extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
umode_t mode, struct cifs_tcon *tcon,
const char *full_path,
struct cifs_sb_info *cifs_sb);
-extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon,
+extern int smb2_mkdir(const unsigned int xid, struct inode *inode,
+ umode_t mode, struct cifs_tcon *tcon,
const char *name, struct cifs_sb_info *cifs_sb);
extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
struct cifs_sb_info *cifs_sb,
@@ -149,6 +151,10 @@ extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
bool is_fsctl, char *in_data, u32 indatalen,
__u32 max_response_size);
extern void SMB2_ioctl_free(struct smb_rqst *rqst);
+extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, bool watch_tree,
+ u32 completion_filter);
+
extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
@@ -158,6 +164,10 @@ extern int SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
extern void SMB2_close_free(struct smb_rqst *rqst);
extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
+extern int SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
+ struct cifs_tcon *tcon,
+ u64 persistent_file_id, u64 volatile_file_id);
+extern void SMB2_flush_free(struct smb_rqst *rqst);
extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct smb2_file_all_info *data);
@@ -203,6 +213,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
const __u8 oplock_level);
+extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
+ __u64 persistent_fid,
+ __u64 volatile_fid);
extern int smb2_handle_cancelled_mid(char *buffer,
struct TCP_Server_Info *server);
void smb2_cancelled_close_fid(struct work_struct *work);
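
The SMB2_flush_init()/SMB2_flush_free() prototypes added above follow the init/send/free split used elsewhere in this series: request construction is separated from transport so a caller can build the flush and defer or batch the send. A minimal sketch of the pattern, assuming valid fids and a connected tcon and with error handling and tracing trimmed; it is essentially what the refactored SMB2_flush() wrapper does and is shown only to illustrate the split.

/* Sketch only (not part of this patch). */
static int example_flush_via_split_api(const unsigned int xid,
				       struct cifs_tcon *tcon,
				       u64 persistent_fid, u64 volatile_fid)
{
	struct smb_rqst rqst;
	struct kvec iov[1];
	struct kvec rsp_iov = {NULL, 0};
	int resp_buftype = CIFS_NO_BUFFER;
	int rc;

	memset(&rqst, 0, sizeof(rqst));
	memset(&iov, 0, sizeof(iov));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	/* build the flush request but do not send it yet */
	rc = SMB2_flush_init(xid, &rqst, tcon, persistent_fid, volatile_fid);
	if (rc)
		goto out;

	/* flags (e.g. CIFS_TRANSFORM_REQ on encrypted shares) omitted */
	rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buftype, 0, &rsp_iov);
out:
	SMB2_flush_free(&rqst);
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}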
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 1ccbcf9c2c3b..387c88704c52 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -98,6 +98,61 @@ err:
return rc;
}
+
+static
+int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+{
+ struct cifs_chan *chan;
+ struct cifs_ses *ses = NULL;
+ int i;
+ int rc = 0;
+
+ spin_lock(&cifs_tcp_ses_lock);
+
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (ses->Suid == ses_id)
+ goto found;
+ }
+ }
+ cifs_server_dbg(VFS, "%s: Could not find session 0x%llx\n",
+ __func__, ses_id);
+ rc = -ENOENT;
+ goto out;
+
+found:
+ if (ses->binding) {
+ /*
+ * If we are in the process of binding a new channel
+ * to an existing session, use the master connection
+ * session key
+ */
+ memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
+ goto out;
+ }
+
+ /*
+ * Otherwise, use the channel key.
+ */
+
+ for (i = 0; i < ses->chan_count; i++) {
+ chan = ses->chans + i;
+ if (chan->server == server) {
+ memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
+ goto out;
+ }
+ }
+
+ cifs_dbg(VFS,
+ "%s: Could not find channel signing key for session 0x%llx\n",
+ __func__, ses_id);
+ rc = -ENOENT;
+
+out:
+ spin_unlock(&cifs_tcp_ses_lock);
+ return rc;
+}
+
static struct cifs_ses *
smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
{
@@ -176,7 +231,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
ses = smb2_find_smb_ses(server, shdr->SessionId);
if (!ses) {
- cifs_dbg(VFS, "%s: Could not find session\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not find session\n", __func__);
return 0;
}
@@ -185,21 +240,21 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
rc = smb2_crypto_shash_allocate(server);
if (rc) {
- cifs_dbg(VFS, "%s: sha256 alloc failed\n", __func__);
+ cifs_server_dbg(VFS, "%s: sha256 alloc failed\n", __func__);
return rc;
}
rc = crypto_shash_setkey(server->secmech.hmacsha256,
ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not update with response\n", __func__);
return rc;
}
shash = &server->secmech.sdeschmacsha256->shash;
rc = crypto_shash_init(shash);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init sha256", __func__);
+ cifs_server_dbg(VFS, "%s: Could not init sha256", __func__);
return rc;
}
@@ -215,7 +270,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
rc = crypto_shash_update(shash, iov[0].iov_base,
iov[0].iov_len);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with payload\n",
+ cifs_server_dbg(VFS, "%s: Could not update with payload\n",
__func__);
return rc;
}
@@ -239,68 +294,69 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
int rc = 0;
unsigned char prfhash[SMB2_HMACSHA256_SIZE];
unsigned char *hashptr = prfhash;
+ struct TCP_Server_Info *server = ses->server;
memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE);
memset(key, 0x0, key_size);
- rc = smb3_crypto_shash_allocate(ses->server);
+ rc = smb3_crypto_shash_allocate(server);
if (rc) {
- cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
+ cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
goto smb3signkey_ret;
}
- rc = crypto_shash_setkey(ses->server->secmech.hmacsha256,
+ rc = crypto_shash_setkey(server->secmech.hmacsha256,
ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not set with session key\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not set with session key\n", __func__);
goto smb3signkey_ret;
}
- rc = crypto_shash_init(&ses->server->secmech.sdeschmacsha256->shash);
+ rc = crypto_shash_init(&server->secmech.sdeschmacsha256->shash);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init sign hmac\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not init sign hmac\n", __func__);
goto smb3signkey_ret;
}
- rc = crypto_shash_update(&ses->server->secmech.sdeschmacsha256->shash,
+ rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
i, 4);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with n\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not update with n\n", __func__);
goto smb3signkey_ret;
}
- rc = crypto_shash_update(&ses->server->secmech.sdeschmacsha256->shash,
+ rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
label.iov_base, label.iov_len);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with label\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not update with label\n", __func__);
goto smb3signkey_ret;
}
- rc = crypto_shash_update(&ses->server->secmech.sdeschmacsha256->shash,
+ rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
&zero, 1);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with zero\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not update with zero\n", __func__);
goto smb3signkey_ret;
}
- rc = crypto_shash_update(&ses->server->secmech.sdeschmacsha256->shash,
+ rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
context.iov_base, context.iov_len);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with context\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not update with context\n", __func__);
goto smb3signkey_ret;
}
- rc = crypto_shash_update(&ses->server->secmech.sdeschmacsha256->shash,
+ rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
L, 4);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with L\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
goto smb3signkey_ret;
}
- rc = crypto_shash_final(&ses->server->secmech.sdeschmacsha256->shash,
+ rc = crypto_shash_final(&server->secmech.sdeschmacsha256->shash,
hashptr);
if (rc) {
- cifs_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__);
goto smb3signkey_ret;
}
@@ -327,21 +383,45 @@ generate_smb3signingkey(struct cifs_ses *ses,
{
int rc;
- rc = generate_key(ses, ptriplet->signing.label,
- ptriplet->signing.context, ses->smb3signingkey,
- SMB3_SIGN_KEY_SIZE);
- if (rc)
- return rc;
+ /*
+ * All channels use the same encryption/decryption keys but
+ * they have their own signing key.
+ *
+ * When we generate the keys, check if it is for a new channel
+ * (binding), in which case we only need to generate a signing
+ * key and store it in the channel so as not to overwrite the
+ * master connection signing key stored in the session.
+ */
- rc = generate_key(ses, ptriplet->encryption.label,
- ptriplet->encryption.context, ses->smb3encryptionkey,
- SMB3_SIGN_KEY_SIZE);
- if (rc)
- return rc;
+ if (ses->binding) {
+ rc = generate_key(ses, ptriplet->signing.label,
+ ptriplet->signing.context,
+ cifs_ses_binding_channel(ses)->signkey,
+ SMB3_SIGN_KEY_SIZE);
+ if (rc)
+ return rc;
+ } else {
+ rc = generate_key(ses, ptriplet->signing.label,
+ ptriplet->signing.context,
+ ses->smb3signingkey,
+ SMB3_SIGN_KEY_SIZE);
+ if (rc)
+ return rc;
- rc = generate_key(ses, ptriplet->decryption.label,
- ptriplet->decryption.context,
- ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
+ memcpy(ses->chans[0].signkey, ses->smb3signingkey,
+ SMB3_SIGN_KEY_SIZE);
+
+ rc = generate_key(ses, ptriplet->encryption.label,
+ ptriplet->encryption.context,
+ ses->smb3encryptionkey,
+ SMB3_SIGN_KEY_SIZE);
+ if (rc)
+ return rc;
+
+ rc = generate_key(ses, ptriplet->decryption.label,
+ ptriplet->decryption.context,
+ ses->smb3decryptionkey,
+ SMB3_SIGN_KEY_SIZE);
+ if (rc)
+ return rc;
+ }
if (rc)
return rc;
@@ -430,23 +510,21 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
unsigned char *sigptr = smb3_signature;
struct kvec *iov = rqst->rq_iov;
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
- struct cifs_ses *ses;
struct shash_desc *shash = &server->secmech.sdesccmacaes->shash;
struct smb_rqst drqst;
+ u8 key[SMB3_SIGN_KEY_SIZE];
- ses = smb2_find_smb_ses(server, shdr->SessionId);
- if (!ses) {
- cifs_dbg(VFS, "%s: Could not find session\n", __func__);
+ rc = smb2_get_sign_key(shdr->SessionId, server, key);
+ if (rc)
return 0;
- }
memset(smb3_signature, 0x0, SMB2_CMACAES_SIZE);
memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
rc = crypto_shash_setkey(server->secmech.cmacaes,
- ses->smb3signingkey, SMB2_CMACAES_SIZE);
+ key, SMB2_CMACAES_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__);
return rc;
}
@@ -457,7 +535,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
*/
rc = crypto_shash_init(shash);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
+ cifs_server_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
return rc;
}
@@ -473,7 +551,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
rc = crypto_shash_update(shash, iov[0].iov_base,
iov[0].iov_len);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with payload\n",
+ cifs_server_dbg(VFS, "%s: Could not update with payload\n",
__func__);
return rc;
}
@@ -493,16 +571,25 @@ static int
smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
int rc = 0;
- struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
+ struct smb2_sync_hdr *shdr;
+ struct smb2_sess_setup_req *ssr;
+ bool is_binding;
+ bool is_signed;
- if (!(shdr->Flags & SMB2_FLAGS_SIGNED) ||
- server->tcpStatus == CifsNeedNegotiate)
- return rc;
+ shdr = (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
+ ssr = (struct smb2_sess_setup_req *)shdr;
+
+ is_binding = shdr->Command == SMB2_SESSION_SETUP &&
+ (ssr->Flags & SMB2_SESSION_REQ_FLAG_BINDING);
+ is_signed = shdr->Flags & SMB2_FLAGS_SIGNED;
- if (!server->session_estab) {
+ if (!is_signed)
+ return 0;
+ if (server->tcpStatus == CifsNeedNegotiate)
+ return 0;
+ if (!is_binding && !server->session_estab) {
strncpy(shdr->Signature, "BSRSPYL", 8);
- return rc;
+ return 0;
}
rc = server->ops->calc_signature(rqst, server);
@@ -521,6 +608,7 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
if ((shdr->Command == SMB2_NEGOTIATE) ||
(shdr->Command == SMB2_SESSION_SETUP) ||
(shdr->Command == SMB2_OPLOCK_BREAK) ||
+ server->ignore_signature ||
(!server->session_estab))
return 0;
@@ -608,18 +696,18 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
}
static int
-smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_sync_hdr *shdr,
- struct mid_q_entry **mid)
+smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
+ struct smb2_sync_hdr *shdr, struct mid_q_entry **mid)
{
- if (ses->server->tcpStatus == CifsExiting)
+ if (server->tcpStatus == CifsExiting)
return -ENOENT;
- if (ses->server->tcpStatus == CifsNeedReconnect) {
+ if (server->tcpStatus == CifsNeedReconnect) {
cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
return -EAGAIN;
}
- if (ses->server->tcpStatus == CifsNeedNegotiate &&
+ if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE)
return -EAGAIN;
@@ -636,11 +724,11 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_sync_hdr *shdr,
/* else ok - we are shutting down the session */
}
- *mid = smb2_mid_entry_alloc(shdr, ses->server);
+ *mid = smb2_mid_entry_alloc(shdr, server);
if (*mid == NULL)
return -ENOMEM;
spin_lock(&GlobalMid_Lock);
- list_add_tail(&(*mid)->qhead, &ses->server->pending_mid_q);
+ list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
return 0;
@@ -665,7 +753,7 @@ smb2_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
rc = smb2_verify_signature(&rqst, server);
if (rc)
- cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
+ cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
rc);
}
@@ -673,24 +761,25 @@ smb2_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
}
struct mid_q_entry *
-smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
+smb2_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst)
{
int rc;
struct smb2_sync_hdr *shdr =
(struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
- smb2_seq_num_into_buf(ses->server, shdr);
+ smb2_seq_num_into_buf(server, shdr);
- rc = smb2_get_mid_entry(ses, shdr, &mid);
+ rc = smb2_get_mid_entry(ses, server, shdr, &mid);
if (rc) {
- revert_current_mid_from_hdr(ses->server, shdr);
+ revert_current_mid_from_hdr(server, shdr);
return ERR_PTR(rc);
}
- rc = smb2_sign_rqst(rqst, ses->server);
+ rc = smb2_sign_rqst(rqst, server);
if (rc) {
- revert_current_mid_from_hdr(ses->server, shdr);
+ revert_current_mid_from_hdr(server, shdr);
cifs_delete_mid(mid);
return ERR_PTR(rc);
}
@@ -739,7 +828,7 @@ smb3_crypto_aead_allocate(struct TCP_Server_Info *server)
else
tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
if (IS_ERR(tfm)) {
- cifs_dbg(VFS, "%s: Failed to alloc encrypt aead\n",
+ cifs_server_dbg(VFS, "%s: Failed to alloc encrypt aead\n",
__func__);
return PTR_ERR(tfm);
}
@@ -754,7 +843,7 @@ smb3_crypto_aead_allocate(struct TCP_Server_Info *server)
if (IS_ERR(tfm)) {
crypto_free_aead(server->secmech.ccmaesencrypt);
server->secmech.ccmaesencrypt = NULL;
- cifs_dbg(VFS, "%s: Failed to alloc decrypt aead\n",
+ cifs_server_dbg(VFS, "%s: Failed to alloc decrypt aead\n",
__func__);
return PTR_ERR(tfm);
}
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index cd07e5301d42..5b1b97e9e0c9 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -1069,7 +1069,7 @@ static int smbd_post_send_data(
if (n_vec > SMBDIRECT_MAX_SGE) {
cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
- return -ENOMEM;
+ return -EINVAL;
}
sg_init_table(sgl, n_vec);
@@ -1476,6 +1476,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
info->transport_status = SMBD_DESTROYED;
destroy_workqueue(info->workqueue);
+ log_rdma_event(INFO, "rdma session destroyed\n");
kfree(info);
}
@@ -1505,8 +1506,9 @@ create_conn:
log_rdma_event(INFO, "creating rdma session\n");
server->smbd_conn = smbd_get_connection(
server, (struct sockaddr *) &server->dstaddr);
- log_rdma_event(INFO, "created rdma session info=%p\n",
- server->smbd_conn);
+
+ if (server->smbd_conn)
+ cifs_dbg(VFS, "RDMA transport re-established\n");
return server->smbd_conn ? 0 : -ENOENT;
}
@@ -1654,15 +1656,17 @@ static struct smbd_connection *_smbd_get_connection(
info->send_cq = NULL;
info->recv_cq = NULL;
- info->send_cq = ib_alloc_cq(info->id->device, info,
- info->send_credit_target, 0, IB_POLL_SOFTIRQ);
+ info->send_cq =
+ ib_alloc_cq_any(info->id->device, info,
+ info->send_credit_target, IB_POLL_SOFTIRQ);
if (IS_ERR(info->send_cq)) {
info->send_cq = NULL;
goto alloc_cq_failed;
}
- info->recv_cq = ib_alloc_cq(info->id->device, info,
- info->receive_credit_max, 0, IB_POLL_SOFTIRQ);
+ info->recv_cq =
+ ib_alloc_cq_any(info->id->device, info,
+ info->receive_credit_max, IB_POLL_SOFTIRQ);
if (IS_ERR(info->recv_cq)) {
info->recv_cq = NULL;
goto alloc_cq_failed;
@@ -1968,7 +1972,7 @@ read_rfc1002_done:
if (info->transport_status != SMBD_CONNECTED) {
log_read(ERR, "disconnected\n");
- return 0;
+ return -ECONNABORTED;
}
goto again;
@@ -2267,12 +2271,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
int rc;
list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
- if (smbdirect_mr->state == MR_INVALIDATED)
- ib_dma_unmap_sg(
- info->id->device, smbdirect_mr->sgl,
- smbdirect_mr->sgl_count,
- smbdirect_mr->dir);
- else if (smbdirect_mr->state == MR_ERROR) {
+ if (smbdirect_mr->state == MR_ERROR) {
/* recover this MR entry */
rc = ib_dereg_mr(smbdirect_mr->mr);
@@ -2600,11 +2599,20 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
*/
smbdirect_mr->state = MR_INVALIDATED;
- /*
- * Schedule the work to do MR recovery for future I/Os
- * MR recovery is slow and we don't want it to block the current I/O
- */
- queue_work(info->workqueue, &info->mr_recovery_work);
+ if (smbdirect_mr->state == MR_INVALIDATED) {
+ ib_dma_unmap_sg(
+ info->id->device, smbdirect_mr->sgl,
+ smbdirect_mr->sgl_count,
+ smbdirect_mr->dir);
+ smbdirect_mr->state = MR_READY;
+ if (atomic_inc_return(&info->mr_ready_count) == 1)
+ wake_up_interruptible(&info->wait_mr);
+ } else
+ /*
+ * Schedule the work to do MR recovery for future I/Os. MR
+ * recovery is slow and we don't want it to block the current I/O.
+ */
+ queue_work(info->workqueue, &info->mr_recovery_work);
done:
if (atomic_dec_and_test(&info->mr_used_count))
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 2b6d87bfdf8e..39a938443e3e 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -11,13 +11,14 @@
*/
-#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/fips.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/random.h>
+#include <crypto/des.h>
#include "cifs_fs_sb.h"
#include "cifs_unicode.h"
#include "cifspdu.h"
@@ -58,19 +59,18 @@ static int
smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
{
unsigned char key2[8];
- struct crypto_cipher *tfm_des;
+ struct des_ctx ctx;
str_to_key(key, key2);
- tfm_des = crypto_alloc_cipher("des", 0, 0);
- if (IS_ERR(tfm_des)) {
- cifs_dbg(VFS, "could not allocate des crypto API\n");
- return PTR_ERR(tfm_des);
+ if (fips_enabled) {
+ cifs_dbg(VFS, "FIPS compliance enabled: DES not permitted\n");
+ return -ENOENT;
}
- crypto_cipher_setkey(tfm_des, key2, 8);
- crypto_cipher_encrypt_one(tfm_des, out, in);
- crypto_free_cipher(tfm_des);
+ des_expand_key(&ctx, key2, DES_KEY_SIZE);
+ des_encrypt(&ctx, out, in);
+ memzero_explicit(&ctx, sizeof(ctx));
return 0;
}
diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h
index 08628e6a42ac..1ff28529cf4b 100644
--- a/fs/cifs/smbfsctl.h
+++ b/fs/cifs/smbfsctl.h
@@ -144,6 +144,17 @@
#define IO_REPARSE_APPXSTREAM 0xC0000014
/* NFS symlinks, Win 8/SMB3 and later */
#define IO_REPARSE_TAG_NFS 0x80000014
+/*
+ * AzureFileSync - see
+ * https://docs.microsoft.com/en-us/azure/storage/files/storage-sync-cloud-tiering
+ */
+#define IO_REPARSE_TAG_AZ_FILE_SYNC 0x8000001e
+/* WSL reparse tags */
+#define IO_REPARSE_TAG_LX_SYMLINK 0xA000001D
+#define IO_REPARSE_TAG_AF_UNIX 0x80000023
+#define IO_REPARSE_TAG_LX_FIFO 0x80000024
+#define IO_REPARSE_TAG_LX_CHR 0x80000025
+#define IO_REPARSE_TAG_LX_BLK 0x80000026
/* fsctl flags */
/* If Flags is set to this value, the request is an FSCTL not ioctl request */
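
The reparse tags added above are plain constants; a hypothetical helper (not in this patch, name and usage illustrative only) shows how a client might map the WSL tags onto the POSIX file type it reports for such entries.

/* Hypothetical helper: map a WSL reparse tag to a POSIX file type. */
static umode_t example_wsl_tag_to_mode(u32 reparse_tag)
{
	switch (reparse_tag) {
	case IO_REPARSE_TAG_LX_SYMLINK:	return S_IFLNK;
	case IO_REPARSE_TAG_AF_UNIX:	return S_IFSOCK;
	case IO_REPARSE_TAG_LX_FIFO:	return S_IFIFO;
	case IO_REPARSE_TAG_LX_CHR:	return S_IFCHR;
	case IO_REPARSE_TAG_LX_BLK:	return S_IFBLK;
	default:			return 0; /* not a WSL special file */
	}
}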
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index 99c4d799c24b..e7e350b13d6a 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -117,6 +117,41 @@ DEFINE_SMB3_RW_DONE_EVENT(falloc_done);
/*
* For handle based calls other than read and write, and get/set info
*/
+DECLARE_EVENT_CLASS(smb3_fd_class,
+ TP_PROTO(unsigned int xid,
+ __u64 fid,
+ __u32 tid,
+ __u64 sesid),
+ TP_ARGS(xid, fid, tid, sesid),
+ TP_STRUCT__entry(
+ __field(unsigned int, xid)
+ __field(__u64, fid)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ ),
+ TP_fast_assign(
+ __entry->xid = xid;
+ __entry->fid = fid;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ ),
+ TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx",
+ __entry->xid, __entry->sesid, __entry->tid, __entry->fid)
+)
+
+#define DEFINE_SMB3_FD_EVENT(name) \
+DEFINE_EVENT(smb3_fd_class, smb3_##name, \
+ TP_PROTO(unsigned int xid, \
+ __u64 fid, \
+ __u32 tid, \
+ __u64 sesid), \
+ TP_ARGS(xid, fid, tid, sesid))
+
+DEFINE_SMB3_FD_EVENT(flush_enter);
+DEFINE_SMB3_FD_EVENT(flush_done);
+DEFINE_SMB3_FD_EVENT(close_enter);
+DEFINE_SMB3_FD_EVENT(close_done);
+
DECLARE_EVENT_CLASS(smb3_fd_err_class,
TP_PROTO(unsigned int xid,
__u64 fid,
@@ -200,6 +235,8 @@ DEFINE_EVENT(smb3_inf_enter_class, smb3_##name, \
DEFINE_SMB3_INF_ENTER_EVENT(query_info_enter);
DEFINE_SMB3_INF_ENTER_EVENT(query_info_done);
+DEFINE_SMB3_INF_ENTER_EVENT(notify_enter);
+DEFINE_SMB3_INF_ENTER_EVENT(notify_done);
DECLARE_EVENT_CLASS(smb3_inf_err_class,
TP_PROTO(unsigned int xid,
@@ -246,6 +283,7 @@ DEFINE_EVENT(smb3_inf_err_class, smb3_##name, \
DEFINE_SMB3_INF_ERR_EVENT(query_info_err);
DEFINE_SMB3_INF_ERR_EVENT(set_info_err);
+DEFINE_SMB3_INF_ERR_EVENT(notify_err);
DEFINE_SMB3_INF_ERR_EVENT(fsctl_err);
DECLARE_EVENT_CLASS(smb3_inf_compound_enter_class,
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 5d6d44bfe10a..3d2e11f85cba 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -86,29 +86,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
- struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
- refcount);
-
- mempool_free(mid, cifs_mid_poolp);
-}
-
-void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
-{
- spin_lock(&GlobalMid_Lock);
- kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
- spin_unlock(&GlobalMid_Lock);
-}
-
-void
-DeleteMidQEntry(struct mid_q_entry *midEntry)
-{
+ struct mid_q_entry *midEntry =
+ container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
__le16 command = midEntry->server->vals->lock_cmd;
__u16 smb_cmd = le16_to_cpu(midEntry->command);
unsigned long now;
unsigned long roundtrip_time;
- struct TCP_Server_Info *server = midEntry->server;
#endif
+ struct TCP_Server_Info *server = midEntry->server;
+
+ if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
+ midEntry->mid_state == MID_RESPONSE_RECEIVED &&
+ server->ops->handle_cancelled_mid)
+ server->ops->handle_cancelled_mid(midEntry->resp_buf, server);
+
midEntry->mid_state = MID_FREE;
atomic_dec(&midCount);
if (midEntry->large_buf)
@@ -118,7 +110,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
#ifdef CONFIG_CIFS_STATS2
now = jiffies;
if (now < midEntry->when_alloc)
- cifs_dbg(VFS, "invalid mid allocation time\n");
+ cifs_server_dbg(VFS, "invalid mid allocation time\n");
roundtrip_time = now - midEntry->when_alloc;
if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
@@ -166,6 +158,19 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
}
}
#endif
+
+ mempool_free(midEntry, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+ spin_lock(&GlobalMid_Lock);
+ kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+ spin_unlock(&GlobalMid_Lock);
+}
+
+void DeleteMidQEntry(struct mid_q_entry *midEntry)
+{
cifs_mid_q_entry_release(midEntry);
}
@@ -173,8 +178,10 @@ void
cifs_delete_mid(struct mid_q_entry *mid)
{
spin_lock(&GlobalMid_Lock);
- list_del_init(&mid->qhead);
- mid->mid_flags |= MID_DELETED;
+ if (!(mid->mid_flags & MID_DELETED)) {
+ list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
+ }
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(mid);
@@ -232,7 +239,7 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
retries++;
if (retries >= 14 ||
(!server->noblocksnd && (retries > 2))) {
- cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
+ cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
ssocket);
return -EAGAIN;
}
@@ -246,7 +253,7 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
if (rc == 0) {
/* should never happen, letting socket clear before
retrying is our only obvious option here */
- cifs_dbg(VFS, "tcp sent no data\n");
+ cifs_server_dbg(VFS, "tcp sent no data\n");
msleep(500);
continue;
}
@@ -318,8 +325,11 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
int val = 1;
__be32 rfc1002_marker;
- if (cifs_rdma_enabled(server) && server->smbd_conn) {
- rc = smbd_send(server, num_rqst, rqst);
+ if (cifs_rdma_enabled(server)) {
+ /* return -EAGAIN when connecting or reconnecting */
+ rc = -EAGAIN;
+ if (server->smbd_conn)
+ rc = smbd_send(server, num_rqst, rqst);
goto smbd_done;
}
@@ -440,7 +450,7 @@ unmask:
}
smbd_done:
if (rc < 0 && rc != -EINTR)
- cifs_dbg(VFS, "Error %d sending data on socket to server\n",
+ cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
else if (rc > 0)
rc = 0;
@@ -473,8 +483,8 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
cur_rqst[0].rq_nvec = 1;
if (!server->ops->init_transform_rq) {
- cifs_dbg(VFS, "Encryption requested but transform callback "
- "is missing\n");
+ cifs_server_dbg(VFS, "Encryption requested but transform "
+ "callback is missing\n");
return -EIO;
}
@@ -532,6 +542,8 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
/* oplock breaks must not be held up */
server->in_flight++;
+ if (server->in_flight > server->max_in_flight)
+ server->max_in_flight = server->in_flight;
*credits -= 1;
*instance = server->reconnect_instance;
spin_unlock(&server->req_lock);
@@ -548,7 +560,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
if (!rc) {
trace_smb3_credit_timeout(server->CurrentMid,
server->hostname, num_credits);
- cifs_dbg(VFS, "wait timed out after %d ms\n",
+ cifs_server_dbg(VFS, "wait timed out after %d ms\n",
timeout);
return -ENOTSUPP;
}
@@ -589,7 +601,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
trace_smb3_credit_timeout(
server->CurrentMid,
server->hostname, num_credits);
- cifs_dbg(VFS, "wait timed out after %d ms\n",
+ cifs_server_dbg(VFS, "wait timed out after %d ms\n",
timeout);
return -ENOTSUPP;
}
@@ -608,6 +620,8 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
*credits -= num_credits;
server->in_flight += num_credits;
+ if (server->in_flight > server->max_in_flight)
+ server->max_in_flight = server->in_flight;
*instance = server->reconnect_instance;
}
spin_unlock(&server->req_lock);
@@ -868,8 +882,11 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
rc = -EHOSTDOWN;
break;
default:
- list_del_init(&mid->qhead);
- cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
+ if (!(mid->mid_flags & MID_DELETED)) {
+ list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
+ }
+ cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
__func__, mid->mid, mid->mid_state);
rc = -EIO;
}
@@ -910,7 +927,7 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
rc = cifs_verify_signature(&rqst, server,
mid->sequence_number);
if (rc)
- cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
+ cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
rc);
}
@@ -919,7 +936,8 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
}
struct mid_q_entry *
-cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
+cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
+ struct smb_rqst *rqst)
{
int rc;
struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
@@ -991,7 +1009,18 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
- server = ses->server;
+ if (!ses->binding) {
+ uint index = 0;
+
+ if (ses->chan_count > 1) {
+ index = (uint)atomic_inc_return(&ses->chan_seq);
+ index %= ses->chan_count;
+ }
+ server = ses->chans[index].server;
+ } else {
+ server = cifs_ses_server(ses);
+ }
+
if (server->tcpStatus == CifsExiting)
return -ENOENT;
@@ -1036,7 +1065,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
}
for (i = 0; i < num_rqst; i++) {
- midQ[i] = server->ops->setup_request(ses, &rqst[i]);
+ midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
if (IS_ERR(midQ[i])) {
revert_current_mid(server, i);
for (j = 0; j < i; j++)
@@ -1107,12 +1136,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
}
if (rc != 0) {
for (; i < num_rqst; i++) {
- cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
+ cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock);
+ midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
- midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true;
credits[i].value = 0;
@@ -1242,17 +1271,19 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
struct kvec iov = { .iov_base = in_buf, .iov_len = len };
struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
struct cifs_credits credits = { .value = 1, .instance = 0 };
+ struct TCP_Server_Info *server;
if (ses == NULL) {
cifs_dbg(VFS, "Null smb session\n");
return -EIO;
}
- if (ses->server == NULL) {
+ server = ses->server;
+ if (server == NULL) {
cifs_dbg(VFS, "Null tcp session\n");
return -EIO;
}
- if (ses->server->tcpStatus == CifsExiting)
+ if (server->tcpStatus == CifsExiting)
return -ENOENT;
/* Ensure that we do not send more than 50 overlapping requests
@@ -1260,12 +1291,12 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
use ses->maxReq */
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
+ cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
len);
return -EIO;
}
- rc = wait_for_free_request(ses->server, flags, &credits.instance);
+ rc = wait_for_free_request(server, flags, &credits.instance);
if (rc)
return rc;
@@ -1273,70 +1304,70 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
- mutex_lock(&ses->server->srv_mutex);
+ mutex_lock(&server->srv_mutex);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
/* Update # of requests on wire to server */
- add_credits(ses->server, &credits, 0);
+ add_credits(server, &credits, 0);
return rc;
}
- rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
+ rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
if (rc) {
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
goto out;
}
midQ->mid_state = MID_REQUEST_SUBMITTED;
- cifs_in_send_inc(ses->server);
- rc = smb_send(ses->server, in_buf, len);
- cifs_in_send_dec(ses->server);
+ cifs_in_send_inc(server);
+ rc = smb_send(server, in_buf, len);
+ cifs_in_send_dec(server);
cifs_save_when_sent(midQ);
if (rc < 0)
- ses->server->sequence_number -= 2;
+ server->sequence_number -= 2;
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
if (rc < 0)
goto out;
- rc = wait_for_response(ses->server, midQ);
+ rc = wait_for_response(server, midQ);
if (rc != 0) {
- send_cancel(ses->server, &rqst, midQ);
+ send_cancel(server, &rqst, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
- add_credits(ses->server, &credits, 0);
+ add_credits(server, &credits, 0);
return rc;
}
spin_unlock(&GlobalMid_Lock);
}
- rc = cifs_sync_mid_result(midQ, ses->server);
+ rc = cifs_sync_mid_result(midQ, server);
if (rc != 0) {
- add_credits(ses->server, &credits, 0);
+ add_credits(server, &credits, 0);
return rc;
}
if (!midQ->resp_buf || !out_buf ||
midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
- cifs_dbg(VFS, "Bad MID state?\n");
+ cifs_server_dbg(VFS, "Bad MID state?\n");
goto out;
}
*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
- rc = cifs_check_receive(midQ, ses->server, 0);
+ rc = cifs_check_receive(midQ, server, 0);
out:
cifs_delete_mid(midQ);
- add_credits(ses->server, &credits, 0);
+ add_credits(server, &credits, 0);
return rc;
}
@@ -1379,19 +1410,21 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec iov = { .iov_base = in_buf, .iov_len = len };
struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
unsigned int instance;
+ struct TCP_Server_Info *server;
if (tcon == NULL || tcon->ses == NULL) {
cifs_dbg(VFS, "Null smb session\n");
return -EIO;
}
ses = tcon->ses;
+ server = ses->server;
- if (ses->server == NULL) {
+ if (server == NULL) {
cifs_dbg(VFS, "Null tcp session\n");
return -EIO;
}
- if (ses->server->tcpStatus == CifsExiting)
+ if (server->tcpStatus == CifsExiting)
return -ENOENT;
/* Ensure that we do not send more than 50 overlapping requests
@@ -1399,12 +1432,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
use ses->maxReq */
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
+ cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
len);
return -EIO;
}
- rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
+ rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
if (rc)
return rc;
@@ -1412,31 +1445,31 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
- mutex_lock(&ses->server->srv_mutex);
+ mutex_lock(&server->srv_mutex);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
return rc;
}
- rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
+ rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
if (rc) {
cifs_delete_mid(midQ);
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
return rc;
}
midQ->mid_state = MID_REQUEST_SUBMITTED;
- cifs_in_send_inc(ses->server);
- rc = smb_send(ses->server, in_buf, len);
- cifs_in_send_dec(ses->server);
+ cifs_in_send_inc(server);
+ rc = smb_send(server, in_buf, len);
+ cifs_in_send_dec(server);
cifs_save_when_sent(midQ);
if (rc < 0)
- ses->server->sequence_number -= 2;
+ server->sequence_number -= 2;
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
if (rc < 0) {
cifs_delete_mid(midQ);
@@ -1444,21 +1477,21 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
}
/* Wait for a reply - allow signals to interrupt. */
- rc = wait_event_interruptible(ses->server->response_q,
+ rc = wait_event_interruptible(server->response_q,
(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
- ((ses->server->tcpStatus != CifsGood) &&
- (ses->server->tcpStatus != CifsNew)));
+ ((server->tcpStatus != CifsGood) &&
+ (server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
if ((rc == -ERESTARTSYS) &&
(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
- ((ses->server->tcpStatus == CifsGood) ||
- (ses->server->tcpStatus == CifsNew))) {
+ ((server->tcpStatus == CifsGood) ||
+ (server->tcpStatus == CifsNew))) {
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
blocking lock to return. */
- rc = send_cancel(ses->server, &rqst, midQ);
+ rc = send_cancel(server, &rqst, midQ);
if (rc) {
cifs_delete_mid(midQ);
return rc;
@@ -1477,9 +1510,9 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
}
}
- rc = wait_for_response(ses->server, midQ);
+ rc = wait_for_response(server, midQ);
if (rc) {
- send_cancel(ses->server, &rqst, midQ);
+ send_cancel(server, &rqst, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
@@ -1494,20 +1527,20 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
rstart = 1;
}
- rc = cifs_sync_mid_result(midQ, ses->server);
+ rc = cifs_sync_mid_result(midQ, server);
if (rc != 0)
return rc;
/* rcvd frame is ok */
if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
- cifs_dbg(VFS, "Bad MID state?\n");
+ cifs_tcon_dbg(VFS, "Bad MID state?\n");
goto out;
}
*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
- rc = cifs_check_receive(midQ, ses->server, 0);
+ rc = cifs_check_receive(midQ, server, 0);
out:
cifs_delete_mid(midQ);
if (rstart && rc == -EACCES)
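
The channel selection added to compound_send_recv() above rotates requests across the established channels of a session using a shared sequence counter. A sketch of that round-robin in isolation (the function name is illustrative; ses->chan_seq, ses->chan_count and ses->chans[].server are the fields referenced by the patch):

/*
 * Illustrative sketch (not part of this patch): each sender bumps the
 * session-wide sequence number and maps it onto the array of channels,
 * spreading requests evenly across connections.
 */
static struct TCP_Server_Info *example_pick_channel(struct cifs_ses *ses)
{
	unsigned int index = 0;

	if (ses->chan_count > 1)
		index = (unsigned int)atomic_inc_return(&ses->chan_seq) %
			ses->chan_count;

	return ses->chans[index].server;
}

When a new channel is being bound (ses->binding), the patch bypasses this rotation and uses cifs_ses_server(ses) so that session-setup traffic stays on the intended connection.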
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 9076150758d8..db4ba8f6077e 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -31,7 +31,7 @@
#include "cifs_fs_sb.h"
#include "cifs_unicode.h"
-#define MAX_EA_VALUE_SIZE 65535
+#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
#define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
#define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 321f56e487cb..b1c70e2b9b1e 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -188,6 +188,9 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = CODA_SUPER_MAGIC;
sb->s_op = &coda_super_operations;
sb->s_d_op = &coda_dentry_operations;
+ sb->s_time_gran = 1;
+ sb->s_time_min = S64_MIN;
+ sb->s_time_max = S64_MAX;
error = super_setup_bdi(sb);
if (error)
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 644d48c12ce8..3aec27e5eb82 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -63,11 +63,8 @@ static long coda_pioctl(struct file *filp, unsigned int cmd,
* Look up the pathname. Note that the pathname is in
* user memory, and namei takes care of this
*/
- if (data.follow)
- error = user_path(data.path, &path);
- else
- error = user_lpath(data.path, &path);
-
+ error = user_path_at(AT_FDCWD, data.path,
+ data.follow ? LOOKUP_FOLLOW : 0, &path);
if (error)
return error;
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index b7f9ffa1d5f1..aaad4ca1217e 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -48,8 +48,8 @@
#define elf_prstatus compat_elf_prstatus
#define elf_prpsinfo compat_elf_prpsinfo
-#undef ns_to_timeval
-#define ns_to_timeval ns_to_old_timeval32
+#undef ns_to_kernel_old_timeval
+#define ns_to_kernel_old_timeval ns_to_old_timeval32
/*
* To use this file, asm/elf.h must define compat_elf_check_arch.
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 6e30949d9f77..358ea2ecf36b 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -11,8 +11,6 @@
* ioctls.
*/
-#include <linux/joystick.h>
-
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/kernel.h>
@@ -27,13 +25,9 @@
#include <linux/file.h>
#include <linux/ppp-ioctl.h>
#include <linux/if_pppox.h>
-#include <linux/mtio.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
-#include <linux/raw.h>
#include <linux/blkdev.h>
-#include <linux/rtc.h>
-#include <linux/pci.h>
#include <linux/serial.h>
#include <linux/ctype.h>
#include <linux/syscalls.h>
@@ -42,13 +36,6 @@
#include "internal.h"
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_sock.h>
-#include <net/bluetooth/rfcomm.h>
-
-#include <linux/capi.h>
-#include <linux/gigaset_dev.h>
-
#ifdef CONFIG_BLOCK
#include <linux/cdrom.h>
#include <linux/fd.h>
@@ -60,448 +47,11 @@
#include <linux/uaccess.h>
#include <linux/watchdog.h>
-#include <linux/soundcard.h>
-
#include <linux/hiddev.h>
#include <linux/sort.h>
-#ifdef CONFIG_SPARC
-#include <linux/fb.h>
-#include <asm/fbio.h>
-#endif
-
-#define convert_in_user(srcptr, dstptr) \
-({ \
- typeof(*srcptr) val; \
- \
- get_user(val, srcptr) || put_user(val, dstptr); \
-})
-
-static int do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int err;
-
- err = security_file_ioctl(file, cmd, arg);
- if (err)
- return err;
-
- return vfs_ioctl(file, cmd, arg);
-}
-
-#ifdef CONFIG_BLOCK
-typedef struct sg_io_hdr32 {
- compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
- compat_int_t dxfer_direction; /* [i] data transfer direction */
- unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
- unsigned char mx_sb_len; /* [i] max length to write to sbp */
- unsigned short iovec_count; /* [i] 0 implies no scatter gather */
- compat_uint_t dxfer_len; /* [i] byte count of data transfer */
- compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
- or scatter gather list */
- compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
- compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
- compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
- compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
- compat_int_t pack_id; /* [i->o] unused internally (normally) */
- compat_uptr_t usr_ptr; /* [i->o] unused internally */
- unsigned char status; /* [o] scsi status */
- unsigned char masked_status; /* [o] shifted, masked scsi status */
- unsigned char msg_status; /* [o] messaging level data (optional) */
- unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
- unsigned short host_status; /* [o] errors from host adapter */
- unsigned short driver_status; /* [o] errors from software driver */
- compat_int_t resid; /* [o] dxfer_len - actual_transferred */
- compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
- compat_uint_t info; /* [o] auxiliary information */
-} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
-
-typedef struct sg_iovec32 {
- compat_uint_t iov_base;
- compat_uint_t iov_len;
-} sg_iovec32_t;
-
-static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
-{
- sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
- sg_iovec32_t __user *iov32 = dxferp;
- int i;
-
- for (i = 0; i < iovec_count; i++) {
- u32 base, len;
-
- if (get_user(base, &iov32[i].iov_base) ||
- get_user(len, &iov32[i].iov_len) ||
- put_user(compat_ptr(base), &iov[i].iov_base) ||
- put_user(len, &iov[i].iov_len))
- return -EFAULT;
- }
-
- if (put_user(iov, &sgio->dxferp))
- return -EFAULT;
- return 0;
-}
-
-static int sg_ioctl_trans(struct file *file, unsigned int cmd,
- sg_io_hdr32_t __user *sgio32)
-{
- sg_io_hdr_t __user *sgio;
- u16 iovec_count;
- u32 data;
- void __user *dxferp;
- int err;
- int interface_id;
-
- if (get_user(interface_id, &sgio32->interface_id))
- return -EFAULT;
- if (interface_id != 'S')
- return do_ioctl(file, cmd, (unsigned long)sgio32);
-
- if (get_user(iovec_count, &sgio32->iovec_count))
- return -EFAULT;
-
- {
- void __user *top = compat_alloc_user_space(0);
- void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
- (iovec_count * sizeof(sg_iovec_t)));
- if (new > top)
- return -EINVAL;
-
- sgio = new;
- }
-
- /* Ok, now construct. */
- if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
- (2 * sizeof(int)) +
- (2 * sizeof(unsigned char)) +
- (1 * sizeof(unsigned short)) +
- (1 * sizeof(unsigned int))))
- return -EFAULT;
-
- if (get_user(data, &sgio32->dxferp))
- return -EFAULT;
- dxferp = compat_ptr(data);
- if (iovec_count) {
- if (sg_build_iovec(sgio, dxferp, iovec_count))
- return -EFAULT;
- } else {
- if (put_user(dxferp, &sgio->dxferp))
- return -EFAULT;
- }
-
- {
- unsigned char __user *cmdp;
- unsigned char __user *sbp;
-
- if (get_user(data, &sgio32->cmdp))
- return -EFAULT;
- cmdp = compat_ptr(data);
-
- if (get_user(data, &sgio32->sbp))
- return -EFAULT;
- sbp = compat_ptr(data);
-
- if (put_user(cmdp, &sgio->cmdp) ||
- put_user(sbp, &sgio->sbp))
- return -EFAULT;
- }
-
- if (copy_in_user(&sgio->timeout, &sgio32->timeout,
- 3 * sizeof(int)))
- return -EFAULT;
-
- if (get_user(data, &sgio32->usr_ptr))
- return -EFAULT;
- if (put_user(compat_ptr(data), &sgio->usr_ptr))
- return -EFAULT;
-
- err = do_ioctl(file, cmd, (unsigned long) sgio);
-
- if (err >= 0) {
- void __user *datap;
-
- if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
- sizeof(int)) ||
- get_user(datap, &sgio->usr_ptr) ||
- put_user((u32)(unsigned long)datap,
- &sgio32->usr_ptr) ||
- copy_in_user(&sgio32->status, &sgio->status,
- (4 * sizeof(unsigned char)) +
- (2 * sizeof(unsigned short)) +
- (3 * sizeof(int))))
- err = -EFAULT;
- }
-
- return err;
-}
-
-struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
- char req_state;
- char orphan;
- char sg_io_owned;
- char problem;
- int pack_id;
- compat_uptr_t usr_ptr;
- unsigned int duration;
- int unused;
-};
-
-static int sg_grt_trans(struct file *file,
- unsigned int cmd, struct compat_sg_req_info __user *o)
-{
- int err, i;
- sg_req_info_t __user *r;
- r = compat_alloc_user_space(sizeof(sg_req_info_t)*SG_MAX_QUEUE);
- err = do_ioctl(file, cmd, (unsigned long)r);
- if (err < 0)
- return err;
- for (i = 0; i < SG_MAX_QUEUE; i++) {
- void __user *ptr;
- int d;
-
- if (copy_in_user(o + i, r + i, offsetof(sg_req_info_t, usr_ptr)) ||
- get_user(ptr, &r[i].usr_ptr) ||
- get_user(d, &r[i].duration) ||
- put_user((u32)(unsigned long)(ptr), &o[i].usr_ptr) ||
- put_user(d, &o[i].duration))
- return -EFAULT;
- }
- return err;
-}
-#endif /* CONFIG_BLOCK */
-
-struct sock_fprog32 {
- unsigned short len;
- compat_caddr_t filter;
-};
-
-#define PPPIOCSPASS32 _IOW('t', 71, struct sock_fprog32)
-#define PPPIOCSACTIVE32 _IOW('t', 70, struct sock_fprog32)
-
-static int ppp_sock_fprog_ioctl_trans(struct file *file,
- unsigned int cmd, struct sock_fprog32 __user *u_fprog32)
-{
- struct sock_fprog __user *u_fprog64 = compat_alloc_user_space(sizeof(struct sock_fprog));
- void __user *fptr64;
- u32 fptr32;
- u16 flen;
-
- if (get_user(flen, &u_fprog32->len) ||
- get_user(fptr32, &u_fprog32->filter))
- return -EFAULT;
-
- fptr64 = compat_ptr(fptr32);
-
- if (put_user(flen, &u_fprog64->len) ||
- put_user(fptr64, &u_fprog64->filter))
- return -EFAULT;
-
- if (cmd == PPPIOCSPASS32)
- cmd = PPPIOCSPASS;
- else
- cmd = PPPIOCSACTIVE;
-
- return do_ioctl(file, cmd, (unsigned long) u_fprog64);
-}
-
-struct ppp_option_data32 {
- compat_caddr_t ptr;
- u32 length;
- compat_int_t transmit;
-};
-#define PPPIOCSCOMPRESS32 _IOW('t', 77, struct ppp_option_data32)
-
-struct ppp_idle32 {
- compat_time_t xmit_idle;
- compat_time_t recv_idle;
-};
-#define PPPIOCGIDLE32 _IOR('t', 63, struct ppp_idle32)
-
-static int ppp_gidle(struct file *file, unsigned int cmd,
- struct ppp_idle32 __user *idle32)
-{
- struct ppp_idle __user *idle;
- __kernel_time_t xmit, recv;
- int err;
-
- idle = compat_alloc_user_space(sizeof(*idle));
-
- err = do_ioctl(file, PPPIOCGIDLE, (unsigned long) idle);
-
- if (!err) {
- if (get_user(xmit, &idle->xmit_idle) ||
- get_user(recv, &idle->recv_idle) ||
- put_user(xmit, &idle32->xmit_idle) ||
- put_user(recv, &idle32->recv_idle))
- err = -EFAULT;
- }
- return err;
-}
-
-static int ppp_scompress(struct file *file, unsigned int cmd,
- struct ppp_option_data32 __user *odata32)
-{
- struct ppp_option_data __user *odata;
- __u32 data;
- void __user *datap;
-
- odata = compat_alloc_user_space(sizeof(*odata));
-
- if (get_user(data, &odata32->ptr))
- return -EFAULT;
-
- datap = compat_ptr(data);
- if (put_user(datap, &odata->ptr))
- return -EFAULT;
-
- if (copy_in_user(&odata->length, &odata32->length,
- sizeof(__u32) + sizeof(int)))
- return -EFAULT;
-
- return do_ioctl(file, PPPIOCSCOMPRESS, (unsigned long) odata);
-}
-
-#ifdef CONFIG_BLOCK
-struct mtget32 {
- compat_long_t mt_type;
- compat_long_t mt_resid;
- compat_long_t mt_dsreg;
- compat_long_t mt_gstat;
- compat_long_t mt_erreg;
- compat_daddr_t mt_fileno;
- compat_daddr_t mt_blkno;
-};
-#define MTIOCGET32 _IOR('m', 2, struct mtget32)
-
-struct mtpos32 {
- compat_long_t mt_blkno;
-};
-#define MTIOCPOS32 _IOR('m', 3, struct mtpos32)
-
-static int mt_ioctl_trans(struct file *file,
- unsigned int cmd, void __user *argp)
-{
- /* NULL initialization to make gcc shut up */
- struct mtget __user *get = NULL;
- struct mtget32 __user *umget32;
- struct mtpos __user *pos = NULL;
- struct mtpos32 __user *upos32;
- unsigned long kcmd;
- void *karg;
- int err = 0;
-
- switch(cmd) {
- case MTIOCPOS32:
- kcmd = MTIOCPOS;
- pos = compat_alloc_user_space(sizeof(*pos));
- karg = pos;
- break;
- default: /* MTIOCGET32 */
- kcmd = MTIOCGET;
- get = compat_alloc_user_space(sizeof(*get));
- karg = get;
- break;
- }
- if (karg == NULL)
- return -EFAULT;
- err = do_ioctl(file, kcmd, (unsigned long)karg);
- if (err)
- return err;
- switch (cmd) {
- case MTIOCPOS32:
- upos32 = argp;
- err = convert_in_user(&pos->mt_blkno, &upos32->mt_blkno);
- break;
- case MTIOCGET32:
- umget32 = argp;
- err = convert_in_user(&get->mt_type, &umget32->mt_type);
- err |= convert_in_user(&get->mt_resid, &umget32->mt_resid);
- err |= convert_in_user(&get->mt_dsreg, &umget32->mt_dsreg);
- err |= convert_in_user(&get->mt_gstat, &umget32->mt_gstat);
- err |= convert_in_user(&get->mt_erreg, &umget32->mt_erreg);
- err |= convert_in_user(&get->mt_fileno, &umget32->mt_fileno);
- err |= convert_in_user(&get->mt_blkno, &umget32->mt_blkno);
- break;
- }
- return err ? -EFAULT: 0;
-}
-
-#endif /* CONFIG_BLOCK */
-
-/* Bluetooth ioctls */
-#define HCIUARTSETPROTO _IOW('U', 200, int)
-#define HCIUARTGETPROTO _IOR('U', 201, int)
-#define HCIUARTGETDEVICE _IOR('U', 202, int)
-#define HCIUARTSETFLAGS _IOW('U', 203, int)
-#define HCIUARTGETFLAGS _IOR('U', 204, int)
-
-#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
-#define RTC_IRQP_SET32 _IOW('p', 0x0c, compat_ulong_t)
-#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
-#define RTC_EPOCH_SET32 _IOW('p', 0x0e, compat_ulong_t)
-
-static int rtc_ioctl(struct file *file,
- unsigned cmd, void __user *argp)
-{
- unsigned long __user *valp = compat_alloc_user_space(sizeof(*valp));
- int ret;
-
- if (valp == NULL)
- return -EFAULT;
- switch (cmd) {
- case RTC_IRQP_READ32:
- case RTC_EPOCH_READ32:
- ret = do_ioctl(file, (cmd == RTC_IRQP_READ32) ?
- RTC_IRQP_READ : RTC_EPOCH_READ,
- (unsigned long)valp);
- if (ret)
- return ret;
- return convert_in_user(valp, (unsigned int __user *)argp);
- case RTC_IRQP_SET32:
- return do_ioctl(file, RTC_IRQP_SET, (unsigned long)argp);
- case RTC_EPOCH_SET32:
- return do_ioctl(file, RTC_EPOCH_SET, (unsigned long)argp);
- }
-
- return -ENOIOCTLCMD;
-}
-
-/* on ia32 l_start is on a 32-bit boundary */
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
-struct space_resv_32 {
- __s16 l_type;
- __s16 l_whence;
- __s64 l_start __attribute__((packed));
- /* len == 0 means until end of file */
- __s64 l_len __attribute__((packed));
- __s32 l_sysid;
- __u32 l_pid;
- __s32 l_pad[4]; /* reserve area */
-};
-
-#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32)
-#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
-
-/* just account for different alignment */
-static int compat_ioctl_preallocate(struct file *file,
- struct space_resv_32 __user *p32)
-{
- struct space_resv __user *p = compat_alloc_user_space(sizeof(*p));
-
- if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
- copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) ||
- copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) ||
- copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
- copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
- copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
- return -EFAULT;
-
- return ioctl_preallocate(file, p);
-}
-#endif
-
/*
* simple reversible transform to make our table more evenly
* distributed after sorting.
@@ -509,33 +59,7 @@ static int compat_ioctl_preallocate(struct file *file,
 */
#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd),
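A minimal userspace sketch of what XFORM() does to a command number, using the macro's own definition and two arbitrary, purely illustrative sample values: XORing a value with left-shifted copies of itself only moves bits upward, so the mapping stays reversible while the low bits get mixed, which gives the sorted ioctl_pointer[] table that compat_ioctl_check_table() searches a more even key distribution.

#include <stdio.h>
#include <stdint.h>

/* same mixing as the XFORM() macro above, restricted to 32-bit values */
static uint32_t xform(uint32_t i)
{
	return i ^ (i << 27) ^ (i << 17);
}

int main(void)
{
	uint32_t samples[] = { 0x00005401, 0x80045530 };	/* arbitrary cmd numbers */

	for (int n = 0; n < 2; n++)
		printf("%#010x -> %#010x\n", samples[n], xform(samples[n]));
	return 0;
}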
-/* ioctl should not be warned about even if it's not implemented.
- Valid reasons to use this:
- - It is implemented with ->compat_ioctl on some device, but programs
- call it on others too.
- - The ioctl is not implemented in the native kernel, but programs
- call it commonly anyways.
- Most other reasons are not valid. */
-#define IGNORE_IOCTL(cmd) COMPATIBLE_IOCTL(cmd)
-
static unsigned int ioctl_pointer[] = {
-/* compatible ioctls first */
-/* Little t */
-COMPATIBLE_IOCTL(TIOCOUTQ)
-/* Little f */
-COMPATIBLE_IOCTL(FIOCLEX)
-COMPATIBLE_IOCTL(FIONCLEX)
-COMPATIBLE_IOCTL(FIOASYNC)
-COMPATIBLE_IOCTL(FIONBIO)
-COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
-COMPATIBLE_IOCTL(FS_IOC_FIEMAP)
-/* 0x00 */
-COMPATIBLE_IOCTL(FIBMAP)
-COMPATIBLE_IOCTL(FIGETBSZ)
-/* 'X' - originally XFS but some now in the VFS */
-COMPATIBLE_IOCTL(FIFREEZE)
-COMPATIBLE_IOCTL(FITHAW)
-COMPATIBLE_IOCTL(FITRIM)
#ifdef CONFIG_BLOCK
/* Big S */
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
@@ -547,43 +71,10 @@ COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
#endif
-/* Big V (don't complain on serial console) */
-IGNORE_IOCTL(VT_OPENQRY)
-IGNORE_IOCTL(VT_GETMODE)
-/* Little p (/dev/rtc, /dev/envctrl, etc.) */
-COMPATIBLE_IOCTL(RTC_AIE_ON)
-COMPATIBLE_IOCTL(RTC_AIE_OFF)
-COMPATIBLE_IOCTL(RTC_UIE_ON)
-COMPATIBLE_IOCTL(RTC_UIE_OFF)
-COMPATIBLE_IOCTL(RTC_PIE_ON)
-COMPATIBLE_IOCTL(RTC_PIE_OFF)
-COMPATIBLE_IOCTL(RTC_WIE_ON)
-COMPATIBLE_IOCTL(RTC_WIE_OFF)
-COMPATIBLE_IOCTL(RTC_ALM_SET)
-COMPATIBLE_IOCTL(RTC_ALM_READ)
-COMPATIBLE_IOCTL(RTC_RD_TIME)
-COMPATIBLE_IOCTL(RTC_SET_TIME)
-COMPATIBLE_IOCTL(RTC_WKALM_SET)
-COMPATIBLE_IOCTL(RTC_WKALM_RD)
-/*
- * These two are only for the sbus rtc driver, but
- * hwclock tries them on every rtc device first when
- * running on sparc. On other architectures the entries
- * are useless but harmless.
- */
-COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
-COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
-/* Little m */
-COMPATIBLE_IOCTL(MTIOCTOP)
-/* Socket level stuff */
-COMPATIBLE_IOCTL(FIOQSIZE)
#ifdef CONFIG_BLOCK
-/* md calls this on random blockdevs */
-IGNORE_IOCTL(RAID_VERSION)
-/* qemu/qemu-img might call these two on plain files for probing */
-IGNORE_IOCTL(CDROM_DRIVE_STATUS)
-IGNORE_IOCTL(FDGETPRM32)
/* SG stuff */
+COMPATIBLE_IOCTL(SG_IO)
+COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
COMPATIBLE_IOCTL(SG_EMULATED_HOST)
@@ -607,317 +98,6 @@ COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN)
COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN)
#endif
-/* PPP stuff */
-COMPATIBLE_IOCTL(PPPIOCGFLAGS)
-COMPATIBLE_IOCTL(PPPIOCSFLAGS)
-COMPATIBLE_IOCTL(PPPIOCGASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCSASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCGUNIT)
-COMPATIBLE_IOCTL(PPPIOCGRASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCSRASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCGMRU)
-COMPATIBLE_IOCTL(PPPIOCSMRU)
-COMPATIBLE_IOCTL(PPPIOCSMAXCID)
-COMPATIBLE_IOCTL(PPPIOCGXASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCSXASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCXFERUNIT)
-/* PPPIOCSCOMPRESS is translated */
-COMPATIBLE_IOCTL(PPPIOCGNPMODE)
-COMPATIBLE_IOCTL(PPPIOCSNPMODE)
-COMPATIBLE_IOCTL(PPPIOCGDEBUG)
-COMPATIBLE_IOCTL(PPPIOCSDEBUG)
-/* PPPIOCSPASS is translated */
-/* PPPIOCSACTIVE is translated */
-/* PPPIOCGIDLE is translated */
-COMPATIBLE_IOCTL(PPPIOCNEWUNIT)
-COMPATIBLE_IOCTL(PPPIOCATTACH)
-COMPATIBLE_IOCTL(PPPIOCDETACH)
-COMPATIBLE_IOCTL(PPPIOCSMRRU)
-COMPATIBLE_IOCTL(PPPIOCCONNECT)
-COMPATIBLE_IOCTL(PPPIOCDISCONN)
-COMPATIBLE_IOCTL(PPPIOCATTCHAN)
-COMPATIBLE_IOCTL(PPPIOCGCHAN)
-COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
-/* PPPOX */
-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
-/* Big A */
-/* sparc only */
-/* Big Q for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_SEQ_RESET)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_SYNC)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_INFO)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_CTRLRATE)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_GETOUTCOUNT)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_GETINCOUNT)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_PERCMODE)
-COMPATIBLE_IOCTL(SNDCTL_FM_LOAD_INSTR)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_TESTMIDI)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_RESETSAMPLES)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_NRSYNTHS)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_NRMIDIS)
-COMPATIBLE_IOCTL(SNDCTL_MIDI_INFO)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_THRESHOLD)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_MEMAVL)
-COMPATIBLE_IOCTL(SNDCTL_FM_4OP_ENABLE)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_PANIC)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_OUTOFBAND)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_GETTIME)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_ID)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_CONTROL)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_REMOVESAMPLE)
-/* Big T for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_TMR_TIMEBASE)
-COMPATIBLE_IOCTL(SNDCTL_TMR_START)
-COMPATIBLE_IOCTL(SNDCTL_TMR_STOP)
-COMPATIBLE_IOCTL(SNDCTL_TMR_CONTINUE)
-COMPATIBLE_IOCTL(SNDCTL_TMR_TEMPO)
-COMPATIBLE_IOCTL(SNDCTL_TMR_SOURCE)
-COMPATIBLE_IOCTL(SNDCTL_TMR_METRONOME)
-COMPATIBLE_IOCTL(SNDCTL_TMR_SELECT)
-/* Little m for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_MIDI_PRETIME)
-COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUMODE)
-COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUCMD)
-/* Big P for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_DSP_RESET)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SYNC)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SPEED)
-COMPATIBLE_IOCTL(SNDCTL_DSP_STEREO)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETBLKSIZE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_CHANNELS)
-COMPATIBLE_IOCTL(SOUND_PCM_WRITE_FILTER)
-COMPATIBLE_IOCTL(SNDCTL_DSP_POST)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SUBDIVIDE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETFRAGMENT)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETFMTS)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETFMT)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETOSPACE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETISPACE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_NONBLOCK)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETCAPS)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETTRIGGER)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETTRIGGER)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETIPTR)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETOPTR)
-/* SNDCTL_DSP_MAPINBUF, XXX needs translation */
-/* SNDCTL_DSP_MAPOUTBUF, XXX needs translation */
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETSYNCRO)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETDUPLEX)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETODELAY)
-COMPATIBLE_IOCTL(SNDCTL_DSP_PROFILE)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_RATE)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_CHANNELS)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_BITS)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_FILTER)
-/* Big C for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_COPR_RESET)
-COMPATIBLE_IOCTL(SNDCTL_COPR_LOAD)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RDATA)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RCODE)
-COMPATIBLE_IOCTL(SNDCTL_COPR_WDATA)
-COMPATIBLE_IOCTL(SNDCTL_COPR_WCODE)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RUN)
-COMPATIBLE_IOCTL(SNDCTL_COPR_HALT)
-COMPATIBLE_IOCTL(SNDCTL_COPR_SENDMSG)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RCVMSG)
-/* Big M for sound/OSS */
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_VOLUME)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_BASS)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_TREBLE)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_SYNTH)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_PCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_SPEAKER)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_MIC)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_CD)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_IMIX)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_ALTPCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECLEV)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_IGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_OGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE1)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE2)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE3)
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL1))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL2))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL3))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEIN))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEOUT))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_VIDEO))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_RADIO))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_MONITOR))
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_MUTE)
-/* SOUND_MIXER_READ_ENHANCE, same value as READ_MUTE */
-/* SOUND_MIXER_READ_LOUD, same value as READ_MUTE */
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECSRC)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_DEVMASK)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECMASK)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_STEREODEVS)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_CAPS)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_VOLUME)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_BASS)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_TREBLE)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SYNTH)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_PCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SPEAKER)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MIC)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_CD)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IMIX)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_ALTPCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECLEV)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_OGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE1)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE2)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE3)
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL1))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL2))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL3))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEIN))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEOUT))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_VIDEO))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_RADIO))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_MONITOR))
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MUTE)
-/* SOUND_MIXER_WRITE_ENHANCE, same value as WRITE_MUTE */
-/* SOUND_MIXER_WRITE_LOUD, same value as WRITE_MUTE */
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECSRC)
-COMPATIBLE_IOCTL(SOUND_MIXER_INFO)
-COMPATIBLE_IOCTL(SOUND_OLD_MIXER_INFO)
-COMPATIBLE_IOCTL(SOUND_MIXER_ACCESS)
-COMPATIBLE_IOCTL(SOUND_MIXER_AGC)
-COMPATIBLE_IOCTL(SOUND_MIXER_3DSE)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE1)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE2)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE3)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE4)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5)
-COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS)
-COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS)
-COMPATIBLE_IOCTL(OSS_GETVERSION)
-/* Raw devices */
-COMPATIBLE_IOCTL(RAW_SETBIND)
-COMPATIBLE_IOCTL(RAW_GETBIND)
-/* Watchdog */
-COMPATIBLE_IOCTL(WDIOC_GETSUPPORT)
-COMPATIBLE_IOCTL(WDIOC_GETSTATUS)
-COMPATIBLE_IOCTL(WDIOC_GETBOOTSTATUS)
-COMPATIBLE_IOCTL(WDIOC_GETTEMP)
-COMPATIBLE_IOCTL(WDIOC_SETOPTIONS)
-COMPATIBLE_IOCTL(WDIOC_KEEPALIVE)
-COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT)
-COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT)
-COMPATIBLE_IOCTL(WDIOC_SETPRETIMEOUT)
-COMPATIBLE_IOCTL(WDIOC_GETPRETIMEOUT)
-/* Big R */
-COMPATIBLE_IOCTL(RNDGETENTCNT)
-COMPATIBLE_IOCTL(RNDADDTOENTCNT)
-COMPATIBLE_IOCTL(RNDGETPOOL)
-COMPATIBLE_IOCTL(RNDADDENTROPY)
-COMPATIBLE_IOCTL(RNDZAPENTCNT)
-COMPATIBLE_IOCTL(RNDCLEARPOOL)
-/* Bluetooth */
-COMPATIBLE_IOCTL(HCIDEVUP)
-COMPATIBLE_IOCTL(HCIDEVDOWN)
-COMPATIBLE_IOCTL(HCIDEVRESET)
-COMPATIBLE_IOCTL(HCIDEVRESTAT)
-COMPATIBLE_IOCTL(HCIGETDEVLIST)
-COMPATIBLE_IOCTL(HCIGETDEVINFO)
-COMPATIBLE_IOCTL(HCIGETCONNLIST)
-COMPATIBLE_IOCTL(HCIGETCONNINFO)
-COMPATIBLE_IOCTL(HCIGETAUTHINFO)
-COMPATIBLE_IOCTL(HCISETRAW)
-COMPATIBLE_IOCTL(HCISETSCAN)
-COMPATIBLE_IOCTL(HCISETAUTH)
-COMPATIBLE_IOCTL(HCISETENCRYPT)
-COMPATIBLE_IOCTL(HCISETPTYPE)
-COMPATIBLE_IOCTL(HCISETLINKPOL)
-COMPATIBLE_IOCTL(HCISETLINKMODE)
-COMPATIBLE_IOCTL(HCISETACLMTU)
-COMPATIBLE_IOCTL(HCISETSCOMTU)
-COMPATIBLE_IOCTL(HCIBLOCKADDR)
-COMPATIBLE_IOCTL(HCIUNBLOCKADDR)
-COMPATIBLE_IOCTL(HCIINQUIRY)
-COMPATIBLE_IOCTL(HCIUARTSETPROTO)
-COMPATIBLE_IOCTL(HCIUARTGETPROTO)
-COMPATIBLE_IOCTL(HCIUARTGETDEVICE)
-COMPATIBLE_IOCTL(HCIUARTSETFLAGS)
-COMPATIBLE_IOCTL(HCIUARTGETFLAGS)
-COMPATIBLE_IOCTL(RFCOMMCREATEDEV)
-COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
-COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
-COMPATIBLE_IOCTL(RFCOMMGETDEVINFO)
-COMPATIBLE_IOCTL(RFCOMMSTEALDLC)
-/* CAPI */
-COMPATIBLE_IOCTL(CAPI_REGISTER)
-COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER)
-COMPATIBLE_IOCTL(CAPI_GET_VERSION)
-COMPATIBLE_IOCTL(CAPI_GET_SERIAL)
-COMPATIBLE_IOCTL(CAPI_GET_PROFILE)
-COMPATIBLE_IOCTL(CAPI_MANUFACTURER_CMD)
-COMPATIBLE_IOCTL(CAPI_GET_ERRCODE)
-COMPATIBLE_IOCTL(CAPI_INSTALLED)
-COMPATIBLE_IOCTL(CAPI_GET_FLAGS)
-COMPATIBLE_IOCTL(CAPI_SET_FLAGS)
-COMPATIBLE_IOCTL(CAPI_CLR_FLAGS)
-COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT)
-COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT)
-/* Misc. */
-COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */
-COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */
-COMPATIBLE_IOCTL(PCIIOC_CONTROLLER)
-COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO)
-COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM)
-COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE)
-/* hiddev */
-COMPATIBLE_IOCTL(HIDIOCGVERSION)
-COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
-COMPATIBLE_IOCTL(HIDIOCGDEVINFO)
-COMPATIBLE_IOCTL(HIDIOCGSTRING)
-COMPATIBLE_IOCTL(HIDIOCINITREPORT)
-COMPATIBLE_IOCTL(HIDIOCGREPORT)
-COMPATIBLE_IOCTL(HIDIOCSREPORT)
-COMPATIBLE_IOCTL(HIDIOCGREPORTINFO)
-COMPATIBLE_IOCTL(HIDIOCGFIELDINFO)
-COMPATIBLE_IOCTL(HIDIOCGUSAGE)
-COMPATIBLE_IOCTL(HIDIOCSUSAGE)
-COMPATIBLE_IOCTL(HIDIOCGUCODE)
-COMPATIBLE_IOCTL(HIDIOCGFLAG)
-COMPATIBLE_IOCTL(HIDIOCSFLAG)
-COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINDEX)
-COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINFO)
-/* joystick */
-COMPATIBLE_IOCTL(JSIOCGVERSION)
-COMPATIBLE_IOCTL(JSIOCGAXES)
-COMPATIBLE_IOCTL(JSIOCGBUTTONS)
-COMPATIBLE_IOCTL(JSIOCGNAME(0))
-
-/* fat 'r' ioctls. These are handled by fat with ->compat_ioctl,
- but we don't want warnings on other file systems. So declare
- them as compatible here. */
-#define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct compat_dirent[2])
-#define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct compat_dirent[2])
-
-IGNORE_IOCTL(VFAT_IOCTL_READDIR_BOTH32)
-IGNORE_IOCTL(VFAT_IOCTL_READDIR_SHORT32)
-
-#ifdef CONFIG_SPARC
-/* Sparc framebuffers, handled in sbusfb_compat_ioctl() */
-IGNORE_IOCTL(FBIOGTYPE)
-IGNORE_IOCTL(FBIOSATTR)
-IGNORE_IOCTL(FBIOGATTR)
-IGNORE_IOCTL(FBIOSVIDEO)
-IGNORE_IOCTL(FBIOGVIDEO)
-IGNORE_IOCTL(FBIOSCURPOS)
-IGNORE_IOCTL(FBIOGCURPOS)
-IGNORE_IOCTL(FBIOGCURMAX)
-IGNORE_IOCTL(FBIOPUTCMAP32)
-IGNORE_IOCTL(FBIOGETCMAP32)
-IGNORE_IOCTL(FBIOSCURSOR32)
-IGNORE_IOCTL(FBIOGCURSOR32)
-#endif
};
/*
@@ -930,51 +110,12 @@ IGNORE_IOCTL(FBIOGCURSOR32)
static long do_ioctl_trans(unsigned int cmd,
unsigned long arg, struct file *file)
{
- void __user *argp = compat_ptr(arg);
-
- switch (cmd) {
- case PPPIOCGIDLE32:
- return ppp_gidle(file, cmd, argp);
- case PPPIOCSCOMPRESS32:
- return ppp_scompress(file, cmd, argp);
- case PPPIOCSPASS32:
- case PPPIOCSACTIVE32:
- return ppp_sock_fprog_ioctl_trans(file, cmd, argp);
-#ifdef CONFIG_BLOCK
- case SG_IO:
- return sg_ioctl_trans(file, cmd, argp);
- case SG_GET_REQUEST_TABLE:
- return sg_grt_trans(file, cmd, argp);
- case MTIOCGET32:
- case MTIOCPOS32:
- return mt_ioctl_trans(file, cmd, argp);
-#endif
- /* Not implemented in the native kernel */
- case RTC_IRQP_READ32:
- case RTC_IRQP_SET32:
- case RTC_EPOCH_READ32:
- case RTC_EPOCH_SET32:
- return rtc_ioctl(file, cmd, argp);
- }
-
- /*
- * These take an integer instead of a pointer as 'arg',
- * so we must not do a compat_ptr() translation.
- */
- switch (cmd) {
- /* RAID */
- case HOT_REMOVE_DISK:
- case HOT_ADD_DISK:
- case SET_DISK_FAULTY:
- case SET_BITMAP_FILE:
- return vfs_ioctl(file, cmd, arg);
- }
-
return -ENOIOCTLCMD;
}
static int compat_ioctl_check_table(unsigned int xcmd)
{
+#ifdef CONFIG_BLOCK
int i;
const int max = ARRAY_SIZE(ioctl_pointer) - 1;
@@ -993,6 +134,9 @@ static int compat_ioctl_check_table(unsigned int xcmd)
i--;
return ioctl_pointer[i] == xcmd;
+#else
+ return 0;
+#endif
}
COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
@@ -1009,44 +153,62 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
if (error)
goto out_fput;
- /*
- * To allow the compat_ioctl handlers to be self contained
- * we need to check the common ioctls here first.
- * Just handle them with the standard handlers below.
- */
switch (cmd) {
+ /* these are never seen by ->ioctl(), no argument or int argument */
case FIOCLEX:
case FIONCLEX:
+ case FIFREEZE:
+ case FITHAW:
+ case FICLONE:
+ goto do_ioctl;
+ /* these are never seen by ->ioctl(), pointer argument */
case FIONBIO:
case FIOASYNC:
case FIOQSIZE:
- break;
-
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
+ case FS_IOC_FIEMAP:
+ case FIGETBSZ:
+ case FICLONERANGE:
+ case FIDEDUPERANGE:
+ goto found_handler;
+ /*
+ * The next group is the stuff handled inside file_ioctl().
+ * For regular files these never reach ->ioctl(); for
+ * devices, sockets, etc. they do and one (FIONREAD) is
+ * even accepted in some cases. In all those cases
+ * argument has the same type, so we can handle these
+ * here, shunting them towards do_vfs_ioctl().
+ * ->compat_ioctl() will never see any of those.
+ */
+ /* pointer argument, never actually handled by ->ioctl() */
+ case FIBMAP:
+ goto found_handler;
+ /* handled by some ->ioctl(); always a pointer to int */
+ case FIONREAD:
+ goto found_handler;
+ /* these get messy on amd64 due to alignment differences */
+#if defined(CONFIG_X86_64)
case FS_IOC_RESVSP_32:
case FS_IOC_RESVSP64_32:
- error = compat_ioctl_preallocate(f.file, compat_ptr(arg));
+ error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
+ goto out_fput;
+ case FS_IOC_UNRESVSP_32:
+ case FS_IOC_UNRESVSP64_32:
+ error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
+ compat_ptr(arg));
+ goto out_fput;
+ case FS_IOC_ZERO_RANGE_32:
+ error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
+ compat_ptr(arg));
goto out_fput;
#else
case FS_IOC_RESVSP:
case FS_IOC_RESVSP64:
- error = ioctl_preallocate(f.file, compat_ptr(arg));
- goto out_fput;
+ case FS_IOC_UNRESVSP:
+ case FS_IOC_UNRESVSP64:
+ case FS_IOC_ZERO_RANGE:
+ goto found_handler;
#endif
- case FICLONE:
- case FICLONERANGE:
- case FIDEDUPERANGE:
- case FS_IOC_FIEMAP:
- goto do_ioctl;
-
- case FIBMAP:
- case FIGETBSZ:
- case FIONREAD:
- if (S_ISREG(file_inode(f.file)->i_mode))
- break;
- /*FALL THROUGH*/
-
default:
if (f.file->f_op->compat_ioctl) {
error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
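The FS_IOC_RESVSP_32 cases above exist because the i386 ABI only aligns 64-bit fields to 4 bytes, so l_start in the removed struct space_resv_32 sits at a different offset than in the native 64-bit struct. A minimal userspace sketch, assuming an x86-64 build and using illustrative struct names (space_resv_native mirrors the natural 64-bit layout, space_resv_ia32 mirrors the packed 32-bit one), prints the differing offsets and sizes:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct space_resv_native {	/* natural x86-64 alignment: l_start at offset 8 */
	int16_t  l_type;
	int16_t  l_whence;
	int64_t  l_start;
	int64_t  l_len;
	int32_t  l_sysid;
	uint32_t l_pid;
	int32_t  l_pad[4];
};

struct space_resv_ia32 {	/* i386-style layout: packed 64-bit fields, l_start at offset 4 */
	int16_t  l_type;
	int16_t  l_whence;
	int64_t  l_start __attribute__((packed));
	int64_t  l_len __attribute__((packed));
	int32_t  l_sysid;
	uint32_t l_pid;
	int32_t  l_pad[4];
};

int main(void)
{
	printf("native: l_start at %zu, sizeof %zu\n",
	       offsetof(struct space_resv_native, l_start),
	       sizeof(struct space_resv_native));
	printf("ia32:   l_start at %zu, sizeof %zu\n",
	       offsetof(struct space_resv_ia32, l_start),
	       sizeof(struct space_resv_ia32));
	return 0;
}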
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index f752d83a9c44..22dce2d35a4b 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -20,12 +20,21 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+struct configfs_fragment {
+ atomic_t frag_count;
+ struct rw_semaphore frag_sem;
+ bool frag_dead;
+};
+
+void put_fragment(struct configfs_fragment *);
+struct configfs_fragment *get_fragment(struct configfs_fragment *);
+
struct configfs_dirent {
atomic_t s_count;
int s_dependent_count;
struct list_head s_sibling;
struct list_head s_children;
- struct list_head s_links;
+ int s_links;
void * s_element;
int s_type;
umode_t s_mode;
@@ -34,6 +43,7 @@ struct configfs_dirent {
#ifdef CONFIG_LOCKDEP
int s_depth;
#endif
+ struct configfs_fragment *s_frag;
};
#define CONFIGFS_ROOT 0x0001
@@ -56,13 +66,13 @@ extern struct kmem_cache *configfs_dir_cachep;
extern int configfs_is_root(struct config_item *item);
extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
-extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct inode *));
+extern struct inode *configfs_create(struct dentry *, umode_t mode);
extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
extern int configfs_create_bin_file(struct config_item *,
const struct configfs_bin_attribute *);
-extern int configfs_make_dirent(struct configfs_dirent *,
- struct dentry *, void *, umode_t, int);
+extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
+ void *, umode_t, int, struct configfs_fragment *);
extern int configfs_dirent_is_ready(struct configfs_dirent *);
extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
@@ -74,7 +84,6 @@ extern int configfs_setattr(struct dentry *dentry, struct iattr *iattr);
extern struct dentry *configfs_pin_fs(void);
extern void configfs_release_fs(void);
-extern struct rw_semaphore configfs_rename_sem;
extern const struct file_operations configfs_dir_operations;
extern const struct file_operations configfs_file_operations;
extern const struct file_operations configfs_bin_file_operations;
@@ -87,14 +96,8 @@ extern int configfs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname);
extern int configfs_unlink(struct inode *dir, struct dentry *dentry);
-struct configfs_symlink {
- struct list_head sl_list;
- struct config_item *sl_target;
-};
-
-extern int configfs_create_link(struct configfs_symlink *sl,
- struct dentry *parent,
- struct dentry *dentry);
+int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
+ struct dentry *dentry, char *body);
static inline struct config_item * to_item(struct dentry * dentry)
{
@@ -122,11 +125,7 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry
spin_lock(&dentry->d_lock);
if (!d_unhashed(dentry)) {
struct configfs_dirent * sd = dentry->d_fsdata;
- if (sd->s_type & CONFIGFS_ITEM_LINK) {
- struct configfs_symlink * sl = sd->s_element;
- item = config_item_get(sl->sl_target);
- } else
- item = config_item_get(sd->s_element);
+ item = config_item_get(sd->s_element);
}
spin_unlock(&dentry->d_lock);
@@ -137,6 +136,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd)
{
if (!(sd->s_type & CONFIGFS_ROOT)) {
kfree(sd->s_iattr);
+ put_fragment(sd->s_frag);
kmem_cache_free(configfs_dir_cachep, sd);
}
}
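The configfs_fragment added above is a reference-counted liveness flag: every dirent created by one mkdir/register shares a fragment, rmdir or unregister marks it dead under the write side of frag_sem, and the attribute show/store/read/write paths later in this patch take the read side and return -ENOENT if the fragment has already died. A rough userspace analogue of that pattern, assuming only pthreads and with illustrative names rather than configfs API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fragment {
	int refcount;			/* atomic_t in the kernel version */
	pthread_rwlock_t sem;		/* stands in for the rw_semaphore */
	bool dead;
};

static struct fragment *new_fragment(void)
{
	struct fragment *p = malloc(sizeof(*p));

	if (p) {
		p->refcount = 1;
		pthread_rwlock_init(&p->sem, NULL);
		p->dead = false;
	}
	return p;
}

/* attribute callback path: only call back into the item while it is alive */
static int show_attr(struct fragment *frag)
{
	int ret = -1;			/* -ENOENT in the kernel version */

	pthread_rwlock_rdlock(&frag->sem);
	if (!frag->dead)
		ret = printf("item is still alive\n");
	pthread_rwlock_unlock(&frag->sem);
	return ret;
}

int main(void)
{
	struct fragment *frag = new_fragment();

	if (!frag)
		return 1;
	show_attr(frag);			/* prints */

	pthread_rwlock_wrlock(&frag->sem);	/* rmdir()/unregister path */
	frag->dead = true;
	pthread_rwlock_unlock(&frag->sem);

	show_attr(frag);			/* refuses the callback */

	pthread_rwlock_destroy(&frag->sem);
	free(frag);
	return 0;
}

Build with cc -pthread; the point is only the read-lock-then-check-dead gate, which is what lets unregister proceed without waiting for every outstanding attribute open.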
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 92112915de8e..cf7b7e1d5bd7 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -22,7 +22,6 @@
#include <linux/configfs.h>
#include "configfs_internal.h"
-DECLARE_RWSEM(configfs_rename_sem);
/*
* Protects mutations of configfs_dirent linkage together with proper i_mutex
* Also protects mutations of symlinks linkage to target configfs_dirent
@@ -151,11 +150,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
#endif /* CONFIG_LOCKDEP */
+static struct configfs_fragment *new_fragment(void)
+{
+ struct configfs_fragment *p;
+
+ p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
+ if (p) {
+ atomic_set(&p->frag_count, 1);
+ init_rwsem(&p->frag_sem);
+ p->frag_dead = false;
+ }
+ return p;
+}
+
+void put_fragment(struct configfs_fragment *frag)
+{
+ if (frag && atomic_dec_and_test(&frag->frag_count))
+ kfree(frag);
+}
+
+struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
+{
+ if (likely(frag))
+ atomic_inc(&frag->frag_count);
+ return frag;
+}
+
/*
* Allocates a new configfs_dirent and links it to the parent configfs_dirent
*/
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
- void *element, int type)
+ void *element, int type,
+ struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
@@ -164,7 +190,6 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
return ERR_PTR(-ENOMEM);
atomic_set(&sd->s_count, 1);
- INIT_LIST_HEAD(&sd->s_links);
INIT_LIST_HEAD(&sd->s_children);
sd->s_element = element;
sd->s_type = type;
@@ -175,6 +200,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
kmem_cache_free(configfs_dir_cachep, sd);
return ERR_PTR(-ENOENT);
}
+ sd->s_frag = get_fragment(frag);
list_add(&sd->s_sibling, &parent_sd->s_children);
spin_unlock(&configfs_dirent_lock);
@@ -209,11 +235,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
int configfs_make_dirent(struct configfs_dirent * parent_sd,
struct dentry * dentry, void * element,
- umode_t mode, int type)
+ umode_t mode, int type, struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
- sd = configfs_new_dirent(parent_sd, element, type);
+ sd = configfs_new_dirent(parent_sd, element, type, frag);
if (IS_ERR(sd))
return PTR_ERR(sd);
@@ -225,30 +251,16 @@ int configfs_make_dirent(struct configfs_dirent * parent_sd,
return 0;
}
-static void init_dir(struct inode * inode)
+static void configfs_remove_dirent(struct dentry *dentry)
{
- inode->i_op = &configfs_dir_inode_operations;
- inode->i_fop = &configfs_dir_operations;
-
- /* directory inodes start off with i_nlink == 2 (for "." entry) */
- inc_nlink(inode);
-}
-
-static void configfs_init_file(struct inode * inode)
-{
- inode->i_size = PAGE_SIZE;
- inode->i_fop = &configfs_file_operations;
-}
-
-static void configfs_init_bin_file(struct inode *inode)
-{
- inode->i_size = 0;
- inode->i_fop = &configfs_bin_file_operations;
-}
+ struct configfs_dirent *sd = dentry->d_fsdata;
-static void init_symlink(struct inode * inode)
-{
- inode->i_op = &configfs_symlink_inode_operations;
+ if (!sd)
+ return;
+ spin_lock(&configfs_dirent_lock);
+ list_del_init(&sd->s_sibling);
+ spin_unlock(&configfs_dirent_lock);
+ configfs_put(sd);
}
/**
@@ -260,11 +272,13 @@ static void init_symlink(struct inode * inode)
* until it is validated by configfs_dir_set_ready()
*/
-static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
+static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int error;
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
struct dentry *p = dentry->d_parent;
+ struct inode *inode;
BUG_ON(!item);
@@ -273,25 +287,30 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
return error;
error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
- CONFIGFS_DIR | CONFIGFS_USET_CREATING);
+ CONFIGFS_DIR | CONFIGFS_USET_CREATING,
+ frag);
if (unlikely(error))
return error;
configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
- error = configfs_create(dentry, mode, init_dir);
- if (!error) {
- inc_nlink(d_inode(p));
- item->ci_dentry = dentry;
- } else {
- struct configfs_dirent *sd = dentry->d_fsdata;
- if (sd) {
- spin_lock(&configfs_dirent_lock);
- list_del_init(&sd->s_sibling);
- spin_unlock(&configfs_dirent_lock);
- configfs_put(sd);
- }
- }
- return error;
+ inode = configfs_create(dentry, mode);
+ if (IS_ERR(inode))
+ goto out_remove;
+
+ inode->i_op = &configfs_dir_inode_operations;
+ inode->i_fop = &configfs_dir_operations;
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ d_instantiate(dentry, inode);
+ /* already hashed */
+ dget(dentry); /* pin directory dentries in core */
+ inc_nlink(d_inode(p));
+ item->ci_dentry = dentry;
+ return 0;
+
+out_remove:
+ configfs_remove_dirent(dentry);
+ return PTR_ERR(inode);
}
/*
@@ -332,40 +351,40 @@ int configfs_dirent_is_ready(struct configfs_dirent *sd)
return ret;
}
-int configfs_create_link(struct configfs_symlink *sl,
- struct dentry *parent,
- struct dentry *dentry)
+int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
+ struct dentry *dentry, char *body)
{
int err = 0;
umode_t mode = S_IFLNK | S_IRWXUGO;
+ struct configfs_dirent *p = parent->d_fsdata;
+ struct inode *inode;
- err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode,
- CONFIGFS_ITEM_LINK);
- if (!err) {
- err = configfs_create(dentry, mode, init_symlink);
- if (err) {
- struct configfs_dirent *sd = dentry->d_fsdata;
- if (sd) {
- spin_lock(&configfs_dirent_lock);
- list_del_init(&sd->s_sibling);
- spin_unlock(&configfs_dirent_lock);
- configfs_put(sd);
- }
- }
- }
- return err;
+ err = configfs_make_dirent(p, dentry, target, mode, CONFIGFS_ITEM_LINK,
+ p->s_frag);
+ if (err)
+ return err;
+
+ inode = configfs_create(dentry, mode);
+ if (IS_ERR(inode))
+ goto out_remove;
+
+ inode->i_link = body;
+ inode->i_op = &configfs_symlink_inode_operations;
+ d_instantiate(dentry, inode);
+ dget(dentry); /* pin link dentries in core */
+ return 0;
+
+out_remove:
+ configfs_remove_dirent(dentry);
+ return PTR_ERR(inode);
}
static void remove_dir(struct dentry * d)
{
struct dentry * parent = dget(d->d_parent);
- struct configfs_dirent * sd;
- sd = d->d_fsdata;
- spin_lock(&configfs_dirent_lock);
- list_del_init(&sd->s_sibling);
- spin_unlock(&configfs_dirent_lock);
- configfs_put(sd);
+ configfs_remove_dirent(d);
+
if (d_really_is_positive(d))
simple_rmdir(d_inode(parent),d);
@@ -406,20 +425,27 @@ static void configfs_remove_dir(struct config_item * item)
static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * dentry)
{
struct configfs_attribute * attr = sd->s_element;
- int error;
+ struct inode *inode;
spin_lock(&configfs_dirent_lock);
dentry->d_fsdata = configfs_get(sd);
sd->s_dentry = dentry;
spin_unlock(&configfs_dirent_lock);
- error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
- (sd->s_type & CONFIGFS_ITEM_BIN_ATTR) ?
- configfs_init_bin_file :
- configfs_init_file);
- if (error)
+ inode = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG);
+ if (IS_ERR(inode)) {
configfs_put(sd);
- return error;
+ return PTR_ERR(inode);
+ }
+ if (sd->s_type & CONFIGFS_ITEM_BIN_ATTR) {
+ inode->i_size = 0;
+ inode->i_fop = &configfs_bin_file_operations;
+ } else {
+ inode->i_size = PAGE_SIZE;
+ inode->i_fop = &configfs_file_operations;
+ }
+ d_add(dentry, inode);
+ return 0;
}
static struct dentry * configfs_lookup(struct inode *dir,
@@ -489,7 +515,7 @@ static int configfs_detach_prep(struct dentry *dentry, struct dentry **wait)
parent_sd->s_type |= CONFIGFS_USET_DROPPING;
ret = -EBUSY;
- if (!list_empty(&parent_sd->s_links))
+ if (parent_sd->s_links)
goto out;
ret = 0;
@@ -599,7 +625,8 @@ static int populate_attrs(struct config_item *item)
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry);
+ struct dentry *dentry,
+ struct configfs_fragment *frag);
static void configfs_detach_group(struct config_item *item);
static void detach_groups(struct config_group *group)
@@ -647,7 +674,8 @@ static void detach_groups(struct config_group *group)
* try using vfs_mkdir. Just a thought.
*/
static int create_default_group(struct config_group *parent_group,
- struct config_group *group)
+ struct config_group *group,
+ struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
@@ -663,7 +691,7 @@ static int create_default_group(struct config_group *parent_group,
d_add(child, NULL);
ret = configfs_attach_group(&parent_group->cg_item,
- &group->cg_item, child);
+ &group->cg_item, child, frag);
if (!ret) {
sd = child->d_fsdata;
sd->s_type |= CONFIGFS_USET_DEFAULT;
@@ -677,13 +705,14 @@ static int create_default_group(struct config_group *parent_group,
return ret;
}
-static int populate_groups(struct config_group *group)
+static int populate_groups(struct config_group *group,
+ struct configfs_fragment *frag)
{
struct config_group *new_group;
int ret = 0;
list_for_each_entry(new_group, &group->default_groups, group_entry) {
- ret = create_default_group(group, new_group);
+ ret = create_default_group(group, new_group, frag);
if (ret) {
detach_groups(group);
break;
@@ -797,11 +826,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g
*/
static int configfs_attach_item(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int ret;
- ret = configfs_create_dir(item, dentry);
+ ret = configfs_create_dir(item, dentry, frag);
if (!ret) {
ret = populate_attrs(item);
if (ret) {
@@ -831,12 +861,13 @@ static void configfs_detach_item(struct config_item *item)
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
- ret = configfs_attach_item(parent_item, item, dentry);
+ ret = configfs_attach_item(parent_item, item, dentry, frag);
if (!ret) {
sd = dentry->d_fsdata;
sd->s_type |= CONFIGFS_USET_DIR;
@@ -852,7 +883,7 @@ static int configfs_attach_group(struct config_item *parent_item,
*/
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
configfs_adjust_dir_dirent_depth_before_populate(sd);
- ret = populate_groups(to_config_group(item));
+ ret = populate_groups(to_config_group(item), frag);
if (ret) {
configfs_detach_item(item);
d_inode(dentry)->i_flags |= S_DEAD;
@@ -1247,6 +1278,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct configfs_dirent *sd;
const struct config_item_type *type;
struct module *subsys_owner = NULL, *new_item_owner = NULL;
+ struct configfs_fragment *frag;
char *name;
sd = dentry->d_parent->d_fsdata;
@@ -1265,6 +1297,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
goto out;
}
+ frag = new_fragment();
+ if (!frag) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
@@ -1367,9 +1405,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
spin_unlock(&configfs_dirent_lock);
if (group)
- ret = configfs_attach_group(parent_item, item, dentry);
+ ret = configfs_attach_group(parent_item, item, dentry, frag);
else
- ret = configfs_attach_item(parent_item, item, dentry);
+ ret = configfs_attach_item(parent_item, item, dentry, frag);
spin_lock(&configfs_dirent_lock);
sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
@@ -1406,6 +1444,7 @@ out_put:
* reference.
*/
config_item_put(parent_item);
+ put_fragment(frag);
out:
return ret;
@@ -1417,6 +1456,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
struct config_item *item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
+ struct configfs_fragment *frag;
struct module *subsys_owner = NULL, *dead_item_owner = NULL;
int ret;
@@ -1474,6 +1514,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
}
} while (ret == -EAGAIN);
+ frag = sd->s_frag;
+ if (down_write_killable(&frag->frag_sem)) {
+ spin_lock(&configfs_dirent_lock);
+ configfs_detach_rollback(dentry);
+ spin_unlock(&configfs_dirent_lock);
+ return -EINTR;
+ }
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
+
/* Get a working ref for the duration of this function */
item = configfs_get_config_item(dentry);
@@ -1523,44 +1573,6 @@ const struct inode_operations configfs_root_inode_operations = {
.setattr = configfs_setattr,
};
-#if 0
-int configfs_rename_dir(struct config_item * item, const char *new_name)
-{
- int error = 0;
- struct dentry * new_dentry, * parent;
-
- if (!strcmp(config_item_name(item), new_name))
- return -EINVAL;
-
- if (!item->parent)
- return -EINVAL;
-
- down_write(&configfs_rename_sem);
- parent = item->parent->dentry;
-
- inode_lock(d_inode(parent));
-
- new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
- if (!IS_ERR(new_dentry)) {
- if (d_really_is_negative(new_dentry)) {
- error = config_item_set_name(item, "%s", new_name);
- if (!error) {
- d_add(new_dentry, NULL);
- d_move(item->dentry, new_dentry);
- }
- else
- d_delete(new_dentry);
- } else
- error = -EEXIST;
- dput(new_dentry);
- }
- inode_unlock(d_inode(parent));
- up_write(&configfs_rename_sem);
-
- return error;
-}
-#endif
-
static int configfs_dir_open(struct inode *inode, struct file *file)
{
struct dentry * dentry = file->f_path.dentry;
@@ -1574,7 +1586,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
*/
err = -ENOENT;
if (configfs_dirent_is_ready(parent_sd)) {
- file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
+ file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
if (IS_ERR(file->private_data))
err = PTR_ERR(file->private_data);
else
@@ -1732,8 +1744,13 @@ int configfs_register_group(struct config_group *parent_group,
{
struct configfs_subsystem *subsys = parent_group->cg_subsys;
struct dentry *parent;
+ struct configfs_fragment *frag;
int ret;
+ frag = new_fragment();
+ if (!frag)
+ return -ENOMEM;
+
mutex_lock(&subsys->su_mutex);
link_group(parent_group, group);
mutex_unlock(&subsys->su_mutex);
@@ -1741,7 +1758,7 @@ int configfs_register_group(struct config_group *parent_group,
parent = parent_group->cg_item.ci_dentry;
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
- ret = create_default_group(parent_group, group);
+ ret = create_default_group(parent_group, group, frag);
if (ret)
goto err_out;
@@ -1749,12 +1766,14 @@ int configfs_register_group(struct config_group *parent_group,
configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
spin_unlock(&configfs_dirent_lock);
inode_unlock(d_inode(parent));
+ put_fragment(frag);
return 0;
err_out:
inode_unlock(d_inode(parent));
mutex_lock(&subsys->su_mutex);
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
+ put_fragment(frag);
return ret;
}
EXPORT_SYMBOL(configfs_register_group);
@@ -1770,16 +1789,12 @@ void configfs_unregister_group(struct config_group *group)
struct configfs_subsystem *subsys = group->cg_subsys;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_fragment *frag = sd->s_frag;
- mutex_lock(&subsys->su_mutex);
- if (!group->cg_item.ci_parent->ci_group) {
- /*
- * The parent has already been unlinked and detached
- * due to a rmdir.
- */
- goto unlink_group;
- }
- mutex_unlock(&subsys->su_mutex);
+ down_write(&frag->frag_sem);
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
spin_lock(&configfs_dirent_lock);
@@ -1796,7 +1811,6 @@ void configfs_unregister_group(struct config_group *group)
dput(dentry);
mutex_lock(&subsys->su_mutex);
-unlink_group:
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
}
@@ -1853,10 +1867,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
struct dentry *dentry;
struct dentry *root;
struct configfs_dirent *sd;
+ struct configfs_fragment *frag;
+
+ frag = new_fragment();
+ if (!frag)
+ return -ENOMEM;
root = configfs_pin_fs();
- if (IS_ERR(root))
+ if (IS_ERR(root)) {
+ put_fragment(frag);
return PTR_ERR(root);
+ }
if (!group->cg_item.ci_name)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
@@ -1872,7 +1893,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
d_add(dentry, NULL);
err = configfs_attach_group(sd->s_element, &group->cg_item,
- dentry);
+ dentry, frag);
if (err) {
BUG_ON(d_inode(dentry));
d_drop(dentry);
@@ -1890,6 +1911,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
unlink_group(group);
configfs_release_fs();
}
+ put_fragment(frag);
return err;
}
@@ -1899,12 +1921,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
struct config_group *group = &subsys->su_group;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *root = dentry->d_sb->s_root;
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_fragment *frag = sd->s_frag;
if (dentry->d_parent != root) {
pr_err("Tried to unregister non-subsystem!\n");
return;
}
+ down_write(&frag->frag_sem);
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
+
inode_lock_nested(d_inode(root),
I_MUTEX_PARENT);
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 61e4db4390a1..fb65b706cc0d 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -39,40 +39,44 @@ struct configfs_buffer {
bool write_in_progress;
char *bin_buffer;
int bin_buffer_size;
+ int cb_max_size;
+ struct config_item *item;
+ struct module *owner;
+ union {
+ struct configfs_attribute *attr;
+ struct configfs_bin_attribute *bin_attr;
+ };
};
+static inline struct configfs_fragment *to_frag(struct file *file)
+{
+ struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;
-/**
- * fill_read_buffer - allocate and fill buffer from item.
- * @dentry: dentry pointer.
- * @buffer: data buffer for file.
- *
- * Allocate @buffer->page, if it hasn't been already, then call the
- * config_item's show() method to fill the buffer with this attribute's
- * data.
- * This is called only once, on the file's first read.
- */
-static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
+ return sd->s_frag;
+}
+
+static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
{
- struct configfs_attribute * attr = to_attr(dentry);
- struct config_item * item = to_item(dentry->d_parent);
- int ret = 0;
- ssize_t count;
+ struct configfs_fragment *frag = to_frag(file);
+ ssize_t count = -ENOENT;
if (!buffer->page)
buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
if (!buffer->page)
return -ENOMEM;
- count = attr->show(item, buffer->page);
-
- BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
- if (count >= 0) {
- buffer->needs_read_fill = 0;
- buffer->count = count;
- } else
- ret = count;
- return ret;
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ count = buffer->attr->show(buffer->item, buffer->page);
+ up_read(&frag->frag_sem);
+
+ if (count < 0)
+ return count;
+ if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
+ return -EIO;
+ buffer->needs_read_fill = 0;
+ buffer->count = count;
+ return 0;
}
/**
@@ -97,12 +101,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
static ssize_t
configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- struct configfs_buffer * buffer = file->private_data;
+ struct configfs_buffer *buffer = file->private_data;
ssize_t retval = 0;
mutex_lock(&buffer->mutex);
if (buffer->needs_read_fill) {
- if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
+ retval = fill_read_buffer(file, buffer);
+ if (retval)
goto out;
}
pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
@@ -138,10 +143,8 @@ static ssize_t
configfs_read_bin_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct configfs_fragment *frag = to_frag(file);
struct configfs_buffer *buffer = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- struct config_item *item = to_item(dentry->d_parent);
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
ssize_t retval = 0;
ssize_t len = min_t(size_t, count, PAGE_SIZE);
@@ -156,14 +159,19 @@ configfs_read_bin_file(struct file *file, char __user *buf,
if (buffer->needs_read_fill) {
/* perform first read with buf == NULL to get extent */
- len = bin_attr->read(item, NULL, 0);
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ len = buffer->bin_attr->read(buffer->item, NULL, 0);
+ else
+ len = -ENOENT;
+ up_read(&frag->frag_sem);
if (len <= 0) {
retval = len;
goto out;
}
/* do not exceed the maximum value */
- if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) {
+ if (buffer->cb_max_size && len > buffer->cb_max_size) {
retval = -EFBIG;
goto out;
}
@@ -176,7 +184,13 @@ configfs_read_bin_file(struct file *file, char __user *buf,
buffer->bin_buffer_size = len;
/* perform second read to fill buffer */
- len = bin_attr->read(item, buffer->bin_buffer, len);
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ len = buffer->bin_attr->read(buffer->item,
+ buffer->bin_buffer, len);
+ else
+ len = -ENOENT;
+ up_read(&frag->frag_sem);
if (len < 0) {
retval = len;
vfree(buffer->bin_buffer);
@@ -226,25 +240,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
return error ? -EFAULT : count;
}
-
-/**
- * flush_write_buffer - push buffer to config_item.
- * @dentry: dentry to the attribute
- * @buffer: data buffer for file.
- * @count: number of bytes
- *
- * Get the correct pointers for the config_item and the attribute we're
- * dealing with, then call the store() method for the attribute,
- * passing the buffer that we acquired in fill_write_buffer().
- */
-
static int
-flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
+flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
{
- struct configfs_attribute * attr = to_attr(dentry);
- struct config_item * item = to_item(dentry->d_parent);
-
- return attr->store(item, buffer->page, count);
+ struct configfs_fragment *frag = to_frag(file);
+ int res = -ENOENT;
+
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ res = buffer->attr->store(buffer->item, buffer->page, count);
+ up_read(&frag->frag_sem);
+ return res;
}
@@ -268,13 +274,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size
static ssize_t
configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- struct configfs_buffer * buffer = file->private_data;
+ struct configfs_buffer *buffer = file->private_data;
ssize_t len;
mutex_lock(&buffer->mutex);
len = fill_write_buffer(buffer, buf, count);
if (len > 0)
- len = flush_write_buffer(file->f_path.dentry, buffer, len);
+ len = flush_write_buffer(file, buffer, len);
if (len > 0)
*ppos += len;
mutex_unlock(&buffer->mutex);
@@ -299,8 +305,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct configfs_buffer *buffer = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
void *tbuf = NULL;
ssize_t len;
@@ -316,8 +320,8 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
/* buffer grows? */
if (*ppos + count > buffer->bin_buffer_size) {
- if (bin_attr->cb_max_size &&
- *ppos + count > bin_attr->cb_max_size) {
+ if (buffer->cb_max_size &&
+ *ppos + count > buffer->cb_max_size) {
len = -EFBIG;
goto out;
}
@@ -349,31 +353,51 @@ out:
return len;
}
-static int check_perm(struct inode * inode, struct file * file, int type)
+static int __configfs_open_file(struct inode *inode, struct file *file, int type)
{
- struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
- struct configfs_attribute * attr = to_attr(file->f_path.dentry);
- struct configfs_bin_attribute *bin_attr = NULL;
- struct configfs_buffer * buffer;
- struct configfs_item_operations * ops = NULL;
- int error = 0;
+ struct dentry *dentry = file->f_path.dentry;
+ struct configfs_fragment *frag = to_frag(file);
+ struct configfs_attribute *attr;
+ struct configfs_buffer *buffer;
+ int error;
- if (!item || !attr)
- goto Einval;
+ error = -ENOMEM;
+ buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
+ if (!buffer)
+ goto out;
- if (type & CONFIGFS_ITEM_BIN_ATTR)
- bin_attr = to_bin_attr(file->f_path.dentry);
+ error = -ENOENT;
+ down_read(&frag->frag_sem);
+ if (unlikely(frag->frag_dead))
+ goto out_free_buffer;
- /* Grab the module reference for this attribute if we have one */
- if (!try_module_get(attr->ca_owner)) {
- error = -ENODEV;
- goto Done;
+ error = -EINVAL;
+ buffer->item = to_item(dentry->d_parent);
+ if (!buffer->item)
+ goto out_free_buffer;
+
+ attr = to_attr(dentry);
+ if (!attr)
+ goto out_put_item;
+
+ if (type & CONFIGFS_ITEM_BIN_ATTR) {
+ buffer->bin_attr = to_bin_attr(dentry);
+ buffer->cb_max_size = buffer->bin_attr->cb_max_size;
+ } else {
+ buffer->attr = attr;
}
- if (item->ci_type)
- ops = item->ci_type->ct_item_ops;
- else
- goto Eaccess;
+ buffer->owner = attr->ca_owner;
+ /* Grab the module reference for this attribute if we have one */
+ error = -ENODEV;
+ if (!try_module_get(buffer->owner))
+ goto out_put_item;
+
+ error = -EACCES;
+ if (!buffer->item->ci_type)
+ goto out_put_module;
+
+ buffer->ops = buffer->item->ci_type->ct_item_ops;
/* File needs write support.
* The inode's perms must say it's ok,
@@ -381,13 +405,11 @@ static int check_perm(struct inode * inode, struct file * file, int type)
*/
if (file->f_mode & FMODE_WRITE) {
if (!(inode->i_mode & S_IWUGO))
- goto Eaccess;
-
+ goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
- goto Eaccess;
-
- if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write)
- goto Eaccess;
+ goto out_put_module;
+ if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
+ goto out_put_module;
}
/* File needs read support.
@@ -396,92 +418,72 @@ static int check_perm(struct inode * inode, struct file * file, int type)
*/
if (file->f_mode & FMODE_READ) {
if (!(inode->i_mode & S_IRUGO))
- goto Eaccess;
-
+ goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
- goto Eaccess;
-
- if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read)
- goto Eaccess;
+ goto out_put_module;
+ if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
+ goto out_put_module;
}
- /* No error? Great, allocate a buffer for the file, and store it
- * it in file->private_data for easy access.
- */
- buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
- if (!buffer) {
- error = -ENOMEM;
- goto Enomem;
- }
mutex_init(&buffer->mutex);
buffer->needs_read_fill = 1;
buffer->read_in_progress = false;
buffer->write_in_progress = false;
- buffer->ops = ops;
file->private_data = buffer;
- goto Done;
+ up_read(&frag->frag_sem);
+ return 0;
- Einval:
- error = -EINVAL;
- goto Done;
- Eaccess:
- error = -EACCES;
- Enomem:
- module_put(attr->ca_owner);
- Done:
- if (error && item)
- config_item_put(item);
+out_put_module:
+ module_put(buffer->owner);
+out_put_item:
+ config_item_put(buffer->item);
+out_free_buffer:
+ up_read(&frag->frag_sem);
+ kfree(buffer);
+out:
return error;
}
static int configfs_release(struct inode *inode, struct file *filp)
{
- struct config_item * item = to_item(filp->f_path.dentry->d_parent);
- struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
- struct module * owner = attr->ca_owner;
- struct configfs_buffer * buffer = filp->private_data;
-
- if (item)
- config_item_put(item);
- /* After this point, attr should not be accessed. */
- module_put(owner);
-
- if (buffer) {
- if (buffer->page)
- free_page((unsigned long)buffer->page);
- mutex_destroy(&buffer->mutex);
- kfree(buffer);
- }
+ struct configfs_buffer *buffer = filp->private_data;
+
+ module_put(buffer->owner);
+ if (buffer->page)
+ free_page((unsigned long)buffer->page);
+ mutex_destroy(&buffer->mutex);
+ kfree(buffer);
return 0;
}
static int configfs_open_file(struct inode *inode, struct file *filp)
{
- return check_perm(inode, filp, CONFIGFS_ITEM_ATTR);
+ return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
}
static int configfs_open_bin_file(struct inode *inode, struct file *filp)
{
- return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
+ return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
}
-static int configfs_release_bin_file(struct inode *inode, struct file *filp)
+static int configfs_release_bin_file(struct inode *inode, struct file *file)
{
- struct configfs_buffer *buffer = filp->private_data;
- struct dentry *dentry = filp->f_path.dentry;
- struct config_item *item = to_item(dentry->d_parent);
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
- ssize_t len = 0;
- int ret;
+ struct configfs_buffer *buffer = file->private_data;
buffer->read_in_progress = false;
if (buffer->write_in_progress) {
+ struct configfs_fragment *frag = to_frag(file);
buffer->write_in_progress = false;
- len = bin_attr->write(item, buffer->bin_buffer,
- buffer->bin_buffer_size);
-
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead) {
+ /* result of ->release() is ignored */
+ buffer->bin_attr->write(buffer->item,
+ buffer->bin_buffer,
+ buffer->bin_buffer_size);
+ }
+ up_read(&frag->frag_sem);
/* vfree on NULL is safe */
vfree(buffer->bin_buffer);
buffer->bin_buffer = NULL;
@@ -489,10 +491,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp)
buffer->needs_read_fill = 1;
}
- ret = configfs_release(inode, filp);
- if (len < 0)
- return len;
- return ret;
+ configfs_release(inode, file);
+ return 0;
}
@@ -527,7 +527,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
- CONFIGFS_ITEM_ATTR);
+ CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
inode_unlock(d_inode(dir));
return error;
@@ -549,7 +549,7 @@ int configfs_create_bin_file(struct config_item *item,
inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
- CONFIGFS_ITEM_BIN_ATTR);
+ CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
inode_unlock(dir->d_inode);
return error;
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index ab0284321912..680aba9c00d5 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -76,14 +76,14 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
if (ia_valid & ATTR_GID)
sd_iattr->ia_gid = iattr->ia_gid;
if (ia_valid & ATTR_ATIME)
- sd_iattr->ia_atime = timespec64_trunc(iattr->ia_atime,
- inode->i_sb->s_time_gran);
+ sd_iattr->ia_atime = timestamp_truncate(iattr->ia_atime,
+ inode);
if (ia_valid & ATTR_MTIME)
- sd_iattr->ia_mtime = timespec64_trunc(iattr->ia_mtime,
- inode->i_sb->s_time_gran);
+ sd_iattr->ia_mtime = timestamp_truncate(iattr->ia_mtime,
+ inode);
if (ia_valid & ATTR_CTIME)
- sd_iattr->ia_ctime = timespec64_trunc(iattr->ia_ctime,
- inode->i_sb->s_time_gran);
+ sd_iattr->ia_ctime = timestamp_truncate(iattr->ia_ctime,
+ inode);
if (ia_valid & ATTR_MODE) {
umode_t mode = iattr->ia_mode;
@@ -164,41 +164,27 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
#endif /* CONFIG_LOCKDEP */
-int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct inode *))
+struct inode *configfs_create(struct dentry *dentry, umode_t mode)
{
- int error = 0;
struct inode *inode = NULL;
struct configfs_dirent *sd;
struct inode *p_inode;
if (!dentry)
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
if (d_really_is_positive(dentry))
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
sd = dentry->d_fsdata;
inode = configfs_new_inode(mode, sd, dentry->d_sb);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
p_inode = d_inode(dentry->d_parent);
p_inode->i_mtime = p_inode->i_ctime = current_time(p_inode);
configfs_set_inode_lock_class(sd, inode);
-
- init(inode);
- if (S_ISDIR(mode) || S_ISLNK(mode)) {
- /*
- * ->symlink(), ->mkdir(), configfs_register_subsystem() or
- * create_default_group() - already hashed.
- */
- d_instantiate(dentry, inode);
- dget(dentry); /* pin link and directory dentries in core */
- } else {
- /* ->lookup() */
- d_add(dentry, inode);
- }
- return error;
+ return inode;
}
/*
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index 55438dd58189..0c6e8cf61953 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -28,9 +28,18 @@ static struct vfsmount *configfs_mount = NULL;
struct kmem_cache *configfs_dir_cachep;
static int configfs_mnt_count = 0;
+
+static void configfs_free_inode(struct inode *inode)
+{
+ if (S_ISLNK(inode->i_mode))
+ kfree(inode->i_link);
+ free_inode_nonrcu(inode);
+}
+
static const struct super_operations configfs_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
+ .free_inode = configfs_free_inode,
};
static struct config_group configfs_root_group = {
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 91eac6c55e07..cb61467478ca 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -55,41 +55,63 @@ static void fill_item_path(struct config_item * item, char * buffer, int length)
}
}
+static int configfs_get_target_path(struct config_item *item,
+ struct config_item *target, char *path)
+{
+ int depth, size;
+ char *s;
+
+ depth = item_depth(item);
+ size = item_path_length(target) + depth * 3 - 1;
+ if (size > PATH_MAX)
+ return -ENAMETOOLONG;
+
+ pr_debug("%s: depth = %d, size = %d\n", __func__, depth, size);
+
+ for (s = path; depth--; s += 3)
+ strcpy(s,"../");
+
+ fill_item_path(target, path, size);
+ pr_debug("%s: path = '%s'\n", __func__, path);
+ return 0;
+}
+
static int create_link(struct config_item *parent_item,
struct config_item *item,
struct dentry *dentry)
{
struct configfs_dirent *target_sd = item->ci_dentry->d_fsdata;
- struct configfs_symlink *sl;
+ char *body;
int ret;
- ret = -ENOENT;
if (!configfs_dirent_is_ready(target_sd))
- goto out;
- ret = -ENOMEM;
- sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
- if (sl) {
+ return -ENOENT;
+
+ body = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!body)
+ return -ENOMEM;
+
+ configfs_get(target_sd);
+ spin_lock(&configfs_dirent_lock);
+ if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
+ spin_unlock(&configfs_dirent_lock);
+ configfs_put(target_sd);
+ kfree(body);
+ return -ENOENT;
+ }
+ target_sd->s_links++;
+ spin_unlock(&configfs_dirent_lock);
+ ret = configfs_get_target_path(parent_item, item, body);
+ if (!ret)
+ ret = configfs_create_link(target_sd, parent_item->ci_dentry,
+ dentry, body);
+ if (ret) {
spin_lock(&configfs_dirent_lock);
- if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
- spin_unlock(&configfs_dirent_lock);
- kfree(sl);
- return -ENOENT;
- }
- sl->sl_target = config_item_get(item);
- list_add(&sl->sl_list, &target_sd->s_links);
+ target_sd->s_links--;
spin_unlock(&configfs_dirent_lock);
- ret = configfs_create_link(sl, parent_item->ci_dentry,
- dentry);
- if (ret) {
- spin_lock(&configfs_dirent_lock);
- list_del_init(&sl->sl_list);
- spin_unlock(&configfs_dirent_lock);
- config_item_put(item);
- kfree(sl);
- }
+ configfs_put(target_sd);
+ kfree(body);
}
-
-out:
return ret;
}
@@ -131,9 +153,8 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna
* Fake invisibility if dir belongs to a group/default groups hierarchy
* being attached
*/
- ret = -ENOENT;
if (!configfs_dirent_is_ready(sd))
- goto out;
+ return -ENOENT;
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
@@ -143,11 +164,42 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna
!type->ct_item_ops->allow_link)
goto out_put;
+ /*
+ * This is really sick. What they wanted was a hybrid of
+ * link(2) and symlink(2) - they wanted the target resolved
+ * at syscall time (as link(2) would've done), be a directory
+ * (which link(2) would've refused to do) *AND* be a deep
+ * fucking magic, making the target busy from rmdir POV.
+ * symlink(2) is nothing of that sort, and the locking it
+ * gets matches the normal symlink(2) semantics. Without
+ * attempts to resolve the target (which might very well
+ * not even exist yet) done prior to locking the parent
+ * directory. This perversion, OTOH, needs to resolve
+ * the target, which would lead to obvious deadlocks if
+ * attempted with any directories locked.
+ *
+ * Unfortunately, that garbage is userland ABI and we should've
+ * said "no" back in 2005. Too late now, so we get to
+ * play very ugly games with locking.
+ *
+ * Try *ANYTHING* of that sort in new code, and you will
+ * really regret it. Just ask yourself - what could a BOFH
+ * do to me and do I want to find it out first-hand?
+ *
+ * AV, a thoroughly annoyed bastard.
+ */
+ inode_unlock(dir);
ret = get_target(symname, &path, &target_item, dentry->d_sb);
+ inode_lock(dir);
if (ret)
goto out_put;
- ret = type->ct_item_ops->allow_link(parent_item, target_item);
+ if (dentry->d_inode || d_unhashed(dentry))
+ ret = -EEXIST;
+ else
+ ret = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ if (!ret)
+ ret = type->ct_item_ops->allow_link(parent_item, target_item);
if (!ret) {
mutex_lock(&configfs_symlink_mutex);
ret = create_link(parent_item, target_item, dentry);
@@ -162,15 +214,12 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna
out_put:
config_item_put(parent_item);
-
-out:
return ret;
}
int configfs_unlink(struct inode *dir, struct dentry *dentry)
{
- struct configfs_dirent *sd = dentry->d_fsdata;
- struct configfs_symlink *sl;
+ struct configfs_dirent *sd = dentry->d_fsdata, *target_sd;
struct config_item *parent_item;
const struct config_item_type *type;
int ret;
@@ -179,7 +228,7 @@ int configfs_unlink(struct inode *dir, struct dentry *dentry)
if (!(sd->s_type & CONFIGFS_ITEM_LINK))
goto out;
- sl = sd->s_element;
+ target_sd = sd->s_element;
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
@@ -193,21 +242,18 @@ int configfs_unlink(struct inode *dir, struct dentry *dentry)
/*
* drop_link() must be called before
- * list_del_init(&sl->sl_list), so that the order of
+ * decrementing target's ->s_links, so that the order of
* drop_link(this, target) and drop_item(target) is preserved.
*/
if (type && type->ct_item_ops &&
type->ct_item_ops->drop_link)
type->ct_item_ops->drop_link(parent_item,
- sl->sl_target);
+ target_sd->s_element);
spin_lock(&configfs_dirent_lock);
- list_del_init(&sl->sl_list);
+ target_sd->s_links--;
spin_unlock(&configfs_dirent_lock);
-
- /* Put reference from create_link() */
- config_item_put(sl->sl_target);
- kfree(sl);
+ configfs_put(target_sd);
config_item_put(parent_item);
@@ -217,79 +263,8 @@ out:
return ret;
}
-static int configfs_get_target_path(struct config_item * item, struct config_item * target,
- char *path)
-{
- char * s;
- int depth, size;
-
- depth = item_depth(item);
- size = item_path_length(target) + depth * 3 - 1;
- if (size > PATH_MAX)
- return -ENAMETOOLONG;
-
- pr_debug("%s: depth = %d, size = %d\n", __func__, depth, size);
-
- for (s = path; depth--; s += 3)
- strcpy(s,"../");
-
- fill_item_path(target, path, size);
- pr_debug("%s: path = '%s'\n", __func__, path);
-
- return 0;
-}
-
-static int configfs_getlink(struct dentry *dentry, char * path)
-{
- struct config_item *item, *target_item;
- int error = 0;
-
- item = configfs_get_config_item(dentry->d_parent);
- if (!item)
- return -EINVAL;
-
- target_item = configfs_get_config_item(dentry);
- if (!target_item) {
- config_item_put(item);
- return -EINVAL;
- }
-
- down_read(&configfs_rename_sem);
- error = configfs_get_target_path(item, target_item, path);
- up_read(&configfs_rename_sem);
-
- config_item_put(item);
- config_item_put(target_item);
- return error;
-
-}
-
-static const char *configfs_get_link(struct dentry *dentry,
- struct inode *inode,
- struct delayed_call *done)
-{
- char *body;
- int error;
-
- if (!dentry)
- return ERR_PTR(-ECHILD);
-
- body = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!body)
- return ERR_PTR(-ENOMEM);
-
- error = configfs_getlink(dentry, body);
- if (!error) {
- set_delayed_call(done, kfree_link, body);
- return body;
- }
-
- kfree(body);
- return ERR_PTR(error);
-}
-
const struct inode_operations configfs_symlink_inode_operations = {
- .get_link = configfs_get_link,
+ .get_link = simple_get_link,
.setattr = configfs_setattr,
};
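simple_get_link() is the stock libfs helper that hands back the preformatted target string stored in inode->i_link; since create_link() above now builds that string up front (the PAGE_SIZE body passed to configfs_create_link(), which evidently ends up as i_link), the old configfs_getlink()/configfs_get_link() machinery has nothing left to do, and configfs_free_inode() in mount.c releases the string when the inode is destroyed. For reference, the libfs helper is essentially:

	const char *simple_get_link(struct dentry *dentry, struct inode *inode,
				    struct delayed_call *done)
	{
		return inode->i_link;	/* safe in RCU walk: no dentry use, no blocking */
	}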
diff --git a/fs/coredump.c b/fs/coredump.c
index e42e17e55bfd..b1ea7dfbd149 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -7,6 +7,7 @@
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
+#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
@@ -187,11 +188,13 @@ put_exe_file:
* name into corename, which must have space for at least
* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
*/
-static int format_corename(struct core_name *cn, struct coredump_params *cprm)
+static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+ size_t **argv, int *argc)
{
const struct cred *cred = current_cred();
const char *pat_ptr = core_pattern;
int ispipe = (*pat_ptr == '|');
+ bool was_space = false;
int pid_in_pattern = 0;
int err = 0;
@@ -201,12 +204,35 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
return -ENOMEM;
cn->corename[0] = '\0';
- if (ispipe)
+ if (ispipe) {
+ int argvs = sizeof(core_pattern) / 2;
+ (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
+ if (!(*argv))
+ return -ENOMEM;
+ (*argv)[(*argc)++] = 0;
++pat_ptr;
+ }
/* Repeat as long as we have more pattern to process and more output
space */
while (*pat_ptr) {
+ /*
+ * Split on spaces before doing template expansion so that
+ * %e and %E don't get split if they have spaces in them
+ */
+ if (ispipe) {
+ if (isspace(*pat_ptr)) {
+ was_space = true;
+ pat_ptr++;
+ continue;
+ } else if (was_space) {
+ was_space = false;
+ err = cn_printf(cn, "%c", '\0');
+ if (err)
+ return err;
+ (*argv)[(*argc)++] = cn->used;
+ }
+ }
if (*pat_ptr != '%') {
err = cn_printf(cn, "%c", *pat_ptr++);
} else {
@@ -546,6 +572,8 @@ void do_coredump(const kernel_siginfo_t *siginfo)
struct cred *cred;
int retval = 0;
int ispipe;
+ size_t *argv = NULL;
+ int argc = 0;
struct files_struct *displaced;
/* require nonrelative corefile path and be extra careful */
bool need_suid_safe = false;
@@ -592,9 +620,10 @@ void do_coredump(const kernel_siginfo_t *siginfo)
old_cred = override_creds(cred);
- ispipe = format_corename(&cn, &cprm);
+ ispipe = format_corename(&cn, &cprm, &argv, &argc);
if (ispipe) {
+ int argi;
int dump_count;
char **helper_argv;
struct subprocess_info *sub_info;
@@ -637,12 +666,16 @@ void do_coredump(const kernel_siginfo_t *siginfo)
goto fail_dropcount;
}
- helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
+ helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
+ GFP_KERNEL);
if (!helper_argv) {
printk(KERN_WARNING "%s failed to allocate memory\n",
__func__);
goto fail_dropcount;
}
+ for (argi = 0; argi < argc; argi++)
+ helper_argv[argi] = cn.corename + argv[argi];
+ helper_argv[argi] = NULL;
retval = -ENOMEM;
sub_info = call_usermodehelper_setup(helper_argv[0],
@@ -652,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
retval = call_usermodehelper_exec(sub_info,
UMH_WAIT_EXEC);
- argv_free(helper_argv);
+ kfree(helper_argv);
if (retval) {
printk(KERN_INFO "Core dump to |%s pipe failed\n",
cn.corename);
@@ -766,6 +799,7 @@ fail_dropcount:
if (ispipe)
atomic_dec(&core_dump_count);
fail_unlock:
+ kfree(argv);
kfree(cn.corename);
coredump_finish(mm, core_dumped);
revert_creds(old_cred);
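Because the split runs before '%' expansion, whitespace that %e/%E expand to can no longer produce extra arguments; cn.corename ends up holding the helper's arguments back to back, NUL-separated, with argv[] recording the byte offset of each one. A worked example (values illustrative), assuming core_pattern is "|/usr/bin/dumper %p %e" and the crashing task is pid 1234 with comm "a.out":

	/*
	 * cn.corename: "/usr/bin/dumper\0" "1234\0" "a.out"
	 * argv[]     : { 0, 16, 21 },  argc = 3
	 *
	 * helper_argv[i] = cn.corename + argv[i] then yields
	 * { "/usr/bin/dumper", "1234", "a.out", NULL },
	 * which is what call_usermodehelper_setup() receives.
	 */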
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 9352487bd0fc..2f04024c3588 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -24,6 +24,7 @@
#include <linux/blkdev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/super.h>
+#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
@@ -506,18 +507,19 @@ static void cramfs_kill_sb(struct super_block *sb)
kfree(sbi);
}
-static int cramfs_remount(struct super_block *sb, int *flags, char *data)
+static int cramfs_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- *flags |= SB_RDONLY;
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_RDONLY;
return 0;
}
-static int cramfs_read_super(struct super_block *sb,
- struct cramfs_super *super, int silent)
+static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
+ struct cramfs_super *super)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
unsigned long root_offset;
+ bool silent = fc->sb_flags & SB_SILENT;
/* We don't know the real size yet */
sbi->size = PAGE_SIZE;
@@ -532,7 +534,7 @@ static int cramfs_read_super(struct super_block *sb,
/* check for wrong endianness */
if (super->magic == CRAMFS_MAGIC_WEND) {
if (!silent)
- pr_err("wrong endianness\n");
+ errorf(fc, "cramfs: wrong endianness");
return -EINVAL;
}
@@ -544,22 +546,22 @@ static int cramfs_read_super(struct super_block *sb,
mutex_unlock(&read_mutex);
if (super->magic != CRAMFS_MAGIC) {
if (super->magic == CRAMFS_MAGIC_WEND && !silent)
- pr_err("wrong endianness\n");
+ errorf(fc, "cramfs: wrong endianness");
else if (!silent)
- pr_err("wrong magic\n");
+ errorf(fc, "cramfs: wrong magic");
return -EINVAL;
}
}
/* get feature flags first */
if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
- pr_err("unsupported filesystem features\n");
+ errorf(fc, "cramfs: unsupported filesystem features");
return -EINVAL;
}
/* Check that the root inode is in a sane state */
if (!S_ISDIR(super->root.mode)) {
- pr_err("root is not a directory\n");
+ errorf(fc, "cramfs: root is not a directory");
return -EINVAL;
}
/* correct strange, hard-coded permissions of mkcramfs */
@@ -578,12 +580,12 @@ static int cramfs_read_super(struct super_block *sb,
sbi->magic = super->magic;
sbi->flags = super->flags;
if (root_offset == 0)
- pr_info("empty filesystem");
+ infof(fc, "cramfs: empty filesystem");
else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
((root_offset != sizeof(struct cramfs_super)) &&
(root_offset != 512 + sizeof(struct cramfs_super))))
{
- pr_err("bad root offset %lu\n", root_offset);
+ errorf(fc, "cramfs: bad root offset %lu", root_offset);
return -EINVAL;
}
@@ -597,6 +599,8 @@ static int cramfs_finalize_super(struct super_block *sb,
/* Set it all up.. */
sb->s_flags |= SB_RDONLY;
+ sb->s_time_min = 0;
+ sb->s_time_max = 0;
sb->s_op = &cramfs_ops;
root = get_cramfs_inode(sb, cramfs_root, 0);
if (IS_ERR(root))
@@ -607,8 +611,7 @@ static int cramfs_finalize_super(struct super_block *sb,
return 0;
}
-static int cramfs_blkdev_fill_super(struct super_block *sb, void *data,
- int silent)
+static int cramfs_blkdev_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct cramfs_sb_info *sbi;
struct cramfs_super super;
@@ -623,14 +626,13 @@ static int cramfs_blkdev_fill_super(struct super_block *sb, void *data,
for (i = 0; i < READ_BUFFERS; i++)
buffer_blocknr[i] = -1;
- err = cramfs_read_super(sb, &super, silent);
+ err = cramfs_read_super(sb, fc, &super);
if (err)
return err;
return cramfs_finalize_super(sb, &super.root);
}
-static int cramfs_mtd_fill_super(struct super_block *sb, void *data,
- int silent)
+static int cramfs_mtd_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct cramfs_sb_info *sbi;
struct cramfs_super super;
@@ -652,7 +654,7 @@ static int cramfs_mtd_fill_super(struct super_block *sb, void *data,
pr_info("checking physical address %pap for linear cramfs image\n",
&sbi->linear_phys_addr);
- err = cramfs_read_super(sb, &super, silent);
+ err = cramfs_read_super(sb, fc, &super);
if (err)
return err;
@@ -947,32 +949,41 @@ static const struct inode_operations cramfs_dir_inode_operations = {
};
static const struct super_operations cramfs_ops = {
- .remount_fs = cramfs_remount,
.statfs = cramfs_statfs,
};
-static struct dentry *cramfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int cramfs_get_tree(struct fs_context *fc)
{
- struct dentry *ret = ERR_PTR(-ENOPROTOOPT);
+ int ret = -ENOPROTOOPT;
if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
- ret = mount_mtd(fs_type, flags, dev_name, data,
- cramfs_mtd_fill_super);
- if (!IS_ERR(ret))
- return ret;
- }
- if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV)) {
- ret = mount_bdev(fs_type, flags, dev_name, data,
- cramfs_blkdev_fill_super);
+ ret = get_tree_mtd(fc, cramfs_mtd_fill_super);
+ if (!ret)
+ return 0;
}
+ if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
+ ret = get_tree_bdev(fc, cramfs_blkdev_fill_super);
return ret;
}
+static const struct fs_context_operations cramfs_context_ops = {
+ .get_tree = cramfs_get_tree,
+ .reconfigure = cramfs_reconfigure,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int cramfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &cramfs_context_ops;
+ return 0;
+}
+
static struct file_system_type cramfs_fs_type = {
.owner = THIS_MODULE,
.name = "cramfs",
- .mount = cramfs_mount,
+ .init_fs_context = cramfs_init_fs_context,
.kill_sb = cramfs_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
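Under the fs_context API the ->mount hook goes away entirely: the VFS calls ->init_fs_context() to install cramfs_context_ops, then drives ->get_tree() (and ->reconfigure() on remount) through the context object. A rough caller-side sketch of that sequence, assuming no mount options need parsing (cramfs takes none):

	static struct dentry *mount_via_fs_context(struct file_system_type *type,
						   int flags, const char *dev_name)
	{
		struct fs_context *fc;
		struct dentry *root;
		int err;

		fc = fs_context_for_mount(type, flags);	/* -> init_fs_context() */
		if (IS_ERR(fc))
			return ERR_CAST(fc);

		err = vfs_parse_fs_string(fc, "source", dev_name, strlen(dev_name));
		if (!err)
			err = vfs_get_tree(fc);		/* -> get_tree() */
		root = err ? ERR_PTR(err) : dget(fc->root);
		put_fs_context(fc);
		return root;
	}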
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index 5fdf24877c17..ff5a1746cbae 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -7,6 +7,8 @@ config FS_ENCRYPTION
select CRYPTO_ECB
select CRYPTO_XTS
select CRYPTO_CTS
+ select CRYPTO_SHA512
+ select CRYPTO_HMAC
select KEYS
help
Enable encryption of files and directories. This
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index 4f0df5e682e4..232e2bb5a337 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,5 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
-fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
+fscrypto-y := crypto.o \
+ fname.o \
+ hkdf.o \
+ hooks.o \
+ keyring.o \
+ keysetup.o \
+ keysetup_v1.o \
+ policy.o
+
fscrypto-$(CONFIG_BLOCK) += bio.o
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 82da2510721f..1f4b8a277060 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -26,7 +26,7 @@
#include <linux/namei.h>
#include "fscrypt_private.h"
-static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
+void fscrypt_decrypt_bio(struct bio *bio)
{
struct bio_vec *bv;
struct bvec_iter_all iter_all;
@@ -37,37 +37,10 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
bv->bv_offset);
if (ret)
SetPageError(page);
- else if (done)
- SetPageUptodate(page);
- if (done)
- unlock_page(page);
}
}
-
-void fscrypt_decrypt_bio(struct bio *bio)
-{
- __fscrypt_decrypt_bio(bio, false);
-}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
-static void completion_pages(struct work_struct *work)
-{
- struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
- struct bio *bio = ctx->bio;
-
- __fscrypt_decrypt_bio(bio, true);
- fscrypt_release_ctx(ctx);
- bio_put(bio);
-}
-
-void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
-{
- INIT_WORK(&ctx->work, completion_pages);
- ctx->bio = bio;
- fscrypt_enqueue_decrypt_work(&ctx->work);
-}
-EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
-
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 45c3d0427fb2..3719efa546c6 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -27,29 +27,20 @@
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
-#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
-static unsigned int num_prealloc_crypto_ctxs = 128;
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
"Number of crypto pages to preallocate");
-module_param(num_prealloc_crypto_ctxs, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
- "Number of crypto contexts to preallocate");
static mempool_t *fscrypt_bounce_page_pool = NULL;
-static LIST_HEAD(fscrypt_free_ctxs);
-static DEFINE_SPINLOCK(fscrypt_ctx_lock);
-
static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
-static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
@@ -58,62 +49,6 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work)
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
-/**
- * fscrypt_release_ctx() - Release a decryption context
- * @ctx: The decryption context to release.
- *
- * If the decryption context was allocated from the pre-allocated pool, return
- * it to that pool. Else, free it.
- */
-void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- unsigned long flags;
-
- if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- kmem_cache_free(fscrypt_ctx_cachep, ctx);
- } else {
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- }
-}
-EXPORT_SYMBOL(fscrypt_release_ctx);
-
-/**
- * fscrypt_get_ctx() - Get a decryption context
- * @gfp_flags: The gfp flag for memory allocation
- *
- * Allocate and initialize a decryption context.
- *
- * Return: A new decryption context on success; an ERR_PTR() otherwise.
- */
-struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
-{
- struct fscrypt_ctx *ctx;
- unsigned long flags;
-
- /*
- * First try getting a ctx from the free list so that we don't have to
- * call into the slab allocator.
- */
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
- struct fscrypt_ctx, free_list);
- if (ctx)
- list_del(&ctx->free_list);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- if (!ctx) {
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- }
- return ctx;
-}
-EXPORT_SYMBOL(fscrypt_get_ctx);
-
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
@@ -138,14 +73,17 @@ EXPORT_SYMBOL(fscrypt_free_bounce_page);
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
const struct fscrypt_info *ci)
{
+ u8 flags = fscrypt_policy_flags(&ci->ci_policy);
+
memset(iv, 0, ci->ci_mode->ivsize);
- iv->lblk_num = cpu_to_le64(lblk_num);
- if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY)
+ if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
+ WARN_ON_ONCE((u32)lblk_num != lblk_num);
+ lblk_num |= (u64)ci->ci_inode->i_ino << 32;
+ } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);
-
- if (ci->ci_essiv_tfm != NULL)
- crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
+ }
+ iv->lblk_num = cpu_to_le64(lblk_num);
}
/* Encrypt or decrypt a single filesystem block of file contents */
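The resulting IV layouts, assuming union fscrypt_iv places the 8-byte lblk_num first with the 16-byte nonce immediately after it, for inode number 0x12 and block number 0x34:

	/*
	 * default:        iv = 34 00 00 00 00 00 00 00 | zero padding
	 * IV_INO_LBLK_64: lblk_num |= ino << 32
	 *                 iv = 34 00 00 00 12 00 00 00 | zero padding
	 *                 (hence the WARN_ON_ONCE: block numbers must fit in 32 bits)
	 * DIRECT_KEY:     lblk_num as in the default case, plus the per-file
	 *                 nonce copied into the bytes following it
	 */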
@@ -188,10 +126,8 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res) {
- fscrypt_err(inode->i_sb,
- "%scryption failed for inode %lu, block %llu: %d",
- (rw == FS_DECRYPT ? "de" : "en"),
- inode->i_ino, lblk_num, res);
+ fscrypt_err(inode, "%scryption failed for block %llu: %d",
+ (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
return res;
}
return 0;
@@ -398,17 +334,6 @@ const struct dentry_operations fscrypt_d_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
-static void fscrypt_destroy(void)
-{
- struct fscrypt_ctx *pos, *n;
-
- list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
- kmem_cache_free(fscrypt_ctx_cachep, pos);
- INIT_LIST_HEAD(&fscrypt_free_ctxs);
- mempool_destroy(fscrypt_bounce_page_pool);
- fscrypt_bounce_page_pool = NULL;
-}
-
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
* @cop_flags: fscrypt operations flags
@@ -416,11 +341,11 @@ static void fscrypt_destroy(void)
* We only call this when we start accessing encrypted files, since it
* results in memory getting allocated that wouldn't otherwise be used.
*
- * Return: Zero on success, non-zero otherwise.
+ * Return: 0 on success; -errno on failure
*/
int fscrypt_initialize(unsigned int cop_flags)
{
- int i, res = -ENOMEM;
+ int err = 0;
/* No need to allocate a bounce page pool if this FS won't use it. */
if (cop_flags & FS_CFLG_OWN_PAGES)
@@ -428,32 +353,21 @@ int fscrypt_initialize(unsigned int cop_flags)
mutex_lock(&fscrypt_init_mutex);
if (fscrypt_bounce_page_pool)
- goto already_initialized;
-
- for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
- struct fscrypt_ctx *ctx;
-
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
- if (!ctx)
- goto fail;
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- }
+ goto out_unlock;
+ err = -ENOMEM;
fscrypt_bounce_page_pool =
mempool_create_page_pool(num_prealloc_crypto_pages, 0);
if (!fscrypt_bounce_page_pool)
- goto fail;
+ goto out_unlock;
-already_initialized:
+ err = 0;
+out_unlock:
mutex_unlock(&fscrypt_init_mutex);
- return 0;
-fail:
- fscrypt_destroy();
- mutex_unlock(&fscrypt_init_mutex);
- return res;
+ return err;
}
-void fscrypt_msg(struct super_block *sb, const char *level,
+void fscrypt_msg(const struct inode *inode, const char *level,
const char *fmt, ...)
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
@@ -467,8 +381,9 @@ void fscrypt_msg(struct super_block *sb, const char *level,
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- if (sb)
- printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
+ if (inode)
+ printk("%sfscrypt (%s, inode %lu): %pV\n",
+ level, inode->i_sb->s_id, inode->i_ino, &vaf);
else
printk("%sfscrypt: %pV\n", level, &vaf);
va_end(args);
@@ -479,6 +394,8 @@ void fscrypt_msg(struct super_block *sb, const char *level,
*/
static int __init fscrypt_init(void)
{
+ int err = -ENOMEM;
+
/*
* Use an unbound workqueue to allow bios to be decrypted in parallel
* even when they happen to complete on the same CPU. This sacrifices
@@ -493,39 +410,21 @@ static int __init fscrypt_init(void)
if (!fscrypt_read_workqueue)
goto fail;
- fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
- if (!fscrypt_ctx_cachep)
- goto fail_free_queue;
-
fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
if (!fscrypt_info_cachep)
- goto fail_free_ctx;
+ goto fail_free_queue;
+
+ err = fscrypt_init_keyring();
+ if (err)
+ goto fail_free_info;
return 0;
-fail_free_ctx:
- kmem_cache_destroy(fscrypt_ctx_cachep);
+fail_free_info:
+ kmem_cache_destroy(fscrypt_info_cachep);
fail_free_queue:
destroy_workqueue(fscrypt_read_workqueue);
fail:
- return -ENOMEM;
-}
-module_init(fscrypt_init)
-
-/**
- * fscrypt_exit() - Shutdown the fs encryption system
- */
-static void __exit fscrypt_exit(void)
-{
- fscrypt_destroy();
-
- if (fscrypt_read_workqueue)
- destroy_workqueue(fscrypt_read_workqueue);
- kmem_cache_destroy(fscrypt_ctx_cachep);
- kmem_cache_destroy(fscrypt_info_cachep);
-
- fscrypt_essiv_cleanup();
+ return err;
}
-module_exit(fscrypt_exit);
-
-MODULE_LICENSE("GPL");
+late_initcall(fscrypt_init)
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 00d150ff3033..3da3707c10e3 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -71,9 +71,7 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname,
res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res < 0) {
- fscrypt_err(inode->i_sb,
- "Filename encryption failed for inode %lu: %d",
- inode->i_ino, res);
+ fscrypt_err(inode, "Filename encryption failed: %d", res);
return res;
}
@@ -117,9 +115,7 @@ static int fname_decrypt(struct inode *inode,
res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
skcipher_request_free(req);
if (res < 0) {
- fscrypt_err(inode->i_sb,
- "Filename decryption failed for inode %lu: %d",
- inode->i_ino, res);
+ fscrypt_err(inode, "Filename decryption failed: %d", res);
return res;
}
@@ -127,44 +123,45 @@ static int fname_decrypt(struct inode *inode,
return 0;
}
-static const char *lookup_table =
+static const char lookup_table[65] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
/**
- * digest_encode() -
+ * base64_encode() -
*
- * Encodes the input digest using characters from the set [a-zA-Z0-9_+].
+ * Encodes the input string using characters from the set [A-Za-z0-9+,].
* The encoded string is roughly 4/3 times the size of the input string.
+ *
+ * Return: length of the encoded string
*/
-static int digest_encode(const char *src, int len, char *dst)
+static int base64_encode(const u8 *src, int len, char *dst)
{
- int i = 0, bits = 0, ac = 0;
+ int i, bits = 0, ac = 0;
char *cp = dst;
- while (i < len) {
- ac += (((unsigned char) src[i]) << bits);
+ for (i = 0; i < len; i++) {
+ ac += src[i] << bits;
bits += 8;
do {
*cp++ = lookup_table[ac & 0x3f];
ac >>= 6;
bits -= 6;
} while (bits >= 6);
- i++;
}
if (bits)
*cp++ = lookup_table[ac & 0x3f];
return cp - dst;
}
-static int digest_decode(const char *src, int len, char *dst)
+static int base64_decode(const char *src, int len, u8 *dst)
{
- int i = 0, bits = 0, ac = 0;
+ int i, bits = 0, ac = 0;
const char *p;
- char *cp = dst;
+ u8 *cp = dst;
- while (i < len) {
+ for (i = 0; i < len; i++) {
p = strchr(lookup_table, src[i]);
if (p == NULL || src[i] == 0)
return -2;
@@ -175,7 +172,6 @@ static int digest_decode(const char *src, int len, char *dst)
ac >>= 8;
bits -= 8;
}
- i++;
}
if (ac)
return -1;
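A sizing note and caller sketch for the encoder above: a 16-byte input needs BASE64_CHARS(16) = DIV_ROUND_UP(16 * 4, 3) = 22 output characters, and no padding characters are emitted (the alphabet and bit order also differ from standard base64, so the output is not interchangeable with it). The helper below is purely illustrative:

	static void example_encode_nonce(const u8 nonce[16], char out[22 + 1])
	{
		int len = base64_encode(nonce, 16, out);	/* 22 for a 16-byte input */

		out[len] = '\0';	/* NUL-terminate only for printing/debugging */
	}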
@@ -185,8 +181,9 @@ static int digest_decode(const char *src, int len, char *dst)
bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
u32 max_len, u32 *encrypted_len_ret)
{
- int padding = 4 << (inode->i_crypt_info->ci_flags &
- FS_POLICY_FLAGS_PAD_MASK);
+ const struct fscrypt_info *ci = inode->i_crypt_info;
+ int padding = 4 << (fscrypt_policy_flags(&ci->ci_policy) &
+ FSCRYPT_POLICY_FLAGS_PAD_MASK);
u32 encrypted_len;
if (orig_len > max_len)
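The two padding bits map directly to a byte count: flag values 0, 1, 2 and 3 give 4 << 0 = 4, 4 << 1 = 8, 4 << 2 = 16 and 4 << 3 = 32, so encrypted name lengths get padded up to a multiple of 4, 8, 16 or 32 bytes respectively.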
@@ -272,7 +269,7 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
return fname_decrypt(inode, iname, oname);
if (iname->len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) {
- oname->len = digest_encode(iname->name, iname->len,
+ oname->len = base64_encode(iname->name, iname->len,
oname->name);
return 0;
}
@@ -287,7 +284,7 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
FSCRYPT_FNAME_DIGEST(iname->name, iname->len),
FSCRYPT_FNAME_DIGEST_SIZE);
oname->name[0] = '_';
- oname->len = 1 + digest_encode((const char *)&digested_name,
+ oname->len = 1 + base64_encode((const u8 *)&digested_name,
sizeof(digested_name), oname->name + 1);
return 0;
}
@@ -380,8 +377,8 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
if (fname->crypto_buf.name == NULL)
return -ENOMEM;
- ret = digest_decode(iname->name + digested, iname->len - digested,
- fname->crypto_buf.name);
+ ret = base64_decode(iname->name + digested, iname->len - digested,
+ fname->crypto_buf.name);
if (ret < 0) {
ret = -ENOENT;
goto errout;
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 8978eec9d766..130b50e5a011 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -4,9 +4,8 @@
*
* Copyright (C) 2015, Google, Inc.
*
- * This contains encryption key functions.
- *
- * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ * Originally written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar.
+ * Heavily modified since then.
*/
#ifndef _FSCRYPT_PRIVATE_H
@@ -15,30 +14,133 @@
#include <linux/fscrypt.h>
#include <crypto/hash.h>
-/* Encryption parameters */
+#define CONST_STRLEN(str) (sizeof(str) - 1)
+
#define FS_KEY_DERIVATION_NONCE_SIZE 16
-/**
- * Encryption context for inode
- *
- * Protector format:
- * 1 byte: Protector format (1 = this version)
- * 1 byte: File contents encryption mode
- * 1 byte: File names encryption mode
- * 1 byte: Flags
- * 8 bytes: Master Key descriptor
- * 16 bytes: Encryption Key derivation nonce
- */
-struct fscrypt_context {
- u8 format;
+#define FSCRYPT_MIN_KEY_SIZE 16
+
+#define FSCRYPT_CONTEXT_V1 1
+#define FSCRYPT_CONTEXT_V2 2
+
+struct fscrypt_context_v1 {
+ u8 version; /* FSCRYPT_CONTEXT_V1 */
u8 contents_encryption_mode;
u8 filenames_encryption_mode;
u8 flags;
- u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
-} __packed;
+};
-#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
+struct fscrypt_context_v2 {
+ u8 version; /* FSCRYPT_CONTEXT_V2 */
+ u8 contents_encryption_mode;
+ u8 filenames_encryption_mode;
+ u8 flags;
+ u8 __reserved[4];
+ u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
+ u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+};
+
+/**
+ * fscrypt_context - the encryption context of an inode
+ *
+ * This is the on-disk equivalent of an fscrypt_policy, stored alongside each
+ * encrypted file usually in a hidden extended attribute. It contains the
+ * fields from the fscrypt_policy, in order to identify the encryption algorithm
+ * and key with which the file is encrypted. It also contains a nonce that was
+ * randomly generated by fscrypt itself; this is used as KDF input or as a tweak
+ * to cause different files to be encrypted differently.
+ */
+union fscrypt_context {
+ u8 version;
+ struct fscrypt_context_v1 v1;
+ struct fscrypt_context_v2 v2;
+};
+
+/*
+ * Return the size expected for the given fscrypt_context based on its version
+ * number, or 0 if the context version is unrecognized.
+ */
+static inline int fscrypt_context_size(const union fscrypt_context *ctx)
+{
+ switch (ctx->version) {
+ case FSCRYPT_CONTEXT_V1:
+ BUILD_BUG_ON(sizeof(ctx->v1) != 28);
+ return sizeof(ctx->v1);
+ case FSCRYPT_CONTEXT_V2:
+ BUILD_BUG_ON(sizeof(ctx->v2) != 40);
+ return sizeof(ctx->v2);
+ }
+ return 0;
+}
+
+#undef fscrypt_policy
+union fscrypt_policy {
+ u8 version;
+ struct fscrypt_policy_v1 v1;
+ struct fscrypt_policy_v2 v2;
+};
+
+/*
+ * Return the size expected for the given fscrypt_policy based on its version
+ * number, or 0 if the policy version is unrecognized.
+ */
+static inline int fscrypt_policy_size(const union fscrypt_policy *policy)
+{
+ switch (policy->version) {
+ case FSCRYPT_POLICY_V1:
+ return sizeof(policy->v1);
+ case FSCRYPT_POLICY_V2:
+ return sizeof(policy->v2);
+ }
+ return 0;
+}
+
+/* Return the contents encryption mode of a valid encryption policy */
+static inline u8
+fscrypt_policy_contents_mode(const union fscrypt_policy *policy)
+{
+ switch (policy->version) {
+ case FSCRYPT_POLICY_V1:
+ return policy->v1.contents_encryption_mode;
+ case FSCRYPT_POLICY_V2:
+ return policy->v2.contents_encryption_mode;
+ }
+ BUG();
+}
+
+/* Return the filenames encryption mode of a valid encryption policy */
+static inline u8
+fscrypt_policy_fnames_mode(const union fscrypt_policy *policy)
+{
+ switch (policy->version) {
+ case FSCRYPT_POLICY_V1:
+ return policy->v1.filenames_encryption_mode;
+ case FSCRYPT_POLICY_V2:
+ return policy->v2.filenames_encryption_mode;
+ }
+ BUG();
+}
+
+/* Return the flags (FSCRYPT_POLICY_FLAG*) of a valid encryption policy */
+static inline u8
+fscrypt_policy_flags(const union fscrypt_policy *policy)
+{
+ switch (policy->version) {
+ case FSCRYPT_POLICY_V1:
+ return policy->v1.flags;
+ case FSCRYPT_POLICY_V2:
+ return policy->v2.flags;
+ }
+ BUG();
+}
+
+static inline bool
+fscrypt_is_direct_key_policy(const union fscrypt_policy *policy)
+{
+ return fscrypt_policy_flags(policy) & FSCRYPT_POLICY_FLAG_DIRECT_KEY;
+}
/**
* For encrypted symlinks, the ciphertext length is stored at the beginning
@@ -61,30 +163,41 @@ struct fscrypt_info {
/* The actual crypto transform used for encryption and decryption */
struct crypto_skcipher *ci_ctfm;
+ /* True if the key should be freed when this fscrypt_info is freed */
+ bool ci_owns_key;
+
/*
- * Cipher for ESSIV IV generation. Only set for CBC contents
- * encryption, otherwise is NULL.
+ * Encryption mode used for this inode. It corresponds to either the
+ * contents or filenames encryption mode, depending on the inode type.
*/
- struct crypto_cipher *ci_essiv_tfm;
+ struct fscrypt_mode *ci_mode;
+
+ /* Back-pointer to the inode */
+ struct inode *ci_inode;
/*
- * Encryption mode used for this inode. It corresponds to either
- * ci_data_mode or ci_filename_mode, depending on the inode type.
+ * The master key with which this inode was unlocked (decrypted). This
+ * will be NULL if the master key was found in a process-subscribed
+ * keyring rather than in the filesystem-level keyring.
*/
- struct fscrypt_mode *ci_mode;
+ struct key *ci_master_key;
/*
- * If non-NULL, then this inode uses a master key directly rather than a
- * derived key, and ci_ctfm will equal ci_master_key->mk_ctfm.
- * Otherwise, this inode uses a derived key.
+ * Link in list of inodes that were unlocked with the master key.
+ * Only used when ->ci_master_key is set.
*/
- struct fscrypt_master_key *ci_master_key;
+ struct list_head ci_master_key_link;
- /* fields from the fscrypt_context */
- u8 ci_data_mode;
- u8 ci_filename_mode;
- u8 ci_flags;
- u8 ci_master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ /*
+ * If non-NULL, then encryption is done using the master key directly
+ * and ci_ctfm will equal ci_direct_key->dk_ctfm.
+ */
+ struct fscrypt_direct_key *ci_direct_key;
+
+ /* The encryption policy used by this inode */
+ union fscrypt_policy ci_policy;
+
+ /* This inode's nonce, copied from the fscrypt_context */
u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE];
};
@@ -93,21 +206,19 @@ typedef enum {
FS_ENCRYPT,
} fscrypt_direction_t;
-#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
-
static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
u32 filenames_mode)
{
- if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
+ if (contents_mode == FSCRYPT_MODE_AES_128_CBC &&
+ filenames_mode == FSCRYPT_MODE_AES_128_CTS)
return true;
- if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
+ if (contents_mode == FSCRYPT_MODE_AES_256_XTS &&
+ filenames_mode == FSCRYPT_MODE_AES_256_CTS)
return true;
- if (contents_mode == FS_ENCRYPTION_MODE_ADIANTUM &&
- filenames_mode == FS_ENCRYPTION_MODE_ADIANTUM)
+ if (contents_mode == FSCRYPT_MODE_ADIANTUM &&
+ filenames_mode == FSCRYPT_MODE_ADIANTUM)
return true;
return false;
@@ -125,12 +236,12 @@ extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
extern const struct dentry_operations fscrypt_d_ops;
extern void __printf(3, 4) __cold
-fscrypt_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...);
-#define fscrypt_warn(sb, fmt, ...) \
- fscrypt_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)
-#define fscrypt_err(sb, fmt, ...) \
- fscrypt_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
+#define fscrypt_warn(inode, fmt, ...) \
+ fscrypt_msg((inode), KERN_WARNING, fmt, ##__VA_ARGS__)
+#define fscrypt_err(inode, fmt, ...) \
+ fscrypt_msg((inode), KERN_ERR, fmt, ##__VA_ARGS__)
#define FSCRYPT_MAX_IV_SIZE 32
@@ -155,17 +266,218 @@ extern bool fscrypt_fname_encrypted_size(const struct inode *inode,
u32 orig_len, u32 max_len,
u32 *encrypted_len_ret);
-/* keyinfo.c */
+/* hkdf.c */
+
+struct fscrypt_hkdf {
+ struct crypto_shash *hmac_tfm;
+};
+
+extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
+ unsigned int master_key_size);
+
+/*
+ * The list of contexts in which fscrypt uses HKDF. These values are used as
+ * the first byte of the HKDF application-specific info string to guarantee that
+ * info strings are never repeated between contexts. This ensures that all HKDF
+ * outputs are unique and cryptographically isolated, i.e. knowledge of one
+ * output doesn't reveal another.
+ */
+#define HKDF_CONTEXT_KEY_IDENTIFIER 1
+#define HKDF_CONTEXT_PER_FILE_KEY 2
+#define HKDF_CONTEXT_DIRECT_KEY 3
+#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4
+
+extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen);
+
+extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf);
+
+/* keyring.c */
+
+/*
+ * fscrypt_master_key_secret - secret key material of an in-use master key
+ */
+struct fscrypt_master_key_secret {
+
+ /*
+ * For v2 policy keys: HKDF context keyed by this master key.
+ * For v1 policy keys: not set (hkdf.hmac_tfm == NULL).
+ */
+ struct fscrypt_hkdf hkdf;
+
+ /* Size of the raw key in bytes. Set even if ->raw isn't set. */
+ u32 size;
+
+ /* For v1 policy keys: the raw key. Wiped for v2 policy keys. */
+ u8 raw[FSCRYPT_MAX_KEY_SIZE];
+
+} __randomize_layout;
+
+/*
+ * fscrypt_master_key - an in-use master key
+ *
+ * This represents a master encryption key which has been added to the
+ * filesystem and can be used to "unlock" the encrypted files which were
+ * encrypted with it.
+ */
+struct fscrypt_master_key {
+
+ /*
+ * The secret key material. After FS_IOC_REMOVE_ENCRYPTION_KEY is
+ * executed, this is wiped and no new inodes can be unlocked with this
+ * key; however, there may still be inodes in ->mk_decrypted_inodes
+ * which could not be evicted. As long as some inodes still remain,
+ * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or
+ * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again.
+ *
+ * Locking: protected by key->sem (outer) and mk_secret_sem (inner).
+ * The reason for two locks is that key->sem also protects modifying
+ * mk_users, which ranks it above the semaphore for the keyring key
+ * type, which is in turn above page faults (via keyring_read). But
+ * sometimes filesystems call fscrypt_get_encryption_info() from within
+ * a transaction, which ranks it below page faults. So we need a
+ * separate lock which protects mk_secret but not also mk_users.
+ */
+ struct fscrypt_master_key_secret mk_secret;
+ struct rw_semaphore mk_secret_sem;
+
+ /*
+ * For v1 policy keys: an arbitrary key descriptor which was assigned by
+ * userspace (->descriptor).
+ *
+ * For v2 policy keys: a cryptographic hash of this key (->identifier).
+ */
+ struct fscrypt_key_specifier mk_spec;
+
+ /*
+ * Keyring which contains a key of type 'key_type_fscrypt_user' for each
+ * user who has added this key. Normally each key will be added by just
+ * one user, but it's possible that multiple users share a key, and in
+ * that case we need to keep track of those users so that one user can't
+ * remove the key before the others want it removed too.
+ *
+ * This is NULL for v1 policy keys; those can only be added by root.
+ *
+ * Locking: in addition to this keyring's own semaphore, this is

+ * protected by the master key's key->sem, so we can do atomic
+ * search+insert. It can also be searched without taking any locks, but
+ * in that case the returned key may have already been removed.
+ */
+ struct key *mk_users;
+
+ /*
+ * Length of ->mk_decrypted_inodes, plus one if mk_secret is present.
+ * Once this goes to 0, the master key is removed from ->s_master_keys.
+ * The 'struct fscrypt_master_key' will continue to live as long as the
+ * 'struct key' whose payload it is, but we won't let this reference
+ * count rise again.
+ */
+ refcount_t mk_refcount;
+
+ /*
+ * List of inodes that were unlocked using this key. This allows the
+ * inodes to be evicted efficiently if the key is removed.
+ */
+ struct list_head mk_decrypted_inodes;
+ spinlock_t mk_decrypted_inodes_lock;
+
+ /* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */
+ struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1];
+
+ /*
+ * Crypto API transforms for filesystem-layer implementation of
+ * IV_INO_LBLK_64 policies, allocated on-demand.
+ */
+ struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1];
+
+} __randomize_layout;
+
+static inline bool
+is_master_key_secret_present(const struct fscrypt_master_key_secret *secret)
+{
+ /*
+ * The READ_ONCE() is only necessary for fscrypt_drop_inode() and
+ * fscrypt_key_describe(). These run in atomic context, so they can't
+ * take ->mk_secret_sem and thus 'secret' can change concurrently which
+ * would be a data race. But they only need to know whether the secret
+ * *was* present at the time of check, so READ_ONCE() suffices.
+ */
+ return READ_ONCE(secret->size) != 0;
+}
+
+static inline const char *master_key_spec_type(
+ const struct fscrypt_key_specifier *spec)
+{
+ switch (spec->type) {
+ case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR:
+ return "descriptor";
+ case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER:
+ return "identifier";
+ }
+ return "[unknown]";
+}
+
+static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec)
+{
+ switch (spec->type) {
+ case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR:
+ return FSCRYPT_KEY_DESCRIPTOR_SIZE;
+ case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER:
+ return FSCRYPT_KEY_IDENTIFIER_SIZE;
+ }
+ return 0;
+}
+
+extern struct key *
+fscrypt_find_master_key(struct super_block *sb,
+ const struct fscrypt_key_specifier *mk_spec);
+
+extern int fscrypt_verify_key_added(struct super_block *sb,
+ const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]);
+
+extern int __init fscrypt_init_keyring(void);
+
+/* keysetup.c */
struct fscrypt_mode {
const char *friendly_name;
const char *cipher_str;
int keysize;
int ivsize;
- bool logged_impl_name;
- bool needs_essiv;
+ int logged_impl_name;
};
-extern void __exit fscrypt_essiv_cleanup(void);
+static inline bool
+fscrypt_mode_supports_direct_key(const struct fscrypt_mode *mode)
+{
+ return mode->ivsize >= offsetofend(union fscrypt_iv, nonce);
+}
+
+extern struct crypto_skcipher *
+fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
+ const struct inode *inode);
+
+extern int fscrypt_set_derived_key(struct fscrypt_info *ci,
+ const u8 *derived_key);
+
+/* keysetup_v1.c */
+
+extern void fscrypt_put_direct_key(struct fscrypt_direct_key *dk);
+
+extern int fscrypt_setup_v1_file_key(struct fscrypt_info *ci,
+ const u8 *raw_master_key);
+
+extern int fscrypt_setup_v1_file_key_via_subscribed_keyrings(
+ struct fscrypt_info *ci);
+/* policy.c */
+
+extern bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
+ const union fscrypt_policy *policy2);
+extern bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
+ const struct inode *inode);
+extern int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
+ const union fscrypt_context *ctx_u,
+ int ctx_size);
#endif /* _FSCRYPT_PRIVATE_H */
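As a usage sketch for the context/policy helpers declared above (assuming ctx has just been read from the inode's encryption xattr and xattr_len is the number of bytes returned; the function name is hypothetical):

	static int example_load_policy(const union fscrypt_context *ctx, int xattr_len,
				       union fscrypt_policy *policy)
	{
		/* rejects unknown versions (size 0) and truncated xattrs */
		if (xattr_len < 1 || xattr_len != fscrypt_context_size(ctx))
			return -EINVAL;

		return fscrypt_policy_from_context(policy, ctx, xattr_len);
	}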
diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c
new file mode 100644
index 000000000000..f21873e1b467
--- /dev/null
+++ b/fs/crypto/hkdf.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation
+ * Function"), aka RFC 5869. See also the original paper (Krawczyk 2010):
+ * "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
+ *
+ * This is used to derive keys from the fscrypt master keys.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+
+#include "fscrypt_private.h"
+
+/*
+ * HKDF supports any unkeyed cryptographic hash algorithm, but fscrypt uses
+ * SHA-512 because it is reasonably secure and efficient; and since it produces
+ * a 64-byte digest, deriving an AES-256-XTS key preserves all 64 bytes of
+ * entropy from the master key and requires only one iteration of HKDF-Expand.
+ */
+#define HKDF_HMAC_ALG "hmac(sha512)"
+#define HKDF_HASHLEN SHA512_DIGEST_SIZE
+
+/*
+ * HKDF consists of two steps:
+ *
+ * 1. HKDF-Extract: extract a pseudorandom key of length HKDF_HASHLEN bytes from
+ * the input keying material and optional salt.
+ * 2. HKDF-Expand: expand the pseudorandom key into output keying material of
+ * any length, parameterized by an application-specific info string.
+ *
+ * HKDF-Extract can be skipped if the input is already a pseudorandom key of
+ * length HKDF_HASHLEN bytes. However, cipher modes other than AES-256-XTS take
+ * shorter keys, and we don't want to force users of those modes to provide
+ * unnecessarily long master keys. Thus fscrypt still does HKDF-Extract. No
+ * salt is used, since fscrypt master keys should already be pseudorandom and
+ * there's no way to persist a random salt per master key from kernel mode.
+ */
+
+/* HKDF-Extract (RFC 5869 section 2.2), unsalted */
+static int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
+ unsigned int ikmlen, u8 prk[HKDF_HASHLEN])
+{
+ static const u8 default_salt[HKDF_HASHLEN];
+ SHASH_DESC_ON_STACK(desc, hmac_tfm);
+ int err;
+
+ err = crypto_shash_setkey(hmac_tfm, default_salt, HKDF_HASHLEN);
+ if (err)
+ return err;
+
+ desc->tfm = hmac_tfm;
+ err = crypto_shash_digest(desc, ikm, ikmlen, prk);
+ shash_desc_zero(desc);
+ return err;
+}
+
+/*
+ * Compute HKDF-Extract using the given master key as the input keying material,
+ * and prepare an HMAC transform object keyed by the resulting pseudorandom key.
+ *
+ * Afterwards, the keyed HMAC transform object can be used for HKDF-Expand many
+ * times without having to recompute HKDF-Extract each time.
+ */
+int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
+ unsigned int master_key_size)
+{
+ struct crypto_shash *hmac_tfm;
+ u8 prk[HKDF_HASHLEN];
+ int err;
+
+ hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, 0);
+ if (IS_ERR(hmac_tfm)) {
+ fscrypt_err(NULL, "Error allocating " HKDF_HMAC_ALG ": %ld",
+ PTR_ERR(hmac_tfm));
+ return PTR_ERR(hmac_tfm);
+ }
+
+ if (WARN_ON(crypto_shash_digestsize(hmac_tfm) != sizeof(prk))) {
+ err = -EINVAL;
+ goto err_free_tfm;
+ }
+
+ err = hkdf_extract(hmac_tfm, master_key, master_key_size, prk);
+ if (err)
+ goto err_free_tfm;
+
+ err = crypto_shash_setkey(hmac_tfm, prk, sizeof(prk));
+ if (err)
+ goto err_free_tfm;
+
+ hkdf->hmac_tfm = hmac_tfm;
+ goto out;
+
+err_free_tfm:
+ crypto_free_shash(hmac_tfm);
+out:
+ memzero_explicit(prk, sizeof(prk));
+ return err;
+}
+
+/*
+ * HKDF-Expand (RFC 5869 section 2.3). This expands the pseudorandom key, which
+ * was already keyed into 'hkdf->hmac_tfm' by fscrypt_init_hkdf(), into 'okmlen'
+ * bytes of output keying material parameterized by the application-specific
+ * 'info' of length 'infolen' bytes, prefixed by "fscrypt\0" and the 'context'
+ * byte. This is thread-safe and may be called by multiple threads in parallel.
+ *
+ * ('context' isn't part of the HKDF specification; it's just a prefix fscrypt
+ * adds to its application-specific info strings to guarantee that it doesn't
+ * accidentally repeat an info string when using HKDF for different purposes.)
+ */
+int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen)
+{
+ SHASH_DESC_ON_STACK(desc, hkdf->hmac_tfm);
+ u8 prefix[9];
+ unsigned int i;
+ int err;
+ const u8 *prev = NULL;
+ u8 counter = 1;
+ u8 tmp[HKDF_HASHLEN];
+
+ if (WARN_ON(okmlen > 255 * HKDF_HASHLEN))
+ return -EINVAL;
+
+ desc->tfm = hkdf->hmac_tfm;
+
+ memcpy(prefix, "fscrypt\0", 8);
+ prefix[8] = context;
+
+ for (i = 0; i < okmlen; i += HKDF_HASHLEN) {
+
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out;
+
+ if (prev) {
+ err = crypto_shash_update(desc, prev, HKDF_HASHLEN);
+ if (err)
+ goto out;
+ }
+
+ err = crypto_shash_update(desc, prefix, sizeof(prefix));
+ if (err)
+ goto out;
+
+ err = crypto_shash_update(desc, info, infolen);
+ if (err)
+ goto out;
+
+ BUILD_BUG_ON(sizeof(counter) != 1);
+ if (okmlen - i < HKDF_HASHLEN) {
+ err = crypto_shash_finup(desc, &counter, 1, tmp);
+ if (err)
+ goto out;
+ memcpy(&okm[i], tmp, okmlen - i);
+ memzero_explicit(tmp, sizeof(tmp));
+ } else {
+ err = crypto_shash_finup(desc, &counter, 1, &okm[i]);
+ if (err)
+ goto out;
+ }
+ counter++;
+ prev = &okm[i];
+ }
+ err = 0;
+out:
+ if (unlikely(err))
+ memzero_explicit(okm, okmlen); /* so caller doesn't need to */
+ shash_desc_zero(desc);
+ return err;
+}
+
+void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf)
+{
+ crypto_free_shash(hkdf->hmac_tfm);
+}
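A sketch of how the two entry points above combine to derive a per-file key (assuming secret is an unwrapped master key secret and ci is the fscrypt_info of a v2-policy inode; the real keysetup code keeps the keyed hkdf around for later files rather than tearing it down here):

	static int example_derive_per_file_key(struct fscrypt_master_key_secret *secret,
					       const struct fscrypt_info *ci,
					       u8 *derived_key)
	{
		int err;

		/* HKDF-Extract + key the HMAC transform; done once per master key */
		err = fscrypt_init_hkdf(&secret->hkdf, secret->raw, secret->size);
		if (err)
			return err;

		/* HKDF-Expand with info = "fscrypt\0" + context byte + file nonce */
		err = fscrypt_hkdf_expand(&secret->hkdf, HKDF_CONTEXT_PER_FILE_KEY,
					  ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE,
					  derived_key, ci->ci_mode->keysize);

		fscrypt_destroy_hkdf(&secret->hkdf);
		return err;
	}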
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index c1d6715d88e9..bb3b7fcfdd48 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -39,9 +39,9 @@ int fscrypt_file_open(struct inode *inode, struct file *filp)
dir = dget_parent(file_dentry(filp));
if (IS_ENCRYPTED(d_inode(dir)) &&
!fscrypt_has_permitted_context(d_inode(dir), inode)) {
- fscrypt_warn(inode->i_sb,
- "inconsistent encryption contexts: %lu/%lu",
- d_inode(dir)->i_ino, inode->i_ino);
+ fscrypt_warn(inode,
+ "Inconsistent encryption context (parent directory: %lu)",
+ d_inode(dir)->i_ino);
err = -EPERM;
}
dput(dir);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
deleted file mode 100644
index 207ebed918c1..000000000000
--- a/fs/crypto/keyinfo.c
+++ /dev/null
@@ -1,611 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * key management facility for FS encryption support.
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * This contains encryption key functions.
- *
- * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
- */
-
-#include <keys/user-type.h>
-#include <linux/hashtable.h>
-#include <linux/scatterlist.h>
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <crypto/sha.h>
-#include <crypto/skcipher.h>
-#include "fscrypt_private.h"
-
-static struct crypto_shash *essiv_hash_tfm;
-
-/* Table of keys referenced by FS_POLICY_FLAG_DIRECT_KEY policies */
-static DEFINE_HASHTABLE(fscrypt_master_keys, 6); /* 6 bits = 64 buckets */
-static DEFINE_SPINLOCK(fscrypt_master_keys_lock);
-
-/*
- * Key derivation function. This generates the derived key by encrypting the
- * master key with AES-128-ECB using the inode's nonce as the AES key.
- *
- * The master key must be at least as long as the derived key. If the master
- * key is longer, then only the first 'derived_keysize' bytes are used.
- */
-static int derive_key_aes(const u8 *master_key,
- const struct fscrypt_context *ctx,
- u8 *derived_key, unsigned int derived_keysize)
-{
- int res = 0;
- struct skcipher_request *req = NULL;
- DECLARE_CRYPTO_WAIT(wait);
- struct scatterlist src_sg, dst_sg;
- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
-
- if (IS_ERR(tfm)) {
- res = PTR_ERR(tfm);
- tfm = NULL;
- goto out;
- }
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- req = skcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- res = -ENOMEM;
- goto out;
- }
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &wait);
- res = crypto_skcipher_setkey(tfm, ctx->nonce, sizeof(ctx->nonce));
- if (res < 0)
- goto out;
-
- sg_init_one(&src_sg, master_key, derived_keysize);
- sg_init_one(&dst_sg, derived_key, derived_keysize);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg, derived_keysize,
- NULL);
- res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
-out:
- skcipher_request_free(req);
- crypto_free_skcipher(tfm);
- return res;
-}
-
-/*
- * Search the current task's subscribed keyrings for a "logon" key with
- * description prefix:descriptor, and if found acquire a read lock on it and
- * return a pointer to its validated payload in *payload_ret.
- */
-static struct key *
-find_and_lock_process_key(const char *prefix,
- const u8 descriptor[FS_KEY_DESCRIPTOR_SIZE],
- unsigned int min_keysize,
- const struct fscrypt_key **payload_ret)
-{
- char *description;
- struct key *key;
- const struct user_key_payload *ukp;
- const struct fscrypt_key *payload;
-
- description = kasprintf(GFP_NOFS, "%s%*phN", prefix,
- FS_KEY_DESCRIPTOR_SIZE, descriptor);
- if (!description)
- return ERR_PTR(-ENOMEM);
-
- key = request_key(&key_type_logon, description, NULL);
- kfree(description);
- if (IS_ERR(key))
- return key;
-
- down_read(&key->sem);
- ukp = user_key_payload_locked(key);
-
- if (!ukp) /* was the key revoked before we acquired its semaphore? */
- goto invalid;
-
- payload = (const struct fscrypt_key *)ukp->data;
-
- if (ukp->datalen != sizeof(struct fscrypt_key) ||
- payload->size < 1 || payload->size > FS_MAX_KEY_SIZE) {
- fscrypt_warn(NULL,
- "key with description '%s' has invalid payload",
- key->description);
- goto invalid;
- }
-
- if (payload->size < min_keysize) {
- fscrypt_warn(NULL,
- "key with description '%s' is too short (got %u bytes, need %u+ bytes)",
- key->description, payload->size, min_keysize);
- goto invalid;
- }
-
- *payload_ret = payload;
- return key;
-
-invalid:
- up_read(&key->sem);
- key_put(key);
- return ERR_PTR(-ENOKEY);
-}
-
-static struct fscrypt_mode available_modes[] = {
- [FS_ENCRYPTION_MODE_AES_256_XTS] = {
- .friendly_name = "AES-256-XTS",
- .cipher_str = "xts(aes)",
- .keysize = 64,
- .ivsize = 16,
- },
- [FS_ENCRYPTION_MODE_AES_256_CTS] = {
- .friendly_name = "AES-256-CTS-CBC",
- .cipher_str = "cts(cbc(aes))",
- .keysize = 32,
- .ivsize = 16,
- },
- [FS_ENCRYPTION_MODE_AES_128_CBC] = {
- .friendly_name = "AES-128-CBC",
- .cipher_str = "cbc(aes)",
- .keysize = 16,
- .ivsize = 16,
- .needs_essiv = true,
- },
- [FS_ENCRYPTION_MODE_AES_128_CTS] = {
- .friendly_name = "AES-128-CTS-CBC",
- .cipher_str = "cts(cbc(aes))",
- .keysize = 16,
- .ivsize = 16,
- },
- [FS_ENCRYPTION_MODE_ADIANTUM] = {
- .friendly_name = "Adiantum",
- .cipher_str = "adiantum(xchacha12,aes)",
- .keysize = 32,
- .ivsize = 32,
- },
-};
-
-static struct fscrypt_mode *
-select_encryption_mode(const struct fscrypt_info *ci, const struct inode *inode)
-{
- if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
- fscrypt_warn(inode->i_sb,
- "inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)",
- inode->i_ino, ci->ci_data_mode,
- ci->ci_filename_mode);
- return ERR_PTR(-EINVAL);
- }
-
- if (S_ISREG(inode->i_mode))
- return &available_modes[ci->ci_data_mode];
-
- if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- return &available_modes[ci->ci_filename_mode];
-
- WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
- inode->i_ino, (inode->i_mode & S_IFMT));
- return ERR_PTR(-EINVAL);
-}
-
-/* Find the master key, then derive the inode's actual encryption key */
-static int find_and_derive_key(const struct inode *inode,
- const struct fscrypt_context *ctx,
- u8 *derived_key, const struct fscrypt_mode *mode)
-{
- struct key *key;
- const struct fscrypt_key *payload;
- int err;
-
- key = find_and_lock_process_key(FS_KEY_DESC_PREFIX,
- ctx->master_key_descriptor,
- mode->keysize, &payload);
- if (key == ERR_PTR(-ENOKEY) && inode->i_sb->s_cop->key_prefix) {
- key = find_and_lock_process_key(inode->i_sb->s_cop->key_prefix,
- ctx->master_key_descriptor,
- mode->keysize, &payload);
- }
- if (IS_ERR(key))
- return PTR_ERR(key);
-
- if (ctx->flags & FS_POLICY_FLAG_DIRECT_KEY) {
- if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) {
- fscrypt_warn(inode->i_sb,
- "direct key mode not allowed with %s",
- mode->friendly_name);
- err = -EINVAL;
- } else if (ctx->contents_encryption_mode !=
- ctx->filenames_encryption_mode) {
- fscrypt_warn(inode->i_sb,
- "direct key mode not allowed with different contents and filenames modes");
- err = -EINVAL;
- } else {
- memcpy(derived_key, payload->raw, mode->keysize);
- err = 0;
- }
- } else {
- err = derive_key_aes(payload->raw, ctx, derived_key,
- mode->keysize);
- }
- up_read(&key->sem);
- key_put(key);
- return err;
-}
-
-/* Allocate and key a symmetric cipher object for the given encryption mode */
-static struct crypto_skcipher *
-allocate_skcipher_for_mode(struct fscrypt_mode *mode, const u8 *raw_key,
- const struct inode *inode)
-{
- struct crypto_skcipher *tfm;
- int err;
-
- tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
- if (IS_ERR(tfm)) {
- fscrypt_warn(inode->i_sb,
- "error allocating '%s' transform for inode %lu: %ld",
- mode->cipher_str, inode->i_ino, PTR_ERR(tfm));
- return tfm;
- }
- if (unlikely(!mode->logged_impl_name)) {
- /*
- * fscrypt performance can vary greatly depending on which
- * crypto algorithm implementation is used. Help people debug
- * performance problems by logging the ->cra_driver_name the
- * first time a mode is used. Note that multiple threads can
- * race here, but it doesn't really matter.
- */
- mode->logged_impl_name = true;
- pr_info("fscrypt: %s using implementation \"%s\"\n",
- mode->friendly_name,
- crypto_skcipher_alg(tfm)->base.cra_driver_name);
- }
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize);
- if (err)
- goto err_free_tfm;
-
- return tfm;
-
-err_free_tfm:
- crypto_free_skcipher(tfm);
- return ERR_PTR(err);
-}
-
-/* Master key referenced by FS_POLICY_FLAG_DIRECT_KEY policy */
-struct fscrypt_master_key {
- struct hlist_node mk_node;
- refcount_t mk_refcount;
- const struct fscrypt_mode *mk_mode;
- struct crypto_skcipher *mk_ctfm;
- u8 mk_descriptor[FS_KEY_DESCRIPTOR_SIZE];
- u8 mk_raw[FS_MAX_KEY_SIZE];
-};
-
-static void free_master_key(struct fscrypt_master_key *mk)
-{
- if (mk) {
- crypto_free_skcipher(mk->mk_ctfm);
- kzfree(mk);
- }
-}
-
-static void put_master_key(struct fscrypt_master_key *mk)
-{
- if (!refcount_dec_and_lock(&mk->mk_refcount, &fscrypt_master_keys_lock))
- return;
- hash_del(&mk->mk_node);
- spin_unlock(&fscrypt_master_keys_lock);
-
- free_master_key(mk);
-}
-
-/*
- * Find/insert the given master key into the fscrypt_master_keys table. If
- * found, it is returned with elevated refcount, and 'to_insert' is freed if
- * non-NULL. If not found, 'to_insert' is inserted and returned if it's
- * non-NULL; otherwise NULL is returned.
- */
-static struct fscrypt_master_key *
-find_or_insert_master_key(struct fscrypt_master_key *to_insert,
- const u8 *raw_key, const struct fscrypt_mode *mode,
- const struct fscrypt_info *ci)
-{
- unsigned long hash_key;
- struct fscrypt_master_key *mk;
-
- /*
- * Careful: to avoid potentially leaking secret key bytes via timing
- * information, we must key the hash table by descriptor rather than by
- * raw key, and use crypto_memneq() when comparing raw keys.
- */
-
- BUILD_BUG_ON(sizeof(hash_key) > FS_KEY_DESCRIPTOR_SIZE);
- memcpy(&hash_key, ci->ci_master_key_descriptor, sizeof(hash_key));
-
- spin_lock(&fscrypt_master_keys_lock);
- hash_for_each_possible(fscrypt_master_keys, mk, mk_node, hash_key) {
- if (memcmp(ci->ci_master_key_descriptor, mk->mk_descriptor,
- FS_KEY_DESCRIPTOR_SIZE) != 0)
- continue;
- if (mode != mk->mk_mode)
- continue;
- if (crypto_memneq(raw_key, mk->mk_raw, mode->keysize))
- continue;
- /* using existing tfm with same (descriptor, mode, raw_key) */
- refcount_inc(&mk->mk_refcount);
- spin_unlock(&fscrypt_master_keys_lock);
- free_master_key(to_insert);
- return mk;
- }
- if (to_insert)
- hash_add(fscrypt_master_keys, &to_insert->mk_node, hash_key);
- spin_unlock(&fscrypt_master_keys_lock);
- return to_insert;
-}
-
-/* Prepare to encrypt directly using the master key in the given mode */
-static struct fscrypt_master_key *
-fscrypt_get_master_key(const struct fscrypt_info *ci, struct fscrypt_mode *mode,
- const u8 *raw_key, const struct inode *inode)
-{
- struct fscrypt_master_key *mk;
- int err;
-
- /* Is there already a tfm for this key? */
- mk = find_or_insert_master_key(NULL, raw_key, mode, ci);
- if (mk)
- return mk;
-
- /* Nope, allocate one. */
- mk = kzalloc(sizeof(*mk), GFP_NOFS);
- if (!mk)
- return ERR_PTR(-ENOMEM);
- refcount_set(&mk->mk_refcount, 1);
- mk->mk_mode = mode;
- mk->mk_ctfm = allocate_skcipher_for_mode(mode, raw_key, inode);
- if (IS_ERR(mk->mk_ctfm)) {
- err = PTR_ERR(mk->mk_ctfm);
- mk->mk_ctfm = NULL;
- goto err_free_mk;
- }
- memcpy(mk->mk_descriptor, ci->ci_master_key_descriptor,
- FS_KEY_DESCRIPTOR_SIZE);
- memcpy(mk->mk_raw, raw_key, mode->keysize);
-
- return find_or_insert_master_key(mk, raw_key, mode, ci);
-
-err_free_mk:
- free_master_key(mk);
- return ERR_PTR(err);
-}
-
-static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
-{
- struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);
-
- /* init hash transform on demand */
- if (unlikely(!tfm)) {
- struct crypto_shash *prev_tfm;
-
- tfm = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(tfm)) {
- fscrypt_warn(NULL,
- "error allocating SHA-256 transform: %ld",
- PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
- prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
- if (prev_tfm) {
- crypto_free_shash(tfm);
- tfm = prev_tfm;
- }
- }
-
- {
- SHASH_DESC_ON_STACK(desc, tfm);
- desc->tfm = tfm;
-
- return crypto_shash_digest(desc, key, keysize, salt);
- }
-}
-
-static int init_essiv_generator(struct fscrypt_info *ci, const u8 *raw_key,
- int keysize)
-{
- int err;
- struct crypto_cipher *essiv_tfm;
- u8 salt[SHA256_DIGEST_SIZE];
-
- essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(essiv_tfm))
- return PTR_ERR(essiv_tfm);
-
- ci->ci_essiv_tfm = essiv_tfm;
-
- err = derive_essiv_salt(raw_key, keysize, salt);
- if (err)
- goto out;
-
- /*
- * Using SHA256 to derive the salt/key will result in AES-256 being
- * used for IV generation. File contents encryption will still use the
- * configured keysize (AES-128) nevertheless.
- */
- err = crypto_cipher_setkey(essiv_tfm, salt, sizeof(salt));
- if (err)
- goto out;
-
-out:
- memzero_explicit(salt, sizeof(salt));
- return err;
-}
-
-void __exit fscrypt_essiv_cleanup(void)
-{
- crypto_free_shash(essiv_hash_tfm);
-}
-
-/*
- * Given the encryption mode and key (normally the derived key, but for
- * FS_POLICY_FLAG_DIRECT_KEY mode it's the master key), set up the inode's
- * symmetric cipher transform object(s).
- */
-static int setup_crypto_transform(struct fscrypt_info *ci,
- struct fscrypt_mode *mode,
- const u8 *raw_key, const struct inode *inode)
-{
- struct fscrypt_master_key *mk;
- struct crypto_skcipher *ctfm;
- int err;
-
- if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY) {
- mk = fscrypt_get_master_key(ci, mode, raw_key, inode);
- if (IS_ERR(mk))
- return PTR_ERR(mk);
- ctfm = mk->mk_ctfm;
- } else {
- mk = NULL;
- ctfm = allocate_skcipher_for_mode(mode, raw_key, inode);
- if (IS_ERR(ctfm))
- return PTR_ERR(ctfm);
- }
- ci->ci_master_key = mk;
- ci->ci_ctfm = ctfm;
-
- if (mode->needs_essiv) {
- /* ESSIV implies 16-byte IVs which implies !DIRECT_KEY */
- WARN_ON(mode->ivsize != AES_BLOCK_SIZE);
- WARN_ON(ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY);
-
- err = init_essiv_generator(ci, raw_key, mode->keysize);
- if (err) {
- fscrypt_warn(inode->i_sb,
- "error initializing ESSIV generator for inode %lu: %d",
- inode->i_ino, err);
- return err;
- }
- }
- return 0;
-}
-
-static void put_crypt_info(struct fscrypt_info *ci)
-{
- if (!ci)
- return;
-
- if (ci->ci_master_key) {
- put_master_key(ci->ci_master_key);
- } else {
- crypto_free_skcipher(ci->ci_ctfm);
- crypto_free_cipher(ci->ci_essiv_tfm);
- }
- kmem_cache_free(fscrypt_info_cachep, ci);
-}
-
-int fscrypt_get_encryption_info(struct inode *inode)
-{
- struct fscrypt_info *crypt_info;
- struct fscrypt_context ctx;
- struct fscrypt_mode *mode;
- u8 *raw_key = NULL;
- int res;
-
- if (fscrypt_has_encryption_key(inode))
- return 0;
-
- res = fscrypt_initialize(inode->i_sb->s_cop->flags);
- if (res)
- return res;
-
- res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
- if (res < 0) {
- if (!fscrypt_dummy_context_enabled(inode) ||
- IS_ENCRYPTED(inode))
- return res;
- /* Fake up a context for an unencrypted directory */
- memset(&ctx, 0, sizeof(ctx));
- ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
- ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
- ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
- memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
- } else if (res != sizeof(ctx)) {
- return -EINVAL;
- }
-
- if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
- return -EINVAL;
-
- if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
- return -EINVAL;
-
- crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_NOFS);
- if (!crypt_info)
- return -ENOMEM;
-
- crypt_info->ci_flags = ctx.flags;
- crypt_info->ci_data_mode = ctx.contents_encryption_mode;
- crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
- memcpy(crypt_info->ci_master_key_descriptor, ctx.master_key_descriptor,
- FS_KEY_DESCRIPTOR_SIZE);
- memcpy(crypt_info->ci_nonce, ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
-
- mode = select_encryption_mode(crypt_info, inode);
- if (IS_ERR(mode)) {
- res = PTR_ERR(mode);
- goto out;
- }
- WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
- crypt_info->ci_mode = mode;
-
- /*
- * This cannot be a stack buffer because it may be passed to the
- * scatterlist crypto API as part of key derivation.
- */
- res = -ENOMEM;
- raw_key = kmalloc(mode->keysize, GFP_NOFS);
- if (!raw_key)
- goto out;
-
- res = find_and_derive_key(inode, &ctx, raw_key, mode);
- if (res)
- goto out;
-
- res = setup_crypto_transform(crypt_info, mode, raw_key, inode);
- if (res)
- goto out;
-
- if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL)
- crypt_info = NULL;
-out:
- if (res == -ENOKEY)
- res = 0;
- put_crypt_info(crypt_info);
- kzfree(raw_key);
- return res;
-}
-EXPORT_SYMBOL(fscrypt_get_encryption_info);
-
-/**
- * fscrypt_put_encryption_info - free most of an inode's fscrypt data
- *
- * Free the inode's fscrypt_info. Filesystems must call this when the inode is
- * being evicted. An RCU grace period need not have elapsed yet.
- */
-void fscrypt_put_encryption_info(struct inode *inode)
-{
- put_crypt_info(inode->i_crypt_info);
- inode->i_crypt_info = NULL;
-}
-EXPORT_SYMBOL(fscrypt_put_encryption_info);
-
-/**
- * fscrypt_free_inode - free an inode's fscrypt data requiring RCU delay
- *
- * Free the inode's cached decrypted symlink target, if any. Filesystems must
- * call this after an RCU grace period, just before they free the inode.
- */
-void fscrypt_free_inode(struct inode *inode)
-{
- if (IS_ENCRYPTED(inode) && S_ISLNK(inode->i_mode)) {
- kfree(inode->i_link);
- inode->i_link = NULL;
- }
-}
-EXPORT_SYMBOL(fscrypt_free_inode);
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
new file mode 100644
index 000000000000..040df1f5e1c8
--- /dev/null
+++ b/fs/crypto/keyring.c
@@ -0,0 +1,986 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Filesystem-level keyring for fscrypt
+ *
+ * Copyright 2019 Google LLC
+ */
+
+/*
+ * This file implements management of fscrypt master keys in the
+ * filesystem-level keyring, including the ioctls:
+ *
+ * - FS_IOC_ADD_ENCRYPTION_KEY
+ * - FS_IOC_REMOVE_ENCRYPTION_KEY
+ * - FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS
+ * - FS_IOC_GET_ENCRYPTION_KEY_STATUS
+ *
+ * See the "User API" section of Documentation/filesystems/fscrypt.rst for more
+ * information about these ioctls.
+ */
+
+#include <crypto/skcipher.h>
+#include <linux/key-type.h>
+#include <linux/seq_file.h>
+
+#include "fscrypt_private.h"
+
+static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret)
+{
+ fscrypt_destroy_hkdf(&secret->hkdf);
+ memzero_explicit(secret, sizeof(*secret));
+}
+
+static void move_master_key_secret(struct fscrypt_master_key_secret *dst,
+ struct fscrypt_master_key_secret *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+ memzero_explicit(src, sizeof(*src));
+}
+
+static void free_master_key(struct fscrypt_master_key *mk)
+{
+ size_t i;
+
+ wipe_master_key_secret(&mk->mk_secret);
+
+ for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) {
+ crypto_free_skcipher(mk->mk_direct_tfms[i]);
+ crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]);
+ }
+
+ key_put(mk->mk_users);
+ kzfree(mk);
+}
+
+static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec)
+{
+ if (spec->__reserved)
+ return false;
+ return master_key_spec_len(spec) != 0;
+}
+
+static int fscrypt_key_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ key->payload.data[0] = (struct fscrypt_master_key *)prep->data;
+ return 0;
+}
+
+static void fscrypt_key_destroy(struct key *key)
+{
+ free_master_key(key->payload.data[0]);
+}
+
+static void fscrypt_key_describe(const struct key *key, struct seq_file *m)
+{
+ seq_puts(m, key->description);
+
+ if (key_is_positive(key)) {
+ const struct fscrypt_master_key *mk = key->payload.data[0];
+
+ if (!is_master_key_secret_present(&mk->mk_secret))
+ seq_puts(m, ": secret removed");
+ }
+}
+
+/*
+ * Type of key in ->s_master_keys. Each key of this type represents a master
+ * key which has been added to the filesystem. Its payload is a
+ * 'struct fscrypt_master_key'. The "." prefix in the key type name prevents
+ * users from adding keys of this type via the keyrings syscalls rather than via
+ * the intended method of FS_IOC_ADD_ENCRYPTION_KEY.
+ */
+static struct key_type key_type_fscrypt = {
+ .name = "._fscrypt",
+ .instantiate = fscrypt_key_instantiate,
+ .destroy = fscrypt_key_destroy,
+ .describe = fscrypt_key_describe,
+};
+
+static int fscrypt_user_key_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ /*
+ * We just charge FSCRYPT_MAX_KEY_SIZE bytes to the user's key quota for
+ * each key, regardless of the exact key size. The amount of memory
+ * actually used is greater than the size of the raw key anyway.
+ */
+ return key_payload_reserve(key, FSCRYPT_MAX_KEY_SIZE);
+}
+
+static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m)
+{
+ seq_puts(m, key->description);
+}
+
+/*
+ * Type of key in ->mk_users. Each key of this type represents a particular
+ * user who has added a particular master key.
+ *
+ * Note that the name of this key type really should be something like
+ * ".fscrypt-user" instead of simply ".fscrypt". But the shorter name is chosen
+ * mainly for simplicity of presentation in /proc/keys when read by a non-root
+ * user. And it is expected to be rare that a key is actually added by multiple
+ * users, since users should keep their encryption keys confidential.
+ */
+static struct key_type key_type_fscrypt_user = {
+ .name = ".fscrypt",
+ .instantiate = fscrypt_user_key_instantiate,
+ .describe = fscrypt_user_key_describe,
+};
+
+/* Search ->s_master_keys or ->mk_users */
+static struct key *search_fscrypt_keyring(struct key *keyring,
+ struct key_type *type,
+ const char *description)
+{
+ /*
+ * We need to mark the keyring reference as "possessed" so that we
+ * acquire permission to search it, via the KEY_POS_SEARCH permission.
+ */
+ key_ref_t keyref = make_key_ref(keyring, true /* possessed */);
+
+ keyref = keyring_search(keyref, type, description, false);
+ if (IS_ERR(keyref)) {
+ if (PTR_ERR(keyref) == -EAGAIN || /* not found */
+ PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */
+ keyref = ERR_PTR(-ENOKEY);
+ return ERR_CAST(keyref);
+ }
+ return key_ref_to_ptr(keyref);
+}
+
+#define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \
+ (CONST_STRLEN("fscrypt-") + FIELD_SIZEOF(struct super_block, s_id))
+
+#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1)
+
+#define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \
+ (CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \
+ CONST_STRLEN("-users") + 1)
+
+#define FSCRYPT_MK_USER_DESCRIPTION_SIZE \
+ (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1)
+
+static void format_fs_keyring_description(
+ char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE],
+ const struct super_block *sb)
+{
+ sprintf(description, "fscrypt-%s", sb->s_id);
+}
+
+static void format_mk_description(
+ char description[FSCRYPT_MK_DESCRIPTION_SIZE],
+ const struct fscrypt_key_specifier *mk_spec)
+{
+ sprintf(description, "%*phN",
+ master_key_spec_len(mk_spec), (u8 *)&mk_spec->u);
+}
+
+static void format_mk_users_keyring_description(
+ char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE],
+ const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
+{
+ sprintf(description, "fscrypt-%*phN-users",
+ FSCRYPT_KEY_IDENTIFIER_SIZE, mk_identifier);
+}
+
+static void format_mk_user_description(
+ char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE],
+ const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
+{
+ sprintf(description, "%*phN.uid.%u", FSCRYPT_KEY_IDENTIFIER_SIZE,
+ mk_identifier, __kuid_val(current_fsuid()));
+}
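+
+/*
+ * For illustration (device name, key bytes, and uid below are made up), the
+ * resulting descriptions look like: "fscrypt-sda1" for the filesystem keyring,
+ * "23a9a5321cd84df2" (hex of the descriptor or identifier) for a master key,
+ * "fscrypt-<identifier hex>-users" for its ->mk_users keyring, and
+ * "<identifier hex>.uid.1000" for a per-user key.
+ */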
+
+/* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */
+static int allocate_filesystem_keyring(struct super_block *sb)
+{
+ char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE];
+ struct key *keyring;
+
+ if (sb->s_master_keys)
+ return 0;
+
+ format_fs_keyring_description(description, sb);
+ keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ current_cred(), KEY_POS_SEARCH |
+ KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW,
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ /* Pairs with READ_ONCE() in fscrypt_find_master_key() */
+ smp_store_release(&sb->s_master_keys, keyring);
+ return 0;
+}
+
+void fscrypt_sb_free(struct super_block *sb)
+{
+ key_put(sb->s_master_keys);
+ sb->s_master_keys = NULL;
+}
+
+/*
+ * Find the specified master key in ->s_master_keys.
+ * Returns ERR_PTR(-ENOKEY) if not found.
+ */
+struct key *fscrypt_find_master_key(struct super_block *sb,
+ const struct fscrypt_key_specifier *mk_spec)
+{
+ struct key *keyring;
+ char description[FSCRYPT_MK_DESCRIPTION_SIZE];
+
+ /* pairs with smp_store_release() in allocate_filesystem_keyring() */
+ keyring = READ_ONCE(sb->s_master_keys);
+ if (keyring == NULL)
+ return ERR_PTR(-ENOKEY); /* No keyring yet, so no keys yet. */
+
+ format_mk_description(description, mk_spec);
+ return search_fscrypt_keyring(keyring, &key_type_fscrypt, description);
+}
+
+static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk)
+{
+ char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE];
+ struct key *keyring;
+
+ format_mk_users_keyring_description(description,
+ mk->mk_spec.u.identifier);
+ keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ current_cred(), KEY_POS_SEARCH |
+ KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW,
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ mk->mk_users = keyring;
+ return 0;
+}
+
+/*
+ * Find the current user's "key" in the master key's ->mk_users.
+ * Returns ERR_PTR(-ENOKEY) if not found.
+ */
+static struct key *find_master_key_user(struct fscrypt_master_key *mk)
+{
+ char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE];
+
+ format_mk_user_description(description, mk->mk_spec.u.identifier);
+ return search_fscrypt_keyring(mk->mk_users, &key_type_fscrypt_user,
+ description);
+}
+
+/*
+ * Give the current user a "key" in ->mk_users. This charges the user's quota
+ * and marks the master key as added by the current user, so that it cannot be
+ * removed by another user with the key. Either the master key's key->sem must
+ * be held for write, or the master key must be still undergoing initialization.
+ */
+static int add_master_key_user(struct fscrypt_master_key *mk)
+{
+ char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE];
+ struct key *mk_user;
+ int err;
+
+ format_mk_user_description(description, mk->mk_spec.u.identifier);
+ mk_user = key_alloc(&key_type_fscrypt_user, description,
+ current_fsuid(), current_gid(), current_cred(),
+ KEY_POS_SEARCH | KEY_USR_VIEW, 0, NULL);
+ if (IS_ERR(mk_user))
+ return PTR_ERR(mk_user);
+
+ err = key_instantiate_and_link(mk_user, NULL, 0, mk->mk_users, NULL);
+ key_put(mk_user);
+ return err;
+}
+
+/*
+ * Remove the current user's "key" from ->mk_users.
+ * The master key's key->sem must be held for write.
+ *
+ * Returns 0 if removed, -ENOKEY if not found, or another -errno code.
+ */
+static int remove_master_key_user(struct fscrypt_master_key *mk)
+{
+ struct key *mk_user;
+ int err;
+
+ mk_user = find_master_key_user(mk);
+ if (IS_ERR(mk_user))
+ return PTR_ERR(mk_user);
+ err = key_unlink(mk->mk_users, mk_user);
+ key_put(mk_user);
+ return err;
+}
+
+/*
+ * Allocate a new fscrypt_master_key which contains the given secret, set it as
+ * the payload of a new 'struct key' of type fscrypt, and link the 'struct key'
+ * into the given keyring. Synchronized by fscrypt_add_key_mutex.
+ */
+static int add_new_master_key(struct fscrypt_master_key_secret *secret,
+ const struct fscrypt_key_specifier *mk_spec,
+ struct key *keyring)
+{
+ struct fscrypt_master_key *mk;
+ char description[FSCRYPT_MK_DESCRIPTION_SIZE];
+ struct key *key;
+ int err;
+
+ mk = kzalloc(sizeof(*mk), GFP_KERNEL);
+ if (!mk)
+ return -ENOMEM;
+
+ mk->mk_spec = *mk_spec;
+
+ move_master_key_secret(&mk->mk_secret, secret);
+ init_rwsem(&mk->mk_secret_sem);
+
+ refcount_set(&mk->mk_refcount, 1); /* secret is present */
+ INIT_LIST_HEAD(&mk->mk_decrypted_inodes);
+ spin_lock_init(&mk->mk_decrypted_inodes_lock);
+
+ if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) {
+ err = allocate_master_key_users_keyring(mk);
+ if (err)
+ goto out_free_mk;
+ err = add_master_key_user(mk);
+ if (err)
+ goto out_free_mk;
+ }
+
+ /*
+ * Note that we don't charge this key to anyone's quota, since when
+ * ->mk_users is in use those keys are charged instead, and otherwise
+ * (when ->mk_users isn't in use) only root can add these keys.
+ */
+ format_mk_description(description, mk_spec);
+ key = key_alloc(&key_type_fscrypt, description,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
+ KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_VIEW,
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(key)) {
+ err = PTR_ERR(key);
+ goto out_free_mk;
+ }
+ err = key_instantiate_and_link(key, mk, sizeof(*mk), keyring, NULL);
+ key_put(key);
+ if (err)
+ goto out_free_mk;
+
+ return 0;
+
+out_free_mk:
+ free_master_key(mk);
+ return err;
+}
+
+#define KEY_DEAD 1
+
+static int add_existing_master_key(struct fscrypt_master_key *mk,
+ struct fscrypt_master_key_secret *secret)
+{
+ struct key *mk_user;
+ bool rekey;
+ int err;
+
+ /*
+ * If the current user is already in ->mk_users, then there's nothing to
+ * do. (Not applicable for v1 policy keys, which have NULL ->mk_users.)
+ */
+ if (mk->mk_users) {
+ mk_user = find_master_key_user(mk);
+ if (mk_user != ERR_PTR(-ENOKEY)) {
+ if (IS_ERR(mk_user))
+ return PTR_ERR(mk_user);
+ key_put(mk_user);
+ return 0;
+ }
+ }
+
+ /* If we'll be re-adding ->mk_secret, try to take the reference. */
+ rekey = !is_master_key_secret_present(&mk->mk_secret);
+ if (rekey && !refcount_inc_not_zero(&mk->mk_refcount))
+ return KEY_DEAD;
+
+ /* Add the current user to ->mk_users, if applicable. */
+ if (mk->mk_users) {
+ err = add_master_key_user(mk);
+ if (err) {
+ if (rekey && refcount_dec_and_test(&mk->mk_refcount))
+ return KEY_DEAD;
+ return err;
+ }
+ }
+
+ /* Re-add the secret if needed. */
+ if (rekey) {
+ down_write(&mk->mk_secret_sem);
+ move_master_key_secret(&mk->mk_secret, secret);
+ up_write(&mk->mk_secret_sem);
+ }
+ return 0;
+}
+
+static int add_master_key(struct super_block *sb,
+ struct fscrypt_master_key_secret *secret,
+ const struct fscrypt_key_specifier *mk_spec)
+{
+ static DEFINE_MUTEX(fscrypt_add_key_mutex);
+ struct key *key;
+ int err;
+
+ mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */
+retry:
+ key = fscrypt_find_master_key(sb, mk_spec);
+ if (IS_ERR(key)) {
+ err = PTR_ERR(key);
+ if (err != -ENOKEY)
+ goto out_unlock;
+ /* Didn't find the key in ->s_master_keys. Add it. */
+ err = allocate_filesystem_keyring(sb);
+ if (err)
+ goto out_unlock;
+ err = add_new_master_key(secret, mk_spec, sb->s_master_keys);
+ } else {
+ /*
+ * Found the key in ->s_master_keys. Re-add the secret if
+ * needed, and add the user to ->mk_users if needed.
+ */
+ down_write(&key->sem);
+ err = add_existing_master_key(key->payload.data[0], secret);
+ up_write(&key->sem);
+ if (err == KEY_DEAD) {
+ /* Key being removed or needs to be removed */
+ key_invalidate(key);
+ key_put(key);
+ goto retry;
+ }
+ key_put(key);
+ }
+out_unlock:
+ mutex_unlock(&fscrypt_add_key_mutex);
+ return err;
+}
+
+/*
+ * Add a master encryption key to the filesystem, causing all files which were
+ * encrypted with it to appear "unlocked" (decrypted) when accessed.
+ *
+ * When adding a key for use by v1 encryption policies, this ioctl is
+ * privileged, and userspace must provide the 'key_descriptor'.
+ *
+ * When adding a key for use by v2+ encryption policies, this ioctl is
+ * unprivileged. This is needed, in general, to allow non-root users to use
+ * encryption without encountering the visibility problems of process-subscribed
+ * keyrings and the inability to properly remove keys. This works by having
+ * each key identified by its cryptographically secure hash --- the
+ * 'key_identifier'. The cryptographic hash ensures that a malicious user
+ * cannot add the wrong key for a given identifier. Furthermore, each added key
+ * is charged to the appropriate user's quota for the keyrings service, which
+ * prevents a malicious user from adding too many keys. Finally, we forbid a
+ * user from removing a key while other users have added it too, which prevents
+ * a user who knows another user's key from causing a denial-of-service by
+ * removing it at an inopportune time. (We tolerate that a user who knows a key
+ * can prevent other users from removing it.)
+ *
+ * For more details, see the "FS_IOC_ADD_ENCRYPTION_KEY" section of
+ * Documentation/filesystems/fscrypt.rst.
+ */
+int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
+{
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct fscrypt_add_key_arg __user *uarg = _uarg;
+ struct fscrypt_add_key_arg arg;
+ struct fscrypt_master_key_secret secret;
+ int err;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (!valid_key_spec(&arg.key_spec))
+ return -EINVAL;
+
+ if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
+ arg.raw_size > FSCRYPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved)))
+ return -EINVAL;
+
+ memset(&secret, 0, sizeof(secret));
+ secret.size = arg.raw_size;
+ err = -EFAULT;
+ if (copy_from_user(secret.raw, uarg->raw, secret.size))
+ goto out_wipe_secret;
+
+ switch (arg.key_spec.type) {
+ case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR:
+ /*
+ * Only root can add keys that are identified by an arbitrary
+ * descriptor rather than by a cryptographic hash --- since
+ * otherwise a malicious user could add the wrong key.
+ */
+ err = -EACCES;
+ if (!capable(CAP_SYS_ADMIN))
+ goto out_wipe_secret;
+ break;
+ case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER:
+ err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size);
+ if (err)
+ goto out_wipe_secret;
+
+ /*
+ * Now that the HKDF context is initialized, the raw key is no
+ * longer needed.
+ */
+ memzero_explicit(secret.raw, secret.size);
+
+ /* Calculate the key identifier and return it to userspace. */
+ err = fscrypt_hkdf_expand(&secret.hkdf,
+ HKDF_CONTEXT_KEY_IDENTIFIER,
+ NULL, 0, arg.key_spec.u.identifier,
+ FSCRYPT_KEY_IDENTIFIER_SIZE);
+ if (err)
+ goto out_wipe_secret;
+ err = -EFAULT;
+ if (copy_to_user(uarg->key_spec.u.identifier,
+ arg.key_spec.u.identifier,
+ FSCRYPT_KEY_IDENTIFIER_SIZE))
+ goto out_wipe_secret;
+ break;
+ default:
+ WARN_ON(1);
+ err = -EINVAL;
+ goto out_wipe_secret;
+ }
+
+ err = add_master_key(sb, &secret, &arg.key_spec);
+out_wipe_secret:
+ wipe_master_key_secret(&secret);
+ return err;
+}
+EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key);
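+
+/*
+ * Userspace usage sketch (names such as 'raw_key', 'raw_size', 'fd', and
+ * 'save_identifier' are illustrative only): a program adds a v2 key by issuing
+ * the ioctl on any file or directory on the filesystem, with the raw key bytes
+ * appended to the argument structure:
+ *
+ *     struct fscrypt_add_key_arg *arg = calloc(1, sizeof(*arg) + raw_size);
+ *
+ *     arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
+ *     arg->raw_size = raw_size;
+ *     memcpy(arg->raw, raw_key, raw_size);
+ *     if (ioctl(fd, FS_IOC_ADD_ENCRYPTION_KEY, arg) == 0)
+ *             save_identifier(arg->key_spec.u.identifier);
+ *
+ * On success the kernel fills in key_spec.u.identifier, which the program then
+ * uses when setting encryption policies. For v1 keys the caller instead sets
+ * FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR and fills in key_spec.u.descriptor itself,
+ * which requires CAP_SYS_ADMIN as explained above.
+ */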
+
+/*
+ * Verify that the current user has added a master key with the given identifier
+ * (returns -ENOKEY if not). This is needed to prevent a user from encrypting
+ * their files using some other user's key which they don't actually know.
+ * Cryptographically this isn't much of a problem, but the semantics of this
+ * would be a bit weird, so it's best to just forbid it.
+ *
+ * The system administrator (CAP_FOWNER) can override this, which should be
+ * enough for any use cases where encryption policies are being set using keys
+ * that were chosen ahead of time but aren't available at the moment.
+ *
+ * Note that the key may already have been removed by the time this returns,
+ * but that's okay; we just care whether the key was there at some point.
+ *
+ * Return: 0 if the key is added, -ENOKEY if it isn't, or another -errno code
+ */
+int fscrypt_verify_key_added(struct super_block *sb,
+ const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
+{
+ struct fscrypt_key_specifier mk_spec;
+ struct key *key, *mk_user;
+ struct fscrypt_master_key *mk;
+ int err;
+
+ mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
+ memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE);
+
+ key = fscrypt_find_master_key(sb, &mk_spec);
+ if (IS_ERR(key)) {
+ err = PTR_ERR(key);
+ goto out;
+ }
+ mk = key->payload.data[0];
+ mk_user = find_master_key_user(mk);
+ if (IS_ERR(mk_user)) {
+ err = PTR_ERR(mk_user);
+ } else {
+ key_put(mk_user);
+ err = 0;
+ }
+ key_put(key);
+out:
+ if (err == -ENOKEY && capable(CAP_FOWNER))
+ err = 0;
+ return err;
+}
+
+/*
+ * Try to evict the inode's dentries from the dentry cache. If the inode is a
+ * directory, then it can have at most one dentry; however, that dentry may be
+ * pinned by child dentries, so first try to evict the children too.
+ */
+static void shrink_dcache_inode(struct inode *inode)
+{
+ struct dentry *dentry;
+
+ if (S_ISDIR(inode->i_mode)) {
+ dentry = d_find_any_alias(inode);
+ if (dentry) {
+ shrink_dcache_parent(dentry);
+ dput(dentry);
+ }
+ }
+ d_prune_aliases(inode);
+}
+
+static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk)
+{
+ struct fscrypt_info *ci;
+ struct inode *inode;
+ struct inode *toput_inode = NULL;
+
+ spin_lock(&mk->mk_decrypted_inodes_lock);
+
+ list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) {
+ inode = ci->ci_inode;
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ __iget(inode);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&mk->mk_decrypted_inodes_lock);
+
+ shrink_dcache_inode(inode);
+ iput(toput_inode);
+ toput_inode = inode;
+
+ spin_lock(&mk->mk_decrypted_inodes_lock);
+ }
+
+ spin_unlock(&mk->mk_decrypted_inodes_lock);
+ iput(toput_inode);
+}
+
+static int check_for_busy_inodes(struct super_block *sb,
+ struct fscrypt_master_key *mk)
+{
+ struct list_head *pos;
+ size_t busy_count = 0;
+ unsigned long ino;
+ struct dentry *dentry;
+ char _path[256];
+ char *path = NULL;
+
+ spin_lock(&mk->mk_decrypted_inodes_lock);
+
+ list_for_each(pos, &mk->mk_decrypted_inodes)
+ busy_count++;
+
+ if (busy_count == 0) {
+ spin_unlock(&mk->mk_decrypted_inodes_lock);
+ return 0;
+ }
+
+ {
+ /* select an example file to show for debugging purposes */
+ struct inode *inode =
+ list_first_entry(&mk->mk_decrypted_inodes,
+ struct fscrypt_info,
+ ci_master_key_link)->ci_inode;
+ ino = inode->i_ino;
+ dentry = d_find_alias(inode);
+ }
+ spin_unlock(&mk->mk_decrypted_inodes_lock);
+
+ if (dentry) {
+ path = dentry_path(dentry, _path, sizeof(_path));
+ dput(dentry);
+ }
+ if (IS_ERR_OR_NULL(path))
+ path = "(unknown)";
+
+ fscrypt_warn(NULL,
+ "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu (%s)",
+ sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec),
+ master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u,
+ ino, path);
+ return -EBUSY;
+}
+
+static int try_to_lock_encrypted_files(struct super_block *sb,
+ struct fscrypt_master_key *mk)
+{
+ int err1;
+ int err2;
+
+ /*
+ * An inode can't be evicted while it is dirty or has dirty pages.
+ * Thus, we first have to clean the inodes in ->mk_decrypted_inodes.
+ *
+ * Just do it the easy way: call sync_filesystem(). It's overkill, but
+ * it works, and it's more important to minimize the amount of caches we
+ * drop than the amount of data we sync. Also, unprivileged users can
+ * already call sync_filesystem() via sys_syncfs() or sys_sync().
+ */
+ down_read(&sb->s_umount);
+ err1 = sync_filesystem(sb);
+ up_read(&sb->s_umount);
+ /* If a sync error occurs, still try to evict as much as possible. */
+
+ /*
+ * Inodes are pinned by their dentries, so we have to evict their
+ * dentries. shrink_dcache_sb() would suffice, but would be overkill
+ * and inappropriate for use by unprivileged users. So instead go
+ * through the inodes' alias lists and try to evict each dentry.
+ */
+ evict_dentries_for_decrypted_inodes(mk);
+
+ /*
+ * evict_dentries_for_decrypted_inodes() already iput() each inode in
+ * the list; any inodes for which that dropped the last reference will
+ * have been evicted due to fscrypt_drop_inode() detecting the key
+ * removal and telling the VFS to evict the inode. So to finish, we
+ * just need to check whether any inodes couldn't be evicted.
+ */
+ err2 = check_for_busy_inodes(sb, mk);
+
+ return err1 ?: err2;
+}
+
+/*
+ * Try to remove an fscrypt master encryption key.
+ *
+ * FS_IOC_REMOVE_ENCRYPTION_KEY (all_users=false) removes the current user's
+ * claim to the key, then removes the key itself if no other users have claims.
+ * FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS (all_users=true) always removes the
+ * key itself.
+ *
+ * To "remove the key itself", first we wipe the actual master key secret, so
+ * that no more inodes can be unlocked with it. Then we try to evict all cached
+ * inodes that had been unlocked with the key.
+ *
+ * If all inodes were evicted, then we unlink the fscrypt_master_key from the
+ * keyring. Otherwise it remains in the keyring in the "incompletely removed"
+ * state (without the actual secret key) where it tracks the list of remaining
+ * inodes. Userspace can execute the ioctl again later to retry eviction, or
+ * alternatively can re-add the secret key again.
+ *
+ * For more details, see the "Removing keys" section of
+ * Documentation/filesystems/fscrypt.rst.
+ */
+static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
+{
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct fscrypt_remove_key_arg __user *uarg = _uarg;
+ struct fscrypt_remove_key_arg arg;
+ struct key *key;
+ struct fscrypt_master_key *mk;
+ u32 status_flags = 0;
+ int err;
+ bool dead;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (!valid_key_spec(&arg.key_spec))
+ return -EINVAL;
+
+ if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved)))
+ return -EINVAL;
+
+ /*
+ * Only root can add and remove keys that are identified by an arbitrary
+ * descriptor rather than by a cryptographic hash.
+ */
+ if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
+ !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ /* Find the key being removed. */
+ key = fscrypt_find_master_key(sb, &arg.key_spec);
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+ mk = key->payload.data[0];
+
+ down_write(&key->sem);
+
+ /* If relevant, remove current user's (or all users) claim to the key */
+ if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) {
+ if (all_users)
+ err = keyring_clear(mk->mk_users);
+ else
+ err = remove_master_key_user(mk);
+ if (err) {
+ up_write(&key->sem);
+ goto out_put_key;
+ }
+ if (mk->mk_users->keys.nr_leaves_on_tree != 0) {
+ /*
+ * Other users have also added the key. We removed
+ * the current user's claim to the key, but we still
+ * can't remove the key itself.
+ */
+ status_flags |=
+ FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS;
+ err = 0;
+ up_write(&key->sem);
+ goto out_put_key;
+ }
+ }
+
+ /* No user claims remaining. Go ahead and wipe the secret. */
+ dead = false;
+ if (is_master_key_secret_present(&mk->mk_secret)) {
+ down_write(&mk->mk_secret_sem);
+ wipe_master_key_secret(&mk->mk_secret);
+ dead = refcount_dec_and_test(&mk->mk_refcount);
+ up_write(&mk->mk_secret_sem);
+ }
+ up_write(&key->sem);
+ if (dead) {
+ /*
+ * No inodes reference the key, and we wiped the secret, so the
+ * key object is free to be removed from the keyring.
+ */
+ key_invalidate(key);
+ err = 0;
+ } else {
+ /* Some inodes still reference this key; try to evict them. */
+ err = try_to_lock_encrypted_files(sb, mk);
+ if (err == -EBUSY) {
+ status_flags |=
+ FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY;
+ err = 0;
+ }
+ }
+ /*
+ * We return 0 if we successfully did something: removed a claim to the
+ * key, wiped the secret, or tried locking the files again. Users need
+ * to check the informational status flags if they care whether the key
+ * has been fully removed including all files locked.
+ */
+out_put_key:
+ key_put(key);
+ if (err == 0)
+ err = put_user(status_flags, &uarg->removal_status_flags);
+ return err;
+}
+
+int fscrypt_ioctl_remove_key(struct file *filp, void __user *uarg)
+{
+ return do_remove_key(filp, uarg, false);
+}
+EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key);
+
+int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *uarg)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ return do_remove_key(filp, uarg, true);
+}
+EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key_all_users);
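+
+/*
+ * Userspace usage sketch (illustrative; 'spec' and 'fd' are assumed to be set
+ * up by the caller):
+ *
+ *     struct fscrypt_remove_key_arg arg = { .key_spec = spec };
+ *     int ret = ioctl(fd, FS_IOC_REMOVE_ENCRYPTION_KEY, &arg);
+ *
+ * On success (ret == 0) the caller checks arg.removal_status_flags:
+ * FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS means only the calling user's
+ * claim was dropped, and FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY means the
+ * secret was wiped but some files are still in use, so the ioctl should be
+ * retried later once those files are closed.
+ */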
+
+/*
+ * Retrieve the status of an fscrypt master encryption key.
+ *
+ * We set ->status to indicate whether the key is absent, present, or
+ * incompletely removed. "Incompletely removed" means that the master key
+ * secret has been removed, but some files which had been unlocked with it are
+ * still in use. This field allows applications to easily determine the state
+ * of an encrypted directory without using a hack such as trying to open a
+ * regular file in it (which can confuse the "incompletely removed" state with
+ * absent or present).
+ *
+ * In addition, for v2 policy keys we allow applications to determine, via
+ * ->status_flags and ->user_count, whether the key has been added by the
+ * current user, by other users, or by both. Most applications should not need
+ * this, since ordinarily only one user should know a given key. However, if a
+ * secret key is shared by multiple users, applications may wish to add an
+ * already-present key to prevent other users from removing it. This ioctl can
+ * be used to check whether that really is the case before the work is done to
+ * add the key --- which might e.g. require prompting the user for a passphrase.
+ *
+ * For more details, see the "FS_IOC_GET_ENCRYPTION_KEY_STATUS" section of
+ * Documentation/filesystems/fscrypt.rst.
+ */
+int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
+{
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct fscrypt_get_key_status_arg arg;
+ struct key *key;
+ struct fscrypt_master_key *mk;
+ int err;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (!valid_key_spec(&arg.key_spec))
+ return -EINVAL;
+
+ if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved)))
+ return -EINVAL;
+
+ arg.status_flags = 0;
+ arg.user_count = 0;
+ memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved));
+
+ key = fscrypt_find_master_key(sb, &arg.key_spec);
+ if (IS_ERR(key)) {
+ if (key != ERR_PTR(-ENOKEY))
+ return PTR_ERR(key);
+ arg.status = FSCRYPT_KEY_STATUS_ABSENT;
+ err = 0;
+ goto out;
+ }
+ mk = key->payload.data[0];
+ down_read(&key->sem);
+
+ if (!is_master_key_secret_present(&mk->mk_secret)) {
+ arg.status = FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED;
+ err = 0;
+ goto out_release_key;
+ }
+
+ arg.status = FSCRYPT_KEY_STATUS_PRESENT;
+ if (mk->mk_users) {
+ struct key *mk_user;
+
+ arg.user_count = mk->mk_users->keys.nr_leaves_on_tree;
+ mk_user = find_master_key_user(mk);
+ if (!IS_ERR(mk_user)) {
+ arg.status_flags |=
+ FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF;
+ key_put(mk_user);
+ } else if (mk_user != ERR_PTR(-ENOKEY)) {
+ err = PTR_ERR(mk_user);
+ goto out_release_key;
+ }
+ }
+ err = 0;
+out_release_key:
+ up_read(&key->sem);
+ key_put(key);
+out:
+ if (!err && copy_to_user(uarg, &arg, sizeof(arg)))
+ err = -EFAULT;
+ return err;
+}
+EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_key_status);
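+
+/*
+ * Userspace usage sketch (illustrative; 'spec' and 'fd' as above):
+ *
+ *     struct fscrypt_get_key_status_arg arg = { .key_spec = spec };
+ *
+ *     if (ioctl(fd, FS_IOC_GET_ENCRYPTION_KEY_STATUS, &arg) == 0)
+ *             switch (arg.status) {
+ *             case FSCRYPT_KEY_STATUS_ABSENT:
+ *             case FSCRYPT_KEY_STATUS_PRESENT:
+ *             case FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED:
+ *                     ...
+ *             }
+ *
+ * For v2 keys, arg.status_flags and arg.user_count additionally report whether
+ * the calling user added the key and how many users have added it in total.
+ */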
+
+int __init fscrypt_init_keyring(void)
+{
+ int err;
+
+ err = register_key_type(&key_type_fscrypt);
+ if (err)
+ return err;
+
+ err = register_key_type(&key_type_fscrypt_user);
+ if (err)
+ goto err_unregister_fscrypt;
+
+ return 0;
+
+err_unregister_fscrypt:
+ unregister_key_type(&key_type_fscrypt);
+ return err;
+}
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
new file mode 100644
index 000000000000..f577bb6613f9
--- /dev/null
+++ b/fs/crypto/keysetup.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Key setup facility for FS encryption support.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Originally written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar.
+ * Heavily modified since then.
+ */
+
+#include <crypto/skcipher.h>
+#include <linux/key.h>
+
+#include "fscrypt_private.h"
+
+static struct fscrypt_mode available_modes[] = {
+ [FSCRYPT_MODE_AES_256_XTS] = {
+ .friendly_name = "AES-256-XTS",
+ .cipher_str = "xts(aes)",
+ .keysize = 64,
+ .ivsize = 16,
+ },
+ [FSCRYPT_MODE_AES_256_CTS] = {
+ .friendly_name = "AES-256-CTS-CBC",
+ .cipher_str = "cts(cbc(aes))",
+ .keysize = 32,
+ .ivsize = 16,
+ },
+ [FSCRYPT_MODE_AES_128_CBC] = {
+ .friendly_name = "AES-128-CBC-ESSIV",
+ .cipher_str = "essiv(cbc(aes),sha256)",
+ .keysize = 16,
+ .ivsize = 16,
+ },
+ [FSCRYPT_MODE_AES_128_CTS] = {
+ .friendly_name = "AES-128-CTS-CBC",
+ .cipher_str = "cts(cbc(aes))",
+ .keysize = 16,
+ .ivsize = 16,
+ },
+ [FSCRYPT_MODE_ADIANTUM] = {
+ .friendly_name = "Adiantum",
+ .cipher_str = "adiantum(xchacha12,aes)",
+ .keysize = 32,
+ .ivsize = 32,
+ },
+};
+
+static struct fscrypt_mode *
+select_encryption_mode(const union fscrypt_policy *policy,
+ const struct inode *inode)
+{
+ if (S_ISREG(inode->i_mode))
+ return &available_modes[fscrypt_policy_contents_mode(policy)];
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ return &available_modes[fscrypt_policy_fnames_mode(policy)];
+
+ WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
+ inode->i_ino, (inode->i_mode & S_IFMT));
+ return ERR_PTR(-EINVAL);
+}
+
+/* Create a symmetric cipher object for the given encryption mode and key */
+struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode,
+ const u8 *raw_key,
+ const struct inode *inode)
+{
+ struct crypto_skcipher *tfm;
+ int err;
+
+ tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
+ if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT) {
+ fscrypt_warn(inode,
+ "Missing crypto API support for %s (API name: \"%s\")",
+ mode->friendly_name, mode->cipher_str);
+ return ERR_PTR(-ENOPKG);
+ }
+ fscrypt_err(inode, "Error allocating '%s' transform: %ld",
+ mode->cipher_str, PTR_ERR(tfm));
+ return tfm;
+ }
+ if (!xchg(&mode->logged_impl_name, 1)) {
+ /*
+ * fscrypt performance can vary greatly depending on which
+ * crypto algorithm implementation is used. Help people debug
+ * performance problems by logging the ->cra_driver_name the
+ * first time a mode is used.
+ */
+ pr_info("fscrypt: %s using implementation \"%s\"\n",
+ mode->friendly_name,
+ crypto_skcipher_alg(tfm)->base.cra_driver_name);
+ }
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+ err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize);
+ if (err)
+ goto err_free_tfm;
+
+ return tfm;
+
+err_free_tfm:
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(err);
+}
+
+/* Given the per-file key, set up the file's crypto transform object */
+int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
+{
+ struct crypto_skcipher *tfm;
+
+ tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ ci->ci_ctfm = tfm;
+ ci->ci_owns_key = true;
+ return 0;
+}
+
+static int setup_per_mode_key(struct fscrypt_info *ci,
+ struct fscrypt_master_key *mk,
+ struct crypto_skcipher **tfms,
+ u8 hkdf_context, bool include_fs_uuid)
+{
+ const struct inode *inode = ci->ci_inode;
+ const struct super_block *sb = inode->i_sb;
+ struct fscrypt_mode *mode = ci->ci_mode;
+ u8 mode_num = mode - available_modes;
+ struct crypto_skcipher *tfm, *prev_tfm;
+ u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
+ u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
+ unsigned int hkdf_infolen = 0;
+ int err;
+
+ if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX))
+ return -EINVAL;
+
+ /* pairs with cmpxchg() below */
+ tfm = READ_ONCE(tfms[mode_num]);
+ if (likely(tfm != NULL))
+ goto done;
+
+ BUILD_BUG_ON(sizeof(mode_num) != 1);
+ BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
+ BUILD_BUG_ON(sizeof(hkdf_info) != 17);
+ hkdf_info[hkdf_infolen++] = mode_num;
+ if (include_fs_uuid) {
+ memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid,
+ sizeof(sb->s_uuid));
+ hkdf_infolen += sizeof(sb->s_uuid);
+ }
+ err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
+ hkdf_context, hkdf_info, hkdf_infolen,
+ mode_key, mode->keysize);
+ if (err)
+ return err;
+ tfm = fscrypt_allocate_skcipher(mode, mode_key, inode);
+ memzero_explicit(mode_key, mode->keysize);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ /* pairs with READ_ONCE() above */
+ prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm);
+ if (prev_tfm != NULL) {
+ crypto_free_skcipher(tfm);
+ tfm = prev_tfm;
+ }
+done:
+ ci->ci_ctfm = tfm;
+ return 0;
+}
+
+static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
+ struct fscrypt_master_key *mk)
+{
+ u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
+ int err;
+
+ if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
+ /*
+ * DIRECT_KEY: instead of deriving per-file keys, the per-file
+ * nonce will be included in all the IVs. But unlike v1
+ * policies, for v2 policies in this case we don't encrypt with
+ * the master key directly but rather derive a per-mode key.
+ * This ensures that the master key is consistently used only
+ * for HKDF, avoiding key reuse issues.
+ */
+ if (!fscrypt_mode_supports_direct_key(ci->ci_mode)) {
+ fscrypt_warn(ci->ci_inode,
+ "Direct key flag not allowed with %s",
+ ci->ci_mode->friendly_name);
+ return -EINVAL;
+ }
+ return setup_per_mode_key(ci, mk, mk->mk_direct_tfms,
+ HKDF_CONTEXT_DIRECT_KEY, false);
+ } else if (ci->ci_policy.v2.flags &
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
+ /*
+ * IV_INO_LBLK_64: encryption keys are derived from (master_key,
+ * mode_num, filesystem_uuid), and inode number is included in
+ * the IVs. This format is optimized for use with inline
+ * encryption hardware compliant with the UFS or eMMC standards.
+ */
+ return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms,
+ HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
+ true);
+ }
+
+ err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
+ HKDF_CONTEXT_PER_FILE_KEY,
+ ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE,
+ derived_key, ci->ci_mode->keysize);
+ if (err)
+ return err;
+
+ err = fscrypt_set_derived_key(ci, derived_key);
+ memzero_explicit(derived_key, ci->ci_mode->keysize);
+ return err;
+}
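+
+/*
+ * To summarize, all of the v2 derivations above are HKDF-SHA512 expansions of
+ * the master key, distinguished by HKDF context byte and info string:
+ *
+ *     default:        per-file key = HKDF(PER_FILE_KEY, 16-byte nonce)
+ *     DIRECT_KEY:     per-mode key = HKDF(DIRECT_KEY, mode_num)
+ *     IV_INO_LBLK_64: per-mode key = HKDF(IV_INO_LBLK_64_KEY,
+ *                                         mode_num || fs_uuid)
+ */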
+
+/*
+ * Find the master key, then set up the inode's actual encryption key.
+ *
+ * If the master key is found in the filesystem-level keyring, then the
+ * corresponding 'struct key' is returned in *master_key_ret with
+ * ->mk_secret_sem read-locked. This is needed to ensure that only one task
+ * links the fscrypt_info into ->mk_decrypted_inodes (as multiple tasks may race
+ * to create an fscrypt_info for the same inode), and to synchronize the master
+ * key being removed with a new inode starting to use it.
+ */
+static int setup_file_encryption_key(struct fscrypt_info *ci,
+ struct key **master_key_ret)
+{
+ struct key *key;
+ struct fscrypt_master_key *mk = NULL;
+ struct fscrypt_key_specifier mk_spec;
+ int err;
+
+ switch (ci->ci_policy.version) {
+ case FSCRYPT_POLICY_V1:
+ mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
+ memcpy(mk_spec.u.descriptor,
+ ci->ci_policy.v1.master_key_descriptor,
+ FSCRYPT_KEY_DESCRIPTOR_SIZE);
+ break;
+ case FSCRYPT_POLICY_V2:
+ mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
+ memcpy(mk_spec.u.identifier,
+ ci->ci_policy.v2.master_key_identifier,
+ FSCRYPT_KEY_IDENTIFIER_SIZE);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ key = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec);
+ if (IS_ERR(key)) {
+ if (key != ERR_PTR(-ENOKEY) ||
+ ci->ci_policy.version != FSCRYPT_POLICY_V1)
+ return PTR_ERR(key);
+
+ /*
+ * As a legacy fallback for v1 policies, search for the key in
+ * the current task's subscribed keyrings too. Don't move this
+ * to before the search of ->s_master_keys, since users
+ * shouldn't be able to override filesystem-level keys.
+ */
+ return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci);
+ }
+
+ mk = key->payload.data[0];
+ down_read(&mk->mk_secret_sem);
+
+ /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */
+ if (!is_master_key_secret_present(&mk->mk_secret)) {
+ err = -ENOKEY;
+ goto out_release_key;
+ }
+
+ /*
+ * Require that the master key be at least as long as the derived key.
+ * Otherwise, the derived key cannot possibly contain as much entropy as
+ * that required by the encryption mode it will be used for. For v1
+ * policies it's also required for the KDF to work at all.
+ */
+ if (mk->mk_secret.size < ci->ci_mode->keysize) {
+ fscrypt_warn(NULL,
+ "key with %s %*phN is too short (got %u bytes, need %u+ bytes)",
+ master_key_spec_type(&mk_spec),
+ master_key_spec_len(&mk_spec), (u8 *)&mk_spec.u,
+ mk->mk_secret.size, ci->ci_mode->keysize);
+ err = -ENOKEY;
+ goto out_release_key;
+ }
+
+ switch (ci->ci_policy.version) {
+ case FSCRYPT_POLICY_V1:
+ err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw);
+ break;
+ case FSCRYPT_POLICY_V2:
+ err = fscrypt_setup_v2_file_key(ci, mk);
+ break;
+ default:
+ WARN_ON(1);
+ err = -EINVAL;
+ break;
+ }
+ if (err)
+ goto out_release_key;
+
+ *master_key_ret = key;
+ return 0;
+
+out_release_key:
+ up_read(&mk->mk_secret_sem);
+ key_put(key);
+ return err;
+}
+
+static void put_crypt_info(struct fscrypt_info *ci)
+{
+ struct key *key;
+
+ if (!ci)
+ return;
+
+ if (ci->ci_direct_key)
+ fscrypt_put_direct_key(ci->ci_direct_key);
+ else if (ci->ci_owns_key)
+ crypto_free_skcipher(ci->ci_ctfm);
+
+ key = ci->ci_master_key;
+ if (key) {
+ struct fscrypt_master_key *mk = key->payload.data[0];
+
+ /*
+ * Remove this inode from the list of inodes that were unlocked
+ * with the master key.
+ *
+ * In addition, if we're removing the last inode from a key that
+ * already had its secret removed, invalidate the key so that it
+ * gets removed from ->s_master_keys.
+ */
+ spin_lock(&mk->mk_decrypted_inodes_lock);
+ list_del(&ci->ci_master_key_link);
+ spin_unlock(&mk->mk_decrypted_inodes_lock);
+ if (refcount_dec_and_test(&mk->mk_refcount))
+ key_invalidate(key);
+ key_put(key);
+ }
+ memzero_explicit(ci, sizeof(*ci));
+ kmem_cache_free(fscrypt_info_cachep, ci);
+}
+
+int fscrypt_get_encryption_info(struct inode *inode)
+{
+ struct fscrypt_info *crypt_info;
+ union fscrypt_context ctx;
+ struct fscrypt_mode *mode;
+ struct key *master_key = NULL;
+ int res;
+
+ if (fscrypt_has_encryption_key(inode))
+ return 0;
+
+ res = fscrypt_initialize(inode->i_sb->s_cop->flags);
+ if (res)
+ return res;
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res < 0) {
+ if (!fscrypt_dummy_context_enabled(inode) ||
+ IS_ENCRYPTED(inode)) {
+ fscrypt_warn(inode,
+ "Error %d getting encryption context",
+ res);
+ return res;
+ }
+ /* Fake up a context for an unencrypted directory */
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.version = FSCRYPT_CONTEXT_V1;
+ ctx.v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
+ ctx.v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
+ memset(ctx.v1.master_key_descriptor, 0x42,
+ FSCRYPT_KEY_DESCRIPTOR_SIZE);
+ res = sizeof(ctx.v1);
+ }
+
+ crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_NOFS);
+ if (!crypt_info)
+ return -ENOMEM;
+
+ crypt_info->ci_inode = inode;
+
+ res = fscrypt_policy_from_context(&crypt_info->ci_policy, &ctx, res);
+ if (res) {
+ fscrypt_warn(inode,
+ "Unrecognized or corrupt encryption context");
+ goto out;
+ }
+
+ switch (ctx.version) {
+ case FSCRYPT_CONTEXT_V1:
+ memcpy(crypt_info->ci_nonce, ctx.v1.nonce,
+ FS_KEY_DERIVATION_NONCE_SIZE);
+ break;
+ case FSCRYPT_CONTEXT_V2:
+ memcpy(crypt_info->ci_nonce, ctx.v2.nonce,
+ FS_KEY_DERIVATION_NONCE_SIZE);
+ break;
+ default:
+ WARN_ON(1);
+ res = -EINVAL;
+ goto out;
+ }
+
+ if (!fscrypt_supported_policy(&crypt_info->ci_policy, inode)) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ mode = select_encryption_mode(&crypt_info->ci_policy, inode);
+ if (IS_ERR(mode)) {
+ res = PTR_ERR(mode);
+ goto out;
+ }
+ WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
+ crypt_info->ci_mode = mode;
+
+ res = setup_file_encryption_key(crypt_info, &master_key);
+ if (res)
+ goto out;
+
+ if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) {
+ if (master_key) {
+ struct fscrypt_master_key *mk =
+ master_key->payload.data[0];
+
+ refcount_inc(&mk->mk_refcount);
+ crypt_info->ci_master_key = key_get(master_key);
+ spin_lock(&mk->mk_decrypted_inodes_lock);
+ list_add(&crypt_info->ci_master_key_link,
+ &mk->mk_decrypted_inodes);
+ spin_unlock(&mk->mk_decrypted_inodes_lock);
+ }
+ crypt_info = NULL;
+ }
+ res = 0;
+out:
+ if (master_key) {
+ struct fscrypt_master_key *mk = master_key->payload.data[0];
+
+ up_read(&mk->mk_secret_sem);
+ key_put(master_key);
+ }
+ if (res == -ENOKEY)
+ res = 0;
+ put_crypt_info(crypt_info);
+ return res;
+}
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
+
+/**
+ * fscrypt_put_encryption_info - free most of an inode's fscrypt data
+ *
+ * Free the inode's fscrypt_info. Filesystems must call this when the inode is
+ * being evicted. An RCU grace period need not have elapsed yet.
+ */
+void fscrypt_put_encryption_info(struct inode *inode)
+{
+ put_crypt_info(inode->i_crypt_info);
+ inode->i_crypt_info = NULL;
+}
+EXPORT_SYMBOL(fscrypt_put_encryption_info);
+
+/**
+ * fscrypt_free_inode - free an inode's fscrypt data requiring RCU delay
+ *
+ * Free the inode's cached decrypted symlink target, if any. Filesystems must
+ * call this after an RCU grace period, just before they free the inode.
+ */
+void fscrypt_free_inode(struct inode *inode)
+{
+ if (IS_ENCRYPTED(inode) && S_ISLNK(inode->i_mode)) {
+ kfree(inode->i_link);
+ inode->i_link = NULL;
+ }
+}
+EXPORT_SYMBOL(fscrypt_free_inode);
+
+/**
+ * fscrypt_drop_inode - check whether the inode's master key has been removed
+ *
+ * Filesystems supporting fscrypt must call this from their ->drop_inode()
+ * method so that encrypted inodes are evicted as soon as they're no longer in
+ * use and their master key has been removed.
+ *
+ * Return: 1 if fscrypt wants the inode to be evicted now, otherwise 0
+ */
+int fscrypt_drop_inode(struct inode *inode)
+{
+ const struct fscrypt_info *ci = READ_ONCE(inode->i_crypt_info);
+ const struct fscrypt_master_key *mk;
+
+ /*
+ * If ci is NULL, then the inode doesn't have an encryption key set up
+ * so it's irrelevant. If ci_master_key is NULL, then the master key
+ * was provided via the legacy mechanism of the process-subscribed
+ * keyrings, so we don't know whether it's been removed or not.
+ */
+ if (!ci || !ci->ci_master_key)
+ return 0;
+ mk = ci->ci_master_key->payload.data[0];
+
+ /*
+ * Note: since we aren't holding ->mk_secret_sem, the result here can
+ * immediately become outdated. But there's no correctness problem with
+ * unnecessarily evicting. Nor is there a correctness problem with not
+ * evicting while iput() is racing with the key being removed, since
+ * then the thread removing the key will either evict the inode itself
+ * or will correctly detect that it wasn't evicted due to the race.
+ */
+ return !is_master_key_secret_present(&mk->mk_secret);
+}
+EXPORT_SYMBOL_GPL(fscrypt_drop_inode);
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
new file mode 100644
index 000000000000..5298ef22aa85
--- /dev/null
+++ b/fs/crypto/keysetup_v1.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Key setup for v1 encryption policies
+ *
+ * Copyright 2015, 2019 Google LLC
+ */
+
+/*
+ * This file implements compatibility functions for the original encryption
+ * policy version ("v1"), including:
+ *
+ * - Deriving per-file keys using the AES-128-ECB based KDF
+ * (rather than the new method of using HKDF-SHA512)
+ *
+ * - Retrieving fscrypt master keys from process-subscribed keyrings
+ * (rather than the new method of using a filesystem-level keyring)
+ *
+ * - Handling policies with the DIRECT_KEY flag set using a master key table
+ * (rather than the new method of implementing DIRECT_KEY with per-mode keys
+ * managed alongside the master keys in the filesystem-level keyring)
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/skcipher.h>
+#include <keys/user-type.h>
+#include <linux/hashtable.h>
+#include <linux/scatterlist.h>
+
+#include "fscrypt_private.h"
+
+/* Table of keys referenced by DIRECT_KEY policies */
+static DEFINE_HASHTABLE(fscrypt_direct_keys, 6); /* 6 bits = 64 buckets */
+static DEFINE_SPINLOCK(fscrypt_direct_keys_lock);
+
+/*
+ * v1 key derivation function. This generates the derived key by encrypting the
+ * master key with AES-128-ECB using the nonce as the AES key. This provides a
+ * unique derived key with sufficient entropy for each inode. However, it's
+ * nonstandard, non-extensible, doesn't evenly distribute the entropy from the
+ * master key, and is trivially reversible: an attacker who compromises a
+ * derived key can "decrypt" it to get back to the master key, then derive any
+ * other key. For all new code, use HKDF instead.
+ *
+ * The master key must be at least as long as the derived key. If the master
+ * key is longer, then only the first 'derived_keysize' bytes are used.
+ */
+static int derive_key_aes(const u8 *master_key,
+ const u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE],
+ u8 *derived_key, unsigned int derived_keysize)
+{
+ int res = 0;
+ struct skcipher_request *req = NULL;
+ DECLARE_CRYPTO_WAIT(wait);
+ struct scatterlist src_sg, dst_sg;
+ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+
+ if (IS_ERR(tfm)) {
+ res = PTR_ERR(tfm);
+ tfm = NULL;
+ goto out;
+ }
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ res = -ENOMEM;
+ goto out;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &wait);
+ res = crypto_skcipher_setkey(tfm, nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+ if (res < 0)
+ goto out;
+
+ sg_init_one(&src_sg, master_key, derived_keysize);
+ sg_init_one(&dst_sg, derived_key, derived_keysize);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg, derived_keysize,
+ NULL);
+ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+out:
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return res;
+}
+
+/*
+ * Search the current task's subscribed keyrings for a "logon" key with
+ * description prefix:descriptor, and if found acquire a read lock on it and
+ * return a pointer to its validated payload in *payload_ret.
+ */
+static struct key *
+find_and_lock_process_key(const char *prefix,
+ const u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE],
+ unsigned int min_keysize,
+ const struct fscrypt_key **payload_ret)
+{
+ char *description;
+ struct key *key;
+ const struct user_key_payload *ukp;
+ const struct fscrypt_key *payload;
+
+ description = kasprintf(GFP_NOFS, "%s%*phN", prefix,
+ FSCRYPT_KEY_DESCRIPTOR_SIZE, descriptor);
+ if (!description)
+ return ERR_PTR(-ENOMEM);
+
+ key = request_key(&key_type_logon, description, NULL);
+ kfree(description);
+ if (IS_ERR(key))
+ return key;
+
+ down_read(&key->sem);
+ ukp = user_key_payload_locked(key);
+
+ if (!ukp) /* was the key revoked before we acquired its semaphore? */
+ goto invalid;
+
+ payload = (const struct fscrypt_key *)ukp->data;
+
+ if (ukp->datalen != sizeof(struct fscrypt_key) ||
+ payload->size < 1 || payload->size > FSCRYPT_MAX_KEY_SIZE) {
+ fscrypt_warn(NULL,
+ "key with description '%s' has invalid payload",
+ key->description);
+ goto invalid;
+ }
+
+ if (payload->size < min_keysize) {
+ fscrypt_warn(NULL,
+ "key with description '%s' is too short (got %u bytes, need %u+ bytes)",
+ key->description, payload->size, min_keysize);
+ goto invalid;
+ }
+
+ *payload_ret = payload;
+ return key;
+
+invalid:
+ up_read(&key->sem);
+ key_put(key);
+ return ERR_PTR(-ENOKEY);
+}
+
+/* Master key referenced by DIRECT_KEY policy */
+struct fscrypt_direct_key {
+ struct hlist_node dk_node;
+ refcount_t dk_refcount;
+ const struct fscrypt_mode *dk_mode;
+ struct crypto_skcipher *dk_ctfm;
+ u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
+ u8 dk_raw[FSCRYPT_MAX_KEY_SIZE];
+};
+
+static void free_direct_key(struct fscrypt_direct_key *dk)
+{
+ if (dk) {
+ crypto_free_skcipher(dk->dk_ctfm);
+ kzfree(dk);
+ }
+}
+
+void fscrypt_put_direct_key(struct fscrypt_direct_key *dk)
+{
+ if (!refcount_dec_and_lock(&dk->dk_refcount, &fscrypt_direct_keys_lock))
+ return;
+ hash_del(&dk->dk_node);
+ spin_unlock(&fscrypt_direct_keys_lock);
+
+ free_direct_key(dk);
+}
+
+/*
+ * Find/insert the given key into the fscrypt_direct_keys table. If found, it
+ * is returned with elevated refcount, and 'to_insert' is freed if non-NULL. If
+ * not found, 'to_insert' is inserted and returned if it's non-NULL; otherwise
+ * NULL is returned.
+ */
+static struct fscrypt_direct_key *
+find_or_insert_direct_key(struct fscrypt_direct_key *to_insert,
+ const u8 *raw_key, const struct fscrypt_info *ci)
+{
+ unsigned long hash_key;
+ struct fscrypt_direct_key *dk;
+
+ /*
+ * Careful: to avoid potentially leaking secret key bytes via timing
+ * information, we must key the hash table by descriptor rather than by
+ * raw key, and use crypto_memneq() when comparing raw keys.
+ */
+
+ BUILD_BUG_ON(sizeof(hash_key) > FSCRYPT_KEY_DESCRIPTOR_SIZE);
+ memcpy(&hash_key, ci->ci_policy.v1.master_key_descriptor,
+ sizeof(hash_key));
+
+ spin_lock(&fscrypt_direct_keys_lock);
+ hash_for_each_possible(fscrypt_direct_keys, dk, dk_node, hash_key) {
+ if (memcmp(ci->ci_policy.v1.master_key_descriptor,
+ dk->dk_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE) != 0)
+ continue;
+ if (ci->ci_mode != dk->dk_mode)
+ continue;
+ if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize))
+ continue;
+ /* using existing tfm with same (descriptor, mode, raw_key) */
+ refcount_inc(&dk->dk_refcount);
+ spin_unlock(&fscrypt_direct_keys_lock);
+ free_direct_key(to_insert);
+ return dk;
+ }
+ if (to_insert)
+ hash_add(fscrypt_direct_keys, &to_insert->dk_node, hash_key);
+ spin_unlock(&fscrypt_direct_keys_lock);
+ return to_insert;
+}
+
+/* Prepare to encrypt directly using the master key in the given mode */
+static struct fscrypt_direct_key *
+fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key)
+{
+ struct fscrypt_direct_key *dk;
+ int err;
+
+ /* Is there already a tfm for this key? */
+ dk = find_or_insert_direct_key(NULL, raw_key, ci);
+ if (dk)
+ return dk;
+
+ /* Nope, allocate one. */
+ dk = kzalloc(sizeof(*dk), GFP_NOFS);
+ if (!dk)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&dk->dk_refcount, 1);
+ dk->dk_mode = ci->ci_mode;
+ dk->dk_ctfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key,
+ ci->ci_inode);
+ if (IS_ERR(dk->dk_ctfm)) {
+ err = PTR_ERR(dk->dk_ctfm);
+ dk->dk_ctfm = NULL;
+ goto err_free_dk;
+ }
+ memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor,
+ FSCRYPT_KEY_DESCRIPTOR_SIZE);
+ memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize);
+
+ return find_or_insert_direct_key(dk, raw_key, ci);
+
+err_free_dk:
+ free_direct_key(dk);
+ return ERR_PTR(err);
+}
+
+/* v1 policy, DIRECT_KEY: use the master key directly */
+static int setup_v1_file_key_direct(struct fscrypt_info *ci,
+ const u8 *raw_master_key)
+{
+ const struct fscrypt_mode *mode = ci->ci_mode;
+ struct fscrypt_direct_key *dk;
+
+ if (!fscrypt_mode_supports_direct_key(mode)) {
+ fscrypt_warn(ci->ci_inode,
+ "Direct key mode not allowed with %s",
+ mode->friendly_name);
+ return -EINVAL;
+ }
+
+ if (ci->ci_policy.v1.contents_encryption_mode !=
+ ci->ci_policy.v1.filenames_encryption_mode) {
+ fscrypt_warn(ci->ci_inode,
+ "Direct key mode not allowed with different contents and filenames modes");
+ return -EINVAL;
+ }
+
+ dk = fscrypt_get_direct_key(ci, raw_master_key);
+ if (IS_ERR(dk))
+ return PTR_ERR(dk);
+ ci->ci_direct_key = dk;
+ ci->ci_ctfm = dk->dk_ctfm;
+ return 0;
+}
+
+/* v1 policy, !DIRECT_KEY: derive the file's encryption key */
+static int setup_v1_file_key_derived(struct fscrypt_info *ci,
+ const u8 *raw_master_key)
+{
+ u8 *derived_key;
+ int err;
+
+ /*
+ * This cannot be a stack buffer because it will be passed to the
+ * scatterlist crypto API during derive_key_aes().
+ */
+ derived_key = kmalloc(ci->ci_mode->keysize, GFP_NOFS);
+ if (!derived_key)
+ return -ENOMEM;
+
+ err = derive_key_aes(raw_master_key, ci->ci_nonce,
+ derived_key, ci->ci_mode->keysize);
+ if (err)
+ goto out;
+
+ err = fscrypt_set_derived_key(ci, derived_key);
+out:
+ kzfree(derived_key);
+ return err;
+}
+
+int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key)
+{
+ if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
+ return setup_v1_file_key_direct(ci, raw_master_key);
+ else
+ return setup_v1_file_key_derived(ci, raw_master_key);
+}
+
+int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci)
+{
+ struct key *key;
+ const struct fscrypt_key *payload;
+ int err;
+
+ key = find_and_lock_process_key(FSCRYPT_KEY_DESC_PREFIX,
+ ci->ci_policy.v1.master_key_descriptor,
+ ci->ci_mode->keysize, &payload);
+ if (key == ERR_PTR(-ENOKEY) && ci->ci_inode->i_sb->s_cop->key_prefix) {
+ key = find_and_lock_process_key(ci->ci_inode->i_sb->s_cop->key_prefix,
+ ci->ci_policy.v1.master_key_descriptor,
+ ci->ci_mode->keysize, &payload);
+ }
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ err = fscrypt_setup_v1_file_key(ci, payload->raw);
+ up_read(&key->sem);
+ key_put(key);
+ return err;
+}
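
A minimal userspace sketch of how a v1 master key reaches the process-subscribed keyrings searched by find_and_lock_process_key(); it assumes libkeyutils and the <linux/fscrypt.h> UAPI header, and the 16-hex-character descriptor shown is made up:

#include <keyutils.h>
#include <linux/fscrypt.h>
#include <string.h>

static key_serial_t add_v1_master_key(const unsigned char *raw, unsigned int size)
{
	/* size must not exceed FSCRYPT_MAX_KEY_SIZE */
	struct fscrypt_key payload = { .mode = 0, .size = size };

	/* The "logon" key type keeps the payload unreadable from userspace. */
	memcpy(payload.raw, raw, size);
	return add_key("logon", "fscrypt:0123456789abcdef",
		       &payload, sizeof(payload), KEY_SPEC_SESSION_KEYRING);
}
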
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 4941fe8471ce..96f528071bed 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -5,8 +5,9 @@
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, Motorola Mobility.
*
- * Written by Michael Halcrow, 2015.
+ * Originally written by Michael Halcrow, 2015.
* Modified by Jaegeuk Kim, 2015.
+ * Modified by Eric Biggers, 2019 for v2 policy support.
*/
#include <linux/random.h>
@@ -14,70 +15,342 @@
#include <linux/mount.h>
#include "fscrypt_private.h"
-/*
- * check whether an encryption policy is consistent with an encryption context
+/**
+ * fscrypt_policies_equal - check whether two encryption policies are the same
+ *
+ * Return: %true if equal, else %false
*/
-static bool is_encryption_context_consistent_with_policy(
- const struct fscrypt_context *ctx,
- const struct fscrypt_policy *policy)
+bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
+ const union fscrypt_policy *policy2)
{
- return memcmp(ctx->master_key_descriptor, policy->master_key_descriptor,
- FS_KEY_DESCRIPTOR_SIZE) == 0 &&
- (ctx->flags == policy->flags) &&
- (ctx->contents_encryption_mode ==
- policy->contents_encryption_mode) &&
- (ctx->filenames_encryption_mode ==
- policy->filenames_encryption_mode);
+ if (policy1->version != policy2->version)
+ return false;
+
+ return !memcmp(policy1, policy2, fscrypt_policy_size(policy1));
}
-static int create_encryption_context_from_policy(struct inode *inode,
- const struct fscrypt_policy *policy)
+static bool supported_iv_ino_lblk_64_policy(
+ const struct fscrypt_policy_v2 *policy,
+ const struct inode *inode)
{
- struct fscrypt_context ctx;
+ struct super_block *sb = inode->i_sb;
+ int ino_bits = 64, lblk_bits = 64;
+
+ if (policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
+ fscrypt_warn(inode,
+ "The DIRECT_KEY and IV_INO_LBLK_64 flags are mutually exclusive");
+ return false;
+ }
+ /*
+ * It's unsafe to include inode numbers in the IVs if the filesystem can
+ * potentially renumber inodes, e.g. via filesystem shrinking.
+ */
+ if (!sb->s_cop->has_stable_inodes ||
+ !sb->s_cop->has_stable_inodes(sb)) {
+ fscrypt_warn(inode,
+ "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't have stable inode numbers",
+ sb->s_id);
+ return false;
+ }
+ if (sb->s_cop->get_ino_and_lblk_bits)
+ sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
+ if (ino_bits > 32 || lblk_bits > 32) {
+ fscrypt_warn(inode,
+ "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't use 32-bit inode and block numbers",
+ sb->s_id);
+ return false;
+ }
+ return true;
+}
- ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
- memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
- FS_KEY_DESCRIPTOR_SIZE);
+/**
+ * fscrypt_supported_policy - check whether an encryption policy is supported
+ *
+ * Given an encryption policy, check whether all its encryption modes and other
+ * settings are supported by this kernel. (But we don't currently don't check
+ * for crypto API support here, so attempting to use an algorithm not configured
+ * into the crypto API will still fail later.)
+ *
+ * Return: %true if supported, else %false
+ */
+bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
+ const struct inode *inode)
+{
+ switch (policy_u->version) {
+ case FSCRYPT_POLICY_V1: {
+ const struct fscrypt_policy_v1 *policy = &policy_u->v1;
+
+ if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
+ policy->filenames_encryption_mode)) {
+ fscrypt_warn(inode,
+ "Unsupported encryption modes (contents %d, filenames %d)",
+ policy->contents_encryption_mode,
+ policy->filenames_encryption_mode);
+ return false;
+ }
+
+ if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
+ FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
+ fscrypt_warn(inode,
+ "Unsupported encryption flags (0x%02x)",
+ policy->flags);
+ return false;
+ }
+
+ return true;
+ }
+ case FSCRYPT_POLICY_V2: {
+ const struct fscrypt_policy_v2 *policy = &policy_u->v2;
+
+ if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
+ policy->filenames_encryption_mode)) {
+ fscrypt_warn(inode,
+ "Unsupported encryption modes (contents %d, filenames %d)",
+ policy->contents_encryption_mode,
+ policy->filenames_encryption_mode);
+ return false;
+ }
+
+ if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
+ fscrypt_warn(inode,
+ "Unsupported encryption flags (0x%02x)",
+ policy->flags);
+ return false;
+ }
+
+ if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
+ !supported_iv_ino_lblk_64_policy(policy, inode))
+ return false;
+
+ if (memchr_inv(policy->__reserved, 0,
+ sizeof(policy->__reserved))) {
+ fscrypt_warn(inode,
+ "Reserved bits set in encryption policy");
+ return false;
+ }
+
+ return true;
+ }
+ }
+ return false;
+}
- if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
- policy->filenames_encryption_mode))
+/**
+ * fscrypt_new_context_from_policy - create a new fscrypt_context from a policy
+ *
+ * Create an fscrypt_context for an inode that is being assigned the given
+ * encryption policy. A new nonce is randomly generated.
+ *
+ * Return: the size of the new context in bytes.
+ */
+static int fscrypt_new_context_from_policy(union fscrypt_context *ctx_u,
+ const union fscrypt_policy *policy_u)
+{
+ memset(ctx_u, 0, sizeof(*ctx_u));
+
+ switch (policy_u->version) {
+ case FSCRYPT_POLICY_V1: {
+ const struct fscrypt_policy_v1 *policy = &policy_u->v1;
+ struct fscrypt_context_v1 *ctx = &ctx_u->v1;
+
+ ctx->version = FSCRYPT_CONTEXT_V1;
+ ctx->contents_encryption_mode =
+ policy->contents_encryption_mode;
+ ctx->filenames_encryption_mode =
+ policy->filenames_encryption_mode;
+ ctx->flags = policy->flags;
+ memcpy(ctx->master_key_descriptor,
+ policy->master_key_descriptor,
+ sizeof(ctx->master_key_descriptor));
+ get_random_bytes(ctx->nonce, sizeof(ctx->nonce));
+ return sizeof(*ctx);
+ }
+ case FSCRYPT_POLICY_V2: {
+ const struct fscrypt_policy_v2 *policy = &policy_u->v2;
+ struct fscrypt_context_v2 *ctx = &ctx_u->v2;
+
+ ctx->version = FSCRYPT_CONTEXT_V2;
+ ctx->contents_encryption_mode =
+ policy->contents_encryption_mode;
+ ctx->filenames_encryption_mode =
+ policy->filenames_encryption_mode;
+ ctx->flags = policy->flags;
+ memcpy(ctx->master_key_identifier,
+ policy->master_key_identifier,
+ sizeof(ctx->master_key_identifier));
+ get_random_bytes(ctx->nonce, sizeof(ctx->nonce));
+ return sizeof(*ctx);
+ }
+ }
+ BUG();
+}
+
+/**
+ * fscrypt_policy_from_context - convert an fscrypt_context to an fscrypt_policy
+ *
+ * Given an fscrypt_context, build the corresponding fscrypt_policy.
+ *
+ * Return: 0 on success, or -EINVAL if the fscrypt_context has an unrecognized
+ * version number or size.
+ *
+ * This does *not* validate the settings within the policy itself, e.g. the
+ * modes, flags, and reserved bits. Use fscrypt_supported_policy() for that.
+ */
+int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
+ const union fscrypt_context *ctx_u,
+ int ctx_size)
+{
+ memset(policy_u, 0, sizeof(*policy_u));
+
+ if (ctx_size <= 0 || ctx_size != fscrypt_context_size(ctx_u))
return -EINVAL;
- if (policy->flags & ~FS_POLICY_FLAGS_VALID)
+ switch (ctx_u->version) {
+ case FSCRYPT_CONTEXT_V1: {
+ const struct fscrypt_context_v1 *ctx = &ctx_u->v1;
+ struct fscrypt_policy_v1 *policy = &policy_u->v1;
+
+ policy->version = FSCRYPT_POLICY_V1;
+ policy->contents_encryption_mode =
+ ctx->contents_encryption_mode;
+ policy->filenames_encryption_mode =
+ ctx->filenames_encryption_mode;
+ policy->flags = ctx->flags;
+ memcpy(policy->master_key_descriptor,
+ ctx->master_key_descriptor,
+ sizeof(policy->master_key_descriptor));
+ return 0;
+ }
+ case FSCRYPT_CONTEXT_V2: {
+ const struct fscrypt_context_v2 *ctx = &ctx_u->v2;
+ struct fscrypt_policy_v2 *policy = &policy_u->v2;
+
+ policy->version = FSCRYPT_POLICY_V2;
+ policy->contents_encryption_mode =
+ ctx->contents_encryption_mode;
+ policy->filenames_encryption_mode =
+ ctx->filenames_encryption_mode;
+ policy->flags = ctx->flags;
+ memcpy(policy->__reserved, ctx->__reserved,
+ sizeof(policy->__reserved));
+ memcpy(policy->master_key_identifier,
+ ctx->master_key_identifier,
+ sizeof(policy->master_key_identifier));
+ return 0;
+ }
+ }
+ /* unreachable */
+ return -EINVAL;
+}
+
+/* Retrieve an inode's encryption policy */
+static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy)
+{
+ const struct fscrypt_info *ci;
+ union fscrypt_context ctx;
+ int ret;
+
+ ci = READ_ONCE(inode->i_crypt_info);
+ if (ci) {
+ /* key available, use the cached policy */
+ *policy = ci->ci_policy;
+ return 0;
+ }
+
+ if (!IS_ENCRYPTED(inode))
+ return -ENODATA;
+
+ ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (ret < 0)
+ return (ret == -ERANGE) ? -EINVAL : ret;
+
+ return fscrypt_policy_from_context(policy, &ctx, ret);
+}
+
+static int set_encryption_policy(struct inode *inode,
+ const union fscrypt_policy *policy)
+{
+ union fscrypt_context ctx;
+ int ctxsize;
+ int err;
+
+ if (!fscrypt_supported_policy(policy, inode))
return -EINVAL;
- ctx.contents_encryption_mode = policy->contents_encryption_mode;
- ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
- ctx.flags = policy->flags;
- BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE);
- get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+ switch (policy->version) {
+ case FSCRYPT_POLICY_V1:
+ /*
+ * The original encryption policy version provided no way of
+ * verifying that the correct master key was supplied, which was
+ * insecure in scenarios where multiple users have access to the
+ * same encrypted files (even just read-only access). The new
+ * encryption policy version fixes this and also implies use of
+ * an improved key derivation function and allows non-root users
+ * to securely remove keys. So as long as compatibility with
+ * old kernels isn't required, it is recommended to use the new
+ * policy version for all new encrypted directories.
+ */
+ pr_warn_once("%s (pid %d) is setting deprecated v1 encryption policy; recommend upgrading to v2.\n",
+ current->comm, current->pid);
+ break;
+ case FSCRYPT_POLICY_V2:
+ err = fscrypt_verify_key_added(inode->i_sb,
+ policy->v2.master_key_identifier);
+ if (err)
+ return err;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ctxsize = fscrypt_new_context_from_policy(&ctx, policy);
- return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
+ return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, NULL);
}
int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
{
- struct fscrypt_policy policy;
+ union fscrypt_policy policy;
+ union fscrypt_policy existing_policy;
struct inode *inode = file_inode(filp);
+ u8 version;
+ int size;
int ret;
- struct fscrypt_context ctx;
- if (copy_from_user(&policy, arg, sizeof(policy)))
+ if (get_user(policy.version, (const u8 __user *)arg))
return -EFAULT;
+ size = fscrypt_policy_size(&policy);
+ if (size <= 0)
+ return -EINVAL;
+
+ /*
+ * We should just copy the remaining 'size - 1' bytes here, but a
+ * bizarre bug in gcc 7 and earlier (fixed by gcc r255731) causes gcc to
+ * think that size can be 0 here (despite the check above!) *and* that
+ * it's a compile-time constant. Thus it would think copy_from_user()
+ * is passed compile-time constant ULONG_MAX, causing the compile-time
+ * buffer overflow check to fail, breaking the build. This only occurred
+ * when building an i386 kernel with -Os and branch profiling enabled.
+ *
+ * Work around it by just copying the first byte again...
+ */
+ version = policy.version;
+ if (copy_from_user(&policy, arg, size))
+ return -EFAULT;
+ policy.version = version;
+
if (!inode_owner_or_capable(inode))
return -EACCES;
- if (policy.version != 0)
- return -EINVAL;
-
ret = mnt_want_write_file(filp);
if (ret)
return ret;
inode_lock(inode);
- ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ ret = fscrypt_get_policy(inode, &existing_policy);
if (ret == -ENODATA) {
if (!S_ISDIR(inode->i_mode))
ret = -ENOTDIR;
@@ -86,14 +359,10 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
else if (!inode->i_sb->s_cop->empty_dir(inode))
ret = -ENOTEMPTY;
else
- ret = create_encryption_context_from_policy(inode,
- &policy);
- } else if (ret == sizeof(ctx) &&
- is_encryption_context_consistent_with_policy(&ctx,
- &policy)) {
- /* The file already uses the same encryption policy. */
- ret = 0;
- } else if (ret >= 0 || ret == -ERANGE) {
+ ret = set_encryption_policy(inode, &policy);
+ } else if (ret == -EINVAL ||
+ (ret == 0 && !fscrypt_policies_equal(&policy,
+ &existing_policy))) {
/* The file already uses a different encryption policy. */
ret = -EEXIST;
}
@@ -105,37 +374,57 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
}
EXPORT_SYMBOL(fscrypt_ioctl_set_policy);
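
A minimal userspace sketch of setting a v2 policy through the ioctl served by fscrypt_ioctl_set_policy(); it assumes the <linux/fscrypt.h> UAPI header, an open fd on an empty encrypted-capable directory, and a key identifier previously returned by the key-add ioctl:

#include <linux/fscrypt.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_v2_policy(int dirfd,
			 const __u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
	struct fscrypt_policy_v2 policy = {
		.version = FSCRYPT_POLICY_V2,
		.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS,
		.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS,
		.flags = FSCRYPT_POLICY_FLAGS_PAD_32,
	};

	memcpy(policy.master_key_identifier, identifier,
	       FSCRYPT_KEY_IDENTIFIER_SIZE);
	/* The kernel reads the version byte first, then the rest of the
	 * structure, so a v2 policy can be passed to the same ioctl. */
	return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}
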
+/* Original ioctl version; can only get the original policy version */
int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
{
- struct inode *inode = file_inode(filp);
- struct fscrypt_context ctx;
- struct fscrypt_policy policy;
- int res;
+ union fscrypt_policy policy;
+ int err;
- if (!IS_ENCRYPTED(inode))
- return -ENODATA;
+ err = fscrypt_get_policy(file_inode(filp), &policy);
+ if (err)
+ return err;
- res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
- if (res < 0 && res != -ERANGE)
- return res;
- if (res != sizeof(ctx))
- return -EINVAL;
- if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+ if (policy.version != FSCRYPT_POLICY_V1)
return -EINVAL;
- policy.version = 0;
- policy.contents_encryption_mode = ctx.contents_encryption_mode;
- policy.filenames_encryption_mode = ctx.filenames_encryption_mode;
- policy.flags = ctx.flags;
- memcpy(policy.master_key_descriptor, ctx.master_key_descriptor,
- FS_KEY_DESCRIPTOR_SIZE);
-
- if (copy_to_user(arg, &policy, sizeof(policy)))
+ if (copy_to_user(arg, &policy, sizeof(policy.v1)))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
+/* Extended ioctl version; can get policies of any version */
+int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *uarg)
+{
+ struct fscrypt_get_policy_ex_arg arg;
+ union fscrypt_policy *policy = (union fscrypt_policy *)&arg.policy;
+ size_t policy_size;
+ int err;
+
+ /* arg is policy_size, then policy */
+ BUILD_BUG_ON(offsetof(typeof(arg), policy_size) != 0);
+ BUILD_BUG_ON(offsetofend(typeof(arg), policy_size) !=
+ offsetof(typeof(arg), policy));
+ BUILD_BUG_ON(sizeof(arg.policy) != sizeof(*policy));
+
+ err = fscrypt_get_policy(file_inode(filp), policy);
+ if (err)
+ return err;
+ policy_size = fscrypt_policy_size(policy);
+
+ if (copy_from_user(&arg, uarg, sizeof(arg.policy_size)))
+ return -EFAULT;
+
+ if (policy_size > arg.policy_size)
+ return -EOVERFLOW;
+ arg.policy_size = policy_size;
+
+ if (copy_to_user(uarg, &arg, sizeof(arg.policy_size) + policy_size))
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_policy_ex);
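
A minimal userspace sketch of calling the extended ioctl served by fscrypt_ioctl_get_policy_ex(); it assumes <linux/fscrypt.h> and an open file descriptor on an encrypted file or directory:

#include <linux/fscrypt.h>
#include <sys/ioctl.h>

static int get_policy_ex(int fd, struct fscrypt_get_policy_ex_arg *arg)
{
	/* On input, policy_size is the size of the buffer; on success it is
	 * updated to the size of the returned policy. */
	arg->policy_size = sizeof(arg->policy);
	return ioctl(fd, FS_IOC_GET_ENCRYPTION_POLICY_EX, arg);
}
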
+
/**
* fscrypt_has_permitted_context() - is a file's encryption policy permitted
* within its directory?
@@ -157,10 +446,8 @@ EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
*/
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
{
- const struct fscrypt_operations *cops = parent->i_sb->s_cop;
- const struct fscrypt_info *parent_ci, *child_ci;
- struct fscrypt_context parent_ctx, child_ctx;
- int res;
+ union fscrypt_policy parent_policy, child_policy;
+ int err;
/* No restrictions on file types which are never encrypted */
if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
@@ -190,41 +477,22 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
* In any case, if an unexpected error occurs, fall back to "forbidden".
*/
- res = fscrypt_get_encryption_info(parent);
- if (res)
+ err = fscrypt_get_encryption_info(parent);
+ if (err)
return 0;
- res = fscrypt_get_encryption_info(child);
- if (res)
+ err = fscrypt_get_encryption_info(child);
+ if (err)
return 0;
- parent_ci = READ_ONCE(parent->i_crypt_info);
- child_ci = READ_ONCE(child->i_crypt_info);
-
- if (parent_ci && child_ci) {
- return memcmp(parent_ci->ci_master_key_descriptor,
- child_ci->ci_master_key_descriptor,
- FS_KEY_DESCRIPTOR_SIZE) == 0 &&
- (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
- (parent_ci->ci_filename_mode ==
- child_ci->ci_filename_mode) &&
- (parent_ci->ci_flags == child_ci->ci_flags);
- }
- res = cops->get_context(parent, &parent_ctx, sizeof(parent_ctx));
- if (res != sizeof(parent_ctx))
+ err = fscrypt_get_policy(parent, &parent_policy);
+ if (err)
return 0;
- res = cops->get_context(child, &child_ctx, sizeof(child_ctx));
- if (res != sizeof(child_ctx))
+ err = fscrypt_get_policy(child, &child_policy);
+ if (err)
return 0;
- return memcmp(parent_ctx.master_key_descriptor,
- child_ctx.master_key_descriptor,
- FS_KEY_DESCRIPTOR_SIZE) == 0 &&
- (parent_ctx.contents_encryption_mode ==
- child_ctx.contents_encryption_mode) &&
- (parent_ctx.filenames_encryption_mode ==
- child_ctx.filenames_encryption_mode) &&
- (parent_ctx.flags == child_ctx.flags);
+ return fscrypt_policies_equal(&parent_policy, &child_policy);
}
EXPORT_SYMBOL(fscrypt_has_permitted_context);
@@ -240,7 +508,8 @@ EXPORT_SYMBOL(fscrypt_has_permitted_context);
int fscrypt_inherit_context(struct inode *parent, struct inode *child,
void *fs_data, bool preload)
{
- struct fscrypt_context ctx;
+ union fscrypt_context ctx;
+ int ctxsize;
struct fscrypt_info *ci;
int res;
@@ -252,16 +521,10 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child,
if (ci == NULL)
return -ENOKEY;
- ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
- ctx.contents_encryption_mode = ci->ci_data_mode;
- ctx.filenames_encryption_mode = ci->ci_filename_mode;
- ctx.flags = ci->ci_flags;
- memcpy(ctx.master_key_descriptor, ci->ci_master_key_descriptor,
- FS_KEY_DESCRIPTOR_SIZE);
- get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+ ctxsize = fscrypt_new_context_from_policy(&ctx, &ci->ci_policy);
+
BUILD_BUG_ON(sizeof(ctx) != FSCRYPT_SET_CONTEXT_MAX_SIZE);
- res = parent->i_sb->s_cop->set_context(child, &ctx,
- sizeof(ctx), fs_data);
+ res = parent->i_sb->s_cop->set_context(child, &ctx, ctxsize, fs_data);
if (res)
return res;
return preload ? fscrypt_get_encryption_info(child): 0;
diff --git a/fs/d_path.c b/fs/d_path.c
index a7d0a96b35ce..0f1fc1743302 100644
--- a/fs/d_path.c
+++ b/fs/d_path.c
@@ -116,8 +116,10 @@ restart:
vfsmnt = &mnt->mnt;
continue;
}
- if (!error)
- error = is_mounted(vfsmnt) ? 1 : 2;
+ if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns))
+ error = 1; // absolute root
+ else
+ error = 2; // detached or not attached yet
break;
}
parent = dentry->d_parent;
diff --git a/fs/dax.c b/fs/dax.c
index a237141d8787..1f1f0201cad1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -220,10 +220,11 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
for (;;) {
entry = xas_find_conflict(xas);
+ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
+ return entry;
if (dax_entry_order(entry) < order)
return XA_RETRY_ENTRY;
- if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
- !dax_is_locked(entry))
+ if (!dax_is_locked(entry))
return entry;
wq = dax_entry_waitqueue(xas, entry, &ewait.key);
@@ -266,7 +267,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
/* If we were the only waiter woken, wake the next one */
- if (entry && dax_is_conflict(entry))
+ if (entry && !dax_is_conflict(entry))
dax_wake_entry(xas, entry, false);
}
@@ -600,7 +601,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
* guaranteed to either see new references or prevent new
* references from being established.
*/
- unmap_mapping_range(mapping, 0, 0, 1);
+ unmap_mapping_range(mapping, 0, 0, 0);
xas_lock_irq(&xas);
xas_for_each(&xas, entry, ULONG_MAX) {
@@ -1090,7 +1091,7 @@ EXPORT_SYMBOL_GPL(__dax_zero_page_range);
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+ struct iomap *iomap, struct iomap *srcmap)
{
struct block_device *bdev = iomap->bdev;
struct dax_device *dax_dev = iomap->dax_dev;
@@ -1247,7 +1248,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
- struct iomap iomap = { 0 };
+ struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap srcmap = { .type = IOMAP_HOLE };
unsigned flags = IOMAP_FAULT;
int error, major = 0;
bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -1292,7 +1294,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
* the file system block size to be equal the page size, which means
* that we never have to deal with more than a single extent here.
*/
- error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
+ error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
if (iomap_errp)
*iomap_errp = error;
if (error) {
@@ -1471,7 +1473,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
struct inode *inode = mapping->host;
vm_fault_t result = VM_FAULT_FALLBACK;
- struct iomap iomap = { 0 };
+ struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap srcmap = { .type = IOMAP_HOLE };
pgoff_t max_pgoff;
void *entry;
loff_t pos;
@@ -1546,7 +1549,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* to look up our filesystem block.
*/
pos = (loff_t)xas.xa_index << PAGE_SHIFT;
- error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
+ error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
+ &srcmap);
if (error)
goto unlock_entry;
diff --git a/fs/dcache.c b/fs/dcache.c
index e88cf0554e65..f7931b682a0d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1319,7 +1319,7 @@ resume:
if (!list_empty(&dentry->d_subdirs)) {
spin_unlock(&this_parent->d_lock);
- spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
+ spin_release(&dentry->d_lock.dep_map, _RET_IP_);
this_parent = dentry;
spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
goto repeat;
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 93e4ca6b2ad7..dede25247b81 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/poll.h>
+#include <linux/security.h>
#include "internal.h"
@@ -136,6 +137,25 @@ void debugfs_file_put(struct dentry *dentry)
}
EXPORT_SYMBOL_GPL(debugfs_file_put);
+/*
+ * Only permit access to world-readable files when the kernel is locked down.
+ * We also need to exclude any file that has ways to write or alter it, since
+ * root can bypass the permissions check.
+ */
+static bool debugfs_is_locked_down(struct inode *inode,
+ struct file *filp,
+ const struct file_operations *real_fops)
+{
+ if ((inode->i_mode & 07777) == 0444 &&
+ !(filp->f_mode & FMODE_WRITE) &&
+ !real_fops->unlocked_ioctl &&
+ !real_fops->compat_ioctl &&
+ !real_fops->mmap)
+ return false;
+
+ return security_locked_down(LOCKDOWN_DEBUGFS);
+}
+
static int open_proxy_open(struct inode *inode, struct file *filp)
{
struct dentry *dentry = F_DENTRY(filp);
@@ -147,6 +167,11 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
return r == -EIO ? -ENOENT : r;
real_fops = debugfs_real_fops(filp);
+
+ r = debugfs_is_locked_down(inode, filp, real_fops);
+ if (r)
+ goto out;
+
real_fops = fops_get(real_fops);
if (!real_fops) {
/* Huh? Module did not clean up after itself at exit? */
@@ -272,6 +297,11 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
return r == -EIO ? -ENOENT : r;
real_fops = debugfs_real_fops(filp);
+
+ r = debugfs_is_locked_down(inode, filp, real_fops);
+ if (r)
+ goto out;
+
real_fops = fops_get(real_fops);
if (!real_fops) {
/* Huh? Module did not cleanup after itself at exit? */
@@ -390,20 +420,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n");
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value)
+void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u8,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u8,
&fops_u8_ro, &fops_u8_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u8);
@@ -435,20 +456,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n");
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value)
+void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u16,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u16,
&fops_u16_ro, &fops_u16_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u16);
@@ -526,20 +538,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value)
+void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u64,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u64,
&fops_u64_ro, &fops_u64_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u64);
@@ -630,10 +633,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_x64_wo, NULL, debugfs_u64_set, "0x%016llx\n");
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value)
+void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x8,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x8,
&fops_x8_ro, &fops_x8_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x8);
@@ -648,10 +651,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_x8);
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value)
+void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x16,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x16,
&fops_x16_ro, &fops_x16_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x16);
@@ -666,10 +669,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_x16);
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value)
+void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent,
+ u32 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x32,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x32,
&fops_x32_ro, &fops_x32_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x32);
@@ -684,10 +687,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_x32);
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value)
+void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x64,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x64,
&fops_x64_ro, &fops_x64_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x64);
@@ -718,12 +721,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t_wo, NULL, debugfs_size_t_set, "%llu\n");
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent, size_t *value)
+void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value,
- &fops_size_t, &fops_size_t_ro,
- &fops_size_t_wo);
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_size_t,
+ &fops_size_t_ro, &fops_size_t_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_size_t);
@@ -755,12 +757,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value)
+void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value,
- &fops_atomic_t, &fops_atomic_t_ro,
- &fops_atomic_t_wo);
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_atomic_t,
+ &fops_atomic_t_ro, &fops_atomic_t_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_atomic_t);
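
A minimal caller-side sketch of the changed convention: the debugfs_create_* helpers above now return void, so callers simply create the files and clean up by removing the parent directory; my_counter and example_create_debugfs_files are hypothetical names:

static u8 my_counter;

static void example_create_debugfs_files(struct dentry *my_dir)
{
	/* No return value to check; cleanup happens when the parent
	 * directory is removed, e.g. via debugfs_remove_recursive(). */
	debugfs_create_u8("counter", 0644, my_dir, &my_counter);
}
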
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 042b688ed124..7b975dbb2bb4 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -26,6 +26,7 @@
#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
+#include <linux/security.h>
#include "internal.h"
@@ -35,6 +36,32 @@ static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
+/*
+ * Don't allow access attributes to be changed whilst the kernel is locked down
+ * so that we can use the file mode as part of a heuristic to determine whether
+ * to lock down individual files.
+ */
+static int debugfs_setattr(struct dentry *dentry, struct iattr *ia)
+{
+ int ret = security_locked_down(LOCKDOWN_DEBUGFS);
+
+ if (ret && (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
+ return ret;
+ return simple_setattr(dentry, ia);
+}
+
+static const struct inode_operations debugfs_file_inode_operations = {
+ .setattr = debugfs_setattr,
+};
+static const struct inode_operations debugfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .setattr = debugfs_setattr,
+};
+static const struct inode_operations debugfs_symlink_inode_operations = {
+ .get_link = simple_get_link,
+ .setattr = debugfs_setattr,
+};
+
static struct inode *debugfs_get_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
@@ -369,6 +396,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
inode->i_mode = mode;
inode->i_private = data;
+ inode->i_op = &debugfs_file_inode_operations;
inode->i_fop = proxy_fops;
dentry->d_fsdata = (void *)((unsigned long)real_fops |
DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
@@ -532,7 +560,7 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
}
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
- inode->i_op = &simple_dir_inode_operations;
+ inode->i_op = &debugfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -632,7 +660,7 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
return failed_creating(dentry);
}
inode->i_mode = S_IFLNK | S_IRWXUGO;
- inode->i_op = &simple_symlink_inode_operations;
+ inode->i_op = &debugfs_symlink_inode_operations;
inode->i_link = link;
d_instantiate(dentry, inode);
return end_creating(dentry);
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index beeadca23b05..42e5a766d33c 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -622,7 +622,7 @@ void devpts_pty_kill(struct dentry *dentry)
dentry->d_fsdata = NULL;
drop_nlink(dentry->d_inode);
fsnotify_unlink(d_inode(dentry->d_parent), dentry);
- d_delete(dentry);
+ d_drop(dentry);
dput(dentry); /* d_alloc_name() in devpts_pty_new() */
}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index ae196784f487..0ec4f270139f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -221,29 +221,7 @@ static inline struct page *dio_get_page(struct dio *dio,
}
/*
- * Warn about a page cache invalidation failure during a direct io write.
- */
-void dio_warn_stale_pagecache(struct file *filp)
-{
- static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
- char pathname[128];
- struct inode *inode = file_inode(filp);
- char *path;
-
- errseq_set(&inode->i_mapping->wb_err, -EIO);
- if (__ratelimit(&_rs)) {
- path = file_path(filp, pathname, sizeof(pathname));
- if (IS_ERR(path))
- path = "(unknown)";
- pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
- pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
- current->comm);
- }
-}
-
-/**
* dio_complete() - called when all DIO BIO I/O has been completed
- * @offset: the byte offset in the file of the completed operation
*
* This drops i_dio_count, lets interested parties know that a DIO operation
* has completed, and calculates the resulting return code for the operation.
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index feecb57defa7..5fb45d865ce5 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -378,6 +378,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return rc;
switch (cmd) {
+ case FITRIM:
case FS_IOC32_GETFLAGS:
case FS_IOC32_SETFLAGS:
case FS_IOC32_GETVERSION:
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 18426f4855f1..e23752d9a79f 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -128,13 +128,20 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
struct inode *inode)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
- struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
struct dentry *lower_dir_dentry;
+ struct inode *lower_dir_inode;
int rc;
- dget(lower_dentry);
- lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
+ lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+ lower_dir_inode = d_inode(lower_dir_dentry);
+ inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+ dget(lower_dentry); // don't even try to make the lower negative
+ if (lower_dentry->d_parent != lower_dir_dentry)
+ rc = -EINVAL;
+ else if (d_unhashed(lower_dentry))
+ rc = -EINVAL;
+ else
+ rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
if (rc) {
printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
goto out_unlock;
@@ -142,10 +149,11 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
fsstack_copy_attr_times(dir, lower_dir_inode);
set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
inode->i_ctime = dir->i_ctime;
- d_drop(dentry);
out_unlock:
- unlock_dir(lower_dir_dentry);
dput(lower_dentry);
+ inode_unlock(lower_dir_inode);
+ if (!rc)
+ d_drop(dentry);
return rc;
}
@@ -311,9 +319,9 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry)
{
- struct inode *inode, *lower_inode = d_inode(lower_dentry);
+ struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+ struct inode *inode, *lower_inode;
struct ecryptfs_dentry_info *dentry_info;
- struct vfsmount *lower_mnt;
int rc = 0;
dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
@@ -322,16 +330,23 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
return ERR_PTR(-ENOMEM);
}
- lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
fsstack_copy_attr_atime(d_inode(dentry->d_parent),
- d_inode(lower_dentry->d_parent));
+ d_inode(path->dentry));
BUG_ON(!d_count(lower_dentry));
ecryptfs_set_dentry_private(dentry, dentry_info);
- dentry_info->lower_path.mnt = lower_mnt;
+ dentry_info->lower_path.mnt = mntget(path->mnt);
dentry_info->lower_path.dentry = lower_dentry;
- if (d_really_is_negative(lower_dentry)) {
+ /*
+ * negative dentry can go positive under us here - its parent is not
+ * locked. That's OK and that could happen just as we return from
+ * ecryptfs_lookup() anyway. Just need to be careful and fetch
+ * ->d_inode only once - it's not stable here.
+ */
+ lower_inode = READ_ONCE(lower_dentry->d_inode);
+
+ if (!lower_inode) {
/* We want to add because we couldn't find in lower */
d_add(dentry, NULL);
return NULL;
@@ -512,22 +527,30 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
+ struct inode *lower_dir_inode;
int rc;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
- dget(dentry);
- lower_dir_dentry = lock_parent(lower_dentry);
- dget(lower_dentry);
- rc = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry);
- dput(lower_dentry);
- if (!rc && d_really_is_positive(dentry))
+ lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+ lower_dir_inode = d_inode(lower_dir_dentry);
+
+ inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+ dget(lower_dentry); // don't even try to make the lower negative
+ if (lower_dentry->d_parent != lower_dir_dentry)
+ rc = -EINVAL;
+ else if (d_unhashed(lower_dentry))
+ rc = -EINVAL;
+ else
+ rc = vfs_rmdir(lower_dir_inode, lower_dentry);
+ if (!rc) {
clear_nlink(d_inode(dentry));
- fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
- set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
- unlock_dir(lower_dir_dentry);
+ fsstack_copy_attr_times(dir, lower_dir_inode);
+ set_nlink(dir, lower_dir_inode->i_nlink);
+ }
+ dput(lower_dentry);
+ inode_unlock(lower_dir_inode);
if (!rc)
d_drop(dentry);
- dput(dentry);
return rc;
}
@@ -565,20 +588,22 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
- struct dentry *trap = NULL;
+ struct dentry *trap;
struct inode *target_inode;
if (flags)
return -EINVAL;
+ lower_old_dir_dentry = ecryptfs_dentry_to_lower(old_dentry->d_parent);
+ lower_new_dir_dentry = ecryptfs_dentry_to_lower(new_dentry->d_parent);
+
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
- dget(lower_old_dentry);
- dget(lower_new_dentry);
- lower_old_dir_dentry = dget_parent(lower_old_dentry);
- lower_new_dir_dentry = dget_parent(lower_new_dentry);
+
target_inode = d_inode(new_dentry);
+
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ dget(lower_new_dentry);
rc = -EINVAL;
if (lower_old_dentry->d_parent != lower_old_dir_dentry)
goto out_lock;
@@ -606,11 +631,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (new_dir != old_dir)
fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
out_lock:
- unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
- dput(lower_new_dir_dentry);
- dput(lower_old_dir_dentry);
dput(lower_new_dentry);
- dput(lower_old_dentry);
+ unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
return rc;
}
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 867fc24dee20..4a6ebff2af76 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -257,6 +257,8 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
if (!sb)
return -ENOMEM;
s->s_fs_info = sb;
+ s->s_time_min = 0;
+ s->s_time_max = U32_MAX;
s->s_magic = EFS_SUPER_MAGIC;
if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) {
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
new file mode 100644
index 000000000000..74b0aaa7114c
--- /dev/null
+++ b/fs/erofs/Kconfig
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config EROFS_FS
+ tristate "EROFS filesystem support"
+ depends on BLOCK
+ select LIBCRC32C
+ help
+ EROFS (Enhanced Read-Only File System) is a lightweight
+ read-only file system with modern designs (e.g. page-sized
+ blocks, inline xattrs/data, etc.) for scenarios with
+ high-performance read-only requirements, e.g. Android OS
+ for mobile phones and LIVECDs.
+
+ It also provides fixed-sized output compression support,
+ which improves storage density and keeps relatively high
+ compression ratios, making it especially useful for achieving
+ high performance on embedded devices with limited memory.
+
+ If unsure, say N.
+
+config EROFS_FS_DEBUG
+ bool "EROFS debugging feature"
+ depends on EROFS_FS
+ help
+ Print debugging messages and enable more BUG_ONs which check
+ filesystem consistency and find potential issues aggressively.
+ This can be useful for an Android eng build, for example.
+
+ For daily use, say N.
+
+config EROFS_FS_XATTR
+ bool "EROFS extended attributes"
+ depends on EROFS_FS
+ default y
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page, or visit
+ <http://acl.bestbits.at/> for details).
+
+ If unsure, say N.
+
+config EROFS_FS_POSIX_ACL
+ bool "EROFS Access Control Lists"
+ depends on EROFS_FS_XATTR
+ select FS_POSIX_ACL
+ default y
+ help
+ Posix Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the POSIX ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N.
+
+config EROFS_FS_SECURITY
+ bool "EROFS Security Labels"
+ depends on EROFS_FS_XATTR
+ default y
+ help
+ Security labels provide an access control facility to support Linux
+ Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO
+ Linux. This option enables an extended attribute handler for file
+ security labels in the erofs filesystem, which requires extended
+ attribute support to be enabled in advance.
+
+ If you are not using a security module, say N.
+
+config EROFS_FS_ZIP
+ bool "EROFS Data Compression Support"
+ depends on EROFS_FS
+ select LZ4_DECOMPRESS
+ default y
+ help
+ Enable fixed-sized output compression for EROFS.
+
+ If you don't want to enable the compression feature, say N.
+
+config EROFS_FS_CLUSTER_PAGE_LIMIT
+ int "EROFS Cluster Pages Hard Limit"
+ depends on EROFS_FS_ZIP
+ range 1 256
+ default "1"
+ help
+ Indicates the maximum number of pages in a compressed
+ physical cluster.
+
+ For example, if files in an image were compressed
+ into 8k units, the hard limit should not be configured
+ to less than 2. Otherwise, the image cannot be mounted
+ on this kernel.
+
diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
new file mode 100644
index 000000000000..46f2aa4ba46c
--- /dev/null
+++ b/fs/erofs/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+EROFS_VERSION = "1.0"
+
+ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\"
+
+obj-$(CONFIG_EROFS_FS) += erofs.o
+erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
+erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
+
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
new file mode 100644
index 000000000000..07d279fd5d67
--- /dev/null
+++ b/fs/erofs/compress.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_COMPRESS_H
+#define __EROFS_FS_COMPRESS_H
+
+#include "internal.h"
+
+enum {
+ Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
+ Z_EROFS_COMPRESSION_RUNTIME_MAX
+};
+
+struct z_erofs_decompress_req {
+ struct super_block *sb;
+ struct page **in, **out;
+
+ unsigned short pageofs_out;
+ unsigned int inputsize, outputsize;
+
+ /* indicate the algorithm will be used for decompression */
+ unsigned int alg;
+ bool inplace_io, partial_decoding;
+};
+
+/*
+ * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
+ * used to mark temporary allocated pages from other
+ * file/cached pages and NULL mapping pages.
+ */
+#define Z_EROFS_MAPPING_STAGING ((void *)0x5A110C8D)
+
+/* check if a page is marked as staging */
+static inline bool z_erofs_page_is_staging(struct page *page)
+{
+ return page->mapping == Z_EROFS_MAPPING_STAGING;
+}
+
+static inline bool z_erofs_put_stagingpage(struct list_head *pagepool,
+ struct page *page)
+{
+ if (!z_erofs_page_is_staging(page))
+ return false;
+
+ /* staging pages should not be used by others at the same time */
+ if (page_ref_count(page) > 1)
+ put_page(page);
+ else
+ list_add(&page->lru, pagepool);
+ return true;
+}
+
+int z_erofs_decompress(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool);
+
+#endif
+
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
new file mode 100644
index 000000000000..fc3a8d8064f8
--- /dev/null
+++ b/fs/erofs/data.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "internal.h"
+#include <linux/prefetch.h>
+
+#include <trace/events/erofs.h>
+
+static void erofs_readendio(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ blk_status_t err = bio->bi_status;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ struct page *page = bvec->bv_page;
+
+ /* page is already locked */
+ DBG_BUGON(PageUptodate(page));
+
+ if (err)
+ SetPageError(page);
+ else
+ SetPageUptodate(page);
+
+ unlock_page(page);
+ /* page could be reclaimed now */
+ }
+ bio_put(bio);
+}
+
+struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
+{
+ struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
+ struct page *page;
+
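+ /* drop __GFP_FS so memory reclaim triggered here cannot re-enter the filesystem */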
+ page = read_cache_page_gfp(mapping, blkaddr,
+ mapping_gfp_constraint(mapping, ~__GFP_FS));
+ /* should already be PageUptodate */
+ if (!IS_ERR(page))
+ lock_page(page);
+ return page;
+}
+
+static int erofs_map_blocks_flatmode(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags)
+{
+ int err = 0;
+ erofs_blk_t nblocks, lastblk;
+ u64 offset = map->m_la;
+ struct erofs_inode *vi = EROFS_I(inode);
+ bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
+
+ trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
+
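+ /* with tail-end packing (FLAT_INLINE), the last block is stored inline after the inode metadata */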
+ nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+ lastblk = nblocks - tailendpacking;
+
+ if (offset >= inode->i_size) {
+ /* leave out-of-bound access unmapped */
+ map->m_flags = 0;
+ map->m_plen = 0;
+ goto out;
+ }
+
+ /* there is no hole in flatmode */
+ map->m_flags = EROFS_MAP_MAPPED;
+
+ if (offset < blknr_to_addr(lastblk)) {
+ map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
+ map->m_plen = blknr_to_addr(lastblk) - offset;
+ } else if (tailendpacking) {
+ /* 2 - inode inline B: inode, [xattrs], inline last blk... */
+ struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+
+ map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
+ vi->xattr_isize + erofs_blkoff(map->m_la);
+ map->m_plen = inode->i_size - offset;
+
+ /* inline data should be located in one meta block */
+ if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
+ erofs_err(inode->i_sb,
+ "inline data cross block boundary @ nid %llu",
+ vi->nid);
+ DBG_BUGON(1);
+ err = -EFSCORRUPTED;
+ goto err_out;
+ }
+
+ map->m_flags |= EROFS_MAP_META;
+ } else {
+ erofs_err(inode->i_sb,
+ "internal error @ nid: %llu (size %llu), m_la 0x%llx",
+ vi->nid, inode->i_size, map->m_la);
+ DBG_BUGON(1);
+ err = -EIO;
+ goto err_out;
+ }
+
+out:
+ map->m_llen = map->m_plen;
+
+err_out:
+ trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
+ return err;
+}
+
+int erofs_map_blocks(struct inode *inode,
+ struct erofs_map_blocks *map, int flags)
+{
+ if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
+ int err = z_erofs_map_blocks_iter(inode, map, flags);
+
+ if (map->mpage) {
+ put_page(map->mpage);
+ map->mpage = NULL;
+ }
+ return err;
+ }
+ return erofs_map_blocks_flatmode(inode, map, flags);
+}
+
+static inline struct bio *erofs_read_raw_page(struct bio *bio,
+ struct address_space *mapping,
+ struct page *page,
+ erofs_off_t *last_block,
+ unsigned int nblocks,
+ bool ra)
+{
+ struct inode *const inode = mapping->host;
+ struct super_block *const sb = inode->i_sb;
+ erofs_off_t current_block = (erofs_off_t)page->index;
+ int err;
+
+ DBG_BUGON(!nblocks);
+
+ if (PageUptodate(page)) {
+ err = 0;
+ goto has_updated;
+ }
+
+ /* note that in the readpage case, bio also equals NULL */
+ if (bio &&
+ /* not continuous */
+ *last_block + 1 != current_block) {
+submit_bio_retry:
+ submit_bio(bio);
+ bio = NULL;
+ }
+
+ if (!bio) {
+ struct erofs_map_blocks map = {
+ .m_la = blknr_to_addr(current_block),
+ };
+ erofs_blk_t blknr;
+ unsigned int blkoff;
+
+ err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+ if (err)
+ goto err_out;
+
+ /* zero out the holed page */
+ if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ SetPageUptodate(page);
+
+ /* imply err = 0, see erofs_map_blocks */
+ goto has_updated;
+ }
+
+ /* for RAW access mode, m_plen must be equal to m_llen */
+ DBG_BUGON(map.m_plen != map.m_llen);
+
+ blknr = erofs_blknr(map.m_pa);
+ blkoff = erofs_blkoff(map.m_pa);
+
+ /* deal with inline page */
+ if (map.m_flags & EROFS_MAP_META) {
+ void *vsrc, *vto;
+ struct page *ipage;
+
+ DBG_BUGON(map.m_plen > PAGE_SIZE);
+
+ ipage = erofs_get_meta_page(inode->i_sb, blknr);
+
+ if (IS_ERR(ipage)) {
+ err = PTR_ERR(ipage);
+ goto err_out;
+ }
+
+ vsrc = kmap_atomic(ipage);
+ vto = kmap_atomic(page);
+ memcpy(vto, vsrc + blkoff, map.m_plen);
+ memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
+ kunmap_atomic(vto);
+ kunmap_atomic(vsrc);
+ flush_dcache_page(page);
+
+ SetPageUptodate(page);
+ /* TODO: could we unlock the page earlier? */
+ unlock_page(ipage);
+ put_page(ipage);
+
+ /* imply err = 0, see erofs_map_blocks */
+ goto has_updated;
+ }
+
+ /* pa must be block-aligned for raw reading */
+ DBG_BUGON(erofs_blkoff(map.m_pa));
+
+ /* max # of continuous pages */
+ if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
+ nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
+ if (nblocks > BIO_MAX_PAGES)
+ nblocks = BIO_MAX_PAGES;
+
+ bio = bio_alloc(GFP_NOIO, nblocks);
+
+ bio->bi_end_io = erofs_readendio;
+ bio_set_dev(bio, sb->s_bdev);
+ bio->bi_iter.bi_sector = (sector_t)blknr <<
+ LOG_SECTORS_PER_BLOCK;
+ bio->bi_opf = REQ_OP_READ;
+ }
+
+ err = bio_add_page(bio, page, PAGE_SIZE, 0);
+ /* out of the extent or bio is full */
+ if (err < PAGE_SIZE)
+ goto submit_bio_retry;
+
+ *last_block = current_block;
+
+ /* submit the bio in advance in case it is followed by too many gaps */
+ if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
+ /* err should be reassigned to 0 after submitting */
+ err = 0;
+ goto submit_bio_out;
+ }
+
+ return bio;
+
+err_out:
+ /* for sync reading, set page error immediately */
+ if (!ra) {
+ SetPageError(page);
+ ClearPageUptodate(page);
+ }
+has_updated:
+ unlock_page(page);
+
+ /* if updated manually, contiguous pages have a gap */
+ if (bio)
+submit_bio_out:
+ submit_bio(bio);
+ return err ? ERR_PTR(err) : NULL;
+}
+
+/*
+ * since we don't have write or truncate flows, no inode
+ * locking needs to be held at the moment.
+ */
+static int erofs_raw_access_readpage(struct file *file, struct page *page)
+{
+ erofs_off_t last_block;
+ struct bio *bio;
+
+ trace_erofs_readpage(page, true);
+
+ bio = erofs_read_raw_page(NULL, page->mapping,
+ page, &last_block, 1, false);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
+ return 0;
+}
+
+static int erofs_raw_access_readpages(struct file *filp,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned int nr_pages)
+{
+ erofs_off_t last_block;
+ struct bio *bio = NULL;
+ gfp_t gfp = readahead_gfp_mask(mapping);
+ struct page *page = list_last_entry(pages, struct page, lru);
+
+ trace_erofs_readpages(mapping->host, page, nr_pages, true);
+
+ for (; nr_pages; --nr_pages) {
+ page = list_entry(pages->prev, struct page, lru);
+
+ prefetchw(&page->flags);
+ list_del(&page->lru);
+
+ if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
+ bio = erofs_read_raw_page(bio, mapping, page,
+ &last_block, nr_pages, true);
+
+ /* all page errors are ignored during readahead */
+ if (IS_ERR(bio)) {
+ pr_err("%s, readahead error at page %lu of nid %llu\n",
+ __func__, page->index,
+ EROFS_I(mapping->host)->nid);
+
+ bio = NULL;
+ }
+ }
+
+ /* pages could still be locked */
+ put_page(page);
+ }
+ DBG_BUGON(!list_empty(pages));
+
+ /* the rare case (end in gaps) */
+ if (bio)
+ submit_bio(bio);
+ return 0;
+}
+
+static int erofs_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int create)
+{
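+ /* the block number is treated as a 512-byte sector index here; shift by 9 to form a byte address */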
+ struct erofs_map_blocks map = {
+ .m_la = iblock << 9,
+ };
+ int err;
+
+ err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+ if (err)
+ return err;
+
+ if (map.m_flags & EROFS_MAP_MAPPED)
+ bh->b_blocknr = erofs_blknr(map.m_pa);
+
+ return err;
+}
+
+static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
+{
+ struct inode *inode = mapping->host;
+
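+ /* for FLAT_INLINE files, the tail block lives in the metadata area and has no raw block address */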
+ if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
+ erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;
+
+ if (block >> LOG_SECTORS_PER_BLOCK >= blks)
+ return 0;
+ }
+
+ return generic_block_bmap(mapping, block, erofs_get_block);
+}
+
+/* for uncompressed (aligned) files and raw access for other files */
+const struct address_space_operations erofs_raw_access_aops = {
+ .readpage = erofs_raw_access_readpage,
+ .readpages = erofs_raw_access_readpages,
+ .bmap = erofs_bmap,
+};
+
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
new file mode 100644
index 000000000000..2890a67a1ded
--- /dev/null
+++ b/fs/erofs/decompressor.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "compress.h"
+#include <linux/module.h>
+#include <linux/lz4.h>
+
+#ifndef LZ4_DISTANCE_MAX /* history window size */
+#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
+#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
+#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32)
+#endif
+
+struct z_erofs_decompressor {
+ /*
+ * if destpages contain sparse (missing) pages, fill them with bounce pages.
+ * it also checks whether destpages form physically contiguous memory.
+ */
+ int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool);
+ int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
+ char *name;
+};
+
+static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool)
+{
+ const unsigned int nr =
+ PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+ struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
+ unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
+ BITS_PER_LONG)] = { 0 };
+ void *kaddr = NULL;
+ unsigned int i, j, top;
+
+ top = 0;
+ for (i = j = 0; i < nr; ++i, ++j) {
+ struct page *const page = rq->out[i];
+ struct page *victim;
+
+ if (j >= LZ4_MAX_DISTANCE_PAGES)
+ j = 0;
+
+ /* a 'valid' bounced bit can only be tested after a complete round */
+ if (test_bit(j, bounced)) {
+ DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
+ DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
+ availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
+ }
+
+ if (page) {
+ __clear_bit(j, bounced);
+ if (kaddr) {
+ if (kaddr + PAGE_SIZE == page_address(page))
+ kaddr += PAGE_SIZE;
+ else
+ kaddr = NULL;
+ } else if (!i) {
+ kaddr = page_address(page);
+ }
+ continue;
+ }
+ kaddr = NULL;
+ __set_bit(j, bounced);
+
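+ /* prefer reusing an output page that already fell out of the LZ4 window; otherwise grab a new bounce page */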
+ if (top) {
+ victim = availables[--top];
+ get_page(victim);
+ } else {
+ victim = erofs_allocpage(pagepool, GFP_KERNEL);
+ if (!victim)
+ return -ENOMEM;
+ victim->mapping = Z_EROFS_MAPPING_STAGING;
+ }
+ rq->out[i] = victim;
+ }
+ return kaddr ? 1 : 0;
+}
+
+static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
+ u8 *src, unsigned int pageofs_in)
+{
+ /*
+ * if in-place decompression is ongoing, the compressed input
+ * should be copied aside so that the output cannot overwrite it.
+ */
+ struct page **in = rq->in;
+ u8 *const tmp = erofs_get_pcpubuf(0);
+ u8 *tmpp = tmp;
+ unsigned int inlen = rq->inputsize - pageofs_in;
+ unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);
+
+ while (tmpp < tmp + inlen) {
+ if (!src)
+ src = kmap_atomic(*in);
+ memcpy(tmpp, src + pageofs_in, count);
+ kunmap_atomic(src);
+ src = NULL;
+ tmpp += count;
+ pageofs_in = 0;
+ count = PAGE_SIZE;
+ ++in;
+ }
+ return tmp;
+}
+
+static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
+{
+ unsigned int inputmargin, inlen;
+ u8 *src;
+ bool copied, support_0padding;
+ int ret;
+
+ if (rq->inputsize > PAGE_SIZE)
+ return -EOPNOTSUPP;
+
+ src = kmap_atomic(*rq->in);
+ inputmargin = 0;
+ support_0padding = false;
+
+ /* decompression inplace is only safe when 0padding is enabled */
+ if (EROFS_SB(rq->sb)->feature_incompat &
+ EROFS_FEATURE_INCOMPAT_LZ4_0PADDING) {
+ support_0padding = true;
+
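+ /* skip the leading zeroes (0padding) to find where compressed data really starts in the page */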
+ while (!src[inputmargin & ~PAGE_MASK])
+ if (!(++inputmargin & ~PAGE_MASK))
+ break;
+
+ if (inputmargin >= rq->inputsize) {
+ kunmap_atomic(src);
+ return -EIO;
+ }
+ }
+
+ copied = false;
+ inlen = rq->inputsize - inputmargin;
+ if (rq->inplace_io) {
+ const uint oend = (rq->pageofs_out +
+ rq->outputsize) & ~PAGE_MASK;
+ const uint nr = PAGE_ALIGN(rq->pageofs_out +
+ rq->outputsize) >> PAGE_SHIFT;
+
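+ /* if safe in-place decompression cannot be guaranteed, copy the compressed data to a per-CPU buffer first */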
+ if (rq->partial_decoding || !support_0padding ||
+ rq->out[nr - 1] != rq->in[0] ||
+ rq->inputsize - oend <
+ LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
+ src = generic_copy_inplace_data(rq, src, inputmargin);
+ inputmargin = 0;
+ copied = true;
+ }
+ }
+
+ ret = LZ4_decompress_safe_partial(src + inputmargin, out,
+ inlen, rq->outputsize,
+ rq->outputsize);
+ if (ret < 0) {
+ erofs_err(rq->sb, "failed to decompress, in[%u, %u] out[%u]",
+ inlen, inputmargin, rq->outputsize);
+ WARN_ON(1);
+ print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
+ 16, 1, src + inputmargin, inlen, true);
+ print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
+ 16, 1, out, rq->outputsize, true);
+ ret = -EIO;
+ }
+
+ if (copied)
+ erofs_put_pcpubuf(src);
+ else
+ kunmap_atomic(src);
+ return ret;
+}
+
+static struct z_erofs_decompressor decompressors[] = {
+ [Z_EROFS_COMPRESSION_SHIFTED] = {
+ .name = "shifted"
+ },
+ [Z_EROFS_COMPRESSION_LZ4] = {
+ .prepare_destpages = z_erofs_lz4_prepare_destpages,
+ .decompress = z_erofs_lz4_decompress,
+ .name = "lz4"
+ },
+};
+
+static void copy_from_pcpubuf(struct page **out, const char *dst,
+ unsigned short pageofs_out,
+ unsigned int outputsize)
+{
+ const char *end = dst + outputsize;
+ const unsigned int righthalf = PAGE_SIZE - pageofs_out;
+ const char *cur = dst - pageofs_out;
+
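+ /* the first page only receives the part starting at pageofs_out; later pages take full-page copies */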
+ while (cur < end) {
+ struct page *const page = *out++;
+
+ if (page) {
+ char *buf = kmap_atomic(page);
+
+ if (cur >= dst) {
+ memcpy(buf, cur, min_t(uint, PAGE_SIZE,
+ end - cur));
+ } else {
+ memcpy(buf + pageofs_out, cur + pageofs_out,
+ min_t(uint, righthalf, end - cur));
+ }
+ kunmap_atomic(buf);
+ }
+ cur += PAGE_SIZE;
+ }
+}
+
+static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool)
+{
+ const unsigned int nrpages_out =
+ PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+ const struct z_erofs_decompressor *alg = decompressors + rq->alg;
+ unsigned int dst_maptype;
+ void *dst;
+ int ret, i;
+
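+ /* dst_maptype: 0 - kmap_atomic, 1 - pages already physically contiguous, 2 - vm_map_ram */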
+ if (nrpages_out == 1 && !rq->inplace_io) {
+ DBG_BUGON(!*rq->out);
+ dst = kmap_atomic(*rq->out);
+ dst_maptype = 0;
+ goto dstmap_out;
+ }
+
+ /*
+ * For small output sizes (especially much less than
+ * PAGE_SIZE), it is preferable to memcpy the decompressed
+ * data rather than the compressed data.
+ */
+ if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
+ dst = erofs_get_pcpubuf(0);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ rq->inplace_io = false;
+ ret = alg->decompress(rq, dst);
+ if (!ret)
+ copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
+ rq->outputsize);
+
+ erofs_put_pcpubuf(dst);
+ return ret;
+ }
+
+ ret = alg->prepare_destpages(rq, pagepool);
+ if (ret < 0) {
+ return ret;
+ } else if (ret) {
+ dst = page_address(*rq->out);
+ dst_maptype = 1;
+ goto dstmap_out;
+ }
+
+ i = 0;
+ while (1) {
+ dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);
+
+ /* retry two more times (3 times in total) */
+ if (dst || ++i >= 3)
+ break;
+ vm_unmap_aliases();
+ }
+
+ if (!dst)
+ return -ENOMEM;
+
+ dst_maptype = 2;
+
+dstmap_out:
+ ret = alg->decompress(rq, dst + rq->pageofs_out);
+
+ if (!dst_maptype)
+ kunmap_atomic(dst);
+ else if (dst_maptype == 2)
+ vm_unmap_ram(dst, nrpages_out);
+ return ret;
+}
+
+static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool)
+{
+ const unsigned int nrpages_out =
+ PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+ const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
+ unsigned char *src, *dst;
+
+ if (nrpages_out > 2) {
+ DBG_BUGON(1);
+ return -EIO;
+ }
+
+ if (rq->out[0] == *rq->in) {
+ DBG_BUGON(nrpages_out != 1);
+ return 0;
+ }
+
+ src = kmap_atomic(*rq->in);
+ if (!rq->out[0]) {
+ dst = NULL;
+ } else {
+ dst = kmap_atomic(rq->out[0]);
+ memcpy(dst + rq->pageofs_out, src, righthalf);
+ }
+
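+ /* if out[1] aliases the input page, move its data to the page start in place */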
+ if (rq->out[1] == *rq->in) {
+ memmove(src, src + righthalf, rq->pageofs_out);
+ } else if (nrpages_out == 2) {
+ if (dst)
+ kunmap_atomic(dst);
+ DBG_BUGON(!rq->out[1]);
+ dst = kmap_atomic(rq->out[1]);
+ memcpy(dst, src + righthalf, rq->pageofs_out);
+ }
+ if (dst)
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+ return 0;
+}
+
+int z_erofs_decompress(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool)
+{
+ if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
+ return z_erofs_shifted_transform(rq, pagepool);
+ return z_erofs_decompress_generic(rq, pagepool);
+}
+
diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c
new file mode 100644
index 000000000000..d28c623dfef9
--- /dev/null
+++ b/fs/erofs/dir.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "internal.h"
+
+static void debug_one_dentry(unsigned char d_type, const char *de_name,
+ unsigned int de_namelen)
+{
+#ifdef CONFIG_EROFS_FS_DEBUG
+ /* since the on-disk name may not have a trailing '\0' */
+ unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
+
+ memcpy(dbg_namebuf, de_name, de_namelen);
+ dbg_namebuf[de_namelen] = '\0';
+
+ erofs_dbg("found dirent %s de_len %u d_type %d", dbg_namebuf,
+ de_namelen, d_type);
+#endif
+}
+
+static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
+ void *dentry_blk, unsigned int *ofs,
+ unsigned int nameoff, unsigned int maxsize)
+{
+ struct erofs_dirent *de = dentry_blk + *ofs;
+ const struct erofs_dirent *end = dentry_blk + nameoff;
+
+ while (de < end) {
+ const char *de_name;
+ unsigned int de_namelen;
+ unsigned char d_type;
+
+ d_type = fs_ftype_to_dtype(de->file_type);
+
+ nameoff = le16_to_cpu(de->nameoff);
+ de_name = (char *)dentry_blk + nameoff;
+
+ /* the last dirent in the block? */
+ if (de + 1 >= end)
+ de_namelen = strnlen(de_name, maxsize - nameoff);
+ else
+ de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
+
+ /* a corrupted entry is found */
+ if (nameoff + de_namelen > maxsize ||
+ de_namelen > EROFS_NAME_LEN) {
+ erofs_err(dir->i_sb, "bogus dirent @ nid %llu",
+ EROFS_I(dir)->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+
+ debug_one_dentry(d_type, de_name, de_namelen);
+ if (!dir_emit(ctx, de_name, de_namelen,
+ le64_to_cpu(de->nid), d_type))
+ /* stopped for some reason */
+ return 1;
+ ++de;
+ *ofs += sizeof(struct erofs_dirent);
+ }
+ *ofs = maxsize;
+ return 0;
+}
+
+static int erofs_readdir(struct file *f, struct dir_context *ctx)
+{
+ struct inode *dir = file_inode(f);
+ struct address_space *mapping = dir->i_mapping;
+ const size_t dirsize = i_size_read(dir);
+ unsigned int i = ctx->pos / EROFS_BLKSIZ;
+ unsigned int ofs = ctx->pos % EROFS_BLKSIZ;
+ int err = 0;
+ bool initial = true;
+
+ while (ctx->pos < dirsize) {
+ struct page *dentry_page;
+ struct erofs_dirent *de;
+ unsigned int nameoff, maxsize;
+
+ dentry_page = read_mapping_page(mapping, i, NULL);
+ if (dentry_page == ERR_PTR(-ENOMEM)) {
+ err = -ENOMEM;
+ break;
+ } else if (IS_ERR(dentry_page)) {
+ erofs_err(dir->i_sb,
+ "fail to readdir of logical block %u of nid %llu",
+ i, EROFS_I(dir)->nid);
+ err = -EFSCORRUPTED;
+ break;
+ }
+
+ de = (struct erofs_dirent *)kmap(dentry_page);
+
+ nameoff = le16_to_cpu(de->nameoff);
+
+ if (nameoff < sizeof(struct erofs_dirent) ||
+ nameoff >= PAGE_SIZE) {
+ erofs_err(dir->i_sb,
+ "invalid de[0].nameoff %u @ nid %llu",
+ nameoff, EROFS_I(dir)->nid);
+ err = -EFSCORRUPTED;
+ goto skip_this;
+ }
+
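+ /* number of valid bytes in this directory block */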
+ maxsize = min_t(unsigned int,
+ dirsize - ctx->pos + ofs, PAGE_SIZE);
+
+ /* start searching dirents at an arbitrary position */
+ if (initial) {
+ initial = false;
+
+ ofs = roundup(ofs, sizeof(struct erofs_dirent));
+ if (ofs >= nameoff)
+ goto skip_this;
+ }
+
+ err = erofs_fill_dentries(dir, ctx, de, &ofs,
+ nameoff, maxsize);
+skip_this:
+ kunmap(dentry_page);
+
+ put_page(dentry_page);
+
+ ctx->pos = blknr_to_addr(i) + ofs;
+
+ if (err)
+ break;
+ ++i;
+ ofs = 0;
+ }
+ return err < 0 ? err : 0;
+}
+
+const struct file_operations erofs_dir_fops = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = erofs_readdir,
+};
+
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
new file mode 100644
index 000000000000..385fa49c7749
--- /dev/null
+++ b/fs/erofs/erofs_fs.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */
+/*
+ * EROFS (Enhanced ROM File System) on-disk format definition
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_H
+#define __EROFS_FS_H
+
+#define EROFS_SUPER_OFFSET 1024
+
+#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
+
+/*
+ * Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
+ * be incompatible with this kernel version.
+ */
+#define EROFS_FEATURE_INCOMPAT_LZ4_0PADDING 0x00000001
+#define EROFS_ALL_FEATURE_INCOMPAT EROFS_FEATURE_INCOMPAT_LZ4_0PADDING
+
+/* 128-byte erofs on-disk super block */
+struct erofs_super_block {
+ __le32 magic; /* file system magic number */
+ __le32 checksum; /* crc32c(super_block) */
+ __le32 feature_compat;
+ __u8 blkszbits; /* support block_size == PAGE_SIZE only */
+ __u8 reserved;
+
+ __le16 root_nid; /* nid of root directory */
+ __le64 inos; /* total valid ino # (== f_files - f_favail) */
+
+ __le64 build_time; /* inode v1 time derivation */
+ __le32 build_time_nsec; /* inode v1 time derivation in nano scale */
+ __le32 blocks; /* used for statfs */
+ __le32 meta_blkaddr; /* start block address of metadata area */
+ __le32 xattr_blkaddr; /* start block address of shared xattr area */
+ __u8 uuid[16]; /* 128-bit uuid for volume */
+ __u8 volume_name[16]; /* volume name */
+ __le32 feature_incompat;
+ __u8 reserved2[44];
+};
+
+/*
+ * erofs inode datalayout (i_format in on-disk inode):
+ * 0 - inode plain without inline data A:
+ * inode, [xattrs], ... | ... | no-holed data
+ * 1 - inode VLE compression B (legacy):
+ * inode, [xattrs], extents ... | ...
+ * 2 - inode plain with inline data C:
+ * inode, [xattrs], last_inline_data, ... | ... | no-holed data
+ * 3 - inode compression D:
+ * inode, [xattrs], map_header, extents ... | ...
+ * 4~7 - reserved
+ */
+enum {
+ EROFS_INODE_FLAT_PLAIN = 0,
+ EROFS_INODE_FLAT_COMPRESSION_LEGACY = 1,
+ EROFS_INODE_FLAT_INLINE = 2,
+ EROFS_INODE_FLAT_COMPRESSION = 3,
+ EROFS_INODE_DATALAYOUT_MAX
+};
+
+static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
+{
+ return datamode == EROFS_INODE_FLAT_COMPRESSION ||
+ datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY;
+}
+
+/* bit definitions of inode i_advise */
+#define EROFS_I_VERSION_BITS 1
+#define EROFS_I_DATALAYOUT_BITS 3
+
+#define EROFS_I_VERSION_BIT 0
+#define EROFS_I_DATALAYOUT_BIT 1
+
+/* 32-byte reduced form of an ondisk inode */
+struct erofs_inode_compact {
+ __le16 i_format; /* inode format hints */
+
+/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
+ __le16 i_xattr_icount;
+ __le16 i_mode;
+ __le16 i_nlink;
+ __le32 i_size;
+ __le32 i_reserved;
+ union {
+ /* file total compressed blocks for data mapping 1 */
+ __le32 compressed_blocks;
+ __le32 raw_blkaddr;
+
+ /* for device files, used to indicate old/new device # */
+ __le32 rdev;
+ } i_u;
+ __le32 i_ino; /* only used for 32-bit stat compatibility */
+ __le16 i_uid;
+ __le16 i_gid;
+ __le32 i_reserved2;
+};
+
+/* 32 bytes on-disk inode */
+#define EROFS_INODE_LAYOUT_COMPACT 0
+/* 64 bytes on-disk inode */
+#define EROFS_INODE_LAYOUT_EXTENDED 1
+
+/* 64-byte complete form of an ondisk inode */
+struct erofs_inode_extended {
+ __le16 i_format; /* inode format hints */
+
+/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
+ __le16 i_xattr_icount;
+ __le16 i_mode;
+ __le16 i_reserved;
+ __le64 i_size;
+ union {
+ /* file total compressed blocks for data mapping 1 */
+ __le32 compressed_blocks;
+ __le32 raw_blkaddr;
+
+ /* for device files, used to indicate old/new device # */
+ __le32 rdev;
+ } i_u;
+
+ /* only used for 32-bit stat compatibility */
+ __le32 i_ino;
+
+ __le32 i_uid;
+ __le32 i_gid;
+ __le64 i_ctime;
+ __le32 i_ctime_nsec;
+ __le32 i_nlink;
+ __u8 i_reserved2[16];
+};
+
+#define EROFS_MAX_SHARED_XATTRS (128)
+/* h_shared_count between 129 ... 255 are special # */
+#define EROFS_SHARED_XATTR_EXTENT (255)
+
+/*
+ * inline xattrs (n == i_xattr_icount):
+ * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes
+ * 12 bytes / \
+ * / \
+ * /-----------------------\
+ * | erofs_xattr_entries+ |
+ * +-----------------------+
+ * inline xattrs must start with erofs_xattr_ibody_header;
+ * for a read-only fs, there is no need to introduce h_refcount
+ */
+struct erofs_xattr_ibody_header {
+ __le32 h_reserved;
+ __u8 h_shared_count;
+ __u8 h_reserved2[7];
+ __le32 h_shared_xattrs[0]; /* shared xattr id array */
+};
+
+/* Name indexes */
+#define EROFS_XATTR_INDEX_USER 1
+#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2
+#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3
+#define EROFS_XATTR_INDEX_TRUSTED 4
+#define EROFS_XATTR_INDEX_LUSTRE 5
+#define EROFS_XATTR_INDEX_SECURITY 6
+
+/* xattr entry (for both inline & shared xattrs) */
+struct erofs_xattr_entry {
+ __u8 e_name_len; /* length of name */
+ __u8 e_name_index; /* attribute name index */
+ __le16 e_value_size; /* size of attribute value */
+ /* followed by e_name and e_value */
+ char e_name[0]; /* attribute name */
+};
+
+static inline unsigned int erofs_xattr_ibody_size(__le16 i_xattr_icount)
+{
+ if (!i_xattr_icount)
+ return 0;
+
+ return sizeof(struct erofs_xattr_ibody_header) +
+ sizeof(__u32) * (le16_to_cpu(i_xattr_icount) - 1);
+}
+
+#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry))
+
+static inline unsigned int erofs_xattr_entry_size(struct erofs_xattr_entry *e)
+{
+ return EROFS_XATTR_ALIGN(sizeof(struct erofs_xattr_entry) +
+ e->e_name_len + le16_to_cpu(e->e_value_size));
+}
+
+/* available compression algorithm types (for h_algorithmtype) */
+enum {
+ Z_EROFS_COMPRESSION_LZ4 = 0,
+ Z_EROFS_COMPRESSION_MAX
+};
+
+/*
+ * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
+ * e.g. for 4k logical cluster size, 4B if compacted 2B is off;
+ * (4B) + 2B + (4B) if compacted 2B is on.
+ */
+#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0
+
+#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT)
+
+struct z_erofs_map_header {
+ __le32 h_reserved1;
+ __le16 h_advise;
+ /*
+ * bit 0-3 : algorithm type of head 1 (logical cluster type 01);
+ * bit 4-7 : algorithm type of head 2 (logical cluster type 11).
+ */
+ __u8 h_algorithmtype;
+ /*
+ * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
+ * bit 3-4 : (physical - logical) cluster bits of head 1:
+ * For example, if logical clustersize = 4096, 1 for 8192.
+ * bit 5-7 : (physical - logical) cluster bits of head 2.
+ */
+ __u8 h_clusterbits;
+};
+
+#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8
+
+/*
+ * Fixed-sized output compression ondisk Logical Extent cluster type:
+ * 0 - literal (uncompressed) cluster
+ * 1 - compressed cluster (for the head logical cluster)
+ * 2 - compressed cluster (for the other logical clusters)
+ *
+ * In detail,
+ * 0 - literal (uncompressed) cluster,
+ * di_advise = 0
+ * di_clusterofs = the literal data offset of the cluster
+ * di_blkaddr = the blkaddr of the literal cluster
+ *
+ * 1 - compressed cluster (for the head logical cluster)
+ * di_advise = 1
+ * di_clusterofs = the decompressed data offset of the cluster
+ * di_blkaddr = the blkaddr of the compressed cluster
+ *
+ * 2 - compressed cluster (for the other logical clusters)
+ * di_advise = 2
+ * di_clusterofs =
+ * the decompressed data offset in its own head cluster
+ * di_u.delta[0] = distance to its corresponding head cluster
+ * di_u.delta[1] = distance to its corresponding tail cluster
+ * (di_advise could be 0, 1 or 2)
+ */
+enum {
+ Z_EROFS_VLE_CLUSTER_TYPE_PLAIN = 0,
+ Z_EROFS_VLE_CLUSTER_TYPE_HEAD = 1,
+ Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD = 2,
+ Z_EROFS_VLE_CLUSTER_TYPE_RESERVED = 3,
+ Z_EROFS_VLE_CLUSTER_TYPE_MAX
+};
+
+#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2
+#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
+
+struct z_erofs_vle_decompressed_index {
+ __le16 di_advise;
+ /* where to decompress in the head cluster */
+ __le16 di_clusterofs;
+
+ union {
+ /* for the head cluster */
+ __le32 blkaddr;
+ /*
+ * for the other clusters
+ * (e.g. for a 4k page-sized cluster, maximum 4K*64K = 256M)
+ * [0] - pointing to the head cluster
+ * [1] - pointing to the tail cluster
+ */
+ __le16 delta[2];
+ } di_u;
+};
+
+#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \
+ (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \
+ sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)
+
+/* dirents are sorted in alphabetical order, thus we can do binary search */
+struct erofs_dirent {
+ __le64 nid; /* node number */
+ __le16 nameoff; /* start offset of file name */
+ __u8 file_type; /* file type */
+ __u8 reserved; /* reserved */
+} __packed;
+
+/*
+ * EROFS file types should match generic FT_* types and
+ * there seems to be no need to add BUILD_BUG_ONs since any
+ * potential mismatch would break other filesystems as well...
+ */
+
+#define EROFS_NAME_LEN 255
+
+/* check the EROFS on-disk layout strictly at compile time */
+static inline void erofs_check_ondisk_layout_definitions(void)
+{
+ BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
+ BUILD_BUG_ON(sizeof(struct erofs_inode_compact) != 32);
+ BUILD_BUG_ON(sizeof(struct erofs_inode_extended) != 64);
+ BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12);
+ BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4);
+ BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8);
+ BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
+ BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
+
+ BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
+ Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
+}
+
+#endif
+
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
new file mode 100644
index 000000000000..3350ab65d892
--- /dev/null
+++ b/fs/erofs/inode.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "xattr.h"
+
+#include <trace/events/erofs.h>
+
+/* no locking */
+static int erofs_read_inode(struct inode *inode, void *data)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct erofs_inode_compact *dic = data;
+ struct erofs_inode_extended *die;
+
+ const unsigned int ifmt = le16_to_cpu(dic->i_format);
+ struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+ erofs_blk_t nblks = 0;
+
+ vi->datalayout = erofs_inode_datalayout(ifmt);
+
+ if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
+ erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
+ vi->datalayout, vi->nid);
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ }
+
+ switch (erofs_inode_version(ifmt)) {
+ case EROFS_INODE_LAYOUT_EXTENDED:
+ die = data;
+
+ vi->inode_isize = sizeof(struct erofs_inode_extended);
+ vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
+
+ inode->i_mode = le16_to_cpu(die->i_mode);
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFDIR:
+ case S_IFLNK:
+ vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ inode->i_rdev =
+ new_decode_dev(le32_to_cpu(die->i_u.rdev));
+ break;
+ case S_IFIFO:
+ case S_IFSOCK:
+ inode->i_rdev = 0;
+ break;
+ default:
+ goto bogusimode;
+ }
+ i_uid_write(inode, le32_to_cpu(die->i_uid));
+ i_gid_write(inode, le32_to_cpu(die->i_gid));
+ set_nlink(inode, le32_to_cpu(die->i_nlink));
+
+ /* ns timestamp */
+ inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+ le64_to_cpu(die->i_ctime);
+ inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+ le32_to_cpu(die->i_ctime_nsec);
+
+ inode->i_size = le64_to_cpu(die->i_size);
+
+ /* total blocks for compressed files */
+ if (erofs_inode_is_data_compressed(vi->datalayout))
+ nblks = le32_to_cpu(die->i_u.compressed_blocks);
+ break;
+ case EROFS_INODE_LAYOUT_COMPACT:
+ vi->inode_isize = sizeof(struct erofs_inode_compact);
+ vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
+
+ inode->i_mode = le16_to_cpu(dic->i_mode);
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFDIR:
+ case S_IFLNK:
+ vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ inode->i_rdev =
+ new_decode_dev(le32_to_cpu(dic->i_u.rdev));
+ break;
+ case S_IFIFO:
+ case S_IFSOCK:
+ inode->i_rdev = 0;
+ break;
+ default:
+ goto bogusimode;
+ }
+ i_uid_write(inode, le16_to_cpu(dic->i_uid));
+ i_gid_write(inode, le16_to_cpu(dic->i_gid));
+ set_nlink(inode, le16_to_cpu(dic->i_nlink));
+
+ /* use build time to derive all file time */
+ inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+ sbi->build_time;
+ inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+ sbi->build_time_nsec;
+
+ inode->i_size = le32_to_cpu(dic->i_size);
+ if (erofs_inode_is_data_compressed(vi->datalayout))
+ nblks = le32_to_cpu(dic->i_u.compressed_blocks);
+ break;
+ default:
+ erofs_err(inode->i_sb,
+ "unsupported on-disk inode version %u of nid %llu",
+ erofs_inode_version(ifmt), vi->nid);
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ }
+
+ if (!nblks)
+ /* measure inode.i_blocks as generic filesystems do */
+ inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
+ else
+ inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
+ return 0;
+
+bogusimode:
+ erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
+ inode->i_mode, vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+}
+
+static int erofs_fill_symlink(struct inode *inode, void *data,
+ unsigned int m_pofs)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+ char *lnk;
+
+ /* if it cannot be handled with the fast symlink scheme */
+ if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
+ inode->i_size >= PAGE_SIZE) {
+ inode->i_op = &erofs_symlink_iops;
+ return 0;
+ }
+
+ lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
+ if (!lnk)
+ return -ENOMEM;
+
+ m_pofs += vi->inode_isize + vi->xattr_isize;
+ /* inline symlink data shouldn't cross a page boundary either */
+ if (m_pofs + inode->i_size > PAGE_SIZE) {
+ kfree(lnk);
+ erofs_err(inode->i_sb,
+ "inline data cross block boundary @ nid %llu",
+ vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+
+ memcpy(lnk, data + m_pofs, inode->i_size);
+ lnk[inode->i_size] = '\0';
+
+ inode->i_link = lnk;
+ inode->i_op = &erofs_fast_symlink_iops;
+ return 0;
+}
+
+static int erofs_fill_inode(struct inode *inode, int isdir)
+{
+ struct super_block *sb = inode->i_sb;
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct page *page;
+ void *data;
+ int err;
+ erofs_blk_t blkaddr;
+ unsigned int ofs;
+ erofs_off_t inode_loc;
+
+ trace_erofs_fill_inode(inode, isdir);
+ inode_loc = iloc(EROFS_SB(sb), vi->nid);
+ blkaddr = erofs_blknr(inode_loc);
+ ofs = erofs_blkoff(inode_loc);
+
+ erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
+ __func__, vi->nid, ofs, blkaddr);
+
+ page = erofs_get_meta_page(sb, blkaddr);
+
+ if (IS_ERR(page)) {
+ erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
+ vi->nid, PTR_ERR(page));
+ return PTR_ERR(page);
+ }
+
+ DBG_BUGON(!PageUptodate(page));
+ data = page_address(page);
+
+ err = erofs_read_inode(inode, data + ofs);
+ if (err)
+ goto out_unlock;
+
+ /* setup the new inode */
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_op = &erofs_generic_iops;
+ inode->i_fop = &generic_ro_fops;
+ break;
+ case S_IFDIR:
+ inode->i_op = &erofs_dir_iops;
+ inode->i_fop = &erofs_dir_fops;
+ break;
+ case S_IFLNK:
+ err = erofs_fill_symlink(inode, data, ofs);
+ if (err)
+ goto out_unlock;
+ inode_nohighmem(inode);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+ inode->i_op = &erofs_generic_iops;
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ goto out_unlock;
+ default:
+ err = -EFSCORRUPTED;
+ goto out_unlock;
+ }
+
+ if (erofs_inode_is_data_compressed(vi->datalayout)) {
+ err = z_erofs_fill_inode(inode);
+ goto out_unlock;
+ }
+ inode->i_mapping->a_ops = &erofs_raw_access_aops;
+
+out_unlock:
+ unlock_page(page);
+ put_page(page);
+ return err;
+}
+
+/*
+ * erofs nid is 64 bits, but i_ino is 'unsigned long', therefore
+ * we need to do more on 32-bit platforms to find the right inode.
+ */
+static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
+{
+ const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+ return EROFS_I(inode)->nid == nid;
+}
+
+static int erofs_iget_set_actor(struct inode *inode, void *opaque)
+{
+ const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+ inode->i_ino = erofs_inode_hash(nid);
+ return 0;
+}
+
+static inline struct inode *erofs_iget_locked(struct super_block *sb,
+ erofs_nid_t nid)
+{
+ const unsigned long hashval = erofs_inode_hash(nid);
+
+ return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
+ erofs_iget_set_actor, &nid);
+}
+
+struct inode *erofs_iget(struct super_block *sb,
+ erofs_nid_t nid,
+ bool isdir)
+{
+ struct inode *inode = erofs_iget_locked(sb, nid);
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (inode->i_state & I_NEW) {
+ int err;
+ struct erofs_inode *vi = EROFS_I(inode);
+
+ vi->nid = nid;
+
+ err = erofs_fill_inode(inode, isdir);
+ if (!err)
+ unlock_new_inode(inode);
+ else {
+ iget_failed(inode);
+ inode = ERR_PTR(err);
+ }
+ }
+ return inode;
+}
+
+int erofs_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int query_flags)
+{
+ struct inode *const inode = d_inode(path->dentry);
+
+ if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
+ stat->attributes |= STATX_ATTR_COMPRESSED;
+
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+ stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
+ STATX_ATTR_IMMUTABLE);
+
+ generic_fillattr(inode, stat);
+ return 0;
+}
+
+const struct inode_operations erofs_generic_iops = {
+ .getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
+};
+
+const struct inode_operations erofs_symlink_iops = {
+ .get_link = page_get_link,
+ .getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
+};
+
+const struct inode_operations erofs_fast_symlink_iops = {
+ .get_link = simple_get_link,
+ .getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
+};
+
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
new file mode 100644
index 000000000000..1ed5beff7d11
--- /dev/null
+++ b/fs/erofs/internal.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_INTERNAL_H
+#define __EROFS_INTERNAL_H
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/magic.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "erofs_fs.h"
+
+/* redefine pr_fmt "erofs: " */
+#undef pr_fmt
+#define pr_fmt(fmt) "erofs: " fmt
+
+__printf(3, 4) void _erofs_err(struct super_block *sb,
+ const char *function, const char *fmt, ...);
+#define erofs_err(sb, fmt, ...) \
+ _erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__)
+__printf(3, 4) void _erofs_info(struct super_block *sb,
+ const char *function, const char *fmt, ...);
+#define erofs_info(sb, fmt, ...) \
+ _erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__)
+#ifdef CONFIG_EROFS_FS_DEBUG
+#define erofs_dbg(x, ...) pr_debug(x "\n", ##__VA_ARGS__)
+#define DBG_BUGON BUG_ON
+#else
+#define erofs_dbg(x, ...) ((void)0)
+#define DBG_BUGON(x) ((void)(x))
+#endif /* !CONFIG_EROFS_FS_DEBUG */
+
+/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
+#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1
+
+typedef u64 erofs_nid_t;
+typedef u64 erofs_off_t;
+/* data type for filesystem-wide blocks number */
+typedef u32 erofs_blk_t;
+
+struct erofs_sb_info {
+#ifdef CONFIG_EROFS_FS_ZIP
+ /* list for all registered superblocks, mainly for shrinker */
+ struct list_head list;
+ struct mutex umount_mutex;
+
+ /* the dedicated workstation for compression */
+ struct radix_tree_root workstn_tree;
+
+ /* threshold for decompression synchronously */
+ unsigned int max_sync_decompress_pages;
+
+ unsigned int shrinker_run_no;
+
+ /* current strategy of how to use managed cache */
+ unsigned char cache_strategy;
+
+ /* pseudo inode to manage cached pages */
+ struct inode *managed_cache;
+#endif /* CONFIG_EROFS_FS_ZIP */
+ u32 blocks;
+ u32 meta_blkaddr;
+#ifdef CONFIG_EROFS_FS_XATTR
+ u32 xattr_blkaddr;
+#endif
+
+ /* inode slot unit size in bit shift */
+ unsigned char islotbits;
+
+ u32 build_time_nsec;
+ u64 build_time;
+
+ /* what we really care about is the nid rather than the ino.. */
+ erofs_nid_t root_nid;
+ /* used for statfs, f_files - f_favail */
+ u64 inos;
+
+ u8 uuid[16]; /* 128-bit uuid for volume */
+ u8 volume_name[16]; /* volume name */
+ u32 feature_compat;
+ u32 feature_incompat;
+
+ unsigned int mount_opt;
+};
+
+#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
+#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)
+
+/* Mount flags set via mount options or defaults */
+#define EROFS_MOUNT_XATTR_USER 0x00000010
+#define EROFS_MOUNT_POSIX_ACL 0x00000020
+
+#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
+#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option)
+#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
+
+#ifdef CONFIG_EROFS_FS_ZIP
+enum {
+ EROFS_ZIP_CACHE_DISABLED,
+ EROFS_ZIP_CACHE_READAHEAD,
+ EROFS_ZIP_CACHE_READAROUND
+};
+
+#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
+
+/* basic unit of the workstation of a super_block */
+struct erofs_workgroup {
+ /* the workgroup index in the workstation */
+ pgoff_t index;
+
+ /* overall workgroup reference count */
+ atomic_t refcount;
+};
+
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+ int val)
+{
+ preempt_disable();
+ if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
+ preempt_enable();
+ return false;
+ }
+ return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+ int orig_val)
+{
+ /*
+ * other observers should notice all modifications
+ * in the freezing period.
+ */
+ smp_mb();
+ atomic_set(&grp->refcount, orig_val);
+ preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+ return atomic_cond_read_relaxed(&grp->refcount,
+ VAL != EROFS_LOCKED_MAGIC);
+}
+#else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+ int val)
+{
+ preempt_disable();
+ /* no need to spin on UP platforms, let's just disable preemption. */
+ if (val != atomic_read(&grp->refcount)) {
+ preempt_enable();
+ return false;
+ }
+ return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+ int orig_val)
+{
+ preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+ int v = atomic_read(&grp->refcount);
+
+ /* the workgroup is never frozen on uniprocessor systems */
+ DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+ return v;
+}
+#endif /* !CONFIG_SMP */
+
+/* hard limit of pages per compressed cluster */
+#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
+#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES
+#else
+#define EROFS_PCPUBUF_NR_PAGES 0
+#endif /* !CONFIG_EROFS_FS_ZIP */
+
+/* we strictly follow PAGE_SIZE and no buffer head yet */
+#define LOG_BLOCK_SIZE PAGE_SHIFT
+
+#undef LOG_SECTORS_PER_BLOCK
+#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)
+
+#undef SECTORS_PER_BLOCK
+#define SECTORS_PER_BLOCK (1 << LOG_SECTORS_PER_BLOCK)
+
+#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)
+
+#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
+#error erofs cannot be used in this platform
+#endif
+
+#define ROOT_NID(sb) ((sb)->root_nid)
+
+#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
+#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
+#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
+
+static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
+{
+ return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
+}
+
+/* atomic flag definitions */
+#define EROFS_I_EA_INITED_BIT 0
+#define EROFS_I_Z_INITED_BIT 1
+
+/* bitlock definitions (arranged in reverse order) */
+#define EROFS_I_BL_XATTR_BIT (BITS_PER_LONG - 1)
+#define EROFS_I_BL_Z_BIT (BITS_PER_LONG - 2)
+
+struct erofs_inode {
+ erofs_nid_t nid;
+
+ /* atomic flags (including bitlocks) */
+ unsigned long flags;
+
+ unsigned char datalayout;
+ unsigned char inode_isize;
+ unsigned short xattr_isize;
+
+ unsigned int xattr_shared_count;
+ unsigned int *xattr_shared_xattrs;
+
+ union {
+ erofs_blk_t raw_blkaddr;
+#ifdef CONFIG_EROFS_FS_ZIP
+ struct {
+ unsigned short z_advise;
+ unsigned char z_algorithmtype[2];
+ unsigned char z_logical_clusterbits;
+ unsigned char z_physical_clusterbits[2];
+ };
+#endif /* CONFIG_EROFS_FS_ZIP */
+ };
+ /* the corresponding vfs inode */
+ struct inode vfs_inode;
+};
+
+#define EROFS_I(ptr) \
+ container_of(ptr, struct erofs_inode, vfs_inode)
+
+static inline unsigned long erofs_inode_datablocks(struct inode *inode)
+{
+ /* since i_size cannot be changed */
+ return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
+}
+
+static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,
+ unsigned int bits)
+{
+
+ return (value >> bit) & ((1 << bits) - 1);
+}
+
+
+static inline unsigned int erofs_inode_version(unsigned int value)
+{
+ return erofs_bitrange(value, EROFS_I_VERSION_BIT,
+ EROFS_I_VERSION_BITS);
+}
+
+static inline unsigned int erofs_inode_datalayout(unsigned int value)
+{
+ return erofs_bitrange(value, EROFS_I_DATALAYOUT_BIT,
+ EROFS_I_DATALAYOUT_BITS);
+}
+
+extern const struct super_operations erofs_sops;
+
+extern const struct address_space_operations erofs_raw_access_aops;
+extern const struct address_space_operations z_erofs_aops;
+
+/*
+ * Logical to physical block mapping, used by erofs_map_blocks()
+ *
+ * Unlike other file systems, it is used for 2 access modes:
+ *
+ * 1) RAW access mode:
+ *
+ * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
+ * and get the valid m_pblk, m_pofs and the longest m_len(in bytes).
+ *
+ * Note that m_lblk in the RAW access mode refers to the number of
+ * the compressed ondisk block rather than the uncompressed
+ * in-memory block for the compressed file.
+ *
+ * m_pofs equals m_lofs except for the inline data page.
+ *
+ * 2) Normal access mode:
+ *
+ * If the inode is not compressed, it is no different from
+ * the RAW access mode. However, if the inode is compressed,
+ * users should pass a valid (m_lblk, m_lofs) pair, and get
+ * the needed m_pblk, m_pofs, m_len to get the compressed data
+ * and the updated m_lblk, m_lofs which indicates the start
+ * of the corresponding uncompressed data in the file.
+ */
+enum {
+ BH_Zipped = BH_PrivateStart,
+ BH_FullMapped,
+};
+
+/* Has a disk mapping */
+#define EROFS_MAP_MAPPED (1 << BH_Mapped)
+/* Located in metadata (could be copied from bd_inode) */
+#define EROFS_MAP_META (1 << BH_Meta)
+/* The extent has been compressed */
+#define EROFS_MAP_ZIPPED (1 << BH_Zipped)
+/* The length of extent is full */
+#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped)
+
+struct erofs_map_blocks {
+ erofs_off_t m_pa, m_la;
+ u64 m_plen, m_llen;
+
+ unsigned int m_flags;
+
+ struct page *mpage;
+};
+
+/* Flags used by erofs_map_blocks() */
+#define EROFS_GET_BLOCKS_RAW 0x0001
+
+/* zmap.c */
+#ifdef CONFIG_EROFS_FS_ZIP
+int z_erofs_fill_inode(struct inode *inode);
+int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags);
+#else
+static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; }
+static inline int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_EROFS_FS_ZIP */
+
+/* data.c */
+struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr);
+
+int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
+
+/* inode.c */
+static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
+{
+#if BITS_PER_LONG == 32
+ return (nid >> 32) ^ (nid & 0xffffffff);
+#else
+ return nid;
+#endif
+}
+
+extern const struct inode_operations erofs_generic_iops;
+extern const struct inode_operations erofs_symlink_iops;
+extern const struct inode_operations erofs_fast_symlink_iops;
+
+struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
+int erofs_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int query_flags);
+
+/* namei.c */
+extern const struct inode_operations erofs_dir_iops;
+
+int erofs_namei(struct inode *dir, struct qstr *name,
+ erofs_nid_t *nid, unsigned int *d_type);
+
+/* dir.c */
+extern const struct file_operations erofs_dir_fops;
+
+/* utils.c / zdata.c */
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
+
+#if (EROFS_PCPUBUF_NR_PAGES > 0)
+void *erofs_get_pcpubuf(unsigned int pagenr);
+#define erofs_put_pcpubuf(buf) do { \
+ (void)&(buf); \
+ preempt_enable(); \
+} while (0)
+#else
+static inline void *erofs_get_pcpubuf(unsigned int pagenr)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+#define erofs_put_pcpubuf(buf) do {} while (0)
+#endif
+
+#ifdef CONFIG_EROFS_FS_ZIP
+int erofs_workgroup_put(struct erofs_workgroup *grp);
+struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+ pgoff_t index, bool *tag);
+int erofs_register_workgroup(struct super_block *sb,
+ struct erofs_workgroup *grp, bool tag);
+void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
+void erofs_shrinker_register(struct super_block *sb);
+void erofs_shrinker_unregister(struct super_block *sb);
+int __init erofs_init_shrinker(void);
+void erofs_exit_shrinker(void);
+int __init z_erofs_init_zip_subsystem(void);
+void z_erofs_exit_zip_subsystem(void);
+int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *egrp);
+int erofs_try_to_free_cached_page(struct address_space *mapping,
+ struct page *page);
+#else
+static inline void erofs_shrinker_register(struct super_block *sb) {}
+static inline void erofs_shrinker_unregister(struct super_block *sb) {}
+static inline int erofs_init_shrinker(void) { return 0; }
+static inline void erofs_exit_shrinker(void) {}
+static inline int z_erofs_init_zip_subsystem(void) { return 0; }
+static inline void z_erofs_exit_zip_subsystem(void) {}
+#endif /* !CONFIG_EROFS_FS_ZIP */
+
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
+#endif /* __EROFS_INTERNAL_H */
+
diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
new file mode 100644
index 000000000000..3abbecbf73de
--- /dev/null
+++ b/fs/erofs/namei.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "xattr.h"
+
+#include <trace/events/erofs.h>
+
+struct erofs_qstr {
+ const unsigned char *name;
+ const unsigned char *end;
+};
+
+/* this relies on qn->end being accurate, and qn must have a trailing '\0' */
+static inline int erofs_dirnamecmp(const struct erofs_qstr *qn,
+ const struct erofs_qstr *qd,
+ unsigned int *matched)
+{
+ unsigned int i = *matched;
+
+ /*
+ * on an on-disk error, let's only BUG_ON in debugging mode;
+ * otherwise, 1 is returned to just skip the invalid name
+ * and go on (in consideration of lookup performance).
+ */
+ DBG_BUGON(qd->name > qd->end);
+
+ /* qd may not have a trailing '\0' */
+ /* however, accessing it is absolutely safe while < qd->end */
+ while (qd->name + i < qd->end && qd->name[i] != '\0') {
+ if (qn->name[i] != qd->name[i]) {
+ *matched = i;
+ return qn->name[i] > qd->name[i] ? 1 : -1;
+ }
+ ++i;
+ }
+ *matched = i;
+ /* See comments in __d_alloc on the terminating NUL character */
+ return qn->name[i] == '\0' ? 0 : 1;
+}
+
+#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
+
+static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
+ u8 *data,
+ unsigned int dirblksize,
+ const int ndirents)
+{
+ int head, back;
+ unsigned int startprfx, endprfx;
+ struct erofs_dirent *const de = (struct erofs_dirent *)data;
+
+ /* since the 1st dirent has been evaluated previously */
+ head = 1;
+ back = ndirents - 1;
+ startprfx = endprfx = 0;
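+ /* names are sorted, so characters already matched at both search bounds need not be re-compared */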
+
+ while (head <= back) {
+ const int mid = head + (back - head) / 2;
+ const int nameoff = nameoff_from_disk(de[mid].nameoff,
+ dirblksize);
+ unsigned int matched = min(startprfx, endprfx);
+ struct erofs_qstr dname = {
+ .name = data + nameoff,
+ .end = mid >= ndirents - 1 ?
+ data + dirblksize :
+ data + nameoff_from_disk(de[mid + 1].nameoff,
+ dirblksize)
+ };
+
+ /* string comparison without already matched prefix */
+ int ret = erofs_dirnamecmp(name, &dname, &matched);
+
+ if (!ret) {
+ return de + mid;
+ } else if (ret > 0) {
+ head = mid + 1;
+ startprfx = matched;
+ } else {
+ back = mid - 1;
+ endprfx = matched;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct page *find_target_block_classic(struct inode *dir,
+ struct erofs_qstr *name,
+ int *_ndirents)
+{
+ unsigned int startprfx, endprfx;
+ int head, back;
+ struct address_space *const mapping = dir->i_mapping;
+ struct page *candidate = ERR_PTR(-ENOENT);
+
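+ /* binary search dir blocks; the candidate is the last block whose first name compares less than the target */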
+ startprfx = endprfx = 0;
+ head = 0;
+ back = erofs_inode_datablocks(dir) - 1;
+
+ while (head <= back) {
+ const int mid = head + (back - head) / 2;
+ struct page *page = read_mapping_page(mapping, mid, NULL);
+
+ if (!IS_ERR(page)) {
+ struct erofs_dirent *de = kmap_atomic(page);
+ const int nameoff = nameoff_from_disk(de->nameoff,
+ EROFS_BLKSIZ);
+ const int ndirents = nameoff / sizeof(*de);
+ int diff;
+ unsigned int matched;
+ struct erofs_qstr dname;
+
+ if (!ndirents) {
+ kunmap_atomic(de);
+ put_page(page);
+ erofs_err(dir->i_sb,
+ "corrupted dir block %d @ nid %llu",
+ mid, EROFS_I(dir)->nid);
+ DBG_BUGON(1);
+ page = ERR_PTR(-EFSCORRUPTED);
+ goto out;
+ }
+
+ matched = min(startprfx, endprfx);
+
+ dname.name = (u8 *)de + nameoff;
+ if (ndirents == 1)
+ dname.end = (u8 *)de + EROFS_BLKSIZ;
+ else
+ dname.end = (u8 *)de +
+ nameoff_from_disk(de[1].nameoff,
+ EROFS_BLKSIZ);
+
+ /* string comparison without already matched prefix */
+ diff = erofs_dirnamecmp(name, &dname, &matched);
+ kunmap_atomic(de);
+
+ if (!diff) {
+ *_ndirents = 0;
+ goto out;
+ } else if (diff > 0) {
+ head = mid + 1;
+ startprfx = matched;
+
+ if (!IS_ERR(candidate))
+ put_page(candidate);
+ candidate = page;
+ *_ndirents = ndirents;
+ } else {
+ put_page(page);
+
+ back = mid - 1;
+ endprfx = matched;
+ }
+ continue;
+ }
+out: /* free if the candidate is valid */
+ if (!IS_ERR(candidate))
+ put_page(candidate);
+ return page;
+ }
+ return candidate;
+}
+
+int erofs_namei(struct inode *dir,
+ struct qstr *name,
+ erofs_nid_t *nid, unsigned int *d_type)
+{
+ int ndirents;
+ struct page *page;
+ void *data;
+ struct erofs_dirent *de;
+ struct erofs_qstr qn;
+
+ if (!dir->i_size)
+ return -ENOENT;
+
+ qn.name = name->name;
+ qn.end = name->name + name->len;
+
+ ndirents = 0;
+ page = find_target_block_classic(dir, &qn, &ndirents);
+
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ data = kmap_atomic(page);
+ /* the target page has been mapped */
+ if (ndirents)
+ de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
+ else
+ de = (struct erofs_dirent *)data;
+
+ if (!IS_ERR(de)) {
+ *nid = le64_to_cpu(de->nid);
+ *d_type = de->file_type;
+ }
+
+ kunmap_atomic(data);
+ put_page(page);
+
+ return PTR_ERR_OR_ZERO(de);
+}
+
+/* NOTE: i_mutex is already held by vfs */
+static struct dentry *erofs_lookup(struct inode *dir,
+ struct dentry *dentry,
+ unsigned int flags)
+{
+ int err;
+ erofs_nid_t nid;
+ unsigned int d_type;
+ struct inode *inode;
+
+ DBG_BUGON(!d_really_is_negative(dentry));
+ /* the dentry must be unhashed in lookup, no need to worry about it */
+ DBG_BUGON(!d_unhashed(dentry));
+
+ trace_erofs_lookup(dir, dentry, flags);
+
+ /* file name exceeds fs limit */
+ if (dentry->d_name.len > EROFS_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ /* false uninitialized warnings on gcc 4.8.x */
+ err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);
+
+ if (err == -ENOENT) {
+ /* negative dentry */
+ inode = NULL;
+ } else if (err) {
+ inode = ERR_PTR(err);
+ } else {
+ erofs_dbg("%s, %s (nid %llu) found, d_type %u", __func__,
+ dentry->d_name.name, nid, d_type);
+ inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR);
+ }
+ return d_splice_alias(inode, dentry);
+}
+
+const struct inode_operations erofs_dir_iops = {
+ .lookup = erofs_lookup,
+ .getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
+};
+
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
new file mode 100644
index 000000000000..057e6d7b5b7f
--- /dev/null
+++ b/fs/erofs/super.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/statfs.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include <linux/crc32c.h>
+#include "xattr.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/erofs.h>
+
+static struct kmem_cache *erofs_inode_cachep __read_mostly;
+
+void _erofs_err(struct super_block *sb, const char *function,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
+ va_end(args);
+}
+
+void _erofs_info(struct super_block *sb, const char *function,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_info("(device %s): %pV", sb->s_id, &vaf);
+ va_end(args);
+}
+
+static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
+{
+ struct erofs_super_block *dsb;
+ u32 expected_crc, crc;
+
+ dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
+ EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
+ if (!dsb)
+ return -ENOMEM;
+
+ expected_crc = le32_to_cpu(dsb->checksum);
+ dsb->checksum = 0;
+	/*
+	 * the first EROFS_SUPER_OFFSET bytes are skipped to allow for x86 boot
+	 * sectors and other oddities, so only the remainder is checksummed.
+	 */
+ crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
+ kfree(dsb);
+
+ if (crc != expected_crc) {
+ erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
+ crc, expected_crc);
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+static void erofs_inode_init_once(void *ptr)
+{
+ struct erofs_inode *vi = ptr;
+
+ inode_init_once(&vi->vfs_inode);
+}
+
+static struct inode *erofs_alloc_inode(struct super_block *sb)
+{
+ struct erofs_inode *vi =
+ kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
+
+ if (!vi)
+ return NULL;
+
+ /* zero out everything except vfs_inode */
+ memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
+ return &vi->vfs_inode;
+}
+
+static void erofs_free_inode(struct inode *inode)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+
+ /* be careful of RCU symlink path */
+ if (inode->i_op == &erofs_fast_symlink_iops)
+ kfree(inode->i_link);
+ kfree(vi->xattr_shared_xattrs);
+
+ kmem_cache_free(erofs_inode_cachep, vi);
+}
+
+static bool check_layout_compatibility(struct super_block *sb,
+ struct erofs_super_block *dsb)
+{
+ const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
+
+ EROFS_SB(sb)->feature_incompat = feature;
+
+ /* check if current kernel meets all mandatory requirements */
+ if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
+ erofs_err(sb,
+ "unidentified incompatible feature %x, please upgrade kernel version",
+ feature & ~EROFS_ALL_FEATURE_INCOMPAT);
+ return false;
+ }
+ return true;
+}
+
+static int erofs_read_superblock(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi;
+ struct page *page;
+ struct erofs_super_block *dsb;
+ unsigned int blkszbits;
+ void *data;
+ int ret;
+
+ page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
+ if (IS_ERR(page)) {
+ erofs_err(sb, "cannot read erofs superblock");
+ return PTR_ERR(page);
+ }
+
+ sbi = EROFS_SB(sb);
+
+ data = kmap(page);
+ dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
+
+ ret = -EINVAL;
+ if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
+ erofs_err(sb, "cannot find valid erofs superblock");
+ goto out;
+ }
+
+ sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
+ if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) {
+ ret = erofs_superblock_csum_verify(sb, data);
+ if (ret)
+ goto out;
+ }
+
+ blkszbits = dsb->blkszbits;
+ /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
+ if (blkszbits != LOG_BLOCK_SIZE) {
+ erofs_err(sb, "blksize %u isn't supported on this platform",
+ 1 << blkszbits);
+ goto out;
+ }
+
+ if (!check_layout_compatibility(sb, dsb))
+ goto out;
+
+ sbi->blocks = le32_to_cpu(dsb->blocks);
+ sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
+#ifdef CONFIG_EROFS_FS_XATTR
+ sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
+#endif
+ sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
+ sbi->root_nid = le16_to_cpu(dsb->root_nid);
+ sbi->inos = le64_to_cpu(dsb->inos);
+
+ sbi->build_time = le64_to_cpu(dsb->build_time);
+ sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
+
+ memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
+
+ ret = strscpy(sbi->volume_name, dsb->volume_name,
+ sizeof(dsb->volume_name));
+ if (ret < 0) { /* -E2BIG */
+ erofs_err(sb, "bad volume name without NIL terminator");
+ ret = -EFSCORRUPTED;
+ goto out;
+ }
+ ret = 0;
+out:
+ kunmap(page);
+ put_page(page);
+ return ret;
+}
+
+#ifdef CONFIG_EROFS_FS_ZIP
+static int erofs_build_cache_strategy(struct super_block *sb,
+ substring_t *args)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ const char *cs = match_strdup(args);
+ int err = 0;
+
+ if (!cs) {
+ erofs_err(sb, "Not enough memory to store cache strategy");
+ return -ENOMEM;
+ }
+
+ if (!strcmp(cs, "disabled")) {
+ sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED;
+ } else if (!strcmp(cs, "readahead")) {
+ sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD;
+ } else if (!strcmp(cs, "readaround")) {
+ sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ } else {
+ erofs_err(sb, "Unrecognized cache strategy \"%s\"", cs);
+ err = -EINVAL;
+ }
+ kfree(cs);
+ return err;
+}
+#else
+static int erofs_build_cache_strategy(struct super_block *sb,
+ substring_t *args)
+{
+ erofs_info(sb, "EROFS compression is disabled, so cache strategy is ignored");
+ return 0;
+}
+#endif
+
+/* set up default EROFS parameters */
+static void erofs_default_options(struct erofs_sb_info *sbi)
+{
+#ifdef CONFIG_EROFS_FS_ZIP
+ sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ sbi->max_sync_decompress_pages = 3;
+#endif
+#ifdef CONFIG_EROFS_FS_XATTR
+ set_opt(sbi, XATTR_USER);
+#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ set_opt(sbi, POSIX_ACL);
+#endif
+}
+
+enum {
+ Opt_user_xattr,
+ Opt_nouser_xattr,
+ Opt_acl,
+ Opt_noacl,
+ Opt_cache_strategy,
+ Opt_err
+};
+
+static match_table_t erofs_tokens = {
+ {Opt_user_xattr, "user_xattr"},
+ {Opt_nouser_xattr, "nouser_xattr"},
+ {Opt_acl, "acl"},
+ {Opt_noacl, "noacl"},
+ {Opt_cache_strategy, "cache_strategy=%s"},
+ {Opt_err, NULL}
+};
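+
+/*
+ * e.g. a typical mount command using the options above might look like
+ * (the device and mountpoint names below are illustrative only):
+ * mount -t erofs -o noacl,cache_strategy=readahead /dev/sdX /mnt/erofs
+ */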
+
+static int erofs_parse_options(struct super_block *sb, char *options)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *p;
+ int err;
+
+ if (!options)
+ return 0;
+
+ while ((p = strsep(&options, ","))) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ args[0].to = args[0].from = NULL;
+ token = match_token(p, erofs_tokens, args);
+
+ switch (token) {
+#ifdef CONFIG_EROFS_FS_XATTR
+ case Opt_user_xattr:
+ set_opt(EROFS_SB(sb), XATTR_USER);
+ break;
+ case Opt_nouser_xattr:
+ clear_opt(EROFS_SB(sb), XATTR_USER);
+ break;
+#else
+ case Opt_user_xattr:
+ erofs_info(sb, "user_xattr options not supported");
+ break;
+ case Opt_nouser_xattr:
+ erofs_info(sb, "nouser_xattr options not supported");
+ break;
+#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ case Opt_acl:
+ set_opt(EROFS_SB(sb), POSIX_ACL);
+ break;
+ case Opt_noacl:
+ clear_opt(EROFS_SB(sb), POSIX_ACL);
+ break;
+#else
+ case Opt_acl:
+ erofs_info(sb, "acl options not supported");
+ break;
+ case Opt_noacl:
+ erofs_info(sb, "noacl options not supported");
+ break;
+#endif
+ case Opt_cache_strategy:
+ err = erofs_build_cache_strategy(sb, args);
+ if (err)
+ return err;
+ break;
+ default:
+ erofs_err(sb, "Unrecognized mount option \"%s\" or missing value", p);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_EROFS_FS_ZIP
+static const struct address_space_operations managed_cache_aops;
+
+static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
+{
+ int ret = 1; /* 0 - busy */
+ struct address_space *const mapping = page->mapping;
+
+ DBG_BUGON(!PageLocked(page));
+ DBG_BUGON(mapping->a_ops != &managed_cache_aops);
+
+ if (PagePrivate(page))
+ ret = erofs_try_to_free_cached_page(mapping, page);
+
+ return ret;
+}
+
+static void erofs_managed_cache_invalidatepage(struct page *page,
+ unsigned int offset,
+ unsigned int length)
+{
+ const unsigned int stop = length + offset;
+
+ DBG_BUGON(!PageLocked(page));
+
+ /* Check for potential overflow in debug mode */
+ DBG_BUGON(stop > PAGE_SIZE || stop < length);
+
+ if (offset == 0 && stop == PAGE_SIZE)
+ while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
+ cond_resched();
+}
+
+static const struct address_space_operations managed_cache_aops = {
+ .releasepage = erofs_managed_cache_releasepage,
+ .invalidatepage = erofs_managed_cache_invalidatepage,
+};
+
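+/*
+ * The managed cache is a pseudo in-memory inode whose page cache keeps
+ * cached compressed pages for decompression; the releasepage and
+ * invalidatepage hooks above allow the VM to reclaim those pages via
+ * erofs_try_to_free_cached_page().
+ */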
+static int erofs_init_managed_cache(struct super_block *sb)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ struct inode *const inode = new_inode(sb);
+
+ if (!inode)
+ return -ENOMEM;
+
+ set_nlink(inode, 1);
+ inode->i_size = OFFSET_MAX;
+
+ inode->i_mapping->a_ops = &managed_cache_aops;
+ mapping_set_gfp_mask(inode->i_mapping,
+ GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
+ sbi->managed_cache = inode;
+ return 0;
+}
+#else
+static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
+#endif
+
+static int erofs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *inode;
+ struct erofs_sb_info *sbi;
+ int err;
+
+ sb->s_magic = EROFS_SUPER_MAGIC;
+
+ if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
+ erofs_err(sb, "failed to set erofs blksize");
+ return -EINVAL;
+ }
+
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+
+ sb->s_fs_info = sbi;
+ err = erofs_read_superblock(sb);
+ if (err)
+ return err;
+
+ sb->s_flags |= SB_RDONLY | SB_NOATIME;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_time_gran = 1;
+
+ sb->s_op = &erofs_sops;
+
+#ifdef CONFIG_EROFS_FS_XATTR
+ sb->s_xattr = erofs_xattr_handlers;
+#endif
+ /* set erofs default mount options */
+ erofs_default_options(sbi);
+
+ err = erofs_parse_options(sb, data);
+ if (err)
+ return err;
+
+ if (test_opt(sbi, POSIX_ACL))
+ sb->s_flags |= SB_POSIXACL;
+ else
+ sb->s_flags &= ~SB_POSIXACL;
+
+#ifdef CONFIG_EROFS_FS_ZIP
+ INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
+#endif
+
+ /* get the root inode */
+ inode = erofs_iget(sb, ROOT_NID(sbi), true);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ if (!S_ISDIR(inode->i_mode)) {
+ erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
+ ROOT_NID(sbi), inode->i_mode);
+ iput(inode);
+ return -EINVAL;
+ }
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ erofs_shrinker_register(sb);
+ /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
+ err = erofs_init_managed_cache(sb);
+ if (err)
+ return err;
+
+ erofs_info(sb, "mounted with opts: %s, root inode @ nid %llu.",
+ (char *)data, ROOT_NID(sbi));
+ return 0;
+}
+
+static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super);
+}
+
+/*
+ * can be triggered after deactivate_locked_super() is called,
+ * which covers both umount and a mount that failed to initialize.
+ */
+static void erofs_kill_sb(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi;
+
+ WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
+
+ kill_block_super(sb);
+
+ sbi = EROFS_SB(sb);
+ if (!sbi)
+ return;
+ kfree(sbi);
+ sb->s_fs_info = NULL;
+}
+
+/* called when ->s_root is non-NULL */
+static void erofs_put_super(struct super_block *sb)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+
+ DBG_BUGON(!sbi);
+
+ erofs_shrinker_unregister(sb);
+#ifdef CONFIG_EROFS_FS_ZIP
+ iput(sbi->managed_cache);
+ sbi->managed_cache = NULL;
+#endif
+}
+
+static struct file_system_type erofs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "erofs",
+ .mount = erofs_mount,
+ .kill_sb = erofs_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+MODULE_ALIAS_FS("erofs");
+
+static int __init erofs_module_init(void)
+{
+ int err;
+
+ erofs_check_ondisk_layout_definitions();
+
+ erofs_inode_cachep = kmem_cache_create("erofs_inode",
+ sizeof(struct erofs_inode), 0,
+ SLAB_RECLAIM_ACCOUNT,
+ erofs_inode_init_once);
+ if (!erofs_inode_cachep) {
+ err = -ENOMEM;
+ goto icache_err;
+ }
+
+ err = erofs_init_shrinker();
+ if (err)
+ goto shrinker_err;
+
+ err = z_erofs_init_zip_subsystem();
+ if (err)
+ goto zip_err;
+
+ err = register_filesystem(&erofs_fs_type);
+ if (err)
+ goto fs_err;
+
+ return 0;
+
+fs_err:
+ z_erofs_exit_zip_subsystem();
+zip_err:
+ erofs_exit_shrinker();
+shrinker_err:
+ kmem_cache_destroy(erofs_inode_cachep);
+icache_err:
+ return err;
+}
+
+static void __exit erofs_module_exit(void)
+{
+ unregister_filesystem(&erofs_fs_type);
+ z_erofs_exit_zip_subsystem();
+ erofs_exit_shrinker();
+
+ /* Ensure all RCU free inodes are safe before cache is destroyed. */
+ rcu_barrier();
+ kmem_cache_destroy(erofs_inode_cachep);
+}
+
+/* get filesystem statistics */
+static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+
+ buf->f_type = sb->s_magic;
+ buf->f_bsize = EROFS_BLKSIZ;
+ buf->f_blocks = sbi->blocks;
+ buf->f_bfree = buf->f_bavail = 0;
+
+ buf->f_files = ULLONG_MAX;
+ buf->f_ffree = ULLONG_MAX - sbi->inos;
+
+ buf->f_namelen = EROFS_NAME_LEN;
+
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
+ return 0;
+}
+
+static int erofs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
+
+#ifdef CONFIG_EROFS_FS_XATTR
+ if (test_opt(sbi, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+ else
+ seq_puts(seq, ",nouser_xattr");
+#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ if (test_opt(sbi, POSIX_ACL))
+ seq_puts(seq, ",acl");
+ else
+ seq_puts(seq, ",noacl");
+#endif
+#ifdef CONFIG_EROFS_FS_ZIP
+ if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) {
+ seq_puts(seq, ",cache_strategy=disabled");
+ } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) {
+ seq_puts(seq, ",cache_strategy=readahead");
+ } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
+ seq_puts(seq, ",cache_strategy=readaround");
+ }
+#endif
+ return 0;
+}
+
+static int erofs_remount(struct super_block *sb, int *flags, char *data)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ unsigned int org_mnt_opt = sbi->mount_opt;
+ int err;
+
+ DBG_BUGON(!sb_rdonly(sb));
+ err = erofs_parse_options(sb, data);
+ if (err)
+ goto out;
+
+ if (test_opt(sbi, POSIX_ACL))
+ sb->s_flags |= SB_POSIXACL;
+ else
+ sb->s_flags &= ~SB_POSIXACL;
+
+ *flags |= SB_RDONLY;
+ return 0;
+out:
+ sbi->mount_opt = org_mnt_opt;
+ return err;
+}
+
+const struct super_operations erofs_sops = {
+ .put_super = erofs_put_super,
+ .alloc_inode = erofs_alloc_inode,
+ .free_inode = erofs_free_inode,
+ .statfs = erofs_statfs,
+ .show_options = erofs_show_options,
+ .remount_fs = erofs_remount,
+};
+
+module_init(erofs_module_init);
+module_exit(erofs_module_exit);
+
+MODULE_DESCRIPTION("Enhanced ROM File System");
+MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
+MODULE_LICENSE("GPL");
+
diff --git a/fs/erofs/tagptr.h b/fs/erofs/tagptr.h
new file mode 100644
index 000000000000..a72897c86744
--- /dev/null
+++ b/fs/erofs/tagptr.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * A tagged pointer implementation
+ *
+ * Copyright (C) 2018 Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_TAGPTR_H
+#define __EROFS_FS_TAGPTR_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+/*
+ * the names of the tagged pointer types are tagptr{1, 2, 3...}_t;
+ * avoid using the internal structs __tagptr{1, 2, 3...} directly
+ */
+#define __MAKE_TAGPTR(n) \
+typedef struct __tagptr##n { \
+ uintptr_t v; \
+} tagptr##n##_t;
+
+__MAKE_TAGPTR(1)
+__MAKE_TAGPTR(2)
+__MAKE_TAGPTR(3)
+__MAKE_TAGPTR(4)
+
+#undef __MAKE_TAGPTR
+
+extern void __compiletime_error("bad tagptr tags")
+ __bad_tagptr_tags(void);
+
+extern void __compiletime_error("bad tagptr type")
+ __bad_tagptr_type(void);
+
+/* reject broken usages such as "#define tagptr2_t tagptr3_t" by users */
+#define __tagptr_mask_1(ptr, n) \
+ __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \
+ (1UL << (n)) - 1 :
+
+#define __tagptr_mask(ptr) (\
+ __tagptr_mask_1(ptr, 1) ( \
+ __tagptr_mask_1(ptr, 2) ( \
+ __tagptr_mask_1(ptr, 3) ( \
+ __tagptr_mask_1(ptr, 4) ( \
+ __bad_tagptr_type(), 0)))))
+
+/* generate a tagged pointer from a raw value */
+#define tagptr_init(type, val) \
+ ((typeof(type)){ .v = (uintptr_t)(val) })
+
+/*
+ * directly cast a tagged pointer to the native pointer type, which
+ * could be used for backward compatibility of existing code.
+ */
+#define tagptr_cast_ptr(tptr) ((void *)(tptr).v)
+
+/* encode tagged pointers */
+#define tagptr_fold(type, ptr, _tags) ({ \
+ const typeof(_tags) tags = (_tags); \
+ if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \
+ __bad_tagptr_tags(); \
+tagptr_init(type, (uintptr_t)(ptr) | tags); })
+
+/* decode tagged pointers */
+#define tagptr_unfold_ptr(tptr) \
+ ((void *)((tptr).v & ~__tagptr_mask(tptr)))
+
+#define tagptr_unfold_tags(tptr) \
+ ((tptr).v & __tagptr_mask(tptr))
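+
+/*
+ * a minimal usage sketch (1-bit tag, assuming ptr is at least 2-byte
+ * aligned so its lowest bit is free):
+ *	tagptr1_t t = tagptr_fold(tagptr1_t, ptr, 1);
+ * tagptr_unfold_ptr(t) then yields the original pointer and
+ * tagptr_unfold_tags(t) yields the tag (1 here).
+ */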
+
+/* operations on tagged pointers */
+#define tagptr_eq(_tptr1, _tptr2) ({ \
+ typeof(_tptr1) tptr1 = (_tptr1); \
+ typeof(_tptr2) tptr2 = (_tptr2); \
+ (void)(&tptr1 == &tptr2); \
+(tptr1).v == (tptr2).v; })
+
+/* lock-free CAS operation */
+#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ typeof(_o) o = (_o); \
+ typeof(_n) n = (_n); \
+ (void)(&o == &n); \
+ (void)(&o == ptptr); \
+tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); })
+
+/* wrap WRITE_ONCE if atomic update is needed */
+#define tagptr_replace_tags(_ptptr, tags) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \
+*ptptr; })
+
+#define tagptr_set_tags(_ptptr, _tags) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ const typeof(_tags) tags = (_tags); \
+ if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+ __bad_tagptr_tags(); \
+ ptptr->v |= tags; \
+*ptptr; })
+
+#define tagptr_clear_tags(_ptptr, _tags) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ const typeof(_tags) tags = (_tags); \
+ if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+ __bad_tagptr_tags(); \
+ ptptr->v &= ~tags; \
+*ptptr; })
+
+#endif /* __EROFS_FS_TAGPTR_H */
+
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
new file mode 100644
index 000000000000..1e8e1450d5b0
--- /dev/null
+++ b/fs/erofs/utils.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "internal.h"
+#include <linux/pagevec.h>
+
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
+{
+ struct page *page;
+
+ if (!list_empty(pool)) {
+ page = lru_to_page(pool);
+ DBG_BUGON(page_ref_count(page) != 1);
+ list_del(&page->lru);
+ } else {
+ page = alloc_page(gfp);
+ }
+ return page;
+}
+
+#if (EROFS_PCPUBUF_NR_PAGES > 0)
+static struct {
+ u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
+} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
+
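+/*
+ * note that preemption is disabled here and stays disabled while the
+ * per-CPU buffer is in use; the caller is expected to re-enable it
+ * (e.g. via preempt_enable()) once it is done with the buffer.
+ */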
+void *erofs_get_pcpubuf(unsigned int pagenr)
+{
+ preempt_disable();
+ return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
+}
+#endif
+
+#ifdef CONFIG_EROFS_FS_ZIP
+/* global shrink count (for all mounted EROFS instances) */
+static atomic_long_t erofs_global_shrink_cnt;
+
+#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
+#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount)
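+
+/*
+ * workgroup refcounting vs. the shrinker: a refcount of 1 means only the
+ * workstation (radix tree) still holds the workgroup, and such workgroups
+ * are accounted in erofs_global_shrink_cnt so they can be reclaimed;
+ * grabbing an extra reference (1 -> 2) removes it from that count again.
+ */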
+
+static int erofs_workgroup_get(struct erofs_workgroup *grp)
+{
+ int o;
+
+repeat:
+ o = erofs_wait_on_workgroup_freezed(grp);
+ if (o <= 0)
+ return -1;
+
+ if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
+ goto repeat;
+
+	/* decrease the global shrink count (paired with erofs_workgroup_put) */
+ if (o == 1)
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ return 0;
+}
+
+struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+ pgoff_t index, bool *tag)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_workgroup *grp;
+
+repeat:
+ rcu_read_lock();
+ grp = radix_tree_lookup(&sbi->workstn_tree, index);
+ if (grp) {
+ *tag = xa_pointer_tag(grp);
+ grp = xa_untag_pointer(grp);
+
+ if (erofs_workgroup_get(grp)) {
+ /* prefer to relax rcu read side */
+ rcu_read_unlock();
+ goto repeat;
+ }
+
+ DBG_BUGON(index != grp->index);
+ }
+ rcu_read_unlock();
+ return grp;
+}
+
+int erofs_register_workgroup(struct super_block *sb,
+ struct erofs_workgroup *grp,
+ bool tag)
+{
+ struct erofs_sb_info *sbi;
+ int err;
+
+	/* grp shouldn't be broken or already in use before registration */
+ if (atomic_read(&grp->refcount) != 1) {
+ DBG_BUGON(1);
+ return -EINVAL;
+ }
+
+ err = radix_tree_preload(GFP_NOFS);
+ if (err)
+ return err;
+
+ sbi = EROFS_SB(sb);
+ xa_lock(&sbi->workstn_tree);
+
+ grp = xa_tag_pointer(grp, tag);
+
+ /*
+	 * Bump up the reference count before making this workgroup
+	 * visible to other users in order to avoid potential UAF,
+	 * since lookups are not serialized by workstn_lock.
+ */
+ __erofs_workgroup_get(grp);
+
+ err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
+ if (err)
+ /*
+ * it's safe to decrease since the workgroup isn't visible
+		 * and refcount >= 2 (cannot be frozen).
+ */
+ __erofs_workgroup_put(grp);
+
+ xa_unlock(&sbi->workstn_tree);
+ radix_tree_preload_end();
+ return err;
+}
+
+static void __erofs_workgroup_free(struct erofs_workgroup *grp)
+{
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ erofs_workgroup_free_rcu(grp);
+}
+
+int erofs_workgroup_put(struct erofs_workgroup *grp)
+{
+ int count = atomic_dec_return(&grp->refcount);
+
+ if (count == 1)
+ atomic_long_inc(&erofs_global_shrink_cnt);
+ else if (!count)
+ __erofs_workgroup_free(grp);
+ return count;
+}
+
+static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
+{
+ erofs_workgroup_unfreeze(grp, 0);
+ __erofs_workgroup_free(grp);
+}
+
+static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *grp)
+{
+ /*
+	 * If managed cache is on, the refcount of workgroups
+	 * themselves could be < 0 (frozen). In other words,
+	 * there is no guarantee that all refcounts are > 0.
+ */
+ if (!erofs_workgroup_try_to_freeze(grp, 1))
+ return false;
+
+ /*
+ * Note that all cached pages should be unattached
+	 * before being deleted from the radix tree. Otherwise some
+	 * cached pages could still be attached to the orphaned
+	 * old workgroup when the new one is available in the tree.
+ */
+ if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
+ erofs_workgroup_unfreeze(grp, 1);
+ return false;
+ }
+
+ /*
+	 * It's impossible to fail after the workgroup is frozen;
+	 * however, in order to guard against race conditions, add a
+ * DBG_BUGON to observe this in advance.
+ */
+ DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
+ grp->index)) != grp);
+
+ /*
+	 * If managed cache is on, the last reference is the one
+	 * held by the related workstation itself.
+ */
+ erofs_workgroup_unfreeze_final(grp);
+ return true;
+}
+
+static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
+ unsigned long nr_shrink)
+{
+ pgoff_t first_index = 0;
+ void *batch[PAGEVEC_SIZE];
+ unsigned int freed = 0;
+
+ int i, found;
+repeat:
+ xa_lock(&sbi->workstn_tree);
+
+ found = radix_tree_gang_lookup(&sbi->workstn_tree,
+ batch, first_index, PAGEVEC_SIZE);
+
+ for (i = 0; i < found; ++i) {
+ struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
+
+ first_index = grp->index + 1;
+
+ /* try to shrink each valid workgroup */
+ if (!erofs_try_to_release_workgroup(sbi, grp))
+ continue;
+
+ ++freed;
+ if (!--nr_shrink)
+ break;
+ }
+ xa_unlock(&sbi->workstn_tree);
+
+ if (i && nr_shrink)
+ goto repeat;
+ return freed;
+}
+
+/* protected by 'erofs_sb_list_lock' */
+static unsigned int shrinker_run_no;
+
+/* protects the mounted 'erofs_sb_list' */
+static DEFINE_SPINLOCK(erofs_sb_list_lock);
+static LIST_HEAD(erofs_sb_list);
+
+void erofs_shrinker_register(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ mutex_init(&sbi->umount_mutex);
+
+ spin_lock(&erofs_sb_list_lock);
+ list_add(&sbi->list, &erofs_sb_list);
+ spin_unlock(&erofs_sb_list_lock);
+}
+
+void erofs_shrinker_unregister(struct super_block *sb)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+
+ mutex_lock(&sbi->umount_mutex);
+ /* clean up all remaining workgroups in memory */
+ erofs_shrink_workstation(sbi, ~0UL);
+
+ spin_lock(&erofs_sb_list_lock);
+ list_del(&sbi->list);
+ spin_unlock(&erofs_sb_list_lock);
+ mutex_unlock(&sbi->umount_mutex);
+}
+
+static unsigned long erofs_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ return atomic_long_read(&erofs_global_shrink_cnt);
+}
+
+static unsigned long erofs_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct erofs_sb_info *sbi;
+ struct list_head *p;
+
+ unsigned long nr = sc->nr_to_scan;
+ unsigned int run_no;
+ unsigned long freed = 0;
+
+ spin_lock(&erofs_sb_list_lock);
+ do {
+ run_no = ++shrinker_run_no;
+ } while (run_no == 0);
+
+ /* Iterate over all mounted superblocks and try to shrink them */
+ p = erofs_sb_list.next;
+ while (p != &erofs_sb_list) {
+ sbi = list_entry(p, struct erofs_sb_info, list);
+
+ /*
+ * We move the ones we do to the end of the list, so we stop
+ * when we see one we have already done.
+ */
+ if (sbi->shrinker_run_no == run_no)
+ break;
+
+ if (!mutex_trylock(&sbi->umount_mutex)) {
+ p = p->next;
+ continue;
+ }
+
+ spin_unlock(&erofs_sb_list_lock);
+ sbi->shrinker_run_no = run_no;
+
+ freed += erofs_shrink_workstation(sbi, nr);
+
+ spin_lock(&erofs_sb_list_lock);
+ /* Get the next list element before we move this one */
+ p = p->next;
+
+ /*
+ * Move this one to the end of the list to provide some
+ * fairness.
+ */
+ list_move_tail(&sbi->list, &erofs_sb_list);
+ mutex_unlock(&sbi->umount_mutex);
+
+ if (freed >= nr)
+ break;
+ }
+ spin_unlock(&erofs_sb_list_lock);
+ return freed;
+}
+
+static struct shrinker erofs_shrinker_info = {
+ .scan_objects = erofs_shrink_scan,
+ .count_objects = erofs_shrink_count,
+ .seeks = DEFAULT_SEEKS,
+};
+
+int __init erofs_init_shrinker(void)
+{
+ return register_shrinker(&erofs_shrinker_info);
+}
+
+void erofs_exit_shrinker(void)
+{
+ unregister_shrinker(&erofs_shrinker_info);
+}
+#endif /* CONFIG_EROFS_FS_ZIP */
+
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
new file mode 100644
index 000000000000..a13a78725c57
--- /dev/null
+++ b/fs/erofs/xattr.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include <linux/security.h>
+#include "xattr.h"
+
+struct xattr_iter {
+ struct super_block *sb;
+ struct page *page;
+ void *kaddr;
+
+ erofs_blk_t blkaddr;
+ unsigned int ofs;
+};
+
+static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
+{
+ /* the only user of kunmap() is 'init_inode_xattrs' */
+ if (!atomic)
+ kunmap(it->page);
+ else
+ kunmap_atomic(it->kaddr);
+
+ unlock_page(it->page);
+ put_page(it->page);
+}
+
+static inline void xattr_iter_end_final(struct xattr_iter *it)
+{
+ if (!it->page)
+ return;
+
+ xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct xattr_iter it;
+ unsigned int i;
+ struct erofs_xattr_ibody_header *ih;
+ struct super_block *sb;
+ struct erofs_sb_info *sbi;
+ bool atomic_map;
+ int ret = 0;
+
+	/* the common case is that xattrs of this inode have already been initialized. */
+ if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
+ return 0;
+
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+
+ /* someone has initialized xattrs for us? */
+ if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
+ goto out_unlock;
+
+ /*
+ * bypass all xattr operations if ->xattr_isize is not greater than
+ * sizeof(struct erofs_xattr_ibody_header), in detail:
+	 * 1) it is too small to contain erofs_xattr_ibody_header, so
+	 *    ->xattr_isize should be 0 (which means no xattrs at all);
+	 * 2) it exactly equals sizeof(erofs_xattr_ibody_header), which is
+	 *    currently undefined on disk (it may be used later with a new sb feature).
+ */
+ if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
+ erofs_err(inode->i_sb,
+ "xattr_isize %d of nid %llu is not supported yet",
+ vi->xattr_isize, vi->nid);
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
+ if (vi->xattr_isize) {
+ erofs_err(inode->i_sb,
+ "bogus xattr ibody @ nid %llu", vi->nid);
+ DBG_BUGON(1);
+ ret = -EFSCORRUPTED;
+ goto out_unlock; /* xattr ondisk layout error */
+ }
+ ret = -ENOATTR;
+ goto out_unlock;
+ }
+
+ sb = inode->i_sb;
+ sbi = EROFS_SB(sb);
+ it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
+ it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
+
+ it.page = erofs_get_meta_page(sb, it.blkaddr);
+ if (IS_ERR(it.page)) {
+ ret = PTR_ERR(it.page);
+ goto out_unlock;
+ }
+
+ /* read in shared xattr array (non-atomic, see kmalloc below) */
+ it.kaddr = kmap(it.page);
+ atomic_map = false;
+
+ ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
+
+ vi->xattr_shared_count = ih->h_shared_count;
+ vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+ sizeof(uint), GFP_KERNEL);
+ if (!vi->xattr_shared_xattrs) {
+ xattr_iter_end(&it, atomic_map);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /* let's skip ibody header */
+ it.ofs += sizeof(struct erofs_xattr_ibody_header);
+
+ for (i = 0; i < vi->xattr_shared_count; ++i) {
+ if (it.ofs >= EROFS_BLKSIZ) {
+ /* cannot be unaligned */
+ DBG_BUGON(it.ofs != EROFS_BLKSIZ);
+ xattr_iter_end(&it, atomic_map);
+
+ it.page = erofs_get_meta_page(sb, ++it.blkaddr);
+ if (IS_ERR(it.page)) {
+ kfree(vi->xattr_shared_xattrs);
+ vi->xattr_shared_xattrs = NULL;
+ ret = PTR_ERR(it.page);
+ goto out_unlock;
+ }
+
+ it.kaddr = kmap_atomic(it.page);
+ atomic_map = true;
+ it.ofs = 0;
+ }
+ vi->xattr_shared_xattrs[i] =
+ le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
+ it.ofs += sizeof(__le32);
+ }
+ xattr_iter_end(&it, atomic_map);
+
+ set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
+
+out_unlock:
+ clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
+ return ret;
+}
+
+/*
+ * the general idea of these return values is:
+ * if 0 is returned, go on processing the current xattr;
+ * if 1 (> 0) is returned, skip this round and process the next xattr;
+ * if -err (< 0) is returned, an error (maybe ENOATTR) occurred
+ * and needs to be handled
+ */
+struct xattr_iter_handlers {
+ int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
+ int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
+ unsigned int len);
+ int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
+ void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
+ unsigned int len);
+};
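+
+/*
+ * two sets of these handlers are defined below: find_xattr_handlers
+ * (used by erofs_getxattr) and list_xattr_handlers (used by
+ * erofs_listxattr), both driven by xattr_foreach().
+ */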
+
+static inline int xattr_iter_fixup(struct xattr_iter *it)
+{
+ if (it->ofs < EROFS_BLKSIZ)
+ return 0;
+
+ xattr_iter_end(it, true);
+
+ it->blkaddr += erofs_blknr(it->ofs);
+
+ it->page = erofs_get_meta_page(it->sb, it->blkaddr);
+ if (IS_ERR(it->page)) {
+ int err = PTR_ERR(it->page);
+
+ it->page = NULL;
+ return err;
+ }
+
+ it->kaddr = kmap_atomic(it->page);
+ it->ofs = erofs_blkoff(it->ofs);
+ return 0;
+}
+
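+/*
+ * start iterating over the inline xattr area of @inode: on success, the
+ * number of inline xattr bytes to iterate (excluding the ibody header) is
+ * returned and it->kaddr is mapped atomically; a negative errno is
+ * returned on failure.
+ */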
+static int inline_xattr_iter_begin(struct xattr_iter *it,
+ struct inode *inode)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
+ unsigned int xattr_header_sz, inline_xattr_ofs;
+
+ xattr_header_sz = inlinexattr_header_size(inode);
+ if (xattr_header_sz >= vi->xattr_isize) {
+ DBG_BUGON(xattr_header_sz > vi->xattr_isize);
+ return -ENOATTR;
+ }
+
+ inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
+
+ it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
+ it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
+
+ it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
+ if (IS_ERR(it->page))
+ return PTR_ERR(it->page);
+
+ it->kaddr = kmap_atomic(it->page);
+ return vi->xattr_isize - xattr_header_sz;
+}
+
+/*
+ * Regardless of success or failure, `xattr_foreach' will end up with
+ * `ofs' pointing to the next xattr item rather than an arbitrary position.
+ */
+static int xattr_foreach(struct xattr_iter *it,
+ const struct xattr_iter_handlers *op,
+ unsigned int *tlimit)
+{
+ struct erofs_xattr_entry entry;
+ unsigned int value_sz, processed, slice;
+ int err;
+
+	/* 0. fixup blkaddr, ofs, page */
+ err = xattr_iter_fixup(it);
+ if (err)
+ return err;
+
+ /*
+	 * 1. read the xattr entry into memory; since entries are
+	 *    EROFS_XATTR_ALIGN'ed, the whole entry is guaranteed
+	 *    to reside within a single page
+ */
+ entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
+ if (tlimit) {
+ unsigned int entry_sz = erofs_xattr_entry_size(&entry);
+
+ /* xattr on-disk corruption: xattr entry beyond xattr_isize */
+ if (*tlimit < entry_sz) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ *tlimit -= entry_sz;
+ }
+
+ it->ofs += sizeof(struct erofs_xattr_entry);
+ value_sz = le16_to_cpu(entry.e_value_size);
+
+ /* handle entry */
+ err = op->entry(it, &entry);
+ if (err) {
+ it->ofs += entry.e_name_len + value_sz;
+ goto out;
+ }
+
+ /* 2. handle xattr name (ofs will finally be at the end of name) */
+ processed = 0;
+
+ while (processed < entry.e_name_len) {
+ if (it->ofs >= EROFS_BLKSIZ) {
+ DBG_BUGON(it->ofs > EROFS_BLKSIZ);
+
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
+ it->ofs = 0;
+ }
+
+ slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
+ entry.e_name_len - processed);
+
+ /* handle name */
+ err = op->name(it, processed, it->kaddr + it->ofs, slice);
+ if (err) {
+ it->ofs += entry.e_name_len - processed + value_sz;
+ goto out;
+ }
+
+ it->ofs += slice;
+ processed += slice;
+ }
+
+ /* 3. handle xattr value */
+ processed = 0;
+
+ if (op->alloc_buffer) {
+ err = op->alloc_buffer(it, value_sz);
+ if (err) {
+ it->ofs += value_sz;
+ goto out;
+ }
+ }
+
+ while (processed < value_sz) {
+ if (it->ofs >= EROFS_BLKSIZ) {
+ DBG_BUGON(it->ofs > EROFS_BLKSIZ);
+
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
+ it->ofs = 0;
+ }
+
+ slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
+ value_sz - processed);
+ op->value(it, processed, it->kaddr + it->ofs, slice);
+ it->ofs += slice;
+ processed += slice;
+ }
+
+out:
+ /* xattrs should be 4-byte aligned (on-disk constraint) */
+ it->ofs = EROFS_XATTR_ALIGN(it->ofs);
+ return err < 0 ? err : 0;
+}
+
+struct getxattr_iter {
+ struct xattr_iter it;
+
+ char *buffer;
+ int buffer_size, index;
+ struct qstr name;
+};
+
+static int xattr_entrymatch(struct xattr_iter *_it,
+ struct erofs_xattr_entry *entry)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+
+ return (it->index != entry->e_name_index ||
+ it->name.len != entry->e_name_len) ? -ENOATTR : 0;
+}
+
+static int xattr_namematch(struct xattr_iter *_it,
+ unsigned int processed, char *buf, unsigned int len)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+
+ return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
+}
+
+static int xattr_checkbuffer(struct xattr_iter *_it,
+ unsigned int value_sz)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+ int err = it->buffer_size < value_sz ? -ERANGE : 0;
+
+ it->buffer_size = value_sz;
+ return !it->buffer ? 1 : err;
+}
+
+static void xattr_copyvalue(struct xattr_iter *_it,
+ unsigned int processed,
+ char *buf, unsigned int len)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+
+ memcpy(it->buffer + processed, buf, len);
+}
+
+static const struct xattr_iter_handlers find_xattr_handlers = {
+ .entry = xattr_entrymatch,
+ .name = xattr_namematch,
+ .alloc_buffer = xattr_checkbuffer,
+ .value = xattr_copyvalue
+};
+
+static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
+{
+ int ret;
+ unsigned int remaining;
+
+ ret = inline_xattr_iter_begin(&it->it, inode);
+ if (ret < 0)
+ return ret;
+
+ remaining = ret;
+ while (remaining) {
+ ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
+ if (ret != -ENOATTR)
+ break;
+ }
+ xattr_iter_end_final(&it->it);
+
+ return ret ? ret : it->buffer_size;
+}
+
+static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct super_block *const sb = inode->i_sb;
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ unsigned int i;
+ int ret = -ENOATTR;
+
+ for (i = 0; i < vi->xattr_shared_count; ++i) {
+ erofs_blk_t blkaddr =
+ xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
+
+ it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
+
+ if (!i || blkaddr != it->it.blkaddr) {
+ if (i)
+ xattr_iter_end(&it->it, true);
+
+ it->it.page = erofs_get_meta_page(sb, blkaddr);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
+ it->it.kaddr = kmap_atomic(it->it.page);
+ it->it.blkaddr = blkaddr;
+ }
+
+ ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
+ if (ret != -ENOATTR)
+ break;
+ }
+ if (vi->xattr_shared_count)
+ xattr_iter_end_final(&it->it);
+
+ return ret ? ret : it->buffer_size;
+}
+
+static bool erofs_xattr_user_list(struct dentry *dentry)
+{
+ return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
+}
+
+static bool erofs_xattr_trusted_list(struct dentry *dentry)
+{
+ return capable(CAP_SYS_ADMIN);
+}
+
+int erofs_getxattr(struct inode *inode, int index,
+ const char *name,
+ void *buffer, size_t buffer_size)
+{
+ int ret;
+ struct getxattr_iter it;
+
+ if (!name)
+ return -EINVAL;
+
+ ret = init_inode_xattrs(inode);
+ if (ret)
+ return ret;
+
+ it.index = index;
+
+ it.name.len = strlen(name);
+ if (it.name.len > EROFS_NAME_LEN)
+ return -ERANGE;
+ it.name.name = name;
+
+ it.buffer = buffer;
+ it.buffer_size = buffer_size;
+
+ it.it.sb = inode->i_sb;
+ ret = inline_getxattr(inode, &it);
+ if (ret == -ENOATTR)
+ ret = shared_getxattr(inode, &it);
+ return ret;
+}
+
+static int erofs_xattr_generic_get(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+
+ switch (handler->flags) {
+ case EROFS_XATTR_INDEX_USER:
+ if (!test_opt(sbi, XATTR_USER))
+ return -EOPNOTSUPP;
+ break;
+ case EROFS_XATTR_INDEX_TRUSTED:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ break;
+ case EROFS_XATTR_INDEX_SECURITY:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return erofs_getxattr(inode, handler->flags, name, buffer, size);
+}
+
+const struct xattr_handler erofs_xattr_user_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .flags = EROFS_XATTR_INDEX_USER,
+ .list = erofs_xattr_user_list,
+ .get = erofs_xattr_generic_get,
+};
+
+const struct xattr_handler erofs_xattr_trusted_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .flags = EROFS_XATTR_INDEX_TRUSTED,
+ .list = erofs_xattr_trusted_list,
+ .get = erofs_xattr_generic_get,
+};
+
+#ifdef CONFIG_EROFS_FS_SECURITY
+const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .flags = EROFS_XATTR_INDEX_SECURITY,
+ .get = erofs_xattr_generic_get,
+};
+#endif
+
+const struct xattr_handler *erofs_xattr_handlers[] = {
+ &erofs_xattr_user_handler,
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ &posix_acl_access_xattr_handler,
+ &posix_acl_default_xattr_handler,
+#endif
+ &erofs_xattr_trusted_handler,
+#ifdef CONFIG_EROFS_FS_SECURITY
+ &erofs_xattr_security_handler,
+#endif
+ NULL,
+};
+
+struct listxattr_iter {
+ struct xattr_iter it;
+
+ struct dentry *dentry;
+ char *buffer;
+ int buffer_size, buffer_ofs;
+};
+
+static int xattr_entrylist(struct xattr_iter *_it,
+ struct erofs_xattr_entry *entry)
+{
+ struct listxattr_iter *it =
+ container_of(_it, struct listxattr_iter, it);
+ unsigned int prefix_len;
+ const char *prefix;
+
+ const struct xattr_handler *h =
+ erofs_xattr_handler(entry->e_name_index);
+
+ if (!h || (h->list && !h->list(it->dentry)))
+ return 1;
+
+ prefix = xattr_prefix(h);
+ prefix_len = strlen(prefix);
+
+ if (!it->buffer) {
+ it->buffer_ofs += prefix_len + entry->e_name_len + 1;
+ return 1;
+ }
+
+ if (it->buffer_ofs + prefix_len
+ + entry->e_name_len + 1 > it->buffer_size)
+ return -ERANGE;
+
+ memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
+ it->buffer_ofs += prefix_len;
+ return 0;
+}
+
+static int xattr_namelist(struct xattr_iter *_it,
+ unsigned int processed, char *buf, unsigned int len)
+{
+ struct listxattr_iter *it =
+ container_of(_it, struct listxattr_iter, it);
+
+ memcpy(it->buffer + it->buffer_ofs, buf, len);
+ it->buffer_ofs += len;
+ return 0;
+}
+
+static int xattr_skipvalue(struct xattr_iter *_it,
+ unsigned int value_sz)
+{
+ struct listxattr_iter *it =
+ container_of(_it, struct listxattr_iter, it);
+
+ it->buffer[it->buffer_ofs++] = '\0';
+ return 1;
+}
+
+static const struct xattr_iter_handlers list_xattr_handlers = {
+ .entry = xattr_entrylist,
+ .name = xattr_namelist,
+ .alloc_buffer = xattr_skipvalue,
+ .value = NULL
+};
+
+static int inline_listxattr(struct listxattr_iter *it)
+{
+ int ret;
+ unsigned int remaining;
+
+ ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
+ if (ret < 0)
+ return ret;
+
+ remaining = ret;
+ while (remaining) {
+ ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
+ if (ret)
+ break;
+ }
+ xattr_iter_end_final(&it->it);
+ return ret ? ret : it->buffer_ofs;
+}
+
+static int shared_listxattr(struct listxattr_iter *it)
+{
+ struct inode *const inode = d_inode(it->dentry);
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct super_block *const sb = inode->i_sb;
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < vi->xattr_shared_count; ++i) {
+ erofs_blk_t blkaddr =
+ xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
+
+ it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
+ if (!i || blkaddr != it->it.blkaddr) {
+ if (i)
+ xattr_iter_end(&it->it, true);
+
+ it->it.page = erofs_get_meta_page(sb, blkaddr);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
+ it->it.kaddr = kmap_atomic(it->it.page);
+ it->it.blkaddr = blkaddr;
+ }
+
+ ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
+ if (ret)
+ break;
+ }
+ if (vi->xattr_shared_count)
+ xattr_iter_end_final(&it->it);
+
+ return ret ? ret : it->buffer_ofs;
+}
+
+ssize_t erofs_listxattr(struct dentry *dentry,
+ char *buffer, size_t buffer_size)
+{
+ int ret;
+ struct listxattr_iter it;
+
+ ret = init_inode_xattrs(d_inode(dentry));
+ if (ret)
+ return ret;
+
+ it.dentry = dentry;
+ it.buffer = buffer;
+ it.buffer_size = buffer_size;
+ it.buffer_ofs = 0;
+
+ it.it.sb = dentry->d_sb;
+
+ ret = inline_listxattr(&it);
+ if (ret < 0 && ret != -ENOATTR)
+ return ret;
+ return shared_listxattr(&it);
+}
+
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+struct posix_acl *erofs_get_acl(struct inode *inode, int type)
+{
+ struct posix_acl *acl;
+ int prefix, rc;
+ char *value = NULL;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ rc = erofs_getxattr(inode, prefix, "", NULL, 0);
+ if (rc > 0) {
+ value = kmalloc(rc, GFP_KERNEL);
+ if (!value)
+ return ERR_PTR(-ENOMEM);
+ rc = erofs_getxattr(inode, prefix, "", value, rc);
+ }
+
+ if (rc == -ENOATTR)
+ acl = NULL;
+ else if (rc < 0)
+ acl = ERR_PTR(rc);
+ else
+ acl = posix_acl_from_xattr(&init_user_ns, value, rc);
+ kfree(value);
+ return acl;
+}
+#endif
+
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
new file mode 100644
index 000000000000..3585b84d2f20
--- /dev/null
+++ b/fs/erofs/xattr.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_XATTR_H
+#define __EROFS_XATTR_H
+
+#include "internal.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+/* Attribute not found */
+#define ENOATTR ENODATA
+
+static inline unsigned int inlinexattr_header_size(struct inode *inode)
+{
+ return sizeof(struct erofs_xattr_ibody_header) +
+ sizeof(u32) * EROFS_I(inode)->xattr_shared_count;
+}
+
+static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi,
+ unsigned int xattr_id)
+{
+#ifdef CONFIG_EROFS_FS_XATTR
+ return sbi->xattr_blkaddr +
+ xattr_id * sizeof(__u32) / EROFS_BLKSIZ;
+#else
+ return 0;
+#endif
+}
+
+static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi,
+ unsigned int xattr_id)
+{
+ return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ;
+}
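+
+/*
+ * shared xattr ids are in 4-byte units starting at xattr_blkaddr; for
+ * instance, assuming 4KiB blocks (EROFS_BLKSIZ == 4096), id 1024 refers
+ * to the entry at block (xattr_blkaddr + 1), offset 0, since
+ * 1024 * sizeof(__u32) == 4096.
+ */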
+
+#ifdef CONFIG_EROFS_FS_XATTR
+extern const struct xattr_handler erofs_xattr_user_handler;
+extern const struct xattr_handler erofs_xattr_trusted_handler;
+#ifdef CONFIG_EROFS_FS_SECURITY
+extern const struct xattr_handler erofs_xattr_security_handler;
+#endif
+
+static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
+{
+static const struct xattr_handler *xattr_handler_map[] = {
+ [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
+ [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+ &posix_acl_default_xattr_handler,
+#endif
+ [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
+#ifdef CONFIG_EROFS_FS_SECURITY
+ [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
+#endif
+};
+
+ return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
+ xattr_handler_map[idx] : NULL;
+}
+
+extern const struct xattr_handler *erofs_xattr_handlers[];
+
+int erofs_getxattr(struct inode *, int, const char *, void *, size_t);
+ssize_t erofs_listxattr(struct dentry *, char *, size_t);
+#else
+static inline int erofs_getxattr(struct inode *inode, int index,
+ const char *name, void *buffer,
+ size_t buffer_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline ssize_t erofs_listxattr(struct dentry *dentry,
+ char *buffer, size_t buffer_size)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_EROFS_FS_XATTR */
+
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+struct posix_acl *erofs_get_acl(struct inode *inode, int type);
+#else
+#define erofs_get_acl (NULL)
+#endif
+
+#endif
+
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
new file mode 100644
index 000000000000..ca99425a4536
--- /dev/null
+++ b/fs/erofs/zdata.c
@@ -0,0 +1,1401 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "zdata.h"
+#include "compress.h"
+#include <linux/prefetch.h>
+
+#include <trace/events/erofs.h>
+
+/*
+ * a compressed_pages[] placeholder in order to avoid the slot
+ * being filled with file pages for in-place decompression.
+ */
+#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
+
+/* how to allocate cached pages for a pcluster */
+enum z_erofs_cache_alloctype {
+ DONTALLOC, /* don't allocate any cached pages */
+ DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */
+};
+
+/*
+ * tagged pointer with 1-bit tag for all compressed pages
+ * tag 1 - the page is just found with an extra page reference
+ */
+typedef tagptr1_t compressed_page_t;
+
+#define tag_compressed_page_justfound(page) \
+ tagptr_fold(compressed_page_t, page, 1)
+
+static struct workqueue_struct *z_erofs_workqueue __read_mostly;
+static struct kmem_cache *pcluster_cachep __read_mostly;
+
+void z_erofs_exit_zip_subsystem(void)
+{
+ destroy_workqueue(z_erofs_workqueue);
+ kmem_cache_destroy(pcluster_cachep);
+}
+
+static inline int z_erofs_init_workqueue(void)
+{
+ const unsigned int onlinecpus = num_possible_cpus();
+ const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE;
+
+ /*
+	 * no need to spawn too many threads; limiting the number of threads
+	 * helps minimize scheduling overhead, perhaps per-CPU threads would be better?
+ */
+ z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags,
+ onlinecpus + onlinecpus / 4);
+ return z_erofs_workqueue ? 0 : -ENOMEM;
+}
+
+static void z_erofs_pcluster_init_once(void *ptr)
+{
+ struct z_erofs_pcluster *pcl = ptr;
+ struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
+ unsigned int i;
+
+ mutex_init(&cl->lock);
+ cl->nr_pages = 0;
+ cl->vcnt = 0;
+ for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
+ pcl->compressed_pages[i] = NULL;
+}
+
+static void z_erofs_pcluster_init_always(struct z_erofs_pcluster *pcl)
+{
+ struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
+
+ atomic_set(&pcl->obj.refcount, 1);
+
+ DBG_BUGON(cl->nr_pages);
+ DBG_BUGON(cl->vcnt);
+}
+
+int __init z_erofs_init_zip_subsystem(void)
+{
+ pcluster_cachep = kmem_cache_create("erofs_compress",
+ Z_EROFS_WORKGROUP_SIZE, 0,
+ SLAB_RECLAIM_ACCOUNT,
+ z_erofs_pcluster_init_once);
+ if (pcluster_cachep) {
+ if (!z_erofs_init_workqueue())
+ return 0;
+
+ kmem_cache_destroy(pcluster_cachep);
+ }
+ return -ENOMEM;
+}
+
+enum z_erofs_collectmode {
+ COLLECT_SECONDARY,
+ COLLECT_PRIMARY,
+ /*
+	 * The current collection was the tail of an existing chain, and the
+	 * previously processed chained collections have all been decided to
+	 * be hooked up to it.
+	 * A new chain will be created for the remaining collections which are
+	 * not processed yet; therefore, unlike COLLECT_PRIMARY_FOLLOWED,
+ * the next collection cannot reuse the whole page safely in
+ * the following scenario:
+ * ________________________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (belongs to the next cl) | (belongs to the current cl) |
+ * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
+ */
+ COLLECT_PRIMARY_HOOKED,
+ COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
+ /*
+ * The current collection has been linked with the owned chain, and
+ * could also be linked with the remaining collections, which means
+	 * if the processing page is the tail page of the collection, then
+ * the current collection can safely use the whole page (since
+ * the previous collection is under control) for in-place I/O, as
+ * illustrated below:
+ * ________________________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (of the current cl) | (of the previous collection) |
+ * | PRIMARY_FOLLOWED or | |
+ * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
+ *
+ * [ (*) the above page can be used as inplace I/O. ]
+ */
+ COLLECT_PRIMARY_FOLLOWED,
+};
+
+struct z_erofs_collector {
+ struct z_erofs_pagevec_ctor vector;
+
+ struct z_erofs_pcluster *pcl, *tailpcl;
+ struct z_erofs_collection *cl;
+ struct page **compressedpages;
+ z_erofs_next_pcluster_t owned_head;
+
+ enum z_erofs_collectmode mode;
+};
+
+struct z_erofs_decompress_frontend {
+ struct inode *const inode;
+
+ struct z_erofs_collector clt;
+ struct erofs_map_blocks map;
+
+ /* used for applying cache strategy on the fly */
+ bool backmost;
+ erofs_off_t headoffset;
+};
+
+#define COLLECTOR_INIT() { \
+ .owned_head = Z_EROFS_PCLUSTER_TAIL, \
+ .mode = COLLECT_PRIMARY_FOLLOWED }
+
+#define DECOMPRESS_FRONTEND_INIT(__i) { \
+ .inode = __i, .clt = COLLECTOR_INIT(), \
+ .backmost = true, }
+
+static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
+static DEFINE_MUTEX(z_pagemap_global_lock);
+
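+/*
+ * try to fill the compressed_pages[] slots of the current pcluster with
+ * pages already cached in the managed cache @mc (or PAGE_UNALLOCATED
+ * placeholders in the DELAYEDALLOC case); if every remaining slot can be
+ * served this way, downgrade to COLLECT_PRIMARY_FOLLOWED_NOINPLACE since
+ * no in-place file pages will be needed.
+ */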
+static void preload_compressed_pages(struct z_erofs_collector *clt,
+ struct address_space *mc,
+ enum z_erofs_cache_alloctype type,
+ struct list_head *pagepool)
+{
+ const struct z_erofs_pcluster *pcl = clt->pcl;
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+ struct page **pages = clt->compressedpages;
+ pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);
+ bool standalone = true;
+
+ if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
+ return;
+
+ for (; pages < pcl->compressed_pages + clusterpages; ++pages) {
+ struct page *page;
+ compressed_page_t t;
+
+ /* the compressed page was loaded before */
+ if (READ_ONCE(*pages))
+ continue;
+
+ page = find_get_page(mc, index);
+
+ if (page) {
+ t = tag_compressed_page_justfound(page);
+ } else if (type == DELAYEDALLOC) {
+ t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
+ } else { /* DONTALLOC */
+ if (standalone)
+ clt->compressedpages = pages;
+ standalone = false;
+ continue;
+ }
+
+ if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
+ continue;
+
+ if (page)
+ put_page(page);
+ }
+
+ if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */
+ clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
+}
+
+/* called by erofs_shrinker to get rid of all compressed_pages */
+int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *grp)
+{
+ struct z_erofs_pcluster *const pcl =
+ container_of(grp, struct z_erofs_pcluster, obj);
+ struct address_space *const mapping = MNGD_MAPPING(sbi);
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+ int i;
+
+ /*
+	 * the refcount of the workgroup is now frozen at 1,
+	 * therefore no need to worry about concurrent decompression users.
+ */
+ for (i = 0; i < clusterpages; ++i) {
+ struct page *page = pcl->compressed_pages[i];
+
+ if (!page)
+ continue;
+
+ /* block other users from reclaiming or migrating the page */
+ if (!trylock_page(page))
+ return -EBUSY;
+
+ if (page->mapping != mapping)
+ continue;
+
+ /* barrier is implied in the following 'unlock_page' */
+ WRITE_ONCE(pcl->compressed_pages[i], NULL);
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+
+ unlock_page(page);
+ put_page(page);
+ }
+ return 0;
+}
+
+int erofs_try_to_free_cached_page(struct address_space *mapping,
+ struct page *page)
+{
+ struct z_erofs_pcluster *const pcl = (void *)page_private(page);
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+ int ret = 0; /* 0 - busy */
+
+ if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
+ unsigned int i;
+
+ for (i = 0; i < clusterpages; ++i) {
+ if (pcl->compressed_pages[i] == page) {
+ WRITE_ONCE(pcl->compressed_pages[i], NULL);
+ ret = 1;
+ break;
+ }
+ }
+ erofs_workgroup_unfreeze(&pcl->obj, 1);
+
+ if (ret) {
+ ClearPagePrivate(page);
+ put_page(page);
+ }
+ }
+ return ret;
+}
+
+/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
+static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
+ struct page *page)
+{
+ struct z_erofs_pcluster *const pcl = clt->pcl;
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+
+ while (clt->compressedpages < pcl->compressed_pages + clusterpages) {
+ if (!cmpxchg(clt->compressedpages++, NULL, page))
+ return true;
+ }
+ return false;
+}
+
+/* callers must hold the collection lock */
+static int z_erofs_attach_page(struct z_erofs_collector *clt,
+ struct page *page,
+ enum z_erofs_page_type type)
+{
+ int ret;
+ bool occupied;
+
+	/* give priority to in-place I/O */
+ if (clt->mode >= COLLECT_PRIMARY &&
+ type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
+ z_erofs_try_inplace_io(clt, page))
+ return 0;
+
+ ret = z_erofs_pagevec_enqueue(&clt->vector,
+ page, type, &occupied);
+ clt->cl->vcnt += (unsigned int)ret;
+
+ return ret ? 0 : -EAGAIN;
+}
+
+static enum z_erofs_collectmode
+try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
+ z_erofs_next_pcluster_t *owned_head)
+{
+	/* let's claim the following types of pclusters */
+retry:
+ if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
+ /* type 1, nil pcluster */
+ if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
+ *owned_head) != Z_EROFS_PCLUSTER_NIL)
+ goto retry;
+
+ *owned_head = &pcl->next;
+ /* lucky, I am the followee :) */
+ return COLLECT_PRIMARY_FOLLOWED;
+ } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
+ /*
+		 * type 2, link to the end of an existing open chain,
+ * be careful that its submission itself is governed
+ * by the original owned chain.
+ */
+ if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+ *owned_head) != Z_EROFS_PCLUSTER_TAIL)
+ goto retry;
+ *owned_head = Z_EROFS_PCLUSTER_TAIL;
+ return COLLECT_PRIMARY_HOOKED;
+ }
+ return COLLECT_PRIMARY; /* :( better luck next time */
+}
+
+static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
+{
+ struct erofs_workgroup *grp;
+ struct z_erofs_pcluster *pcl;
+ struct z_erofs_collection *cl;
+ unsigned int length;
+ bool tag;
+
+ grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
+ if (!grp)
+ return -ENOENT;
+
+ pcl = container_of(grp, struct z_erofs_pcluster, obj);
+ if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
+ DBG_BUGON(1);
+ erofs_workgroup_put(grp);
+ return -EFSCORRUPTED;
+ }
+
+ cl = z_erofs_primarycollection(pcl);
+ if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
+ DBG_BUGON(1);
+ erofs_workgroup_put(grp);
+ return -EFSCORRUPTED;
+ }
+
+ length = READ_ONCE(pcl->length);
+ if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
+ if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
+ DBG_BUGON(1);
+ erofs_workgroup_put(grp);
+ return -EFSCORRUPTED;
+ }
+ } else {
+ unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
+
+ if (map->m_flags & EROFS_MAP_FULL_MAPPED)
+ llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;
+
+ while (llen > length &&
+ length != cmpxchg_relaxed(&pcl->length, length, llen)) {
+ cpu_relax();
+ length = READ_ONCE(pcl->length);
+ }
+ }
+ mutex_lock(&cl->lock);
+ /* used to check tail merging loop due to corrupted images */
+ if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ clt->tailpcl = pcl;
+ clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head);
+ /* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */
+ if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ clt->tailpcl = NULL;
+ clt->pcl = pcl;
+ clt->cl = cl;
+ return 0;
+}
+
+static int z_erofs_register_collection(struct z_erofs_collector *clt,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
+{
+ struct z_erofs_pcluster *pcl;
+ struct z_erofs_collection *cl;
+ int err;
+
+ /* no available workgroup, let's allocate one */
+ pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
+ if (!pcl)
+ return -ENOMEM;
+
+ z_erofs_pcluster_init_always(pcl);
+ pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+
+ pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
+ (map->m_flags & EROFS_MAP_FULL_MAPPED ?
+ Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
+
+ if (map->m_flags & EROFS_MAP_ZIPPED)
+ pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4;
+ else
+ pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
+
+ pcl->clusterbits = EROFS_I(inode)->z_physical_clusterbits[0];
+ pcl->clusterbits -= PAGE_SHIFT;
+
+ /* new pclusters should be claimed as type 1, primary and followed */
+ pcl->next = clt->owned_head;
+ clt->mode = COLLECT_PRIMARY_FOLLOWED;
+
+ cl = z_erofs_primarycollection(pcl);
+ cl->pageofs = map->m_la & ~PAGE_MASK;
+
+ /*
+ * lock all primary followed works before they become visible to others
+ * and mutex_trylock *never* fails for a new pcluster.
+ */
+ mutex_trylock(&cl->lock);
+
+ err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+ if (err) {
+ mutex_unlock(&cl->lock);
+ kmem_cache_free(pcluster_cachep, pcl);
+ return -EAGAIN;
+ }
+ /* used to check tail merging loop due to corrupted images */
+ if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ clt->tailpcl = pcl;
+ clt->owned_head = &pcl->next;
+ clt->pcl = pcl;
+ clt->cl = cl;
+ return 0;
+}
+
+static int z_erofs_collector_begin(struct z_erofs_collector *clt,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
+{
+ int ret;
+
+ DBG_BUGON(clt->cl);
+
+ /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
+ DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
+ DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+
+ if (!PAGE_ALIGNED(map->m_pa)) {
+ DBG_BUGON(1);
+ return -EINVAL;
+ }
+
+repeat:
+ ret = z_erofs_lookup_collection(clt, inode, map);
+ if (ret == -ENOENT) {
+ ret = z_erofs_register_collection(clt, inode, map);
+
+ /* someone registered at the same time, give another try */
+ if (ret == -EAGAIN) {
+ cond_resched();
+ goto repeat;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
+ clt->cl->pagevec, clt->cl->vcnt);
+
+ clt->compressedpages = clt->pcl->compressed_pages;
+ if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
+ clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
+ return 0;
+}
+
+/*
+ * keep in mind that no referenced pclusters will be freed:
+ * pclusters are only freed after an RCU grace period.
+ */
+static void z_erofs_rcu_callback(struct rcu_head *head)
+{
+ struct z_erofs_collection *const cl =
+ container_of(head, struct z_erofs_collection, rcu);
+
+ kmem_cache_free(pcluster_cachep,
+ container_of(cl, struct z_erofs_pcluster,
+ primary_collection));
+}
+
+void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
+{
+ struct z_erofs_pcluster *const pcl =
+ container_of(grp, struct z_erofs_pcluster, obj);
+ struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
+
+ call_rcu(&cl->rcu, z_erofs_rcu_callback);
+}
+
+static void z_erofs_collection_put(struct z_erofs_collection *cl)
+{
+ struct z_erofs_pcluster *const pcl =
+ container_of(cl, struct z_erofs_pcluster, primary_collection);
+
+ erofs_workgroup_put(&pcl->obj);
+}
+
+static bool z_erofs_collector_end(struct z_erofs_collector *clt)
+{
+ struct z_erofs_collection *cl = clt->cl;
+
+ if (!cl)
+ return false;
+
+ z_erofs_pagevec_ctor_exit(&clt->vector, false);
+ mutex_unlock(&cl->lock);
+
+ /*
+ * once all pending pages are added, don't hold the pcluster
+ * reference any longer if it isn't hosted by ourselves.
+ */
+ if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
+ z_erofs_collection_put(cl);
+
+ clt->cl = NULL;
+ return true;
+}
+
+static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
+ unsigned int cachestrategy,
+ erofs_off_t la)
+{
+ if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
+ return false;
+
+ if (fe->backmost)
+ return true;
+
+ return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
+ la < fe->headoffset;
+}
+
+static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
+ struct page *page,
+ struct list_head *pagepool)
+{
+ struct inode *const inode = fe->inode;
+ struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+ struct erofs_map_blocks *const map = &fe->map;
+ struct z_erofs_collector *const clt = &fe->clt;
+ const loff_t offset = page_offset(page);
+ bool tight = true;
+
+ enum z_erofs_cache_alloctype cache_strategy;
+ enum z_erofs_page_type page_type;
+ unsigned int cur, end, spiltted, index;
+ int err = 0;
+
+ /* register locked file pages as online pages in pack */
+ z_erofs_onlinepage_init(page);
+
+ spiltted = 0;
+ end = PAGE_SIZE;
+repeat:
+ cur = end - 1;
+
+ /* lucky, within the range of the current map_blocks */
+ if (offset + cur >= map->m_la &&
+ offset + cur < map->m_la + map->m_llen) {
+ /* didn't get a valid collection previously (very rare) */
+ if (!clt->cl)
+ goto restart_now;
+ goto hitted;
+ }
+
+ /* go on to the next map_blocks */
+ erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
+
+ if (z_erofs_collector_end(clt))
+ fe->backmost = false;
+
+ map->m_la = offset + cur;
+ map->m_llen = 0;
+ err = z_erofs_map_blocks_iter(inode, map, 0);
+ if (err)
+ goto err_out;
+
+restart_now:
+ if (!(map->m_flags & EROFS_MAP_MAPPED))
+ goto hitted;
+
+ err = z_erofs_collector_begin(clt, inode, map);
+ if (err)
+ goto err_out;
+
+ /* preload all compressed pages (can downgrade the role if necessary) */
+ if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la))
+ cache_strategy = DELAYEDALLOC;
+ else
+ cache_strategy = DONTALLOC;
+
+ preload_compressed_pages(clt, MNGD_MAPPING(sbi),
+ cache_strategy, pagepool);
+
+hitted:
+ /*
+ * Ensure the current partial page belongs to this submit chain rather
+ * than other concurrent submit chains or the noio (bypass) chain, since
+ * those chains are handled asynchronously and thus the page cannot be
+ * used for inplace I/O or pagevec (it should be processed in strict order).
+ */
+ tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
+ clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
+
+ cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+ if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ zero_user_segment(page, cur, end);
+ goto next_part;
+ }
+
+ /* let's derive page type */
+ page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
+ (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+ (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+ Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
+
+ if (cur)
+ tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
+
+retry:
+ err = z_erofs_attach_page(clt, page, page_type);
+ /* should allocate an additional staging page for pagevec */
+ if (err == -EAGAIN) {
+ struct page *const newpage =
+ erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
+
+ newpage->mapping = Z_EROFS_MAPPING_STAGING;
+ err = z_erofs_attach_page(clt, newpage,
+ Z_EROFS_PAGE_TYPE_EXCLUSIVE);
+ if (!err)
+ goto retry;
+ }
+
+ if (err)
+ goto err_out;
+
+ index = page->index - (map->m_la >> PAGE_SHIFT);
+
+ z_erofs_onlinepage_fixup(page, index, true);
+
+ /* bump up the number of spiltted parts of a page */
+ ++spiltted;
+ /* also update nr_pages */
+ clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
+next_part:
+ /* can be used for verification */
+ map->m_llen = offset + cur - map->m_la;
+
+ end = cur;
+ if (end > 0)
+ goto repeat;
+
+out:
+ z_erofs_onlinepage_endio(page);
+
+ erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
+ __func__, page, spiltted, map->m_llen);
+ return err;
+
+ /* if some error occurred while processing this page */
+err_out:
+ SetPageError(page);
+ goto out;
+}
+
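+/*
+ * pending_bios starts at zero: each bio completion adds -1 and the
+ * submitter finally adds the total number of issued bios, so whichever
+ * side brings the sum back to zero kicks off decompression exactly once.
+ */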
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+ bool sync, int bios)
+{
+ /* wake up the caller thread for sync decompression */
+ if (sync) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&io->u.wait.lock, flags);
+ if (!atomic_add_return(bios, &io->pending_bios))
+ wake_up_locked(&io->u.wait);
+ spin_unlock_irqrestore(&io->u.wait.lock, flags);
+ return;
+ }
+
+ if (!atomic_add_return(bios, &io->pending_bios))
+ queue_work(z_erofs_workqueue, &io->u.work);
+}
+
+static void z_erofs_decompressqueue_endio(struct bio *bio)
+{
+ tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+ struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
+ blk_status_t err = bio->bi_status;
+ struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ struct page *page = bvec->bv_page;
+
+ DBG_BUGON(PageUptodate(page));
+ DBG_BUGON(!page->mapping);
+
+ if (err)
+ SetPageError(page);
+
+ if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+ if (!err)
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ }
+ z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
+ bio_put(bio);
+}
+
+static int z_erofs_decompress_pcluster(struct super_block *sb,
+ struct z_erofs_pcluster *pcl,
+ struct list_head *pagepool)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+ struct z_erofs_pagevec_ctor ctor;
+ unsigned int i, outputsize, llen, nr_pages;
+ struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
+ struct page **pages, **compressed_pages, *page;
+
+ enum z_erofs_page_type page_type;
+ bool overlapped, partial;
+ struct z_erofs_collection *cl;
+ int err;
+
+ might_sleep();
+ cl = z_erofs_primarycollection(pcl);
+ DBG_BUGON(!READ_ONCE(cl->nr_pages));
+
+ mutex_lock(&cl->lock);
+ nr_pages = cl->nr_pages;
+
+ if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
+ pages = pages_onstack;
+ } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
+ mutex_trylock(&z_pagemap_global_lock)) {
+ pages = z_pagemap_global;
+ } else {
+ gfp_t gfp_flags = GFP_KERNEL;
+
+ if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
+ gfp_flags |= __GFP_NOFAIL;
+
+ pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+ gfp_flags);
+
+ /* fallback to global pagemap for the lowmem scenario */
+ if (!pages) {
+ mutex_lock(&z_pagemap_global_lock);
+ pages = z_pagemap_global;
+ }
+ }
+
+ for (i = 0; i < nr_pages; ++i)
+ pages[i] = NULL;
+
+ err = 0;
+ z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
+ cl->pagevec, 0);
+
+ for (i = 0; i < cl->vcnt; ++i) {
+ unsigned int pagenr;
+
+ page = z_erofs_pagevec_dequeue(&ctor, &page_type);
+
+ /* all pages in pagevec ought to be valid */
+ DBG_BUGON(!page);
+ DBG_BUGON(!page->mapping);
+
+ if (z_erofs_put_stagingpage(pagepool, page))
+ continue;
+
+ if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
+ pagenr = 0;
+ else
+ pagenr = z_erofs_onlinepage_index(page);
+
+ DBG_BUGON(pagenr >= nr_pages);
+
+ /*
+ * currently EROFS doesn't support multiref (dedup),
+ * so error out here on a multiref page.
+ */
+ if (pages[pagenr]) {
+ DBG_BUGON(1);
+ SetPageError(pages[pagenr]);
+ z_erofs_onlinepage_endio(pages[pagenr]);
+ err = -EFSCORRUPTED;
+ }
+ pages[pagenr] = page;
+ }
+ z_erofs_pagevec_ctor_exit(&ctor, true);
+
+ overlapped = false;
+ compressed_pages = pcl->compressed_pages;
+
+ for (i = 0; i < clusterpages; ++i) {
+ unsigned int pagenr;
+
+ page = compressed_pages[i];
+
+ /* all compressed pages ought to be valid */
+ DBG_BUGON(!page);
+ DBG_BUGON(!page->mapping);
+
+ if (!z_erofs_page_is_staging(page)) {
+ if (erofs_page_is_managed(sbi, page)) {
+ if (!PageUptodate(page))
+ err = -EIO;
+ continue;
+ }
+
+ /*
+ * only non-head pages can be selected
+ * for inplace decompression
+ */
+ pagenr = z_erofs_onlinepage_index(page);
+
+ DBG_BUGON(pagenr >= nr_pages);
+ if (pages[pagenr]) {
+ DBG_BUGON(1);
+ SetPageError(pages[pagenr]);
+ z_erofs_onlinepage_endio(pages[pagenr]);
+ err = -EFSCORRUPTED;
+ }
+ pages[pagenr] = page;
+
+ overlapped = true;
+ }
+
+ /* PG_error needs checking for in-place and staging pages */
+ if (PageError(page)) {
+ DBG_BUGON(PageUptodate(page));
+ err = -EIO;
+ }
+ }
+
+ if (err)
+ goto out;
+
+ llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
+ if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
+ outputsize = llen;
+ partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
+ } else {
+ outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
+ partial = true;
+ }
+
+ err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
+ .sb = sb,
+ .in = compressed_pages,
+ .out = pages,
+ .pageofs_out = cl->pageofs,
+ .inputsize = PAGE_SIZE,
+ .outputsize = outputsize,
+ .alg = pcl->algorithmformat,
+ .inplace_io = overlapped,
+ .partial_decoding = partial
+ }, pagepool);
+
+out:
+ /* must handle all compressed pages before ending the decompressed pages */
+ for (i = 0; i < clusterpages; ++i) {
+ page = compressed_pages[i];
+
+ if (erofs_page_is_managed(sbi, page))
+ continue;
+
+ /* recycle all individual staging pages */
+ (void)z_erofs_put_stagingpage(pagepool, page);
+
+ WRITE_ONCE(compressed_pages[i], NULL);
+ }
+
+ for (i = 0; i < nr_pages; ++i) {
+ page = pages[i];
+ if (!page)
+ continue;
+
+ DBG_BUGON(!page->mapping);
+
+ /* recycle all individual staging pages */
+ if (z_erofs_put_stagingpage(pagepool, page))
+ continue;
+
+ if (err < 0)
+ SetPageError(page);
+
+ z_erofs_onlinepage_endio(page);
+ }
+
+ if (pages == z_pagemap_global)
+ mutex_unlock(&z_pagemap_global_lock);
+ else if (pages != pages_onstack)
+ kvfree(pages);
+
+ cl->nr_pages = 0;
+ cl->vcnt = 0;
+
+ /* all cl locks MUST be taken before the following line */
+ WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
+
+ /* all cl locks SHOULD be released right now */
+ mutex_unlock(&cl->lock);
+
+ z_erofs_collection_put(cl);
+ return err;
+}
+
+static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+ struct list_head *pagepool)
+{
+ z_erofs_next_pcluster_t owned = io->head;
+
+ while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
+ struct z_erofs_pcluster *pcl;
+
+ /* it's impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
+ DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
+
+ /* it's impossible that 'owned' equals NULL */
+ DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
+
+ pcl = container_of(owned, struct z_erofs_pcluster, next);
+ owned = READ_ONCE(pcl->next);
+
+ z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
+ }
+}
+
+static void z_erofs_decompressqueue_work(struct work_struct *work)
+{
+ struct z_erofs_decompressqueue *bgq =
+ container_of(work, struct z_erofs_decompressqueue, u.work);
+ LIST_HEAD(pagepool);
+
+ DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ z_erofs_decompress_queue(bgq, &pagepool);
+
+ put_pages_list(&pagepool);
+ kvfree(bgq);
+}
+
+static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
+ unsigned int nr,
+ struct list_head *pagepool,
+ struct address_space *mc,
+ gfp_t gfp)
+{
+ const pgoff_t index = pcl->obj.index;
+ bool tocache = false;
+
+ struct address_space *mapping;
+ struct page *oldpage, *page;
+
+ compressed_page_t t;
+ int justfound;
+
+repeat:
+ page = READ_ONCE(pcl->compressed_pages[nr]);
+ oldpage = page;
+
+ if (!page)
+ goto out_allocpage;
+
+ /*
+ * the cached page has not been allocated yet and
+ * a placeholder is out there; prepare it now.
+ */
+ if (page == PAGE_UNALLOCATED) {
+ tocache = true;
+ goto out_allocpage;
+ }
+
+ /* process the target tagged pointer */
+ t = tagptr_init(compressed_page_t, page);
+ justfound = tagptr_unfold_tags(t);
+ page = tagptr_unfold_ptr(t);
+
+ mapping = READ_ONCE(page->mapping);
+
+ /*
+ * unmanaged (file) pages are all locked solidly,
+ * therefore it is impossible for `mapping' to be NULL.
+ */
+ if (mapping && mapping != mc)
+ /* ought to be unmanaged pages */
+ goto out;
+
+ lock_page(page);
+
+ /* only true if page reclaim goes wrong, should never happen */
+ DBG_BUGON(justfound && PagePrivate(page));
+
+ /* the page is still in the managed cache */
+ if (page->mapping == mc) {
+ WRITE_ONCE(pcl->compressed_pages[nr], page);
+
+ ClearPageError(page);
+ if (!PagePrivate(page)) {
+ /*
+ * under the current restriction, the page cannot be
+ * !PagePrivate(page) if it is already
+ * in compressed_pages[].
+ */
+ DBG_BUGON(!justfound);
+
+ justfound = 0;
+ set_page_private(page, (unsigned long)pcl);
+ SetPagePrivate(page);
+ }
+
+ /* no need to submit io if it is already up-to-date */
+ if (PageUptodate(page)) {
+ unlock_page(page);
+ page = NULL;
+ }
+ goto out;
+ }
+
+ /*
+ * the managed page has been truncated; it's unsafe to
+ * reuse this one, so let's allocate a new cache-managed page.
+ */
+ DBG_BUGON(page->mapping);
+ DBG_BUGON(!justfound);
+
+ tocache = true;
+ unlock_page(page);
+ put_page(page);
+out_allocpage:
+ page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
+ if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+ /* a non-LRU / non-movable temporary page is needed */
+ page->mapping = Z_EROFS_MAPPING_STAGING;
+ tocache = false;
+ }
+
+ if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
+ if (tocache) {
+ /* since it was added to the managed cache successfully */
+ unlock_page(page);
+ put_page(page);
+ } else {
+ list_add(&page->lru, pagepool);
+ }
+ cond_resched();
+ goto repeat;
+ }
+ set_page_private(page, (unsigned long)pcl);
+ SetPagePrivate(page);
+out: /* the only exit (for tracing and debugging) */
+ return page;
+}
+
+static struct z_erofs_decompressqueue *
+jobqueue_init(struct super_block *sb,
+ struct z_erofs_decompressqueue *fgq, bool *fg)
+{
+ struct z_erofs_decompressqueue *q;
+
+ if (fg && !*fg) {
+ q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
+ if (!q) {
+ *fg = true;
+ goto fg_out;
+ }
+ INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
+ } else {
+fg_out:
+ q = fgq;
+ init_waitqueue_head(&fgq->u.wait);
+ atomic_set(&fgq->pending_bios, 0);
+ }
+ q->sb = sb;
+ q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+ return q;
+}
+
+/* define decompression jobqueue types */
+enum {
+ JQ_BYPASS,
+ JQ_SUBMIT,
+ NR_JOBQUEUES,
+};
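+
+/*
+ * JQ_BYPASS collects pclusters whose compressed pages are all already
+ * up-to-date in the managed cache (no read I/O needed), while JQ_SUBMIT
+ * collects pclusters that still require bio submission.
+ */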
+
+static void *jobqueueset_init(struct super_block *sb,
+ struct z_erofs_decompressqueue *q[],
+ struct z_erofs_decompressqueue *fgq, bool *fg)
+{
+ /*
+ * if managed cache is enabled, a bypass jobqueue is needed:
+ * no read from the device is needed for pclusters in this queue.
+ */
+ q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
+ q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
+
+ return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
+}
+
+static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
+ z_erofs_next_pcluster_t qtail[],
+ z_erofs_next_pcluster_t owned_head)
+{
+ z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
+ z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
+
+ DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ if (owned_head == Z_EROFS_PCLUSTER_TAIL)
+ owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+
+ WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
+
+ WRITE_ONCE(*submit_qtail, owned_head);
+ WRITE_ONCE(*bypass_qtail, &pcl->next);
+
+ qtail[JQ_BYPASS] = &pcl->next;
+}
+
+static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
+ unsigned int nr_bios, bool force_fg)
+{
+ /*
+ * although background is preferred, no one is pending for submission,
+ * so don't queue the decompression work; drop the queue directly instead.
+ */
+ if (force_fg || nr_bios)
+ return false;
+
+ kvfree(q[JQ_SUBMIT]);
+ return true;
+}
+
+static bool z_erofs_submit_queue(struct super_block *sb,
+ z_erofs_next_pcluster_t owned_head,
+ struct list_head *pagepool,
+ struct z_erofs_decompressqueue *fgq,
+ bool *force_fg)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
+ struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
+ struct bio *bio;
+ void *bi_private;
+ /* bio is initially NULL, so last_index won't be read before being set */
+ pgoff_t uninitialized_var(last_index);
+ bool force_submit = false;
+ unsigned int nr_bios;
+
+ if (owned_head == Z_EROFS_PCLUSTER_TAIL)
+ return false;
+
+ force_submit = false;
+ bio = NULL;
+ nr_bios = 0;
+ bi_private = jobqueueset_init(sb, q, fgq, force_fg);
+ qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
+ qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
+
+ /* by default, all need io submission */
+ q[JQ_SUBMIT]->head = owned_head;
+
+ do {
+ struct z_erofs_pcluster *pcl;
+ unsigned int clusterpages;
+ pgoff_t first_index;
+ struct page *page;
+ unsigned int i = 0, bypass = 0;
+ int err;
+
+ /* 'owned_head' can never equal either of the following */
+ DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
+
+ pcl = container_of(owned_head, struct z_erofs_pcluster, next);
+
+ clusterpages = BIT(pcl->clusterbits);
+
+ /* close the main owned chain first */
+ owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+ Z_EROFS_PCLUSTER_TAIL_CLOSED);
+
+ first_index = pcl->obj.index;
+ force_submit |= (first_index != last_index + 1);
+
+repeat:
+ page = pickup_page_for_submission(pcl, i, pagepool,
+ MNGD_MAPPING(sbi),
+ GFP_NOFS);
+ if (!page) {
+ force_submit = true;
+ ++bypass;
+ goto skippage;
+ }
+
+ if (bio && force_submit) {
+submit_bio_retry:
+ submit_bio(bio);
+ bio = NULL;
+ }
+
+ if (!bio) {
+ bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+
+ bio->bi_end_io = z_erofs_decompressqueue_endio;
+ bio_set_dev(bio, sb->s_bdev);
+ bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
+ LOG_SECTORS_PER_BLOCK;
+ bio->bi_private = bi_private;
+ bio->bi_opf = REQ_OP_READ;
+
+ ++nr_bios;
+ }
+
+ err = bio_add_page(bio, page, PAGE_SIZE, 0);
+ if (err < PAGE_SIZE)
+ goto submit_bio_retry;
+
+ force_submit = false;
+ last_index = first_index + i;
+skippage:
+ if (++i < clusterpages)
+ goto repeat;
+
+ if (bypass < clusterpages)
+ qtail[JQ_SUBMIT] = &pcl->next;
+ else
+ move_to_bypass_jobqueue(pcl, qtail, owned_head);
+ } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
+
+ if (bio)
+ submit_bio(bio);
+
+ if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
+ return true;
+
+ z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
+ return true;
+}
+
+static void z_erofs_runqueue(struct super_block *sb,
+ struct z_erofs_collector *clt,
+ struct list_head *pagepool, bool force_fg)
+{
+ struct z_erofs_decompressqueue io[NR_JOBQUEUES];
+
+ if (!z_erofs_submit_queue(sb, clt->owned_head,
+ pagepool, io, &force_fg))
+ return;
+
+ /* handle bypass queue (no i/o pclusters) immediately */
+ z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
+
+ if (!force_fg)
+ return;
+
+ /* wait until all bios are completed */
+ io_wait_event(io[JQ_SUBMIT].u.wait,
+ !atomic_read(&io[JQ_SUBMIT].pending_bios));
+
+ /* handle synchronous decompress queue in the caller context */
+ z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
+}
+
+static int z_erofs_readpage(struct file *file, struct page *page)
+{
+ struct inode *const inode = page->mapping->host;
+ struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+ int err;
+ LIST_HEAD(pagepool);
+
+ trace_erofs_readpage(page, false);
+
+ f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
+
+ err = z_erofs_do_read_page(&f, page, &pagepool);
+ (void)z_erofs_collector_end(&f.clt);
+
+ /* if some compressed clusters are ready, they need to be submitted anyway */
+ z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true);
+
+ if (err)
+ erofs_err(inode->i_sb, "failed to read, err [%d]", err);
+
+ if (f.map.mpage)
+ put_page(f.map.mpage);
+
+ /* clean up the remaining free pages */
+ put_pages_list(&pagepool);
+ return err;
+}
+
+static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
+ unsigned int nr)
+{
+ return nr <= sbi->max_sync_decompress_pages;
+}
+
+static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
+ struct list_head *pages, unsigned int nr_pages)
+{
+ struct inode *const inode = mapping->host;
+ struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+
+ bool sync = should_decompress_synchronously(sbi, nr_pages);
+ struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+ gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
+ struct page *head = NULL;
+ LIST_HEAD(pagepool);
+
+ trace_erofs_readpages(mapping->host, lru_to_page(pages),
+ nr_pages, false);
+
+ f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
+
+ for (; nr_pages; --nr_pages) {
+ struct page *page = lru_to_page(pages);
+
+ prefetchw(&page->flags);
+ list_del(&page->lru);
+
+ /*
+ * A pure asynchronous readahead is indicated if
+ * a PG_readahead marked page is hit first.
+ * Let's also do asynchronous decompression for this case.
+ */
+ sync &= !(PageReadahead(page) && !head);
+
+ if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
+ list_add(&page->lru, &pagepool);
+ continue;
+ }
+
+ set_page_private(page, (unsigned long)head);
+ head = page;
+ }
+
+ while (head) {
+ struct page *page = head;
+ int err;
+
+ /* traversal in reverse order */
+ head = (void *)page_private(page);
+
+ err = z_erofs_do_read_page(&f, page, &pagepool);
+ if (err)
+ erofs_err(inode->i_sb,
+ "readahead error at page %lu @ nid %llu",
+ page->index, EROFS_I(inode)->nid);
+ put_page(page);
+ }
+
+ (void)z_erofs_collector_end(&f.clt);
+
+ z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync);
+
+ if (f.map.mpage)
+ put_page(f.map.mpage);
+
+ /* clean up the remaining free pages */
+ put_pages_list(&pagepool);
+ return 0;
+}
+
+const struct address_space_operations z_erofs_aops = {
+ .readpage = z_erofs_readpage,
+ .readpages = z_erofs_readpages,
+};
+
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
new file mode 100644
index 000000000000..7824f5563a55
--- /dev/null
+++ b/fs/erofs/zdata.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_ZDATA_H
+#define __EROFS_FS_ZDATA_H
+
+#include "internal.h"
+#include "zpvec.h"
+
+#define Z_EROFS_NR_INLINE_PAGEVECS 3
+
+/*
+ * Structure fields follow one of the following exclusion rules.
+ *
+ * I: Modifiable by initialization/destruction paths and read-only
+ * for everyone else;
+ *
+ * L: Field should be protected by pageset lock;
+ *
+ * A: Field should be accessed / updated atomically for parallelized code.
+ */
+struct z_erofs_collection {
+ struct mutex lock;
+
+ /* I: page offset of start position of decompression */
+ unsigned short pageofs;
+
+ /* L: maximum relative page index in pagevec[] */
+ unsigned short nr_pages;
+
+ /* L: total number of pages in pagevec[] */
+ unsigned int vcnt;
+
+ union {
+ /* L: inline a certain number of pagevecs for bootstrap */
+ erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
+
+ /* I: can be used to free the pcluster by RCU. */
+ struct rcu_head rcu;
+ };
+};
+
+#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
+#define Z_EROFS_PCLUSTER_LENGTH_BIT 1
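+
+/*
+ * pcl->length packs (decompressed length << Z_EROFS_PCLUSTER_LENGTH_BIT)
+ * with the full-length bit, e.g. a fully-mapped 16384-byte extent is
+ * recorded as (16384 << 1) | 1 = 0x8001.
+ */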
+
+/*
+ * let's leave a type here in case of introducing
+ * another tagged pointer later.
+ */
+typedef void *z_erofs_next_pcluster_t;
+
+struct z_erofs_pcluster {
+ struct erofs_workgroup obj;
+ struct z_erofs_collection primary_collection;
+
+ /* A: point to next chained pcluster or TAILs */
+ z_erofs_next_pcluster_t next;
+
+ /* A: compressed pages (including multi-usage pages) */
+ struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
+
+ /* A: lower limit of decompressed length and if full length or not */
+ unsigned int length;
+
+ /* I: compression algorithm format */
+ unsigned char algorithmformat;
+ /* I: bit shift of physical cluster size */
+ unsigned char clusterbits;
+};
+
+#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
+
+/* let's avoid the valid 32-bit kernel addresses */
+
+/* the chained workgroup hasn't submitted I/O (still open) */
+#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
+/* the chained workgroup has already submitted I/O */
+#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD)
+
+#define Z_EROFS_PCLUSTER_NIL (NULL)
+
+#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster)
+
+struct z_erofs_decompressqueue {
+ struct super_block *sb;
+ atomic_t pending_bios;
+ z_erofs_next_pcluster_t head;
+
+ union {
+ wait_queue_head_t wait;
+ struct work_struct work;
+ } u;
+};
+
+#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
+static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
+ struct page *page)
+{
+ return page->mapping == MNGD_MAPPING(sbi);
+}
+
+#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
+#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
+#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
+
+/*
+ * waiters (aka. ongoing_packs): number of remaining users to unlock the page
+ * sub-index: 0 for a partial page, >= 1 for a full-page sub-index
+ */
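+/*
+ * For illustration: the atomic value packs
+ * (sub-index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | waiters, so a page at
+ * sub-index 3 with two remaining waiters is stored as (3 << 2) | 2 = 0xe;
+ * each z_erofs_onlinepage_endio() decrements it and the page is unlocked
+ * once the low count bits reach zero.
+ */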
+typedef atomic_t z_erofs_onlinepage_t;
+
+/* type punning */
+union z_erofs_onlinepage_converter {
+ z_erofs_onlinepage_t *o;
+ unsigned long *v;
+};
+
+static inline unsigned int z_erofs_onlinepage_index(struct page *page)
+{
+ union z_erofs_onlinepage_converter u;
+
+ DBG_BUGON(!PagePrivate(page));
+ u.v = &page_private(page);
+
+ return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+}
+
+static inline void z_erofs_onlinepage_init(struct page *page)
+{
+ union {
+ z_erofs_onlinepage_t o;
+ unsigned long v;
+ /* keep from being unlocked in advance */
+ } u = { .o = ATOMIC_INIT(1) };
+
+ set_page_private(page, u.v);
+ smp_wmb();
+ SetPagePrivate(page);
+}
+
+static inline void z_erofs_onlinepage_fixup(struct page *page,
+ uintptr_t index, bool down)
+{
+ unsigned long *p, o, v, id;
+repeat:
+ p = &page_private(page);
+ o = READ_ONCE(*p);
+
+ id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+ if (id) {
+ if (!index)
+ return;
+
+ DBG_BUGON(id != index);
+ }
+
+ v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+ ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+ if (cmpxchg(p, o, v) != o)
+ goto repeat;
+}
+
+static inline void z_erofs_onlinepage_endio(struct page *page)
+{
+ union z_erofs_onlinepage_converter u;
+ unsigned int v;
+
+ DBG_BUGON(!PagePrivate(page));
+ u.v = &page_private(page);
+
+ v = atomic_dec_return(u.o);
+ if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
+ ClearPagePrivate(page);
+ if (!PageError(page))
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
+}
+
+#define Z_EROFS_VMAP_ONSTACK_PAGES \
+ min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
+#define Z_EROFS_VMAP_GLOBAL_PAGES 2048
+
+#endif
+
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
new file mode 100644
index 000000000000..736db3a4cdef
--- /dev/null
+++ b/fs/erofs/zmap.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018-2019 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "internal.h"
+#include <asm/unaligned.h>
+#include <trace/events/erofs.h>
+
+int z_erofs_fill_inode(struct inode *inode)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+
+ if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
+ vi->z_advise = 0;
+ vi->z_algorithmtype[0] = 0;
+ vi->z_algorithmtype[1] = 0;
+ vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
+ vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
+ vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
+ set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+ }
+
+ inode->i_mapping->a_ops = &z_erofs_aops;
+ return 0;
+}
+
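+/*
+ * h_clusterbits layout as parsed below: bits 0-2 hold the logical
+ * clusterbits delta over LOG_BLOCK_SIZE, bits 3-4 and bits 5-7 hold
+ * the two physical clusterbits deltas over the logical value.
+ */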
+static int z_erofs_fill_inode_lazy(struct inode *inode)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct super_block *const sb = inode->i_sb;
+ int err;
+ erofs_off_t pos;
+ struct page *page;
+ void *kaddr;
+ struct z_erofs_map_header *h;
+
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+ return 0;
+
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+
+ err = 0;
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+ goto out_unlock;
+
+ DBG_BUGON(vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
+
+ pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
+ vi->xattr_isize, 8);
+ page = erofs_get_meta_page(sb, erofs_blknr(pos));
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out_unlock;
+ }
+
+ kaddr = kmap_atomic(page);
+
+ h = kaddr + erofs_blkoff(pos);
+ vi->z_advise = le16_to_cpu(h->h_advise);
+ vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
+ vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
+
+ if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
+ erofs_err(sb, "unknown compression format %u for nid %llu, please upgrade kernel",
+ vi->z_algorithmtype[0], vi->nid);
+ err = -EOPNOTSUPP;
+ goto unmap_done;
+ }
+
+ vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
+ vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
+ ((h->h_clusterbits >> 3) & 3);
+
+ if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
+ erofs_err(sb, "unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
+ vi->z_physical_clusterbits[0], vi->nid);
+ err = -EOPNOTSUPP;
+ goto unmap_done;
+ }
+
+ vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
+ ((h->h_clusterbits >> 5) & 7);
+ set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+unmap_done:
+ kunmap_atomic(kaddr);
+ unlock_page(page);
+ put_page(page);
+out_unlock:
+ clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
+ return err;
+}
+
+struct z_erofs_maprecorder {
+ struct inode *inode;
+ struct erofs_map_blocks *map;
+ void *kaddr;
+
+ unsigned long lcn;
+ /* compression extent information gathered */
+ u8 type;
+ u16 clusterofs;
+ u16 delta[2];
+ erofs_blk_t pblk;
+};
+
+static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
+ erofs_blk_t eblk)
+{
+ struct super_block *const sb = m->inode->i_sb;
+ struct erofs_map_blocks *const map = m->map;
+ struct page *mpage = map->mpage;
+
+ if (mpage) {
+ if (mpage->index == eblk) {
+ if (!m->kaddr)
+ m->kaddr = kmap_atomic(mpage);
+ return 0;
+ }
+
+ if (m->kaddr) {
+ kunmap_atomic(m->kaddr);
+ m->kaddr = NULL;
+ }
+ put_page(mpage);
+ }
+
+ mpage = erofs_get_meta_page(sb, eblk);
+ if (IS_ERR(mpage)) {
+ map->mpage = NULL;
+ return PTR_ERR(mpage);
+ }
+ m->kaddr = kmap_atomic(mpage);
+ unlock_page(mpage);
+ map->mpage = mpage;
+ return 0;
+}
+
+static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned long lcn)
+{
+ struct inode *const inode = m->inode;
+ struct erofs_inode *const vi = EROFS_I(inode);
+ const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
+ const erofs_off_t pos =
+ Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
+ vi->xattr_isize) +
+ lcn * sizeof(struct z_erofs_vle_decompressed_index);
+ struct z_erofs_vle_decompressed_index *di;
+ unsigned int advise, type;
+ int err;
+
+ err = z_erofs_reload_indexes(m, erofs_blknr(pos));
+ if (err)
+ return err;
+
+ m->lcn = lcn;
+ di = m->kaddr + erofs_blkoff(pos);
+
+ advise = le16_to_cpu(di->di_advise);
+ type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
+ ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
+ switch (type) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ m->clusterofs = 1 << vi->z_logical_clusterbits;
+ m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
+ m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
+ break;
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ m->clusterofs = le16_to_cpu(di->di_clusterofs);
+ m->pblk = le32_to_cpu(di->di_u.blkaddr);
+ break;
+ default:
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ }
+ m->type = type;
+ return 0;
+}
+
+static unsigned int decode_compactedbits(unsigned int lobits,
+ unsigned int lomask,
+ u8 *in, unsigned int pos, u8 *type)
+{
+ const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
+ const unsigned int lo = v & lomask;
+
+ *type = (v >> lobits) & 3;
+ return lo;
+}
+
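+/*
+ * Each compacted pack stores vcnt lclusters in (vcnt << amortizedshift)
+ * bytes with the final __le32 holding the base blkaddr, e.g. the 2B
+ * format (vcnt = 16, lclusterbits = 12) leaves (32 - 4) * 8 / 16 = 14
+ * encoded bits per lcluster: 12 bits of lo plus a 2-bit cluster type.
+ */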
+static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ unsigned int amortizedshift,
+ unsigned int eofs)
+{
+ struct erofs_inode *const vi = EROFS_I(m->inode);
+ const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ const unsigned int lomask = (1 << lclusterbits) - 1;
+ unsigned int vcnt, base, lo, encodebits, nblk;
+ int i;
+ u8 *in, type;
+
+ if (1 << amortizedshift == 4)
+ vcnt = 2;
+ else if (1 << amortizedshift == 2 && lclusterbits == 12)
+ vcnt = 16;
+ else
+ return -EOPNOTSUPP;
+
+ encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
+ base = round_down(eofs, vcnt << amortizedshift);
+ in = m->kaddr + base;
+
+ i = (eofs - base) >> amortizedshift;
+
+ lo = decode_compactedbits(lclusterbits, lomask,
+ in, encodebits * i, &type);
+ m->type = type;
+ if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
+ m->clusterofs = 1 << lclusterbits;
+ if (i + 1 != vcnt) {
+ m->delta[0] = lo;
+ return 0;
+ }
+ /*
+ * the last lcluster in the pack is special:
+ * its lo stores delta[1] rather than delta[0].
+ * Hence, derive delta[0] from the previous lcluster indirectly.
+ */
+ lo = decode_compactedbits(lclusterbits, lomask,
+ in, encodebits * (i - 1), &type);
+ if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+ lo = 0;
+ m->delta[0] = lo + 1;
+ return 0;
+ }
+ m->clusterofs = lo;
+ m->delta[0] = 0;
+ /* figure out blkaddr (pblk) for HEAD lclusters */
+ nblk = 1;
+ while (i > 0) {
+ --i;
+ lo = decode_compactedbits(lclusterbits, lomask,
+ in, encodebits * i, &type);
+ if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+ i -= lo;
+
+ if (i >= 0)
+ ++nblk;
+ }
+ in += (vcnt << amortizedshift) - sizeof(__le32);
+ m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
+ return 0;
+}
+
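+/*
+ * On-disk ordering right after the map header: an initial run of
+ * 4B-encoded lclusters (just enough to reach 32-byte alignment),
+ * then the 2B-compacted area if advised, then trailing 4B entries.
+ */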
+static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned long lcn)
+{
+ struct inode *const inode = m->inode;
+ struct erofs_inode *const vi = EROFS_I(inode);
+ const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
+ vi->inode_isize + vi->xattr_isize, 8) +
+ sizeof(struct z_erofs_map_header);
+ const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
+ unsigned int compacted_4b_initial, compacted_2b;
+ unsigned int amortizedshift;
+ erofs_off_t pos;
+ int err;
+
+ if (lclusterbits != 12)
+ return -EOPNOTSUPP;
+
+ if (lcn >= totalidx)
+ return -EINVAL;
+
+ m->lcn = lcn;
+ /* number of initial 4B entries needed to reach 32-byte (compacted_2b) alignment */
+ compacted_4b_initial = (32 - ebase % 32) / 4;
+ if (compacted_4b_initial == 32 / 4)
+ compacted_4b_initial = 0;
+
+ if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
+ compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
+ else
+ compacted_2b = 0;
+
+ pos = ebase;
+ if (lcn < compacted_4b_initial) {
+ amortizedshift = 2;
+ goto out;
+ }
+ pos += compacted_4b_initial * 4;
+ lcn -= compacted_4b_initial;
+
+ if (lcn < compacted_2b) {
+ amortizedshift = 1;
+ goto out;
+ }
+ pos += compacted_2b * 2;
+ lcn -= compacted_2b;
+ amortizedshift = 2;
+out:
+ pos += lcn * (1 << amortizedshift);
+ err = z_erofs_reload_indexes(m, erofs_blknr(pos));
+ if (err)
+ return err;
+ return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
+}
+
+static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned int lcn)
+{
+ const unsigned int datamode = EROFS_I(m->inode)->datalayout;
+
+ if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
+ return legacy_load_cluster_from_disk(m, lcn);
+
+ if (datamode == EROFS_INODE_FLAT_COMPRESSION)
+ return compacted_load_cluster_from_disk(m, lcn);
+
+ return -EINVAL;
+}
+
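+/*
+ * Walk back delta[0] lclusters at a time until the HEAD (or PLAIN)
+ * lcluster starting this extent is reached, then record its m_la.
+ */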
+static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
+ unsigned int lookback_distance)
+{
+ struct erofs_inode *const vi = EROFS_I(m->inode);
+ struct erofs_map_blocks *const map = m->map;
+ const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ unsigned long lcn = m->lcn;
+ int err;
+
+ if (lcn < lookback_distance) {
+ erofs_err(m->inode->i_sb,
+ "bogus lookback distance @ nid %llu", vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+
+ /* load extent head logical cluster if needed */
+ lcn -= lookback_distance;
+ err = z_erofs_load_cluster_from_disk(m, lcn);
+ if (err)
+ return err;
+
+ switch (m->type) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ if (!m->delta[0]) {
+ erofs_err(m->inode->i_sb,
+ "invalid lookback distance 0 @ nid %llu",
+ vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ return z_erofs_extent_lookback(m, m->delta[0]);
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ map->m_flags &= ~EROFS_MAP_ZIPPED;
+ /* fallthrough */
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ map->m_la = (lcn << lclusterbits) | m->clusterofs;
+ break;
+ default:
+ erofs_err(m->inode->i_sb,
+ "unknown type %u @ lcn %lu of nid %llu",
+ m->type, lcn, vi->nid);
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct z_erofs_maprecorder m = {
+ .inode = inode,
+ .map = map,
+ };
+ int err = 0;
+ unsigned int lclusterbits, endoff;
+ unsigned long long ofs, end;
+
+ trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
+
+ /* when trying to read beyond EOF, leave it unmapped */
+ if (map->m_la >= inode->i_size) {
+ map->m_llen = map->m_la + 1 - inode->i_size;
+ map->m_la = inode->i_size;
+ map->m_flags = 0;
+ goto out;
+ }
+
+ err = z_erofs_fill_inode_lazy(inode);
+ if (err)
+ goto out;
+
+ lclusterbits = vi->z_logical_clusterbits;
+ ofs = map->m_la;
+ m.lcn = ofs >> lclusterbits;
+ endoff = ofs & ((1 << lclusterbits) - 1);
+
+ err = z_erofs_load_cluster_from_disk(&m, m.lcn);
+ if (err)
+ goto unmap_out;
+
+ map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */
+ end = (m.lcn + 1ULL) << lclusterbits;
+
+ switch (m.type) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ if (endoff >= m.clusterofs)
+ map->m_flags &= ~EROFS_MAP_ZIPPED;
+ /* fallthrough */
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ if (endoff >= m.clusterofs) {
+ map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
+ break;
+ }
+ /* m.lcn should be >= 1 if endoff < m.clusterofs */
+ if (!m.lcn) {
+ erofs_err(inode->i_sb,
+ "invalid logical cluster 0 at nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+ }
+ end = (m.lcn << lclusterbits) | m.clusterofs;
+ map->m_flags |= EROFS_MAP_FULL_MAPPED;
+ m.delta[0] = 1;
+ /* fallthrough */
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ /* get the corresponding first chunk */
+ err = z_erofs_extent_lookback(&m, m.delta[0]);
+ if (err)
+ goto unmap_out;
+ break;
+ default:
+ erofs_err(inode->i_sb,
+ "unknown type %u @ offset %llu of nid %llu",
+ m.type, ofs, vi->nid);
+ err = -EOPNOTSUPP;
+ goto unmap_out;
+ }
+
+ map->m_llen = end - map->m_la;
+ map->m_plen = 1 << lclusterbits;
+ map->m_pa = blknr_to_addr(m.pblk);
+ map->m_flags |= EROFS_MAP_MAPPED;
+
+unmap_out:
+ if (m.kaddr)
+ kunmap_atomic(m.kaddr);
+
+out:
+ erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
+ __func__, map->m_la, map->m_pa,
+ map->m_llen, map->m_plen, map->m_flags);
+
+ trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
+
+ /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
+ DBG_BUGON(err < 0 && err != -ENOMEM);
+ return err;
+}
+
diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h
new file mode 100644
index 000000000000..58556903aa94
--- /dev/null
+++ b/fs/erofs/zpvec.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_ZPVEC_H
+#define __EROFS_FS_ZPVEC_H
+
+#include "tagptr.h"
+
+/* page type in pagevec for decompress subsystem */
+enum z_erofs_page_type {
+ /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
+ Z_EROFS_PAGE_TYPE_EXCLUSIVE,
+
+ Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,
+
+ Z_EROFS_VLE_PAGE_TYPE_HEAD,
+ Z_EROFS_VLE_PAGE_TYPE_MAX
+};
+
+extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
+ __bad_page_type_exclusive(void);
+
+/* pagevec tagged pointer */
+typedef tagptr2_t erofs_vtptr_t;
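+
+/*
+ * an erofs_vtptr_t packs a page pointer together with its 2-bit
+ * z_erofs_page_type, folded/unfolded via tagptr_fold()/tagptr_unfold_*().
+ */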
+
+/* pagevec collector */
+struct z_erofs_pagevec_ctor {
+ struct page *curr, *next;
+ erofs_vtptr_t *pages;
+
+ unsigned int nr, index;
+};
+
+static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
+ bool atomic)
+{
+ if (!ctor->curr)
+ return;
+
+ if (atomic)
+ kunmap_atomic(ctor->pages);
+ else
+ kunmap(ctor->curr);
+}
+
+static inline struct page *
+z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
+ unsigned int nr)
+{
+ unsigned int index;
+
+ /* keep away from occupied pages */
+ if (ctor->next)
+ return ctor->next;
+
+ for (index = 0; index < nr; ++index) {
+ const erofs_vtptr_t t = ctor->pages[index];
+ const unsigned int tags = tagptr_unfold_tags(t);
+
+ if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
+ return tagptr_unfold_ptr(t);
+ }
+ DBG_BUGON(nr >= ctor->nr);
+ return NULL;
+}
+
+static inline void
+z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
+ bool atomic)
+{
+ struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
+
+ z_erofs_pagevec_ctor_exit(ctor, atomic);
+
+ ctor->curr = next;
+ ctor->next = NULL;
+ ctor->pages = atomic ?
+ kmap_atomic(ctor->curr) : kmap(ctor->curr);
+
+ ctor->nr = PAGE_SIZE / sizeof(struct page *);
+ ctor->index = 0;
+}
+
+static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
+ unsigned int nr,
+ erofs_vtptr_t *pages,
+ unsigned int i)
+{
+ ctor->nr = nr;
+ ctor->curr = ctor->next = NULL;
+ ctor->pages = pages;
+
+ if (i >= nr) {
+ i -= nr;
+ z_erofs_pagevec_ctor_pagedown(ctor, false);
+ while (i > ctor->nr) {
+ i -= ctor->nr;
+ z_erofs_pagevec_ctor_pagedown(ctor, false);
+ }
+ }
+ ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
+ ctor->index = i;
+}
+
+static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
+ struct page *page,
+ enum z_erofs_page_type type,
+ bool *occupied)
+{
+ *occupied = false;
+ if (!ctor->next && type)
+ if (ctor->index + 1 == ctor->nr)
+ return false;
+
+ if (ctor->index >= ctor->nr)
+ z_erofs_pagevec_ctor_pagedown(ctor, false);
+
+ /* exclusive page type must be 0 */
+ if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
+ __bad_page_type_exclusive();
+
+ /* note that collector->next never equals 1 or 2 */
+ if (type == (uintptr_t)ctor->next) {
+ ctor->next = page;
+ *occupied = true;
+ }
+ ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
+ return true;
+}
+
+static inline struct page *
+z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
+ enum z_erofs_page_type *type)
+{
+ erofs_vtptr_t t;
+
+ if (ctor->index >= ctor->nr) {
+ DBG_BUGON(!ctor->next);
+ z_erofs_pagevec_ctor_pagedown(ctor, true);
+ }
+
+ t = ctor->pages[ctor->index];
+
+ *type = tagptr_unfold_tags(t);
+
+ /* note that collector->next never equals 1 or 2 */
+ if (*type == (uintptr_t)ctor->next)
+ ctor->next = tagptr_unfold_ptr(t);
+
+ ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
+ return tagptr_unfold_ptr(t);
+}
+#endif
+
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index d7f1f5011fac..67a395039268 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -551,28 +551,23 @@ out_unlock:
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct nested_calls poll_safewake_ncalls;
-
-static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
-{
- unsigned long flags;
- wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
-
- spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
- wake_up_locked_poll(wqueue, EPOLLIN);
- spin_unlock_irqrestore(&wqueue->lock, flags);
-
- return 0;
-}
+static DEFINE_PER_CPU(int, wakeup_nest);
static void ep_poll_safewake(wait_queue_head_t *wq)
{
- int this_cpu = get_cpu();
-
- ep_call_nested(&poll_safewake_ncalls,
- ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
+ unsigned long flags;
+ int subclass;
- put_cpu();
+ local_irq_save(flags);
+ preempt_disable();
+ subclass = __this_cpu_read(wakeup_nest);
+ spin_lock_nested(&wq->lock, subclass + 1);
+ __this_cpu_inc(wakeup_nest);
+ wake_up_locked_poll(wq, POLLIN);
+ __this_cpu_dec(wakeup_nest);
+ spin_unlock(&wq->lock);
+ local_irq_restore(flags);
+ preempt_enable();
}
#else
@@ -671,7 +666,6 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
void *priv, int depth, bool ep_locked)
{
__poll_t res;
- int pwake = 0;
struct epitem *epi, *nepi;
LIST_HEAD(txlist);
@@ -738,26 +732,11 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
*/
list_splice(&txlist, &ep->rdllist);
__pm_relax(ep->ws);
-
- if (!list_empty(&ep->rdllist)) {
- /*
- * Wake up (if active) both the eventpoll wait list and
- * the ->poll() wait list (delayed after we release the lock).
- */
- if (waitqueue_active(&ep->wq))
- wake_up(&ep->wq);
- if (waitqueue_active(&ep->poll_wait))
- pwake++;
- }
write_unlock_irq(&ep->lock);
if (!ep_locked)
mutex_unlock(&ep->mtx);
- /* We have to call this outside the lock */
- if (pwake)
- ep_poll_safewake(&ep->poll_wait);
-
return res;
}
@@ -1459,13 +1438,13 @@ static int ep_create_wakeup_source(struct epitem *epi)
struct wakeup_source *ws;
if (!epi->ep->ws) {
- epi->ep->ws = wakeup_source_register("eventpoll");
+ epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
if (!epi->ep->ws)
return -ENOMEM;
}
name = epi->ffd.file->f_path.dentry->d_name.name;
- ws = wakeup_source_register(name);
+ ws = wakeup_source_register(NULL, name);
if (!ws)
return -ENOMEM;
@@ -2370,11 +2349,6 @@ static int __init eventpoll_init(void)
*/
ep_nested_calls_init(&poll_loop_ncalls);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /* Initialize the structure used to perform safe poll wait head wake ups */
- ep_nested_calls_init(&poll_safewake_ncalls);
-#endif
-
/*
* We can have many thousands of epitems, so prevent this from
* using an extra cache line on 64-bit (and smaller) CPUs
diff --git a/fs/exec.c b/fs/exec.c
index c71cbfe6826a..74d88dab98dd 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -59,7 +59,6 @@
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
-#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
@@ -1015,7 +1014,7 @@ static int exec_mmap(struct mm_struct *mm)
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
- mm_release(tsk, old_mm);
+ exec_mm_release(tsk, old_mm);
if (old_mm) {
sync_mm_rss(old_mm);
@@ -1033,6 +1032,7 @@ static int exec_mmap(struct mm_struct *mm)
}
task_lock(tsk);
active_mm = tsk->active_mm;
+ membarrier_exec_mmap(mm);
tsk->mm = mm;
tsk->active_mm = mm;
activate_mm(active_mm, mm);
@@ -1131,7 +1131,7 @@ static int de_thread(struct task_struct *tsk)
* also take its birthdate (always earlier than our own).
*/
tsk->start_time = leader->start_time;
- tsk->real_start_time = leader->real_start_time;
+ tsk->start_boottime = leader->start_boottime;
BUG_ON(!same_thread_group(leader, tsk));
BUG_ON(has_group_leader_pid(tsk));
@@ -1825,10 +1825,9 @@ static int __do_execve_file(int fd, struct filename *filename,
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
- membarrier_execve(current);
rseq_execve(current);
acct_update_integrals(current);
- task_numa_free(current);
+ task_numa_free(current, false);
free_bprm(bprm);
kfree(pathbuf);
if (filename)
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index f0e549783caf..2dd55b172d57 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -7,7 +7,7 @@
* and for mapping back from file handles to dentries.
*
* For details on why we do all the strange and hairy things in here
- * take a look at Documentation/filesystems/nfs/Exporting.
+ * take a look at Documentation/filesystems/nfs/exporting.rst.
*/
#include <linux/exportfs.h>
#include <linux/fs.h>
@@ -519,26 +519,33 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
* inode is actually connected to the parent.
*/
err = exportfs_get_name(mnt, target_dir, nbuf, result);
- if (!err) {
- inode_lock(target_dir->d_inode);
- nresult = lookup_one_len(nbuf, target_dir,
- strlen(nbuf));
- inode_unlock(target_dir->d_inode);
- if (!IS_ERR(nresult)) {
- if (nresult->d_inode) {
- dput(result);
- result = nresult;
- } else
- dput(nresult);
- }
+ if (err) {
+ dput(target_dir);
+ goto err_result;
}
+ inode_lock(target_dir->d_inode);
+ nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+ if (!IS_ERR(nresult)) {
+ if (unlikely(nresult->d_inode != result->d_inode)) {
+ dput(nresult);
+ nresult = ERR_PTR(-ESTALE);
+ }
+ }
+ inode_unlock(target_dir->d_inode);
/*
* At this point we are done with the parent, but it's pinned
* by the child dentry anyway.
*/
dput(target_dir);
+ if (IS_ERR(nresult)) {
+ err = PTR_ERR(nresult);
+ goto err_result;
+ }
+ dput(result);
+ result = nresult;
+
/*
* And finally make sure the dentry is actually acceptable
* to NFSD.
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 547c165299c0..fa9c951d3471 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -269,7 +269,7 @@ goal_in_my_reservation(struct ext2_reserve_window *rsv, ext2_grpblk_t grp_goal,
ext2_fsblk_t group_first_block, group_last_block;
group_first_block = ext2_group_first_block_no(sb, group);
- group_last_block = group_first_block + EXT2_BLOCKS_PER_GROUP(sb) - 1;
+ group_last_block = ext2_group_last_block_no(sb, group);
if ((rsv->_rsv_start > group_last_block) ||
(rsv->_rsv_end < group_first_block))
@@ -490,9 +490,7 @@ void ext2_free_blocks (struct inode * inode, unsigned long block,
struct ext2_super_block * es = sbi->s_es;
unsigned freed = 0, group_freed;
- if (block < le32_to_cpu(es->s_first_data_block) ||
- block + count < block ||
- block + count > le32_to_cpu(es->s_blocks_count)) {
+ if (!ext2_data_block_valid(sbi, block, count)) {
ext2_error (sb, "ext2_free_blocks",
"Freeing blocks not in datazone - "
"block = %lu, count = %lu", block, count);
@@ -668,37 +666,24 @@ ext2_try_to_allocate(struct super_block *sb, int group,
unsigned long *count,
struct ext2_reserve_window *my_rsv)
{
- ext2_fsblk_t group_first_block;
+ ext2_fsblk_t group_first_block = ext2_group_first_block_no(sb, group);
+ ext2_fsblk_t group_last_block = ext2_group_last_block_no(sb, group);
ext2_grpblk_t start, end;
unsigned long num = 0;
+ start = 0;
+ end = group_last_block - group_first_block + 1;
/* we do allocation within the reservation window if we have a window */
if (my_rsv) {
- group_first_block = ext2_group_first_block_no(sb, group);
if (my_rsv->_rsv_start >= group_first_block)
start = my_rsv->_rsv_start - group_first_block;
- else
- /* reservation window cross group boundary */
- start = 0;
- end = my_rsv->_rsv_end - group_first_block + 1;
- if (end > EXT2_BLOCKS_PER_GROUP(sb))
- /* reservation window crosses group boundary */
- end = EXT2_BLOCKS_PER_GROUP(sb);
- if ((start <= grp_goal) && (grp_goal < end))
- start = grp_goal;
- else
+ if (my_rsv->_rsv_end < group_last_block)
+ end = my_rsv->_rsv_end - group_first_block + 1;
+ if (grp_goal < start || grp_goal >= end)
grp_goal = -1;
- } else {
- if (grp_goal > 0)
- start = grp_goal;
- else
- start = 0;
- end = EXT2_BLOCKS_PER_GROUP(sb);
}
-
BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb));
-repeat:
if (grp_goal < 0) {
grp_goal = find_next_usable_block(start, bitmap_bh, end);
if (grp_goal < 0)
@@ -713,32 +698,23 @@ repeat:
;
}
}
- start = grp_goal;
- if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group), grp_goal,
- bitmap_bh->b_data)) {
- /*
- * The block was allocated by another thread, or it was
- * allocated and then freed by another thread
- */
- start++;
- grp_goal++;
- if (start >= end)
- goto fail_access;
- goto repeat;
- }
- num++;
- grp_goal++;
- while (num < *count && grp_goal < end
- && !ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group),
+ for (; num < *count && grp_goal < end; grp_goal++) {
+ if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group),
grp_goal, bitmap_bh->b_data)) {
+ if (num == 0)
+ continue;
+ break;
+ }
num++;
- grp_goal++;
}
+
+ if (num == 0)
+ goto fail_access;
+
*count = num;
return grp_goal - num;
fail_access:
- *count = num;
return -1;
}
@@ -756,10 +732,9 @@ fail_access:
* but we will shift to the place where start_block is,
* then start from there, when looking for a reservable space.
*
- * @size: the target new reservation window size
+ * @sb: the super block.
*
- * @group_first_block: the first block we consider to start
- * the real search from
+ * @start_block: the first block we consider to start the real search from
*
* @last_block:
* the maximum block number that our goal reservable space
@@ -910,7 +885,7 @@ static int alloc_new_reservation(struct ext2_reserve_window_node *my_rsv,
spinlock_t *rsv_lock = &EXT2_SB(sb)->s_rsv_window_lock;
group_first_block = ext2_group_first_block_no(sb, group);
- group_end_block = group_first_block + (EXT2_BLOCKS_PER_GROUP(sb) - 1);
+ group_end_block = ext2_group_last_block_no(sb, group);
if (grp_goal < 0)
start_block = group_first_block;
@@ -1117,7 +1092,7 @@ ext2_try_to_allocate_with_rsv(struct super_block *sb, unsigned int group,
* first block is the block number of the first block in this group
*/
group_first_block = ext2_group_first_block_no(sb, group);
- group_last_block = group_first_block + (EXT2_BLOCKS_PER_GROUP(sb) - 1);
+ group_last_block = ext2_group_last_block_no(sb, group);
/*
* Basically we will allocate a new block from inode's reservation
@@ -1203,13 +1178,13 @@ int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk,
unsigned int count)
{
if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
- (start_blk + count < start_blk) ||
- (start_blk > le32_to_cpu(sbi->s_es->s_blocks_count)))
+ (start_blk + count - 1 < start_blk) ||
+ (start_blk + count - 1 >= le32_to_cpu(sbi->s_es->s_blocks_count)))
return 0;
/* Ensure we do not step over superblock */
if ((start_blk <= sbi->s_sb_block) &&
- (start_blk + count >= sbi->s_sb_block))
+ (start_blk + count - 1 >= sbi->s_sb_block))
return 0;
return 1;
@@ -1315,6 +1290,13 @@ retry_alloc:
if (free_blocks > 0) {
grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
EXT2_BLOCKS_PER_GROUP(sb));
+ /*
+ * If we retry the allocation (because the fs reservation did not
+ * work out, or because of fs corruption), bitmap_bh is a non-NULL
+ * pointer and must be released before calling
+ * read_block_bitmap() again.
+ */
+ brelse(bitmap_bh);
bitmap_bh = read_block_bitmap(sb, group_no);
if (!bitmap_bh)
goto io_error;
@@ -1406,6 +1388,7 @@ allocated:
* use. So we may want to selectively mark some of the blocks
* as free
*/
+ num = *count;
goto retry_alloc;
}
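
The reworked ext2_data_block_valid() (further down in this diff) treats the range as inclusive of its last block, start_blk + count - 1, which is also why the xattr caller later in the series switches from a count of 0 to 1. A minimal userspace sketch of the same arithmetic; the function name block_range_valid and the sample geometry are illustrative only, not part of the patch:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t fsblk_t;

static int block_range_valid(fsblk_t start, unsigned int count,
                             fsblk_t first_data_block, fsblk_t blocks_count,
                             fsblk_t sb_block)
{
        fsblk_t last = start + count - 1;

        /* the range must start past the first data block, must not wrap,
         * and its last block must stay below blocks_count */
        if (start <= first_data_block || last < start || last >= blocks_count)
                return 0;
        /* reject ranges that step over the superblock */
        if (start <= sb_block && last >= sb_block)
                return 0;
        return 1;
}

int main(void)
{
        /* blocks_count = 8192: block 8191 is the last valid block */
        printf("%d\n", block_range_valid(8184, 8, 1, 8192, 1)); /* 1: ends at 8191 */
        printf("%d\n", block_range_valid(8185, 8, 1, 8192, 1)); /* 0: would end at 8192 */
        printf("%d\n", block_range_valid(100, 0, 1, 8192, 1));  /* 0: a count of 0 wraps */
        return 0;
}
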
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 10ab238de9a6..8178bd38a9d6 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -813,6 +813,18 @@ ext2_group_first_block_no(struct super_block *sb, unsigned long group_no)
le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block);
}
+static inline ext2_fsblk_t
+ext2_group_last_block_no(struct super_block *sb, unsigned long group_no)
+{
+ struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+ if (group_no == sbi->s_groups_count - 1)
+ return le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
+ else
+ return ext2_group_first_block_no(sb, group_no) +
+ EXT2_BLOCKS_PER_GROUP(sb) - 1;
+}
+
#define ext2_set_bit __test_and_set_bit_le
#define ext2_clear_bit __test_and_clear_bit_le
#define ext2_test_bit test_bit_le
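
ext2_group_last_block_no() above special-cases the final block group because a filesystem rarely ends exactly on a group boundary; the naive first_block + blocks_per_group - 1 would point past the end of the device. A small standalone illustration with made-up geometry (1000 blocks, 256 blocks per group, first data block 1):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t blocks_count = 1000, per_group = 256, first_data = 1;
        uint64_t groups = (blocks_count - first_data + per_group - 1) / per_group;
        uint64_t group = groups - 1;                    /* the last group */
        uint64_t first = first_data + group * per_group;
        uint64_t naive_last = first + per_group - 1;    /* would be block 1024 */
        uint64_t last = (group == groups - 1) ? blocks_count - 1 : naive_last;

        printf("group %llu: first %llu, naive last %llu, real last %llu\n",
               (unsigned long long)group, (unsigned long long)first,
               (unsigned long long)naive_last, (unsigned long long)last);
        /* prints: group 3: first 769, naive last 1024, real last 999 */
        return 0;
}
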
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 7004ce581a32..119667e65890 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -701,10 +701,13 @@ static int ext2_get_blocks(struct inode *inode,
if (!partial) {
count++;
mutex_unlock(&ei->truncate_mutex);
- if (err)
- goto cleanup;
goto got_it;
}
+
+ if (err) {
+ mutex_unlock(&ei->truncate_mutex);
+ goto cleanup;
+ }
}
/*
@@ -801,7 +804,7 @@ int ext2_get_block(struct inode *inode, sector_t iblock,
#ifdef CONFIG_FS_DAX
static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
- unsigned flags, struct iomap *iomap)
+ unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
unsigned int blkbits = inode->i_blkbits;
unsigned long first_block = offset >> blkbits;
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 1b853fb0b163..32a8d10b579d 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -145,10 +145,13 @@ setversion_out:
if (ei->i_block_alloc_info){
struct ext2_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
rsv->rsv_goal_size = rsv_window_size;
+ } else {
+ ret = -ENOMEM;
}
+
mutex_unlock(&ei->truncate_mutex);
mnt_drop_write_file(filp);
- return 0;
+ return ret;
}
default:
return -ENOTTY;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 44eb6e7eb492..bcffe25da2f0 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -162,8 +162,7 @@ static void ext2_put_super (struct super_block * sb)
}
db_count = sbi->s_gdb_count;
for (i = 0; i < db_count; i++)
- if (sbi->s_group_desc[i])
- brelse (sbi->s_group_desc[i]);
+ brelse(sbi->s_group_desc[i]);
kfree(sbi->s_group_desc);
kfree(sbi->s_debts);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
@@ -703,13 +702,7 @@ static int ext2_check_descriptors(struct super_block *sb)
for (i = 0; i < sbi->s_groups_count; i++) {
struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
- ext2_fsblk_t last_block;
-
- if (i == sbi->s_groups_count - 1)
- last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
- else
- last_block = first_block +
- (EXT2_BLOCKS_PER_GROUP(sb) - 1);
+ ext2_fsblk_t last_block = ext2_group_last_block_no(sb, i);
if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
le32_to_cpu(gdp->bg_block_bitmap) > last_block)
@@ -807,7 +800,6 @@ static unsigned long descriptor_loc(struct super_block *sb,
{
struct ext2_sb_info *sbi = EXT2_SB(sb);
unsigned long bg, first_meta_bg;
- int has_super = 0;
first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
@@ -815,10 +807,8 @@ static unsigned long descriptor_loc(struct super_block *sb,
nr < first_meta_bg)
return (logic_sb_block + nr + 1);
bg = sbi->s_desc_per_block * nr;
- if (ext2_bg_has_super(sb, bg))
- has_super = 1;
- return ext2_group_first_block_no(sb, bg) + has_super;
+ return ext2_group_first_block_no(sb, bg) + ext2_bg_has_super(sb, bg);
}
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
@@ -1002,6 +992,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
sb->s_max_links = EXT2_LINK_MAX;
+ sb->s_time_min = S32_MIN;
+ sb->s_time_max = S32_MAX;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 79369c13cc55..0456bc990b5e 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -794,7 +794,7 @@ ext2_xattr_delete_inode(struct inode *inode)
if (!EXT2_I(inode)->i_file_acl)
goto cleanup;
- if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 0)) {
+ if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 1)) {
ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
"inode %ld: xattr block %d is out of data blocks range",
inode->i_ino, EXT2_I(inode)->i_file_acl);
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index cbb5ca830e57..ef42ab040905 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -106,3 +106,20 @@ config EXT4_DEBUG
If you select Y here, then you will be able to turn on debugging
with a command such as:
echo 1 > /sys/module/ext4/parameters/mballoc_debug
+
+config EXT4_KUNIT_TESTS
+ bool "KUnit tests for ext4"
+ select EXT4_FS
+ depends on KUNIT
+ help
+ This builds the ext4 KUnit tests.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). These tests are only useful
+ for kernel developers running the KUnit test harness; they are not
+ meant for inclusion in a production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 8fdfcd3c3e04..840b91d040f1 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -13,3 +13,5 @@ ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
+ext4-$(CONFIG_EXT4_KUNIT_TESTS) += inode-test.o
+ext4-$(CONFIG_FS_VERITY) += verity.o
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 8e83741b02e0..d4d4fdfac1a6 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -38,6 +38,7 @@ int __init ext4_init_system_zone(void)
void ext4_exit_system_zone(void)
{
+ rcu_barrier();
kmem_cache_destroy(ext4_system_zone_cachep);
}
@@ -49,17 +50,26 @@ static inline int can_merge(struct ext4_system_zone *entry1,
return 0;
}
+static void release_system_zone(struct ext4_system_blocks *system_blks)
+{
+ struct ext4_system_zone *entry, *n;
+
+ rbtree_postorder_for_each_entry_safe(entry, n,
+ &system_blks->root, node)
+ kmem_cache_free(ext4_system_zone_cachep, entry);
+}
+
/*
* Mark a range of blocks as belonging to the "system zone" --- that
* is, filesystem metadata blocks which should never be used by
* inodes.
*/
-static int add_system_zone(struct ext4_sb_info *sbi,
+static int add_system_zone(struct ext4_system_blocks *system_blks,
ext4_fsblk_t start_blk,
unsigned int count)
{
struct ext4_system_zone *new_entry = NULL, *entry;
- struct rb_node **n = &sbi->system_blks.rb_node, *node;
+ struct rb_node **n = &system_blks->root.rb_node, *node;
struct rb_node *parent = NULL, *new_node = NULL;
while (*n) {
@@ -91,7 +101,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
new_node = &new_entry->node;
rb_link_node(new_node, parent, n);
- rb_insert_color(new_node, &sbi->system_blks);
+ rb_insert_color(new_node, &system_blks->root);
}
/* Can we merge to the left? */
@@ -101,7 +111,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
if (can_merge(entry, new_entry)) {
new_entry->start_blk = entry->start_blk;
new_entry->count += entry->count;
- rb_erase(node, &sbi->system_blks);
+ rb_erase(node, &system_blks->root);
kmem_cache_free(ext4_system_zone_cachep, entry);
}
}
@@ -112,7 +122,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
entry = rb_entry(node, struct ext4_system_zone, node);
if (can_merge(new_entry, entry)) {
new_entry->count += entry->count;
- rb_erase(node, &sbi->system_blks);
+ rb_erase(node, &system_blks->root);
kmem_cache_free(ext4_system_zone_cachep, entry);
}
}
@@ -126,7 +136,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
int first = 1;
printk(KERN_INFO "System zones: ");
- node = rb_first(&sbi->system_blks);
+ node = rb_first(&sbi->system_blks->root);
while (node) {
entry = rb_entry(node, struct ext4_system_zone, node);
printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
@@ -137,7 +147,47 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
printk(KERN_CONT "\n");
}
-static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
+/*
+ * Returns 1 if the passed-in block region (start_blk,
+ * start_blk+count) is valid; 0 if some part of the block region
+ * overlaps with filesystem metadata blocks.
+ */
+static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
+ struct ext4_system_blocks *system_blks,
+ ext4_fsblk_t start_blk,
+ unsigned int count)
+{
+ struct ext4_system_zone *entry;
+ struct rb_node *n;
+
+ if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (start_blk + count < start_blk) ||
+ (start_blk + count > ext4_blocks_count(sbi->s_es))) {
+ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+ return 0;
+ }
+
+ if (system_blks == NULL)
+ return 1;
+
+ n = system_blks->root.rb_node;
+ while (n) {
+ entry = rb_entry(n, struct ext4_system_zone, node);
+ if (start_blk + count - 1 < entry->start_blk)
+ n = n->rb_left;
+ else if (start_blk >= (entry->start_blk + entry->count))
+ n = n->rb_right;
+ else {
+ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int ext4_protect_reserved_inode(struct super_block *sb,
+ struct ext4_system_blocks *system_blks,
+ u32 ino)
{
struct inode *inode;
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -163,14 +213,15 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
if (n == 0) {
i++;
} else {
- if (!ext4_data_block_valid(sbi, map.m_pblk, n)) {
+ if (!ext4_data_block_valid_rcu(sbi, system_blks,
+ map.m_pblk, n)) {
ext4_error(sb, "blocks %llu-%llu from inode %u "
"overlap system zone", map.m_pblk,
map.m_pblk + map.m_len - 1, ino);
err = -EFSCORRUPTED;
break;
}
- err = add_system_zone(sbi, map.m_pblk, n);
+ err = add_system_zone(system_blks, map.m_pblk, n);
if (err < 0)
break;
i += n;
@@ -180,94 +231,130 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
return err;
}
+static void ext4_destroy_system_zone(struct rcu_head *rcu)
+{
+ struct ext4_system_blocks *system_blks;
+
+ system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
+ release_system_zone(system_blks);
+ kfree(system_blks);
+}
+
+/*
+ * Build system zone rbtree which is used for block validity checking.
+ *
+ * The update of system_blks pointer in this function is protected by
+ * sb->s_umount semaphore. However we have to be careful as we can be
+ * racing with ext4_data_block_valid() calls reading system_blks rbtree
+ * protected only by RCU. That's why we first build the rbtree and then
+ * swap it in place.
+ */
int ext4_setup_system_zone(struct super_block *sb)
{
ext4_group_t ngroups = ext4_get_groups_count(sb);
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_system_blocks *system_blks;
struct ext4_group_desc *gdp;
ext4_group_t i;
int flex_size = ext4_flex_bg_size(sbi);
int ret;
if (!test_opt(sb, BLOCK_VALIDITY)) {
- if (sbi->system_blks.rb_node)
+ if (sbi->system_blks)
ext4_release_system_zone(sb);
return 0;
}
- if (sbi->system_blks.rb_node)
+ if (sbi->system_blks)
return 0;
+ system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
+ if (!system_blks)
+ return -ENOMEM;
+
for (i=0; i < ngroups; i++) {
cond_resched();
if (ext4_bg_has_super(sb, i) &&
((i < 5) || ((i % flex_size) == 0)))
- add_system_zone(sbi, ext4_group_first_block_no(sb, i),
+ add_system_zone(system_blks,
+ ext4_group_first_block_no(sb, i),
ext4_bg_num_gdb(sb, i) + 1);
gdp = ext4_get_group_desc(sb, i, NULL);
- ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
+ ret = add_system_zone(system_blks,
+ ext4_block_bitmap(sb, gdp), 1);
if (ret)
- return ret;
- ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1);
+ goto err;
+ ret = add_system_zone(system_blks,
+ ext4_inode_bitmap(sb, gdp), 1);
if (ret)
- return ret;
- ret = add_system_zone(sbi, ext4_inode_table(sb, gdp),
+ goto err;
+ ret = add_system_zone(system_blks,
+ ext4_inode_table(sb, gdp),
sbi->s_itb_per_group);
if (ret)
- return ret;
+ goto err;
}
if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
- ret = ext4_protect_reserved_inode(sb,
+ ret = ext4_protect_reserved_inode(sb, system_blks,
le32_to_cpu(sbi->s_es->s_journal_inum));
if (ret)
- return ret;
+ goto err;
}
+ /*
+ * The system blocks rbtree is now complete; publish it in a single
+ * step so that ext4_data_block_valid() never sees a partially built
+ * tree.
+ */
+ rcu_assign_pointer(sbi->system_blks, system_blks);
+
if (test_opt(sb, DEBUG))
debug_print_tree(sbi);
return 0;
+err:
+ release_system_zone(system_blks);
+ kfree(system_blks);
+ return ret;
}
-/* Called when the filesystem is unmounted */
+/*
+ * Called when the filesystem is unmounted or when remounting it with
+ * noblock_validity specified.
+ *
+ * The update of system_blks pointer in this function is protected by
+ * sb->s_umount semaphore. However we have to be careful as we can be
+ * racing with ext4_data_block_valid() calls reading system_blks rbtree
+ * protected only by RCU. So we first clear the system_blks pointer and
+ * then free the rbtree only after an RCU grace period expires.
+ */
void ext4_release_system_zone(struct super_block *sb)
{
- struct ext4_system_zone *entry, *n;
+ struct ext4_system_blocks *system_blks;
- rbtree_postorder_for_each_entry_safe(entry, n,
- &EXT4_SB(sb)->system_blks, node)
- kmem_cache_free(ext4_system_zone_cachep, entry);
+ system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
+ lockdep_is_held(&sb->s_umount));
+ rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);
- EXT4_SB(sb)->system_blks = RB_ROOT;
+ if (system_blks)
+ call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
}
-/*
- * Returns 1 if the passed-in block region (start_blk,
- * start_blk+count) is valid; 0 if some part of the block region
- * overlaps with filesystem metadata blocks.
- */
int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
unsigned int count)
{
- struct ext4_system_zone *entry;
- struct rb_node *n = sbi->system_blks.rb_node;
+ struct ext4_system_blocks *system_blks;
+ int ret;
- if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
- (start_blk + count < start_blk) ||
- (start_blk + count > ext4_blocks_count(sbi->s_es))) {
- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
- return 0;
- }
- while (n) {
- entry = rb_entry(n, struct ext4_system_zone, node);
- if (start_blk + count - 1 < entry->start_blk)
- n = n->rb_left;
- else if (start_blk >= (entry->start_blk + entry->count))
- n = n->rb_right;
- else {
- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
- return 0;
- }
- }
- return 1;
+ /*
+ * Lock the system zone so it cannot be released concurrently by a
+ * remount that inverts the current "[no]block_validity"
+ * mount option.
+ */
+ rcu_read_lock();
+ system_blks = rcu_dereference(sbi->system_blks);
+ ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
+ count);
+ rcu_read_unlock();
+ return ret;
}
int ext4_check_blockref(const char *function, unsigned int line,
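
The system_blks rework above follows the usual RCU publish/subscribe pattern spelled out in its comments: the writer builds a complete rbtree, publishes it with rcu_assign_pointer() while serialized on sb->s_umount, and frees the previous tree via call_rcu(); readers under rcu_read_lock() only ever observe NULL or a fully built tree. A condensed sketch of that pattern, assuming kernel context; struct blocks_map, publish_map() and block_is_valid() are illustrative names, only the RCU primitives are real APIs:

#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct blocks_map {
        struct rb_root root;
        struct rcu_head rcu;
};

static struct blocks_map __rcu *active_map;

static void free_map_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct blocks_map, rcu));
}

/* Writer: callers serialize updates (ext4 uses sb->s_umount for this). */
static void publish_map(struct blocks_map *new_map)
{
        struct blocks_map *old;

        old = rcu_dereference_protected(active_map, 1);
        rcu_assign_pointer(active_map, new_map);
        if (old)
                call_rcu(&old->rcu, free_map_rcu);  /* freed after a grace period */
}

/* Reader: sees either NULL or a fully built tree, never a half-built one. */
static bool block_is_valid(u64 blk)
{
        struct blocks_map *map;
        bool valid = true;

        rcu_read_lock();
        map = rcu_dereference(active_map);
        if (map) {
                /* ... walk map->root and set valid accordingly ... */
        }
        rcu_read_unlock();
        return valid;
}
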
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 86054f31fe4d..9fdd2b269d61 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -668,14 +668,15 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name)
{
struct qstr qstr = {.name = str, .len = len };
+ struct inode *inode = dentry->d_parent->d_inode;
- if (!IS_CASEFOLDED(dentry->d_parent->d_inode)) {
+ if (!IS_CASEFOLDED(inode) || !EXT4_SB(inode->i_sb)->s_encoding) {
if (len != name->len)
return -1;
return memcmp(str, name->name, len);
}
- return ext4_ci_compare(dentry->d_parent->d_inode, name, &qstr, false);
+ return ext4_ci_compare(inode, name, &qstr, false);
}
static int ext4_d_hash(const struct dentry *dentry, struct qstr *str)
@@ -685,7 +686,7 @@ static int ext4_d_hash(const struct dentry *dentry, struct qstr *str)
unsigned char *norm;
int len, ret = 0;
- if (!IS_CASEFOLDED(dentry->d_inode))
+ if (!IS_CASEFOLDED(dentry->d_inode) || !um)
return 0;
norm = kmalloc(PATH_MAX, GFP_ATOMIC);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index bf660aa7a9e0..f8578caba40d 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -41,6 +41,7 @@
#endif
#include <linux/fscrypt.h>
+#include <linux/fsverity.h>
#include <linux/compiler.h>
@@ -185,10 +186,24 @@ struct ext4_map_blocks {
};
/*
+ * Block validity checking, system zone rbtree.
+ */
+struct ext4_system_blocks {
+ struct rb_root root;
+ struct rcu_head rcu;
+};
+
+/*
* Flags for ext4_io_end->flags
*/
#define EXT4_IO_END_UNWRITTEN 0x0001
+struct ext4_io_end_vec {
+ struct list_head list; /* list of io_end_vec */
+ loff_t offset; /* offset in the file */
+ ssize_t size; /* size of the extent */
+};
+
/*
* For converting unwritten extents on a work queue. 'handle' is used for
* buffered writeback.
@@ -202,8 +217,7 @@ typedef struct ext4_io_end {
* bios covering the extent */
unsigned int flag; /* unwritten or not */
atomic_t count; /* reference counter */
- loff_t offset; /* offset in the file */
- ssize_t size; /* size of the extent */
+ struct list_head list_vec; /* list of ext4_io_end_vec */
} ext4_io_end_t;
struct ext4_io_submit {
@@ -284,6 +298,9 @@ struct ext4_io_submit {
~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+/* Fill in the low bits to get the last block of the cluster */
+#define EXT4_LBLK_CFILL(sbi, lblk) ((lblk) | \
+ ((ext4_lblk_t) (sbi)->s_cluster_ratio - 1))
/* Get the cluster offset */
#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
@@ -395,6 +412,7 @@ struct flex_groups {
#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
+#define EXT4_VERITY_FL 0x00100000 /* Verity protected inode */
#define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
#define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
#define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
@@ -402,7 +420,7 @@ struct flex_groups {
#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded file */
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
-#define EXT4_FL_USER_VISIBLE 0x704BDFFF /* User visible flags */
+#define EXT4_FL_USER_VISIBLE 0x705BDFFF /* User visible flags */
#define EXT4_FL_USER_MODIFIABLE 0x604BC0FF /* User modifiable flags */
/* Flags we can manipulate with through EXT4_IOC_FSSETXATTR */
@@ -467,6 +485,7 @@ enum {
EXT4_INODE_TOPDIR = 17, /* Top of directory hierarchies*/
EXT4_INODE_HUGE_FILE = 18, /* Set to each huge file */
EXT4_INODE_EXTENTS = 19, /* Inode uses extents */
+ EXT4_INODE_VERITY = 20, /* Verity protected inode */
EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */
EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */
EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */
@@ -512,6 +531,7 @@ static inline void ext4_check_flag_values(void)
CHECK_FLAG_VALUE(TOPDIR);
CHECK_FLAG_VALUE(HUGE_FILE);
CHECK_FLAG_VALUE(EXTENTS);
+ CHECK_FLAG_VALUE(VERITY);
CHECK_FLAG_VALUE(EA_INODE);
CHECK_FLAG_VALUE(EOFBLOCKS);
CHECK_FLAG_VALUE(INLINE_DATA);
@@ -649,6 +669,10 @@ enum {
#define EXT4_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
#define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
#define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
+/* ioctl codes 19--39 are reserved for fscrypt */
+#define EXT4_IOC_CLEAR_ES_CACHE _IO('f', 40)
+#define EXT4_IOC_GETSTATE _IOW('f', 41, __u32)
+#define EXT4_IOC_GET_ES_CACHE _IOWR('f', 42, struct fiemap)
#define EXT4_IOC_FSGETXATTR FS_IOC_FSGETXATTR
#define EXT4_IOC_FSSETXATTR FS_IOC_FSSETXATTR
@@ -662,6 +686,16 @@ enum {
#define EXT4_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */
#define EXT4_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */
+/*
+ * Flags returned by EXT4_IOC_GETSTATE
+ *
+ * We only expose to userspace a subset of the state flags in
+ * i_state_flags
+ */
+#define EXT4_STATE_FLAG_EXT_PRECACHED 0x00000001
+#define EXT4_STATE_FLAG_NEW 0x00000002
+#define EXT4_STATE_FLAG_NEWENTRY 0x00000004
+#define EXT4_STATE_FLAG_DA_ALLOC_CLOSE 0x00000008
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
@@ -679,6 +713,12 @@ enum {
#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
#endif
+/*
+ * Returned by EXT4_IOC_GET_ES_CACHE as an additional possible flag.
+ * It indicates that the entry in the extent status cache is for a hole.
+ */
+#define EXT4_FIEMAP_EXTENT_HOLE 0x08000000
+
/* Max physical block we can address w/o extents */
#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
@@ -808,31 +848,20 @@ static inline __le32 ext4_encode_extra_time(struct timespec64 *time)
static inline void ext4_decode_extra_time(struct timespec64 *time,
__le32 extra)
{
- if (unlikely(extra & cpu_to_le32(EXT4_EPOCH_MASK))) {
-
-#if 1
- /* Handle legacy encoding of pre-1970 dates with epoch
- * bits 1,1. (This backwards compatibility may be removed
- * at the discretion of the ext4 developers.)
- */
- u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
- if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
- extra_bits = 0;
- time->tv_sec += extra_bits << 32;
-#else
+ if (unlikely(extra & cpu_to_le32(EXT4_EPOCH_MASK)))
time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
-#endif
- }
time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
}
#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
do { \
- (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) {\
+ (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
(raw_inode)->xtime ## _extra = \
ext4_encode_extra_time(&(inode)->xtime); \
} \
+ else \
+ (raw_inode)->xtime = cpu_to_le32(clamp_t(int32_t, (inode)->xtime.tv_sec, S32_MIN, S32_MAX)); \
} while (0)
#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \
@@ -1421,7 +1450,7 @@ struct ext4_sb_info {
int s_jquota_fmt; /* Format of quota to use */
#endif
unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
- struct rb_root system_blks;
+ struct ext4_system_blocks __rcu *system_blks;
#ifdef EXTENTS_STATS
/* ext4 extents stats */
@@ -1555,11 +1584,11 @@ enum {
EXT4_STATE_NO_EXPAND, /* No space for expansion */
EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */
EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
- EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
EXT4_STATE_NEWENTRY, /* File just added to dir */
EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */
EXT4_STATE_EXT_PRECACHED, /* extents have been precached */
EXT4_STATE_LUSTRE_EA_INODE, /* Lustre-style ea_inode */
+ EXT4_STATE_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
};
#define EXT4_INODE_BIT_FNS(name, field, offset) \
@@ -1610,6 +1639,12 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_SB(sb) (sb)
#endif
+static inline bool ext4_verity_in_progress(struct inode *inode)
+{
+ return IS_ENABLED(CONFIG_FS_VERITY) &&
+ ext4_test_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
+}
+
#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
/*
@@ -1632,6 +1667,10 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_GOOD_OLD_INODE_SIZE 128
+#define EXT4_EXTRA_TIMESTAMP_MAX (((s64)1 << 34) - 1 + S32_MIN)
+#define EXT4_NON_EXTRA_TIMESTAMP_MAX S32_MAX
+#define EXT4_TIMESTAMP_MIN S32_MIN
+
/*
* Feature set definitions
*/
@@ -1643,6 +1682,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_FEATURE_COMPAT_RESIZE_INODE 0x0010
#define EXT4_FEATURE_COMPAT_DIR_INDEX 0x0020
#define EXT4_FEATURE_COMPAT_SPARSE_SUPER2 0x0200
+#define EXT4_FEATURE_COMPAT_STABLE_INODES 0x0800
#define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
#define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
@@ -1662,6 +1702,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400
#define EXT4_FEATURE_RO_COMPAT_READONLY 0x1000
#define EXT4_FEATURE_RO_COMPAT_PROJECT 0x2000
+#define EXT4_FEATURE_RO_COMPAT_VERITY 0x8000
#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001
#define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002
@@ -1743,6 +1784,7 @@ EXT4_FEATURE_COMPAT_FUNCS(xattr, EXT_ATTR)
EXT4_FEATURE_COMPAT_FUNCS(resize_inode, RESIZE_INODE)
EXT4_FEATURE_COMPAT_FUNCS(dir_index, DIR_INDEX)
EXT4_FEATURE_COMPAT_FUNCS(sparse_super2, SPARSE_SUPER2)
+EXT4_FEATURE_COMPAT_FUNCS(stable_inodes, STABLE_INODES)
EXT4_FEATURE_RO_COMPAT_FUNCS(sparse_super, SPARSE_SUPER)
EXT4_FEATURE_RO_COMPAT_FUNCS(large_file, LARGE_FILE)
@@ -1756,6 +1798,7 @@ EXT4_FEATURE_RO_COMPAT_FUNCS(bigalloc, BIGALLOC)
EXT4_FEATURE_RO_COMPAT_FUNCS(metadata_csum, METADATA_CSUM)
EXT4_FEATURE_RO_COMPAT_FUNCS(readonly, READONLY)
EXT4_FEATURE_RO_COMPAT_FUNCS(project, PROJECT)
+EXT4_FEATURE_RO_COMPAT_FUNCS(verity, VERITY)
EXT4_FEATURE_INCOMPAT_FUNCS(compression, COMPRESSION)
EXT4_FEATURE_INCOMPAT_FUNCS(filetype, FILETYPE)
@@ -1813,7 +1856,8 @@ EXT4_FEATURE_INCOMPAT_FUNCS(casefold, CASEFOLD)
EXT4_FEATURE_RO_COMPAT_BIGALLOC |\
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM|\
EXT4_FEATURE_RO_COMPAT_QUOTA |\
- EXT4_FEATURE_RO_COMPAT_PROJECT)
+ EXT4_FEATURE_RO_COMPAT_PROJECT |\
+ EXT4_FEATURE_RO_COMPAT_VERITY)
#define EXTN_FEATURE_FUNCS(ver) \
static inline bool ext4_has_unknown_ext##ver##_compat_features(struct super_block *sb) \
@@ -2522,8 +2566,6 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
int ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
-int ext4_dio_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create);
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create);
int ext4_walk_page_buffers(handle_t *handle,
@@ -2566,7 +2608,6 @@ extern int ext4_can_truncate(struct inode *inode);
extern int ext4_truncate(struct inode *);
extern int ext4_break_layouts(struct inode *);
extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
-extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
extern void ext4_set_inode_flags(struct inode *);
extern int ext4_alloc_da_blocks(struct inode *inode);
extern void ext4_set_aops(struct inode *inode);
@@ -3177,6 +3218,8 @@ static inline void ext4_set_de_type(struct super_block *sb,
extern int ext4_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
unsigned nr_pages, bool is_readahead);
+extern int __init ext4_init_post_read_processing(void);
+extern void ext4_exit_post_read_processing(void);
/* symlink.c */
extern const struct inode_operations ext4_encrypted_symlink_inode_operations;
@@ -3224,6 +3267,8 @@ extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
loff_t offset, ssize_t len);
+extern int ext4_convert_unwritten_io_end_vec(handle_t *handle,
+ ext4_io_end_t *io_end);
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
extern int ext4_ext_calc_metadata_amount(struct inode *inode,
@@ -3245,6 +3290,9 @@ extern int ext4_ext_check_inode(struct inode *inode);
extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
+extern int ext4_get_es_cache(struct inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len);
extern int ext4_ext_precache(struct inode *inode);
extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
extern int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
@@ -3253,6 +3301,10 @@ extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
ext4_lblk_t lblk2, ext4_lblk_t count,
int mark_unwritten,int *err);
extern int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu);
+extern int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
+ int check_cred, int restart_cred,
+ int revoke_cred);
+
/* move_extent.c */
extern void ext4_double_down_write_data_sem(struct inode *first,
@@ -3279,10 +3331,15 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
int len,
struct writeback_control *wbc,
bool keep_towrite);
+extern struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end);
+extern struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end);
/* mmp.c */
extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
+/* verity.c */
+extern const struct fsverity_operations ext4_verityops;
+
/*
* Add new method to test whether block and inode bitmaps are properly
* initialized. With uninit_bg reading the block from disk is not enough
@@ -3333,6 +3390,20 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
}
extern const struct iomap_ops ext4_iomap_ops;
+extern const struct iomap_ops ext4_iomap_report_ops;
+
+static inline int ext4_buffer_uptodate(struct buffer_head *bh)
+{
+ /*
+ * If the buffer has the write error flag, we failed to write out
+ * the data in this block. In that case, don't re-read the block
+ * from disk: the read could succeed and hand back the stale,
+ * old data.
+ */
+ if (!buffer_uptodate(bh) && buffer_write_io_error(bh))
+ set_buffer_uptodate(bh);
+ return buffer_uptodate(bh);
+}
#endif /* __KERNEL__ */
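
The EXT4_EXTRA_TIMESTAMP_MAX limit added above matches what ext4_decode_extra_time() can represent: two epoch bits shifted up by 32, added to a signed 32-bit tv_sec. A quick userspace check of that arithmetic (purely illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 2 epoch bits from the "extra" field extend the signed 32-bit seconds */
        int64_t epoch_bits_max = 3;                     /* EXT4_EPOCH_MASK */
        int64_t max_extra = (epoch_bits_max << 32) + INT32_MAX;
        int64_t max_formula = ((int64_t)1 << 34) - 1 + INT32_MIN;

        printf("%lld %lld\n", (long long)max_extra, (long long)max_formula);
        /* both print 15032385535, i.e. year 2446; without the extra field the
         * ceiling stays at INT32_MAX (2038) and the floor at INT32_MIN (1901) */
        return 0;
}
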
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 7c70b08d104c..d3b8cdea5df7 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -65,12 +65,14 @@ static int ext4_journal_check_start(struct super_block *sb)
}
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
- int type, int blocks, int rsv_blocks)
+ int type, int blocks, int rsv_blocks,
+ int revoke_creds)
{
journal_t *journal;
int err;
- trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_);
+ trace_ext4_journal_start(sb, blocks, rsv_blocks, revoke_creds,
+ _RET_IP_);
err = ext4_journal_check_start(sb);
if (err < 0)
return ERR_PTR(err);
@@ -78,8 +80,8 @@ handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
journal = EXT4_SB(sb)->s_journal;
if (!journal)
return ext4_get_nojournal();
- return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS,
- type, line);
+ return jbd2__journal_start(journal, blocks, rsv_blocks, revoke_creds,
+ GFP_NOFS, type, line);
}
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
@@ -119,8 +121,8 @@ handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
return ext4_get_nojournal();
sb = handle->h_journal->j_private;
- trace_ext4_journal_start_reserved(sb, handle->h_buffer_credits,
- _RET_IP_);
+ trace_ext4_journal_start_reserved(sb,
+ jbd2_handle_buffer_credits(handle), _RET_IP_);
err = ext4_journal_check_start(sb);
if (err < 0) {
jbd2_journal_free_reserved(handle);
@@ -133,6 +135,19 @@ handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
return handle;
}
+int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
+ int extend_cred, int revoke_cred)
+{
+ if (!ext4_handle_valid(handle))
+ return 0;
+ if (jbd2_handle_buffer_credits(handle) >= check_cred &&
+ handle->h_revoke_credits >= revoke_cred)
+ return 0;
+ extend_cred = max(0, extend_cred - jbd2_handle_buffer_credits(handle));
+ revoke_cred = max(0, revoke_cred - handle->h_revoke_credits);
+ return ext4_journal_extend(handle, extend_cred, revoke_cred);
+}
+
static void ext4_journal_abort_handle(const char *caller, unsigned int line,
const char *err_fn,
struct buffer_head *bh,
@@ -278,7 +293,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
handle->h_type,
handle->h_line_no,
handle->h_requested_credits,
- handle->h_buffer_credits, err);
+ jbd2_handle_buffer_credits(handle), err);
return err;
}
ext4_error_inode(inode, where, line,
@@ -289,7 +304,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
handle->h_type,
handle->h_line_no,
handle->h_requested_credits,
- handle->h_buffer_credits, err);
+ jbd2_handle_buffer_credits(handle),
+ err);
}
} else {
if (inode)
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index ef8fcf7d0d3b..a6b9b66dbfad 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -261,7 +261,8 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
- int type, int blocks, int rsv_blocks);
+ int type, int blocks, int rsv_blocks,
+ int revoke_creds);
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
@@ -288,28 +289,41 @@ static inline int ext4_handle_is_aborted(handle_t *handle)
return 0;
}
-static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
+static inline int ext4_free_metadata_revoke_credits(struct super_block *sb,
+ int blocks)
{
- if (ext4_handle_valid(handle) && handle->h_buffer_credits < needed)
- return 0;
- return 1;
+ /* Freeing each metadata block can result in freeing one cluster */
+ return blocks * EXT4_SB(sb)->s_cluster_ratio;
+}
+
+static inline int ext4_trans_default_revoke_credits(struct super_block *sb)
+{
+ return ext4_free_metadata_revoke_credits(sb, 8);
}
#define ext4_journal_start_sb(sb, type, nblocks) \
- __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0)
+ __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0, \
+ ext4_trans_default_revoke_credits(sb))
#define ext4_journal_start(inode, type, nblocks) \
- __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0)
+ __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0, \
+ ext4_trans_default_revoke_credits((inode)->i_sb))
+
+#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks)\
+ __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks),\
+ ext4_trans_default_revoke_credits((inode)->i_sb))
-#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks) \
- __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks))
+#define ext4_journal_start_with_revoke(inode, type, blocks, revoke_creds) \
+ __ext4_journal_start((inode), __LINE__, (type), (blocks), 0, \
+ (revoke_creds))
static inline handle_t *__ext4_journal_start(struct inode *inode,
unsigned int line, int type,
- int blocks, int rsv_blocks)
+ int blocks, int rsv_blocks,
+ int revoke_creds)
{
return __ext4_journal_start_sb(inode->i_sb, line, type, blocks,
- rsv_blocks);
+ rsv_blocks, revoke_creds);
}
#define ext4_journal_stop(handle) \
@@ -332,20 +346,68 @@ static inline handle_t *ext4_journal_current_handle(void)
return journal_current_handle();
}
-static inline int ext4_journal_extend(handle_t *handle, int nblocks)
+static inline int ext4_journal_extend(handle_t *handle, int nblocks, int revoke)
{
if (ext4_handle_valid(handle))
- return jbd2_journal_extend(handle, nblocks);
+ return jbd2_journal_extend(handle, nblocks, revoke);
return 0;
}
-static inline int ext4_journal_restart(handle_t *handle, int nblocks)
+static inline int ext4_journal_restart(handle_t *handle, int nblocks,
+ int revoke)
{
if (ext4_handle_valid(handle))
- return jbd2_journal_restart(handle, nblocks);
+ return jbd2__journal_restart(handle, nblocks, revoke, GFP_NOFS);
return 0;
}
+int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
+ int extend_cred, int revoke_cred);
+
+
+/*
+ * Ensure @handle has at least @check_cred credits available. If not, the
+ * transaction is extended or restarted so that it holds at least @extend_cred
+ * credits. Before the transaction is restarted, @fn is executed to allow for
+ * any necessary cleanup.
+ *
+ * The return value is < 0 on error, 0 if the handle already had enough
+ * credits or was successfully extended, and 1 if the transaction had to be
+ * restarted.
+ */
+#define ext4_journal_ensure_credits_fn(handle, check_cred, extend_cred, \
+ revoke_cred, fn) \
+({ \
+ __label__ __ensure_end; \
+ int err = __ext4_journal_ensure_credits((handle), (check_cred), \
+ (extend_cred), (revoke_cred)); \
+ \
+ if (err <= 0) \
+ goto __ensure_end; \
+ err = (fn); \
+ if (err < 0) \
+ goto __ensure_end; \
+ err = ext4_journal_restart((handle), (extend_cred), (revoke_cred)); \
+ if (err == 0) \
+ err = 1; \
+__ensure_end: \
+ err; \
+})
+
+/*
+ * Ensure the given handle has at least the requested number of credits
+ * available, restarting the transaction if necessary. We also make sure the
+ * transaction has space for at least ext4_trans_default_revoke_credits(sb)
+ * revoke records, since freeing one or two blocks is a very common pattern
+ * and requesting this is very cheap.
+ */
+static inline int ext4_journal_ensure_credits(handle_t *handle, int credits,
+ int revoke_creds)
+{
+ return ext4_journal_ensure_credits_fn(handle, credits, credits,
+ revoke_creds, 0);
+}
+
static inline int ext4_journal_blocks_per_page(struct inode *inode)
{
if (EXT4_JOURNAL(inode) != NULL)
@@ -407,6 +469,7 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
/* We do not support data journalling with delayed allocation */
if (!S_ISREG(inode->i_mode) ||
+ ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
(ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
!test_opt(inode->i_sb, DELALLOC))) {
@@ -437,6 +500,19 @@ static inline int ext4_should_writeback_data(struct inode *inode)
return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE;
}
+static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks)
+{
+ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+ return 0;
+ if (!ext4_should_journal_data(inode))
+ return 0;
+ /*
+ * Data blocks in one extent are contiguous; just account for partial
+ * clusters at extent boundaries.
+ */
+ return blocks + 2*(EXT4_SB(inode->i_sb)->s_cluster_ratio - 1);
+}
+
/*
* This function controls whether or not we should try to go down the
* dioread_nolock code paths, which makes it safe to avoid taking
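
Callers of the ext4_journal_ensure_credits_fn() macro above typically pass a cleanup expression that drops locks which must not be held across a journal restart, then re-take them only when the cleanup actually ran. A hedged skeleton of that calling convention, assuming it lives inside fs/ext4 where ext4.h and ext4_jbd2.h are included; example_drop_locks() and example_ensure_credits() are made-up names, the real instance is ext4_datasem_ensure_credits() in the extents.c hunk that follows:

/* Cleanup callback: the caller holds i_data_sem; release it so the
 * journal handle can be restarted safely, and remember that we did. */
static int example_drop_locks(struct inode *inode, int *dropped)
{
        up_write(&EXT4_I(inode)->i_data_sem);
        *dropped = 1;
        return 0;
}

static int example_ensure_credits(handle_t *handle, struct inode *inode,
                                  int needed, int revoke)
{
        int dropped = 0;
        int ret;

        ret = ext4_journal_ensure_credits_fn(handle, needed, needed, revoke,
                        example_drop_locks(inode, &dropped));
        if (dropped)            /* cleanup ran only if a restart was attempted */
                down_write(&EXT4_I(inode)->i_data_sem);
        return ret;             /* < 0 error, 0 continued, 1 restarted */
}
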
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 92266a2da7d6..0e8708b77da6 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -100,29 +100,41 @@ static int ext4_split_extent_at(handle_t *handle,
static int ext4_find_delayed_extent(struct inode *inode,
struct extent_status *newes);
-static int ext4_ext_truncate_extend_restart(handle_t *handle,
- struct inode *inode,
- int needed)
+static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
- int err;
-
- if (!ext4_handle_valid(handle))
- return 0;
- if (handle->h_buffer_credits >= needed)
- return 0;
/*
- * If we need to extend the journal get a few extra blocks
- * while we're at it for efficiency's sake.
+ * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
+ * moment, get_block can be called only for blocks inside i_size since
+ * page cache has already been dropped and writes are blocked by
+ * i_mutex. So we can safely drop the i_data_sem here.
*/
- needed += 3;
- err = ext4_journal_extend(handle, needed - handle->h_buffer_credits);
- if (err <= 0)
- return err;
- err = ext4_truncate_restart_trans(handle, inode, needed);
- if (err == 0)
- err = -EAGAIN;
+ BUG_ON(EXT4_JOURNAL(inode) == NULL);
+ ext4_discard_preallocations(inode);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ *dropped = 1;
+ return 0;
+}
- return err;
+/*
+ * Make sure 'handle' has at least 'check_cred' credits. If not, restart the
+ * transaction with 'restart_cred' credits. The function drops i_data_sem
+ * while the transaction is being restarted and reacquires it afterwards.
+ *
+ * The function returns 0 on success, 1 if the transaction had to be
+ * restarted, and < 0 on fatal error.
+ */
+int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
+ int check_cred, int restart_cred,
+ int revoke_cred)
+{
+ int ret;
+ int dropped = 0;
+
+ ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
+ revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
+ if (dropped)
+ down_write(&EXT4_I(inode)->i_data_sem);
+ return ret;
}
/*
@@ -1753,16 +1765,9 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
*/
if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
return 0;
- /*
- * The check for IO to unwritten extent is somewhat racy as we
- * increment i_unwritten / set EXT4_STATE_DIO_UNWRITTEN only after
- * dropping i_data_sem. But reserved blocks should save us in that
- * case.
- */
+
if (ext4_ext_is_unwritten(ex1) &&
- (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
- atomic_read(&EXT4_I(inode)->i_unwritten) ||
- (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
+ ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
return 0;
#ifdef AGGRESSIVE_TEST
if (ext1_ee_len >= 4)
@@ -1840,7 +1845,8 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
* group descriptor to release the extent tree block. If we
* can't get the journal credits, give up.
*/
- if (ext4_journal_extend(handle, 2))
+ if (ext4_journal_extend(handle, 2,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
return;
/*
@@ -2315,6 +2321,52 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
return err;
}
+static int ext4_fill_es_cache_info(struct inode *inode,
+ ext4_lblk_t block, ext4_lblk_t num,
+ struct fiemap_extent_info *fieinfo)
+{
+ ext4_lblk_t next, end = block + num - 1;
+ struct extent_status es;
+ unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
+ unsigned int flags;
+ int err;
+
+ while (block <= end) {
+ next = 0;
+ flags = 0;
+ if (!ext4_es_lookup_extent(inode, block, &next, &es))
+ break;
+ if (ext4_es_is_unwritten(&es))
+ flags |= FIEMAP_EXTENT_UNWRITTEN;
+ if (ext4_es_is_delayed(&es))
+ flags |= (FIEMAP_EXTENT_DELALLOC |
+ FIEMAP_EXTENT_UNKNOWN);
+ if (ext4_es_is_hole(&es))
+ flags |= EXT4_FIEMAP_EXTENT_HOLE;
+ if (next == 0)
+ flags |= FIEMAP_EXTENT_LAST;
+ if (flags & (FIEMAP_EXTENT_DELALLOC|
+ EXT4_FIEMAP_EXTENT_HOLE))
+ es.es_pblk = 0;
+ else
+ es.es_pblk = ext4_es_pblock(&es);
+ err = fiemap_fill_next_extent(fieinfo,
+ (__u64)es.es_lblk << blksize_bits,
+ (__u64)es.es_pblk << blksize_bits,
+ (__u64)es.es_len << blksize_bits,
+ flags);
+ if (next == 0)
+ break;
+ block = next;
+ if (err < 0)
+ return err;
+ if (err == 1)
+ return 0;
+ }
+ return 0;
+}
+
+
/*
* ext4_ext_determine_hole - determine hole around given block
* @inode: inode we lookup in
@@ -2681,7 +2733,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int err = 0, correct_index = 0;
- int depth = ext_depth(inode), credits;
+ int depth = ext_depth(inode), credits, revoke_credits;
struct ext4_extent_header *eh;
ext4_lblk_t a, b;
unsigned num;
@@ -2773,10 +2825,23 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
credits += (ext_depth(inode)) + 1;
}
credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
-
- err = ext4_ext_truncate_extend_restart(handle, inode, credits);
- if (err)
+ /*
+ * We may end up freeing some index blocks and data from the
+ * punched range. Note that partial clusters are accounted for
+ * by ext4_free_data_revoke_credits().
+ */
+ revoke_credits =
+ ext4_free_metadata_revoke_credits(inode->i_sb,
+ ext_depth(inode)) +
+ ext4_free_data_revoke_credits(inode, b - a + 1);
+
+ err = ext4_datasem_ensure_credits(handle, inode, credits,
+ credits, revoke_credits);
+ if (err) {
+ if (err > 0)
+ err = -EAGAIN;
goto out;
+ }
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
@@ -2902,7 +2967,9 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
ext_debug("truncate since %u to %u\n", start, end);
/* probably first extent we're gonna free will be last in block */
- handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
+ handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
+ depth + 1,
+ ext4_free_metadata_revoke_credits(inode->i_sb, depth));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -3813,8 +3880,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
* illegal.
*/
if (ee_block != map->m_lblk || ee_len > map->m_len) {
-#ifdef EXT4_DEBUG
- ext4_warning("Inode (%ld) finished: extent logical block %llu,"
+#ifdef CONFIG_EXT4_DEBUG
+ ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
" len %u; IO logical block %llu, len %u",
inode->i_ino, (unsigned long long)ee_block, ee_len,
(unsigned long long)map->m_lblk, map->m_len);
@@ -4916,23 +4983,13 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
int ret = 0;
int ret2 = 0;
struct ext4_map_blocks map;
- unsigned int credits, blkbits = inode->i_blkbits;
+ unsigned int blkbits = inode->i_blkbits;
+ unsigned int credits = 0;
map.m_lblk = offset >> blkbits;
max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
- /*
- * This is somewhat ugly but the idea is clear: When transaction is
- * reserved, everything goes into it. Otherwise we rather start several
- * smaller transactions for conversion of each extent separately.
- */
- if (handle) {
- handle = ext4_journal_start_reserved(handle,
- EXT4_HT_EXT_CONVERT);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- credits = 0;
- } else {
+ if (!handle) {
/*
* credits to insert 1 extent into extent tree
*/
@@ -4963,11 +5020,40 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
if (ret <= 0 || ret2)
break;
}
- if (!credits)
- ret2 = ext4_journal_stop(handle);
return ret > 0 ? ret2 : ret;
}
+int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
+{
+ int ret, err = 0;
+ struct ext4_io_end_vec *io_end_vec;
+
+ /*
+ * This is somewhat ugly but the idea is clear: when a transaction is
+ * reserved, everything goes into it. Otherwise we prefer to start several
+ * smaller transactions, converting each extent separately.
+ */
+ if (handle) {
+ handle = ext4_journal_start_reserved(handle,
+ EXT4_HT_EXT_CONVERT);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ }
+
+ list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
+ ret = ext4_convert_unwritten_extents(handle, io_end->inode,
+ io_end_vec->offset,
+ io_end_vec->size);
+ if (ret)
+ break;
+ }
+
+ if (handle)
+ err = ext4_journal_stop(handle);
+
+ return ret < 0 ? ret : err;
+}
+
/*
* If newes is not existing extent (newes->ec_pblk equals zero) find
* delayed extent at start of newes and update newes accordingly and
@@ -5017,8 +5103,6 @@ static int ext4_find_delayed_extent(struct inode *inode,
return next_del;
}
-/* fiemap flags we can handle specified here */
-#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
static int ext4_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
@@ -5055,10 +5139,16 @@ static int ext4_xattr_fiemap(struct inode *inode,
return (error < 0 ? error : 0);
}
-int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len)
+static int _ext4_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len,
+ int (*fill)(struct inode *, ext4_lblk_t,
+ ext4_lblk_t,
+ struct fiemap_extent_info *))
{
ext4_lblk_t start_blk;
+ u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR;
+
int error = 0;
if (ext4_has_inline_data(inode)) {
@@ -5075,14 +5165,18 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
error = ext4_ext_precache(inode);
if (error)
return error;
+ fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
}
/* fallback to generic here if not in extents fmt */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
+ fill == ext4_fill_fiemap_extents)
return generic_block_fiemap(inode, fieinfo, start, len,
ext4_get_block);
- if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
+ if (fill == ext4_fill_es_cache_info)
+ ext4_fiemap_flags &= FIEMAP_FLAG_XATTR;
+ if (fiemap_check_flags(fieinfo, ext4_fiemap_flags))
return -EBADR;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
@@ -5101,12 +5195,36 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
* Walk the extent tree gathering extent information
* and pushing extents back to the user.
*/
- error = ext4_fill_fiemap_extents(inode, start_blk,
- len_blks, fieinfo);
+ error = fill(inode, start_blk, len_blks, fieinfo);
}
return error;
}
+int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len)
+{
+ return _ext4_fiemap(inode, fieinfo, start, len,
+ ext4_fill_fiemap_extents);
+}
+
+int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len)
+{
+ if (ext4_has_inline_data(inode)) {
+ int has_inline;
+
+ down_read(&EXT4_I(inode)->xattr_sem);
+ has_inline = ext4_has_inline_data(inode);
+ up_read(&EXT4_I(inode)->xattr_sem);
+ if (has_inline)
+ return 0;
+ }
+
+ return _ext4_fiemap(inode, fieinfo, start, len,
+ ext4_fill_es_cache_info);
+}
+
+
/*
* ext4_access_path:
* Function to access the path buffer for marking it dirty.
@@ -5128,13 +5246,10 @@ ext4_access_path(handle_t *handle, struct inode *inode,
* descriptor) for each block group; assume two block
* groups
*/
- if (handle->h_buffer_credits < 7) {
- credits = ext4_writepage_trans_blocks(inode);
- err = ext4_ext_truncate_extend_restart(handle, inode, credits);
- /* EAGAIN is success */
- if (err && err != -EAGAIN)
- return err;
- }
+ credits = ext4_writepage_trans_blocks(inode);
+ err = ext4_datasem_ensure_credits(handle, inode, 7, credits, 0);
+ if (err < 0)
+ return err;
err = ext4_ext_get_access(handle, inode, path);
return err;
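
The reserved-cluster accounting in the extents_status.c changes that follow relies on the bigalloc cluster macros added to ext4.h earlier in this diff: EXT4_LBLK_COFF() gives a block's offset within its cluster and EXT4_LBLK_CFILL() fills the low bits to reach the cluster's last block. A quick userspace illustration with a cluster ratio of 16 (values chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ratio = 16;            /* s_cluster_ratio on a bigalloc fs */
        uint32_t lblk = 35;

        uint32_t coff   = lblk & (ratio - 1);   /* EXT4_LBLK_COFF  -> 3  */
        uint32_t cfill  = lblk | (ratio - 1);   /* EXT4_LBLK_CFILL -> 47 */
        uint32_t cstart = lblk & ~(ratio - 1);  /* EXT4_LBLK_CMASK -> 32 */

        printf("offset in cluster %u, cluster spans %u..%u\n",
               coff, cstart, cfill);
        return 0;
}
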
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 7521de2dcf3a..d996b44d2265 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -146,7 +146,7 @@ static struct kmem_cache *ext4_pending_cachep;
static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t end);
+ ext4_lblk_t end, int *reserved);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
struct ext4_inode_info *locked_ei);
@@ -836,7 +836,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_es_insert_extent_check(inode, &newes);
write_lock(&EXT4_I(inode)->i_es_lock);
- err = __es_remove_extent(inode, lblk, end);
+ err = __es_remove_extent(inode, lblk, end, NULL);
if (err != 0)
goto error;
retry:
@@ -899,6 +899,7 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
* Return: 1 on found, 0 on not
*/
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t *next_lblk,
struct extent_status *es)
{
struct ext4_es_tree *tree;
@@ -947,9 +948,18 @@ out:
es->es_pblk = es1->es_pblk;
if (!ext4_es_is_referenced(es1))
ext4_es_set_referenced(es1);
- stats->es_stats_cache_hits++;
+ percpu_counter_inc(&stats->es_stats_cache_hits);
+ if (next_lblk) {
+ node = rb_next(&es1->rb_node);
+ if (node) {
+ es1 = rb_entry(node, struct extent_status,
+ rb_node);
+ *next_lblk = es1->es_lblk;
+ } else
+ *next_lblk = 0;
+ }
} else {
- stats->es_stats_cache_misses++;
+ percpu_counter_inc(&stats->es_stats_cache_misses);
}
read_unlock(&EXT4_I(inode)->i_es_lock);
@@ -958,8 +968,322 @@ out:
return found;
}
+struct rsvd_count {
+ int ndelonly;
+ bool first_do_lblk_found;
+ ext4_lblk_t first_do_lblk;
+ ext4_lblk_t last_do_lblk;
+ struct extent_status *left_es;
+ bool partial;
+ ext4_lblk_t lclu;
+};
+
+/*
+ * init_rsvd - initialize reserved count data before removing block range
+ * in file from extent status tree
+ *
+ * @inode - file containing range
+ * @lblk - first block in range
+ * @es - pointer to first extent in range
+ * @rc - pointer to reserved count data
+ *
+ * Assumes es is not NULL
+ */
+static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status *es, struct rsvd_count *rc)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct rb_node *node;
+
+ rc->ndelonly = 0;
+
+ /*
+ * for bigalloc: note that the first delonly block in the range has not
+ * yet been found, record the extent containing the block to the left of
+ * the region to be removed (if any), and note that there is no partial
+ * cluster to track
+ */
+ if (sbi->s_cluster_ratio > 1) {
+ rc->first_do_lblk_found = false;
+ if (lblk > es->es_lblk) {
+ rc->left_es = es;
+ } else {
+ node = rb_prev(&es->rb_node);
+ rc->left_es = node ? rb_entry(node,
+ struct extent_status,
+ rb_node) : NULL;
+ }
+ rc->partial = false;
+ }
+}
+
+/*
+ * count_rsvd - count the clusters containing delayed and not unwritten
+ * (delonly) blocks in a range within an extent and add to
+ * the running tally in rsvd_count
+ *
+ * @inode - file containing extent
+ * @lblk - first block in range
+ * @len - length of range in blocks
+ * @es - pointer to extent containing clusters to be counted
+ * @rc - pointer to reserved count data
+ *
+ * Tracks partial clusters found at the beginning and end of extents so
+ * they aren't overcounted when they span adjacent extents
+ */
+static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
+ struct extent_status *es, struct rsvd_count *rc)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t i, end, nclu;
+
+ if (!ext4_es_is_delonly(es))
+ return;
+
+ WARN_ON(len <= 0);
+
+ if (sbi->s_cluster_ratio == 1) {
+ rc->ndelonly += (int) len;
+ return;
+ }
+
+ /* bigalloc */
+
+ i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
+ end = lblk + (ext4_lblk_t) len - 1;
+ end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;
+
+ /* record the first block of the first delonly extent seen */
+ if (rc->first_do_lblk_found == false) {
+ rc->first_do_lblk = i;
+ rc->first_do_lblk_found = true;
+ }
+
+ /* update the last lblk in the region seen so far */
+ rc->last_do_lblk = end;
+
+ /*
+ * if we're tracking a partial cluster and the current extent
+ * doesn't start with it, count it and stop tracking
+ */
+ if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
+ rc->ndelonly++;
+ rc->partial = false;
+ }
+
+ /*
+ * if the first cluster doesn't start on a cluster boundary but
+ * ends on one, count it
+ */
+ if (EXT4_LBLK_COFF(sbi, i) != 0) {
+ if (end >= EXT4_LBLK_CFILL(sbi, i)) {
+ rc->ndelonly++;
+ rc->partial = false;
+ i = EXT4_LBLK_CFILL(sbi, i) + 1;
+ }
+ }
+
+ /*
+ * if the current cluster starts on a cluster boundary, count the
+ * number of whole delonly clusters in the extent
+ */
+ if ((i + sbi->s_cluster_ratio - 1) <= end) {
+ nclu = (end - i + 1) >> sbi->s_cluster_bits;
+ rc->ndelonly += nclu;
+ i += nclu << sbi->s_cluster_bits;
+ }
+
+ /*
+ * start tracking a partial cluster if there's a partial at the end
+ * of the current extent and we're not already tracking one
+ */
+ if (!rc->partial && i <= end) {
+ rc->partial = true;
+ rc->lclu = EXT4_B2C(sbi, i);
+ }
+}
+
+/*
+ * __pr_tree_search - search for a pending cluster reservation
+ *
+ * @root - root of pending reservation tree
+ * @lclu - logical cluster to search for
+ *
+ * Returns the pending reservation for the cluster identified by @lclu
+ * if found. If not, returns a reservation for the next cluster if any,
+ * and if not, returns NULL.
+ */
+static struct pending_reservation *__pr_tree_search(struct rb_root *root,
+ ext4_lblk_t lclu)
+{
+ struct rb_node *node = root->rb_node;
+ struct pending_reservation *pr = NULL;
+
+ while (node) {
+ pr = rb_entry(node, struct pending_reservation, rb_node);
+ if (lclu < pr->lclu)
+ node = node->rb_left;
+ else if (lclu > pr->lclu)
+ node = node->rb_right;
+ else
+ return pr;
+ }
+ if (pr && lclu < pr->lclu)
+ return pr;
+ if (pr && lclu > pr->lclu) {
+ node = rb_next(&pr->rb_node);
+ return node ? rb_entry(node, struct pending_reservation,
+ rb_node) : NULL;
+ }
+ return NULL;
+}
+
+/*
+ * get_rsvd - calculates and returns the number of cluster reservations to be
+ * released when removing a block range from the extent status tree
+ * and releases any pending reservations within the range
+ *
+ * @inode - file containing block range
+ * @end - last block in range
+ * @right_es - pointer to extent containing next block beyond end or NULL
+ * @rc - pointer to reserved count data
+ *
+ * The number of reservations to be released is equal to the number of
+ * clusters containing delayed and not unwritten (delonly) blocks within
+ * the range, minus the number of clusters still containing delonly blocks
+ * at the ends of the range, and minus the number of pending reservations
+ * within the range.
+ */
+static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+ struct extent_status *right_es,
+ struct rsvd_count *rc)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct pending_reservation *pr;
+ struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+ struct rb_node *node;
+ ext4_lblk_t first_lclu, last_lclu;
+ bool left_delonly, right_delonly, count_pending;
+ struct extent_status *es;
+
+ if (sbi->s_cluster_ratio > 1) {
+ /* count any remaining partial cluster */
+ if (rc->partial)
+ rc->ndelonly++;
+
+ if (rc->ndelonly == 0)
+ return 0;
+
+ first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
+ last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);
+
+ /*
+ * decrease the delonly count by the number of clusters at the
+ * ends of the range that still contain delonly blocks -
+ * these clusters still need to be reserved
+ */
+ left_delonly = right_delonly = false;
+
+ es = rc->left_es;
+ while (es && ext4_es_end(es) >=
+ EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
+ if (ext4_es_is_delonly(es)) {
+ rc->ndelonly--;
+ left_delonly = true;
+ break;
+ }
+ node = rb_prev(&es->rb_node);
+ if (!node)
+ break;
+ es = rb_entry(node, struct extent_status, rb_node);
+ }
+ if (right_es && (!left_delonly || first_lclu != last_lclu)) {
+ if (end < ext4_es_end(right_es)) {
+ es = right_es;
+ } else {
+ node = rb_next(&right_es->rb_node);
+ es = node ? rb_entry(node, struct extent_status,
+ rb_node) : NULL;
+ }
+ while (es && es->es_lblk <=
+ EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
+ if (ext4_es_is_delonly(es)) {
+ rc->ndelonly--;
+ right_delonly = true;
+ break;
+ }
+ node = rb_next(&es->rb_node);
+ if (!node)
+ break;
+ es = rb_entry(node, struct extent_status,
+ rb_node);
+ }
+ }
+
+ /*
+ * Determine the block range that should be searched for
+ * pending reservations, if any. Clusters on the ends of the
+ * original removed range containing delonly blocks are
+ * excluded. They've already been accounted for and it's not
+ * possible to determine if an associated pending reservation
+ * should be released with the information available in the
+ * extents status tree.
+ */
+ if (first_lclu == last_lclu) {
+ if (left_delonly | right_delonly)
+ count_pending = false;
+ else
+ count_pending = true;
+ } else {
+ if (left_delonly)
+ first_lclu++;
+ if (right_delonly)
+ last_lclu--;
+ if (first_lclu <= last_lclu)
+ count_pending = true;
+ else
+ count_pending = false;
+ }
+
+ /*
+ * a pending reservation found between first_lclu and last_lclu
+ * represents an allocated cluster that contained at least one
+ * delonly block, so the delonly total must be reduced by one
+ * for each pending reservation found and released
+ */
+ if (count_pending) {
+ pr = __pr_tree_search(&tree->root, first_lclu);
+ while (pr && pr->lclu <= last_lclu) {
+ rc->ndelonly--;
+ node = rb_next(&pr->rb_node);
+ rb_erase(&pr->rb_node, &tree->root);
+ kmem_cache_free(ext4_pending_cachep, pr);
+ if (!node)
+ break;
+ pr = rb_entry(node, struct pending_reservation,
+ rb_node);
+ }
+ }
+ }
+ return rc->ndelonly;
+}
+
+
+/*
+ * __es_remove_extent - removes block range from extent status tree
+ *
+ * @inode - file containing range
+ * @lblk - first block in range
+ * @end - last block in range
+ * @reserved - number of cluster reservations released
+ *
+ * If @reserved is not NULL and delayed allocation is enabled, counts
+ * block/cluster reservations freed by removing range and if bigalloc
+ * enabled cancels pending reservations as needed. Returns 0 on success,
+ * error code on failure.
+ */
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t end)
+ ext4_lblk_t end, int *reserved)
{
struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct rb_node *node;
@@ -968,9 +1292,14 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len1, len2;
ext4_fsblk_t block;
int err;
+ bool count_reserved = true;
+ struct rsvd_count rc;
+ if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
+ count_reserved = false;
retry:
err = 0;
+
es = __es_tree_search(&tree->root, lblk);
if (!es)
goto out;
@@ -979,6 +1308,8 @@ retry:
/* Simply invalidate cache_es. */
tree->cache_es = NULL;
+ if (count_reserved)
+ init_rsvd(inode, lblk, es, &rc);
orig_es.es_lblk = es->es_lblk;
orig_es.es_len = es->es_len;
@@ -1020,10 +1351,16 @@ retry:
ext4_es_store_pblock(es, block);
}
}
+ if (count_reserved)
+ count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
+ &orig_es, &rc);
goto out;
}
if (len1 > 0) {
+ if (count_reserved)
+ count_rsvd(inode, lblk, orig_es.es_len - len1,
+ &orig_es, &rc);
node = rb_next(&es->rb_node);
if (node)
es = rb_entry(node, struct extent_status, rb_node);
@@ -1032,6 +1369,8 @@ retry:
}
while (es && ext4_es_end(es) <= end) {
+ if (count_reserved)
+ count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
node = rb_next(&es->rb_node);
rb_erase(&es->rb_node, &tree->root);
ext4_es_free_extent(inode, es);
@@ -1046,6 +1385,9 @@ retry:
ext4_lblk_t orig_len = es->es_len;
len1 = ext4_es_end(es) - end;
+ if (count_reserved)
+ count_rsvd(inode, es->es_lblk, orig_len - len1,
+ es, &rc);
es->es_lblk = end + 1;
es->es_len = len1;
if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
@@ -1054,20 +1396,28 @@ retry:
}
}
+ if (count_reserved)
+ *reserved = get_rsvd(inode, end, es, &rc);
out:
return err;
}
/*
- * ext4_es_remove_extent() removes a space from a extent status tree.
+ * ext4_es_remove_extent - removes block range from extent status tree
*
- * Return 0 on success, error code on failure.
+ * @inode - file containing range
+ * @lblk - first block in range
+ * @len - number of blocks to remove
+ *
+ * Reduces the block/cluster reservation count and, for bigalloc, cancels
+ * pending reservations as needed. Returns 0 on success, error code on failure.
*/
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len)
{
ext4_lblk_t end;
int err = 0;
+ int reserved = 0;
trace_ext4_es_remove_extent(inode, lblk, len);
es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
@@ -1085,9 +1435,10 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
* is reclaimed.
*/
write_lock(&EXT4_I(inode)->i_es_lock);
- err = __es_remove_extent(inode, lblk, end);
+ err = __es_remove_extent(inode, lblk, end, &reserved);
write_unlock(&EXT4_I(inode)->i_es_lock);
ext4_es_print_tree(inode);
+ ext4_da_release_space(inode, reserved);
return err;
}
@@ -1235,9 +1586,9 @@ int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n",
percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
- seq_printf(seq, " %lu/%lu cache hits/misses\n",
- es_stats->es_stats_cache_hits,
- es_stats->es_stats_cache_misses);
+ seq_printf(seq, " %lld/%lld cache hits/misses\n",
+ percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
+ percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
if (inode_cnt)
seq_printf(seq, " %d inodes on list\n", inode_cnt);
@@ -1264,35 +1615,46 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
sbi->s_es_nr_inode = 0;
spin_lock_init(&sbi->s_es_lock);
sbi->s_es_stats.es_stats_shrunk = 0;
- sbi->s_es_stats.es_stats_cache_hits = 0;
- sbi->s_es_stats.es_stats_cache_misses = 0;
+ err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
+ GFP_KERNEL);
+ if (err)
+ return err;
+ err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
+ GFP_KERNEL);
+ if (err)
+ goto err1;
sbi->s_es_stats.es_stats_scan_time = 0;
sbi->s_es_stats.es_stats_max_scan_time = 0;
err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
if (err)
- return err;
+ goto err2;
err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
if (err)
- goto err1;
+ goto err3;
sbi->s_es_shrinker.scan_objects = ext4_es_scan;
sbi->s_es_shrinker.count_objects = ext4_es_count;
sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
err = register_shrinker(&sbi->s_es_shrinker);
if (err)
- goto err2;
+ goto err4;
return 0;
-
-err2:
+err4:
percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
-err1:
+err3:
percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
+err2:
+ percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
+err1:
+ percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
return err;
}
void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
+ percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
+ percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
unregister_shrinker(&sbi->s_es_shrinker);
@@ -1317,6 +1679,7 @@ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
if (!es)
goto out_wrap;
+
while (*nr_to_scan > 0) {
if (es->es_lblk > end) {
ei->i_es_shrink_lblk = end + 1;
@@ -1374,6 +1737,34 @@ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
return nr_shrunk;
}
+/*
+ * Called to support EXT4_IOC_CLEAR_ES_CACHE. We can only remove
+ * discretionary entries from the extent status cache. (Some entries
+ * must be present for proper operations.)
+ */
+void ext4_clear_inode_es(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct extent_status *es;
+ struct ext4_es_tree *tree;
+ struct rb_node *node;
+
+ write_lock(&ei->i_es_lock);
+ tree = &EXT4_I(inode)->i_es_tree;
+ tree->cache_es = NULL;
+ node = rb_first(&tree->root);
+ while (node) {
+ es = rb_entry(node, struct extent_status, rb_node);
+ node = rb_next(node);
+ if (!ext4_es_is_delayed(es)) {
+ rb_erase(&es->rb_node, &tree->root);
+ ext4_es_free_extent(inode, es);
+ }
+ }
+ ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
+ write_unlock(&ei->i_es_lock);
+}
+
#ifdef ES_DEBUG__
static void ext4_print_pending_tree(struct inode *inode)
{
@@ -1590,7 +1981,7 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
write_lock(&EXT4_I(inode)->i_es_lock);
- err = __es_remove_extent(inode, lblk, lblk);
+ err = __es_remove_extent(inode, lblk, lblk, NULL);
if (err != 0)
goto error;
retry:
@@ -1779,93 +2170,3 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
__remove_pending(inode, last);
}
}
-
-/*
- * ext4_es_remove_blks - remove block range from extents status tree and
- * reduce reservation count or cancel pending
- * reservation as needed
- *
- * @inode - file containing range
- * @lblk - first block in range
- * @len - number of blocks to remove
- *
- */
-void ext4_es_remove_blks(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len)
-{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned int clu_size, reserved = 0;
- ext4_lblk_t last_lclu, first, length, remainder, last;
- bool delonly;
- int err = 0;
- struct pending_reservation *pr;
- struct ext4_pending_tree *tree;
-
- /*
- * Process cluster by cluster for bigalloc - there may be up to
- * two clusters in a 4k page with a 1k block size and two blocks
- * per cluster. Also necessary for systems with larger page sizes
- * and potentially larger block sizes.
- */
- clu_size = sbi->s_cluster_ratio;
- last_lclu = EXT4_B2C(sbi, lblk + len - 1);
-
- write_lock(&EXT4_I(inode)->i_es_lock);
-
- for (first = lblk, remainder = len;
- remainder > 0;
- first += length, remainder -= length) {
-
- if (EXT4_B2C(sbi, first) == last_lclu)
- length = remainder;
- else
- length = clu_size - EXT4_LBLK_COFF(sbi, first);
-
- /*
- * The BH_Delay flag, which triggers calls to this function,
- * and the contents of the extents status tree can be
- * inconsistent due to writepages activity. So, note whether
- * the blocks to be removed actually belong to an extent with
- * delayed only status.
- */
- delonly = __es_scan_clu(inode, &ext4_es_is_delonly, first);
-
- /*
- * because of the writepages effect, written and unwritten
- * blocks could be removed here
- */
- last = first + length - 1;
- err = __es_remove_extent(inode, first, last);
- if (err)
- ext4_warning(inode->i_sb,
- "%s: couldn't remove page (err = %d)",
- __func__, err);
-
- /* non-bigalloc case: simply count the cluster for release */
- if (sbi->s_cluster_ratio == 1 && delonly) {
- reserved++;
- continue;
- }
-
- /*
- * bigalloc case: if all delayed allocated only blocks have
- * just been removed from a cluster, either cancel a pending
- * reservation if it exists or count a cluster for release
- */
- if (delonly &&
- !__es_scan_clu(inode, &ext4_es_is_delonly, first)) {
- pr = __get_pending(inode, EXT4_B2C(sbi, first));
- if (pr != NULL) {
- tree = &EXT4_I(inode)->i_pending_tree;
- rb_erase(&pr->rb_node, &tree->root);
- kmem_cache_free(ext4_pending_cachep, pr);
- } else {
- reserved++;
- }
- }
- }
-
- write_unlock(&EXT4_I(inode)->i_es_lock);
-
- ext4_da_release_space(inode, reserved);
-}
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 131a8b7df265..825313c59752 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -70,8 +70,8 @@ struct ext4_es_tree {
struct ext4_es_stats {
unsigned long es_stats_shrunk;
- unsigned long es_stats_cache_hits;
- unsigned long es_stats_cache_misses;
+ struct percpu_counter es_stats_cache_hits;
+ struct percpu_counter es_stats_cache_misses;
u64 es_stats_scan_time;
u64 es_stats_max_scan_time;
struct percpu_counter es_stats_all_cnt;
@@ -140,6 +140,7 @@ extern void ext4_es_find_extent_range(struct inode *inode,
ext4_lblk_t lblk, ext4_lblk_t end,
struct extent_status *es);
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t *next_lblk,
struct extent_status *es);
extern bool ext4_es_scan_range(struct inode *inode,
int (*matching_fn)(struct extent_status *es),
@@ -246,7 +247,6 @@ extern int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
bool allocated);
extern unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len);
-extern void ext4_es_remove_blks(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len);
+extern void ext4_clear_inode_es(struct inode *inode);
#endif /* _EXT4_EXTENTS_STATUS_H */
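The hit/miss statistics converted above follow the usual percpu_counter lifecycle: init can fail and must be paired with destroy, increments stay CPU-local, and totals are only folded together at read time, which is why the seq_file output now uses percpu_counter_sum_positive(). A minimal sketch of that pattern, with illustrative names rather than the fields from this patch:

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static struct percpu_counter demo_hits;		/* illustrative counter */

static int demo_setup(void)
{
	/* allocates per-CPU storage, so the return value must be checked */
	return percpu_counter_init(&demo_hits, 0, GFP_KERNEL);
}

static void demo_record_hit(void)
{
	/* cheap increment on the local CPU, no shared counter contention */
	percpu_counter_inc(&demo_hits);
}

static s64 demo_total(void)
{
	/* fold the per-CPU deltas into one non-negative sum for reporting */
	return percpu_counter_sum_positive(&demo_hits);
}

static void demo_teardown(void)
{
	percpu_counter_destroy(&demo_hits);
}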
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 70b0438dbc94..6a7293a5cda2 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -29,10 +29,58 @@
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
+#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
+#include "truncate.h"
+
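+/*
+ * Returns true if the inode can use iomap-based direct I/O; encrypted,
+ * verity, data-journalled and inline-data inodes must fall back to
+ * buffered I/O instead.
+ */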
+static bool ext4_dio_supported(struct inode *inode)
+{
+ if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
+ return false;
+ if (fsverity_active(inode))
+ return false;
+ if (ext4_should_journal_data(inode))
+ return false;
+ if (ext4_has_inline_data(inode))
+ return false;
+ return true;
+}
+
+static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ ssize_t ret;
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock_shared(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock_shared(inode);
+ }
+
+ if (!ext4_dio_supported(inode)) {
+ inode_unlock_shared(inode);
+ /*
+ * Fall back to buffered I/O if the operation being performed on
+ * the inode is not supported by direct I/O. The IOCB_DIRECT
+ * flag needs to be cleared here in order to ensure that the
+ * direct I/O path within generic_file_read_iter() is not
+ * taken.
+ */
+ iocb->ki_flags &= ~IOCB_DIRECT;
+ return generic_file_read_iter(iocb, to);
+ }
+
+ ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
+ is_sync_kiocb(iocb));
+ inode_unlock_shared(inode);
+
+ file_accessed(iocb->ki_filp);
+ return ret;
+}
#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -64,16 +112,21 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
if (!iov_iter_count(to))
return 0; /* skip atime */
#ifdef CONFIG_FS_DAX
- if (IS_DAX(file_inode(iocb->ki_filp)))
+ if (IS_DAX(inode))
return ext4_dax_read_iter(iocb, to);
#endif
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return ext4_dio_read_iter(iocb, to);
+
return generic_file_read_iter(iocb, to);
}
@@ -103,13 +156,6 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
return 0;
}
-static void ext4_unwritten_wait(struct inode *inode)
-{
- wait_queue_head_t *wq = ext4_ioend_wq(inode);
-
- wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
-}
-
/*
* This tests whether the IO in question is block-aligned or not.
* Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
@@ -162,13 +208,13 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
ret = generic_write_checks(iocb, from);
if (ret <= 0)
return ret;
- if (unlikely(IS_IMMUTABLE(inode)))
- return -EPERM;
-
/*
* If we have encountered a bitmap-format file, the size limit
* is smaller than s_maxbytes, which is for extent-mapped files.
@@ -180,58 +226,266 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
return -EFBIG;
iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
}
+
+ ret = file_modified(iocb->ki_filp);
+ if (ret)
+ return ret;
+
return iov_iter_count(from);
}
-#ifdef CONFIG_FS_DAX
-static ssize_t
-ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
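+/*
+ * Plain buffered write path: takes the inode lock, runs the usual write
+ * checks, hands the copy to generic_perform_write() and syncs the result
+ * when the file is opened for synchronous I/O.
+ */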
+static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
{
- struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
+ struct inode *inode = file_inode(iocb->ki_filp);
- if (!inode_trylock(inode)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
- return -EAGAIN;
- inode_lock(inode);
- }
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return -EOPNOTSUPP;
+
+ inode_lock(inode);
ret = ext4_write_checks(iocb, from);
if (ret <= 0)
goto out;
- ret = file_remove_privs(iocb->ki_filp);
- if (ret)
- goto out;
- ret = file_update_time(iocb->ki_filp);
- if (ret)
- goto out;
- ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+ current->backing_dev_info = inode_to_bdi(inode);
+ ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
+ current->backing_dev_info = NULL;
+
out:
inode_unlock(inode);
- if (ret > 0)
+ if (likely(ret > 0)) {
+ iocb->ki_pos += ret;
ret = generic_write_sync(iocb, ret);
+ }
+
return ret;
}
-#endif
-static ssize_t
-ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
+ ssize_t written, size_t count)
+{
+ handle_t *handle;
+ bool truncate = false;
+ u8 blkbits = inode->i_blkbits;
+ ext4_lblk_t written_blk, end_blk;
+
+ /*
+ * Note that EXT4_I(inode)->i_disksize can get extended up to
+ * inode->i_size while the I/O was running due to writeback of delalloc
+ * blocks. But, the code in ext4_iomap_alloc() is careful to use
+ * zeroed/unwritten extents if this is possible; thus we won't leave
+ * uninitialized blocks in a file even if we didn't succeed in writing
+ * as much as we intended.
+ */
+ WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
+ if (offset + count <= EXT4_I(inode)->i_disksize) {
+ /*
+ * We need to ensure that the inode is removed from the orphan
+ * list if it has been added prematurely, due to writeback of
+ * delalloc blocks.
+ */
+ if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+
+ if (IS_ERR(handle)) {
+ ext4_orphan_del(NULL, inode);
+ return PTR_ERR(handle);
+ }
+
+ ext4_orphan_del(handle, inode);
+ ext4_journal_stop(handle);
+ }
+
+ return written;
+ }
+
+ if (written < 0)
+ goto truncate;
+
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ if (IS_ERR(handle)) {
+ written = PTR_ERR(handle);
+ goto truncate;
+ }
+
+ if (ext4_update_inode_size(inode, offset + written))
+ ext4_mark_inode_dirty(handle, inode);
+
+ /*
+ * We may need to truncate allocated but not written blocks beyond EOF.
+ */
+ written_blk = ALIGN(offset + written, 1 << blkbits);
+ end_blk = ALIGN(offset + count, 1 << blkbits);
+ if (written_blk < end_blk && ext4_can_truncate(inode))
+ truncate = true;
+
+ /*
+ * Remove the inode from the orphan list if it has been extended and
+ * everything went OK.
+ */
+ if (!truncate && inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+ ext4_journal_stop(handle);
+
+ if (truncate) {
+truncate:
+ ext4_truncate_failed_write(inode);
+ /*
+ * If the truncate operation failed early, then the inode may
+ * still be on the orphan list. In that case, we need to try to
+ * remove the inode from the in-memory linked list.
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+ }
+
+ return written;
+}
+
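+/*
+ * iomap dio completion callback: once the data has been written, convert
+ * any unwritten extents covering the range so the blocks become visible.
+ */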
+static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
{
+ loff_t offset = iocb->ki_pos;
struct inode *inode = file_inode(iocb->ki_filp);
- int o_direct = iocb->ki_flags & IOCB_DIRECT;
- int unaligned_aio = 0;
- int overwrite = 0;
+
+ if (error)
+ return error;
+
+ if (size && flags & IOMAP_DIO_UNWRITTEN)
+ return ext4_convert_unwritten_extents(NULL, inode,
+ offset, size);
+
+ return 0;
+}
+
+static const struct iomap_dio_ops ext4_dio_write_ops = {
+ .end_io = ext4_dio_write_end_io,
+};
+
+static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
ssize_t ret;
+ size_t count;
+ loff_t offset;
+ handle_t *handle;
+ struct inode *inode = file_inode(iocb->ki_filp);
+ bool extend = false, overwrite = false, unaligned_aio = false;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock(inode);
+ }
+
+ if (!ext4_dio_supported(inode)) {
+ inode_unlock(inode);
+ /*
+ * Fall back to buffered I/O if the inode does not support
+ * direct I/O.
+ */
+ return ext4_buffered_write_iter(iocb, from);
+ }
+
+ ret = ext4_write_checks(iocb, from);
+ if (ret <= 0) {
+ inode_unlock(inode);
+ return ret;
+ }
+
+ /*
+ * Unaligned asynchronous direct I/O writes must be serialized against
+ * each other, as the zeroing of partial blocks of two competing unaligned
+ * asynchronous direct I/O writes can result in data corruption.
+ */
+ offset = iocb->ki_pos;
+ count = iov_iter_count(from);
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+ !is_sync_kiocb(iocb) && ext4_unaligned_aio(inode, from, offset)) {
+ unaligned_aio = true;
+ inode_dio_wait(inode);
+ }
+
+ /*
+ * Determine whether the I/O will overwrite allocated and initialized
+ * blocks. If so, check to see whether it is possible to take the
+ * dioread_nolock path.
+ */
+ if (!unaligned_aio && ext4_overwrite_io(inode, offset, count) &&
+ ext4_should_dioread_nolock(inode)) {
+ overwrite = true;
+ downgrade_write(&inode->i_rwsem);
+ }
+
+ if (offset + count > EXT4_I(inode)->i_disksize) {
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
+ }
+
+ ret = ext4_orphan_add(handle, inode);
+ if (ret) {
+ ext4_journal_stop(handle);
+ goto out;
+ }
+
+ extend = true;
+ ext4_journal_stop(handle);
+ }
+
+ ret = iomap_dio_rw(iocb, from, &ext4_iomap_ops, &ext4_dio_write_ops,
+ is_sync_kiocb(iocb) || unaligned_aio || extend);
+
+ if (extend)
+ ret = ext4_handle_inode_extension(inode, offset, ret, count);
+
+out:
+ if (overwrite)
+ inode_unlock_shared(inode);
+ else
+ inode_unlock(inode);
+
+ if (ret >= 0 && iov_iter_count(from)) {
+ ssize_t err;
+ loff_t endbyte;
+
+ offset = iocb->ki_pos;
+ err = ext4_buffered_write_iter(iocb, from);
+ if (err < 0)
+ return err;
+
+ /*
+ * We need to ensure that the pages within the page cache for
+ * the range covered by this I/O are written to disk and
+ * invalidated. This is an attempt to preserve the expected
+ * direct I/O semantics in the case we fall back to buffered I/O
+ * to complete the I/O request.
+ */
+ ret += err;
+ endbyte = offset + err - 1;
+ err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
+ offset, endbyte);
+ if (!err)
+ invalidate_mapping_pages(iocb->ki_filp->f_mapping,
+ offset >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
+ }
+
+ return ret;
+}
#ifdef CONFIG_FS_DAX
- if (IS_DAX(inode))
- return ext4_dax_write_iter(iocb, from);
-#endif
- if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
- return -EOPNOTSUPP;
+static ssize_t
+ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ ssize_t ret;
+ size_t count;
+ loff_t offset;
+ handle_t *handle;
+ bool extend = false;
+ struct inode *inode = file_inode(iocb->ki_filp);
if (!inode_trylock(inode)) {
if (iocb->ki_flags & IOCB_NOWAIT)
@@ -243,49 +497,55 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret <= 0)
goto out;
- /*
- * Unaligned direct AIO must be serialized among each other as zeroing
- * of partial blocks of two competing unaligned AIOs can result in data
- * corruption.
- */
- if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
- !is_sync_kiocb(iocb) &&
- ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
- unaligned_aio = 1;
- ext4_unwritten_wait(inode);
- }
+ offset = iocb->ki_pos;
+ count = iov_iter_count(from);
- iocb->private = &overwrite;
- /* Check whether we do a DIO overwrite or not */
- if (o_direct && !unaligned_aio) {
- if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
- if (ext4_should_dioread_nolock(inode))
- overwrite = 1;
- } else if (iocb->ki_flags & IOCB_NOWAIT) {
- ret = -EAGAIN;
+ if (offset + count > EXT4_I(inode)->i_disksize) {
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
goto out;
}
- }
- ret = __generic_file_write_iter(iocb, from);
- /*
- * Unaligned direct AIO must be the only IO in flight. Otherwise
- * overlapping aligned IO after unaligned might result in data
- * corruption.
- */
- if (ret == -EIOCBQUEUED && unaligned_aio)
- ext4_unwritten_wait(inode);
- inode_unlock(inode);
+ ret = ext4_orphan_add(handle, inode);
+ if (ret) {
+ ext4_journal_stop(handle);
+ goto out;
+ }
- if (ret > 0)
- ret = generic_write_sync(iocb, ret);
+ extend = true;
+ ext4_journal_stop(handle);
+ }
- return ret;
+ ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+ if (extend)
+ ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
inode_unlock(inode);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
return ret;
}
+#endif
+
+static ssize_t
+ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ return -EIO;
+
+#ifdef CONFIG_FS_DAX
+ if (IS_DAX(inode))
+ return ext4_dax_write_iter(iocb, from);
+#endif
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return ext4_dio_write_iter(iocb, from);
+
+ return ext4_buffered_write_iter(iocb, from);
+}
#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
@@ -457,6 +717,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
if (ret)
return ret;
+ ret = fsverity_file_open(inode, filp);
+ if (ret)
+ return ret;
+
/*
* Set up the jbd2_inode if we are opening the inode for
* writing and the journal is present
@@ -492,12 +756,14 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
maxbytes, i_size_read(inode));
case SEEK_HOLE:
inode_lock_shared(inode);
- offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
+ offset = iomap_seek_hole(inode, offset,
+ &ext4_iomap_report_ops);
inode_unlock_shared(inode);
break;
case SEEK_DATA:
inode_lock_shared(inode);
- offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
+ offset = iomap_seek_data(inode, offset,
+ &ext4_iomap_report_ops);
inode_unlock_shared(inode);
break;
}
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 5508baa11bb6..e10206e7f4bb 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -80,6 +80,43 @@ static int ext4_sync_parent(struct inode *inode)
return ret;
}
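+/*
+ * fsync for filesystems running without a journal: flush the inode's
+ * buffers, write its metadata if dirty, sync the parent directory where
+ * needed and ask for a barrier when mounted with -o barrier.
+ */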
+static int ext4_fsync_nojournal(struct inode *inode, bool datasync,
+ bool *needs_barrier)
+{
+ int ret, err;
+
+ ret = sync_mapping_buffers(inode->i_mapping);
+ if (!(inode->i_state & I_DIRTY_ALL))
+ return ret;
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ return ret;
+
+ err = sync_inode_metadata(inode, 1);
+ if (!ret)
+ ret = err;
+
+ if (!ret)
+ ret = ext4_sync_parent(inode);
+ if (test_opt(inode->i_sb, BARRIER))
+ *needs_barrier = true;
+
+ return ret;
+}
+
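+/*
+ * fsync when a journal is present: wait for the transaction carrying the
+ * inode's data or metadata changes to commit, and request a flush only if
+ * jbd2 will not already send one for that commit.
+ */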
+static int ext4_fsync_journal(struct inode *inode, bool datasync,
+ bool *needs_barrier)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
+
+ if (journal->j_flags & JBD2_BARRIER &&
+ !jbd2_trans_will_send_data_barrier(journal, commit_tid))
+ *needs_barrier = true;
+
+ return jbd2_complete_transaction(journal, commit_tid);
+}
+
/*
* akpm: A new design for ext4_sync_file().
*
@@ -91,17 +128,14 @@ static int ext4_sync_parent(struct inode *inode)
* What we do is just kick off a commit and wait on it. This will snapshot the
* inode to disk.
*/
-
int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file->f_mapping->host;
- struct ext4_inode_info *ei = EXT4_I(inode);
- journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
int ret = 0, err;
- tid_t commit_tid;
bool needs_barrier = false;
+ struct inode *inode = file->f_mapping->host;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_forced_shutdown(sbi)))
return -EIO;
J_ASSERT(ext4_journal_current_handle() == NULL);
@@ -111,23 +145,15 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (sb_rdonly(inode->i_sb)) {
/* Make sure that we read updated s_mount_flags value */
smp_rmb();
- if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+ if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ret = -EROFS;
goto out;
}
- if (!journal) {
- ret = __generic_file_fsync(file, start, end, datasync);
- if (!ret)
- ret = ext4_sync_parent(inode);
- if (test_opt(inode->i_sb, BARRIER))
- goto issue_flush;
- goto out;
- }
-
ret = file_write_and_wait_range(file, start, end);
if (ret)
return ret;
+
/*
* data=writeback,ordered:
* The caller's filemap_fdatawrite()/wait will sync the data.
@@ -142,18 +168,14 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
- if (ext4_should_journal_data(inode)) {
+ if (!sbi->s_journal)
+ ret = ext4_fsync_nojournal(inode, datasync, &needs_barrier);
+ else if (ext4_should_journal_data(inode))
ret = ext4_force_commit(inode->i_sb);
- goto out;
- }
+ else
+ ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
- commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
- if (journal->j_flags & JBD2_BARRIER &&
- !jbd2_trans_will_send_data_barrier(journal, commit_tid))
- needs_barrier = true;
- ret = jbd2_complete_transaction(journal, commit_tid);
if (needs_barrier) {
- issue_flush:
err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
if (!ret)
ret = err;
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index d358bfcb6b3f..3e133793a5a3 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -280,7 +280,7 @@ int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
unsigned char *buff;
struct qstr qstr = {.name = name, .len = len };
- if (len && IS_CASEFOLDED(dir)) {
+ if (len && IS_CASEFOLDED(dir) && um) {
buff = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
if (!buff)
return -ENOMEM;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 764ff4c56233..dc333e8e51e8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -265,13 +265,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
ext4_debug("freeing inode %lu\n", ino);
trace_ext4_free_inode(inode);
- /*
- * Note: we must free any quota before locking the superblock,
- * as writing the quota to disk may need the lock as well.
- */
dquot_initialize(inode);
dquot_free_inode(inode);
- dquot_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@@ -927,7 +922,7 @@ repeat_in_this_group:
BUG_ON(nblocks <= 0);
handle = __ext4_journal_start_sb(dir->i_sb, line_no,
handle_type, nblocks,
- 0);
+ 0, 0);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
ext4_std_error(sb, err);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 36699a131168..3a4ab70fe9e0 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -331,11 +331,14 @@ static int ext4_alloc_branch(handle_t *handle,
for (i = 0; i <= indirect_blks; i++) {
if (i == indirect_blks) {
new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
- } else
+ } else {
ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
ar->inode, ar->goal,
ar->flags & EXT4_MB_DELALLOC_RESERVED,
NULL, &err);
+ /* Simplify error cleanup... */
+ branch[i+1].bh = NULL;
+ }
if (err) {
i--;
goto failed;
@@ -377,18 +380,25 @@ static int ext4_alloc_branch(handle_t *handle,
}
return 0;
failed:
+ if (i == indirect_blks) {
+ /* Free data blocks */
+ ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
+ ar->len, 0);
+ i--;
+ }
for (; i >= 0; i--) {
/*
* We want to ext4_forget() only freshly allocated indirect
- * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
- * buffer at branch[0].bh is indirect block / inode already
- * existing before ext4_alloc_branch() was called.
+ * blocks. Buffer for new_blocks[i] is at branch[i+1].bh
+ * (buffer at branch[0].bh is indirect block / inode already
+ * existing before ext4_alloc_branch() was called). Also
+ * because blocks are freshly allocated, we don't need to
+ * revoke them which is why we don't set
+ * EXT4_FREE_BLOCKS_METADATA.
*/
- if (i > 0 && i != indirect_blks && branch[i].bh)
- ext4_forget(handle, 1, ar->inode, branch[i].bh,
- branch[i].bh->b_blocknr);
- ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
- (i == indirect_blks) ? ar->len : 1, 0);
+ ext4_free_blocks(handle, ar->inode, branch[i+1].bh,
+ new_blocks[i], 1,
+ branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0);
}
return err;
}
@@ -689,27 +699,63 @@ int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
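+/*
+ * Helper run when the truncate handle has to be restarted: write out the
+ * buffer and inode so the transaction is self-consistent, then drop
+ * i_data_sem (reported via *dropped) to avoid deadlocking against
+ * ext4_map_blocks() while the journal restart takes place.
+ */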
+static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, int *dropped)
+{
+ int err;
+
+ if (bh) {
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (unlikely(err))
+ return err;
+ }
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err))
+ return err;
+ /*
+ * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
+ * moment, get_block can be called only for blocks inside i_size since
+ * page cache has been already dropped and writes are blocked by
+ * i_mutex. So we can safely drop the i_data_sem here.
+ */
+ BUG_ON(EXT4_JOURNAL(inode) == NULL);
+ ext4_discard_preallocations(inode);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ *dropped = 1;
+ return 0;
+}
+
/*
* Truncate transactions can be complex and absolutely huge. So we need to
* be able to restart the transaction at a convenient checkpoint to make
* sure we don't overflow the journal.
*
* Try to extend this transaction for the purposes of truncation. If
- * extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct
- *
- * Returns 0 if we managed to create more room. If we can't create more
- * room, and the transaction must be restarted we return 1.
+ * extend fails, we restart the transaction.
*/
-static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
+static int ext4_ind_truncate_ensure_credits(handle_t *handle,
+ struct inode *inode,
+ struct buffer_head *bh,
+ int revoke_creds)
{
- if (!ext4_handle_valid(handle))
- return 0;
- if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
- return 0;
- if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
- return 0;
- return 1;
+ int ret;
+ int dropped = 0;
+
+ ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_blocks_for_truncate(inode), revoke_creds,
+ ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped));
+ if (dropped)
+ down_write(&EXT4_I(inode)->i_data_sem);
+ if (ret <= 0)
+ return ret;
+ if (bh) {
+ BUFFER_TRACE(bh, "retaking write access");
+ ret = ext4_journal_get_write_access(handle, bh);
+ if (unlikely(ret))
+ return ret;
+ }
+ return 0;
}
/*
@@ -844,27 +890,10 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
return 1;
}
- if (try_to_extend_transaction(handle, inode)) {
- if (bh) {
- BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, bh);
- if (unlikely(err))
- goto out_err;
- }
- err = ext4_mark_inode_dirty(handle, inode);
- if (unlikely(err))
- goto out_err;
- err = ext4_truncate_restart_trans(handle, inode,
- ext4_blocks_for_truncate(inode));
- if (unlikely(err))
- goto out_err;
- if (bh) {
- BUFFER_TRACE(bh, "retaking write access");
- err = ext4_journal_get_write_access(handle, bh);
- if (unlikely(err))
- goto out_err;
- }
- }
+ err = ext4_ind_truncate_ensure_credits(handle, inode, bh,
+ ext4_free_data_revoke_credits(inode, count));
+ if (err < 0)
+ goto out_err;
for (p = first; p < last; p++)
*p = 0;
@@ -1047,11 +1076,11 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
*/
if (ext4_handle_is_aborted(handle))
return;
- if (try_to_extend_transaction(handle, inode)) {
- ext4_mark_inode_dirty(handle, inode);
- ext4_truncate_restart_trans(handle, inode,
- ext4_blocks_for_truncate(inode));
- }
+ if (ext4_ind_truncate_ensure_credits(handle, inode,
+ NULL,
+ ext4_free_metadata_revoke_credits(
+ inode->i_sb, 1)) < 0)
+ return;
/*
* The forget flag here is critical because if
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 88cdf3c90bd1..2fec62d764fa 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1416,7 +1416,7 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
err = ext4_htree_store_dirent(dir_file, hinfo->hash,
hinfo->minor_hash, de, &tmp_str);
if (err) {
- count = err;
+ ret = err;
goto out;
}
count++;
diff --git a/fs/ext4/inode-test.c b/fs/ext4/inode-test.c
new file mode 100644
index 000000000000..92a9da1774aa
--- /dev/null
+++ b/fs/ext4/inode-test.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test of ext4 inode timestamps: verifies that the seconds part of
+ * [a/c/m] timestamps in ext4 inode structs is decoded correctly.
+ */
+
+#include <kunit/test.h>
+#include <linux/kernel.h>
+#include <linux/time64.h>
+
+#include "ext4.h"
+
+/*
+ * For constructing the nonnegative timestamp lower bound value.
+ * binary: 00000000 00000000 00000000 00000000
+ */
+#define LOWER_MSB_0 0L
+/*
+ * For constructing the nonnegative timestamp upper bound value.
+ * binary: 01111111 11111111 11111111 11111111
+ *
+ */
+#define UPPER_MSB_0 0x7fffffffL
+/*
+ * For constructing the negative timestamp lower bound value.
+ * binary: 10000000 00000000 00000000 00000000
+ */
+#define LOWER_MSB_1 (-0x80000000L)
+/*
+ * For constructing the negative timestamp upper bound value.
+ * binary: 11111111 11111111 11111111 11111111
+ */
+#define UPPER_MSB_1 (-1L)
+/*
+ * Upper bound for nanoseconds value supported by the encoding.
+ * binary: 00111111 11111111 11111111 11111111
+ */
+#define MAX_NANOSECONDS ((1L << 30) - 1)
+
+#define CASE_NAME_FORMAT "%s: msb:%x lower_bound:%x extra_bits: %x"
+
+#define LOWER_BOUND_NEG_NO_EXTRA_BITS_CASE\
+ "1901-12-13 Lower bound of 32bit < 0 timestamp, no extra bits"
+#define UPPER_BOUND_NEG_NO_EXTRA_BITS_CASE\
+ "1969-12-31 Upper bound of 32bit < 0 timestamp, no extra bits"
+#define LOWER_BOUND_NONNEG_NO_EXTRA_BITS_CASE\
+ "1970-01-01 Lower bound of 32bit >=0 timestamp, no extra bits"
+#define UPPER_BOUND_NONNEG_NO_EXTRA_BITS_CASE\
+ "2038-01-19 Upper bound of 32bit >=0 timestamp, no extra bits"
+#define LOWER_BOUND_NEG_LO_1_CASE\
+ "2038-01-19 Lower bound of 32bit <0 timestamp, lo extra sec bit on"
+#define UPPER_BOUND_NEG_LO_1_CASE\
+ "2106-02-07 Upper bound of 32bit <0 timestamp, lo extra sec bit on"
+#define LOWER_BOUND_NONNEG_LO_1_CASE\
+ "2106-02-07 Lower bound of 32bit >=0 timestamp, lo extra sec bit on"
+#define UPPER_BOUND_NONNEG_LO_1_CASE\
+ "2174-02-25 Upper bound of 32bit >=0 timestamp, lo extra sec bit on"
+#define LOWER_BOUND_NEG_HI_1_CASE\
+ "2174-02-25 Lower bound of 32bit <0 timestamp, hi extra sec bit on"
+#define UPPER_BOUND_NEG_HI_1_CASE\
+ "2242-03-16 Upper bound of 32bit <0 timestamp, hi extra sec bit on"
+#define LOWER_BOUND_NONNEG_HI_1_CASE\
+ "2242-03-16 Lower bound of 32bit >=0 timestamp, hi extra sec bit on"
+#define UPPER_BOUND_NONNEG_HI_1_CASE\
+ "2310-04-04 Upper bound of 32bit >=0 timestamp, hi extra sec bit on"
+#define UPPER_BOUND_NONNEG_HI_1_NS_1_CASE\
+ "2310-04-04 Upper bound of 32bit>=0 timestamp, hi extra sec bit 1. 1 ns"
+#define LOWER_BOUND_NONNEG_HI_1_NS_MAX_CASE\
+ "2378-04-22 Lower bound of 32bit>= timestamp. Extra sec bits 1. Max ns"
+#define LOWER_BOUND_NONNEG_EXTRA_BITS_1_CASE\
+ "2378-04-22 Lower bound of 32bit >=0 timestamp. All extra sec bits on"
+#define UPPER_BOUND_NONNEG_EXTRA_BITS_1_CASE\
+ "2446-05-10 Upper bound of 32bit >=0 timestamp. All extra sec bits on"
+
+struct timestamp_expectation {
+ const char *test_case_name;
+ struct timespec64 expected;
+ u32 extra_bits;
+ bool msb_set;
+ bool lower_bound;
+};
+
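+/* Pick the 32-bit seconds value (bound and sign) a test case starts from. */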
+static time64_t get_32bit_time(const struct timestamp_expectation * const test)
+{
+ if (test->msb_set) {
+ if (test->lower_bound)
+ return LOWER_MSB_1;
+
+ return UPPER_MSB_1;
+ }
+
+ if (test->lower_bound)
+ return LOWER_MSB_0;
+ return UPPER_MSB_0;
+}
+
+
+/*
+ * Test data is derived from the table in the Inode Timestamps section of
+ * Documentation/filesystems/ext4/inodes.rst.
+ */
+static void inode_test_xtimestamp_decoding(struct kunit *test)
+{
+ const struct timestamp_expectation test_data[] = {
+ {
+ .test_case_name = LOWER_BOUND_NEG_NO_EXTRA_BITS_CASE,
+ .msb_set = true,
+ .lower_bound = true,
+ .extra_bits = 0,
+ .expected = {.tv_sec = -0x80000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NEG_NO_EXTRA_BITS_CASE,
+ .msb_set = true,
+ .lower_bound = false,
+ .extra_bits = 0,
+ .expected = {.tv_sec = -1LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_NO_EXTRA_BITS_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 0,
+ .expected = {0LL, 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_NO_EXTRA_BITS_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 0,
+ .expected = {.tv_sec = 0x7fffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NEG_LO_1_CASE,
+ .msb_set = true,
+ .lower_bound = true,
+ .extra_bits = 1,
+ .expected = {.tv_sec = 0x80000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NEG_LO_1_CASE,
+ .msb_set = true,
+ .lower_bound = false,
+ .extra_bits = 1,
+ .expected = {.tv_sec = 0xffffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_LO_1_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 1,
+ .expected = {.tv_sec = 0x100000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_LO_1_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 1,
+ .expected = {.tv_sec = 0x17fffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NEG_HI_1_CASE,
+ .msb_set = true,
+ .lower_bound = true,
+ .extra_bits = 2,
+ .expected = {.tv_sec = 0x180000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NEG_HI_1_CASE,
+ .msb_set = true,
+ .lower_bound = false,
+ .extra_bits = 2,
+ .expected = {.tv_sec = 0x1ffffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_HI_1_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 2,
+ .expected = {.tv_sec = 0x200000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_HI_1_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 2,
+ .expected = {.tv_sec = 0x27fffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_HI_1_NS_1_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 6,
+ .expected = {.tv_sec = 0x27fffffffLL, .tv_nsec = 1L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_HI_1_NS_MAX_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 0xFFFFFFFF,
+ .expected = {.tv_sec = 0x300000000LL,
+ .tv_nsec = MAX_NANOSECONDS},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_EXTRA_BITS_1_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 3,
+ .expected = {.tv_sec = 0x300000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_EXTRA_BITS_1_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 3,
+ .expected = {.tv_sec = 0x37fffffffLL, .tv_nsec = 0L},
+ }
+ };
+
+ struct timespec64 timestamp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_data); ++i) {
+ timestamp.tv_sec = get_32bit_time(&test_data[i]);
+ ext4_decode_extra_time(&timestamp,
+ cpu_to_le32(test_data[i].extra_bits));
+
+ KUNIT_EXPECT_EQ_MSG(test,
+ test_data[i].expected.tv_sec,
+ timestamp.tv_sec,
+ CASE_NAME_FORMAT,
+ test_data[i].test_case_name,
+ test_data[i].msb_set,
+ test_data[i].lower_bound,
+ test_data[i].extra_bits);
+ KUNIT_EXPECT_EQ_MSG(test,
+ test_data[i].expected.tv_nsec,
+ timestamp.tv_nsec,
+ CASE_NAME_FORMAT,
+ test_data[i].test_case_name,
+ test_data[i].msb_set,
+ test_data[i].lower_bound,
+ test_data[i].extra_bits);
+ }
+}
+
+static struct kunit_case ext4_inode_test_cases[] = {
+ KUNIT_CASE(inode_test_xtimestamp_decoding),
+ {}
+};
+
+static struct kunit_suite ext4_inode_test_suite = {
+ .name = "ext4_inode_test",
+ .test_cases = ext4_inode_test_cases,
+};
+
+kunit_test_suite(ext4_inode_test_suite);
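The expected values in the table above follow from the inode timestamp encoding: the low two bits of the 32-bit extra field extend the epoch in multiples of 2^32 seconds and the remaining 30 bits hold nanoseconds. A simplified sketch of that decode (mask widths assumed from Documentation/filesystems/ext4/inodes.rst; the in-kernel helper additionally handles the little-endian value the test feeds in via cpu_to_le32()):

#define SKETCH_EPOCH_BITS	2
#define SKETCH_EPOCH_MASK	((1u << SKETCH_EPOCH_BITS) - 1)

/* extend the 32-bit seconds value and extract nanoseconds from "extra" */
static void sketch_decode_extra_time(long long *sec, long *nsec, unsigned int extra)
{
	/* bits 0-1: additional epoch, each worth 2^32 seconds */
	*sec += (long long)(extra & SKETCH_EPOCH_MASK) << 32;
	/* bits 2-31: nanoseconds, at most (1 << 30) - 1 */
	*nsec = extra >> SKETCH_EPOCH_BITS;
}

Working LOWER_BOUND_NEG_HI_1_CASE through this sketch: a 32-bit seconds value of -0x80000000 with extra bits 2 gives -0x80000000 + 0x200000000 = 0x180000000 seconds and 0 ns, matching the expected tv_sec in the table.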
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 420fe3deed39..28f28de0c1b6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -164,39 +164,18 @@ int ext4_inode_is_fast_symlink(struct inode *inode)
}
/*
- * Restart the transaction associated with *handle. This does a commit,
- * so before we call here everything must be consistently dirtied against
- * this transaction.
- */
-int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
- int nblocks)
-{
- int ret;
-
- /*
- * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
- * moment, get_block can be called only for blocks inside i_size since
- * page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
- */
- BUG_ON(EXT4_JOURNAL(inode) == NULL);
- jbd_debug(2, "restarting handle %p\n", handle);
- up_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_journal_restart(handle, nblocks);
- down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
-
- return ret;
-}
-
-/*
* Called at the last iput() if i_nlink is zero.
*/
void ext4_evict_inode(struct inode *inode)
{
handle_t *handle;
int err;
- int extra_credits = 3;
+ /*
+ * Credits for final inode cleanup and freeing:
+ * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
+ * (xattr block freeing), bitmap, group descriptor (inode freeing)
+ */
+ int extra_credits = 6;
struct ext4_xattr_inode_array *ea_inode_array = NULL;
trace_ext4_evict_inode(inode);
@@ -252,8 +231,12 @@ void ext4_evict_inode(struct inode *inode)
if (!IS_NOQUOTA(inode))
extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
+ /*
+ * Block bitmap, group descriptor, and inode are accounted in both
+ * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
+ */
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
- ext4_blocks_for_truncate(inode)+extra_credits);
+ ext4_blocks_for_truncate(inode) + extra_credits - 3);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
/*
@@ -527,7 +510,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
return -EFSCORRUPTED;
/* Lookup extent status tree firstly */
- if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
+ if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
@@ -695,7 +678,7 @@ found:
* extent status tree.
*/
if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
- ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
+ ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
if (ext4_es_is_written(&es))
goto out_sem;
}
@@ -827,136 +810,6 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
#define DIO_MAX_BLOCKS 4096
/*
- * Get blocks function for the cases that need to start a transaction -
- * generally difference cases of direct IO and DAX IO. It also handles retries
- * in case of ENOSPC.
- */
-static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int flags)
-{
- int dio_credits;
- handle_t *handle;
- int retries = 0;
- int ret;
-
- /* Trim mapping request to maximum we can map at once for DIO */
- if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
- bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
- dio_credits = ext4_chunk_trans_blocks(inode,
- bh_result->b_size >> inode->i_blkbits);
-retry:
- handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- ret = _ext4_get_block(inode, iblock, bh_result, flags);
- ext4_journal_stop(handle);
-
- if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry;
- return ret;
-}
-
-/* Get block function for DIO reads and writes to inodes without extents */
-int ext4_dio_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create)
-{
- /* We don't expect handle for direct IO */
- WARN_ON_ONCE(ext4_journal_current_handle());
-
- if (!create)
- return _ext4_get_block(inode, iblock, bh, 0);
- return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
-}
-
-/*
- * Get block function for AIO DIO writes when we create unwritten extent if
- * blocks are not allocated yet. The extent will be converted to written
- * after IO is complete.
- */
-static int ext4_dio_get_block_unwritten_async(struct inode *inode,
- sector_t iblock, struct buffer_head *bh_result, int create)
-{
- int ret;
-
- /* We don't expect handle for direct IO */
- WARN_ON_ONCE(ext4_journal_current_handle());
-
- ret = ext4_get_block_trans(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_IO_CREATE_EXT);
-
- /*
- * When doing DIO using unwritten extents, we need io_end to convert
- * unwritten extents to written on IO completion. We allocate io_end
- * once we spot unwritten extent and store it in b_private. Generic
- * DIO code keeps b_private set and furthermore passes the value to
- * our completion callback in 'private' argument.
- */
- if (!ret && buffer_unwritten(bh_result)) {
- if (!bh_result->b_private) {
- ext4_io_end_t *io_end;
-
- io_end = ext4_init_io_end(inode, GFP_KERNEL);
- if (!io_end)
- return -ENOMEM;
- bh_result->b_private = io_end;
- ext4_set_io_unwritten_flag(inode, io_end);
- }
- set_buffer_defer_completion(bh_result);
- }
-
- return ret;
-}
-
-/*
- * Get block function for non-AIO DIO writes when we create unwritten extent if
- * blocks are not allocated yet. The extent will be converted to written
- * after IO is complete by ext4_direct_IO_write().
- */
-static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
- sector_t iblock, struct buffer_head *bh_result, int create)
-{
- int ret;
-
- /* We don't expect handle for direct IO */
- WARN_ON_ONCE(ext4_journal_current_handle());
-
- ret = ext4_get_block_trans(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_IO_CREATE_EXT);
-
- /*
- * Mark inode as having pending DIO writes to unwritten extents.
- * ext4_direct_IO_write() checks this flag and converts extents to
- * written.
- */
- if (!ret && buffer_unwritten(bh_result))
- ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
-
- return ret;
-}
-
-static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- int ret;
-
- ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
- inode->i_ino, create);
- /* We don't expect handle for direct IO */
- WARN_ON_ONCE(ext4_journal_current_handle());
-
- ret = _ext4_get_block(inode, iblock, bh_result, 0);
- /*
- * Blocks should have been preallocated! ext4_file_write_iter() checks
- * that.
- */
- WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));
-
- return ret;
-}
-
-
-/*
* `handle' can be NULL if create is zero
*/
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
@@ -1024,7 +877,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
bh = ext4_getblk(handle, inode, block, map_flags);
if (IS_ERR(bh))
return bh;
- if (!bh || buffer_uptodate(bh))
+ if (!bh || ext4_buffer_uptodate(bh))
return bh;
ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
wait_on_buffer(bh);
@@ -1051,7 +904,7 @@ int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
for (i = 0; i < bh_count; i++)
/* Note that NULL bhs[i] is valid because of holes. */
- if (bhs[i] && !buffer_uptodate(bhs[i]))
+ if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
&bhs[i]);
@@ -1340,6 +1193,9 @@ retry_journal:
}
if (ret) {
+ bool extended = (pos + len > inode->i_size) &&
+ !ext4_verity_in_progress(inode);
+
unlock_page(page);
/*
* __block_write_begin may have instantiated a few blocks
@@ -1349,11 +1205,11 @@ retry_journal:
* Add inode to orphan list in case we crash before
* truncate finishes
*/
- if (pos + len > inode->i_size && ext4_can_truncate(inode))
+ if (extended && ext4_can_truncate(inode))
ext4_orphan_add(handle, inode);
ext4_journal_stop(handle);
- if (pos + len > inode->i_size) {
+ if (extended) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might
@@ -1406,6 +1262,7 @@ static int ext4_write_end(struct file *file,
int ret = 0, ret2;
int i_size_changed = 0;
int inline_data = ext4_has_inline_data(inode);
+ bool verity = ext4_verity_in_progress(inode);
trace_ext4_write_end(inode, pos, len, copied);
if (inline_data) {
@@ -1423,12 +1280,16 @@ static int ext4_write_end(struct file *file,
/*
* it's important to update i_size while still holding page lock:
* page writeout could otherwise come in and zero beyond i_size.
+ *
+ * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
+ * blocks are being written past EOF, so skip the i_size update.
*/
- i_size_changed = ext4_update_inode_size(inode, pos + copied);
+ if (!verity)
+ i_size_changed = ext4_update_inode_size(inode, pos + copied);
unlock_page(page);
put_page(page);
- if (old_size < pos)
+ if (old_size < pos && !verity)
pagecache_isize_extended(inode, old_size, pos);
/*
* Don't mark the inode dirty under page lock. First, it unnecessarily
@@ -1439,7 +1300,7 @@ static int ext4_write_end(struct file *file,
if (i_size_changed || inline_data)
ext4_mark_inode_dirty(handle, inode);
- if (pos + len > inode->i_size && ext4_can_truncate(inode))
+ if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
* less. We will have blocks allocated outside
* inode->i_size. So truncate them
@@ -1450,7 +1311,7 @@ errout:
if (!ret)
ret = ret2;
- if (pos + len > inode->i_size) {
+ if (pos + len > inode->i_size && !verity) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
@@ -1511,6 +1372,7 @@ static int ext4_journalled_write_end(struct file *file,
unsigned from, to;
int size_changed = 0;
int inline_data = ext4_has_inline_data(inode);
+ bool verity = ext4_verity_in_progress(inode);
trace_ext4_journalled_write_end(inode, pos, len, copied);
from = pos & (PAGE_SIZE - 1);
@@ -1540,13 +1402,14 @@ static int ext4_journalled_write_end(struct file *file,
if (!partial)
SetPageUptodate(page);
}
- size_changed = ext4_update_inode_size(inode, pos + copied);
+ if (!verity)
+ size_changed = ext4_update_inode_size(inode, pos + copied);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
unlock_page(page);
put_page(page);
- if (old_size < pos)
+ if (old_size < pos && !verity)
pagecache_isize_extended(inode, old_size, pos);
if (size_changed || inline_data) {
@@ -1555,7 +1418,7 @@ static int ext4_journalled_write_end(struct file *file,
ret = ret2;
}
- if (pos + len > inode->i_size && ext4_can_truncate(inode))
+ if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
* less. We will have blocks allocated outside
* inode->i_size. So truncate them
@@ -1566,7 +1429,7 @@ errout:
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
- if (pos + len > inode->i_size) {
+ if (pos + len > inode->i_size && !verity) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
@@ -1646,49 +1509,6 @@ void ext4_da_release_space(struct inode *inode, int to_free)
dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
-static void ext4_da_page_release_reservation(struct page *page,
- unsigned int offset,
- unsigned int length)
-{
- int contiguous_blks = 0;
- struct buffer_head *head, *bh;
- unsigned int curr_off = 0;
- struct inode *inode = page->mapping->host;
- unsigned int stop = offset + length;
- ext4_fsblk_t lblk;
-
- BUG_ON(stop > PAGE_SIZE || stop < length);
-
- head = page_buffers(page);
- bh = head;
- do {
- unsigned int next_off = curr_off + bh->b_size;
-
- if (next_off > stop)
- break;
-
- if ((offset <= curr_off) && (buffer_delay(bh))) {
- contiguous_blks++;
- clear_buffer_delay(bh);
- } else if (contiguous_blks) {
- lblk = page->index <<
- (PAGE_SHIFT - inode->i_blkbits);
- lblk += (curr_off >> inode->i_blkbits) -
- contiguous_blks;
- ext4_es_remove_blks(inode, lblk, contiguous_blks);
- contiguous_blks = 0;
- }
- curr_off = next_off;
- } while ((bh = bh->b_this_page) != head);
-
- if (contiguous_blks) {
- lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
- lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
- ext4_es_remove_blks(inode, lblk, contiguous_blks);
- }
-
-}
-
/*
* Delayed allocation stuff
*/
@@ -1868,7 +1688,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
(unsigned long) map->m_lblk);
/* Lookup extent status tree firstly */
- if (ext4_es_lookup_extent(inode, iblock, &es)) {
+ if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
if (ext4_es_is_hole(&es)) {
retval = 0;
down_read(&EXT4_I(inode)->i_data_sem);
@@ -2162,7 +1982,8 @@ static int ext4_writepage(struct page *page,
trace_ext4_writepage(page);
size = i_size_read(inode);
- if (page->index == size >> PAGE_SHIFT)
+ if (page->index == size >> PAGE_SHIFT &&
+ !ext4_verity_in_progress(inode))
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
@@ -2246,7 +2067,8 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
* after page tables are updated.
*/
size = i_size_read(mpd->inode);
- if (page->index == size >> PAGE_SHIFT)
+ if (page->index == size >> PAGE_SHIFT &&
+ !ext4_verity_in_progress(mpd->inode))
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
@@ -2345,6 +2167,9 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
>> inode->i_blkbits;
+ if (ext4_verity_in_progress(inode))
+ blocks = EXT_MAX_BLOCKS;
+
do {
BUG_ON(buffer_locked(bh));
@@ -2369,6 +2194,79 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
}
/*
+ * mpage_process_page - update page buffers corresponding to changed extent and
+ * may submit fully mapped page for IO
+ *
+ * @mpd - description of extent to map, on return next extent to map
+ * @m_lblk - logical block mapping.
+ * @m_pblk - corresponding physical mapping.
+ * @map_bh - determines on return whether this page requires any further
+ * mapping or not.
+ * Scan given page buffers corresponding to changed extent and update buffer
+ * state according to new extent state.
+ * We map delalloc buffers to their physical location, clear unwritten bits.
+ * If the given page is not fully mapped, we update @map to the next extent in
+ * the given page that needs mapping and return @map_bh as true.
+ */
+static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
+ ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
+ bool *map_bh)
+{
+ struct buffer_head *head, *bh;
+ ext4_io_end_t *io_end = mpd->io_submit.io_end;
+ ext4_lblk_t lblk = *m_lblk;
+ ext4_fsblk_t pblock = *m_pblk;
+ int err = 0;
+ int blkbits = mpd->inode->i_blkbits;
+ ssize_t io_end_size = 0;
+ struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
+
+ bh = head = page_buffers(page);
+ do {
+ if (lblk < mpd->map.m_lblk)
+ continue;
+ if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
+ /*
+ * Buffer after end of mapped extent.
+ * Find next buffer in the page to map.
+ */
+ mpd->map.m_len = 0;
+ mpd->map.m_flags = 0;
+ io_end_vec->size += io_end_size;
+ io_end_size = 0;
+
+ err = mpage_process_page_bufs(mpd, head, bh, lblk);
+ if (err > 0)
+ err = 0;
+ if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
+ io_end_vec = ext4_alloc_io_end_vec(io_end);
+ if (IS_ERR(io_end_vec)) {
+ err = PTR_ERR(io_end_vec);
+ goto out;
+ }
+ io_end_vec->offset = mpd->map.m_lblk << blkbits;
+ }
+ *map_bh = true;
+ goto out;
+ }
+ if (buffer_delay(bh)) {
+ clear_buffer_delay(bh);
+ bh->b_blocknr = pblock++;
+ }
+ clear_buffer_unwritten(bh);
+ io_end_size += (1 << blkbits);
+ } while (lblk++, (bh = bh->b_this_page) != head);
+
+ io_end_vec->size += io_end_size;
+ io_end_size = 0;
+ *map_bh = false;
+out:
+ *m_lblk = lblk;
+ *m_pblk = pblock;
+ return err;
+}
+
+/*
* mpage_map_buffers - update buffers corresponding to changed extent and
* submit fully mapped pages for IO
*
@@ -2387,12 +2285,12 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
struct pagevec pvec;
int nr_pages, i;
struct inode *inode = mpd->inode;
- struct buffer_head *head, *bh;
int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
pgoff_t start, end;
ext4_lblk_t lblk;
- sector_t pblock;
+ ext4_fsblk_t pblock;
int err;
+ bool map_bh = false;
start = mpd->map.m_lblk >> bpp_bits;
end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
@@ -2408,50 +2306,19 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- bh = head = page_buffers(page);
- do {
- if (lblk < mpd->map.m_lblk)
- continue;
- if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
- /*
- * Buffer after end of mapped extent.
- * Find next buffer in the page to map.
- */
- mpd->map.m_len = 0;
- mpd->map.m_flags = 0;
- /*
- * FIXME: If dioread_nolock supports
- * blocksize < pagesize, we need to make
- * sure we add size mapped so far to
- * io_end->size as the following call
- * can submit the page for IO.
- */
- err = mpage_process_page_bufs(mpd, head,
- bh, lblk);
- pagevec_release(&pvec);
- if (err > 0)
- err = 0;
- return err;
- }
- if (buffer_delay(bh)) {
- clear_buffer_delay(bh);
- bh->b_blocknr = pblock++;
- }
- clear_buffer_unwritten(bh);
- } while (lblk++, (bh = bh->b_this_page) != head);
-
+ err = mpage_process_page(mpd, page, &lblk, &pblock,
+ &map_bh);
/*
- * FIXME: This is going to break if dioread_nolock
- * supports blocksize < pagesize as we will try to
- * convert potentially unmapped parts of inode.
+		 * If map_bh is true, the page may require further bh
+		 * mapping, or the page may have been submitted for IO.
+		 * So we return to call further extent mapping.
*/
- mpd->io_submit.io_end->size += PAGE_SIZE;
+ if (err < 0 || map_bh == true)
+ goto out;
/* Page fully mapped - let IO run! */
err = mpage_submit_page(mpd, page);
- if (err < 0) {
- pagevec_release(&pvec);
- return err;
- }
+ if (err < 0)
+ goto out;
}
pagevec_release(&pvec);
}
@@ -2459,6 +2326,9 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
return 0;
+out:
+ pagevec_release(&pvec);
+ return err;
}
static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
@@ -2538,9 +2408,13 @@ static int mpage_map_and_submit_extent(handle_t *handle,
int err;
loff_t disksize;
int progress = 0;
+ ext4_io_end_t *io_end = mpd->io_submit.io_end;
+ struct ext4_io_end_vec *io_end_vec;
- mpd->io_submit.io_end->offset =
- ((loff_t)map->m_lblk) << inode->i_blkbits;
+ io_end_vec = ext4_alloc_io_end_vec(io_end);
+ if (IS_ERR(io_end_vec))
+ return PTR_ERR(io_end_vec);
+ io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
do {
err = mpage_map_one_extent(handle, mpd);
if (err < 0) {
@@ -2785,15 +2659,6 @@ static int ext4_writepages(struct address_space *mapping,
goto out_writepages;
}
- if (ext4_should_dioread_nolock(inode)) {
- /*
- * We may need to convert up to one extent per block in
- * the page and we may dirty the inode.
- */
- rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
- PAGE_SIZE >> inode->i_blkbits);
- }
-
/*
* If we have inline data and arrive here, it means that
* we will soon create the block for the 1st page, so
@@ -2812,6 +2677,15 @@ static int ext4_writepages(struct address_space *mapping,
ext4_journal_stop(handle);
}
+ if (ext4_should_dioread_nolock(inode)) {
+ /*
+ * We may need to convert up to one extent per block in
+ * the page and we may dirty the inode.
+ */
+ rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
+ PAGE_SIZE >> inode->i_blkbits);
+ }
+
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
@@ -3061,8 +2935,8 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
index = pos >> PAGE_SHIFT;
- if (ext4_nonda_switch(inode->i_sb) ||
- S_ISLNK(inode->i_mode)) {
+ if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
+ ext4_verity_in_progress(inode)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos,
len, flags, pagep, fsdata);
@@ -3227,24 +3101,6 @@ static int ext4_da_write_end(struct file *file,
return ret ? ret : copied;
}
-static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
-{
- /*
- * Drop reserved blocks
- */
- BUG_ON(!PageLocked(page));
- if (!page_has_buffers(page))
- goto out;
-
- ext4_da_page_release_reservation(page, offset, length);
-
-out:
- ext4_invalidatepage(page, offset, length);
-
- return;
-}
-
/*
* Force all delayed allocation blocks to be allocated for a given inode.
*/
@@ -3452,471 +3308,235 @@ static bool ext4_inode_datasync_dirty(struct inode *inode)
return inode->i_state & I_DIRTY_DATASYNC;
}
-static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
- unsigned flags, struct iomap *iomap)
+static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
+ struct ext4_map_blocks *map, loff_t offset,
+ loff_t length)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned int blkbits = inode->i_blkbits;
- unsigned long first_block, last_block;
- struct ext4_map_blocks map;
- bool delalloc = false;
- int ret;
-
- if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
- return -EINVAL;
- first_block = offset >> blkbits;
- last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
- EXT4_MAX_LOGICAL_BLOCK);
-
- if (flags & IOMAP_REPORT) {
- if (ext4_has_inline_data(inode)) {
- ret = ext4_inline_data_iomap(inode, iomap);
- if (ret != -EAGAIN) {
- if (ret == 0 && offset >= iomap->length)
- ret = -ENOENT;
- return ret;
- }
- }
- } else {
- if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
- return -ERANGE;
- }
-
- map.m_lblk = first_block;
- map.m_len = last_block - first_block + 1;
-
- if (flags & IOMAP_REPORT) {
- ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0)
- return ret;
-
- if (ret == 0) {
- ext4_lblk_t end = map.m_lblk + map.m_len - 1;
- struct extent_status es;
-
- ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
- map.m_lblk, end, &es);
-
- if (!es.es_len || es.es_lblk > end) {
- /* entire range is a hole */
- } else if (es.es_lblk > map.m_lblk) {
- /* range starts with a hole */
- map.m_len = es.es_lblk - map.m_lblk;
- } else {
- ext4_lblk_t offs = 0;
-
- if (es.es_lblk < map.m_lblk)
- offs = map.m_lblk - es.es_lblk;
- map.m_lblk = es.es_lblk + offs;
- map.m_len = es.es_len - offs;
- delalloc = true;
- }
- }
- } else if (flags & IOMAP_WRITE) {
- int dio_credits;
- handle_t *handle;
- int retries = 0;
-
- /* Trim mapping request to maximum we can map at once for DIO */
- if (map.m_len > DIO_MAX_BLOCKS)
- map.m_len = DIO_MAX_BLOCKS;
- dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
-retry:
- /*
- * Either we allocate blocks and then we don't get unwritten
- * extent so we have reserved enough credits, or the blocks
- * are already allocated and unwritten and in that case
- * extent conversion fits in the credits as well.
- */
- handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
- dio_credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- ret = ext4_map_blocks(handle, inode, &map,
- EXT4_GET_BLOCKS_CREATE_ZERO);
- if (ret < 0) {
- ext4_journal_stop(handle);
- if (ret == -ENOSPC &&
- ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry;
- return ret;
- }
-
- /*
- * If we added blocks beyond i_size, we need to make sure they
- * will get truncated if we crash before updating i_size in
- * ext4_iomap_end(). For faults we don't need to do that (and
- * even cannot because for orphan list operations inode_lock is
- * required) - if we happen to instantiate block beyond i_size,
- * it is because we race with truncate which has already added
- * the inode to the orphan list.
- */
- if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
- (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
- int err;
-
- err = ext4_orphan_add(handle, inode);
- if (err < 0) {
- ext4_journal_stop(handle);
- return err;
- }
- }
- ext4_journal_stop(handle);
- } else {
- ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0)
- return ret;
- }
+ u8 blkbits = inode->i_blkbits;
+ /*
+ * Writes that span EOF might trigger an I/O size update on completion,
+ * so consider them to be dirty for the purpose of O_DSYNC, even if
+	 * no other metadata changes are being made or pending.
+ */
iomap->flags = 0;
- if (ext4_inode_datasync_dirty(inode))
+ if (ext4_inode_datasync_dirty(inode) ||
+ offset + length > i_size_read(inode))
iomap->flags |= IOMAP_F_DIRTY;
+
+ if (map->m_flags & EXT4_MAP_NEW)
+ iomap->flags |= IOMAP_F_NEW;
+
iomap->bdev = inode->i_sb->s_bdev;
- iomap->dax_dev = sbi->s_daxdev;
- iomap->offset = (u64)first_block << blkbits;
- iomap->length = (u64)map.m_len << blkbits;
+ iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
+ iomap->offset = (u64) map->m_lblk << blkbits;
+ iomap->length = (u64) map->m_len << blkbits;
- if (ret == 0) {
- iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE;
- iomap->addr = IOMAP_NULL_ADDR;
+ /*
+ * Flags passed to ext4_map_blocks() for direct I/O writes can result
+ * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
+ * set. In order for any allocated unwritten extents to be converted
+ * into written extents correctly within the ->end_io() handler, we
+	 * need to ensure that the iomap->type is set appropriately. Hence, we
+	 * check whether the EXT4_MAP_UNWRITTEN bit has been set first.
+ */
+ if (map->m_flags & EXT4_MAP_UNWRITTEN) {
+ iomap->type = IOMAP_UNWRITTEN;
+ iomap->addr = (u64) map->m_pblk << blkbits;
+ } else if (map->m_flags & EXT4_MAP_MAPPED) {
+ iomap->type = IOMAP_MAPPED;
+ iomap->addr = (u64) map->m_pblk << blkbits;
} else {
- if (map.m_flags & EXT4_MAP_MAPPED) {
- iomap->type = IOMAP_MAPPED;
- } else if (map.m_flags & EXT4_MAP_UNWRITTEN) {
- iomap->type = IOMAP_UNWRITTEN;
- } else {
- WARN_ON_ONCE(1);
- return -EIO;
- }
- iomap->addr = (u64)map.m_pblk << blkbits;
+ iomap->type = IOMAP_HOLE;
+ iomap->addr = IOMAP_NULL_ADDR;
}
-
- if (map.m_flags & EXT4_MAP_NEW)
- iomap->flags |= IOMAP_F_NEW;
-
- return 0;
}
-static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
- ssize_t written, unsigned flags, struct iomap *iomap)
+static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
+ unsigned int flags)
{
- int ret = 0;
handle_t *handle;
- int blkbits = inode->i_blkbits;
- bool truncate = false;
+ u8 blkbits = inode->i_blkbits;
+ int ret, dio_credits, m_flags = 0, retries = 0;
- if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
- return 0;
-
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto orphan_del;
- }
- if (ext4_update_inode_size(inode, offset + written))
- ext4_mark_inode_dirty(handle, inode);
/*
- * We may need to truncate allocated but not written blocks beyond EOF.
+ * Trim the mapping request to the maximum value that we can map at
+ * once for direct I/O.
*/
- if (iomap->offset + iomap->length >
- ALIGN(inode->i_size, 1 << blkbits)) {
- ext4_lblk_t written_blk, end_blk;
+ if (map->m_len > DIO_MAX_BLOCKS)
+ map->m_len = DIO_MAX_BLOCKS;
+ dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
- written_blk = (offset + written) >> blkbits;
- end_blk = (offset + length) >> blkbits;
- if (written_blk < end_blk && ext4_can_truncate(inode))
- truncate = true;
- }
+retry:
/*
- * Remove inode from orphan list if we were extending a inode and
- * everything went fine.
+	 * Either we allocate blocks and do not get an unwritten extent, in
+	 * which case we have reserved enough credits; or the blocks are
+	 * already allocated and unwritten, in which case the extent
+	 * conversion fits within the credits as well.
*/
- if (!truncate && inode->i_nlink &&
- !list_empty(&EXT4_I(inode)->i_orphan))
- ext4_orphan_del(handle, inode);
- ext4_journal_stop(handle);
- if (truncate) {
- ext4_truncate_failed_write(inode);
-orphan_del:
- /*
- * If truncate failed early the inode might still be on the
- * orphan list; we need to make sure the inode is removed from
- * the orphan list in that case.
- */
- if (inode->i_nlink)
- ext4_orphan_del(NULL, inode);
- }
- return ret;
-}
-
-const struct iomap_ops ext4_iomap_ops = {
- .iomap_begin = ext4_iomap_begin,
- .iomap_end = ext4_iomap_end,
-};
-
-static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
- ssize_t size, void *private)
-{
- ext4_io_end_t *io_end = private;
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- /* if not async direct IO just return */
- if (!io_end)
- return 0;
+ /*
+ * DAX and direct I/O are the only two operations that are currently
+ * supported with IOMAP_WRITE.
+ */
+ WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
+ if (IS_DAX(inode))
+ m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
+ /*
+ * We use i_size instead of i_disksize here because delalloc writeback
+ * can complete at any point during the I/O and subsequently push the
+ * i_disksize out to i_size. This could be beyond where direct I/O is
+ * happening and thus expose allocated blocks to direct I/O reads.
+ */
+ else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode))
+ m_flags = EXT4_GET_BLOCKS_CREATE;
+ else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
- ext_debug("ext4_end_io_dio(): io_end 0x%p "
- "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
- io_end, io_end->inode->i_ino, iocb, offset, size);
+ ret = ext4_map_blocks(handle, inode, map, m_flags);
/*
- * Error during AIO DIO. We cannot convert unwritten extents as the
- * data was not written. Just clear the unwritten flag and drop io_end.
+ * We cannot fill holes in indirect tree based inodes as that could
+ * expose stale data in the case of a crash. Use the magic error code
+	 * to fall back to buffered I/O.
*/
- if (size <= 0) {
- ext4_clear_io_unwritten_flag(io_end);
- size = 0;
- }
- io_end->offset = offset;
- io_end->size = size;
- ext4_put_io_end(io_end);
+ if (!m_flags && !ret)
+ ret = -ENOTBLK;
- return 0;
+ ext4_journal_stop(handle);
+ if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+
+ return ret;
}
-/*
- * Handling of direct IO writes.
- *
- * For ext4 extent files, ext4 will do direct-io write even to holes,
- * preallocated extents, and those write extend the file, no need to
- * fall back to buffered IO.
- *
- * For holes, we fallocate those blocks, mark them as unwritten
- * If those blocks were preallocated, we mark sure they are split, but
- * still keep the range to write as unwritten.
- *
- * The unwritten extents will be converted to written when DIO is completed.
- * For async direct IO, since the IO may still pending when return, we
- * set up an end_io call back function, which will do the conversion
- * when async direct IO completed.
- *
- * If the O_DIRECT write will extend the file then add this inode to the
- * orphan list. So recovery will truncate it back to the original size
- * if the machine crashes during the write.
- *
- */
-static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
+
+static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct ext4_inode_info *ei = EXT4_I(inode);
- ssize_t ret;
- loff_t offset = iocb->ki_pos;
- size_t count = iov_iter_count(iter);
- int overwrite = 0;
- get_block_t *get_block_func = NULL;
- int dio_flags = 0;
- loff_t final_size = offset + count;
- int orphan = 0;
- handle_t *handle;
+ int ret;
+ struct ext4_map_blocks map;
+ u8 blkbits = inode->i_blkbits;
- if (final_size > inode->i_size || final_size > ei->i_disksize) {
- /* Credits for sb + inode write */
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
- }
- ret = ext4_orphan_add(handle, inode);
- if (ret) {
- ext4_journal_stop(handle);
- goto out;
- }
- orphan = 1;
- ext4_update_i_disksize(inode, inode->i_size);
- ext4_journal_stop(handle);
- }
+ if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+ return -EINVAL;
- BUG_ON(iocb->private == NULL);
+ if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
+ return -ERANGE;
/*
- * Make all waiters for direct IO properly wait also for extent
- * conversion. This also disallows race between truncate() and
- * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
+ * Calculate the first and last logical blocks respectively.
*/
- inode_dio_begin(inode);
+ map.m_lblk = offset >> blkbits;
+ map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
+ EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
- /* If we do a overwrite dio, i_mutex locking can be released */
- overwrite = *((int *)iocb->private);
+ if (flags & IOMAP_WRITE)
+ ret = ext4_iomap_alloc(inode, &map, flags);
+ else
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+
+ if (ret < 0)
+ return ret;
- if (overwrite)
- inode_unlock(inode);
+ ext4_set_iomap(inode, iomap, &map, offset, length);
+
+ return 0;
+}
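
For reference, the block-range computation in ext4_iomap_begin() above maps the byte range [offset, offset + length) to an inclusive range of logical blocks, clamping the last block to EXT4_MAX_LOGICAL_BLOCK. A minimal standalone sketch of the same arithmetic (assuming a 4 KiB block size and omitting the clamp):

/* Standalone sketch of the m_lblk/m_len arithmetic (4 KiB blocks assumed). */
#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;               /* log2(4096) */
	long long offset = 5000, length = 10000; /* arbitrary byte range */

	long long first = offset >> blkbits;                /* block 1 */
	long long last  = (offset + length - 1) >> blkbits; /* block 3 */
	long long len   = last - first + 1;                 /* 3 blocks */

	printf("m_lblk=%lld m_len=%lld\n", first, len);
	return 0;
}
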
+static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
+ ssize_t written, unsigned flags, struct iomap *iomap)
+{
/*
- * For extent mapped files we could direct write to holes and fallocate.
- *
- * Allocated blocks to fill the hole are marked as unwritten to prevent
- * parallel buffered read to expose the stale data before DIO complete
- * the data IO.
- *
- * As to previously fallocated extents, ext4 get_block will just simply
- * mark the buffer mapped but still keep the extents unwritten.
- *
- * For non AIO case, we will convert those unwritten extents to written
- * after return back from blockdev_direct_IO. That way we save us from
- * allocating io_end structure and also the overhead of offloading
- * the extent convertion to a workqueue.
- *
- * For async DIO, the conversion needs to be deferred when the
- * IO is completed. The ext4 end_io callback function will be
- * called to take care of the conversion work. Here for async
- * case, we allocate an io_end structure to hook to the iocb.
+ * Check to see whether an error occurred while writing out the data to
+ * the allocated blocks. If so, return the magic error code so that we
+	 * fall back to buffered I/O and attempt to complete the remainder of
+ * the I/O. Any blocks that may have been allocated in preparation for
+ * the direct I/O will be reused during buffered I/O.
*/
- iocb->private = NULL;
- if (overwrite)
- get_block_func = ext4_dio_get_block_overwrite;
- else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
- round_down(offset, i_blocksize(inode)) >= inode->i_size) {
- get_block_func = ext4_dio_get_block;
- dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
- } else if (is_sync_kiocb(iocb)) {
- get_block_func = ext4_dio_get_block_unwritten_sync;
- dio_flags = DIO_LOCKING;
- } else {
- get_block_func = ext4_dio_get_block_unwritten_async;
- dio_flags = DIO_LOCKING;
- }
- ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- get_block_func, ext4_end_io_dio, NULL,
- dio_flags);
+ if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
+ return -ENOTBLK;
- if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
- EXT4_STATE_DIO_UNWRITTEN)) {
- int err;
- /*
- * for non AIO case, since the IO is already
- * completed, we could do the conversion right here
- */
- err = ext4_convert_unwritten_extents(NULL, inode,
- offset, ret);
- if (err < 0)
- ret = err;
- ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
- }
+ return 0;
+}
- inode_dio_end(inode);
- /* take i_mutex locking again if we do a ovewrite dio */
- if (overwrite)
- inode_lock(inode);
+const struct iomap_ops ext4_iomap_ops = {
+ .iomap_begin = ext4_iomap_begin,
+ .iomap_end = ext4_iomap_end,
+};
- if (ret < 0 && final_size > inode->i_size)
- ext4_truncate_failed_write(inode);
+static bool ext4_iomap_is_delalloc(struct inode *inode,
+ struct ext4_map_blocks *map)
+{
+ struct extent_status es;
+ ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
- /* Handle extending of i_size after direct IO write */
- if (orphan) {
- int err;
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
+ map->m_lblk, end, &es);
- /* Credits for sb + inode write */
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
- if (IS_ERR(handle)) {
- /*
- * We wrote the data but cannot extend
- * i_size. Bail out. In async io case, we do
- * not return error here because we have
- * already submmitted the corresponding
- * bio. Returning error here makes the caller
- * think that this IO is done and failed
- * resulting in race with bio's completion
- * handler.
- */
- if (!ret)
- ret = PTR_ERR(handle);
- if (inode->i_nlink)
- ext4_orphan_del(NULL, inode);
+ if (!es.es_len || es.es_lblk > end)
+ return false;
- goto out;
- }
- if (inode->i_nlink)
- ext4_orphan_del(handle, inode);
- if (ret > 0) {
- loff_t end = offset + ret;
- if (end > inode->i_size || end > ei->i_disksize) {
- ext4_update_i_disksize(inode, end);
- if (end > inode->i_size)
- i_size_write(inode, end);
- /*
- * We're going to return a positive `ret'
- * here due to non-zero-length I/O, so there's
- * no way of reporting error returns from
- * ext4_mark_inode_dirty() to userspace. So
- * ignore it.
- */
- ext4_mark_inode_dirty(handle, inode);
- }
- }
- err = ext4_journal_stop(handle);
- if (ret == 0)
- ret = err;
+ if (es.es_lblk > map->m_lblk) {
+ map->m_len = es.es_lblk - map->m_lblk;
+ return false;
}
-out:
- return ret;
-}
-static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- struct inode *inode = mapping->host;
- size_t count = iov_iter_count(iter);
- ssize_t ret;
+ offset = map->m_lblk - es.es_lblk;
+ map->m_len = es.es_len - offset;
- /*
- * Shared inode_lock is enough for us - it protects against concurrent
- * writes & truncates and since we take care of writing back page cache,
- * we are protected against page writeback as well.
- */
- inode_lock_shared(inode);
- ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
- iocb->ki_pos + count - 1);
- if (ret)
- goto out_unlock;
- ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
- iter, ext4_dio_get_block, NULL, NULL, 0);
-out_unlock:
- inode_unlock_shared(inode);
- return ret;
+ return true;
}
-static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
+ loff_t length, unsigned int flags,
+ struct iomap *iomap, struct iomap *srcmap)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- size_t count = iov_iter_count(iter);
- loff_t offset = iocb->ki_pos;
- ssize_t ret;
+ int ret;
+ bool delalloc = false;
+ struct ext4_map_blocks map;
+ u8 blkbits = inode->i_blkbits;
-#ifdef CONFIG_FS_ENCRYPTION
- if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
- return 0;
-#endif
+ if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+ return -EINVAL;
+
+ if (ext4_has_inline_data(inode)) {
+ ret = ext4_inline_data_iomap(inode, iomap);
+ if (ret != -EAGAIN) {
+ if (ret == 0 && offset >= iomap->length)
+ ret = -ENOENT;
+ return ret;
+ }
+ }
/*
- * If we are doing data journalling we don't support O_DIRECT
+ * Calculate the first and last logical block respectively.
*/
- if (ext4_should_journal_data(inode))
- return 0;
+ map.m_lblk = offset >> blkbits;
+ map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
+ EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
- /* Let buffer I/O handle the inline data case. */
- if (ext4_has_inline_data(inode))
- return 0;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ delalloc = ext4_iomap_is_delalloc(inode, &map);
- trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
- if (iov_iter_rw(iter) == READ)
- ret = ext4_direct_IO_read(iocb, iter);
- else
- ret = ext4_direct_IO_write(iocb, iter);
- trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
- return ret;
+ ext4_set_iomap(inode, iomap, &map, offset, length);
+ if (delalloc && iomap->type == IOMAP_HOLE)
+ iomap->type = IOMAP_DELALLOC;
+
+ return 0;
}
+const struct iomap_ops ext4_iomap_report_ops = {
+ .iomap_begin = ext4_iomap_begin_report,
+};
+
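
The report ops above only look up existing mappings and never allocate, which is the behaviour hole/extent queries need; elsewhere in this series they are presumably wired up to paths such as llseek's SEEK_HOLE/SEEK_DATA. A userspace sketch of that kind of query (standard Linux API, not part of this patch):

/* Userspace sketch: probe the first data extent and the hole after it. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);
	off_t data, hole;

	if (fd < 0)
		return 1;
	data = lseek(fd, 0, SEEK_DATA);            /* start of first data extent */
	if (data >= 0) {
		hole = lseek(fd, data, SEEK_HOLE); /* end of that extent */
		printf("data at %lld, next hole at %lld\n",
		       (long long)data, (long long)hole);
	}
	close(fd);
	return 0;
}
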
/*
* Pages can be marked dirty completely asynchronously from ext4's journalling
* activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
@@ -3954,7 +3574,7 @@ static const struct address_space_operations ext4_aops = {
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
- .direct_IO = ext4_direct_IO,
+ .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@@ -3971,7 +3591,7 @@ static const struct address_space_operations ext4_journalled_aops = {
.bmap = ext4_bmap,
.invalidatepage = ext4_journalled_invalidatepage,
.releasepage = ext4_releasepage,
- .direct_IO = ext4_direct_IO,
+ .direct_IO = noop_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
@@ -3985,9 +3605,9 @@ static const struct address_space_operations ext4_da_aops = {
.write_end = ext4_da_write_end,
.set_page_dirty = ext4_set_page_dirty,
.bmap = ext4_bmap,
- .invalidatepage = ext4_da_invalidatepage,
+ .invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
- .direct_IO = ext4_direct_IO,
+ .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@@ -4297,6 +3917,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
trace_ext4_punch_hole(inode, offset, length, 0);
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ if (ext4_has_inline_data(inode)) {
+ down_write(&EXT4_I(inode)->i_mmap_sem);
+ ret = ext4_convert_inline_data(inode);
+ up_write(&EXT4_I(inode)->i_mmap_sem);
+ if (ret)
+ return ret;
+ }
+
/*
* Write out all dirty pages to avoid race conditions
* Then release them.
@@ -4739,6 +4368,8 @@ static bool ext4_should_use_dax(struct inode *inode)
return false;
if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
return false;
+ if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
+ return false;
return true;
}
@@ -4763,9 +4394,11 @@ void ext4_set_inode_flags(struct inode *inode)
new_fl |= S_ENCRYPTED;
if (flags & EXT4_CASEFOLD_FL)
new_fl |= S_CASEFOLD;
+ if (flags & EXT4_VERITY_FL)
+ new_fl |= S_VERITY;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
- S_ENCRYPTED|S_CASEFOLD);
+ S_ENCRYPTED|S_CASEFOLD|S_VERITY);
}
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
@@ -5119,6 +4752,9 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
"iget: bogus i_mode (%o)", inode->i_mode);
goto bad_inode;
}
+ if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
+ ext4_error_inode(inode, function, line, 0,
+ "casefold flag without casefold feature");
brelse(iloc.bh);
unlock_new_inode(inode);
@@ -5478,11 +5114,15 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
offset = inode->i_size & (PAGE_SIZE - 1);
/*
- * All buffers in the last page remain valid? Then there's nothing to
- * do. We do the check mainly to optimize the common PAGE_SIZE ==
- * blocksize case
+ * If the page is fully truncated, we don't need to wait for any commit
+	 * (and in fact should not, as __ext4_journalled_invalidatepage() may
+	 * strip all buffers from the page but keep the page dirty, which can
+	 * then confuse e.g. a concurrent ext4_writepage() seeing a dirty page without
+ * buffers). Also we don't need to wait for any commit if all buffers in
+ * the page remain valid. This is most beneficial for the common case of
+ * blocksize == PAGESIZE.
*/
- if (offset > PAGE_SIZE - i_blocksize(inode))
+ if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
return;
while (1) {
page = find_lock_page(inode->i_mapping,
@@ -5555,6 +5195,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
+ error = fsverity_prepare_setattr(dentry, attr);
+ if (error)
+ return error;
+
if (is_quota_modification(inode, attr)) {
error = dquot_initialize(inode);
if (error)
@@ -5741,12 +5385,15 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
stat->attributes |= STATX_ATTR_IMMUTABLE;
if (flags & EXT4_NODUMP_FL)
stat->attributes |= STATX_ATTR_NODUMP;
+ if (flags & EXT4_VERITY_FL)
+ stat->attributes |= STATX_ATTR_VERITY;
stat->attributes_mask |= (STATX_ATTR_APPEND |
STATX_ATTR_COMPRESSED |
STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE |
- STATX_ATTR_NODUMP);
+ STATX_ATTR_NODUMP |
+ STATX_ATTR_VERITY);
generic_fillattr(inode, stat);
return 0;
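
With the hunk above, the verity flag is surfaced through the statx() attribute bits. A userspace sketch that reads it back (assumes a libc exposing statx() and uapi headers that define STATX_ATTR_VERITY):

/* Userspace sketch: query the verity attribute reported by ext4_getattr(). */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (statx(AT_FDCWD, argc > 1 ? argv[1] : ".", 0, STATX_BASIC_STATS, &stx))
		return 1;
	if (stx.stx_attributes_mask & STATX_ATTR_VERITY)
		printf("verity is %s\n",
		       (stx.stx_attributes & STATX_ATTR_VERITY) ? "enabled" : "not enabled");
	else
		printf("this kernel does not report the verity attribute\n");
	return 0;
}
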
@@ -5936,8 +5583,23 @@ static int __ext4_expand_extra_isize(struct inode *inode,
{
struct ext4_inode *raw_inode;
struct ext4_xattr_ibody_header *header;
+ unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
int error;
+ /* this was checked at iget time, but double check for good measure */
+ if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
+ (ei->i_extra_isize & 3)) {
+ EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
+ ei->i_extra_isize,
+ EXT4_INODE_SIZE(inode->i_sb));
+ return -EFSCORRUPTED;
+ }
+ if ((new_extra_isize < ei->i_extra_isize) ||
+ (new_extra_isize < 4) ||
+ (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
+ return -EINVAL; /* Should never happen */
+
raw_inode = ext4_raw_inode(iloc);
header = IHDR(inode, raw_inode);
@@ -5989,9 +5651,8 @@ static int ext4_try_to_expand_extra_isize(struct inode *inode,
* If this is felt to be critical, then e2fsck should be run to
* force a large enough s_min_extra_isize.
*/
- if (ext4_handle_valid(handle) &&
- jbd2_journal_extend(handle,
- EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) != 0)
+ if (ext4_journal_extend(handle,
+ EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
return -ENOSPC;
if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 442f7ef873fc..e8870fff8224 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -745,6 +745,74 @@ static void ext4_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
fa->fsx_projid = from_kprojid(&init_user_ns, ei->i_projid);
}
+/* copied from fs/ioctl.c */
+static int fiemap_check_ranges(struct super_block *sb,
+ u64 start, u64 len, u64 *new_len)
+{
+ u64 maxbytes = (u64) sb->s_maxbytes;
+
+ *new_len = len;
+
+ if (len == 0)
+ return -EINVAL;
+
+ if (start > maxbytes)
+ return -EFBIG;
+
+ /*
+ * Shrink request scope to what the fs can actually handle.
+ */
+ if (len > maxbytes || (maxbytes - len) < start)
+ *new_len = maxbytes - start;
+
+ return 0;
+}
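
The clamp at the end of fiemap_check_ranges() only triggers when the requested range runs past the filesystem's s_maxbytes; a small standalone illustration with assumed numbers:

/* Standalone sketch of the range clamp (maxbytes, start and len are assumed values). */
#include <stdio.h>

int main(void)
{
	unsigned long long maxbytes = 1ULL << 32;                 /* pretend fs limit */
	unsigned long long start = (1ULL << 32) - 4096;
	unsigned long long len = 65536, new_len = len;

	if (len > maxbytes || (maxbytes - len) < start)
		new_len = maxbytes - start;                        /* clamped to 4096 */

	printf("new_len=%llu\n", new_len);
	return 0;
}
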
+
+/* So that the fiemap access checks can't overflow on 32 bit machines. */
+#define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))
+
+static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
+{
+ struct fiemap fiemap;
+ struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
+ struct fiemap_extent_info fieinfo = { 0, };
+ struct inode *inode = file_inode(filp);
+ struct super_block *sb = inode->i_sb;
+ u64 len;
+ int error;
+
+ if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
+ return -EFAULT;
+
+ if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
+ return -EINVAL;
+
+ error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
+ &len);
+ if (error)
+ return error;
+
+ fieinfo.fi_flags = fiemap.fm_flags;
+ fieinfo.fi_extents_max = fiemap.fm_extent_count;
+ fieinfo.fi_extents_start = ufiemap->fm_extents;
+
+ if (fiemap.fm_extent_count != 0 &&
+ !access_ok(fieinfo.fi_extents_start,
+ fieinfo.fi_extents_max * sizeof(struct fiemap_extent)))
+ return -EFAULT;
+
+ if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
+ filemap_write_and_wait(inode->i_mapping);
+
+ error = ext4_get_es_cache(inode, &fieinfo, fiemap.fm_start, len);
+ fiemap.fm_flags = fieinfo.fi_flags;
+ fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
+ if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
+ error = -EFAULT;
+
+ return error;
+}
+
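
EXT4_IOC_GET_ES_CACHE reuses the standard struct fiemap argument layout; its ioctl number lives in fs/ext4/ext4.h and is not shown in this hunk. As a hedged userspace sketch, the equivalent call shape using the generic FS_IOC_FIEMAP request looks like this:

/* Userspace sketch of a fiemap-style query sharing the same argument layout. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);
	struct fiemap *fm = calloc(1, sizeof(*fm) + 16 * sizeof(struct fiemap_extent));

	if (fd < 0 || !fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;          /* whole file */
	fm->fm_extent_count = 16;       /* room for 16 extents in the trailing array */
	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
		printf("%u extents mapped\n", fm->fm_mapped_extents);
	free(fm);
	close(fd);
	return 0;
}
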
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1113,8 +1181,62 @@ resizefs_out:
#endif
}
case EXT4_IOC_GET_ENCRYPTION_POLICY:
+ if (!ext4_has_feature_encrypt(sb))
+ return -EOPNOTSUPP;
return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ if (!ext4_has_feature_encrypt(sb))
+ return -EOPNOTSUPP;
+ return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
+
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ if (!ext4_has_feature_encrypt(sb))
+ return -EOPNOTSUPP;
+ return fscrypt_ioctl_add_key(filp, (void __user *)arg);
+
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ if (!ext4_has_feature_encrypt(sb))
+ return -EOPNOTSUPP;
+ return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
+
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ if (!ext4_has_feature_encrypt(sb))
+ return -EOPNOTSUPP;
+ return fscrypt_ioctl_remove_key_all_users(filp,
+ (void __user *)arg);
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
+ if (!ext4_has_feature_encrypt(sb))
+ return -EOPNOTSUPP;
+ return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
+
+ case EXT4_IOC_CLEAR_ES_CACHE:
+ {
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+ ext4_clear_inode_es(inode);
+ return 0;
+ }
+
+ case EXT4_IOC_GETSTATE:
+ {
+ __u32 state = 0;
+
+ if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED))
+ state |= EXT4_STATE_FLAG_EXT_PRECACHED;
+ if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
+ state |= EXT4_STATE_FLAG_NEW;
+ if (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
+ state |= EXT4_STATE_FLAG_NEWENTRY;
+ if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE))
+ state |= EXT4_STATE_FLAG_DA_ALLOC_CLOSE;
+
+ return put_user(state, (__u32 __user *) arg);
+ }
+
+ case EXT4_IOC_GET_ES_CACHE:
+ return ext4_ioctl_get_es_cache(filp, arg);
+
case EXT4_IOC_FSGETXATTR:
{
struct fsxattr fa;
@@ -1171,6 +1293,17 @@ out:
}
case EXT4_IOC_SHUTDOWN:
return ext4_shutdown(sb, arg);
+
+ case FS_IOC_ENABLE_VERITY:
+ if (!ext4_has_feature_verity(sb))
+ return -EOPNOTSUPP;
+ return fsverity_ioctl_enable(filp, (const void __user *)arg);
+
+ case FS_IOC_MEASURE_VERITY:
+ if (!ext4_has_feature_verity(sb))
+ return -EOPNOTSUPP;
+ return fsverity_ioctl_measure(filp, (void __user *)arg);
+
default:
return -ENOTTY;
}
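
The FS_IOC_ENABLE_VERITY / FS_IOC_MEASURE_VERITY cases above forward to the fs/verity helpers. A userspace sketch of driving them (assumes uapi linux/fsverity.h and an ext4 filesystem created with the verity feature; the file must be opened read-only with no writers for enable to succeed):

/* Userspace sketch: enable fs-verity on a file, then read back its digest. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);
	struct fsverity_enable_arg arg = {
		.version = 1,
		.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
		.block_size = 4096,
	};
	/* Digest header plus room for a 32-byte SHA-256 digest. */
	char buf[sizeof(struct fsverity_digest) + 32];
	struct fsverity_digest *d = (struct fsverity_digest *)buf;

	if (fd < 0)
		return 1;
	if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg))
		perror("FS_IOC_ENABLE_VERITY");

	d->digest_size = 32;            /* capacity of the trailing digest buffer */
	if (ioctl(fd, FS_IOC_MEASURE_VERITY, d) == 0)
		printf("digest algorithm %u, %u bytes\n",
		       d->digest_algorithm, d->digest_size);
	close(fd);
	return 0;
}
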
@@ -1227,12 +1360,23 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
case EXT4_IOC_MOVE_EXT:
case EXT4_IOC_RESIZE_FS:
+ case FITRIM:
case EXT4_IOC_PRECACHE_EXTENTS:
case EXT4_IOC_SET_ENCRYPTION_POLICY:
case EXT4_IOC_GET_ENCRYPTION_PWSALT:
case EXT4_IOC_GET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
case EXT4_IOC_SHUTDOWN:
case FS_IOC_GETFSMAP:
+ case FS_IOC_ENABLE_VERITY:
+ case FS_IOC_MEASURE_VERITY:
+ case EXT4_IOC_CLEAR_ES_CACHE:
+ case EXT4_IOC_GETSTATE:
+ case EXT4_IOC_GET_ES_CACHE:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b1e4d359f73b..89725fa42573 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -50,29 +50,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
needed = ext4_ext_calc_credits_for_single_extent(inode,
lb->last_block - lb->first_block + 1, path);
- /*
- * Make sure the credit we accumalated is not really high
- */
- if (needed && ext4_handle_has_enough_credits(handle,
- EXT4_RESERVE_TRANS_BLOCKS)) {
- up_write((&EXT4_I(inode)->i_data_sem));
- retval = ext4_journal_restart(handle, needed);
- down_write((&EXT4_I(inode)->i_data_sem));
- if (retval)
- goto err_out;
- } else if (needed) {
- retval = ext4_journal_extend(handle, needed);
- if (retval) {
- /*
- * IF not able to extend the journal restart the journal
- */
- up_write((&EXT4_I(inode)->i_data_sem));
- retval = ext4_journal_restart(handle, needed);
- down_write((&EXT4_I(inode)->i_data_sem));
- if (retval)
- goto err_out;
- }
- }
+ retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
+ if (retval < 0)
+ goto err_out;
retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
up_write((&EXT4_I(inode)->i_data_sem));
@@ -196,42 +176,30 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
}
-static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
-{
- int retval = 0, needed;
-
- if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
- return 0;
- /*
- * We are freeing a blocks. During this we touch
- * superblock, group descriptor and block bitmap.
- * So allocate a credit of 3. We may update
- * quota (user and group).
- */
- needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
-
- if (ext4_journal_extend(handle, needed) != 0)
- retval = ext4_journal_restart(handle, needed);
-
- return retval;
-}
-
static int free_dind_blocks(handle_t *handle,
struct inode *inode, __le32 i_data)
{
int i;
__le32 *tmp_idata;
struct buffer_head *bh;
+ struct super_block *sb = inode->i_sb;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
+ int err;
- bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+ bh = ext4_sb_bread(sb, le32_to_cpu(i_data), 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
tmp_idata = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
if (tmp_idata[i]) {
- extend_credit_for_blkdel(handle, inode);
+ err = ext4_journal_ensure_credits(handle,
+ EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(sb, 1));
+ if (err < 0) {
+ put_bh(bh);
+ return err;
+ }
ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(tmp_idata[i]), 1,
EXT4_FREE_BLOCKS_METADATA |
@@ -239,7 +207,10 @@ static int free_dind_blocks(handle_t *handle,
}
}
put_bh(bh);
- extend_credit_for_blkdel(handle, inode);
+ err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(sb, 1));
+ if (err < 0)
+ return err;
ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -270,7 +241,10 @@ static int free_tind_blocks(handle_t *handle,
}
}
put_bh(bh);
- extend_credit_for_blkdel(handle, inode);
+ retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1));
+ if (retval < 0)
+ return retval;
ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -283,7 +257,11 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
/* ei->i_data[EXT4_IND_BLOCK] */
if (i_data[0]) {
- extend_credit_for_blkdel(handle, inode);
+ retval = ext4_journal_ensure_credits(handle,
+ EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1));
+ if (retval < 0)
+ return retval;
ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(i_data[0]), 1,
EXT4_FREE_BLOCKS_METADATA |
@@ -318,12 +296,9 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
* One credit accounted for writing the
* i_data field of the original inode
*/
- retval = ext4_journal_extend(handle, 1);
- if (retval) {
- retval = ext4_journal_restart(handle, 1);
- if (retval)
- goto err_out;
- }
+ retval = ext4_journal_ensure_credits(handle, 1, 0);
+ if (retval < 0)
+ goto err_out;
i_data[0] = ei->i_data[EXT4_IND_BLOCK];
i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
@@ -391,15 +366,20 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
ix = EXT_FIRST_INDEX(eh);
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
retval = free_ext_idx(handle, inode, ix);
- if (retval)
- break;
+ if (retval) {
+ put_bh(bh);
+ return retval;
+ }
}
}
put_bh(bh);
- extend_credit_for_blkdel(handle, inode);
+ retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1));
+ if (retval < 0)
+ return retval;
ext4_free_blocks(handle, inode, NULL, block, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
- return retval;
+ return 0;
}
/*
@@ -574,9 +554,9 @@ err_out:
}
/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
- if (ext4_journal_extend(handle, 1) != 0)
- ext4_journal_restart(handle, 1);
-
+ retval = ext4_journal_ensure_credits(handle, 1, 0);
+ if (retval < 0)
+ goto out_stop;
/*
* Mark the tmp_inode as of size zero
*/
@@ -594,6 +574,7 @@ err_out:
/* Reset the extent details */
ext4_ext_tree_init(handle, tmp_inode);
+out_stop:
ext4_journal_stop(handle);
out:
unlock_new_inode(tmp_inode);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 129029534075..a856997d87b5 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1312,7 +1312,7 @@ void ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
{
int len;
- if (!IS_CASEFOLDED(dir)) {
+ if (!IS_CASEFOLDED(dir) || !EXT4_SB(dir->i_sb)->s_encoding) {
cf_name->name = NULL;
return;
}
@@ -2183,7 +2183,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
#ifdef CONFIG_UNICODE
if (ext4_has_strict_mode(sbi) && IS_CASEFOLDED(dir) &&
- utf8_validate(sbi->s_encoding, &dentry->d_name))
+ sbi->s_encoding && utf8_validate(sbi->s_encoding, &dentry->d_name))
return -EINVAL;
#endif
@@ -2547,18 +2547,29 @@ static void ext4_dec_count(handle_t *handle, struct inode *inode)
}
+/*
+ * Add a non-directory inode to a directory. On success, the inode reference is
+ * consumed by the dentry instantiation, which is also indicated by clearing of
+ * the *inodep pointer. On failure, the caller is responsible for dropping the
+ * inode reference in a safe context.
+ */
static int ext4_add_nondir(handle_t *handle,
- struct dentry *dentry, struct inode *inode)
+ struct dentry *dentry, struct inode **inodep)
{
+ struct inode *dir = d_inode(dentry->d_parent);
+ struct inode *inode = *inodep;
int err = ext4_add_entry(handle, dentry, inode);
if (!err) {
ext4_mark_inode_dirty(handle, inode);
+ if (IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
d_instantiate_new(dentry, inode);
+ *inodep = NULL;
return 0;
}
drop_nlink(inode);
+ ext4_orphan_add(handle, inode);
unlock_new_inode(inode);
- iput(inode);
return err;
}
@@ -2592,12 +2603,12 @@ retry:
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
- err = ext4_add_nondir(handle, dentry, inode);
- if (!err && IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
+ err = ext4_add_nondir(handle, dentry, &inode);
}
if (handle)
ext4_journal_stop(handle);
+ if (!IS_ERR_OR_NULL(inode))
+ iput(inode);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -2624,12 +2635,12 @@ retry:
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &ext4_special_inode_operations;
- err = ext4_add_nondir(handle, dentry, inode);
- if (!err && IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
+ err = ext4_add_nondir(handle, dentry, &inode);
}
if (handle)
ext4_journal_stop(handle);
+ if (!IS_ERR_OR_NULL(inode))
+ iput(inode);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -2779,10 +2790,12 @@ retry:
if (err) {
out_clear_inode:
clear_nlink(inode);
+ ext4_orphan_add(handle, inode);
unlock_new_inode(inode);
ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
iput(inode);
- goto out_stop;
+ goto out_retry;
}
ext4_inc_count(handle, dir);
ext4_update_dx_flag(dir);
@@ -2796,6 +2809,7 @@ out_clear_inode:
out_stop:
if (handle)
ext4_journal_stop(handle);
+out_retry:
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -3182,18 +3196,17 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
- if (inode->i_nlink == 0) {
- ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
- dentry->d_name.len, dentry->d_name.name);
- set_nlink(inode, 1);
- }
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_unlink;
dir->i_ctime = dir->i_mtime = current_time(dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
- drop_nlink(inode);
+ if (inode->i_nlink == 0)
+ ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
+ dentry->d_name.len, dentry->d_name.name);
+ else
+ drop_nlink(inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
inode->i_ctime = current_time(inode);
@@ -3328,12 +3341,11 @@ static int ext4_symlink(struct inode *dir,
inode->i_size = disk_link.len - 1;
}
EXT4_I(inode)->i_disksize = inode->i_size;
- err = ext4_add_nondir(handle, dentry, inode);
- if (!err && IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
-
+ err = ext4_add_nondir(handle, dentry, &inode);
if (handle)
ext4_journal_stop(handle);
+ if (inode)
+ iput(inode);
goto out_free_encrypted_link;
err_drop_inode:
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 12ceadef32c5..24aeedb8fc75 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -31,18 +31,56 @@
#include "acl.h"
static struct kmem_cache *io_end_cachep;
+static struct kmem_cache *io_end_vec_cachep;
int __init ext4_init_pageio(void)
{
io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
if (io_end_cachep == NULL)
return -ENOMEM;
+
+ io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
+ if (io_end_vec_cachep == NULL) {
+ kmem_cache_destroy(io_end_cachep);
+ return -ENOMEM;
+ }
return 0;
}
void ext4_exit_pageio(void)
{
kmem_cache_destroy(io_end_cachep);
+ kmem_cache_destroy(io_end_vec_cachep);
+}
+
+struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
+{
+ struct ext4_io_end_vec *io_end_vec;
+
+ io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
+ if (!io_end_vec)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&io_end_vec->list);
+ list_add_tail(&io_end_vec->list, &io_end->list_vec);
+ return io_end_vec;
+}
+
+static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
+{
+ struct ext4_io_end_vec *io_end_vec, *tmp;
+
+ if (list_empty(&io_end->list_vec))
+ return;
+ list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
+ list_del(&io_end_vec->list);
+ kmem_cache_free(io_end_vec_cachep, io_end_vec);
+ }
+}
+
+struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
+{
+ BUG_ON(list_empty(&io_end->list_vec));
+ return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}
/*
@@ -125,6 +163,7 @@ static void ext4_release_io_end(ext4_io_end_t *io_end)
ext4_finish_bio(bio);
bio_put(bio);
}
+ ext4_free_io_end_vec(io_end);
kmem_cache_free(io_end_cachep, io_end);
}
@@ -136,29 +175,26 @@ static void ext4_release_io_end(ext4_io_end_t *io_end)
* cannot get to ext4_ext_truncate() before all IOs overlapping that range are
* completed (happens from ext4_free_ioend()).
*/
-static int ext4_end_io(ext4_io_end_t *io)
+static int ext4_end_io_end(ext4_io_end_t *io_end)
{
- struct inode *inode = io->inode;
- loff_t offset = io->offset;
- ssize_t size = io->size;
- handle_t *handle = io->handle;
+ struct inode *inode = io_end->inode;
+ handle_t *handle = io_end->handle;
int ret = 0;
- ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
+ ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
"list->prev 0x%p\n",
- io, inode->i_ino, io->list.next, io->list.prev);
+ io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
- io->handle = NULL; /* Following call will use up the handle */
- ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
+ io_end->handle = NULL; /* Following call will use up the handle */
+ ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
ext4_msg(inode->i_sb, KERN_EMERG,
"failed to convert unwritten extents to written "
"extents -- potential data loss! "
- "(inode %lu, offset %llu, size %zd, error %d)",
- inode->i_ino, offset, size, ret);
+ "(inode %lu, error %d)", inode->i_ino, ret);
}
- ext4_clear_io_unwritten_flag(io);
- ext4_release_io_end(io);
+ ext4_clear_io_unwritten_flag(io_end);
+ ext4_release_io_end(io_end);
return ret;
}
@@ -166,21 +202,21 @@ static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
struct list_head *cur, *before, *after;
- ext4_io_end_t *io, *io0, *io1;
+ ext4_io_end_t *io_end, *io_end0, *io_end1;
if (list_empty(head))
return;
ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
- list_for_each_entry(io, head, list) {
- cur = &io->list;
+ list_for_each_entry(io_end, head, list) {
+ cur = &io_end->list;
before = cur->prev;
- io0 = container_of(before, ext4_io_end_t, list);
+ io_end0 = container_of(before, ext4_io_end_t, list);
after = cur->next;
- io1 = container_of(after, ext4_io_end_t, list);
+ io_end1 = container_of(after, ext4_io_end_t, list);
ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
- io, inode->i_ino, io0, io1);
+ io_end, inode->i_ino, io_end0, io_end1);
}
#endif
}
@@ -207,7 +243,7 @@ static void ext4_add_complete_io(ext4_io_end_t *io_end)
static int ext4_do_flush_completed_IO(struct inode *inode,
struct list_head *head)
{
- ext4_io_end_t *io;
+ ext4_io_end_t *io_end;
struct list_head unwritten;
unsigned long flags;
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -219,11 +255,11 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
while (!list_empty(&unwritten)) {
- io = list_entry(unwritten.next, ext4_io_end_t, list);
- BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
- list_del_init(&io->list);
+ io_end = list_entry(unwritten.next, ext4_io_end_t, list);
+ BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+ list_del_init(&io_end->list);
- err = ext4_end_io(io);
+ err = ext4_end_io_end(io_end);
if (unlikely(!ret && err))
ret = err;
}
@@ -242,19 +278,22 @@ void ext4_end_io_rsv_work(struct work_struct *work)
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
- ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
- if (io) {
- io->inode = inode;
- INIT_LIST_HEAD(&io->list);
- atomic_set(&io->count, 1);
+ ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);
+
+ if (io_end) {
+ io_end->inode = inode;
+ INIT_LIST_HEAD(&io_end->list);
+ INIT_LIST_HEAD(&io_end->list_vec);
+ atomic_set(&io_end->count, 1);
}
- return io;
+ return io_end;
}
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
if (atomic_dec_and_test(&io_end->count)) {
- if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
+ list_empty(&io_end->list_vec)) {
ext4_release_io_end(io_end);
return;
}
@@ -268,9 +307,8 @@ int ext4_put_io_end(ext4_io_end_t *io_end)
if (atomic_dec_and_test(&io_end->count)) {
if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
- err = ext4_convert_unwritten_extents(io_end->handle,
- io_end->inode, io_end->offset,
- io_end->size);
+ err = ext4_convert_unwritten_io_end_vec(io_end->handle,
+ io_end);
io_end->handle = NULL;
ext4_clear_io_unwritten_flag(io_end);
}
@@ -307,10 +345,8 @@ static void ext4_end_bio(struct bio *bio)
struct inode *inode = io_end->inode;
ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
- "(offset %llu size %ld starting block %llu)",
+ "starting block %llu)",
bio->bi_status, inode->i_ino,
- (unsigned long long) io_end->offset,
- (long) io_end->size,
(unsigned long long)
bi_sector >> (inode->i_blkbits - 9));
mapping_set_error(inode->i_mapping,
@@ -358,14 +394,16 @@ void ext4_io_submit_init(struct ext4_io_submit *io,
io->io_end = NULL;
}
-static int io_submit_init_bio(struct ext4_io_submit *io,
- struct buffer_head *bh)
+static void io_submit_init_bio(struct ext4_io_submit *io,
+ struct buffer_head *bh)
{
struct bio *bio;
+ /*
+ * bio_alloc will _always_ be able to allocate a bio if
+ * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
+ */
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
- if (!bio)
- return -ENOMEM;
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
@@ -373,13 +411,12 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
io->io_bio = bio;
io->io_next_block = bh->b_blocknr;
wbc_init_bio(io->io_wbc, bio);
- return 0;
}
-static int io_submit_add_bh(struct ext4_io_submit *io,
- struct inode *inode,
- struct page *page,
- struct buffer_head *bh)
+static void io_submit_add_bh(struct ext4_io_submit *io,
+ struct inode *inode,
+ struct page *page,
+ struct buffer_head *bh)
{
int ret;
@@ -388,9 +425,7 @@ submit_and_retry:
ext4_io_submit(io);
}
if (io->io_bio == NULL) {
- ret = io_submit_init_bio(io, bh);
- if (ret)
- return ret;
+ io_submit_init_bio(io, bh);
io->io_bio->bi_write_hint = inode->i_write_hint;
}
ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
@@ -398,7 +433,6 @@ submit_and_retry:
goto submit_and_retry;
wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
io->io_next_block++;
- return 0;
}
int ext4_bio_write_page(struct ext4_io_submit *io,
@@ -491,8 +525,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_flags |= __GFP_NOFAIL;
goto retry_encrypt;
}
- bounce_page = NULL;
- goto out;
+
+ printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
+ redirty_page_for_writepage(wbc, page);
+ do {
+ clear_buffer_async_write(bh);
+ bh = bh->b_this_page;
+ } while (bh != head);
+ goto unlock;
}
}
@@ -500,30 +540,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
do {
if (!buffer_async_write(bh))
continue;
- ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
- if (ret) {
- /*
- * We only get here on ENOMEM. Not much else
- * we can do but mark the page as dirty, and
- * better luck next time.
- */
- break;
- }
+ io_submit_add_bh(io, inode,
+ bounce_page ? bounce_page : page, bh);
nr_submitted++;
clear_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
- /* Error stopped previous loop? Clean up buffers... */
- if (ret) {
- out:
- fscrypt_free_bounce_page(bounce_page);
- printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
- redirty_page_for_writepage(wbc, page);
- do {
- clear_buffer_async_write(bh);
- bh = bh->b_this_page;
- } while (bh != head);
- }
+unlock:
unlock_page(page);
/* Nothing submitted - we have to end page writeback */
if (!nr_submitted)
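
Note on the page-io.c hunks above: the single (offset, size) pair previously carried in ext4_io_end is replaced by a list of ext4_io_end_vec entries hung off io_end->list_vec, so one io_end can describe several unwritten ranges awaiting conversion. The following is a minimal user-space sketch of that ownership pattern only; it uses stand-in types rather than the kernel's list_head, slab caches, or GFP flags.

/* Sketch of the io_end -> io_end_vec ownership model, with stand-in types. */
#include <stdio.h>
#include <stdlib.h>

struct io_end_vec {
	long long offset;
	long long size;
	struct io_end_vec *next;
};

struct io_end {
	struct io_end_vec *head, *tail;	/* models io_end->list_vec */
};

/* models ext4_alloc_io_end_vec(): allocate a vec and append it to the io_end */
static struct io_end_vec *alloc_io_end_vec(struct io_end *io_end)
{
	struct io_end_vec *v = calloc(1, sizeof(*v));

	if (!v)
		return NULL;
	if (io_end->tail)
		io_end->tail->next = v;
	else
		io_end->head = v;
	io_end->tail = v;
	return v;
}

/* models ext4_free_io_end_vec(): release every vec when the io_end is released */
static void free_io_end_vecs(struct io_end *io_end)
{
	struct io_end_vec *v = io_end->head;

	while (v) {
		struct io_end_vec *next = v->next;

		free(v);
		v = next;
	}
	io_end->head = io_end->tail = NULL;
}

int main(void)
{
	struct io_end io_end = { 0 };
	struct io_end_vec *v;

	v = alloc_io_end_vec(&io_end);
	if (v) { v->offset = 0; v->size = 4096; }
	v = alloc_io_end_vec(&io_end);
	if (v) { v->offset = 65536; v->size = 8192; }

	for (v = io_end.head; v; v = v->next)
		printf("unwritten range: offset=%lld size=%lld\n", v->offset, v->size);
	free_io_end_vecs(&io_end);
	return 0;
}
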
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index c916017db334..fef7755300c3 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -47,13 +47,103 @@
#include "ext4.h"
-static inline bool ext4_bio_encrypted(struct bio *bio)
+#define NUM_PREALLOC_POST_READ_CTXS 128
+
+static struct kmem_cache *bio_post_read_ctx_cache;
+static mempool_t *bio_post_read_ctx_pool;
+
+/* postprocessing steps for read bios */
+enum bio_post_read_step {
+ STEP_INITIAL = 0,
+ STEP_DECRYPT,
+ STEP_VERITY,
+};
+
+struct bio_post_read_ctx {
+ struct bio *bio;
+ struct work_struct work;
+ unsigned int cur_step;
+ unsigned int enabled_steps;
+};
+
+static void __read_end_io(struct bio *bio)
+{
+ struct page *page;
+ struct bio_vec *bv;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bv, bio, iter_all) {
+ page = bv->bv_page;
+
+ /* PG_error was set if any post_read step failed */
+ if (bio->bi_status || PageError(page)) {
+ ClearPageUptodate(page);
+ /* will re-read again later */
+ ClearPageError(page);
+ } else {
+ SetPageUptodate(page);
+ }
+ unlock_page(page);
+ }
+ if (bio->bi_private)
+ mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+ bio_put(bio);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
+
+static void decrypt_work(struct work_struct *work)
+{
+ struct bio_post_read_ctx *ctx =
+ container_of(work, struct bio_post_read_ctx, work);
+
+ fscrypt_decrypt_bio(ctx->bio);
+
+ bio_post_read_processing(ctx);
+}
+
+static void verity_work(struct work_struct *work)
+{
+ struct bio_post_read_ctx *ctx =
+ container_of(work, struct bio_post_read_ctx, work);
+
+ fsverity_verify_bio(ctx->bio);
+
+ bio_post_read_processing(ctx);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
+{
+ /*
+ * We use different work queues for decryption and for verity because
+ * verity may require reading metadata pages that need decryption, and
+ * we shouldn't recurse to the same workqueue.
+ */
+ switch (++ctx->cur_step) {
+ case STEP_DECRYPT:
+ if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
+ INIT_WORK(&ctx->work, decrypt_work);
+ fscrypt_enqueue_decrypt_work(&ctx->work);
+ return;
+ }
+ ctx->cur_step++;
+ /* fall-through */
+ case STEP_VERITY:
+ if (ctx->enabled_steps & (1 << STEP_VERITY)) {
+ INIT_WORK(&ctx->work, verity_work);
+ fsverity_enqueue_verify_work(&ctx->work);
+ return;
+ }
+ ctx->cur_step++;
+ /* fall-through */
+ default:
+ __read_end_io(ctx->bio);
+ }
+}
+
+static bool bio_post_read_required(struct bio *bio)
{
-#ifdef CONFIG_FS_ENCRYPTION
- return unlikely(bio->bi_private != NULL);
-#else
- return false;
-#endif
+ return bio->bi_private && !bio->bi_status;
}
/*
@@ -70,30 +160,53 @@ static inline bool ext4_bio_encrypted(struct bio *bio)
*/
static void mpage_end_io(struct bio *bio)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ if (bio_post_read_required(bio)) {
+ struct bio_post_read_ctx *ctx = bio->bi_private;
- if (ext4_bio_encrypted(bio)) {
- if (bio->bi_status) {
- fscrypt_release_ctx(bio->bi_private);
- } else {
- fscrypt_enqueue_decrypt_bio(bio->bi_private, bio);
- return;
- }
+ ctx->cur_step = STEP_INITIAL;
+ bio_post_read_processing(ctx);
+ return;
}
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
+ __read_end_io(bio);
+}
- if (!bio->bi_status) {
- SetPageUptodate(page);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
- unlock_page(page);
+static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
+{
+ return fsverity_active(inode) &&
+ idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+}
+
+static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
+ struct bio *bio,
+ pgoff_t first_idx)
+{
+ unsigned int post_read_steps = 0;
+ struct bio_post_read_ctx *ctx = NULL;
+
+ if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
+ post_read_steps |= 1 << STEP_DECRYPT;
+
+ if (ext4_need_verity(inode, first_idx))
+ post_read_steps |= 1 << STEP_VERITY;
+
+ if (post_read_steps) {
+ ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+ ctx->bio = bio;
+ ctx->enabled_steps = post_read_steps;
+ bio->bi_private = ctx;
}
+ return ctx;
+}
- bio_put(bio);
+static inline loff_t ext4_readpage_limit(struct inode *inode)
+{
+ if (IS_ENABLED(CONFIG_FS_VERITY) &&
+ (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
+ return inode->i_sb->s_maxbytes;
+
+ return i_size_read(inode);
}
int ext4_mpage_readpages(struct address_space *mapping,
@@ -141,7 +254,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
last_block = block_in_file + nr_pages * blocks_per_page;
- last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+ last_block_in_file = (ext4_readpage_limit(inode) +
+ blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
last_block = last_block_in_file;
page_block = 0;
@@ -218,6 +332,9 @@ int ext4_mpage_readpages(struct address_space *mapping,
zero_user_segment(page, first_hole << blkbits,
PAGE_SIZE);
if (first_hole == 0) {
+ if (ext4_need_verity(inode, page->index) &&
+ !fsverity_verify_page(page))
+ goto set_error_page;
SetPageUptodate(page);
unlock_page(page);
goto next_page;
@@ -241,18 +358,18 @@ int ext4_mpage_readpages(struct address_space *mapping,
bio = NULL;
}
if (bio == NULL) {
- struct fscrypt_ctx *ctx = NULL;
+ struct bio_post_read_ctx *ctx;
- if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
- ctx = fscrypt_get_ctx(GFP_NOFS);
- if (IS_ERR(ctx))
- goto set_error_page;
- }
+ /*
+ * bio_alloc will _always_ be able to allocate a bio if
+ * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
+ */
bio = bio_alloc(GFP_KERNEL,
min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio) {
- if (ctx)
- fscrypt_release_ctx(ctx);
+ ctx = get_bio_post_read_ctx(inode, bio, page->index);
+ if (IS_ERR(ctx)) {
+ bio_put(bio);
+ bio = NULL;
goto set_error_page;
}
bio_set_dev(bio, bdev);
@@ -293,3 +410,29 @@ int ext4_mpage_readpages(struct address_space *mapping,
submit_bio(bio);
return 0;
}
+
+int __init ext4_init_post_read_processing(void)
+{
+ bio_post_read_ctx_cache =
+ kmem_cache_create("ext4_bio_post_read_ctx",
+ sizeof(struct bio_post_read_ctx), 0, 0, NULL);
+ if (!bio_post_read_ctx_cache)
+ goto fail;
+ bio_post_read_ctx_pool =
+ mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
+ bio_post_read_ctx_cache);
+ if (!bio_post_read_ctx_pool)
+ goto fail_free_cache;
+ return 0;
+
+fail_free_cache:
+ kmem_cache_destroy(bio_post_read_ctx_cache);
+fail:
+ return -ENOMEM;
+}
+
+void ext4_exit_post_read_processing(void)
+{
+ mempool_destroy(bio_post_read_ctx_pool);
+ kmem_cache_destroy(bio_post_read_ctx_cache);
+}
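
The readpage.c additions above form a small per-bio state machine: enabled_steps is a bitmask of STEP_DECRYPT and STEP_VERITY, and bio_post_read_processing() advances cur_step and dispatches each enabled step to its own workqueue. Below is a rough user-space model of that dispatch; decrypt_step() and verity_step() are stubs standing in for the fscrypt/fsverity work items, and the recursion models the work function re-entering the dispatcher.

/* Rough model of the enabled_steps/cur_step dispatch added above. */
#include <stdio.h>

enum post_read_step { STEP_INITIAL = 0, STEP_DECRYPT, STEP_VERITY };

struct post_read_ctx {
	unsigned int cur_step;
	unsigned int enabled_steps;
};

static void post_read_processing(struct post_read_ctx *ctx);

static void end_read(struct post_read_ctx *ctx)
{
	(void)ctx;
	printf("all post-read steps done\n");
}

static void decrypt_step(struct post_read_ctx *ctx)
{
	printf("decrypting pages\n");
	post_read_processing(ctx);	/* work item re-enters the dispatcher */
}

static void verity_step(struct post_read_ctx *ctx)
{
	printf("verifying Merkle tree hashes\n");
	post_read_processing(ctx);
}

static void post_read_processing(struct post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			decrypt_step(ctx);
			return;
		}
		ctx->cur_step++;
		/* fall through */
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			verity_step(ctx);
			return;
		}
		ctx->cur_step++;
		/* fall through */
	default:
		end_read(ctx);
	}
}

int main(void)
{
	struct post_read_ctx ctx = {
		.cur_step = STEP_INITIAL,
		.enabled_steps = (1 << STEP_DECRYPT) | (1 << STEP_VERITY),
	};

	post_read_processing(&ctx);
	return 0;
}
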
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c0e9aef376a7..a8c0f2b5b6e1 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -388,28 +388,10 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
return bh;
}
-/*
- * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA.
- * If that fails, restart the transaction & regain write access for the
- * buffer head which is used for block_bitmap modifications.
- */
-static int extend_or_restart_transaction(handle_t *handle, int thresh)
+static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
- int err;
-
- if (ext4_handle_has_enough_credits(handle, thresh))
- return 0;
-
- err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
- if (err < 0)
- return err;
- if (err) {
- err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
- if (err)
- return err;
- }
-
- return 0;
+ return ext4_journal_ensure_credits_fn(handle, credits,
+ EXT4_MAX_TRANS_DATA, 0, 0);
}
/*
@@ -451,8 +433,8 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
continue;
}
- err = extend_or_restart_transaction(handle, 1);
- if (err)
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
return err;
bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
@@ -544,8 +526,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
struct buffer_head *gdb;
ext4_debug("update backup group %#04llx\n", block);
- err = extend_or_restart_transaction(handle, 1);
- if (err)
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
goto out;
gdb = sb_getblk(sb, block);
@@ -602,8 +584,8 @@ handle_bb:
/* Initialize block bitmap of the @group */
block = group_data[i].block_bitmap;
- err = extend_or_restart_transaction(handle, 1);
- if (err)
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
goto out;
bh = bclean(handle, sb, block);
@@ -631,8 +613,8 @@ handle_ib:
/* Initialize inode bitmap of the @group */
block = group_data[i].inode_bitmap;
- err = extend_or_restart_transaction(handle, 1);
- if (err)
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
goto out;
/* Mark unused entries in inode bitmap used */
bh = bclean(handle, sb, block);
@@ -1109,10 +1091,8 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
ext4_fsblk_t backup_block;
/* Out of journal space, and can't get more - abort - so sad */
- if (ext4_handle_valid(handle) &&
- handle->h_buffer_credits == 0 &&
- ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
- (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
break;
if (meta_bg == 0)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 4079605d437a..1d82b56d9b11 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1107,6 +1107,9 @@ static int ext4_drop_inode(struct inode *inode)
{
int drop = generic_drop_inode(inode);
+ if (!drop)
+ drop = fscrypt_drop_inode(inode);
+
trace_ext4_drop_inode(inode, drop);
return drop;
}
@@ -1169,9 +1172,9 @@ void ext4_clear_inode(struct inode *inode)
{
invalidate_inode_buffers(inode);
clear_inode(inode);
- dquot_drop(inode);
ext4_discard_preallocations(inode);
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+ dquot_drop(inode);
if (EXT4_I(inode)->jinode) {
jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode);
@@ -1179,6 +1182,7 @@ void ext4_clear_inode(struct inode *inode)
EXT4_I(inode)->jinode = NULL;
}
fscrypt_put_encryption_info(inode);
+ fsverity_cleanup_inode(inode);
}
static struct inode *ext4_nfs_get_inode(struct super_block *sb,
@@ -1341,6 +1345,18 @@ static bool ext4_dummy_context(struct inode *inode)
return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}
+static bool ext4_has_stable_inodes(struct super_block *sb)
+{
+ return ext4_has_feature_stable_inodes(sb);
+}
+
+static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
+ int *ino_bits_ret, int *lblk_bits_ret)
+{
+ *ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
+ *lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
+}
+
static const struct fscrypt_operations ext4_cryptops = {
.key_prefix = "ext4:",
.get_context = ext4_get_context,
@@ -1348,6 +1364,8 @@ static const struct fscrypt_operations ext4_cryptops = {
.dummy_context = ext4_dummy_context,
.empty_dir = ext4_empty_dir,
.max_namelen = EXT4_NAME_LEN,
+ .has_stable_inodes = ext4_has_stable_inodes,
+ .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits,
};
#endif
@@ -1370,7 +1388,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
-static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
static struct dquot **ext4_get_dquots(struct inode *inode)
{
@@ -1388,7 +1405,7 @@ static const struct dquot_operations ext4_quota_operations = {
.destroy_dquot = dquot_destroy,
.get_projid = ext4_get_projid,
.get_inode_usage = ext4_get_inode_usage,
- .get_next_id = ext4_get_next_id,
+ .get_next_id = dquot_get_next_id,
};
static const struct quotactl_ops ext4_qctl_operations = {
@@ -1874,6 +1891,13 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
} else if (token == Opt_commit) {
if (arg == 0)
arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
+ else if (arg > INT_MAX / HZ) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid commit interval %d, "
+ "must be smaller than %d",
+ arg, INT_MAX / HZ);
+ return -1;
+ }
sbi->s_commit_interval = HZ * arg;
} else if (token == Opt_debug_want_extra_isize) {
sbi->s_want_extra_isize = arg;
@@ -2040,7 +2064,7 @@ static int parse_options(char *options, struct super_block *sb,
unsigned int *journal_ioprio,
int is_remount)
{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_sb_info __maybe_unused *sbi = EXT4_SB(sb);
char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
substring_t args[MAX_OPT_ARGS];
int token;
@@ -2094,16 +2118,6 @@ static int parse_options(char *options, struct super_block *sb,
}
}
#endif
- if (test_opt(sb, DIOREAD_NOLOCK)) {
- int blocksize =
- BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
-
- if (blocksize < PAGE_SIZE) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "dioread_nolock if block size != PAGE_SIZE");
- return 0;
- }
- }
return 1;
}
@@ -3544,12 +3558,15 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ unsigned def_extra_isize = sizeof(struct ext4_inode) -
+ EXT4_GOOD_OLD_INODE_SIZE;
- /* determine the minimum size of new large inodes, if present */
- if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
- sbi->s_want_extra_isize == 0) {
- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
- EXT4_GOOD_OLD_INODE_SIZE;
+ if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
+ sbi->s_want_extra_isize = 0;
+ return;
+ }
+ if (sbi->s_want_extra_isize < 4) {
+ sbi->s_want_extra_isize = def_extra_isize;
if (ext4_has_feature_extra_isize(sb)) {
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_want_extra_isize))
@@ -3562,10 +3579,10 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb)
}
}
/* Check if enough inode space is available */
- if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
- sbi->s_inode_size) {
- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
- EXT4_GOOD_OLD_INODE_SIZE;
+ if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
+ (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+ sbi->s_inode_size)) {
+ sbi->s_want_extra_isize = def_extra_isize;
ext4_msg(sb, KERN_INFO,
"required extra inode space not available");
}
@@ -4035,8 +4052,21 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_inode_size);
goto failed_mount;
}
- if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
- sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
+ /*
+ * i_atime_extra is the last extra field available for [acm]times in
+ * struct ext4_inode. Checking for that field should suffice to ensure
+ * we have extra space for all three.
+ */
+ if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
+ sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
+ sb->s_time_gran = 1;
+ sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
+ } else {
+ sb->s_time_gran = NSEC_PER_SEC;
+ sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
+ }
+
+ sb->s_time_min = EXT4_TIMESTAMP_MIN;
}
sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
@@ -4272,6 +4302,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_FS_ENCRYPTION
sb->s_cop = &ext4_cryptops;
#endif
+#ifdef CONFIG_FS_VERITY
+ sb->s_vop = &ext4_verityops;
+#endif
#ifdef CONFIG_QUOTA
sb->dq_op = &ext4_quota_operations;
if (ext4_has_feature_quota(sb))
@@ -4412,10 +4445,8 @@ no_journal:
}
}
- if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
- (blocksize != PAGE_SIZE)) {
- ext4_msg(sb, KERN_ERR,
- "Unsupported blocksize for fs encryption");
+ if (ext4_has_feature_verity(sb) && blocksize != PAGE_SIZE) {
+ ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity");
goto failed_mount_wq;
}
@@ -5803,7 +5834,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
/* Don't account quota for quota files to avoid recursion */
qf_inode->i_flags |= S_NOQUOTA;
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
- err = dquot_enable(qf_inode, type, format_id, flags);
+ err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
if (err)
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
iput(qf_inode);
@@ -5987,18 +6018,6 @@ out:
}
return len;
}
-
-static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
-{
- const struct quota_format_ops *ops;
-
- if (!sb_has_quota_loaded(sb, qid->type))
- return -ESRCH;
- ops = sb_dqopt(sb)->ops[qid->type];
- if (!ops || !ops->get_next_id)
- return -ENOSYS;
- return dquot_get_next_id(sb, qid);
-}
#endif
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
@@ -6095,6 +6114,10 @@ static int __init ext4_init_fs(void)
err = ext4_init_pending();
if (err)
+ goto out7;
+
+ err = ext4_init_post_read_processing();
+ if (err)
goto out6;
err = ext4_init_pageio();
@@ -6135,8 +6158,10 @@ out3:
out4:
ext4_exit_pageio();
out5:
- ext4_exit_pending();
+ ext4_exit_post_read_processing();
out6:
+ ext4_exit_pending();
+out7:
ext4_exit_es();
return err;
@@ -6153,6 +6178,7 @@ static void __exit ext4_exit_fs(void)
ext4_exit_sysfs();
ext4_exit_system_zone();
ext4_exit_pageio();
+ ext4_exit_post_read_processing();
ext4_exit_es();
ext4_exit_pending();
}
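
The super.c hunk above picks the timestamp granularity and maximum by checking whether the on-disk inode is large enough to hold i_atime_extra, the last of the extra timestamp fields. A stand-alone illustration of that offsetof test follows; the struct layout and the inode sizes passed to report() are made up for demonstration (the real comparison is against ext4's 128-byte versus larger inodes), so treat it only as a shape of the check.

/* Illustration of the "does the inode have room for i_atime_extra?" test.
 * fake_ext4_inode and the sizes below are stand-ins, not ext4's real layout. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct fake_ext4_inode {
	uint32_t i_mtime, i_atime, i_ctime;	/* classic 32-bit seconds */
	uint32_t i_ctime_extra;
	uint32_t i_mtime_extra;
	uint32_t i_atime_extra;			/* last extra timestamp field */
};

static void report(unsigned int inode_size)
{
	size_t need = offsetof(struct fake_ext4_inode, i_atime_extra) +
		      sizeof(((struct fake_ext4_inode *)0)->i_atime_extra);

	if (inode_size >= need)
		printf("inode_size=%u: nanosecond granularity, extended max timestamp\n",
		       inode_size);
	else
		printf("inode_size=%u: 1-second granularity, classic timestamp limit\n",
		       inode_size);
}

int main(void)
{
	report(16);	/* too small for the extra timestamp fields */
	report(32);	/* extra timestamps fit */
	return 0;
}
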
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index b3cd7655a6ff..eb1efad0e20a 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -242,6 +242,9 @@ EXT4_ATTR_FEATURE(encryption);
#ifdef CONFIG_UNICODE
EXT4_ATTR_FEATURE(casefold);
#endif
+#ifdef CONFIG_FS_VERITY
+EXT4_ATTR_FEATURE(verity);
+#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
static struct attribute *ext4_feat_attrs[] = {
@@ -254,6 +257,9 @@ static struct attribute *ext4_feat_attrs[] = {
#ifdef CONFIG_UNICODE
ATTR_LIST(casefold),
#endif
+#ifdef CONFIG_FS_VERITY
+ ATTR_LIST(verity),
+#endif
ATTR_LIST(metadata_csum_seed),
NULL,
};
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
new file mode 100644
index 000000000000..d0d8a9795dd6
--- /dev/null
+++ b/fs/ext4/verity.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/ext4/verity.c: fs-verity support for ext4
+ *
+ * Copyright 2019 Google LLC
+ */
+
+/*
+ * Implementation of fsverity_operations for ext4.
+ *
+ * ext4 stores the verity metadata (Merkle tree and fsverity_descriptor) past
+ * the end of the file, starting at the first 64K boundary beyond i_size. This
+ * approach works because (a) verity files are readonly, and (b) pages fully
+ * beyond i_size aren't visible to userspace but can be read/written internally
+ * by ext4 with only some relatively small changes to ext4. This approach
+ * avoids having to depend on the EA_INODE feature and on rearchitecturing
+ * ext4's xattr support to support paging multi-gigabyte xattrs into memory, and
+ * to support encrypting xattrs. Note that the verity metadata *must* be
+ * encrypted when the file is, since it contains hashes of the plaintext data.
+ *
+ * Using a 64K boundary rather than a 4K one keeps things ready for
+ * architectures with 64K pages, and it doesn't necessarily waste space on-disk
+ * since there can be a hole between i_size and the start of the Merkle tree.
+ */
+
+#include <linux/quotaops.h>
+
+#include "ext4.h"
+#include "ext4_extents.h"
+#include "ext4_jbd2.h"
+
+static inline loff_t ext4_verity_metadata_pos(const struct inode *inode)
+{
+ return round_up(inode->i_size, 65536);
+}
+
+/*
+ * Read some verity metadata from the inode. __vfs_read() can't be used because
+ * we need to read beyond i_size.
+ */
+static int pagecache_read(struct inode *inode, void *buf, size_t count,
+ loff_t pos)
+{
+ while (count) {
+ size_t n = min_t(size_t, count,
+ PAGE_SIZE - offset_in_page(pos));
+ struct page *page;
+ void *addr;
+
+ page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
+ NULL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ addr = kmap_atomic(page);
+ memcpy(buf, addr + offset_in_page(pos), n);
+ kunmap_atomic(addr);
+
+ put_page(page);
+
+ buf += n;
+ pos += n;
+ count -= n;
+ }
+ return 0;
+}
+
+/*
+ * Write some verity metadata to the inode for FS_IOC_ENABLE_VERITY.
+ * kernel_write() can't be used because the file descriptor is readonly.
+ */
+static int pagecache_write(struct inode *inode, const void *buf, size_t count,
+ loff_t pos)
+{
+ if (pos + count > inode->i_sb->s_maxbytes)
+ return -EFBIG;
+
+ while (count) {
+ size_t n = min_t(size_t, count,
+ PAGE_SIZE - offset_in_page(pos));
+ struct page *page;
+ void *fsdata;
+ void *addr;
+ int res;
+
+ res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
+ &page, &fsdata);
+ if (res)
+ return res;
+
+ addr = kmap_atomic(page);
+ memcpy(addr + offset_in_page(pos), buf, n);
+ kunmap_atomic(addr);
+
+ res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
+ page, fsdata);
+ if (res < 0)
+ return res;
+ if (res != n)
+ return -EIO;
+
+ buf += n;
+ pos += n;
+ count -= n;
+ }
+ return 0;
+}
+
+static int ext4_begin_enable_verity(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+ const int credits = 2; /* superblock and inode for ext4_orphan_add() */
+ handle_t *handle;
+ int err;
+
+ if (ext4_verity_in_progress(inode))
+ return -EBUSY;
+
+ /*
+ * Since the file was opened readonly, we have to initialize the jbd
+ * inode and quotas here and not rely on ->open() doing it. This must
+ * be done before evicting the inline data.
+ */
+
+ err = ext4_inode_attach_jinode(inode);
+ if (err)
+ return err;
+
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+
+ err = ext4_convert_inline_data(inode);
+ if (err)
+ return err;
+
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ ext4_warning_inode(inode,
+ "verity is only allowed on extent-based files");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * ext4 uses the last allocated block to find the verity descriptor, so
+ * we must remove any other blocks past EOF which might confuse things.
+ */
+ err = ext4_truncate(inode);
+ if (err)
+ return err;
+
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ err = ext4_orphan_add(handle, inode);
+ if (err == 0)
+ ext4_set_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
+
+ ext4_journal_stop(handle);
+ return err;
+}
+
+/*
+ * ext4 stores the verity descriptor beginning on the next filesystem block
+ * boundary after the Merkle tree. Then, the descriptor size is stored in the
+ * last 4 bytes of the last allocated filesystem block --- which is either the
+ * block in which the descriptor ends, or the next block after that if there
+ * weren't at least 4 bytes remaining.
+ *
+ * We can't simply store the descriptor in an xattr because it *must* be
+ * encrypted when ext4 encryption is used, but ext4 encryption doesn't encrypt
+ * xattrs. Also, if the descriptor includes a large signature blob it may be
+ * too large to store in an xattr without the EA_INODE feature.
+ */
+static int ext4_write_verity_descriptor(struct inode *inode, const void *desc,
+ size_t desc_size, u64 merkle_tree_size)
+{
+ const u64 desc_pos = round_up(ext4_verity_metadata_pos(inode) +
+ merkle_tree_size, i_blocksize(inode));
+ const u64 desc_end = desc_pos + desc_size;
+ const __le32 desc_size_disk = cpu_to_le32(desc_size);
+ const u64 desc_size_pos = round_up(desc_end + sizeof(desc_size_disk),
+ i_blocksize(inode)) -
+ sizeof(desc_size_disk);
+ int err;
+
+ err = pagecache_write(inode, desc, desc_size, desc_pos);
+ if (err)
+ return err;
+
+ return pagecache_write(inode, &desc_size_disk, sizeof(desc_size_disk),
+ desc_size_pos);
+}
+
+static int ext4_end_enable_verity(struct file *filp, const void *desc,
+ size_t desc_size, u64 merkle_tree_size)
+{
+ struct inode *inode = file_inode(filp);
+ const int credits = 2; /* superblock and inode for ext4_orphan_del() */
+ handle_t *handle;
+ int err = 0;
+ int err2;
+
+ if (desc != NULL) {
+ /* Succeeded; write the verity descriptor. */
+ err = ext4_write_verity_descriptor(inode, desc, desc_size,
+ merkle_tree_size);
+
+ /* Write all pages before clearing VERITY_IN_PROGRESS. */
+ if (!err)
+ err = filemap_write_and_wait(inode->i_mapping);
+ }
+
+ /* If we failed, truncate anything we wrote past i_size. */
+ if (desc == NULL || err)
+ ext4_truncate(inode);
+
+ /*
+ * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and
+ * deleting the inode from the orphan list, even if something failed.
+ * If everything succeeded, we'll also set the verity bit in the same
+ * transaction.
+ */
+
+ ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
+
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
+ if (IS_ERR(handle)) {
+ ext4_orphan_del(NULL, inode);
+ return PTR_ERR(handle);
+ }
+
+ err2 = ext4_orphan_del(handle, inode);
+ if (err2)
+ goto out_stop;
+
+ if (desc != NULL && !err) {
+ struct ext4_iloc iloc;
+
+ err = ext4_reserve_inode_write(handle, inode, &iloc);
+ if (err)
+ goto out_stop;
+ ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
+ ext4_set_inode_flags(inode);
+ err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+ }
+out_stop:
+ ext4_journal_stop(handle);
+ return err ?: err2;
+}
+
+static int ext4_get_verity_descriptor_location(struct inode *inode,
+ size_t *desc_size_ret,
+ u64 *desc_pos_ret)
+{
+ struct ext4_ext_path *path;
+ struct ext4_extent *last_extent;
+ u32 end_lblk;
+ u64 desc_size_pos;
+ __le32 desc_size_disk;
+ u32 desc_size;
+ u64 desc_pos;
+ int err;
+
+ /*
+ * Descriptor size is in last 4 bytes of last allocated block.
+ * See ext4_write_verity_descriptor().
+ */
+
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ EXT4_ERROR_INODE(inode, "verity file doesn't use extents");
+ return -EFSCORRUPTED;
+ }
+
+ path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ last_extent = path[path->p_depth].p_ext;
+ if (!last_extent) {
+ EXT4_ERROR_INODE(inode, "verity file has no extents");
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ return -EFSCORRUPTED;
+ }
+
+ end_lblk = le32_to_cpu(last_extent->ee_block) +
+ ext4_ext_get_actual_len(last_extent);
+ desc_size_pos = (u64)end_lblk << inode->i_blkbits;
+ ext4_ext_drop_refs(path);
+ kfree(path);
+
+ if (desc_size_pos < sizeof(desc_size_disk))
+ goto bad;
+ desc_size_pos -= sizeof(desc_size_disk);
+
+ err = pagecache_read(inode, &desc_size_disk, sizeof(desc_size_disk),
+ desc_size_pos);
+ if (err)
+ return err;
+ desc_size = le32_to_cpu(desc_size_disk);
+
+ /*
+ * The descriptor is stored just before the desc_size_disk, but starting
+ * on a filesystem block boundary.
+ */
+
+ if (desc_size > INT_MAX || desc_size > desc_size_pos)
+ goto bad;
+
+ desc_pos = round_down(desc_size_pos - desc_size, i_blocksize(inode));
+ if (desc_pos < ext4_verity_metadata_pos(inode))
+ goto bad;
+
+ *desc_size_ret = desc_size;
+ *desc_pos_ret = desc_pos;
+ return 0;
+
+bad:
+ EXT4_ERROR_INODE(inode, "verity file corrupted; can't find descriptor");
+ return -EFSCORRUPTED;
+}
+
+static int ext4_get_verity_descriptor(struct inode *inode, void *buf,
+ size_t buf_size)
+{
+ size_t desc_size = 0;
+ u64 desc_pos = 0;
+ int err;
+
+ err = ext4_get_verity_descriptor_location(inode, &desc_size, &desc_pos);
+ if (err)
+ return err;
+
+ if (buf_size) {
+ if (desc_size > buf_size)
+ return -ERANGE;
+ err = pagecache_read(inode, buf, desc_size, desc_pos);
+ if (err)
+ return err;
+ }
+ return desc_size;
+}
+
+static struct page *ext4_read_merkle_tree_page(struct inode *inode,
+ pgoff_t index)
+{
+ index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;
+
+ return read_mapping_page(inode->i_mapping, index, NULL);
+}
+
+static int ext4_write_merkle_tree_block(struct inode *inode, const void *buf,
+ u64 index, int log_blocksize)
+{
+ loff_t pos = ext4_verity_metadata_pos(inode) + (index << log_blocksize);
+
+ return pagecache_write(inode, buf, 1 << log_blocksize, pos);
+}
+
+const struct fsverity_operations ext4_verityops = {
+ .begin_enable_verity = ext4_begin_enable_verity,
+ .end_enable_verity = ext4_end_enable_verity,
+ .get_verity_descriptor = ext4_get_verity_descriptor,
+ .read_merkle_tree_page = ext4_read_merkle_tree_page,
+ .write_merkle_tree_block = ext4_write_merkle_tree_block,
+};
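
As the comments in verity.c above describe, ext4_write_verity_descriptor() places the descriptor at the first block boundary after the Merkle tree and stores its size in the last 4 bytes of the last allocated block, and ext4_get_verity_descriptor_location() reverses the arithmetic. The following stand-alone sketch works through that round_up/round_down layout math with example numbers (not taken from any real file).

/* Sketch of the verity descriptor placement arithmetic described above.
 * i_size, merkle_tree_size, desc_size and blocksize are example values. */
#include <stdio.h>
#include <stdint.h>

static uint64_t round_up_u64(uint64_t x, uint64_t a)   { return (x + a - 1) / a * a; }
static uint64_t round_down_u64(uint64_t x, uint64_t a) { return x / a * a; }

int main(void)
{
	uint64_t i_size = 1000000;		/* file data ends here */
	uint64_t blocksize = 4096;
	uint64_t merkle_tree_size = 20480;	/* hypothetical tree size */
	uint64_t desc_size = 300;		/* hypothetical descriptor size */

	/* Metadata begins at the first 64K boundary past i_size. */
	uint64_t meta_pos = round_up_u64(i_size, 65536);

	/* Descriptor starts on the next block boundary after the Merkle tree. */
	uint64_t desc_pos = round_up_u64(meta_pos + merkle_tree_size, blocksize);
	uint64_t desc_end = desc_pos + desc_size;

	/* Its 4-byte size field sits at the very end of the last allocated block. */
	uint64_t size_field_pos =
		round_up_u64(desc_end + sizeof(uint32_t), blocksize) - sizeof(uint32_t);

	printf("metadata at %llu, descriptor at %llu, size field at %llu\n",
	       (unsigned long long)meta_pos,
	       (unsigned long long)desc_pos,
	       (unsigned long long)size_field_pos);

	/* Lookup direction: recover desc_pos from the size field position, as
	 * ext4_get_verity_descriptor_location() does. */
	uint64_t recovered = round_down_u64(size_field_pos - desc_size, blocksize);

	printf("recovered descriptor position: %llu (matches: %s)\n",
	       (unsigned long long)recovered, recovered == desc_pos ? "yes" : "no");
	return 0;
}
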
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 491f9ee4040e..8966a5439a22 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -967,55 +967,6 @@ int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
return credits;
}
-static int ext4_xattr_ensure_credits(handle_t *handle, struct inode *inode,
- int credits, struct buffer_head *bh,
- bool dirty, bool block_csum)
-{
- int error;
-
- if (!ext4_handle_valid(handle))
- return 0;
-
- if (handle->h_buffer_credits >= credits)
- return 0;
-
- error = ext4_journal_extend(handle, credits - handle->h_buffer_credits);
- if (!error)
- return 0;
- if (error < 0) {
- ext4_warning(inode->i_sb, "Extend journal (error %d)", error);
- return error;
- }
-
- if (bh && dirty) {
- if (block_csum)
- ext4_xattr_block_csum_set(inode, bh);
- error = ext4_handle_dirty_metadata(handle, NULL, bh);
- if (error) {
- ext4_warning(inode->i_sb, "Handle metadata (error %d)",
- error);
- return error;
- }
- }
-
- error = ext4_journal_restart(handle, credits);
- if (error) {
- ext4_warning(inode->i_sb, "Restart journal (error %d)", error);
- return error;
- }
-
- if (bh) {
- error = ext4_journal_get_write_access(handle, bh);
- if (error) {
- ext4_warning(inode->i_sb,
- "Get write access failed (error %d)",
- error);
- return error;
- }
- }
- return 0;
-}
-
static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
int ref_change)
{
@@ -1149,6 +1100,24 @@ cleanup:
return saved_err;
}
+static int ext4_xattr_restart_fn(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, bool block_csum, bool dirty)
+{
+ int error;
+
+ if (bh && dirty) {
+ if (block_csum)
+ ext4_xattr_block_csum_set(inode, bh);
+ error = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (error) {
+ ext4_warning(inode->i_sb, "Handle metadata (error %d)",
+ error);
+ return error;
+ }
+ }
+ return 0;
+}
+
static void
ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
struct buffer_head *bh,
@@ -1185,13 +1154,24 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
continue;
}
- err = ext4_xattr_ensure_credits(handle, parent, credits, bh,
- dirty, block_csum);
- if (err) {
+ err = ext4_journal_ensure_credits_fn(handle, credits, credits,
+ ext4_free_metadata_revoke_credits(parent->i_sb, 1),
+ ext4_xattr_restart_fn(handle, parent, bh, block_csum,
+ dirty));
+ if (err < 0) {
ext4_warning_inode(ea_inode, "Ensure credits err=%d",
err);
continue;
}
+ if (err > 0) {
+ err = ext4_journal_get_write_access(handle, bh);
+ if (err) {
+ ext4_warning_inode(ea_inode,
+ "Re-get write access err=%d",
+ err);
+ continue;
+ }
+ }
err = ext4_xattr_inode_dec_ref(handle, ea_inode);
if (err) {
@@ -2335,7 +2315,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
flags & XATTR_CREATE);
brelse(bh);
- if (!ext4_handle_has_enough_credits(handle, credits)) {
+ if (jbd2_handle_buffer_credits(handle) < credits) {
error = -ENOSPC;
goto cleanup;
}
@@ -2862,11 +2842,9 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
struct inode *ea_inode;
int error;
- error = ext4_xattr_ensure_credits(handle, inode, extra_credits,
- NULL /* bh */,
- false /* dirty */,
- false /* block_csum */);
- if (error) {
+ error = ext4_journal_ensure_credits(handle, extra_credits,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1));
+ if (error < 0) {
EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error);
goto cleanup;
}
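
Judging from the converted call sites in the xattr.c hunks above, the new ensure-credits helpers use a three-way return: a negative value is an error, zero means the handle already had enough credits, and a positive value means the transaction was restarted, so the caller must re-acquire write access to any buffer it was holding. The sketch below only mirrors that calling pattern; ensure_credits(), get_write_access() and the handle type are stand-ins, not the real ext4/jbd2 API.

/* Sketch of the three-way return handling around ensure-credits. */
#include <stdio.h>

struct handle { int credits; };

/* Pretend helper: <0 error, 0 enough credits, >0 transaction was restarted. */
static int ensure_credits(struct handle *h, int wanted)
{
	if (wanted < 0)
		return -22;		/* illustrative error value */
	if (h->credits >= wanted)
		return 0;
	h->credits = wanted;		/* "extend or restart" */
	return 1;
}

static int get_write_access(struct handle *h)
{
	(void)h;
	return 0;			/* always succeeds in this sketch */
}

static int do_one_step(struct handle *h, int credits)
{
	int err = ensure_credits(h, credits);

	if (err < 0)
		return err;		/* hard failure, give up */
	if (err > 0) {
		/* handle restarted: previously acquired buffer access is gone */
		err = get_write_access(h);
		if (err)
			return err;
	}
	printf("step ran with %d credits available\n", h->credits);
	return 0;
}

int main(void)
{
	struct handle h = { .credits = 1 };

	do_one_step(&h, 1);	/* already enough: return 0 path */
	do_one_step(&h, 8);	/* needs extension: return >0 path */
	return 0;
}
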
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 110a38ca5d53..652fd2e2b23d 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -2,6 +2,7 @@
config F2FS_FS
tristate "F2FS filesystem support"
depends on BLOCK
+ select NLS
select CRYPTO
select CRYPTO_CRC32
select F2FS_FS_XATTR if FS_ENCRYPTION
@@ -60,7 +61,9 @@ config F2FS_FS_SECURITY
Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO
Linux. This option enables an extended attribute handler for file
security labels in the f2fs filesystem, so that it requires enabling
- the extended attribute support in advance.
+ the extended attribute support in advance. In particular you need this
+ option if you use the setcap command to assign initial process capabi-
+ lities to executables (the security.* extended attributes).
If you are not using a security module, say N.
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index 776c4b936504..2aaecc63834f 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -8,3 +8,4 @@ f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
+f2fs-$(CONFIG_FS_VERITY) += verity.o
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index a0eef95b9e0e..ffdaba0c55d2 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -581,7 +581,7 @@ int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock);
- f2fs_show_injection_info(FAULT_ORPHAN);
+ f2fs_show_injection_info(sbi, FAULT_ORPHAN);
return -ENOSPC;
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index abbf14e9bd72..a034cd0ce021 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -29,6 +29,7 @@
#define NUM_PREALLOC_POST_READ_CTXS 128
static struct kmem_cache *bio_post_read_ctx_cache;
+static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static bool __is_cp_guaranteed(struct page *page)
@@ -74,6 +75,7 @@ static enum count_type __read_io_type(struct page *page)
enum bio_post_read_step {
STEP_INITIAL = 0,
STEP_DECRYPT,
+ STEP_VERITY,
};
struct bio_post_read_ctx {
@@ -120,8 +122,23 @@ static void decrypt_work(struct work_struct *work)
bio_post_read_processing(ctx);
}
+static void verity_work(struct work_struct *work)
+{
+ struct bio_post_read_ctx *ctx =
+ container_of(work, struct bio_post_read_ctx, work);
+
+ fsverity_verify_bio(ctx->bio);
+
+ bio_post_read_processing(ctx);
+}
+
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
+ /*
+ * We use different work queues for decryption and for verity because
+ * verity may require reading metadata pages that need decryption, and
+ * we shouldn't recurse to the same workqueue.
+ */
switch (++ctx->cur_step) {
case STEP_DECRYPT:
if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
@@ -131,6 +148,14 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
}
ctx->cur_step++;
/* fall-through */
+ case STEP_VERITY:
+ if (ctx->enabled_steps & (1 << STEP_VERITY)) {
+ INIT_WORK(&ctx->work, verity_work);
+ fsverity_enqueue_verify_work(&ctx->work);
+ return;
+ }
+ ctx->cur_step++;
+ /* fall-through */
default:
__read_end_io(ctx->bio);
}
@@ -143,9 +168,10 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
static void f2fs_read_end_io(struct bio *bio)
{
- if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
- FAULT_READ_IO)) {
- f2fs_show_injection_info(FAULT_READ_IO);
+ struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
+
+ if (time_to_inject(sbi, FAULT_READ_IO)) {
+ f2fs_show_injection_info(sbi, FAULT_READ_IO);
bio->bi_status = BLK_STS_IOERR;
}
@@ -167,7 +193,7 @@ static void f2fs_write_end_io(struct bio *bio)
struct bvec_iter_all iter_all;
if (time_to_inject(sbi, FAULT_WRITE_IO)) {
- f2fs_show_injection_info(FAULT_WRITE_IO);
+ f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
bio->bi_status = BLK_STS_IOERR;
}
@@ -259,26 +285,25 @@ static bool __same_bdev(struct f2fs_sb_info *sbi,
/*
* Low-level block read/write IO operations.
*/
-static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
- struct writeback_control *wbc,
- int npages, bool is_read,
- enum page_type type, enum temp_type temp)
+static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
+ struct f2fs_sb_info *sbi = fio->sbi;
struct bio *bio;
bio = f2fs_bio_alloc(sbi, npages, true);
- f2fs_target_device(sbi, blk_addr, bio);
- if (is_read) {
+ f2fs_target_device(sbi, fio->new_blkaddr, bio);
+ if (is_read_io(fio->op)) {
bio->bi_end_io = f2fs_read_end_io;
bio->bi_private = NULL;
} else {
bio->bi_end_io = f2fs_write_end_io;
bio->bi_private = sbi;
- bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
+ bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
+ fio->type, fio->temp);
}
- if (wbc)
- wbc_init_bio(wbc, bio);
+ if (fio->io_wbc)
+ wbc_init_bio(fio->io_wbc, bio);
return bio;
}
@@ -295,6 +320,9 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
if (test_opt(sbi, LFS) && current->plug)
blk_finish_plug(current->plug);
+ if (F2FS_IO_ALIGNED(sbi))
+ goto submit_io;
+
start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
start %= F2FS_IO_SIZE(sbi);
@@ -461,8 +489,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
- bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
- 1, is_read_io(fio->op), fio->type, fio->temp);
+ bio = __bio_alloc(fio, 1);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
@@ -481,6 +508,163 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
return 0;
}
+static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
+ block_t last_blkaddr, block_t cur_blkaddr)
+{
+ if (last_blkaddr + 1 != cur_blkaddr)
+ return false;
+ return __same_bdev(sbi, cur_blkaddr, bio);
+}
+
+static bool io_type_is_mergeable(struct f2fs_bio_info *io,
+ struct f2fs_io_info *fio)
+{
+ if (io->fio.op != fio->op)
+ return false;
+ return io->fio.op_flags == fio->op_flags;
+}
+
+static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
+ struct f2fs_bio_info *io,
+ struct f2fs_io_info *fio,
+ block_t last_blkaddr,
+ block_t cur_blkaddr)
+{
+ if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
+ unsigned int filled_blocks =
+ F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
+ unsigned int io_size = F2FS_IO_SIZE(sbi);
+ unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
+
+ /* IOs in bio is aligned and left space of vectors is not enough */
+ if (!(filled_blocks % io_size) && left_vecs < io_size)
+ return false;
+ }
+ if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
+ return false;
+ return io_type_is_mergeable(io, fio);
+}
+
+static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
+ struct page *page, enum temp_type temp)
+{
+ struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+ struct bio_entry *be;
+
+ be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
+ be->bio = bio;
+ bio_get(bio);
+
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
+ f2fs_bug_on(sbi, 1);
+
+ down_write(&io->bio_list_lock);
+ list_add_tail(&be->list, &io->bio_list);
+ up_write(&io->bio_list_lock);
+}
+
+static void del_bio_entry(struct bio_entry *be)
+{
+ list_del(&be->list);
+ kmem_cache_free(bio_entry_slab, be);
+}
+
+static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio,
+ struct page *page)
+{
+ enum temp_type temp;
+ bool found = false;
+ int ret = -EAGAIN;
+
+ for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
+ struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+ struct list_head *head = &io->bio_list;
+ struct bio_entry *be;
+
+ down_write(&io->bio_list_lock);
+ list_for_each_entry(be, head, list) {
+ if (be->bio != *bio)
+ continue;
+
+ found = true;
+
+ if (bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
+ ret = 0;
+ break;
+ }
+
+ /* bio is full */
+ del_bio_entry(be);
+ __submit_bio(sbi, *bio, DATA);
+ break;
+ }
+ up_write(&io->bio_list_lock);
+ }
+
+ if (ret) {
+ bio_put(*bio);
+ *bio = NULL;
+ }
+
+ return ret;
+}
+
+void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
+ struct bio **bio, struct page *page)
+{
+ enum temp_type temp;
+ bool found = false;
+ struct bio *target = bio ? *bio : NULL;
+
+ for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
+ struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+ struct list_head *head = &io->bio_list;
+ struct bio_entry *be;
+
+ if (list_empty(head))
+ continue;
+
+ down_read(&io->bio_list_lock);
+ list_for_each_entry(be, head, list) {
+ if (target)
+ found = (target == be->bio);
+ else
+ found = __has_merged_page(be->bio, NULL,
+ page, 0);
+ if (found)
+ break;
+ }
+ up_read(&io->bio_list_lock);
+
+ if (!found)
+ continue;
+
+ found = false;
+
+ down_write(&io->bio_list_lock);
+ list_for_each_entry(be, head, list) {
+ if (target)
+ found = (target == be->bio);
+ else
+ found = __has_merged_page(be->bio, NULL,
+ page, 0);
+ if (found) {
+ target = be->bio;
+ del_bio_entry(be);
+ break;
+ }
+ }
+ up_write(&io->bio_list_lock);
+ }
+
+ if (found)
+ __submit_bio(sbi, target, DATA);
+ if (bio && *bio) {
+ bio_put(*bio);
+ *bio = NULL;
+ }
+}
+
int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio = *fio->bio;
@@ -494,22 +678,18 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
- if (bio && (*fio->last_block + 1 != fio->new_blkaddr ||
- !__same_bdev(fio->sbi, fio->new_blkaddr, bio))) {
- __submit_bio(fio->sbi, bio, fio->type);
- bio = NULL;
- }
+ if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
+ fio->new_blkaddr))
+ f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
if (!bio) {
- bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
- BIO_MAX_PAGES, false, fio->type, fio->temp);
+ bio = __bio_alloc(fio, BIO_MAX_PAGES);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
- }
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- __submit_bio(fio->sbi, bio, fio->type);
- bio = NULL;
- goto alloc_new;
+ add_bio_entry(fio->sbi, bio, page, fio->temp);
+ } else {
+ if (add_ipu_page(fio->sbi, &bio, page))
+ goto alloc_new;
}
if (fio->io_wbc)
@@ -523,19 +703,6 @@ alloc_new:
return 0;
}
-static void f2fs_submit_ipu_bio(struct f2fs_sb_info *sbi, struct bio **bio,
- struct page *page)
-{
- if (!bio)
- return;
-
- if (!__has_merged_page(*bio, NULL, page, 0))
- return;
-
- __submit_bio(sbi, *bio, DATA);
- *bio = NULL;
-}
-
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
@@ -568,21 +735,19 @@ next:
inc_page_count(sbi, WB_DATA_TYPE(bio_page));
- if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
- (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
- !__same_bdev(sbi, fio->new_blkaddr, io->bio)))
+ if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio,
+ io->last_block_in_bio, fio->new_blkaddr))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
- if ((fio->type == DATA || fio->type == NODE) &&
+ if (F2FS_IO_ALIGNED(sbi) &&
+ (fio->type == DATA || fio->type == NODE) &&
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
dec_page_count(sbi, WB_DATA_TYPE(bio_page));
fio->retry = true;
goto skip;
}
- io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
- BIO_MAX_PAGES, false,
- fio->type, fio->temp);
+ io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
io->fio = *fio;
}
@@ -603,13 +768,20 @@ skip:
goto next;
out:
if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
- f2fs_is_checkpoint_ready(sbi))
+ !f2fs_is_checkpoint_ready(sbi))
__submit_merged_bio(io);
up_write(&io->io_rwsem);
}
+static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
+{
+ return fsverity_active(inode) &&
+ idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+}
+
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
- unsigned nr_pages, unsigned op_flag)
+ unsigned nr_pages, unsigned op_flag,
+ pgoff_t first_idx)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
@@ -625,6 +797,10 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
+
+ if (f2fs_need_verity(inode, first_idx))
+ post_read_steps |= 1 << STEP_VERITY;
+
if (post_read_steps) {
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
if (!ctx) {
@@ -646,7 +822,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
- bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
+ bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index);
if (IS_ERR(bio))
return PTR_ERR(bio);
@@ -987,7 +1163,7 @@ alloc:
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
invalidate_mapping_pages(META_MAPPING(sbi),
old_blkaddr, old_blkaddr);
- f2fs_set_data_blkaddr(dn);
+ f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
/*
* i_size will be updated by direct_IO. Otherwise, we'll get stale
@@ -1164,10 +1340,10 @@ next_block:
if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
map->m_may_create) {
err = __allocate_data_block(&dn, map->m_seg_type);
- if (!err) {
- blkaddr = dn.data_blkaddr;
- set_inode_flag(inode, FI_APPEND_WRITE);
- }
+ if (err)
+ goto sync_out;
+ blkaddr = dn.data_blkaddr;
+ set_inode_flag(inode, FI_APPEND_WRITE);
}
} else {
if (create) {
@@ -1372,7 +1548,7 @@ static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
return __get_data_block(inode, iblock, bh_result, create,
F2FS_GET_BLOCK_DIO, NULL,
f2fs_rw_hint_to_seg_type(inode->i_write_hint),
- true);
+ IS_SWAPFILE(inode) ? false : true);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
@@ -1503,7 +1679,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
goto out;
}
- if (f2fs_has_inline_data(inode)) {
+ if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
if (ret != -EAGAIN)
goto out;
@@ -1569,6 +1745,15 @@ out:
return ret;
}
+static inline loff_t f2fs_readpage_limit(struct inode *inode)
+{
+ if (IS_ENABLED(CONFIG_FS_VERITY) &&
+ (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
+ return inode->i_sb->s_maxbytes;
+
+ return i_size_read(inode);
+}
+
static int f2fs_read_single_page(struct inode *inode, struct page *page,
unsigned nr_pages,
struct f2fs_map_blocks *map,
@@ -1587,7 +1772,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
block_in_file = (sector_t)page_index(page);
last_block = block_in_file + nr_pages;
- last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
+ last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
blkbits;
if (last_block > last_block_in_file)
last_block = last_block_in_file;
@@ -1632,6 +1817,11 @@ got_it:
} else {
zero_out:
zero_user_segment(page, 0, PAGE_SIZE);
+ if (f2fs_need_verity(inode, page->index) &&
+ !fsverity_verify_page(page)) {
+ ret = -EIO;
+ goto out;
+ }
if (!PageUptodate(page))
SetPageUptodate(page);
unlock_page(page);
@@ -1642,15 +1832,15 @@ zero_out:
* This page will go to BIO. Do we need to send this
* BIO off first?
*/
- if (bio && (*last_block_in_bio != block_nr - 1 ||
- !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
+ if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio,
+ *last_block_in_bio, block_nr)) {
submit_and_realloc:
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
- is_readahead ? REQ_RAHEAD : 0);
+ is_readahead ? REQ_RAHEAD : 0, page->index);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
bio = NULL;
@@ -2014,7 +2204,7 @@ static int __write_data_page(struct page *page, bool *submitted,
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
>> PAGE_SHIFT;
- loff_t psize = (page->index + 1) << PAGE_SHIFT;
+ loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
@@ -2052,7 +2242,7 @@ static int __write_data_page(struct page *page, bool *submitted,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (page->index < end_index)
+ if (page->index < end_index || f2fs_verity_in_progress(inode))
goto write;
/*
@@ -2131,14 +2321,12 @@ out:
unlock_page(page);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
- !F2FS_I(inode)->cp_task) {
- f2fs_submit_ipu_bio(sbi, bio, page);
+ !F2FS_I(inode)->cp_task)
f2fs_balance_fs(sbi, need_balance_fs);
- }
if (unlikely(f2fs_cp_error(sbi))) {
- f2fs_submit_ipu_bio(sbi, bio, page);
f2fs_submit_merged_write(sbi, DATA);
+ f2fs_submit_merged_ipu_write(sbi, bio, NULL);
submitted = NULL;
}
@@ -2258,13 +2446,11 @@ continue_unlock:
}
if (PageWriteback(page)) {
- if (wbc->sync_mode != WB_SYNC_NONE) {
+ if (wbc->sync_mode != WB_SYNC_NONE)
f2fs_wait_on_page_writeback(page,
DATA, true, true);
- f2fs_submit_ipu_bio(sbi, &bio, page);
- } else {
+ else
goto continue_unlock;
- }
}
if (!clear_page_dirty_for_io(page))
@@ -2322,7 +2508,7 @@ continue_unlock:
NULL, 0, DATA);
/* submit cached bio of IPU write */
if (bio)
- __submit_bio(sbi, bio, DATA);
+ f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
return ret;
}
@@ -2427,7 +2613,8 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
struct inode *inode = mapping->host;
loff_t i_size = i_size_read(inode);
- if (to > i_size) {
+ /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
+ if (to > i_size && !f2fs_verity_in_progress(inode)) {
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
@@ -2458,7 +2645,8 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
* the block addresses when there is no need to fill the page.
*/
if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
- !is_inode_flag_set(inode, FI_NO_PREALLOC))
+ !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
+ !f2fs_verity_in_progress(inode))
return 0;
/* f2fs_lock_op avoids race between write CP and convert_inline_page */
@@ -2539,9 +2727,10 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
trace_f2fs_write_begin(inode, pos, len, flags);
- err = f2fs_is_checkpoint_ready(sbi);
- if (err)
+ if (!f2fs_is_checkpoint_ready(sbi)) {
+ err = -ENOSPC;
goto fail;
+ }
if ((f2fs_is_atomic_file(inode) &&
!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
@@ -2597,7 +2786,8 @@ repeat:
if (len == PAGE_SIZE || PageUptodate(page))
return 0;
- if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
+ if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
+ !f2fs_verity_in_progress(inode)) {
zero_user_segment(page, len, PAGE_SIZE);
return 0;
}
@@ -2660,7 +2850,8 @@ static int f2fs_write_end(struct file *file,
set_page_dirty(page);
- if (pos + copied > i_size_read(inode))
+ if (pos + copied > i_size_read(inode) &&
+ !f2fs_verity_in_progress(inode))
f2fs_i_size_write(inode, pos + copied);
unlock_out:
f2fs_put_page(page, 1);
@@ -3104,7 +3295,9 @@ void f2fs_clear_page_cache_dirty_tag(struct page *page)
int __init f2fs_init_post_read_processing(void)
{
- bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
+ bio_post_read_ctx_cache =
+ kmem_cache_create("f2fs_bio_post_read_ctx",
+ sizeof(struct bio_post_read_ctx), 0, 0, NULL);
if (!bio_post_read_ctx_cache)
goto fail;
bio_post_read_ctx_pool =
@@ -3120,8 +3313,22 @@ fail:
return -ENOMEM;
}
-void __exit f2fs_destroy_post_read_processing(void)
+void f2fs_destroy_post_read_processing(void)
{
mempool_destroy(bio_post_read_ctx_pool);
kmem_cache_destroy(bio_post_read_ctx_cache);
}
+
+int __init f2fs_init_bio_entry_cache(void)
+{
+ bio_entry_slab = f2fs_kmem_cache_create("bio_entry_slab",
+ sizeof(struct bio_entry));
+ if (!bio_entry_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+void __exit f2fs_destroy_bio_entry_cache(void)
+{
+ kmem_cache_destroy(bio_entry_slab);
+}
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 7706049d23bf..9b0bedd82581 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -67,7 +67,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
si->nr_rd_node = get_pages(sbi, F2FS_RD_NODE);
si->nr_rd_meta = get_pages(sbi, F2FS_RD_META);
- if (SM_I(sbi) && SM_I(sbi)->fcc_info) {
+ if (SM_I(sbi)->fcc_info) {
si->nr_flushed =
atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
si->nr_flushing =
@@ -75,7 +75,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->flush_list_empty =
llist_empty(&SM_I(sbi)->fcc_info->issue_list);
}
- if (SM_I(sbi) && SM_I(sbi)->dcc_info) {
+ if (SM_I(sbi)->dcc_info) {
si->nr_discarded =
atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
si->nr_discarding =
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 85a1528f319f..c967cacf979e 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -8,6 +8,7 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/sched/signal.h>
+#include <linux/unicode.h>
#include "f2fs.h"
#include "node.h"
#include "acl.h"
@@ -81,7 +82,8 @@ static unsigned long dir_block_index(unsigned int level,
return bidx;
}
-static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
+static struct f2fs_dir_entry *find_in_block(struct inode *dir,
+ struct page *dentry_page,
struct fscrypt_name *fname,
f2fs_hash_t namehash,
int *max_slots,
@@ -93,7 +95,7 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
- make_dentry_ptr_block(NULL, &d, dentry_blk);
+ make_dentry_ptr_block(dir, &d, dentry_blk);
de = f2fs_find_target_dentry(fname, namehash, max_slots, &d);
if (de)
*res_page = dentry_page;
@@ -101,14 +103,116 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
return de;
}
+#ifdef CONFIG_UNICODE
+/*
+ * Test whether a case-insensitive directory entry matches the filename
+ * being searched for.
+ *
+ * Returns: 0 if the directory entry matches, a positive value if it
+ * doesn't match, or a negative value on error.
+ */
+int f2fs_ci_compare(const struct inode *parent, const struct qstr *name,
+ const struct qstr *entry, bool quick)
+{
+ const struct f2fs_sb_info *sbi = F2FS_SB(parent->i_sb);
+ const struct unicode_map *um = sbi->s_encoding;
+ int ret;
+
+ if (quick)
+ ret = utf8_strncasecmp_folded(um, name, entry);
+ else
+ ret = utf8_strncasecmp(um, name, entry);
+
+ if (ret < 0) {
+ /* Handle invalid character sequence as either an error
+ * or as an opaque byte sequence.
+ */
+ if (f2fs_has_strict_mode(sbi))
+ return -EINVAL;
+
+ if (name->len != entry->len)
+ return 1;
+
+ return !!memcmp(name->name, entry->name, name->len);
+ }
+
+ return ret;
+}
+
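+/*
+ * Precompute the casefolded form of @iname for repeated comparisons.  On
+ * allocation or casefolding failure, cf_name->name is left NULL so callers
+ * fall back to comparing against the user-supplied name.
+ */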
+static void f2fs_fname_setup_ci_filename(struct inode *dir,
+ const struct qstr *iname,
+ struct fscrypt_str *cf_name)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+
+ if (!IS_CASEFOLDED(dir)) {
+ cf_name->name = NULL;
+ return;
+ }
+
+ cf_name->name = f2fs_kmalloc(sbi, F2FS_NAME_LEN, GFP_NOFS);
+ if (!cf_name->name)
+ return;
+
+ cf_name->len = utf8_casefold(sbi->s_encoding,
+ iname, cf_name->name,
+ F2FS_NAME_LEN);
+ if ((int)cf_name->len <= 0) {
+ kvfree(cf_name->name);
+ cf_name->name = NULL;
+ }
+}
+#endif
+
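+/*
+ * Check whether the dentry at @bit_pos matches @fname: the hash is compared
+ * first, then a casefolded comparison is used when the parent directory is
+ * case-insensitive, falling back to fscrypt_match_name() otherwise.
+ */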
+static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d,
+ struct f2fs_dir_entry *de,
+ struct fscrypt_name *fname,
+ struct fscrypt_str *cf_str,
+ unsigned long bit_pos,
+ f2fs_hash_t namehash)
+{
+#ifdef CONFIG_UNICODE
+ struct inode *parent = d->inode;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(parent);
+ struct qstr entry;
+#endif
+
+ if (de->hash_code != namehash)
+ return false;
+
+#ifdef CONFIG_UNICODE
+ entry.name = d->filename[bit_pos];
+ entry.len = de->name_len;
+
+ if (sbi->s_encoding && IS_CASEFOLDED(parent)) {
+ if (cf_str->name) {
+ struct qstr cf = {.name = cf_str->name,
+ .len = cf_str->len};
+ return !f2fs_ci_compare(parent, &cf, &entry, true);
+ }
+ return !f2fs_ci_compare(parent, fname->usr_fname, &entry,
+ false);
+ }
+#endif
+ if (fscrypt_match_name(fname, d->filename[bit_pos],
+ le16_to_cpu(de->name_len)))
+ return true;
+ return false;
+}
+
struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
f2fs_hash_t namehash, int *max_slots,
struct f2fs_dentry_ptr *d)
{
struct f2fs_dir_entry *de;
+ struct fscrypt_str cf_str = { .name = NULL, .len = 0 };
unsigned long bit_pos = 0;
int max_len = 0;
+#ifdef CONFIG_UNICODE
+ f2fs_fname_setup_ci_filename(d->inode, fname->usr_fname, &cf_str);
+#endif
+
if (max_slots)
*max_slots = 0;
while (bit_pos < d->max) {
@@ -125,9 +229,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
continue;
}
- if (de->hash_code == namehash &&
- fscrypt_match_name(fname, d->filename[bit_pos],
- le16_to_cpu(de->name_len)))
+ if (f2fs_match_name(d, de, fname, &cf_str, bit_pos, namehash))
goto found;
if (max_slots && max_len > *max_slots)
@@ -141,6 +243,10 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
found:
if (max_slots && max_len > *max_slots)
*max_slots = max_len;
+
+#ifdef CONFIG_UNICODE
+ kvfree(cf_str.name);
+#endif
return de;
}
@@ -157,7 +263,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
struct f2fs_dir_entry *de = NULL;
bool room = false;
int max_slots;
- f2fs_hash_t namehash = f2fs_dentry_hash(&name, fname);
+ f2fs_hash_t namehash = f2fs_dentry_hash(dir, &name, fname);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
@@ -179,8 +285,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
}
}
- de = find_in_block(dentry_page, fname, namehash, &max_slots,
- res_page);
+ de = find_in_block(dir, dentry_page, fname, namehash,
+ &max_slots, res_page);
if (de)
break;
@@ -250,6 +356,14 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
struct fscrypt_name fname;
int err;
+#ifdef CONFIG_UNICODE
+ if (f2fs_has_strict_mode(F2FS_I_SB(dir)) && IS_CASEFOLDED(dir) &&
+ utf8_validate(F2FS_I_SB(dir)->s_encoding, child)) {
+ *res_page = ERR_PTR(-EINVAL);
+ return NULL;
+ }
+#endif
+
err = fscrypt_setup_filename(dir, child, 1, &fname);
if (err) {
if (err == -ENOENT)
@@ -504,7 +618,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
level = 0;
slots = GET_DENTRY_SLOTS(new_name->len);
- dentry_hash = f2fs_dentry_hash(new_name, NULL);
+ dentry_hash = f2fs_dentry_hash(dir, new_name, NULL);
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
@@ -514,7 +628,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
start:
if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
- f2fs_show_injection_info(FAULT_DIR_DEPTH);
+ f2fs_show_injection_info(F2FS_I_SB(dir), FAULT_DIR_DEPTH);
return -ENOSPC;
}
@@ -568,6 +682,11 @@ add_dentry:
if (inode) {
f2fs_i_pino_write(inode, dir->i_ino);
+
+ /* synchronize inode page's data from inode cache */
+ if (is_inode_flag_set(inode, FI_NEW_INODE))
+ f2fs_update_inode(inode, page);
+
f2fs_put_page(page, 1);
}
@@ -800,8 +919,9 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
bit_pos++;
ctx->pos = start_pos + bit_pos;
printk_ratelimited(
- "%s, invalid namelen(0), ino:%u, run fsck to fix.",
- KERN_WARNING, le32_to_cpu(de->ino));
+ "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
+ KERN_WARNING, sbi->sb->s_id,
+ le32_to_cpu(de->ino));
set_sbi_flag(sbi, SBI_NEED_FSCK);
continue;
}
@@ -943,3 +1063,50 @@ const struct file_operations f2fs_dir_operations = {
.compat_ioctl = f2fs_compat_ioctl,
#endif
};
+
+#ifdef CONFIG_UNICODE
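+/*
+ * Dentry comparison for casefold support: an exact byte comparison when the
+ * parent directory is not casefolded, a utf8 case-insensitive comparison
+ * otherwise.
+ */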
+static int f2fs_d_compare(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
+{
+ struct qstr qstr = {.name = str, .len = len };
+
+ if (!IS_CASEFOLDED(dentry->d_parent->d_inode)) {
+ if (len != name->len)
+ return -1;
+ return memcmp(str, name->name, len);
+ }
+
+ return f2fs_ci_compare(dentry->d_parent->d_inode, name, &qstr, false);
+}
+
+static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+ const struct unicode_map *um = sbi->s_encoding;
+ unsigned char *norm;
+ int len, ret = 0;
+
+ if (!IS_CASEFOLDED(dentry->d_inode))
+ return 0;
+
+ norm = f2fs_kmalloc(sbi, PATH_MAX, GFP_ATOMIC);
+ if (!norm)
+ return -ENOMEM;
+
+ len = utf8_casefold(um, str, norm, PATH_MAX);
+ if (len < 0) {
+ if (f2fs_has_strict_mode(sbi))
+ ret = -EINVAL;
+ goto out;
+ }
+ str->hash = full_name_hash(dentry, norm, len);
+out:
+ kvfree(norm);
+ return ret;
+}
+
+const struct dentry_operations f2fs_dentry_ops = {
+ .d_hash = f2fs_d_hash,
+ .d_compare = f2fs_d_compare,
+};
+#endif
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 17382da7f0bd..5a888a063c7f 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -25,6 +25,7 @@
#include <crypto/hash.h>
#include <linux/fscrypt.h>
+#include <linux/fsverity.h>
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
@@ -151,8 +152,9 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_QUOTA_INO 0x0080
#define F2FS_FEATURE_INODE_CRTIME 0x0100
#define F2FS_FEATURE_LOST_FOUND 0x0200
-#define F2FS_FEATURE_VERITY 0x0400 /* reserved */
+#define F2FS_FEATURE_VERITY 0x0400
#define F2FS_FEATURE_SB_CHKSUM 0x0800
+#define F2FS_FEATURE_CASEFOLD 0x1000
#define __F2FS_HAS_FEATURE(raw_super, mask) \
((raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -417,6 +419,9 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
+#define F2FS_IOC_GET_VOLUME_NAME FS_IOC_GETFSLABEL
+#define F2FS_IOC_SET_VOLUME_NAME FS_IOC_SETFSLABEL
+
#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
@@ -630,7 +635,7 @@ enum {
#define FADVISE_ENC_NAME_BIT 0x08
#define FADVISE_KEEP_SIZE_BIT 0x10
#define FADVISE_HOT_BIT 0x20
-#define FADVISE_VERITY_BIT 0x40 /* reserved */
+#define FADVISE_VERITY_BIT 0x40
#define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT)
@@ -650,6 +655,8 @@ enum {
#define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT)
+#define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT)
+#define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT)
#define DEF_DIR_LEVEL 0
@@ -883,6 +890,7 @@ enum {
CURSEG_WARM_NODE, /* direct node blocks of normal files */
CURSEG_COLD_NODE, /* indirect node blocks */
NO_CHECK_TYPE,
+ CURSEG_COLD_DATA_PINNED,/* cold data for pinned file */
};
struct flush_cmd {
@@ -1061,6 +1069,11 @@ struct f2fs_io_info {
unsigned char version; /* version of the node */
};
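+/* tracks a cached bio on f2fs_bio_info->bio_list (see f2fs_submit_merged_ipu_write()) */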
+struct bio_entry {
+ struct bio *bio;
+ struct list_head list;
+};
+
#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
struct f2fs_sb_info *sbi; /* f2fs superblock */
@@ -1070,6 +1083,8 @@ struct f2fs_bio_info {
struct rw_semaphore io_rwsem; /* blocking op for bio */
spinlock_t io_lock; /* serialize DATA/NODE IOs */
struct list_head io_list; /* track fios */
+ struct list_head bio_list; /* bio entry list head */
+ struct rw_semaphore bio_list_lock; /* lock to protect bio entry list */
};
#define FDEV(i) (sbi->devs[i])
@@ -1169,6 +1184,10 @@ struct f2fs_sb_info {
int valid_super_block; /* valid super block no */
unsigned long s_flag; /* flags for sbi */
struct mutex writepages; /* mutex for writepages() */
+#ifdef CONFIG_UNICODE
+ struct unicode_map *s_encoding;
+ __u16 s_encoding_flags;
+#endif
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
@@ -1278,11 +1297,13 @@ struct f2fs_sb_info {
unsigned int gc_mode; /* current GC state */
unsigned int next_victim_seg[2]; /* next segment in victim section */
/* for skip statistic */
+ unsigned int atomic_files; /* # of opened atomic file */
unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
unsigned long long skipped_gc_rwsem; /* FG_GC only */
/* threshold for gc trials on pinned files */
u64 gc_pin_file_threshold;
+ struct rw_semaphore pin_sem;
/* maximum # of trials to find a victim segment for SSR and GC */
unsigned int max_victim_search;
@@ -1354,9 +1375,10 @@ struct f2fs_private_dio {
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
-#define f2fs_show_injection_info(type) \
- printk_ratelimited("%sF2FS-fs : inject %s in %s of %pS\n", \
- KERN_INFO, f2fs_fault_name[type], \
+#define f2fs_show_injection_info(sbi, type) \
+ printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \
+ KERN_INFO, sbi->sb->s_id, \
+ f2fs_fault_name[type], \
__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
@@ -1376,7 +1398,7 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
return false;
}
#else
-#define f2fs_show_injection_info(type) do { } while (0)
+#define f2fs_show_injection_info(sbi, type) do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
return false;
@@ -1640,6 +1662,7 @@ static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
{
unsigned long flags;
+ unsigned char *nat_bits;
/*
* In order to re-enable nat_bits we need to call fsck.f2fs by
@@ -1650,10 +1673,12 @@ static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
if (lock)
spin_lock_irqsave(&sbi->cp_lock, flags);
__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
- kvfree(NM_I(sbi)->nat_bits);
+ nat_bits = NM_I(sbi)->nat_bits;
NM_I(sbi)->nat_bits = NULL;
if (lock)
spin_unlock_irqrestore(&sbi->cp_lock, flags);
+
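+ /* kvfree() may sleep, so free nat_bits only after cp_lock is released */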
+ kvfree(nat_bits);
}
static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
@@ -1758,9 +1783,9 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
return ret;
if (time_to_inject(sbi, FAULT_BLOCK)) {
- f2fs_show_injection_info(FAULT_BLOCK);
+ f2fs_show_injection_info(sbi, FAULT_BLOCK);
release = *count;
- goto enospc;
+ goto release_quota;
}
/*
@@ -1805,6 +1830,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
enospc:
percpu_counter_sub(&sbi->alloc_valid_block_count, release);
+release_quota:
dquot_release_reservation_block(inode, release);
return -ENOSPC;
}
@@ -2009,7 +2035,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
}
if (time_to_inject(sbi, FAULT_BLOCK)) {
- f2fs_show_injection_info(FAULT_BLOCK);
+ f2fs_show_injection_info(sbi, FAULT_BLOCK);
goto enospc;
}
@@ -2124,7 +2150,8 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
return page;
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
- f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+ f2fs_show_injection_info(F2FS_M_SB(mapping),
+ FAULT_PAGE_ALLOC);
return NULL;
}
}
@@ -2139,7 +2166,7 @@ static inline struct page *f2fs_pagecache_get_page(
int fgp_flags, gfp_t gfp_mask)
{
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
- f2fs_show_injection_info(FAULT_PAGE_GET);
+ f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
return NULL;
}
@@ -2208,7 +2235,7 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
return bio;
}
if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
- f2fs_show_injection_info(FAULT_ALLOC_BIO);
+ f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
return NULL;
}
@@ -2359,13 +2386,16 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
#define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */
#define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
+#define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
- F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL)
+ F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
+ F2FS_CASEFOLD_FL)
/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL))
+#define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
+ F2FS_CASEFOLD_FL))
/* Flags that are appropriate for non-directories/regular files. */
#define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
@@ -2412,6 +2442,7 @@ enum {
FI_PROJ_INHERIT, /* indicate file inherits projectid */
FI_PIN_FILE, /* indicate file should not be gced */
FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
+ FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
};
static inline void __mark_inode_dirty_flag(struct inode *inode,
@@ -2451,6 +2482,12 @@ static inline void clear_inode_flag(struct inode *inode, int flag)
__mark_inode_dirty_flag(inode, flag, false);
}
+static inline bool f2fs_verity_in_progress(struct inode *inode)
+{
+ return IS_ENABLED(CONFIG_FS_VERITY) &&
+ is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
+}
+
static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
F2FS_I(inode)->i_acl_mode = mode;
@@ -2679,6 +2716,20 @@ static inline void clear_file(struct inode *inode, int type)
f2fs_mark_inode_dirty_sync(inode, true);
}
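+/*
+ * Check whether the in-core timestamps still match the i_disk_time[] values
+ * recorded when the inode was last written, so callers can tell if the
+ * on-disk inode update may be skipped.
+ */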
+static inline bool f2fs_is_time_consistent(struct inode *inode)
+{
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
+ return false;
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
+ return false;
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
+ return false;
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
+ &F2FS_I(inode)->i_crtime))
+ return false;
+ return true;
+}
+
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
bool ret;
@@ -2696,14 +2747,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
i_size_read(inode) & ~PAGE_MASK)
return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
- &F2FS_I(inode)->i_crtime))
+ if (!f2fs_is_time_consistent(inode))
return false;
down_read(&F2FS_I(inode)->i_sem);
@@ -2758,7 +2802,7 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
void *ret;
if (time_to_inject(sbi, FAULT_KMALLOC)) {
- f2fs_show_injection_info(FAULT_KMALLOC);
+ f2fs_show_injection_info(sbi, FAULT_KMALLOC);
return NULL;
}
@@ -2779,7 +2823,7 @@ static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
if (time_to_inject(sbi, FAULT_KVMALLOC)) {
- f2fs_show_injection_info(FAULT_KVMALLOC);
+ f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
return NULL;
}
@@ -2925,6 +2969,11 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
+extern int f2fs_ci_compare(const struct inode *parent,
+ const struct qstr *name,
+ const struct qstr *entry,
+ bool quick);
+
/*
* dir.c
*/
@@ -2988,8 +3037,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
/*
* hash.c
*/
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
- struct fscrypt_name *fname);
+f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
+ const struct qstr *name_info, struct fscrypt_name *fname);
/*
* node.c
@@ -3072,7 +3121,7 @@ void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
unsigned int start, unsigned int end);
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
struct cp_control *cpc);
@@ -3158,10 +3207,14 @@ void f2fs_destroy_checkpoint_caches(void);
*/
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
+int f2fs_init_bio_entry_cache(void);
+void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
nid_t ino, enum page_type type);
+void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
+ struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
@@ -3432,6 +3485,9 @@ static inline void f2fs_destroy_root_stats(void) { }
#endif
extern const struct file_operations f2fs_dir_operations;
+#ifdef CONFIG_UNICODE
+extern const struct dentry_operations f2fs_dentry_ops;
+#endif
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
@@ -3521,6 +3577,9 @@ void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
+/* verity.c */
+extern const struct fsverity_operations f2fs_verityops;
+
/*
* crypto support
*/
@@ -3543,7 +3602,7 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
*/
static inline bool f2fs_post_read_required(struct inode *inode)
{
- return f2fs_encrypted_file(inode);
+ return f2fs_encrypted_file(inode) || fsverity_active(inode);
}
#define F2FS_FEATURE_FUNCS(name, flagname) \
@@ -3561,7 +3620,9 @@ F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
+F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
+F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
@@ -3680,11 +3741,14 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
*/
if (f2fs_sb_has_blkzoned(sbi))
return true;
- if (test_opt(sbi, LFS) && (rw == WRITE) &&
- block_unaligned_IO(inode, iocb, iter))
- return true;
+ if (test_opt(sbi, LFS) && (rw == WRITE)) {
+ if (block_unaligned_IO(inode, iocb, iter))
+ return true;
+ if (F2FS_IO_ALIGNED(sbi))
+ return true;
+ }
if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
- !(inode->i_flags & S_SWAPFILE))
+ !IS_SWAPFILE(inode))
return true;
return false;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f8d46df8fa9e..85af112e868d 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -20,6 +20,7 @@
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
+#include <linux/nls.h>
#include "f2fs.h"
#include "node.h"
@@ -57,6 +58,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
goto err;
}
+ if (!f2fs_is_checkpoint_ready(sbi)) {
+ err = -ENOSPC;
+ goto err;
+ }
+
sb_start_pagefault(inode->i_sb);
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -496,6 +502,10 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
if (err)
return err;
+ err = fsverity_file_open(inode, filp);
+ if (err)
+ return err;
+
filp->f_mode |= FMODE_NOWAIT;
return dquot_file_open(inode, filp);
@@ -671,7 +681,7 @@ int f2fs_truncate(struct inode *inode)
trace_f2fs_truncate(inode);
if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
- f2fs_show_injection_info(FAULT_TRUNCATE);
+ f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
return -EIO;
}
@@ -716,11 +726,14 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
stat->attributes |= STATX_ATTR_IMMUTABLE;
if (flags & F2FS_NODUMP_FL)
stat->attributes |= STATX_ATTR_NODUMP;
+ if (IS_VERITY(inode))
+ stat->attributes |= STATX_ATTR_VERITY;
stat->attributes_mask |= (STATX_ATTR_APPEND |
STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE |
- STATX_ATTR_NODUMP);
+ STATX_ATTR_NODUMP |
+ STATX_ATTR_VERITY);
generic_fillattr(inode, stat);
@@ -741,15 +754,18 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
inode->i_uid = attr->ia_uid;
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if (ia_valid & ATTR_ATIME)
- inode->i_atime = timespec64_trunc(attr->ia_atime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_MTIME)
- inode->i_mtime = timespec64_trunc(attr->ia_mtime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_CTIME)
- inode->i_ctime = timespec64_trunc(attr->ia_ctime,
- inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_ATIME) {
+ inode->i_atime = timestamp_truncate(attr->ia_atime,
+ inode);
+ }
+ if (ia_valid & ATTR_MTIME) {
+ inode->i_mtime = timestamp_truncate(attr->ia_mtime,
+ inode);
+ }
+ if (ia_valid & ATTR_CTIME) {
+ inode->i_ctime = timestamp_truncate(attr->ia_ctime,
+ inode);
+ }
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
@@ -778,6 +794,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
+ err = fsverity_prepare_setattr(dentry, attr);
+ if (err)
+ return err;
+
if (is_quota_modification(inode, attr)) {
err = dquot_initialize(inode);
if (err)
@@ -808,14 +828,24 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
- bool to_smaller = (attr->ia_size <= i_size_read(inode));
+ loff_t old_size = i_size_read(inode);
+
+ if (attr->ia_size > MAX_INLINE_DATA(inode)) {
+ /*
+ * convert the inline inode before i_size_write so the inline
+ * flag is only kept while i_size fits within inline_data.
+ */
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ }
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_setsize(inode, attr->ia_size);
- if (to_smaller)
+ if (attr->ia_size <= old_size)
err = f2fs_truncate(inode);
/*
* do not trim all blocks after i_size if target size is
@@ -823,21 +853,11 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
*/
up_write(&F2FS_I(inode)->i_mmap_sem);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
if (err)
return err;
- if (!to_smaller) {
- /* should convert inline inode here */
- if (!f2fs_may_inline_data(inode)) {
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
- inode->i_mtime = inode->i_ctime = current_time(inode);
- }
-
down_write(&F2FS_I(inode)->i_sem);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
F2FS_I(inode)->last_disk_size = i_size_read(inode);
up_write(&F2FS_I(inode)->i_sem);
}
@@ -1030,7 +1050,7 @@ next_dnode:
if (test_opt(sbi, LFS)) {
f2fs_put_dnode(&dn);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
/* do not invalidate this block address */
@@ -1122,7 +1142,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
}
dn.ofs_in_node++;
i++;
- new_size = (dst + i) << PAGE_SHIFT;
+ new_size = (loff_t)(dst + i) << PAGE_SHIFT;
if (dst_inode->i_size < new_size)
f2fs_i_size_write(dst_inode, new_size);
} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
@@ -1528,12 +1548,44 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
if (off_end)
map.m_len++;
- if (f2fs_is_pinned_file(inode))
- map.m_seg_type = CURSEG_COLD_DATA;
+ if (!map.m_len)
+ return 0;
+
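+ /*
+ * Pinned files are preallocated one segment at a time from the
+ * pinned cold data log, rounding the request up to whole segments
+ * and triggering GC first when free sections run low.
+ */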
+ if (f2fs_is_pinned_file(inode)) {
+ block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
+ sbi->log_blocks_per_seg;
+ block_t done = 0;
+
+ if (map.m_len % sbi->blocks_per_seg)
+ len += sbi->blocks_per_seg;
+
+ map.m_len = sbi->blocks_per_seg;
+next_alloc:
+ if (has_not_enough_free_secs(sbi, 0,
+ GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+ mutex_lock(&sbi->gc_mutex);
+ err = f2fs_gc(sbi, true, false, NULL_SEGNO);
+ if (err && err != -ENODATA && err != -EAGAIN)
+ goto out_err;
+ }
+
+ down_write(&sbi->pin_sem);
+ map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+ f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
+ err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+ up_write(&sbi->pin_sem);
- err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
- F2FS_GET_BLOCK_PRE_DIO :
- F2FS_GET_BLOCK_PRE_AIO));
+ done += map.m_len;
+ len -= map.m_len;
+ map.m_lblk += map.m_len;
+ if (!err && len)
+ goto next_alloc;
+
+ map.m_len = done;
+ } else {
+ err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ }
+out_err:
if (err) {
pgoff_t last_off;
@@ -1567,6 +1619,8 @@ static long f2fs_fallocate(struct file *file, int mode,
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
+ if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
+ return -ENOSPC;
/* f2fs only support ->fallocate for regular file */
if (!S_ISREG(inode->i_mode))
@@ -1653,19 +1707,19 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- u32 oldflags;
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
return -EPERM;
- oldflags = fi->i_flags;
-
- if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
- if (!capable(CAP_LINUX_IMMUTABLE))
- return -EPERM;
+ if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) {
+ if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
+ return -EOPNOTSUPP;
+ if (!f2fs_empty_dir(inode))
+ return -ENOTEMPTY;
+ }
- fi->i_flags = iflags | (oldflags & ~mask);
+ fi->i_flags = iflags | (fi->i_flags & ~mask);
if (fi->i_flags & F2FS_PROJINHERIT_FL)
set_inode_flag(inode, FI_PROJ_INHERIT);
@@ -1699,6 +1753,7 @@ static const struct {
{ F2FS_INDEX_FL, FS_INDEX_FL },
{ F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
{ F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
+ { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
};
#define F2FS_GETTABLE_FS_FL ( \
@@ -1712,7 +1767,9 @@ static const struct {
FS_PROJINHERIT_FL | \
FS_ENCRYPT_FL | \
FS_INLINE_DATA_FL | \
- FS_NOCOW_FL)
+ FS_NOCOW_FL | \
+ FS_VERITY_FL | \
+ FS_CASEFOLD_FL)
#define F2FS_SETTABLE_FS_FL ( \
FS_SYNC_FL | \
@@ -1721,7 +1778,8 @@ static const struct {
FS_NODUMP_FL | \
FS_NOATIME_FL | \
FS_DIRSYNC_FL | \
- FS_PROJINHERIT_FL)
+ FS_PROJINHERIT_FL | \
+ FS_CASEFOLD_FL)
/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
@@ -1757,6 +1815,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
if (IS_ENCRYPTED(inode))
fsflags |= FS_ENCRYPT_FL;
+ if (IS_VERITY(inode))
+ fsflags |= FS_VERITY_FL;
if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
fsflags |= FS_INLINE_DATA_FL;
if (is_inode_flag_set(inode, FI_PIN_FILE))
@@ -1770,7 +1830,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- u32 fsflags;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ u32 fsflags, old_fsflags;
u32 iflags;
int ret;
@@ -1794,8 +1855,14 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
inode_lock(inode);
+ old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
+ ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
+ if (ret)
+ goto out;
+
ret = f2fs_setflags_common(inode, iflags,
f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
+out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
@@ -1811,6 +1878,8 @@ static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int ret;
if (!inode_owner_or_capable(inode))
@@ -1819,6 +1888,9 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
if (!S_ISREG(inode->i_mode))
return -EINVAL;
+ if (filp->f_flags & O_DIRECT)
+ return -EINVAL;
+
ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -1850,6 +1922,13 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
goto out;
}
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ if (list_empty(&fi->inmem_ilist))
+ list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
+ sbi->atomic_files++;
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+
+ /* add the inode to inmem_ilist first, then set FI_ATOMIC_FILE */
set_inode_flag(inode, FI_ATOMIC_FILE);
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1891,11 +1970,8 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
goto err_out;
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
- if (!ret) {
- clear_inode_flag(inode, FI_ATOMIC_FILE);
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
- stat_dec_atomic_write(inode);
- }
+ if (!ret)
+ f2fs_drop_inmem_pages(inode);
} else {
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
}
@@ -2184,6 +2260,49 @@ out_err:
return err;
}
+static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
+ unsigned long arg)
+{
+ if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
+ return -EOPNOTSUPP;
+
+ return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
+}
+
+static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
+{
+ if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
+ return -EOPNOTSUPP;
+
+ return fscrypt_ioctl_add_key(filp, (void __user *)arg);
+}
+
+static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
+{
+ if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
+ return -EOPNOTSUPP;
+
+ return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
+}
+
+static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
+ unsigned long arg)
+{
+ if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
+ return -EOPNOTSUPP;
+
+ return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
+}
+
+static int f2fs_ioc_get_encryption_key_status(struct file *filp,
+ unsigned long arg)
+{
+ if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
+ return -EOPNOTSUPP;
+
+ return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
+}
+
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -2238,9 +2357,9 @@ static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
return -EROFS;
end = range.start + range.len;
- if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
+ if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
+ end >= MAX_BLKADDR(sbi))
return -EINVAL;
- }
ret = mnt_want_write_file(filp);
if (ret)
@@ -2364,8 +2483,10 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
map.m_lblk += map.m_len;
}
- if (!fragmented)
+ if (!fragmented) {
+ total = 0;
goto out;
+ }
sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
@@ -2395,7 +2516,7 @@ do_map:
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
map.m_lblk = next_pgofs;
- continue;
+ goto check;
}
set_inode_flag(inode, FI_DO_DEFRAG);
@@ -2419,8 +2540,8 @@ do_map:
}
map.m_lblk = idx;
-
- if (idx < pg_end && cnt < blk_per_seg)
+check:
+ if (map.m_lblk < pg_end && cnt < blk_per_seg)
goto do_map;
clear_inode_flag(inode, FI_DO_DEFRAG);
@@ -2855,52 +2976,32 @@ static inline u32 f2fs_xflags_to_iflags(u32 xflags)
return iflags;
}
-static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
+static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
- struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct fsxattr fa;
- memset(&fa, 0, sizeof(struct fsxattr));
- fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags);
+ simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
- fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
- fi->i_projid);
-
- if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
- return -EFAULT;
- return 0;
+ fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}
-static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
- /*
- * Project Quota ID state is only allowed to change from within the init
- * namespace. Enforce that restriction only if we are trying to change
- * the quota ID state. Everything else is allowed in user namespaces.
- */
- if (current_user_ns() == &init_user_ns)
- return 0;
-
- if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
- return -EINVAL;
+ struct inode *inode = file_inode(filp);
+ struct fsxattr fa;
- if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
- if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
- return -EINVAL;
- } else {
- if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
- return -EINVAL;
- }
+ f2fs_fill_fsxattr(inode, &fa);
+ if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
+ return -EFAULT;
return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- struct fsxattr fa;
+ struct fsxattr fa, old_fa;
u32 iflags;
int err;
@@ -2923,9 +3024,12 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
return err;
inode_lock(inode);
- err = f2fs_ioctl_check_project(inode, &fa);
+
+ f2fs_fill_fsxattr(inode, &old_fa);
+ err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
if (err)
goto out;
+
err = f2fs_setflags_common(inode, iflags,
f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
if (err)
@@ -3077,10 +3181,98 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
return ret;
}
+static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+
+ if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
+ f2fs_warn(F2FS_I_SB(inode),
+ "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
+ inode->i_ino);
+ return -EOPNOTSUPP;
+ }
+
+ return fsverity_ioctl_enable(filp, (const void __user *)arg);
+}
+
+static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
+{
+ if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
+ return -EOPNOTSUPP;
+
+ return fsverity_ioctl_measure(filp, (void __user *)arg);
+}
+
+static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ char *vbuf;
+ int count;
+ int err = 0;
+
+ vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ down_read(&sbi->sb_lock);
+ count = utf16s_to_utf8s(sbi->raw_super->volume_name,
+ ARRAY_SIZE(sbi->raw_super->volume_name),
+ UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
+ up_read(&sbi->sb_lock);
+
+ if (copy_to_user((char __user *)arg, vbuf,
+ min(FSLABEL_MAX, count)))
+ err = -EFAULT;
+
+ kvfree(vbuf);
+ return err;
+}
+
+static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ char *vbuf;
+ int err = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
+ if (IS_ERR(vbuf))
+ return PTR_ERR(vbuf);
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ goto out;
+
+ down_write(&sbi->sb_lock);
+
+ memset(sbi->raw_super->volume_name, 0,
+ sizeof(sbi->raw_super->volume_name));
+ utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
+ sbi->raw_super->volume_name,
+ ARRAY_SIZE(sbi->raw_super->volume_name));
+
+ err = f2fs_commit_super(sbi, false);
+
+ up_write(&sbi->sb_lock);
+
+ mnt_drop_write_file(filp);
+out:
+ kfree(vbuf);
+ return err;
+}
+
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
return -EIO;
+ if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
+ return -ENOSPC;
switch (cmd) {
case F2FS_IOC_GETFLAGS:
@@ -3109,6 +3301,16 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_get_encryption_policy(filp, arg);
case F2FS_IOC_GET_ENCRYPTION_PWSALT:
return f2fs_ioc_get_encryption_pwsalt(filp, arg);
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ return f2fs_ioc_get_encryption_policy_ex(filp, arg);
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ return f2fs_ioc_add_encryption_key(filp, arg);
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ return f2fs_ioc_remove_encryption_key(filp, arg);
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
+ return f2fs_ioc_get_encryption_key_status(filp, arg);
case F2FS_IOC_GARBAGE_COLLECT:
return f2fs_ioc_gc(filp, arg);
case F2FS_IOC_GARBAGE_COLLECT_RANGE:
@@ -3135,6 +3337,14 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_precache_extents(filp, arg);
case F2FS_IOC_RESIZE_FS:
return f2fs_ioc_resize_fs(filp, arg);
+ case FS_IOC_ENABLE_VERITY:
+ return f2fs_ioc_enable_verity(filp, arg);
+ case FS_IOC_MEASURE_VERITY:
+ return f2fs_ioc_measure_verity(filp, arg);
+ case F2FS_IOC_GET_VOLUME_NAME:
+ return f2fs_get_volume_name(filp, arg);
+ case F2FS_IOC_SET_VOLUME_NAME:
+ return f2fs_set_volume_name(filp, arg);
default:
return -ENOTTY;
}
@@ -3151,16 +3361,12 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
- if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!inode_trylock(inode)) {
- if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode)) {
ret = -EAGAIN;
goto out;
}
+ } else {
inode_lock(inode);
}
@@ -3233,9 +3439,15 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_RELEASE_VOLATILE_WRITE:
case F2FS_IOC_ABORT_VOLATILE_WRITE:
case F2FS_IOC_SHUTDOWN:
+ case FITRIM:
case F2FS_IOC_SET_ENCRYPTION_POLICY:
case F2FS_IOC_GET_ENCRYPTION_PWSALT:
case F2FS_IOC_GET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
case F2FS_IOC_GARBAGE_COLLECT:
case F2FS_IOC_GARBAGE_COLLECT_RANGE:
case F2FS_IOC_WRITE_CHECKPOINT:
@@ -3249,6 +3461,10 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_SET_PIN_FILE:
case F2FS_IOC_PRECACHE_EXTENTS:
case F2FS_IOC_RESIZE_FS:
+ case FS_IOC_ENABLE_VERITY:
+ case FS_IOC_MEASURE_VERITY:
+ case F2FS_IOC_GET_VOLUME_NAME:
+ case F2FS_IOC_SET_VOLUME_NAME:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 6691f526fa40..b3d399623290 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -54,7 +54,7 @@ static int gc_thread_func(void *data)
}
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
- f2fs_show_injection_info(FAULT_CHECKPOINT);
+ f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
@@ -382,6 +382,16 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
nsearched++;
}
+#ifdef CONFIG_F2FS_CHECK_FS
+ /*
+ * skip selecting a segno marked invalid (one that failed the
+ * block validity check during GC) to avoid an endless GC loop
+ * in such cases.
+ */
+ if (test_bit(segno, sm->invalid_segmap))
+ goto next;
+#endif
+
secno = GET_SEC_FROM_SEG(sbi, segno);
if (sec_usage_check(sbi, secno))
@@ -627,8 +637,21 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
f2fs_put_page(node_page, 1);
- if (source_blkaddr != blkaddr)
+ if (source_blkaddr != blkaddr) {
+#ifdef CONFIG_F2FS_CHECK_FS
+ unsigned int segno = GET_SEGNO(sbi, blkaddr);
+ unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+
+ if (unlikely(check_valid_map(sbi, segno, offset))) {
+ if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
+ f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
+ blkaddr, source_blkaddr, segno);
+ f2fs_bug_on(sbi, 1);
+ }
+ }
+#endif
return false;
+ }
return true;
}
@@ -796,6 +819,29 @@ static int move_data_block(struct inode *inode, block_t bidx,
if (lfs_mode)
down_write(&fio.sbi->io_order_lock);
+ mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, false);
+ if (!mpage)
+ goto up_out;
+
+ fio.encrypted_page = mpage;
+
+ /* read source block in mpage */
+ if (!PageUptodate(mpage)) {
+ err = f2fs_submit_page_bio(&fio);
+ if (err) {
+ f2fs_put_page(mpage, 1);
+ goto up_out;
+ }
+ lock_page(mpage);
+ if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
+ !PageUptodate(mpage))) {
+ err = -EIO;
+ f2fs_put_page(mpage, 1);
+ goto up_out;
+ }
+ }
+
f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
&sum, CURSEG_COLD_DATA, NULL, false);
@@ -803,44 +849,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
if (!fio.encrypted_page) {
err = -ENOMEM;
- goto recover_block;
- }
-
- mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
- fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
- if (mpage) {
- bool updated = false;
-
- if (PageUptodate(mpage)) {
- memcpy(page_address(fio.encrypted_page),
- page_address(mpage), PAGE_SIZE);
- updated = true;
- }
f2fs_put_page(mpage, 1);
- invalidate_mapping_pages(META_MAPPING(fio.sbi),
- fio.old_blkaddr, fio.old_blkaddr);
- if (updated)
- goto write_page;
- }
-
- err = f2fs_submit_page_bio(&fio);
- if (err)
- goto put_page_out;
-
- /* write page */
- lock_page(fio.encrypted_page);
-
- if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
- err = -EIO;
- goto put_page_out;
- }
- if (unlikely(!PageUptodate(fio.encrypted_page))) {
- err = -EIO;
- goto put_page_out;
+ goto recover_block;
}
-write_page:
+ /* write target block */
f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
+ memcpy(page_address(fio.encrypted_page),
+ page_address(mpage), PAGE_SIZE);
+ f2fs_put_page(mpage, 1);
+ invalidate_mapping_pages(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, fio.old_blkaddr);
+
set_page_dirty(fio.encrypted_page);
if (clear_page_dirty_for_io(fio.encrypted_page))
dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -871,11 +891,12 @@ write_page:
put_page_out:
f2fs_put_page(fio.encrypted_page, 1);
recover_block:
- if (lfs_mode)
- up_write(&fio.sbi->io_order_lock);
if (err)
f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
true, true);
+up_out:
+ if (lfs_mode)
+ up_write(&fio.sbi->io_order_lock);
put_out:
f2fs_put_dnode(&dn);
out:
@@ -991,8 +1012,14 @@ next_step:
block_t start_bidx;
nid_t nid = le32_to_cpu(entry->nid);
- /* stop BG_GC if there is not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
+ /*
+ * stop BG_GC if there are not enough free sections.
+ * Also stop GC if the segment has become fully valid due to
+ * a race with SSR block allocation.
+ */
+ if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
+ get_valid_blocks(sbi, segno, false) ==
+ sbi->blocks_per_seg)
return submitted;
if (check_valid_map(sbi, segno, off) == 0)
@@ -1305,7 +1332,7 @@ gc_more:
round++;
}
- if (gc_type == FG_GC)
+ if (gc_type == FG_GC && seg_freed)
sbi->cur_victim_sec = NULL_SEGNO;
if (sync)
@@ -1416,11 +1443,20 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
raw_sb->block_count = cpu_to_le64(block_count +
(long long)segs * sbi->blocks_per_seg);
+ if (f2fs_is_multi_device(sbi)) {
+ int last_dev = sbi->s_ndevs - 1;
+ int dev_segs =
+ le32_to_cpu(raw_sb->devs[last_dev].total_segments);
+
+ raw_sb->devs[last_dev].total_segments =
+ cpu_to_le32(dev_segs + segs);
+ }
}
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
int segs = secs * sbi->segs_per_sec;
+ long long blks = (long long)segs * sbi->blocks_per_seg;
long long user_block_count =
le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
@@ -1428,8 +1464,20 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
- F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count +
- (long long)segs * sbi->blocks_per_seg);
+ F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
+
+ if (f2fs_is_multi_device(sbi)) {
+ int last_dev = sbi->s_ndevs - 1;
+
+ FDEV(last_dev).total_segments =
+ (int)FDEV(last_dev).total_segments + segs;
+ FDEV(last_dev).end_blk =
+ (long long)FDEV(last_dev).end_blk + blks;
+#ifdef CONFIG_BLK_DEV_ZONED
+ FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
+ (int)(blks >> sbi->log_blocks_per_blkz);
+#endif
+ }
}
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
@@ -1444,6 +1492,15 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
if (block_count > old_block_count)
return -EINVAL;
+ if (f2fs_is_multi_device(sbi)) {
+ int last_dev = sbi->s_ndevs - 1;
+ __u64 last_segs = FDEV(last_dev).total_segments;
+
+ if (block_count + last_segs * sbi->blocks_per_seg <=
+ old_block_count)
+ return -EINVAL;
+ }
+
/* new fs size should align to section size */
div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
if (rem)
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index cc82f142f811..5bc4dcd8fc03 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -14,6 +14,7 @@
#include <linux/f2fs_fs.h>
#include <linux/cryptohash.h>
#include <linux/pagemap.h>
+#include <linux/unicode.h>
#include "f2fs.h"
@@ -67,7 +68,7 @@ static void str2hashbuf(const unsigned char *msg, size_t len,
*buf++ = pad;
}
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info,
struct fscrypt_name *fname)
{
__u32 hash;
@@ -103,3 +104,37 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
return f2fs_hash;
}
+
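+/*
+ * Hash a dentry name for lookup in @dir: the name is casefolded first when
+ * the directory is case-insensitive; otherwise, or if casefolding fails, the
+ * raw byte sequence is hashed.
+ */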
+f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
+ const struct qstr *name_info, struct fscrypt_name *fname)
+{
+#ifdef CONFIG_UNICODE
+ struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+ const struct unicode_map *um = sbi->s_encoding;
+ int r, dlen;
+ unsigned char *buff;
+ struct qstr folded;
+
+ if (!name_info->len || !IS_CASEFOLDED(dir))
+ goto opaque_seq;
+
+ buff = f2fs_kzalloc(sbi, sizeof(char) * PATH_MAX, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ dlen = utf8_casefold(um, name_info, buff, PATH_MAX);
+ if (dlen < 0) {
+ kvfree(buff);
+ goto opaque_seq;
+ }
+ folded.name = buff;
+ folded.len = dlen;
+ r = __f2fs_dentry_hash(&folded, fname);
+
+ kvfree(buff);
+ return r;
+
+opaque_seq:
+#endif
+ return __f2fs_dentry_hash(name_info, fname);
+}
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 3613efca8c00..896db0416f0e 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -131,6 +131,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
if (err) {
+ f2fs_truncate_data_blocks_range(dn, 1);
f2fs_put_dnode(dn);
return err;
}
@@ -320,7 +321,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
return NULL;
}
- namehash = f2fs_dentry_hash(&name, fname);
+ namehash = f2fs_dentry_hash(dir, &name, fname);
inline_dentry = inline_data_addr(dir, ipage);
@@ -580,7 +581,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
f2fs_wait_on_page_writeback(ipage, NODE, true, true);
- name_hash = f2fs_dentry_hash(new_name, NULL);
+ name_hash = f2fs_dentry_hash(dir, new_name, NULL);
f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
set_page_dirty(ipage);
@@ -588,6 +589,11 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
/* we don't need to mark_inode_dirty now */
if (inode) {
f2fs_i_pino_write(inode, dir->i_ino);
+
+ /* synchronize inode page's data from inode cache */
+ if (is_inode_flag_set(inode, FI_NEW_INODE))
+ f2fs_update_inode(inode, page);
+
f2fs_put_page(page, 1);
}
@@ -704,7 +710,13 @@ int f2fs_inline_data_fiemap(struct inode *inode,
if (IS_ERR(ipage))
return PTR_ERR(ipage);
- if (!f2fs_has_inline_data(inode)) {
+ if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
+ !f2fs_has_inline_data(inode)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ if (S_ISDIR(inode->i_mode) && !f2fs_has_inline_dentry(inode)) {
err = -EAGAIN;
goto out;
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index a33d7a849b2d..502bd491336a 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -46,9 +46,13 @@ void f2fs_set_inode_flags(struct inode *inode)
new_fl |= S_DIRSYNC;
if (file_is_encrypt(inode))
new_fl |= S_ENCRYPTED;
+ if (file_is_verity(inode))
+ new_fl |= S_VERITY;
+ if (flags & F2FS_CASEFOLD_FL)
+ new_fl |= S_CASEFOLD;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
- S_ENCRYPTED);
+ S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -611,10 +615,14 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_ino == F2FS_META_INO(sbi))
return 0;
- if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
+ /*
+ * atime could be updated without dirtying f2fs inode in lazytime mode
+ */
+ if (f2fs_is_time_consistent(inode) &&
+ !is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
- if (f2fs_is_checkpoint_ready(sbi))
+ if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
/*
@@ -673,7 +681,7 @@ retry:
err = f2fs_truncate(inode);
if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
- f2fs_show_injection_info(FAULT_EVICT_INODE);
+ f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
err = -EIO;
}
@@ -693,7 +701,8 @@ retry:
if (err) {
f2fs_update_inode_page(inode);
- set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ if (dquot_initialize_needed(inode))
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
}
sb_end_intwrite(inode->i_sb);
no_delete:
@@ -703,7 +712,7 @@ no_delete:
stat_dec_inline_dir(inode);
stat_dec_inline_inode(inode);
- if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG) &&
+ if (likely(!f2fs_cp_error(sbi) &&
!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
else
@@ -733,6 +742,7 @@ no_delete:
}
out_clear:
fscrypt_put_encryption_info(inode);
+ fsverity_cleanup_inode(inode);
clear_inode(inode);
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index c5b99042e6f2..a1c507b0b4ac 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -272,9 +272,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- err = f2fs_is_checkpoint_ready(sbi);
- if (err)
- return err;
+ if (!f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
err = dquot_initialize(dir);
if (err)
@@ -321,9 +320,8 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- err = f2fs_is_checkpoint_ready(sbi);
- if (err)
- return err;
+ if (!f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
err = fscrypt_prepare_link(old_dentry, dir, dentry);
if (err)
@@ -489,6 +487,17 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out_iput;
}
out_splice:
+#ifdef CONFIG_UNICODE
+ if (!inode && IS_CASEFOLDED(dir)) {
+ /* Eventually we want to call d_add_ci(dentry, NULL)
+ * for negative dentries in the encoding case as
+ * well. For now, prevent the negative dentry
+ * from being cached.
+ */
+ trace_f2fs_lookup_end(dir, dentry, ino, err);
+ return NULL;
+ }
+#endif
new = d_splice_alias(inode, dentry);
err = PTR_ERR_OR_ZERO(new);
trace_f2fs_lookup_end(dir, dentry, ino, err);
@@ -537,6 +546,16 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
goto fail;
}
f2fs_delete_entry(de, page, dir, inode);
+#ifdef CONFIG_UNICODE
+ /* VFS negative dentries are incompatible with encoding and
+ * case-insensitivity. Eventually we'll want to avoid
+ * invalidating the dentries here, along with returning the
+ * negative dentries at f2fs_lookup(), when it is better
+ * supported by the VFS for the CI case.
+ */
+ if (IS_CASEFOLDED(dir))
+ d_invalidate(dentry);
+#endif
f2fs_unlock_op(sbi);
if (IS_DIRSYNC(dir))
@@ -571,9 +590,8 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- err = f2fs_is_checkpoint_ready(sbi);
- if (err)
- return err;
+ if (!f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize,
&disk_link);
@@ -703,9 +721,8 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- err = f2fs_is_checkpoint_ready(sbi);
- if (err)
- return err;
+ if (!f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
err = dquot_initialize(dir);
if (err)
@@ -804,6 +821,8 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ if (!f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
int err = fscrypt_get_encryption_info(dir);
@@ -840,9 +859,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- err = f2fs_is_checkpoint_ready(sbi);
- if (err)
- return err;
+ if (!f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(new_dir)->i_projid,
@@ -963,7 +981,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old_dir_entry || whiteout)
file_lost_pino(old_inode);
else
- F2FS_I(old_inode)->i_pino = new_dir->i_ino;
+ /* adjust dir's i_pino to pass fsck check */
+ f2fs_i_pino_write(old_inode, new_dir->i_ino);
up_write(&F2FS_I(old_inode)->i_sem);
old_inode->i_ctime = current_time(old_inode);
@@ -1035,9 +1054,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- err = f2fs_is_checkpoint_ready(sbi);
- if (err)
- return err;
+ if (!f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
!projid_eq(F2FS_I(new_dir)->i_projid,
@@ -1124,7 +1142,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_set_link(old_dir, old_entry, old_page, new_inode);
down_write(&F2FS_I(old_inode)->i_sem);
- file_lost_pino(old_inode);
+ if (!old_dir_entry)
+ file_lost_pino(old_inode);
+ else
+ /* adjust dir's i_pino to pass fsck check */
+ f2fs_i_pino_write(old_inode, new_dir->i_ino);
up_write(&F2FS_I(old_inode)->i_sem);
old_dir->i_ctime = current_time(old_dir);
@@ -1139,7 +1161,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
down_write(&F2FS_I(new_inode)->i_sem);
- file_lost_pino(new_inode);
+ if (!new_dir_entry)
+ file_lost_pino(new_inode);
+ else
+ /* adjust dir's i_pino to pass fsck check */
+ f2fs_i_pino_write(new_inode, old_dir->i_ino);
up_write(&F2FS_I(new_inode)->i_sem);
new_dir->i_ctime = current_time(new_dir);
@@ -1250,6 +1276,7 @@ const struct inode_operations f2fs_dir_inode_operations = {
#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
#endif
+ .fiemap = f2fs_fiemap,
};
const struct inode_operations f2fs_symlink_inode_operations = {
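The casefold handling above (skipping negative-dentry caching in f2fs_lookup() and invalidating dentries in f2fs_unlink()) only applies to directories that carry the casefold attribute. A minimal user-space sketch of setting that attribute on an empty directory, assuming the kernel's FS_IOC_SETFLAGS path accepts the flag; the fallback define is only for older headers:

/* Illustrative only: mark an empty directory as case-insensitive. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

#ifndef FS_CASEFOLD_FL
#define FS_CASEFOLD_FL	0x40000000
#endif

int make_dir_casefolded(const char *path)
{
	int fd = open(path, O_RDONLY | O_DIRECTORY);
	int attr, err;

	if (fd < 0)
		return -1;
	err = ioctl(fd, FS_IOC_GETFLAGS, &attr);
	if (!err) {
		attr |= FS_CASEFOLD_FL;	/* only allowed on empty directories */
		err = ioctl(fd, FS_IOC_SETFLAGS, &attr);
	}
	close(fd);
	return err;
}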
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index a18b2a895771..3314a0f3405e 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1524,7 +1524,8 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (wbc->sync_mode == WB_SYNC_NONE &&
+ if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ wbc->sync_mode == WB_SYNC_NONE &&
IS_DNODE(page) && is_cold_node(page))
goto redirty_out;
@@ -1762,6 +1763,47 @@ out:
return ret ? -EIO: 0;
}
+static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ bool clean;
+
+ if (inode->i_ino != ino)
+ return 0;
+
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
+ return 0;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ clean = list_empty(&F2FS_I(inode)->gdirty_list);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+
+ if (clean)
+ return 0;
+
+ inode = igrab(inode);
+ if (!inode)
+ return 0;
+ return 1;
+}
+
+static bool flush_dirty_inode(struct page *page)
+{
+ struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct inode *inode;
+ nid_t ino = ino_of_node(page);
+
+ inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
+ if (!inode)
+ return false;
+
+ f2fs_update_inode(inode, page);
+ unlock_page(page);
+
+ iput(inode);
+ return true;
+}
+
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
struct writeback_control *wbc,
bool do_balance, enum iostat_type io_type)
@@ -1785,6 +1827,7 @@ next_step:
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
bool submitted = false;
+ bool may_dirty = true;
/* give a priority to WB_SYNC threads */
if (atomic_read(&sbi->wb_sync_req[NODE]) &&
@@ -1832,6 +1875,13 @@ continue_unlock:
goto lock_node;
}
+ /* flush dirty inode */
+ if (IS_INODE(page) && may_dirty) {
+ may_dirty = false;
+ if (flush_dirty_inode(page))
+ goto lock_node;
+ }
+
f2fs_wait_on_page_writeback(page, NODE, true, true);
if (!clear_page_dirty_for_io(page))
@@ -1860,7 +1910,8 @@ continue_unlock:
}
if (step < 2) {
- if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
+ if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ wbc->sync_mode == WB_SYNC_NONE && step == 1)
goto out;
step++;
goto next_step;
@@ -2298,7 +2349,6 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
if (ret) {
up_read(&nm_i->nat_tree_lock);
- f2fs_bug_on(sbi, !mount);
f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
return ret;
}
@@ -2348,7 +2398,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct free_nid *i = NULL;
retry:
if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
- f2fs_show_injection_info(FAULT_ALLOC_NID);
+ f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
return false;
}
@@ -2964,7 +3014,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
/* not used nids: 0, node, meta, (and root counted as valid node) */
nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
- sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
+ F2FS_RESERVED_NODE_NUM;
nm_i->nid_cnt[FREE_NID] = 0;
nm_i->nid_cnt[PREALLOC_NID] = 0;
nm_i->nat_cnt = 0;
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 783773e4560d..76477f71d4ee 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -711,7 +711,7 @@ next:
f2fs_put_page(page, 1);
}
if (!err)
- f2fs_allocate_new_segments(sbi);
+ f2fs_allocate_new_segments(sbi, NO_CHECK_TYPE);
return err;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a661ac32e829..56e81447e2f3 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -185,8 +185,6 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
void f2fs_register_inmem_page(struct inode *inode, struct page *page)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct inmem_pages *new;
f2fs_trace_pid(page);
@@ -200,15 +198,11 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
INIT_LIST_HEAD(&new->list);
/* increase reference count with clean state */
- mutex_lock(&fi->inmem_lock);
get_page(page);
- list_add_tail(&new->list, &fi->inmem_pages);
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (list_empty(&fi->inmem_ilist))
- list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+ mutex_lock(&F2FS_I(inode)->inmem_lock);
+ list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
- mutex_unlock(&fi->inmem_lock);
+ mutex_unlock(&F2FS_I(inode)->inmem_lock);
trace_f2fs_register_inmem_page(page, INMEM);
}
@@ -294,6 +288,8 @@ void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
struct inode *inode;
struct f2fs_inode_info *fi;
+ unsigned int count = sbi->atomic_files;
+ unsigned int looped = 0;
next:
spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
if (list_empty(head)) {
@@ -302,22 +298,26 @@ next:
}
fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
inode = igrab(&fi->vfs_inode);
+ if (inode)
+ list_move_tail(&fi->inmem_ilist, head);
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
if (inode) {
if (gc_failure) {
- if (fi->i_gc_failures[GC_FAILURE_ATOMIC])
- goto drop;
- goto skip;
+ if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
+ goto skip;
}
-drop:
set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
f2fs_drop_inmem_pages(inode);
+skip:
iput(inode);
}
-skip:
congestion_wait(BLK_RW_ASYNC, HZ/50);
cond_resched();
+ if (gc_failure) {
+ if (++looped >= count)
+ return;
+ }
goto next;
}
@@ -330,19 +330,20 @@ void f2fs_drop_inmem_pages(struct inode *inode)
mutex_lock(&fi->inmem_lock);
__revoke_inmem_pages(inode, &fi->inmem_pages,
true, false, true);
-
- if (list_empty(&fi->inmem_pages)) {
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (!list_empty(&fi->inmem_ilist))
- list_del_init(&fi->inmem_ilist);
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
- }
mutex_unlock(&fi->inmem_lock);
}
- clear_inode_flag(inode, FI_ATOMIC_FILE);
fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
stat_dec_atomic_write(inode);
+
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ if (!list_empty(&fi->inmem_ilist))
+ list_del_init(&fi->inmem_ilist);
+ if (f2fs_is_atomic_file(inode)) {
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+ sbi->atomic_files--;
+ }
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
}
void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
@@ -471,11 +472,6 @@ int f2fs_commit_inmem_pages(struct inode *inode)
mutex_lock(&fi->inmem_lock);
err = __f2fs_commit_inmem_pages(inode);
-
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (!list_empty(&fi->inmem_ilist))
- list_del_init(&fi->inmem_ilist);
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
mutex_unlock(&fi->inmem_lock);
clear_inode_flag(inode, FI_ATOMIC_COMMIT);
@@ -493,7 +489,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
- f2fs_show_injection_info(FAULT_CHECKPOINT);
+ f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
@@ -501,7 +497,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
if (need && excess_cached_nats(sbi))
f2fs_balance_fs_bg(sbi);
- if (f2fs_is_checkpoint_ready(sbi))
+ if (!f2fs_is_checkpoint_ready(sbi))
return;
/*
@@ -817,9 +813,13 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
dirty_i->nr_dirty[t]--;
- if (get_valid_blocks(sbi, segno, true) == 0)
+ if (get_valid_blocks(sbi, segno, true) == 0) {
clear_bit(GET_SEC_FROM_SEG(sbi, segno),
dirty_i->victim_secmap);
+#ifdef CONFIG_F2FS_CHECK_FS
+ clear_bit(segno, SIT_I(sbi)->invalid_segmap);
+#endif
+ }
}
}
@@ -1017,8 +1017,9 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
if (dc->error)
printk_ratelimited(
- "%sF2FS-fs: Issue discard(%u, %u, %u) failed, ret: %d",
- KERN_INFO, dc->lstart, dc->start, dc->len, dc->error);
+ "%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
+ KERN_INFO, sbi->sb->s_id,
+ dc->lstart, dc->start, dc->len, dc->error);
__detach_discard_cmd(dcc, dc);
}
@@ -1158,7 +1159,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
dc->len += len;
if (time_to_inject(sbi, FAULT_DISCARD)) {
- f2fs_show_injection_info(FAULT_DISCARD);
+ f2fs_show_injection_info(sbi, FAULT_DISCARD);
err = -EIO;
goto submit;
}
@@ -1780,7 +1781,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
return -EIO;
}
trace_f2fs_issue_reset_zone(bdev, blkstart);
- return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS);
+ return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+ sector, nr_sects, GFP_NOFS);
}
/* For conventional zones, use regular discard if supported */
@@ -2084,6 +2086,13 @@ static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
f2fs_stop_discard_thread(sbi);
+ /*
+	 * Recovery can cache discard commands, so the error path of
+	 * fill_super() needs to give them a chance to be issued.
+ */
+ if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
+ f2fs_issue_discard_timeout(sbi);
+
kvfree(dcc);
SM_I(sbi)->dcc_info = NULL;
}
@@ -2156,9 +2165,11 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
if (!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
- /* don't overwrite by SSR to keep node chain */
- if (IS_NODESEG(se->type) &&
- !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
+ /*
+	 * SSR should never reuse a block which is checkpointed
+ * or newly invalidated.
+ */
+ if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks++;
}
@@ -2690,7 +2701,7 @@ unlock:
up_read(&SM_I(sbi)->curseg_lock);
}
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg;
unsigned int old_segno;
@@ -2699,10 +2710,17 @@ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
down_write(&SIT_I(sbi)->sentry_lock);
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ if (type != NO_CHECK_TYPE && i != type)
+ continue;
+
curseg = CURSEG_I(sbi, i);
- old_segno = curseg->segno;
- SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
- locate_dirty_segment(sbi, old_segno);
+ if (type == NO_CHECK_TYPE || curseg->next_blkoff ||
+ get_valid_blocks(sbi, curseg->segno, false) ||
+ get_ckpt_valid_blocks(sbi, curseg->segno)) {
+ old_segno = curseg->segno;
+ SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+ locate_dirty_segment(sbi, old_segno);
+ }
}
up_write(&SIT_I(sbi)->sentry_lock);
@@ -3068,6 +3086,19 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, type);
+ bool put_pin_sem = false;
+
+ if (type == CURSEG_COLD_DATA) {
+ /* GC during CURSEG_COLD_DATA_PINNED allocation */
+ if (down_read_trylock(&sbi->pin_sem)) {
+ put_pin_sem = true;
+ } else {
+ type = CURSEG_WARM_DATA;
+ curseg = CURSEG_I(sbi, type);
+ }
+ } else if (type == CURSEG_COLD_DATA_PINNED) {
+ type = CURSEG_COLD_DATA;
+ }
down_read(&SM_I(sbi)->curseg_lock);
@@ -3116,12 +3147,14 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
f2fs_inode_chksum_set(sbi, page);
}
+ if (F2FS_IO_ALIGNED(sbi))
+ fio->retry = false;
+
if (add_list) {
struct f2fs_bio_info *io;
INIT_LIST_HEAD(&fio->list);
fio->in_list = true;
- fio->retry = false;
io = sbi->write_io[fio->type] + fio->temp;
spin_lock(&io->io_lock);
list_add_tail(&fio->list, &io->io_list);
@@ -3131,6 +3164,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
mutex_unlock(&curseg->curseg_mutex);
up_read(&SM_I(sbi)->curseg_lock);
+
+ if (put_pin_sem)
+ up_read(&sbi->pin_sem);
}
static void update_device_state(struct f2fs_io_info *fio)
@@ -3377,7 +3413,10 @@ void f2fs_wait_on_page_writeback(struct page *page,
if (PageWriteback(page)) {
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ /* submit cached LFS IO */
f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
+	/* submit cached IPU IO */
+ f2fs_submit_merged_ipu_write(sbi, NULL, page);
if (ordered) {
wait_on_page_writeback(page);
f2fs_bug_on(sbi, locked && PageWriteback(page));
@@ -3447,11 +3486,6 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
seg_i = CURSEG_I(sbi, i);
segno = le32_to_cpu(ckpt->cur_data_segno[i]);
blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
- if (blk_off > ENTRIES_IN_SUM) {
- f2fs_bug_on(sbi, 1);
- f2fs_put_page(page, 1);
- return -EFAULT;
- }
seg_i->next_segno = segno;
reset_curseg(sbi, i, 0);
seg_i->alloc_type = ckpt->alloc_type[i];
@@ -3941,8 +3975,8 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct sit_info *sit_i;
unsigned int sit_segs, start;
- char *src_bitmap;
- unsigned int bitmap_size;
+ char *src_bitmap, *bitmap;
+ unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
/* allocate memory for SIT information */
sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
@@ -3958,33 +3992,37 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
if (!sit_i->sentries)
return -ENOMEM;
- bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, bitmap_size,
+ main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
+ sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
GFP_KERNEL);
if (!sit_i->dirty_sentries_bitmap)
return -ENOMEM;
+#ifdef CONFIG_F2FS_CHECK_FS
+ bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 4;
+#else
+ bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 3;
+#endif
+ sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
+ if (!sit_i->bitmap)
+ return -ENOMEM;
+
+ bitmap = sit_i->bitmap;
+
for (start = 0; start < MAIN_SEGS(sbi); start++) {
- sit_i->sentries[start].cur_valid_map
- = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
- sit_i->sentries[start].ckpt_valid_map
- = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
- if (!sit_i->sentries[start].cur_valid_map ||
- !sit_i->sentries[start].ckpt_valid_map)
- return -ENOMEM;
+ sit_i->sentries[start].cur_valid_map = bitmap;
+ bitmap += SIT_VBLOCK_MAP_SIZE;
+
+ sit_i->sentries[start].ckpt_valid_map = bitmap;
+ bitmap += SIT_VBLOCK_MAP_SIZE;
#ifdef CONFIG_F2FS_CHECK_FS
- sit_i->sentries[start].cur_valid_map_mir
- = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
- if (!sit_i->sentries[start].cur_valid_map_mir)
- return -ENOMEM;
+ sit_i->sentries[start].cur_valid_map_mir = bitmap;
+ bitmap += SIT_VBLOCK_MAP_SIZE;
#endif
- sit_i->sentries[start].discard_map
- = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
- GFP_KERNEL);
- if (!sit_i->sentries[start].discard_map)
- return -ENOMEM;
+ sit_i->sentries[start].discard_map = bitmap;
+ bitmap += SIT_VBLOCK_MAP_SIZE;
}
sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
@@ -4004,17 +4042,23 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
	/* setup SIT bitmap from checkpoint pack */
- bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
+ sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
- sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
+ sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
if (!sit_i->sit_bitmap)
return -ENOMEM;
#ifdef CONFIG_F2FS_CHECK_FS
- sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
+ sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
+ sit_bitmap_size, GFP_KERNEL);
if (!sit_i->sit_bitmap_mir)
return -ENOMEM;
+
+ sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
+ main_bitmap_size, GFP_KERNEL);
+ if (!sit_i->invalid_segmap)
+ return -ENOMEM;
#endif
/* init SIT information */
@@ -4023,7 +4067,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
sit_i->written_valid_blocks = 0;
- sit_i->bitmap_size = bitmap_size;
+ sit_i->bitmap_size = sit_bitmap_size;
sit_i->dirty_sentries = 0;
sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
@@ -4161,7 +4205,6 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
if (start >= MAIN_SEGS(sbi)) {
f2fs_err(sbi, "Wrong journal entry on segno %u",
start);
- set_sbi_flag(sbi, SBI_NEED_FSCK);
err = -EFSCORRUPTED;
break;
}
@@ -4201,7 +4244,6 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
if (!err && total_node_blocks != valid_node_count(sbi)) {
f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
total_node_blocks, valid_node_count(sbi));
- set_sbi_flag(sbi, SBI_NEED_FSCK);
err = -EFSCORRUPTED;
}
@@ -4492,21 +4534,12 @@ static void destroy_free_segmap(struct f2fs_sb_info *sbi)
static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
struct sit_info *sit_i = SIT_I(sbi);
- unsigned int start;
if (!sit_i)
return;
- if (sit_i->sentries) {
- for (start = 0; start < MAIN_SEGS(sbi); start++) {
- kvfree(sit_i->sentries[start].cur_valid_map);
-#ifdef CONFIG_F2FS_CHECK_FS
- kvfree(sit_i->sentries[start].cur_valid_map_mir);
-#endif
- kvfree(sit_i->sentries[start].ckpt_valid_map);
- kvfree(sit_i->sentries[start].discard_map);
- }
- }
+ if (sit_i->sentries)
+ kvfree(sit_i->bitmap);
kvfree(sit_i->tmp_map);
kvfree(sit_i->sentries);
@@ -4517,6 +4550,7 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
kvfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
kvfree(sit_i->sit_bitmap_mir);
+ kvfree(sit_i->invalid_segmap);
#endif
kvfree(sit_i);
}
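The build_sit_info()/destroy_sit_info() hunks above replace the per-segment bitmap allocations with one large allocation that is sliced into per-entry pointers, so teardown becomes a single kvfree(). A small, self-contained sketch of the same slicing pattern (names are illustrative, not f2fs code):

#include <stdlib.h>

#define MAP_SIZE	64	/* bytes per bitmap; stands in for SIT_VBLOCK_MAP_SIZE */

struct entry {
	char *cur_valid_map;
	char *ckpt_valid_map;
	char *discard_map;
};

/* One backing allocation for all entries; release it with free(base). */
static char *setup_entries(struct entry *ents, size_t nr)
{
	char *base = calloc(nr, 3 * MAP_SIZE);
	char *p = base;
	size_t i;

	if (!base)
		return NULL;
	for (i = 0; i < nr; i++) {
		ents[i].cur_valid_map = p;	p += MAP_SIZE;
		ents[i].ckpt_valid_map = p;	p += MAP_SIZE;
		ents[i].discard_map = p;	p += MAP_SIZE;
	}
	return base;
}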
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index b74602813a05..a95467b202ea 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -226,9 +226,13 @@ struct sit_info {
block_t sit_base_addr; /* start block address of SIT area */
block_t sit_blocks; /* # of blocks used by SIT area */
block_t written_valid_blocks; /* # of valid blocks in main area */
+ char *bitmap; /* all bitmaps pointer */
char *sit_bitmap; /* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
char *sit_bitmap_mir; /* SIT bitmap mirror */
+
+ /* bitmap of segments to be ignored by GC in case of errors */
+ unsigned long *invalid_segmap;
#endif
unsigned int bitmap_size; /* SIT bitmap size */
@@ -309,6 +313,8 @@ struct sit_entry_set {
*/
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
+ if (type == CURSEG_COLD_DATA_PINNED)
+ type = CURSEG_COLD_DATA;
return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}
@@ -582,13 +588,13 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
reserved_sections(sbi) + needed);
}
-static inline int f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
+static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{
if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
- return 0;
+ return true;
if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
- return 0;
- return -ENOSPC;
+ return true;
+ return false;
}
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 6de6cda44031..5111e1ffe58a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -23,6 +23,7 @@
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
+#include <linux/unicode.h>
#include "f2fs.h"
#include "node.h"
@@ -222,6 +223,36 @@ void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
va_end(args);
}
+#ifdef CONFIG_UNICODE
+static const struct f2fs_sb_encodings {
+ __u16 magic;
+ char *name;
+ char *version;
+} f2fs_sb_encoding_map[] = {
+ {F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
+};
+
+static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
+ const struct f2fs_sb_encodings **encoding,
+ __u16 *flags)
+{
+ __u16 magic = le16_to_cpu(sb->s_encoding);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
+ if (magic == f2fs_sb_encoding_map[i].magic)
+ break;
+
+ if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
+ return -EINVAL;
+
+ *encoding = &f2fs_sb_encoding_map[i];
+ *flags = le16_to_cpu(sb->s_encoding_flags);
+
+ return 0;
+}
+#endif
+
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
block_t limit = min((sbi->user_block_count << 1) / 1000,
@@ -798,6 +829,13 @@ static int parse_options(struct super_block *sb, char *options)
return -EINVAL;
}
#endif
+#ifndef CONFIG_UNICODE
+ if (f2fs_sb_has_casefold(sbi)) {
+ f2fs_err(sbi,
+ "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
+ return -EINVAL;
+ }
+#endif
if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
@@ -873,7 +911,21 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
static int f2fs_drop_inode(struct inode *inode)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int ret;
+
+ /*
+ * during filesystem shutdown, if checkpoint is disabled,
+ * drop useless meta/node dirty pages.
+ */
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ if (inode->i_ino == F2FS_NODE_INO(sbi) ||
+ inode->i_ino == F2FS_META_INO(sbi)) {
+ trace_f2fs_drop_inode(inode, 1);
+ return 1;
+ }
+ }
+
/*
* This is to avoid a deadlock condition like below.
* writeback_single_inode(inode)
@@ -913,6 +965,8 @@ static int f2fs_drop_inode(struct inode *inode)
return 0;
}
ret = generic_drop_inode(inode);
+ if (!ret)
+ ret = fscrypt_drop_inode(inode);
trace_f2fs_drop_inode(inode, ret);
return ret;
}
@@ -1089,6 +1143,9 @@ static void f2fs_put_super(struct super_block *sb)
destroy_percpu_info(sbi);
for (i = 0; i < NR_PAGE_TYPE; i++)
kvfree(sbi->write_io[i]);
+#ifdef CONFIG_UNICODE
+ utf8_unload(sbi->s_encoding);
+#endif
kvfree(sbi);
}
@@ -1156,9 +1213,13 @@ static int f2fs_statfs_project(struct super_block *sb,
return PTR_ERR(dquot);
spin_lock(&dquot->dq_dqb_lock);
- limit = (dquot->dq_dqb.dqb_bsoftlimit ?
- dquot->dq_dqb.dqb_bsoftlimit :
- dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
+ limit = 0;
+ if (dquot->dq_dqb.dqb_bsoftlimit)
+ limit = dquot->dq_dqb.dqb_bsoftlimit;
+ if (dquot->dq_dqb.dqb_bhardlimit &&
+ (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
+ limit = dquot->dq_dqb.dqb_bhardlimit;
+
if (limit && buf->f_blocks > limit) {
curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
buf->f_blocks = limit;
@@ -1167,9 +1228,13 @@ static int f2fs_statfs_project(struct super_block *sb,
(buf->f_blocks - curblock) : 0;
}
- limit = dquot->dq_dqb.dqb_isoftlimit ?
- dquot->dq_dqb.dqb_isoftlimit :
- dquot->dq_dqb.dqb_ihardlimit;
+ limit = 0;
+ if (dquot->dq_dqb.dqb_isoftlimit)
+ limit = dquot->dq_dqb.dqb_isoftlimit;
+ if (dquot->dq_dqb.dqb_ihardlimit &&
+ (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
+ limit = dquot->dq_dqb.dqb_ihardlimit;
+
if (limit && buf->f_files > limit) {
buf->f_files = limit;
buf->f_ffree =
@@ -1214,8 +1279,7 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
else
buf->f_bavail = 0;
- avail_node_count = sbi->total_node_count - sbi->nquota_files -
- F2FS_RESERVED_NODE_NUM;
+ avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
if (avail_node_count > user_block_count) {
buf->f_files = user_block_count;
@@ -1522,6 +1586,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
+ bool no_io_align = !F2FS_IO_ALIGNED(sbi);
bool checkpoint_changed;
#ifdef CONFIG_QUOTA
int i, j;
@@ -1601,6 +1666,12 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
+ err = -EINVAL;
+ f2fs_warn(sbi, "switch io_bits option is not allowed");
+ goto restore_opts;
+ }
+
if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
err = -EINVAL;
f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
@@ -1869,7 +1940,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
/* Don't account quota for quota files to avoid recursion */
qf_inode->i_flags |= S_NOQUOTA;
- err = dquot_enable(qf_inode, type, format_id, flags);
+ err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
iput(qf_inode);
return err;
}
@@ -1979,6 +2050,12 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
struct inode *inode;
int err;
+ /* if quota sysfile exists, deny enabling quota with specific file */
+ if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
+ f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
+ return -EBUSY;
+ }
+
err = f2fs_quota_sync(sb, type);
if (err)
return err;
@@ -1998,7 +2075,7 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
return 0;
}
-static int f2fs_quota_off(struct super_block *sb, int type)
+static int __f2fs_quota_off(struct super_block *sb, int type)
{
struct inode *inode = sb_dqopt(sb)->files[type];
int err;
@@ -2024,13 +2101,30 @@ out_put:
return err;
}
+static int f2fs_quota_off(struct super_block *sb, int type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int err;
+
+ err = __f2fs_quota_off(sb, type);
+
+ /*
+	 * quotactl can shut down journalled quota, which lets subsequent
+	 * updates leave quota records and fs data inconsistent; set the
+	 * flag so that fsck is aware of it.
+ */
+ if (is_journalled_quota(sbi))
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ return err;
+}
+
void f2fs_quota_off_umount(struct super_block *sb)
{
int type;
int err;
for (type = 0; type < MAXQUOTAS; type++) {
- err = f2fs_quota_off(sb, type);
+ err = __f2fs_quota_off(sb, type);
if (err) {
int ret = dquot_quota_off(sb, type);
@@ -2222,13 +2316,27 @@ static bool f2fs_dummy_context(struct inode *inode)
return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
}
+static bool f2fs_has_stable_inodes(struct super_block *sb)
+{
+ return true;
+}
+
+static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
+ int *ino_bits_ret, int *lblk_bits_ret)
+{
+ *ino_bits_ret = 8 * sizeof(nid_t);
+ *lblk_bits_ret = 8 * sizeof(block_t);
+}
+
static const struct fscrypt_operations f2fs_cryptops = {
- .key_prefix = "f2fs:",
- .get_context = f2fs_get_context,
- .set_context = f2fs_set_context,
- .dummy_context = f2fs_dummy_context,
- .empty_dir = f2fs_empty_dir,
- .max_namelen = F2FS_NAME_LEN,
+ .key_prefix = "f2fs:",
+ .get_context = f2fs_get_context,
+ .set_context = f2fs_set_context,
+ .dummy_context = f2fs_dummy_context,
+ .empty_dir = f2fs_empty_dir,
+ .max_namelen = F2FS_NAME_LEN,
+ .has_stable_inodes = f2fs_has_stable_inodes,
+ .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
};
#endif
@@ -2422,6 +2530,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
size_t crc_offset = 0;
__u32 crc = 0;
+ if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
+ f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
+ F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
+ return -EINVAL;
+ }
+
/* Check checksum_offset and crc in superblock */
if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
crc_offset = le32_to_cpu(raw_super->checksum_offset);
@@ -2429,26 +2543,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
offsetof(struct f2fs_super_block, crc)) {
f2fs_info(sbi, "Invalid SB checksum offset: %zu",
crc_offset);
- return 1;
+ return -EFSCORRUPTED;
}
crc = le32_to_cpu(raw_super->crc);
if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
- return 1;
+ return -EFSCORRUPTED;
}
}
- if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
- f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
- F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
- return 1;
- }
-
/* Currently, support only 4KB page cache size */
if (F2FS_BLKSIZE != PAGE_SIZE) {
f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
PAGE_SIZE);
- return 1;
+ return -EFSCORRUPTED;
}
/* Currently, support only 4KB block size */
@@ -2456,14 +2564,14 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
if (blocksize != F2FS_BLKSIZE) {
f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
blocksize);
- return 1;
+ return -EFSCORRUPTED;
}
/* check log blocks per segment */
if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
f2fs_info(sbi, "Invalid log blocks per segment (%u)",
le32_to_cpu(raw_super->log_blocks_per_seg));
- return 1;
+ return -EFSCORRUPTED;
}
/* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2473,7 +2581,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
F2FS_MIN_LOG_SECTOR_SIZE) {
f2fs_info(sbi, "Invalid log sectorsize (%u)",
le32_to_cpu(raw_super->log_sectorsize));
- return 1;
+ return -EFSCORRUPTED;
}
if (le32_to_cpu(raw_super->log_sectors_per_block) +
le32_to_cpu(raw_super->log_sectorsize) !=
@@ -2481,7 +2589,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
le32_to_cpu(raw_super->log_sectors_per_block),
le32_to_cpu(raw_super->log_sectorsize));
- return 1;
+ return -EFSCORRUPTED;
}
segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2495,7 +2603,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
if (segment_count > F2FS_MAX_SEGMENT ||
segment_count < F2FS_MIN_SEGMENTS) {
f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
- return 1;
+ return -EFSCORRUPTED;
}
if (total_sections > segment_count ||
@@ -2503,25 +2611,40 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
segs_per_sec > segment_count || !segs_per_sec) {
f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
segment_count, total_sections, segs_per_sec);
- return 1;
+ return -EFSCORRUPTED;
}
if ((segment_count / segs_per_sec) < total_sections) {
f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
segment_count, segs_per_sec, total_sections);
- return 1;
+ return -EFSCORRUPTED;
}
if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
segment_count, le64_to_cpu(raw_super->block_count));
- return 1;
+ return -EFSCORRUPTED;
+ }
+
+ if (RDEV(0).path[0]) {
+ block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
+ int i = 1;
+
+ while (i < MAX_DEVICES && RDEV(i).path[0]) {
+ dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
+ i++;
+ }
+ if (segment_count != dev_seg_count) {
+ f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
+ segment_count, dev_seg_count);
+ return -EFSCORRUPTED;
+ }
}
if (secs_per_zone > total_sections || !secs_per_zone) {
f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
secs_per_zone, total_sections);
- return 1;
+ return -EFSCORRUPTED;
}
if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
@@ -2531,7 +2654,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
le32_to_cpu(raw_super->extension_count),
raw_super->hot_ext_count,
F2FS_MAX_EXTENSION);
- return 1;
+ return -EFSCORRUPTED;
}
if (le32_to_cpu(raw_super->cp_payload) >
@@ -2539,7 +2662,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
f2fs_info(sbi, "Insane cp_payload (%u > %u)",
le32_to_cpu(raw_super->cp_payload),
blocks_per_seg - F2FS_CP_PACKS);
- return 1;
+ return -EFSCORRUPTED;
}
/* check reserved ino info */
@@ -2550,12 +2673,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
le32_to_cpu(raw_super->node_ino),
le32_to_cpu(raw_super->meta_ino),
le32_to_cpu(raw_super->root_ino));
- return 1;
+ return -EFSCORRUPTED;
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
if (sanity_check_area_boundary(sbi, bh))
- return 1;
+ return -EFSCORRUPTED;
return 0;
}
@@ -2615,8 +2738,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
}
valid_node_count = le32_to_cpu(ckpt->valid_node_count);
- avail_node_count = sbi->total_node_count - sbi->nquota_files -
- F2FS_RESERVED_NODE_NUM;
+ avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
if (valid_node_count > avail_node_count) {
f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
valid_node_count, avail_node_count);
@@ -2655,10 +2777,10 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
}
}
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
- for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
+ for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
le32_to_cpu(ckpt->cur_data_segno[j])) {
- f2fs_err(sbi, "Data segment (%u) and Data segment (%u) has the same segno: %u",
+ f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
i, j,
le32_to_cpu(ckpt->cur_node_segno[i]));
return 1;
@@ -2753,6 +2875,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
spin_lock_init(&sbi->dev_lock);
init_rwsem(&sbi->sb_lock);
+ init_rwsem(&sbi->pin_sem);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -2772,15 +2895,21 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
}
#ifdef CONFIG_BLK_DEV_ZONED
+static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct f2fs_dev_info *dev = data;
+
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
+ set_bit(idx, dev->blkz_seq);
+ return 0;
+}
+
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
struct block_device *bdev = FDEV(devi).bdev;
sector_t nr_sectors = bdev->bd_part->nr_sects;
- sector_t sector = 0;
- struct blk_zone *zones;
- unsigned int i, nr_zones;
- unsigned int n = 0;
- int err = -EIO;
+ int ret;
if (!f2fs_sb_has_blkzoned(sbi))
return 0;
@@ -2805,38 +2934,13 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (!FDEV(devi).blkz_seq)
return -ENOMEM;
-#define F2FS_REPORT_NR_ZONES 4096
-
- zones = f2fs_kzalloc(sbi,
- array_size(F2FS_REPORT_NR_ZONES,
- sizeof(struct blk_zone)),
- GFP_KERNEL);
- if (!zones)
- return -ENOMEM;
-
/* Get block zones type */
- while (zones && sector < nr_sectors) {
-
- nr_zones = F2FS_REPORT_NR_ZONES;
- err = blkdev_report_zones(bdev, sector, zones, &nr_zones);
- if (err)
- break;
- if (!nr_zones) {
- err = -EIO;
- break;
- }
-
- for (i = 0; i < nr_zones; i++) {
- if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
- set_bit(n, FDEV(devi).blkz_seq);
- sector += zones[i].len;
- n++;
- }
- }
-
- kvfree(zones);
+ ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
+ &FDEV(devi));
+ if (ret < 0)
+ return ret;
- return err;
+ return 0;
}
#endif
@@ -2866,15 +2970,17 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
f2fs_err(sbi, "Unable to read %dth superblock",
block + 1);
err = -EIO;
+ *recovery = 1;
continue;
}
/* sanity checking of raw super */
- if (sanity_check_raw_super(sbi, bh)) {
+ err = sanity_check_raw_super(sbi, bh);
+ if (err) {
f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
block + 1);
- err = -EFSCORRUPTED;
brelse(bh);
+ *recovery = 1;
continue;
}
@@ -2887,10 +2993,6 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
brelse(bh);
}
- /* Fail to read any one of the superblocks*/
- if (err < 0)
- *recovery = 1;
-
/* No valid superblock */
if (!*raw_super)
kvfree(super);
@@ -3031,6 +3133,53 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
return 0;
}
+static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
+{
+#ifdef CONFIG_UNICODE
+ if (f2fs_sb_has_casefold(sbi) && !sbi->s_encoding) {
+ const struct f2fs_sb_encodings *encoding_info;
+ struct unicode_map *encoding;
+ __u16 encoding_flags;
+
+ if (f2fs_sb_has_encrypt(sbi)) {
+ f2fs_err(sbi,
+ "Can't mount with encoding and encryption");
+ return -EINVAL;
+ }
+
+ if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
+ &encoding_flags)) {
+ f2fs_err(sbi,
+ "Encoding requested by superblock is unknown");
+ return -EINVAL;
+ }
+
+ encoding = utf8_load(encoding_info->version);
+ if (IS_ERR(encoding)) {
+ f2fs_err(sbi,
+ "can't mount with superblock charset: %s-%s "
+ "not supported by the kernel. flags: 0x%x.",
+ encoding_info->name, encoding_info->version,
+ encoding_flags);
+ return PTR_ERR(encoding);
+ }
+ f2fs_info(sbi, "Using encoding defined by superblock: "
+ "%s-%s with flags 0x%hx", encoding_info->name,
+ encoding_info->version?:"\b", encoding_flags);
+
+ sbi->s_encoding = encoding;
+ sbi->s_encoding_flags = encoding_flags;
+ sbi->sb->s_d_op = &f2fs_dentry_ops;
+ }
+#else
+ if (f2fs_sb_has_casefold(sbi)) {
+ f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
+ return -EINVAL;
+ }
+#endif
+ return 0;
+}
+
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
struct f2fs_sm_info *sm_i = SM_I(sbi);
@@ -3127,6 +3276,10 @@ try_onemore:
le32_to_cpu(raw_super->log_blocksize);
sb->s_max_links = F2FS_LINK_MAX;
+ err = f2fs_setup_casefold(sbi);
+ if (err)
+ goto free_options;
+
#ifdef CONFIG_QUOTA
sb->dq_op = &f2fs_quota_operations;
sb->s_qcop = &f2fs_quotactl_ops;
@@ -3144,6 +3297,9 @@ try_onemore:
#ifdef CONFIG_FS_ENCRYPTION
sb->s_cop = &f2fs_cryptops;
#endif
+#ifdef CONFIG_FS_VERITY
+ sb->s_vop = &f2fs_verityops;
+#endif
sb->s_xattr = f2fs_xattr_handlers;
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
@@ -3190,6 +3346,8 @@ try_onemore:
sbi->write_io[i][j].bio = NULL;
spin_lock_init(&sbi->write_io[i][j].io_lock);
INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
+ INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
+ init_rwsem(&sbi->write_io[i][j].bio_list_lock);
}
}
@@ -3202,7 +3360,7 @@ try_onemore:
if (err)
goto free_bio_info;
- if (F2FS_IO_SIZE(sbi) > 1) {
+ if (F2FS_IO_ALIGNED(sbi)) {
sbi->write_io_dummy =
mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
if (!sbi->write_io_dummy) {
@@ -3477,6 +3635,10 @@ free_percpu:
free_bio_info:
for (i = 0; i < NR_PAGE_TYPE; i++)
kvfree(sbi->write_io[i]);
+
+#ifdef CONFIG_UNICODE
+ utf8_unload(sbi->s_encoding);
+#endif
free_options:
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
@@ -3597,8 +3759,13 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_post_read_processing();
if (err)
goto free_root_stats;
+ err = f2fs_init_bio_entry_cache();
+ if (err)
+ goto free_post_read;
return 0;
+free_post_read:
+ f2fs_destroy_post_read_processing();
free_root_stats:
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
@@ -3622,6 +3789,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
+ f2fs_destroy_bio_entry_cache();
f2fs_destroy_post_read_processing();
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
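Among the super.c changes above, the f2fs_statfs_project() hunks switch the reported quota limit from "soft limit if set, else hard limit" to the smaller of the two configured limits. The intent is easier to see written as a helper; this is only an illustration of the selection logic, not part of the patch:

/* Pick the effective limit: the smaller of the non-zero soft/hard limits. */
static unsigned long long effective_limit(unsigned long long soft,
					  unsigned long long hard)
{
	unsigned long long limit = 0;

	if (soft)
		limit = soft;
	if (hard && (!limit || hard < limit))
		limit = hard;
	return limit;	/* 0 means no limit configured */
}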
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 3aeacd0aacfd..70945ceb9c0c 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -10,6 +10,7 @@
#include <linux/proc_fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>
+#include <linux/unicode.h>
#include "f2fs.h"
#include "segment.h"
@@ -81,6 +82,19 @@ static ssize_t unusable_show(struct f2fs_attr *a,
(unsigned long long)unusable);
}
+static ssize_t encoding_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+#ifdef CONFIG_UNICODE
+ if (f2fs_sb_has_casefold(sbi))
+ return snprintf(buf, PAGE_SIZE, "%s (%d.%d.%d)\n",
+ sbi->s_encoding->charset,
+ (sbi->s_encoding->version >> 16) & 0xff,
+ (sbi->s_encoding->version >> 8) & 0xff,
+ sbi->s_encoding->version & 0xff);
+#endif
+ return snprintf(buf, PAGE_SIZE, "(none)");
+}
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
@@ -131,9 +145,17 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_lost_found(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "lost_found");
+ if (f2fs_sb_has_verity(sbi))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "verity");
if (f2fs_sb_has_sb_chksum(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "sb_checksum");
+ if (f2fs_sb_has_casefold(sbi))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "casefold");
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "pin_file");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -364,7 +386,9 @@ enum feat_id {
FEAT_QUOTA_INO,
FEAT_INODE_CRTIME,
FEAT_LOST_FOUND,
+ FEAT_VERITY,
FEAT_SB_CHECKSUM,
+ FEAT_CASEFOLD,
};
static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -381,7 +405,9 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
case FEAT_QUOTA_INO:
case FEAT_INODE_CRTIME:
case FEAT_LOST_FOUND:
+ case FEAT_VERITY:
case FEAT_SB_CHECKSUM:
+ case FEAT_CASEFOLD:
return snprintf(buf, PAGE_SIZE, "supported\n");
}
return 0;
@@ -419,6 +445,7 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle, gc_mode);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent, gc_mode);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, main_blkaddr, main_blkaddr);
F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks);
@@ -455,6 +482,7 @@ F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
F2FS_GENERAL_RO_ATTR(features);
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
F2FS_GENERAL_RO_ATTR(unusable);
+F2FS_GENERAL_RO_ATTR(encoding);
#ifdef CONFIG_FS_ENCRYPTION
F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
@@ -470,7 +498,11 @@ F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LOST_FOUND);
+#ifdef CONFIG_FS_VERITY
+F2FS_FEATURE_RO_ATTR(verity, FEAT_VERITY);
+#endif
F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
+F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -481,6 +513,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_idle),
ATTR_LIST(gc_urgent),
ATTR_LIST(reclaim_segments),
+ ATTR_LIST(main_blkaddr),
ATTR_LIST(max_small_discards),
ATTR_LIST(discard_granularity),
ATTR_LIST(batched_trim_sections),
@@ -515,6 +548,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(features),
ATTR_LIST(reserved_blocks),
ATTR_LIST(current_reserved_blocks),
+ ATTR_LIST(encoding),
NULL,
};
ATTRIBUTE_GROUPS(f2fs);
@@ -534,7 +568,11 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(quota_ino),
ATTR_LIST(inode_crtime),
ATTR_LIST(lost_found),
+#ifdef CONFIG_FS_VERITY
+ ATTR_LIST(verity),
+#endif
ATTR_LIST(sb_checksum),
+ ATTR_LIST(casefold),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_feat);
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
new file mode 100644
index 000000000000..a401ef72bc82
--- /dev/null
+++ b/fs/f2fs/verity.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/f2fs/verity.c: fs-verity support for f2fs
+ *
+ * Copyright 2019 Google LLC
+ */
+
+/*
+ * Implementation of fsverity_operations for f2fs.
+ *
+ * Like ext4, f2fs stores the verity metadata (Merkle tree and
+ * fsverity_descriptor) past the end of the file, starting at the first 64K
+ * boundary beyond i_size. This approach works because (a) verity files are
+ * readonly, and (b) pages fully beyond i_size aren't visible to userspace but
+ * can be read/written internally by f2fs with only some relatively small
+ * changes to f2fs. Extended attributes cannot be used because (a) f2fs limits
+ * the total size of an inode's xattr entries to 4096 bytes, which wouldn't be
+ * enough for even a single Merkle tree block, and (b) f2fs encryption doesn't
+ * encrypt xattrs, yet the verity metadata *must* be encrypted when the file is
+ * because it contains hashes of the plaintext data.
+ *
+ * Using a 64K boundary rather than a 4K one keeps things ready for
+ * architectures with 64K pages, and it doesn't necessarily waste space on-disk
+ * since there can be a hole between i_size and the start of the Merkle tree.
+ */
+
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+#include "xattr.h"
+
+static inline loff_t f2fs_verity_metadata_pos(const struct inode *inode)
+{
+ return round_up(inode->i_size, 65536);
+}
+
+/*
+ * Read some verity metadata from the inode. __vfs_read() can't be used because
+ * we need to read beyond i_size.
+ */
+static int pagecache_read(struct inode *inode, void *buf, size_t count,
+ loff_t pos)
+{
+ while (count) {
+ size_t n = min_t(size_t, count,
+ PAGE_SIZE - offset_in_page(pos));
+ struct page *page;
+ void *addr;
+
+ page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
+ NULL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ addr = kmap_atomic(page);
+ memcpy(buf, addr + offset_in_page(pos), n);
+ kunmap_atomic(addr);
+
+ put_page(page);
+
+ buf += n;
+ pos += n;
+ count -= n;
+ }
+ return 0;
+}
+
+/*
+ * Write some verity metadata to the inode for FS_IOC_ENABLE_VERITY.
+ * kernel_write() can't be used because the file descriptor is readonly.
+ */
+static int pagecache_write(struct inode *inode, const void *buf, size_t count,
+ loff_t pos)
+{
+ if (pos + count > inode->i_sb->s_maxbytes)
+ return -EFBIG;
+
+ while (count) {
+ size_t n = min_t(size_t, count,
+ PAGE_SIZE - offset_in_page(pos));
+ struct page *page;
+ void *fsdata;
+ void *addr;
+ int res;
+
+ res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
+ &page, &fsdata);
+ if (res)
+ return res;
+
+ addr = kmap_atomic(page);
+ memcpy(addr + offset_in_page(pos), buf, n);
+ kunmap_atomic(addr);
+
+ res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
+ page, fsdata);
+ if (res < 0)
+ return res;
+ if (res != n)
+ return -EIO;
+
+ buf += n;
+ pos += n;
+ count -= n;
+ }
+ return 0;
+}
+
+/*
+ * Format of f2fs verity xattr. This points to the location of the verity
+ * descriptor within the file data rather than containing it directly because
+ * the verity descriptor *must* be encrypted when f2fs encryption is used. But,
+ * f2fs encryption does not encrypt xattrs.
+ */
+struct fsverity_descriptor_location {
+ __le32 version;
+ __le32 size;
+ __le64 pos;
+};
+
+static int f2fs_begin_enable_verity(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+ int err;
+
+ if (f2fs_verity_in_progress(inode))
+ return -EBUSY;
+
+ if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
+ return -EOPNOTSUPP;
+
+ /*
+ * Since the file was opened readonly, we have to initialize the quotas
+ * here and not rely on ->open() doing it. This must be done before
+ * evicting the inline data.
+ */
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+
+ set_inode_flag(inode, FI_VERITY_IN_PROGRESS);
+ return 0;
+}
+
+static int f2fs_end_enable_verity(struct file *filp, const void *desc,
+ size_t desc_size, u64 merkle_tree_size)
+{
+ struct inode *inode = file_inode(filp);
+ u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
+ struct fsverity_descriptor_location dloc = {
+ .version = cpu_to_le32(1),
+ .size = cpu_to_le32(desc_size),
+ .pos = cpu_to_le64(desc_pos),
+ };
+ int err = 0;
+
+ if (desc != NULL) {
+ /* Succeeded; write the verity descriptor. */
+ err = pagecache_write(inode, desc, desc_size, desc_pos);
+
+ /* Write all pages before clearing FI_VERITY_IN_PROGRESS. */
+ if (!err)
+ err = filemap_write_and_wait(inode->i_mapping);
+ }
+
+ /* If we failed, truncate anything we wrote past i_size. */
+ if (desc == NULL || err)
+ f2fs_truncate(inode);
+
+ clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
+
+ if (desc != NULL && !err) {
+ err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
+ F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
+ NULL, XATTR_CREATE);
+ if (!err) {
+ file_set_verity(inode);
+ f2fs_set_inode_flags(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+ }
+ return err;
+}
+
+static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
+ size_t buf_size)
+{
+ struct fsverity_descriptor_location dloc;
+ int res;
+ u32 size;
+ u64 pos;
+
+ /* Get the descriptor location */
+ res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_VERITY,
+ F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc), NULL);
+ if (res < 0 && res != -ERANGE)
+ return res;
+ if (res != sizeof(dloc) || dloc.version != cpu_to_le32(1)) {
+ f2fs_warn(F2FS_I_SB(inode), "unknown verity xattr format");
+ return -EINVAL;
+ }
+ size = le32_to_cpu(dloc.size);
+ pos = le64_to_cpu(dloc.pos);
+
+ /* Get the descriptor */
+ if (pos + size < pos || pos + size > inode->i_sb->s_maxbytes ||
+ pos < f2fs_verity_metadata_pos(inode) || size > INT_MAX) {
+ f2fs_warn(F2FS_I_SB(inode), "invalid verity xattr");
+ return -EFSCORRUPTED;
+ }
+ if (buf_size) {
+ if (size > buf_size)
+ return -ERANGE;
+ res = pagecache_read(inode, buf, size, pos);
+ if (res)
+ return res;
+ }
+ return size;
+}
+
+static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
+ pgoff_t index)
+{
+ index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
+
+ return read_mapping_page(inode->i_mapping, index, NULL);
+}
+
+static int f2fs_write_merkle_tree_block(struct inode *inode, const void *buf,
+ u64 index, int log_blocksize)
+{
+ loff_t pos = f2fs_verity_metadata_pos(inode) + (index << log_blocksize);
+
+ return pagecache_write(inode, buf, 1 << log_blocksize, pos);
+}
+
+const struct fsverity_operations f2fs_verityops = {
+ .begin_enable_verity = f2fs_begin_enable_verity,
+ .end_enable_verity = f2fs_end_enable_verity,
+ .get_verity_descriptor = f2fs_get_verity_descriptor,
+ .read_merkle_tree_page = f2fs_read_merkle_tree_page,
+ .write_merkle_tree_block = f2fs_write_merkle_tree_block,
+};
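The new f2fs_verityops are driven by the generic fs-verity ioctls rather than any f2fs-specific interface. A hedged user-space sketch of enabling verity on a file, assuming <linux/fsverity.h> is available and the filesystem was created with the verity feature:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fsverity.h>

/* The file must be fully written and opened read-only before enabling. */
int enable_verity(const char *path)
{
	struct fsverity_enable_arg arg;
	int fd, err;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	memset(&arg, 0, sizeof(arg));
	arg.version = 1;
	arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
	arg.block_size = 4096;

	err = ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
	close(fd);
	return err;
}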
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index b32c45621679..296b3189448a 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -21,6 +21,7 @@
#include <linux/posix_acl_xattr.h>
#include "f2fs.h"
#include "xattr.h"
+#include "segment.h"
static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
@@ -538,8 +539,9 @@ out:
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct inode *inode = d_inode(dentry);
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
struct f2fs_xattr_entry *entry;
- void *base_addr;
+ void *base_addr, *last_base_addr;
int error = 0;
size_t rest = buffer_size;
@@ -549,6 +551,8 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
if (error)
return error;
+ last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
list_for_each_xattr(entry, base_addr) {
const struct xattr_handler *handler =
f2fs_xattr_handler(entry->e_name_index);
@@ -556,6 +560,15 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
size_t prefix_len;
size_t size;
+ if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+ f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ error = -EFSCORRUPTED;
+ goto cleanup;
+ }
+
if (!handler || (handler->list && !handler->list(dentry)))
continue;
@@ -729,6 +742,11 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+ if (!f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
+
err = dquot_initialize(inode);
if (err)
return err;
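The f2fs_listxattr() change above bounds-checks each entry against the end of the on-disk xattr area before dereferencing it, so a corrupted image cannot walk the list past the buffer. A generic sketch of that walk-with-bounds pattern (the entry layout here is illustrative, not the f2fs on-disk format):

#include <stdint.h>

struct xent {			/* illustrative variable-length entry */
	uint8_t name_len;
	uint8_t name_index;
	uint16_t value_len;
	char data[];		/* name bytes followed by value bytes */
};

#define XENT_NEXT(e) \
	((struct xent *)((char *)(e) + sizeof(struct xent) + \
			 (e)->name_len + (e)->value_len))

/* Return 0 if every entry fits inside [base, end); -1 if corrupted. */
static int walk_entries(char *base, char *end)
{
	struct xent *e = (struct xent *)base;

	while ((char *)e + sizeof(*e) <= end && e->name_len) {
		if ((char *)XENT_NEXT(e) > end)
			return -1;	/* entry claims to extend past the block */
		e = XENT_NEXT(e);
	}
	return 0;
}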
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index a90920e2f949..de0c600b9cab 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -34,8 +34,10 @@
#define F2FS_XATTR_INDEX_ADVISE 7
/* Should be same as EXT4_XATTR_INDEX_ENCRYPTION */
#define F2FS_XATTR_INDEX_ENCRYPTION 9
+#define F2FS_XATTR_INDEX_VERITY 11
#define F2FS_XATTR_NAME_ENCRYPTION_CONTEXT "c"
+#define F2FS_XATTR_NAME_VERITY "v"
struct f2fs_xattr_header {
__le32 h_magic; /* magic number for identification */
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 1bda2ab6745b..054acd9fd033 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -88,9 +88,7 @@ static int fat__get_entry(struct inode *dir, loff_t *pos,
int err, offset;
next:
- if (*bh)
- brelse(*bh);
-
+ brelse(*bh);
*bh = NULL;
iblock = *pos >> sb->s_blocksize_bits;
err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0, false);
@@ -1100,8 +1098,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
err = -ENOMEM;
goto error;
}
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(bhs[n]);
memset(bhs[n]->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bhs[n]);
+ unlock_buffer(bhs[n]);
mark_buffer_dirty_inode(bhs[n], dir);
n++;
@@ -1158,6 +1159,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
de = (struct msdos_dir_entry *)bhs[0]->b_data;
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(bhs[0]);
/* filling the new directory slots ("." and ".." entries) */
memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
@@ -1180,6 +1183,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
de[0].size = de[1].size = 0;
memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
set_buffer_uptodate(bhs[0]);
+ unlock_buffer(bhs[0]);
mark_buffer_dirty_inode(bhs[0], dir);
err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
@@ -1237,11 +1241,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
/* fill the directory entry */
copy = min(size, sb->s_blocksize);
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(bhs[n]);
memcpy(bhs[n]->b_data, slots, copy);
- slots += copy;
- size -= copy;
set_buffer_uptodate(bhs[n]);
+ unlock_buffer(bhs[n]);
mark_buffer_dirty_inode(bhs[n], dir);
+ slots += copy;
+ size -= copy;
if (!size)
break;
n++;
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 265983635f2b..3647c65a0f48 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -388,8 +388,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
err = -ENOMEM;
goto error;
}
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(c_bh);
memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
set_buffer_uptodate(c_bh);
+ unlock_buffer(c_bh);
mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
if (sb->s_flags & SB_SYNCHRONOUS)
err = sync_dirty_buffer(c_bh);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 4614c0ba5f1c..bdc4503c00a3 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -172,15 +172,6 @@ long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
-#ifdef CONFIG_COMPAT
-static long fat_generic_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-
-{
- return fat_generic_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static int fat_file_release(struct inode *inode, struct file *filp)
{
if ((filp->f_mode & FMODE_WRITE) &&
@@ -215,9 +206,7 @@ const struct file_operations fat_file_operations = {
.mmap = generic_file_mmap,
.release = fat_file_release,
.unlocked_ioctl = fat_generic_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = fat_generic_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.fsync = fat_file_fsync,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
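
Dropping fat_generic_compat_ioctl() is safe because every argument fat_generic_ioctl() takes is a pointer, which is exactly the case the generic compat_ptr_ioctl() helper handles: it converts the 32-bit user pointer with compat_ptr() and forwards to ->unlocked_ioctl. Its behaviour is roughly the following (a trimmed sketch of the fs/ioctl.c helper):

/*
 * Roughly what the generic helper does; only correct for drivers and
 * filesystems whose ioctl arguments are all pointers.
 */
long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}
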
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 05689198f5af..5f04c5c810fb 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -31,6 +31,11 @@
#define KB_IN_SECTORS 2
+/* DOS dates from 1980/1/1 through 2107/12/31 */
+#define FAT_DATE_MIN (0<<9 | 1<<5 | 1)
+#define FAT_DATE_MAX (127<<9 | 12<<5 | 31)
+#define FAT_TIME_MAX (23<<11 | 59<<5 | 29)
+
/*
* A deserialized copy of the on-disk structure laid out in struct
* fat_boot_sector.
@@ -1605,6 +1610,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
int debug;
long error;
char buf[50];
+ struct timespec64 ts;
/*
* GFP_KERNEL is ok here, because while we do hold the
@@ -1698,6 +1704,12 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
sbi->free_clus_valid = 0;
sbi->prev_free = FAT_START_ENT;
sb->s_maxbytes = 0xffffffff;
+ fat_time_fat2unix(sbi, &ts, 0, cpu_to_le16(FAT_DATE_MIN), 0);
+ sb->s_time_min = ts.tv_sec;
+
+ fat_time_fat2unix(sbi, &ts, cpu_to_le16(FAT_TIME_MAX),
+ cpu_to_le16(FAT_DATE_MAX), 0);
+ sb->s_time_max = ts.tv_sec;
if (!sbi->fat_length && bpb.fat32_length) {
struct fat_boot_fsinfo *fsinfo;
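
The new limits fall straight out of the on-disk DOS format: a 16-bit date packs (year - 1980) in bits 15..9, month in bits 8..5 and day in bits 4..0, while a 16-bit time packs hours in bits 15..11, minutes in bits 10..5 and two-second units in bits 4..0, giving a representable range of 1980-01-01 through 2107-12-31 23:59:58. A small standalone check of the macro values (independent of the kernel headers):

#include <stdio.h>

#define FAT_DATE_MIN (0 << 9 | 1 << 5 | 1)	/* 1980-01-01 */
#define FAT_DATE_MAX (127 << 9 | 12 << 5 | 31)	/* 2107-12-31 */
#define FAT_TIME_MAX (23 << 11 | 59 << 5 | 29)	/* 23:59:58, two-second units */

int main(void)
{
	unsigned int dmin = FAT_DATE_MIN, dmax = FAT_DATE_MAX, tmax = FAT_TIME_MAX;

	printf("min date: %u-%02u-%02u\n",
	       1980 + (dmin >> 9), (dmin >> 5) & 0xf, dmin & 0x1f);
	printf("max date: %u-%02u-%02u\n",
	       1980 + (dmax >> 9), (dmax >> 5) & 0xf, dmax & 0x1f);
	printf("max time: %02u:%02u:%02u\n",
	       tmax >> 11, (tmax >> 5) & 0x3f, (tmax & 0x1f) * 2);
	return 0;
}
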
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 3d40771e8e7c..41b6438bd2d9 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -261,7 +261,7 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
static bool rw_hint_valid(enum rw_hint hint)
{
switch (hint) {
- case RWF_WRITE_LIFE_NOT_SET:
+ case RWH_WRITE_LIFE_NOT_SET:
case RWH_WRITE_LIFE_NONE:
case RWH_WRITE_LIFE_SHORT:
case RWH_WRITE_LIFE_MEDIUM:
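
The one-character fix matters because rw_hint_valid() gates what userspace may pass through fcntl(F_SET_RW_HINT); the hint is a 64-bit value taken by pointer, and the older RWF_WRITE_LIFE_NOT_SET spelling survives only as a uapi alias for RWH_WRITE_LIFE_NOT_SET. A hedged userspace sketch, with fallback constant values copied from the uapi header in case libc does not expose them:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT		(1024 + 12)	/* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT	2
#endif

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;	/* data expected to be overwritten soon */
	int fd = open("data.log", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* rw_hint_valid() is what rejects values outside the RWH_WRITE_LIFE_* set */
	if (fcntl(fd, F_SET_RW_HINT, &hint) == -1)
		perror("F_SET_RW_HINT");
	return 0;
}
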
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 0ee727485615..01263ffbc4c0 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -246,7 +246,7 @@ static long do_handle_open(int mountdirfd, struct file_handle __user *ufh,
* sys_open_by_handle_at: Open the file handle
* @mountdirfd: directory file descriptor
* @handle: file handle to be opened
- * @flag: open flags.
+ * @flags: open flags.
*
* @mountdirfd indicate the directory file descriptor
* of the mount point. file handle is decoded relative
diff --git a/fs/file_table.c b/fs/file_table.c
index b07b53f24ff5..30d55c9a1744 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -327,6 +327,7 @@ void flush_delayed_fput(void)
{
delayed_fput(NULL);
}
+EXPORT_SYMBOL_GPL(flush_delayed_fput);
static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index a89f68c3cbed..578a5062706e 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -229,6 +229,8 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
sbp->s_op = &vxfs_super_ops;
sbp->s_fs_info = infp;
+ sbp->s_time_min = 0;
+ sbp->s_time_max = U32_MAX;
if (!vxfs_try_sb_magic(sbp, silent, 1,
(__force __fs32)cpu_to_le32(VXFS_SUPER_MAGIC))) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 542b02d170f8..335607b8c5c0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -36,10 +36,6 @@
*/
#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
-struct wb_completion {
- atomic_t cnt;
-};
-
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
@@ -61,19 +57,6 @@ struct wb_writeback_work {
};
/*
- * If one wants to wait for one or more wb_writeback_works, each work's
- * ->done should be set to a wb_completion defined using the following
- * macro. Once all work items are issued with wb_queue_work(), the caller
- * can wait for the completion of all using wb_wait_for_completion(). Work
- * items which are waited upon aren't freed automatically on completion.
- */
-#define DEFINE_WB_COMPLETION_ONSTACK(cmpl) \
- struct wb_completion cmpl = { \
- .cnt = ATOMIC_INIT(1), \
- }
-
-
-/*
* If an inode is constantly having its pages dirtied, but then the
* updates stop dirtytime_expire_interval seconds in the past, it's
* possible for the worst case time between when an inode has its
@@ -181,8 +164,13 @@ static void finish_writeback_work(struct bdi_writeback *wb,
if (work->auto_free)
kfree(work);
- if (done && atomic_dec_and_test(&done->cnt))
- wake_up_all(&wb->bdi->wb_waitq);
+ if (done) {
+ wait_queue_head_t *waitq = done->waitq;
+
+ /* @done can't be accessed after the following dec */
+ if (atomic_dec_and_test(&done->cnt))
+ wake_up_all(waitq);
+ }
}
static void wb_queue_work(struct bdi_writeback *wb,
@@ -206,28 +194,44 @@ static void wb_queue_work(struct bdi_writeback *wb,
/**
* wb_wait_for_completion - wait for completion of bdi_writeback_works
- * @bdi: bdi work items were issued to
* @done: target wb_completion
*
* Wait for one or more work items issued to @bdi with their ->done field
- * set to @done, which should have been defined with
- * DEFINE_WB_COMPLETION_ONSTACK(). This function returns after all such
- * work items are completed. Work items which are waited upon aren't freed
+ * set to @done, which should have been initialized with
+ * DEFINE_WB_COMPLETION(). This function returns after all such work items
+ * are completed. Work items which are waited upon aren't freed
* automatically on completion.
*/
-static void wb_wait_for_completion(struct backing_dev_info *bdi,
- struct wb_completion *done)
+void wb_wait_for_completion(struct wb_completion *done)
{
atomic_dec(&done->cnt); /* put down the initial count */
- wait_event(bdi->wb_waitq, !atomic_read(&done->cnt));
+ wait_event(*done->waitq, !atomic_read(&done->cnt));
}
#ifdef CONFIG_CGROUP_WRITEBACK
-/* parameters for foreign inode detection, see wb_detach_inode() */
+/*
+ * Parameters for foreign inode detection, see wbc_detach_inode() to see
+ * how they're used.
+ *
+ * These parameters are inherently heuristic as the detection target
+ * itself is fuzzy. All we want to do is detach an inode from the
+ * current owner if it's being written to by some other cgroups too much.
+ *
+ * The current cgroup writeback is built on the assumption that multiple
+ * cgroups writing to the same inode concurrently is very rare and a mode
+ * of operation which isn't well supported. As such, the goal is not
+ * taking too long when a different cgroup takes over an inode while
+ * avoiding too aggressive flip-flops from occasional foreign writes.
+ *
+ * We record, very roughly, 2s worth of IO time history and if more than
+ * half of that is foreign, trigger the switch. The recording is quantized
+ * to 16 slots. To avoid tiny writes from swinging the decision too much,
+ * writes smaller than 1/8 of avg size are ignored.
+ */
#define WB_FRN_TIME_SHIFT 13 /* 1s = 2^13, upto 8 secs w/ 16bit */
#define WB_FRN_TIME_AVG_SHIFT 3 /* avg = avg * 7/8 + new * 1/8 */
-#define WB_FRN_TIME_CUT_DIV 2 /* ignore rounds < avg / 2 */
+#define WB_FRN_TIME_CUT_DIV 8 /* ignore rounds < avg / 8 */
#define WB_FRN_TIME_PERIOD (2 * (1 << WB_FRN_TIME_SHIFT)) /* 2s */
#define WB_FRN_HIST_SLOTS 16 /* inode->i_wb_frn_history is 16bit */
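
Put differently, each inode keeps a 16-bit history covering roughly the last two seconds of its writeback, sliced into 16 slots; rounds smaller than 1/8 of the average are ignored, and ownership switches once at least half of the recorded slots were written by a foreign cgroup. A simplified standalone sketch of the slot bookkeeping (the kernel weights slots by IO time rather than counting rounds one-for-one, so treat this only as an illustration of the quantized history):

#include <stdint.h>
#include <stdio.h>

#define HIST_SLOTS	16			/* bits of history kept per inode */
#define THR_SLOTS	(HIST_SLOTS / 2)	/* switch once half the slots are foreign */

static int popcount16(uint16_t v)
{
	int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	/* 1 = round dominated by a foreign cgroup, 0 = by the current owner */
	int rounds[] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1 };
	uint16_t history = 0;
	unsigned int i;

	for (i = 0; i < sizeof(rounds) / sizeof(rounds[0]); i++) {
		history = (uint16_t)((history << 1) | rounds[i]);
		printf("round %2u: history=%#06x foreign=%d%s\n",
		       i, (unsigned int)history, popcount16(history),
		       popcount16(history) >= THR_SLOTS ? "  -> switch wb" : "");
	}
	return 0;
}
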
@@ -237,6 +241,7 @@ static void wb_wait_for_completion(struct backing_dev_info *bdi,
/* if foreign slots >= 8, switch */
#define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
/* one round can affect upto 5 slots */
+#define WB_FRN_MAX_IN_FLIGHT 1024 /* don't queue too many concurrently */
static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *isw_wq;
@@ -389,6 +394,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
if (unlikely(inode->i_state & I_FREEING))
goto skip_switch;
+ trace_inode_switch_wbs(inode, old_wb, new_wb);
+
/*
* Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
* to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
@@ -489,18 +496,13 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (inode->i_state & I_WB_SWITCH)
return;
- /*
- * Avoid starting new switches while sync_inodes_sb() is in
- * progress. Otherwise, if the down_write protected issue path
- * blocks heavily, we might end up starting a large number of
- * switches which will block on the rwsem.
- */
- if (!down_read_trylock(&bdi->wb_switch_rwsem))
+ /* avoid queueing a new switch if too many are already in flight */
+ if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
return;
isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
if (!isw)
- goto out_unlock;
+ return;
/* find and pin the new wb */
rcu_read_lock();
@@ -534,15 +536,12 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
atomic_inc(&isw_nr_in_flight);
-
- goto out_unlock;
+ return;
out_free:
if (isw->new_wb)
wb_put(isw->new_wb);
kfree(isw);
-out_unlock:
- up_read(&bdi->wb_switch_rwsem);
}
/**
@@ -577,10 +576,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
spin_unlock(&inode->i_lock);
/*
- * A dying wb indicates that the memcg-blkcg mapping has changed
- * and a new wb is already serving the memcg. Switch immediately.
+ * A dying wb indicates that either the blkcg associated with the
+ * memcg changed or the associated memcg is dying. In the first
+ * case, a replacement wb should already be available and we should
+ * refresh the wb immediately. In the second case, trying to
+ * refresh will keep failing.
*/
- if (unlikely(wb_dying(wbc->wb)))
+ if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
inode_switch_wbs(inode, wbc->wb_id);
}
EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
@@ -681,6 +683,9 @@ void wbc_detach_inode(struct writeback_control *wbc)
if (wbc->wb_id != max_id)
history |= (1U << slots) - 1;
+ if (history)
+ trace_inode_foreign_history(inode, wbc, history);
+
/*
* Switch if the current wb isn't the consistent winner.
* If there are multiple closely competing dirtiers, the
@@ -843,7 +848,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
restart:
rcu_read_lock();
list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
- DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
+ DEFINE_WB_COMPLETION(fallback_work_done, bdi);
struct wb_writeback_work fallback_work;
struct wb_writeback_work *work;
long nr_pages;
@@ -890,7 +895,7 @@ restart:
last_wb = wb;
rcu_read_unlock();
- wb_wait_for_completion(bdi, &fallback_work_done);
+ wb_wait_for_completion(&fallback_work_done);
goto restart;
}
rcu_read_unlock();
@@ -900,6 +905,89 @@ restart:
}
/**
+ * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
+ * @bdi_id: target bdi id
+ * @memcg_id: target memcg css id
+ * @nr: number of pages to write, 0 for best-effort dirty flushing
+ * @reason: reason why some writeback work was initiated
+ * @done: target wb_completion
+ *
+ * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
+ * with the specified parameters.
+ */
+int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr,
+ enum wb_reason reason, struct wb_completion *done)
+{
+ struct backing_dev_info *bdi;
+ struct cgroup_subsys_state *memcg_css;
+ struct bdi_writeback *wb;
+ struct wb_writeback_work *work;
+ int ret;
+
+ /* lookup bdi and memcg */
+ bdi = bdi_get_by_id(bdi_id);
+ if (!bdi)
+ return -ENOENT;
+
+ rcu_read_lock();
+ memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
+ if (memcg_css && !css_tryget(memcg_css))
+ memcg_css = NULL;
+ rcu_read_unlock();
+ if (!memcg_css) {
+ ret = -ENOENT;
+ goto out_bdi_put;
+ }
+
+ /*
+ * And find the associated wb. If the wb isn't there already,
+ * there's nothing to flush, don't create one.
+ */
+ wb = wb_get_lookup(bdi, memcg_css);
+ if (!wb) {
+ ret = -ENOENT;
+ goto out_css_put;
+ }
+
+ /*
+ * If @nr is zero, the caller is attempting to write out most of
+ * the currently dirty pages. Let's take the current dirty page
+ * count and inflate it by 25% which should be large enough to
+ * flush out most dirty pages while avoiding getting livelocked by
+ * concurrent dirtiers.
+ */
+ if (!nr) {
+ unsigned long filepages, headroom, dirty, writeback;
+
+ mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
+ &writeback);
+ nr = dirty * 10 / 8;
+ }
+
+ /* issue the writeback work */
+ work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
+ if (work) {
+ work->nr_pages = nr;
+ work->sync_mode = WB_SYNC_NONE;
+ work->range_cyclic = 1;
+ work->reason = reason;
+ work->done = done;
+ work->auto_free = 1;
+ wb_queue_work(wb, work);
+ ret = 0;
+ } else {
+ ret = -ENOMEM;
+ }
+
+ wb_put(wb);
+out_css_put:
+ css_put(memcg_css);
+out_bdi_put:
+ bdi_put(bdi);
+ return ret;
+}
+
+/**
* cgroup_writeback_umount - flush inode wb switches for umount
*
* This function is called when a super_block is about to be destroyed and
@@ -2362,7 +2450,8 @@ static void wait_sb_inodes(struct super_block *sb)
static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
enum wb_reason reason, bool skip_if_busy)
{
- DEFINE_WB_COMPLETION_ONSTACK(done);
+ struct backing_dev_info *bdi = sb->s_bdi;
+ DEFINE_WB_COMPLETION(done, bdi);
struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_NONE,
@@ -2371,14 +2460,13 @@ static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
.nr_pages = nr,
.reason = reason,
};
- struct backing_dev_info *bdi = sb->s_bdi;
if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
- wb_wait_for_completion(bdi, &done);
+ wb_wait_for_completion(&done);
}
/**
@@ -2440,7 +2528,8 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb);
*/
void sync_inodes_sb(struct super_block *sb)
{
- DEFINE_WB_COMPLETION_ONSTACK(done);
+ struct backing_dev_info *bdi = sb->s_bdi;
+ DEFINE_WB_COMPLETION(done, bdi);
struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_ALL,
@@ -2450,7 +2539,6 @@ void sync_inodes_sb(struct super_block *sb)
.reason = WB_REASON_SYNC,
.for_sync = 1,
};
- struct backing_dev_info *bdi = sb->s_bdi;
/*
* Can't skip on !bdi_has_dirty() because we should wait for !dirty
@@ -2464,7 +2552,7 @@ void sync_inodes_sb(struct super_block *sb)
/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
bdi_down_write_wb_switch_rwsem(bdi);
bdi_split_work_to_wbs(bdi, &work, false);
- wb_wait_for_completion(bdi, &done);
+ wb_wait_for_completion(&done);
bdi_up_write_wb_switch_rwsem(bdi);
wait_sb_inodes(sb);
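
The wb_completion rework replaces the on-stack macro that carried only an atomic count with one that also records which waitqueue to wake, so wb_wait_for_completion() no longer needs the bdi passed alongside. Callers keep the shape shown in __writeback_inodes_sb_nr() above; roughly (a sketch of code living inside fs/fs-writeback.c, where wb_writeback_work and bdi_split_work_to_wbs() are defined, and assuming the 5.4-era DEFINE_WB_COMPLETION() that stores &bdi->wb_waitq):

static void flush_sb_and_wait(struct super_block *sb, long nr_pages)
{
	struct backing_dev_info *bdi = sb->s_bdi;
	DEFINE_WB_COMPLETION(done, bdi);	/* cnt = 1, waitq = &bdi->wb_waitq */
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.reason		= WB_REASON_SYNC,
		.done		= &done,	/* every queued item pins done->cnt */
	};

	bdi_split_work_to_wbs(bdi, &work, false);
	wb_wait_for_completion(&done);		/* drops the initial ref, sleeps until zero */
}
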
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 103643c68e3f..138b5b4d621d 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -279,10 +279,8 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
break;
case FS_CONTEXT_FOR_RECONFIGURE:
- /* We don't pin any namespaces as the superblock's
- * subscriptions cannot be changed at this point.
- */
atomic_inc(&reference->d_sb->s_active);
+ fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
fc->root = dget(reference);
break;
}
@@ -506,7 +504,6 @@ void put_fs_context(struct fs_context *fc)
put_net(fc->net_ns);
put_user_ns(fc->user_ns);
put_cred(fc->cred);
- kfree(fc->subtype);
put_fc_log(fc);
put_filesystem(fc->fs_type);
kfree(fc->source);
@@ -573,17 +570,6 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
return 0;
}
- if ((fc->fs_type->fs_flags & FS_HAS_SUBTYPE) &&
- strcmp(param->key, "subtype") == 0) {
- if (param->type != fs_value_is_string)
- return invalf(fc, "VFS: Legacy: Non-string subtype");
- if (fc->subtype)
- return invalf(fc, "VFS: Legacy: Multiple subtype");
- fc->subtype = param->string;
- param->string = NULL;
- return 0;
- }
-
if (ctx->param_type == LEGACY_FS_MONOLITHIC_PARAMS)
return invalf(fc, "VFS: Legacy: Can't mix monolithic and individual options");
@@ -740,8 +726,6 @@ void vfs_clean_context(struct fs_context *fc)
fc->s_fs_info = NULL;
fc->sb_flags = 0;
security_free_mnt_opts(&fc->security);
- kfree(fc->subtype);
- fc->subtype = NULL;
kfree(fc->source);
fc->source = NULL;
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index 460ea4206fa2..d1930adce68d 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -204,9 +204,23 @@ int fs_parse(struct fs_context *fc,
goto okay;
case fs_param_is_fd: {
- if (param->type != fs_value_is_file)
+ switch (param->type) {
+ case fs_value_is_string:
+ if (!result->has_value)
+ goto bad_value;
+
+ ret = kstrtouint(param->string, 0, &result->uint_32);
+ break;
+ case fs_value_is_file:
+ result->uint_32 = param->dirfd;
+ ret = 0;
+ default:
goto bad_value;
- goto okay;
+ }
+
+ if (result->uint_32 > INT_MAX)
+ goto bad_value;
+ goto maybe_okay;
}
case fs_param_is_blockdev:
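
The fs_parser hunk lets an fd-typed mount parameter arrive either as an already-open file (FSCONFIG_SET_FD, the only form accepted before) or as its decimal string form via FSCONFIG_SET_STRING, with the result range-checked against INT_MAX either way. A minimal userspace sketch against the raw syscalls, using fuse's "fd" parameter as the example (syscall numbers are the generic ones and the fallback defines mirror the uapi header):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>

#ifndef __NR_fsopen
#define __NR_fsopen	430
#define __NR_fsconfig	431
#endif
#ifndef FSCONFIG_SET_STRING
#define FSCONFIG_SET_STRING	1
#endif

int main(void)
{
	int fs_fd = syscall(__NR_fsopen, "fuse", 0);
	int dev_fd = open("/dev/fuse", O_RDWR);
	char val[16];

	if (fs_fd < 0 || dev_fd < 0)
		return 1;

	/* the fd-typed "fd" parameter may now be passed as a decimal string */
	snprintf(val, sizeof(val), "%d", dev_fd);
	if (syscall(__NR_fsconfig, fs_fd, FSCONFIG_SET_STRING, "fd", val, 0) == -1)
		perror("fsconfig");
	return 0;
}
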
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 24fc5a5c1b97..eb2a585572dc 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -27,3 +27,14 @@ config CUSE
If you want to develop or use a userspace character device
based on CUSE, answer Y or M.
+
+config VIRTIO_FS
+ tristate "Virtio Filesystem"
+ depends on FUSE_FS
+ select VIRTIO
+ help
+ The Virtio Filesystem allows guests to mount file systems from the
+ host.
+
+ If you want to share files between guests or with the host, answer Y
+ or M.
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 9485019c2a14..3e8cebfb59b7 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -5,5 +5,7 @@
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
+obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o
+virtiofs-y += virtio_fs.o
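
With VIRTIO_FS built, a guest mounts a host-exported directory by the tag configured on the host side (for example through virtiofsd and a vhost-user-fs device); no block device is involved, the tag simply takes the place of the source. Roughly, from inside the guest (the tag name "myfs" is an assumption):

#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
	/* the source is the virtiofs tag configured on the host, not a device */
	if (mount("myfs", "/mnt", "virtiofs", 0, NULL) == -1) {
		perror("mount virtiofs");
		return 1;
	}
	return 0;
}
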
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index bab7a0db81dd..00015d851382 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -142,11 +142,10 @@ static int cuse_open(struct inode *inode, struct file *file)
static int cuse_release(struct inode *inode, struct file *file)
{
- struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- fuse_sync_release(fi, ff, file->f_flags);
+ fuse_sync_release(NULL, ff, file->f_flags);
fuse_conn_put(fc);
return 0;
@@ -299,6 +298,14 @@ static void cuse_gendev_release(struct device *dev)
kfree(dev);
}
+struct cuse_init_args {
+ struct fuse_args_pages ap;
+ struct cuse_init_in in;
+ struct cuse_init_out out;
+ struct page *page;
+ struct fuse_page_desc desc;
+};
+
/**
* cuse_process_init_reply - finish initializing CUSE channel
*
@@ -306,21 +313,22 @@ static void cuse_gendev_release(struct device *dev)
* required data structures for it. Please read the comment at the
* top of this file for high level overview.
*/
-static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
+static void cuse_process_init_reply(struct fuse_conn *fc,
+ struct fuse_args *args, int error)
{
+ struct cuse_init_args *ia = container_of(args, typeof(*ia), ap.args);
+ struct fuse_args_pages *ap = &ia->ap;
struct cuse_conn *cc = fc_to_cc(fc), *pos;
- struct cuse_init_out *arg = req->out.args[0].value;
- struct page *page = req->pages[0];
+ struct cuse_init_out *arg = &ia->out;
+ struct page *page = ap->pages[0];
struct cuse_devinfo devinfo = { };
struct device *dev;
struct cdev *cdev;
dev_t devt;
int rc, i;
- if (req->out.h.error ||
- arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) {
+ if (error || arg->major != FUSE_KERNEL_VERSION || arg->minor < 11)
goto err;
- }
fc->minor = arg->minor;
fc->max_read = max_t(unsigned, arg->max_read, 4096);
@@ -329,7 +337,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
/* parse init reply */
cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL;
- rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size,
+ rc = cuse_parse_devinfo(page_address(page), ap->args.out_args[1].size,
&devinfo);
if (rc)
goto err;
@@ -396,7 +404,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
dev_set_uevent_suppress(dev, 0);
kobject_uevent(&dev->kobj, KOBJ_ADD);
out:
- kfree(arg);
+ kfree(ia);
__free_page(page);
return;
@@ -415,55 +423,49 @@ err:
static int cuse_send_init(struct cuse_conn *cc)
{
int rc;
- struct fuse_req *req;
struct page *page;
struct fuse_conn *fc = &cc->fc;
- struct cuse_init_in *arg;
- void *outarg;
+ struct cuse_init_args *ia;
+ struct fuse_args_pages *ap;
BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
- req = fuse_get_req_for_background(fc, 1);
- if (IS_ERR(req)) {
- rc = PTR_ERR(req);
- goto err;
- }
-
rc = -ENOMEM;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
- goto err_put_req;
+ goto err;
- outarg = kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL);
- if (!outarg)
+ ia = kzalloc(sizeof(*ia), GFP_KERNEL);
+ if (!ia)
goto err_free_page;
- arg = &req->misc.cuse_init_in;
- arg->major = FUSE_KERNEL_VERSION;
- arg->minor = FUSE_KERNEL_MINOR_VERSION;
- arg->flags |= CUSE_UNRESTRICTED_IOCTL;
- req->in.h.opcode = CUSE_INIT;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(struct cuse_init_in);
- req->in.args[0].value = arg;
- req->out.numargs = 2;
- req->out.args[0].size = sizeof(struct cuse_init_out);
- req->out.args[0].value = outarg;
- req->out.args[1].size = CUSE_INIT_INFO_MAX;
- req->out.argvar = 1;
- req->out.argpages = 1;
- req->pages[0] = page;
- req->page_descs[0].length = req->out.args[1].size;
- req->num_pages = 1;
- req->end = cuse_process_init_reply;
- fuse_request_send_background(fc, req);
-
- return 0;
-
+ ap = &ia->ap;
+ ia->in.major = FUSE_KERNEL_VERSION;
+ ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
+ ia->in.flags |= CUSE_UNRESTRICTED_IOCTL;
+ ap->args.opcode = CUSE_INIT;
+ ap->args.in_numargs = 1;
+ ap->args.in_args[0].size = sizeof(ia->in);
+ ap->args.in_args[0].value = &ia->in;
+ ap->args.out_numargs = 2;
+ ap->args.out_args[0].size = sizeof(ia->out);
+ ap->args.out_args[0].value = &ia->out;
+ ap->args.out_args[1].size = CUSE_INIT_INFO_MAX;
+ ap->args.out_argvar = 1;
+ ap->args.out_pages = 1;
+ ap->num_pages = 1;
+ ap->pages = &ia->page;
+ ap->descs = &ia->desc;
+ ia->page = page;
+ ia->desc.length = ap->args.out_args[1].size;
+ ap->args.end = cuse_process_init_reply;
+
+ rc = fuse_simple_background(fc, &ap->args, GFP_KERNEL);
+ if (rc) {
+ kfree(ia);
err_free_page:
- __free_page(page);
-err_put_req:
- fuse_put_request(fc, req);
+ __free_page(page);
+ }
err:
return rc;
}
@@ -504,9 +506,9 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
* Limit the cuse channel to requests that can
* be represented in file->f_cred->user_ns.
*/
- fuse_conn_init(&cc->fc, file->f_cred->user_ns);
+ fuse_conn_init(&cc->fc, file->f_cred->user_ns, &fuse_dev_fiq_ops, NULL);
- fud = fuse_dev_alloc(&cc->fc);
+ fud = fuse_dev_alloc_install(&cc->fc);
if (!fud) {
kfree(cc);
return -ENOMEM;
@@ -519,6 +521,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
rc = cuse_send_init(cc);
if (rc) {
fuse_dev_free(fud);
+ fuse_conn_put(&cc->fc);
return rc;
}
file->private_data = fud;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ea8237513dfa..d4e6691d2d92 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -40,107 +40,30 @@ static struct fuse_dev *fuse_get_dev(struct file *file)
return READ_ONCE(file->private_data);
}
-static void fuse_request_init(struct fuse_req *req, struct page **pages,
- struct fuse_page_desc *page_descs,
- unsigned npages)
+static void fuse_request_init(struct fuse_req *req)
{
INIT_LIST_HEAD(&req->list);
INIT_LIST_HEAD(&req->intr_entry);
init_waitqueue_head(&req->waitq);
refcount_set(&req->count, 1);
- req->pages = pages;
- req->page_descs = page_descs;
- req->max_pages = npages;
__set_bit(FR_PENDING, &req->flags);
}
-static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags,
- struct fuse_page_desc **desc)
-{
- struct page **pages;
-
- pages = kzalloc(npages * (sizeof(struct page *) +
- sizeof(struct fuse_page_desc)), flags);
- *desc = (void *) pages + npages * sizeof(struct page *);
-
- return pages;
-}
-
-static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
+static struct fuse_req *fuse_request_alloc(gfp_t flags)
{
struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
- if (req) {
- struct page **pages = NULL;
- struct fuse_page_desc *page_descs = NULL;
-
- WARN_ON(npages > FUSE_MAX_MAX_PAGES);
- if (npages > FUSE_REQ_INLINE_PAGES) {
- pages = fuse_req_pages_alloc(npages, flags,
- &page_descs);
- if (!pages) {
- kmem_cache_free(fuse_req_cachep, req);
- return NULL;
- }
- } else if (npages) {
- pages = req->inline_pages;
- page_descs = req->inline_page_descs;
- }
+ if (req)
+ fuse_request_init(req);
- fuse_request_init(req, pages, page_descs, npages);
- }
return req;
}
-struct fuse_req *fuse_request_alloc(unsigned npages)
+static void fuse_request_free(struct fuse_req *req)
{
- return __fuse_request_alloc(npages, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(fuse_request_alloc);
-
-struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
-{
- return __fuse_request_alloc(npages, GFP_NOFS);
-}
-
-static void fuse_req_pages_free(struct fuse_req *req)
-{
- if (req->pages != req->inline_pages)
- kfree(req->pages);
-}
-
-bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
- gfp_t flags)
-{
- struct page **pages;
- struct fuse_page_desc *page_descs;
- unsigned int npages = min_t(unsigned int,
- max_t(unsigned int, req->max_pages * 2,
- FUSE_DEFAULT_MAX_PAGES_PER_REQ),
- fc->max_pages);
- WARN_ON(npages <= req->max_pages);
-
- pages = fuse_req_pages_alloc(npages, flags, &page_descs);
- if (!pages)
- return false;
-
- memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages);
- memcpy(page_descs, req->page_descs,
- sizeof(struct fuse_page_desc) * req->max_pages);
- fuse_req_pages_free(req);
- req->pages = pages;
- req->page_descs = page_descs;
- req->max_pages = npages;
-
- return true;
-}
-
-void fuse_request_free(struct fuse_req *req)
-{
- fuse_req_pages_free(req);
kmem_cache_free(fuse_req_cachep, req);
}
-void __fuse_get_request(struct fuse_req *req)
+static void __fuse_get_request(struct fuse_req *req)
{
refcount_inc(&req->count);
}
@@ -177,8 +100,9 @@ static void fuse_drop_waiting(struct fuse_conn *fc)
}
}
-static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
- bool for_background)
+static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
+
+static struct fuse_req *fuse_get_req(struct fuse_conn *fc, bool for_background)
{
struct fuse_req *req;
int err;
@@ -201,7 +125,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
if (fc->conn_error)
goto out;
- req = fuse_request_alloc(npages);
+ req = fuse_request_alloc(GFP_KERNEL);
err = -ENOMEM;
if (!req) {
if (for_background)
@@ -229,101 +153,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
return ERR_PTR(err);
}
-struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
-{
- return __fuse_get_req(fc, npages, false);
-}
-EXPORT_SYMBOL_GPL(fuse_get_req);
-
-struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
- unsigned npages)
-{
- return __fuse_get_req(fc, npages, true);
-}
-EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
-
-/*
- * Return request in fuse_file->reserved_req. However that may
- * currently be in use. If that is the case, wait for it to become
- * available.
- */
-static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
- struct file *file)
-{
- struct fuse_req *req = NULL;
- struct fuse_inode *fi = get_fuse_inode(file_inode(file));
- struct fuse_file *ff = file->private_data;
-
- do {
- wait_event(fc->reserved_req_waitq, ff->reserved_req);
- spin_lock(&fi->lock);
- if (ff->reserved_req) {
- req = ff->reserved_req;
- ff->reserved_req = NULL;
- req->stolen_file = get_file(file);
- }
- spin_unlock(&fi->lock);
- } while (!req);
-
- return req;
-}
-
-/*
- * Put stolen request back into fuse_file->reserved_req
- */
-static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
-{
- struct file *file = req->stolen_file;
- struct fuse_inode *fi = get_fuse_inode(file_inode(file));
- struct fuse_file *ff = file->private_data;
-
- WARN_ON(req->max_pages);
- spin_lock(&fi->lock);
- memset(req, 0, sizeof(*req));
- fuse_request_init(req, NULL, NULL, 0);
- BUG_ON(ff->reserved_req);
- ff->reserved_req = req;
- wake_up_all(&fc->reserved_req_waitq);
- spin_unlock(&fi->lock);
- fput(file);
-}
-
-/*
- * Gets a requests for a file operation, always succeeds
- *
- * This is used for sending the FLUSH request, which must get to
- * userspace, due to POSIX locks which may need to be unlocked.
- *
- * If allocation fails due to OOM, use the reserved request in
- * fuse_file.
- *
- * This is very unlikely to deadlock accidentally, since the
- * filesystem should not have it's own file open. If deadlock is
- * intentional, it can still be broken by "aborting" the filesystem.
- */
-struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
- struct file *file)
-{
- struct fuse_req *req;
-
- atomic_inc(&fc->num_waiting);
- wait_event(fc->blocked_waitq, fc->initialized);
- /* Matches smp_wmb() in fuse_set_initialized() */
- smp_rmb();
- req = fuse_request_alloc(0);
- if (!req)
- req = get_reserved_req(fc, file);
-
- req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
- req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
-
- __set_bit(FR_WAITING, &req->flags);
- __clear_bit(FR_BACKGROUND, &req->flags);
- return req;
-}
-
-void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
if (refcount_dec_and_test(&req->count)) {
if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -342,15 +172,11 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
fuse_drop_waiting(fc);
}
- if (req->stolen_file)
- put_reserved_req(fc, req);
- else
- fuse_request_free(req);
+ fuse_request_free(req);
}
}
-EXPORT_SYMBOL_GPL(fuse_put_request);
-static unsigned len_args(unsigned numargs, struct fuse_arg *args)
+unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{
unsigned nbytes = 0;
unsigned i;
@@ -360,25 +186,47 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
return nbytes;
}
+EXPORT_SYMBOL_GPL(fuse_len_args);
-static u64 fuse_get_unique(struct fuse_iqueue *fiq)
+u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
fiq->reqctr += FUSE_REQ_ID_STEP;
return fiq->reqctr;
}
+EXPORT_SYMBOL_GPL(fuse_get_unique);
static unsigned int fuse_req_hash(u64 unique)
{
return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
-static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
+/**
+ * A new request is available, wake fiq->waitq
+ */
+static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
+__releases(fiq->lock)
+{
+ wake_up(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ spin_unlock(&fiq->lock);
+}
+
+const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
+ .wake_forget_and_unlock = fuse_dev_wake_and_unlock,
+ .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
+ .wake_pending_and_unlock = fuse_dev_wake_and_unlock,
+};
+EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
+
+static void queue_request_and_unlock(struct fuse_iqueue *fiq,
+ struct fuse_req *req)
+__releases(fiq->lock)
{
req->in.h.len = sizeof(struct fuse_in_header) +
- len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+ fuse_len_args(req->args->in_numargs,
+ (struct fuse_arg *) req->args->in_args);
list_add_tail(&req->list, &fiq->pending);
- wake_up_locked(&fiq->waitq);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ fiq->ops->wake_pending_and_unlock(fiq);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -389,16 +237,15 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
if (fiq->connected) {
fiq->forget_list_tail->next = forget;
fiq->forget_list_tail = forget;
- wake_up_locked(&fiq->waitq);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ fiq->ops->wake_forget_and_unlock(fiq);
} else {
kfree(forget);
+ spin_unlock(&fiq->lock);
}
- spin_unlock(&fiq->waitq.lock);
}
static void flush_bg_queue(struct fuse_conn *fc)
@@ -412,10 +259,9 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
req->in.h.unique = fuse_get_unique(fiq);
- queue_request(fiq, req);
- spin_unlock(&fiq->waitq.lock);
+ queue_request_and_unlock(fiq, req);
}
}
@@ -427,21 +273,24 @@ static void flush_bg_queue(struct fuse_conn *fc)
* the 'end' callback is called if given, else the reference to the
* request is released
*/
-static void request_end(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
{
struct fuse_iqueue *fiq = &fc->iq;
+ bool async;
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
+
+ async = req->args->end;
/*
* test_and_set_bit() implies smp_mb() between bit
* changing and below intr_entry check. Pairs with
* smp_mb() from queue_interrupt().
*/
if (!list_empty(&req->intr_entry)) {
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
list_del_init(&req->intr_entry);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
}
WARN_ON(test_bit(FR_PENDING, &req->flags));
WARN_ON(test_bit(FR_SENT, &req->flags));
@@ -475,18 +324,19 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&req->waitq);
}
- if (req->end)
- req->end(fc, req);
+ if (async)
+ req->args->end(fc, req->args, req->out.h.error);
put_request:
fuse_put_request(fc, req);
}
+EXPORT_SYMBOL_GPL(fuse_request_end);
static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
/* Check for we've sent request to interrupt this req */
if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return -EINVAL;
}
@@ -499,13 +349,13 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
smp_mb();
if (test_bit(FR_FINISHED, &req->flags)) {
list_del_init(&req->intr_entry);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return 0;
}
- wake_up_locked(&fiq->waitq);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ fiq->ops->wake_interrupt_and_unlock(fiq);
+ } else {
+ spin_unlock(&fiq->lock);
}
- spin_unlock(&fiq->waitq.lock);
return 0;
}
@@ -535,16 +385,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
if (!err)
return;
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
/* Request is not yet in userspace, bail out */
if (test_bit(FR_PENDING, &req->flags)) {
list_del(&req->list);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
__fuse_put_request(req);
req->out.h.error = -EINTR;
return;
}
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
}
/*
@@ -559,101 +409,110 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_iqueue *fiq = &fc->iq;
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
if (!fiq->connected) {
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
req->out.h.error = -ENOTCONN;
} else {
req->in.h.unique = fuse_get_unique(fiq);
- queue_request(fiq, req);
/* acquire extra reference, since request is still needed
- after request_end() */
+ after fuse_request_end() */
__fuse_get_request(req);
- spin_unlock(&fiq->waitq.lock);
+ queue_request_and_unlock(fiq, req);
request_wait_answer(fc, req);
- /* Pairs with smp_wmb() in request_end() */
+ /* Pairs with smp_wmb() in fuse_request_end() */
smp_rmb();
}
}
-void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
-{
- __set_bit(FR_ISREPLY, &req->flags);
- if (!test_bit(FR_WAITING, &req->flags)) {
- __set_bit(FR_WAITING, &req->flags);
- atomic_inc(&fc->num_waiting);
- }
- __fuse_request_send(fc, req);
-}
-EXPORT_SYMBOL_GPL(fuse_request_send);
-
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
- if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
- args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+ if (fc->minor < 4 && args->opcode == FUSE_STATFS)
+ args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;
if (fc->minor < 9) {
- switch (args->in.h.opcode) {
+ switch (args->opcode) {
case FUSE_LOOKUP:
case FUSE_CREATE:
case FUSE_MKNOD:
case FUSE_MKDIR:
case FUSE_SYMLINK:
case FUSE_LINK:
- args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
break;
case FUSE_GETATTR:
case FUSE_SETATTR:
- args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
break;
}
}
if (fc->minor < 12) {
- switch (args->in.h.opcode) {
+ switch (args->opcode) {
case FUSE_CREATE:
- args->in.args[0].size = sizeof(struct fuse_open_in);
+ args->in_args[0].size = sizeof(struct fuse_open_in);
break;
case FUSE_MKNOD:
- args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+ args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
break;
}
}
}
+static void fuse_force_creds(struct fuse_conn *fc, struct fuse_req *req)
+{
+ req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
+ req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
+}
+
+static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
+{
+ req->in.h.opcode = args->opcode;
+ req->in.h.nodeid = args->nodeid;
+ req->args = args;
+}
+
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
struct fuse_req *req;
ssize_t ret;
- req = fuse_get_req(fc, 0);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ if (args->force) {
+ atomic_inc(&fc->num_waiting);
+ req = fuse_request_alloc(GFP_KERNEL | __GFP_NOFAIL);
+
+ if (!args->nocreds)
+ fuse_force_creds(fc, req);
+
+ __set_bit(FR_WAITING, &req->flags);
+ __set_bit(FR_FORCE, &req->flags);
+ } else {
+ WARN_ON(args->nocreds);
+ req = fuse_get_req(fc, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
/* Needs to be done after fuse_get_req() so that fc->minor is valid */
fuse_adjust_compat(fc, args);
+ fuse_args_to_req(req, args);
- req->in.h.opcode = args->in.h.opcode;
- req->in.h.nodeid = args->in.h.nodeid;
- req->in.numargs = args->in.numargs;
- memcpy(req->in.args, args->in.args,
- args->in.numargs * sizeof(struct fuse_in_arg));
- req->out.argvar = args->out.argvar;
- req->out.numargs = args->out.numargs;
- memcpy(req->out.args, args->out.args,
- args->out.numargs * sizeof(struct fuse_arg));
- fuse_request_send(fc, req);
+ if (!args->noreply)
+ __set_bit(FR_ISREPLY, &req->flags);
+ __fuse_request_send(fc, req);
ret = req->out.h.error;
- if (!ret && args->out.argvar) {
- BUG_ON(args->out.numargs != 1);
- ret = req->out.args[0].size;
+ if (!ret && args->out_argvar) {
+ BUG_ON(args->out_numargs == 0);
+ ret = args->out_args[args->out_numargs - 1].size;
}
fuse_put_request(fc, req);
return ret;
}
-bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
+static bool fuse_request_queue_background(struct fuse_conn *fc,
+ struct fuse_req *req)
{
bool queued = false;
@@ -681,56 +540,63 @@ bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
return queued;
}
-void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
+int fuse_simple_background(struct fuse_conn *fc, struct fuse_args *args,
+ gfp_t gfp_flags)
{
- WARN_ON(!req->end);
+ struct fuse_req *req;
+
+ if (args->force) {
+ WARN_ON(!args->nocreds);
+ req = fuse_request_alloc(gfp_flags);
+ if (!req)
+ return -ENOMEM;
+ __set_bit(FR_BACKGROUND, &req->flags);
+ } else {
+ WARN_ON(args->nocreds);
+ req = fuse_get_req(fc, true);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
+
+ fuse_args_to_req(req, args);
+
if (!fuse_request_queue_background(fc, req)) {
- req->out.h.error = -ENOTCONN;
- req->end(fc, req);
fuse_put_request(fc, req);
+ return -ENOTCONN;
}
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(fuse_request_send_background);
+EXPORT_SYMBOL_GPL(fuse_simple_background);
-static int fuse_request_send_notify_reply(struct fuse_conn *fc,
- struct fuse_req *req, u64 unique)
+static int fuse_simple_notify_reply(struct fuse_conn *fc,
+ struct fuse_args *args, u64 unique)
{
- int err = -ENODEV;
+ struct fuse_req *req;
struct fuse_iqueue *fiq = &fc->iq;
+ int err = 0;
+
+ req = fuse_get_req(fc, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
__clear_bit(FR_ISREPLY, &req->flags);
req->in.h.unique = unique;
- spin_lock(&fiq->waitq.lock);
+
+ fuse_args_to_req(req, args);
+
+ spin_lock(&fiq->lock);
if (fiq->connected) {
- queue_request(fiq, req);
- err = 0;
+ queue_request_and_unlock(fiq, req);
+ } else {
+ err = -ENODEV;
+ spin_unlock(&fiq->lock);
+ fuse_put_request(fc, req);
}
- spin_unlock(&fiq->waitq.lock);
return err;
}
-void fuse_force_forget(struct file *file, u64 nodeid)
-{
- struct inode *inode = file_inode(file);
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
- struct fuse_forget_in inarg;
-
- memset(&inarg, 0, sizeof(inarg));
- inarg.nlookup = 1;
- req = fuse_get_req_nofail_nopages(fc, file);
- req->in.h.opcode = FUSE_FORGET;
- req->in.h.nodeid = nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- __clear_bit(FR_ISREPLY, &req->flags);
- __fuse_request_send(fc, req);
- /* ignore errors */
- fuse_put_request(fc, req);
-}
-
/*
* Lock the request. Up to the next unlock_request() there mustn't be
* anything that could cause a page-fault. If the request was already
@@ -839,7 +705,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
cs->pipebufs++;
cs->nr_segs--;
} else {
- if (cs->nr_segs == cs->pipe->buffers)
+ if (cs->nr_segs >= cs->pipe->max_usage)
return -EIO;
page = alloc_page(GFP_HIGHUSER);
@@ -1015,7 +881,7 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
struct pipe_buffer *buf;
int err;
- if (cs->nr_segs == cs->pipe->buffers)
+ if (cs->nr_segs >= cs->pipe->max_usage)
return -EIO;
err = unlock_request(cs->req);
@@ -1084,14 +950,15 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
{
unsigned i;
struct fuse_req *req = cs->req;
+ struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
- for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
+
+ for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
int err;
- unsigned offset = req->page_descs[i].offset;
- unsigned count = min(nbytes, req->page_descs[i].length);
+ unsigned int offset = ap->descs[i].offset;
+ unsigned int count = min(nbytes, ap->descs[i].length);
- err = fuse_copy_page(cs, &req->pages[i], offset, count,
- zeroing);
+ err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
if (err)
return err;
@@ -1149,12 +1016,12 @@ static int request_pending(struct fuse_iqueue *fiq)
* Unlike other requests this is assembled on demand, without a need
* to allocate a separate fuse_req structure.
*
- * Called with fiq->waitq.lock held, releases it
+ * Called with fiq->lock held, releases it
*/
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes, struct fuse_req *req)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
struct fuse_in_header ih;
struct fuse_interrupt_in arg;
@@ -1169,7 +1036,7 @@ __releases(fiq->waitq.lock)
ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
arg.unique = req->in.h.unique;
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
if (nbytes < reqsize)
return -EINVAL;
@@ -1181,9 +1048,9 @@ __releases(fiq->waitq.lock)
return err ? err : reqsize;
}
-static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
- unsigned max,
- unsigned *countp)
+struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+ unsigned int max,
+ unsigned int *countp)
{
struct fuse_forget_link *head = fiq->forget_list_head.next;
struct fuse_forget_link **newhead = &head;
@@ -1202,14 +1069,15 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
return head;
}
+EXPORT_SYMBOL(fuse_dequeue_forget);
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
int err;
- struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
+ struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
struct fuse_forget_in arg = {
.nlookup = forget->forget_one.nlookup,
};
@@ -1220,7 +1088,7 @@ __releases(fiq->waitq.lock)
.len = sizeof(ih) + sizeof(arg),
};
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
kfree(forget);
if (nbytes < ih.len)
return -EINVAL;
@@ -1238,7 +1106,7 @@ __releases(fiq->waitq.lock)
static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs, size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
int err;
unsigned max_forgets;
@@ -1252,13 +1120,13 @@ __releases(fiq->waitq.lock)
};
if (nbytes < ih.len) {
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return -EINVAL;
}
max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
- head = dequeue_forget(fiq, max_forgets, &count);
- spin_unlock(&fiq->waitq.lock);
+ head = fuse_dequeue_forget(fiq, max_forgets, &count);
+ spin_unlock(&fiq->lock);
arg.count = count;
ih.len += count * sizeof(struct fuse_forget_one);
@@ -1288,7 +1156,7 @@ __releases(fiq->waitq.lock)
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
return fuse_read_single_forget(fiq, cs, nbytes);
@@ -1302,7 +1170,7 @@ __releases(fiq->waitq.lock)
* the pending list and copies request data to userspace buffer. If
* no reply is needed (FORGET) or request has been aborted or there
* was an error during the copying then it's finished by calling
- * request_end(). Otherwise add it to the processing list, and set
+ * fuse_request_end(). Otherwise add it to the processing list, and set
* the 'sent' flag.
*/
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
@@ -1313,21 +1181,42 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
struct fuse_iqueue *fiq = &fc->iq;
struct fuse_pqueue *fpq = &fud->pq;
struct fuse_req *req;
- struct fuse_in *in;
+ struct fuse_args *args;
unsigned reqsize;
unsigned int hash;
+ /*
+ * Require sane minimum read buffer - that has capacity for fixed part
+ * of any request header + negotiated max_write room for data.
+ *
+ * Historically libfuse reserves 4K for fixed header room, but e.g.
+ * GlusterFS reserves only 80 bytes
+ *
+ * = `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
+ *
+ * which is the absolute minimum any sane filesystem should be using
+ * for header room.
+ */
+ if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
+ sizeof(struct fuse_in_header) +
+ sizeof(struct fuse_write_in) +
+ fc->max_write))
+ return -EINVAL;
+
restart:
- spin_lock(&fiq->waitq.lock);
- err = -EAGAIN;
- if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
- !request_pending(fiq))
- goto err_unlock;
+ for (;;) {
+ spin_lock(&fiq->lock);
+ if (!fiq->connected || request_pending(fiq))
+ break;
+ spin_unlock(&fiq->lock);
- err = wait_event_interruptible_exclusive_locked(fiq->waitq,
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ err = wait_event_interruptible_exclusive(fiq->waitq,
!fiq->connected || request_pending(fiq));
- if (err)
- goto err_unlock;
+ if (err)
+ return err;
+ }
if (!fiq->connected) {
err = fc->aborted ? -ECONNABORTED : -ENODEV;
@@ -1351,28 +1240,28 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
req = list_entry(fiq->pending.next, struct fuse_req, list);
clear_bit(FR_PENDING, &req->flags);
list_del_init(&req->list);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
- in = &req->in;
- reqsize = in->h.len;
+ args = req->args;
+ reqsize = req->in.h.len;
/* If request is too large, reply with an error and restart the read */
if (nbytes < reqsize) {
req->out.h.error = -EIO;
/* SETXATTR is special, since it may contain too large data */
- if (in->h.opcode == FUSE_SETXATTR)
+ if (args->opcode == FUSE_SETXATTR)
req->out.h.error = -E2BIG;
- request_end(fc, req);
+ fuse_request_end(fc, req);
goto restart;
}
spin_lock(&fpq->lock);
list_add(&req->list, &fpq->io);
spin_unlock(&fpq->lock);
cs->req = req;
- err = fuse_copy_one(cs, &in->h, sizeof(in->h));
+ err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
if (!err)
- err = fuse_copy_args(cs, in->numargs, in->argpages,
- (struct fuse_arg *) in->args, 0);
+ err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
+ (struct fuse_arg *) args->in_args, 0);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
clear_bit(FR_LOCKED, &req->flags);
@@ -1405,11 +1294,11 @@ out_end:
if (!test_bit(FR_PRIVATE, &req->flags))
list_del_init(&req->list);
spin_unlock(&fpq->lock);
- request_end(fc, req);
+ fuse_request_end(fc, req);
return err;
err_unlock:
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return err;
}
@@ -1454,7 +1343,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
if (!fud)
return -EPERM;
- bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
+ bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
GFP_KERNEL);
if (!bufs)
return -ENOMEM;
@@ -1466,7 +1355,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
if (ret < 0)
goto out;
- if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
+ if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
ret = -EIO;
goto out;
}
@@ -1728,9 +1617,19 @@ out_finish:
return err;
}
-static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
+struct fuse_retrieve_args {
+ struct fuse_args_pages ap;
+ struct fuse_notify_retrieve_in inarg;
+};
+
+static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_args *args,
+ int error)
{
- release_pages(req->pages, req->num_pages);
+ struct fuse_retrieve_args *ra =
+ container_of(args, typeof(*ra), ap.args);
+
+ release_pages(ra->ap.pages, ra->ap.num_pages);
+ kfree(ra);
}
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
@@ -1738,13 +1637,16 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
{
int err;
struct address_space *mapping = inode->i_mapping;
- struct fuse_req *req;
pgoff_t index;
loff_t file_size;
unsigned int num;
unsigned int offset;
size_t total_len = 0;
unsigned int num_pages;
+ struct fuse_retrieve_args *ra;
+ size_t args_size = sizeof(*ra);
+ struct fuse_args_pages *ap;
+ struct fuse_args *args;
offset = outarg->offset & ~PAGE_MASK;
file_size = i_size_read(inode);
@@ -1758,19 +1660,26 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages = min(num_pages, fc->max_pages);
- req = fuse_get_req(fc, num_pages);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
+
+ ra = kzalloc(args_size, GFP_KERNEL);
+ if (!ra)
+ return -ENOMEM;
- req->in.h.opcode = FUSE_NOTIFY_REPLY;
- req->in.h.nodeid = outarg->nodeid;
- req->in.numargs = 2;
- req->in.argpages = 1;
- req->end = fuse_retrieve_end;
+ ap = &ra->ap;
+ ap->pages = (void *) (ra + 1);
+ ap->descs = (void *) (ap->pages + num_pages);
+
+ args = &ap->args;
+ args->nodeid = outarg->nodeid;
+ args->opcode = FUSE_NOTIFY_REPLY;
+ args->in_numargs = 2;
+ args->in_pages = true;
+ args->end = fuse_retrieve_end;
index = outarg->offset >> PAGE_SHIFT;
- while (num && req->num_pages < num_pages) {
+ while (num && ap->num_pages < num_pages) {
struct page *page;
unsigned int this_num;
@@ -1779,27 +1688,25 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
break;
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
- req->pages[req->num_pages] = page;
- req->page_descs[req->num_pages].offset = offset;
- req->page_descs[req->num_pages].length = this_num;
- req->num_pages++;
+ ap->pages[ap->num_pages] = page;
+ ap->descs[ap->num_pages].offset = offset;
+ ap->descs[ap->num_pages].length = this_num;
+ ap->num_pages++;
offset = 0;
num -= this_num;
total_len += this_num;
index++;
}
- req->misc.retrieve_in.offset = outarg->offset;
- req->misc.retrieve_in.size = total_len;
- req->in.args[0].size = sizeof(req->misc.retrieve_in);
- req->in.args[0].value = &req->misc.retrieve_in;
- req->in.args[1].size = total_len;
+ ra->inarg.offset = outarg->offset;
+ ra->inarg.size = total_len;
+ args->in_args[0].size = sizeof(ra->inarg);
+ args->in_args[0].value = &ra->inarg;
+ args->in_args[1].size = total_len;
- err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
- if (err) {
- fuse_retrieve_end(fc, req);
- fuse_put_request(fc, req);
- }
+ err = fuse_simple_notify_reply(fc, args, outarg->notify_unique);
+ if (err)
+ fuse_retrieve_end(fc, args, err);
return err;
}
@@ -1885,27 +1792,25 @@ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
return NULL;
}
-static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
+static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
unsigned nbytes)
{
unsigned reqsize = sizeof(struct fuse_out_header);
- if (out->h.error)
- return nbytes != reqsize ? -EINVAL : 0;
+ reqsize += fuse_len_args(args->out_numargs, args->out_args);
- reqsize += len_args(out->numargs, out->args);
-
- if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
+ if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
return -EINVAL;
else if (reqsize > nbytes) {
- struct fuse_arg *lastarg = &out->args[out->numargs-1];
+ struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
unsigned diffsize = reqsize - nbytes;
+
if (diffsize > lastarg->size)
return -EINVAL;
lastarg->size -= diffsize;
}
- return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
- out->page_zeroing);
+ return fuse_copy_args(cs, args->out_numargs, args->out_pages,
+ args->out_args, args->page_zeroing);
}
/*
@@ -1913,7 +1818,7 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
* the write buffer. The request is then searched on the processing
* list by the unique ID found in the header. If found, then remove
* it from the list and copy the rest of the buffer to the request.
- * The request is finished by calling request_end()
+ * The request is finished by calling fuse_request_end().
*/
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
struct fuse_copy_state *cs, size_t nbytes)
@@ -1984,10 +1889,13 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
set_bit(FR_LOCKED, &req->flags);
spin_unlock(&fpq->lock);
cs->req = req;
- if (!req->out.page_replace)
+ if (!req->args->page_replace)
cs->move_pages = 0;
- err = copy_out_args(cs, &req->out, nbytes);
+ if (oh.error)
+ err = nbytes != sizeof(oh) ? -EINVAL : 0;
+ else
+ err = copy_out_args(cs, req->args, nbytes);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
@@ -2000,7 +1908,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
list_del_init(&req->list);
spin_unlock(&fpq->lock);
- request_end(fc, req);
+ fuse_request_end(fc, req);
out:
return err ? err : nbytes;
@@ -2029,6 +1937,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
+ unsigned int head, tail, mask, count;
unsigned nbuf;
unsigned idx;
struct pipe_buffer *bufs;
@@ -2043,8 +1952,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
- bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
- GFP_KERNEL);
+ head = pipe->head;
+ tail = pipe->tail;
+ mask = pipe->ring_size - 1;
+ count = head - tail;
+
+ bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
if (!bufs) {
pipe_unlock(pipe);
return -ENOMEM;
@@ -2052,8 +1965,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
nbuf = 0;
rem = 0;
- for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
- rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
+ for (idx = tail; idx < head && rem < len; idx++)
+ rem += pipe->bufs[idx & mask].len;
ret = -EINVAL;
if (rem < len)
@@ -2064,16 +1977,16 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
- BUG_ON(nbuf >= pipe->buffers);
- BUG_ON(!pipe->nrbufs);
- ibuf = &pipe->bufs[pipe->curbuf];
+ BUG_ON(nbuf >= pipe->ring_size);
+ BUG_ON(tail == head);
+ ibuf = &pipe->bufs[tail & mask];
obuf = &bufs[nbuf];
if (rem >= ibuf->len) {
*obuf = *ibuf;
ibuf->ops = NULL;
- pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
- pipe->nrbufs--;
+ tail++;
+ pipe->tail = tail;
} else {
if (!pipe_buf_get(pipe, ibuf))
goto out_free;
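
fuse_dev_splice_write() now walks the pipe with the newer head/tail cursors and a power-of-two mask instead of curbuf/nrbufs. A minimal userspace sketch of the same ring indexing follows (the ring structure here is invented for the example; it is not the kernel's struct pipe_inode_info):

#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */

struct demo_ring {
	unsigned int head;		/* next free slot (producer) */
	unsigned int tail;		/* oldest filled slot (consumer) */
	int slots[RING_SIZE];
};

/* Occupancy is simply head - tail; unsigned wrap-around keeps this correct. */
static unsigned int demo_ring_count(const struct demo_ring *r)
{
	return r->head - r->tail;
}

static void demo_ring_push(struct demo_ring *r, int val)
{
	r->slots[r->head & (RING_SIZE - 1)] = val;	/* mask wraps the index */
	r->head++;
}

static int demo_ring_pop(struct demo_ring *r)
{
	int val = r->slots[r->tail & (RING_SIZE - 1)];

	r->tail++;
	return val;
}

int main(void)
{
	struct demo_ring r = { 0 };
	int i;

	for (i = 0; i < 5; i++)
		demo_ring_push(&r, i);
	while (demo_ring_count(&r))
		printf("%d\n", demo_ring_pop(&r));
	return 0;
}
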
@@ -2121,12 +2034,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
fiq = &fud->fc->iq;
poll_wait(file, &fiq->waitq, wait);
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
if (!fiq->connected)
mask = EPOLLERR;
else if (request_pending(fiq))
mask |= EPOLLIN | EPOLLRDNORM;
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return mask;
}
@@ -2140,7 +2053,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
req->out.h.error = -ECONNABORTED;
clear_bit(FR_SENT, &req->flags);
list_del_init(&req->list);
- request_end(fc, req);
+ fuse_request_end(fc, req);
}
}
@@ -2221,15 +2134,15 @@ void fuse_abort_conn(struct fuse_conn *fc)
flush_bg_queue(fc);
spin_unlock(&fc->bg_lock);
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
fiq->connected = 0;
list_for_each_entry(req, &fiq->pending, list)
clear_bit(FR_PENDING, &req->flags);
list_splice_tail_init(&fiq->pending, &to_end);
while (forget_pending(fiq))
- kfree(dequeue_forget(fiq, 1, NULL));
- wake_up_all_locked(&fiq->waitq);
- spin_unlock(&fiq->waitq.lock);
+ kfree(fuse_dequeue_forget(fiq, 1, NULL));
+ wake_up_all(&fiq->waitq);
+ spin_unlock(&fiq->lock);
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
end_polls(fc);
wake_up_all(&fc->blocked_waitq);
@@ -2296,7 +2209,7 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
if (new->private_data)
return -EINVAL;
- fud = fuse_dev_alloc(fc);
+ fud = fuse_dev_alloc_install(fc);
if (!fud)
return -ENOMEM;
@@ -2354,7 +2267,7 @@ const struct file_operations fuse_dev_operations = {
.release = fuse_dev_release,
.fasync = fuse_dev_fasync,
.unlocked_ioctl = fuse_dev_ioctl,
- .compat_ioctl = fuse_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index dd0f64f7bc06..ee190119f45c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -24,20 +24,54 @@ static void fuse_advise_use_readdirplus(struct inode *dir)
set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
}
+#if BITS_PER_LONG >= 64
+static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
+{
+ entry->d_fsdata = (void *) time;
+}
+
+static inline u64 fuse_dentry_time(const struct dentry *entry)
+{
+ return (u64)entry->d_fsdata;
+}
+
+#else
union fuse_dentry {
u64 time;
struct rcu_head rcu;
};
-static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
+static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
{
- ((union fuse_dentry *) entry->d_fsdata)->time = time;
+ ((union fuse_dentry *) dentry->d_fsdata)->time = time;
}
-static inline u64 fuse_dentry_time(struct dentry *entry)
+static inline u64 fuse_dentry_time(const struct dentry *entry)
{
return ((union fuse_dentry *) entry->d_fsdata)->time;
}
+#endif
+
+static void fuse_dentry_settime(struct dentry *dentry, u64 time)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
+ bool delete = !time && fc->delete_stale;
+ /*
+ * Mess with DCACHE_OP_DELETE because dput() will be faster without it.
+ * Don't care about races, either way it's just an optimization
+ */
+ if ((!delete && (dentry->d_flags & DCACHE_OP_DELETE)) ||
+ (delete && !(dentry->d_flags & DCACHE_OP_DELETE))) {
+ spin_lock(&dentry->d_lock);
+ if (!delete)
+ dentry->d_flags &= ~DCACHE_OP_DELETE;
+ else
+ dentry->d_flags |= DCACHE_OP_DELETE;
+ spin_unlock(&dentry->d_lock);
+ }
+
+ __fuse_dentry_settime(dentry, time);
+}
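
On 64-bit builds the hunk above stores the dentry timeout directly in the pointer-sized d_fsdata field, so no per-dentry allocation is needed; the 32-bit path keeps the small union. A rough userspace illustration of that packing, with invented names and a plain void * standing in for d_fsdata:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the pointer-sized per-dentry slot (illustration only). */
struct demo_dentry {
	void *fsdata;
};

#if UINTPTR_MAX >= UINT64_MAX
/* 64-bit: the expiry fits in the pointer itself, no allocation needed. */
static void demo_settime(struct demo_dentry *d, uint64_t t)
{
	d->fsdata = (void *)(uintptr_t)t;
}

static uint64_t demo_gettime(const struct demo_dentry *d)
{
	return (uintptr_t)d->fsdata;
}
#else
/* 32-bit fallback: keep the 64-bit value in a separately allocated slot. */
static void demo_settime(struct demo_dentry *d, uint64_t t)
{
	if (!d->fsdata)
		d->fsdata = malloc(sizeof(uint64_t));
	if (d->fsdata)
		*(uint64_t *)d->fsdata = t;
}

static uint64_t demo_gettime(const struct demo_dentry *d)
{
	return d->fsdata ? *(uint64_t *)d->fsdata : 0;
}
#endif

int main(void)
{
	struct demo_dentry d = { 0 };

	demo_settime(&d, 123456789ULL);
	printf("%llu\n", (unsigned long long)demo_gettime(&d));
	return 0;
}
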
/*
* FUSE caches dentries and attributes with separate timeout. The
@@ -139,14 +173,14 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
struct fuse_entry_out *outarg)
{
memset(outarg, 0, sizeof(struct fuse_entry_out));
- args->in.h.opcode = FUSE_LOOKUP;
- args->in.h.nodeid = nodeid;
- args->in.numargs = 1;
- args->in.args[0].size = name->len + 1;
- args->in.args[0].value = name->name;
- args->out.numargs = 1;
- args->out.args[0].size = sizeof(struct fuse_entry_out);
- args->out.args[0].value = outarg;
+ args->opcode = FUSE_LOOKUP;
+ args->nodeid = nodeid;
+ args->in_numargs = 1;
+ args->in_args[0].size = name->len + 1;
+ args->in_args[0].value = name->name;
+ args->out_numargs = 1;
+ args->out_args[0].size = sizeof(struct fuse_entry_out);
+ args->out_args[0].value = outarg;
}
/*
@@ -214,7 +248,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
kfree(forget);
if (ret == -ENOMEM)
goto out;
- if (ret || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
+ if (ret || fuse_invalid_attr(&outarg.attr) ||
+ (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
goto invalid;
forget_all_cached_acls(inode);
@@ -242,9 +277,11 @@ invalid:
goto out;
}
+#if BITS_PER_LONG < 64
static int fuse_dentry_init(struct dentry *dentry)
{
- dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry), GFP_KERNEL);
+ dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry),
+ GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
return dentry->d_fsdata ? 0 : -ENOMEM;
}
@@ -254,16 +291,27 @@ static void fuse_dentry_release(struct dentry *dentry)
kfree_rcu(fd, rcu);
}
+#endif
+
+static int fuse_dentry_delete(const struct dentry *dentry)
+{
+ return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
+}
const struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
+ .d_delete = fuse_dentry_delete,
+#if BITS_PER_LONG < 64
.d_init = fuse_dentry_init,
.d_release = fuse_dentry_release,
+#endif
};
const struct dentry_operations fuse_root_dentry_operations = {
+#if BITS_PER_LONG < 64
.d_init = fuse_dentry_init,
.d_release = fuse_dentry_release,
+#endif
};
int fuse_valid_type(int m)
@@ -272,6 +320,12 @@ int fuse_valid_type(int m)
S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
}
+bool fuse_invalid_attr(struct fuse_attr *attr)
+{
+ return !fuse_valid_type(attr->mode) ||
+ attr->size > LLONG_MAX;
+}
+
int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
struct fuse_entry_out *outarg, struct inode **inode)
{
@@ -303,7 +357,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
err = -EIO;
if (!outarg->nodeid)
goto out_put_forget;
- if (!fuse_valid_type(outarg->attr.mode))
+ if (fuse_invalid_attr(&outarg->attr))
goto out_put_forget;
*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
@@ -358,7 +412,8 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
else
fuse_invalidate_entry_cache(entry);
- fuse_advise_use_readdirplus(dir);
+ if (inode)
+ fuse_advise_use_readdirplus(dir);
return newent;
out_iput:
@@ -410,24 +465,25 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
inarg.flags = flags;
inarg.mode = mode;
inarg.umask = current_umask();
- args.in.h.opcode = FUSE_CREATE;
- args.in.h.nodeid = get_node_id(dir);
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = entry->d_name.len + 1;
- args.in.args[1].value = entry->d_name.name;
- args.out.numargs = 2;
- args.out.args[0].size = sizeof(outentry);
- args.out.args[0].value = &outentry;
- args.out.args[1].size = sizeof(outopen);
- args.out.args[1].value = &outopen;
+ args.opcode = FUSE_CREATE;
+ args.nodeid = get_node_id(dir);
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
+ args.out_numargs = 2;
+ args.out_args[0].size = sizeof(outentry);
+ args.out_args[0].value = &outentry;
+ args.out_args[1].size = sizeof(outopen);
+ args.out_args[1].value = &outopen;
err = fuse_simple_request(fc, &args);
if (err)
goto out_free_ff;
err = -EIO;
- if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
+ if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
+ fuse_invalid_attr(&outentry.attr))
goto out_free_ff;
ff->fh = outopen.fh;
@@ -526,16 +582,16 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
return -ENOMEM;
memset(&outarg, 0, sizeof(outarg));
- args->in.h.nodeid = get_node_id(dir);
- args->out.numargs = 1;
- args->out.args[0].size = sizeof(outarg);
- args->out.args[0].value = &outarg;
+ args->nodeid = get_node_id(dir);
+ args->out_numargs = 1;
+ args->out_args[0].size = sizeof(outarg);
+ args->out_args[0].value = &outarg;
err = fuse_simple_request(fc, args);
if (err)
goto out_put_forget_req;
err = -EIO;
- if (invalid_nodeid(outarg.nodeid))
+ if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
goto out_put_forget_req;
if ((outarg.attr.mode ^ mode) & S_IFMT)
@@ -582,12 +638,12 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
inarg.mode = mode;
inarg.rdev = new_encode_dev(rdev);
inarg.umask = current_umask();
- args.in.h.opcode = FUSE_MKNOD;
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = entry->d_name.len + 1;
- args.in.args[1].value = entry->d_name.name;
+ args.opcode = FUSE_MKNOD;
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
return create_new_entry(fc, &args, dir, entry, mode);
}
@@ -609,12 +665,12 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
inarg.umask = current_umask();
- args.in.h.opcode = FUSE_MKDIR;
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = entry->d_name.len + 1;
- args.in.args[1].value = entry->d_name.name;
+ args.opcode = FUSE_MKDIR;
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
return create_new_entry(fc, &args, dir, entry, S_IFDIR);
}
@@ -625,12 +681,12 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
unsigned len = strlen(link) + 1;
FUSE_ARGS(args);
- args.in.h.opcode = FUSE_SYMLINK;
- args.in.numargs = 2;
- args.in.args[0].size = entry->d_name.len + 1;
- args.in.args[0].value = entry->d_name.name;
- args.in.args[1].size = len;
- args.in.args[1].value = link;
+ args.opcode = FUSE_SYMLINK;
+ args.in_numargs = 2;
+ args.in_args[0].size = entry->d_name.len + 1;
+ args.in_args[0].value = entry->d_name.name;
+ args.in_args[1].size = len;
+ args.in_args[1].value = link;
return create_new_entry(fc, &args, dir, entry, S_IFLNK);
}
@@ -648,11 +704,11 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
struct fuse_conn *fc = get_fuse_conn(dir);
FUSE_ARGS(args);
- args.in.h.opcode = FUSE_UNLINK;
- args.in.h.nodeid = get_node_id(dir);
- args.in.numargs = 1;
- args.in.args[0].size = entry->d_name.len + 1;
- args.in.args[0].value = entry->d_name.name;
+ args.opcode = FUSE_UNLINK;
+ args.nodeid = get_node_id(dir);
+ args.in_numargs = 1;
+ args.in_args[0].size = entry->d_name.len + 1;
+ args.in_args[0].value = entry->d_name.name;
err = fuse_simple_request(fc, &args);
if (!err) {
struct inode *inode = d_inode(entry);
@@ -684,11 +740,11 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
struct fuse_conn *fc = get_fuse_conn(dir);
FUSE_ARGS(args);
- args.in.h.opcode = FUSE_RMDIR;
- args.in.h.nodeid = get_node_id(dir);
- args.in.numargs = 1;
- args.in.args[0].size = entry->d_name.len + 1;
- args.in.args[0].value = entry->d_name.name;
+ args.opcode = FUSE_RMDIR;
+ args.nodeid = get_node_id(dir);
+ args.in_numargs = 1;
+ args.in_args[0].size = entry->d_name.len + 1;
+ args.in_args[0].value = entry->d_name.name;
err = fuse_simple_request(fc, &args);
if (!err) {
clear_nlink(d_inode(entry));
@@ -711,15 +767,15 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
memset(&inarg, 0, argsize);
inarg.newdir = get_node_id(newdir);
inarg.flags = flags;
- args.in.h.opcode = opcode;
- args.in.h.nodeid = get_node_id(olddir);
- args.in.numargs = 3;
- args.in.args[0].size = argsize;
- args.in.args[0].value = &inarg;
- args.in.args[1].size = oldent->d_name.len + 1;
- args.in.args[1].value = oldent->d_name.name;
- args.in.args[2].size = newent->d_name.len + 1;
- args.in.args[2].value = newent->d_name.name;
+ args.opcode = opcode;
+ args.nodeid = get_node_id(olddir);
+ args.in_numargs = 3;
+ args.in_args[0].size = argsize;
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = oldent->d_name.len + 1;
+ args.in_args[1].value = oldent->d_name.name;
+ args.in_args[2].size = newent->d_name.len + 1;
+ args.in_args[2].value = newent->d_name.name;
err = fuse_simple_request(fc, &args);
if (!err) {
/* ctime changes */
@@ -796,12 +852,12 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
memset(&inarg, 0, sizeof(inarg));
inarg.oldnodeid = get_node_id(inode);
- args.in.h.opcode = FUSE_LINK;
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = newent->d_name.len + 1;
- args.in.args[1].value = newent->d_name.name;
+ args.opcode = FUSE_LINK;
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = newent->d_name.len + 1;
+ args.in_args[1].value = newent->d_name.name;
err = create_new_entry(fc, &args, newdir, newent, inode->i_mode);
/* Contrary to "normal" filesystems it can happen that link
makes two "logical" inodes point to the same "physical"
@@ -814,7 +870,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
spin_lock(&fi->lock);
fi->attr_version = atomic64_inc_return(&fc->attr_version);
- inc_nlink(inode);
+ if (likely(inode->i_nlink < UINT_MAX))
+ inc_nlink(inode);
spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
fuse_update_ctime(inode);
@@ -884,17 +941,18 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
inarg.getattr_flags |= FUSE_GETATTR_FH;
inarg.fh = ff->fh;
}
- args.in.h.opcode = FUSE_GETATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_GETATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err) {
- if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
+ if (fuse_invalid_attr(&outarg.attr) ||
+ (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
make_bad_inode(inode);
err = -EIO;
} else {
@@ -1056,11 +1114,11 @@ static int fuse_access(struct inode *inode, int mask)
memset(&inarg, 0, sizeof(inarg));
inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
- args.in.h.opcode = FUSE_ACCESS;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
+ args.opcode = FUSE_ACCESS;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_access = 1;
@@ -1152,38 +1210,36 @@ static int fuse_permission(struct inode *inode, int mask)
static int fuse_readlink_page(struct inode *inode, struct page *page)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
- int err;
+ struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 };
+ struct fuse_args_pages ap = {
+ .num_pages = 1,
+ .pages = &page,
+ .descs = &desc,
+ };
+ char *link;
+ ssize_t res;
+
+ ap.args.opcode = FUSE_READLINK;
+ ap.args.nodeid = get_node_id(inode);
+ ap.args.out_pages = true;
+ ap.args.out_argvar = true;
+ ap.args.page_zeroing = true;
+ ap.args.out_numargs = 1;
+ ap.args.out_args[0].size = desc.length;
+ res = fuse_simple_request(fc, &ap.args);
- req = fuse_get_req(fc, 1);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->out.page_zeroing = 1;
- req->out.argpages = 1;
- req->num_pages = 1;
- req->pages[0] = page;
- req->page_descs[0].length = PAGE_SIZE - 1;
- req->in.h.opcode = FUSE_READLINK;
- req->in.h.nodeid = get_node_id(inode);
- req->out.argvar = 1;
- req->out.numargs = 1;
- req->out.args[0].size = PAGE_SIZE - 1;
- fuse_request_send(fc, req);
- err = req->out.h.error;
+ fuse_invalidate_atime(inode);
- if (!err) {
- char *link = page_address(page);
- size_t len = req->out.args[0].size;
+ if (res < 0)
+ return res;
- BUG_ON(len >= PAGE_SIZE);
- link[len] = '\0';
- }
+ if (WARN_ON(res >= PAGE_SIZE))
+ return -EIO;
- fuse_put_request(fc, req);
- fuse_invalidate_atime(inode);
+ link = page_address(page);
+ link[res] = '\0';
- return err;
+ return 0;
}
static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
@@ -1383,14 +1439,14 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
struct fuse_setattr_in *inarg_p,
struct fuse_attr_out *outarg_p)
{
- args->in.h.opcode = FUSE_SETATTR;
- args->in.h.nodeid = get_node_id(inode);
- args->in.numargs = 1;
- args->in.args[0].size = sizeof(*inarg_p);
- args->in.args[0].value = inarg_p;
- args->out.numargs = 1;
- args->out.args[0].size = sizeof(*outarg_p);
- args->out.args[0].value = outarg_p;
+ args->opcode = FUSE_SETATTR;
+ args->nodeid = get_node_id(inode);
+ args->in_numargs = 1;
+ args->in_args[0].size = sizeof(*inarg_p);
+ args->in_args[0].value = inarg_p;
+ args->out_numargs = 1;
+ args->out_args[0].size = sizeof(*outarg_p);
+ args->out_args[0].value = outarg_p;
}
/*
@@ -1476,6 +1532,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
is_truncate = true;
}
+ /* Flush dirty data/metadata before non-truncate SETATTR */
+ if (is_wb && S_ISREG(inode->i_mode) &&
+ attr->ia_valid &
+ (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
+ ATTR_TIMES_SET)) {
+ err = write_inode_now(inode, true);
+ if (err)
+ return err;
+
+ fuse_set_nowrite(inode);
+ fuse_release_nowrite(inode);
+ }
+
if (is_truncate) {
fuse_set_nowrite(inode);
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
@@ -1504,7 +1573,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
goto error;
}
- if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
+ if (fuse_invalid_attr(&outarg.attr) ||
+ (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
make_bad_inode(inode);
err = -EIO;
goto error;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 5ae2828beb00..a63d779eac10 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -19,6 +19,18 @@
#include <linux/falloc.h>
#include <linux/uio.h>
+static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
+ struct fuse_page_desc **desc)
+{
+ struct page **pages;
+
+ pages = kzalloc(npages * (sizeof(struct page *) +
+ sizeof(struct fuse_page_desc)), flags);
+ *desc = (void *) (pages + npages);
+
+ return pages;
+}
+
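
fuse_pages_alloc() above makes a single allocation serve both the page-pointer array and the per-page descriptors: the descriptor array simply begins where the pointer array ends. A small standalone sketch of the same co-allocation layout (generic types and names, not the FUSE ones):

#include <stdio.h>
#include <stdlib.h>

struct demo_desc {
	size_t length;
	size_t offset;
};

/*
 * Allocate one block large enough for npages pointers followed by npages
 * descriptors, and hand back a pointer to each part.  This relies on the
 * descriptor struct not needing stricter alignment than a pointer, which
 * holds here (and for the kernel's fuse_page_desc).
 */
static void **demo_pages_alloc(unsigned int npages, struct demo_desc **descp)
{
	void **pages;

	pages = calloc(npages, sizeof(void *) + sizeof(struct demo_desc));
	if (!pages)
		return NULL;

	*descp = (struct demo_desc *)(pages + npages);
	return pages;
}

int main(void)
{
	struct demo_desc *descs;
	void **pages = demo_pages_alloc(4, &descs);

	if (!pages)
		return 1;
	descs[0].length = 4096;		/* both arrays live in the same block */
	printf("pages=%p descs=%p\n", (void *)pages, (void *)descs);
	free(pages);
	return 0;
}
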
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
int opcode, struct fuse_open_out *outargp)
{
@@ -29,29 +41,36 @@ static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
if (!fc->atomic_o_trunc)
inarg.flags &= ~O_TRUNC;
- args.in.h.opcode = opcode;
- args.in.h.nodeid = nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(*outargp);
- args.out.args[0].value = outargp;
+ args.opcode = opcode;
+ args.nodeid = nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(*outargp);
+ args.out_args[0].value = outargp;
return fuse_simple_request(fc, &args);
}
+struct fuse_release_args {
+ struct fuse_args args;
+ struct fuse_release_in inarg;
+ struct inode *inode;
+};
+
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
struct fuse_file *ff;
- ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
+ ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
if (unlikely(!ff))
return NULL;
ff->fc = fc;
- ff->reserved_req = fuse_request_alloc(0);
- if (unlikely(!ff->reserved_req)) {
+ ff->release_args = kzalloc(sizeof(*ff->release_args),
+ GFP_KERNEL_ACCOUNT);
+ if (!ff->release_args) {
kfree(ff);
return NULL;
}
@@ -69,7 +88,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
void fuse_file_free(struct fuse_file *ff)
{
- fuse_request_free(ff->reserved_req);
+ kfree(ff->release_args);
mutex_destroy(&ff->readdir.lock);
kfree(ff);
}
@@ -80,34 +99,31 @@ static struct fuse_file *fuse_file_get(struct fuse_file *ff)
return ff;
}
-static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_release_end(struct fuse_conn *fc, struct fuse_args *args,
+ int error)
{
- iput(req->misc.release.inode);
+ struct fuse_release_args *ra = container_of(args, typeof(*ra), args);
+
+ iput(ra->inode);
+ kfree(ra);
}
static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
{
if (refcount_dec_and_test(&ff->count)) {
- struct fuse_req *req = ff->reserved_req;
+ struct fuse_args *args = &ff->release_args->args;
if (isdir ? ff->fc->no_opendir : ff->fc->no_open) {
- /*
- * Drop the release request when client does not
- * implement 'open'
- */
- __clear_bit(FR_BACKGROUND, &req->flags);
- iput(req->misc.release.inode);
- fuse_put_request(ff->fc, req);
+ /* Do nothing when client does not implement 'open' */
+ fuse_release_end(ff->fc, args, 0);
} else if (sync) {
- __set_bit(FR_FORCE, &req->flags);
- __clear_bit(FR_BACKGROUND, &req->flags);
- fuse_request_send(ff->fc, req);
- iput(req->misc.release.inode);
- fuse_put_request(ff->fc, req);
+ fuse_simple_request(ff->fc, args);
+ fuse_release_end(ff->fc, args, 0);
} else {
- req->end = fuse_release_end;
- __set_bit(FR_BACKGROUND, &req->flags);
- fuse_request_send_background(ff->fc, req);
+ args->end = fuse_release_end;
+ if (fuse_simple_background(ff->fc, args,
+ GFP_KERNEL | __GFP_NOFAIL))
+ fuse_release_end(ff->fc, args, -ENOTCONN);
}
kfree(ff);
}
@@ -201,7 +217,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
- bool lock_inode = (file->f_flags & O_TRUNC) &&
+ bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
fc->atomic_o_trunc &&
fc->writeback_cache;
@@ -209,16 +225,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
if (err)
return err;
- if (lock_inode)
+ if (is_wb_truncate) {
inode_lock(inode);
+ fuse_set_nowrite(inode);
+ }
err = fuse_do_open(fc, get_node_id(inode), file, isdir);
if (!err)
fuse_finish_open(inode, file);
- if (lock_inode)
+ if (is_wb_truncate) {
+ fuse_release_nowrite(inode);
inode_unlock(inode);
+ }
return err;
}
@@ -227,8 +247,7 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
int flags, int opcode)
{
struct fuse_conn *fc = ff->fc;
- struct fuse_req *req = ff->reserved_req;
- struct fuse_release_in *inarg = &req->misc.release.in;
+ struct fuse_release_args *ra = ff->release_args;
/* Inode is NULL on error path of fuse_create_open() */
if (likely(fi)) {
@@ -243,32 +262,33 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
wake_up_interruptible_all(&ff->poll_wait);
- inarg->fh = ff->fh;
- inarg->flags = flags;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(struct fuse_release_in);
- req->in.args[0].value = inarg;
+ ra->inarg.fh = ff->fh;
+ ra->inarg.flags = flags;
+ ra->args.in_numargs = 1;
+ ra->args.in_args[0].size = sizeof(struct fuse_release_in);
+ ra->args.in_args[0].value = &ra->inarg;
+ ra->args.opcode = opcode;
+ ra->args.nodeid = ff->nodeid;
+ ra->args.force = true;
+ ra->args.nocreds = true;
}
void fuse_release_common(struct file *file, bool isdir)
{
struct fuse_inode *fi = get_fuse_inode(file_inode(file));
struct fuse_file *ff = file->private_data;
- struct fuse_req *req = ff->reserved_req;
+ struct fuse_release_args *ra = ff->release_args;
int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
fuse_prepare_release(fi, ff, file->f_flags, opcode);
if (ff->flock) {
- struct fuse_release_in *inarg = &req->misc.release.in;
- inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
- inarg->lock_owner = fuse_lock_owner_id(ff->fc,
- (fl_owner_t) file);
+ ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
+ ra->inarg.lock_owner = fuse_lock_owner_id(ff->fc,
+ (fl_owner_t) file);
}
/* Hold inode until release is finished */
- req->misc.release.inode = igrab(file_inode(file));
+ ra->inode = igrab(file_inode(file));
/*
* Normally this will send the RELEASE request, however if
@@ -279,7 +299,7 @@ void fuse_release_common(struct file *file, bool isdir)
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
*/
- fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
+ fuse_file_put(ff, ff->fc->destroy, isdir);
}
static int fuse_open(struct inode *inode, struct file *file)
@@ -335,19 +355,27 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
return (u64) v0 + ((u64) v1 << 32);
}
-static struct fuse_req *fuse_find_writeback(struct fuse_inode *fi,
+struct fuse_writepage_args {
+ struct fuse_io_args ia;
+ struct list_head writepages_entry;
+ struct list_head queue_entry;
+ struct fuse_writepage_args *next;
+ struct inode *inode;
+};
+
+static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
pgoff_t idx_from, pgoff_t idx_to)
{
- struct fuse_req *req;
+ struct fuse_writepage_args *wpa;
- list_for_each_entry(req, &fi->writepages, writepages_entry) {
+ list_for_each_entry(wpa, &fi->writepages, writepages_entry) {
pgoff_t curr_index;
- WARN_ON(get_fuse_inode(req->inode) != fi);
- curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
- if (idx_from < curr_index + req->num_pages &&
+ WARN_ON(get_fuse_inode(wpa->inode) != fi);
+ curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
+ if (idx_from < curr_index + wpa->ia.ap.num_pages &&
curr_index <= idx_to) {
- return req;
+ return wpa;
}
}
return NULL;
@@ -383,12 +411,11 @@ static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
* Since fuse doesn't rely on the VM writeback tracking, this has to
* use some other means.
*/
-static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
+static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
struct fuse_inode *fi = get_fuse_inode(inode);
wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
- return 0;
}
/*
@@ -411,8 +438,8 @@ static int fuse_flush(struct file *file, fl_owner_t id)
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
- struct fuse_req *req;
struct fuse_flush_in inarg;
+ FUSE_ARGS(args);
int err;
if (is_bad_inode(inode))
@@ -433,19 +460,17 @@ static int fuse_flush(struct file *file, fl_owner_t id)
if (err)
return err;
- req = fuse_get_req_nofail_nopages(fc, file);
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.lock_owner = fuse_lock_owner_id(fc, id);
- req->in.h.opcode = FUSE_FLUSH;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- __set_bit(FR_FORCE, &req->flags);
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.opcode = FUSE_FLUSH;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.force = true;
+
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_flush = 1;
err = 0;
@@ -465,11 +490,11 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
- args.in.h.opcode = opcode;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
+ args.opcode = opcode;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
return fuse_simple_request(fc, &args);
}
@@ -523,35 +548,35 @@ out:
return err;
}
-void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
- size_t count, int opcode)
+void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
+ size_t count, int opcode)
{
- struct fuse_read_in *inarg = &req->misc.read.in;
struct fuse_file *ff = file->private_data;
+ struct fuse_args *args = &ia->ap.args;
- inarg->fh = ff->fh;
- inarg->offset = pos;
- inarg->size = count;
- inarg->flags = file->f_flags;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(struct fuse_read_in);
- req->in.args[0].value = inarg;
- req->out.argvar = 1;
- req->out.numargs = 1;
- req->out.args[0].size = count;
+ ia->read.in.fh = ff->fh;
+ ia->read.in.offset = pos;
+ ia->read.in.size = count;
+ ia->read.in.flags = file->f_flags;
+ args->opcode = opcode;
+ args->nodeid = ff->nodeid;
+ args->in_numargs = 1;
+ args->in_args[0].size = sizeof(ia->read.in);
+ args->in_args[0].value = &ia->read.in;
+ args->out_argvar = true;
+ args->out_numargs = 1;
+ args->out_args[0].size = count;
}
-static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
+static void fuse_release_user_pages(struct fuse_args_pages *ap,
+ bool should_dirty)
{
- unsigned i;
+ unsigned int i;
- for (i = 0; i < req->num_pages; i++) {
- struct page *page = req->pages[i];
+ for (i = 0; i < ap->num_pages; i++) {
if (should_dirty)
- set_page_dirty_lock(page);
- put_page(page);
+ set_page_dirty_lock(ap->pages[i]);
+ put_page(ap->pages[i]);
}
}
@@ -621,64 +646,96 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
kref_put(&io->refcnt, fuse_io_release);
}
-static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
+static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
+ unsigned int npages)
{
- struct fuse_io_priv *io = req->io;
+ struct fuse_io_args *ia;
+
+ ia = kzalloc(sizeof(*ia), GFP_KERNEL);
+ if (ia) {
+ ia->io = io;
+ ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
+ &ia->ap.descs);
+ if (!ia->ap.pages) {
+ kfree(ia);
+ ia = NULL;
+ }
+ }
+ return ia;
+}
+
+static void fuse_io_free(struct fuse_io_args *ia)
+{
+ kfree(ia->ap.pages);
+ kfree(ia);
+}
+
+static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_args *args,
+ int err)
+{
+ struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
+ struct fuse_io_priv *io = ia->io;
ssize_t pos = -1;
- fuse_release_user_pages(req, io->should_dirty);
+ fuse_release_user_pages(&ia->ap, io->should_dirty);
- if (io->write) {
- if (req->misc.write.in.size != req->misc.write.out.size)
- pos = req->misc.write.in.offset - io->offset +
- req->misc.write.out.size;
+ if (err) {
+ /* Nothing */
+ } else if (io->write) {
+ if (ia->write.out.size > ia->write.in.size) {
+ err = -EIO;
+ } else if (ia->write.in.size != ia->write.out.size) {
+ pos = ia->write.in.offset - io->offset +
+ ia->write.out.size;
+ }
} else {
- if (req->misc.read.in.size != req->out.args[0].size)
- pos = req->misc.read.in.offset - io->offset +
- req->out.args[0].size;
+ u32 outsize = args->out_args[0].size;
+
+ if (ia->read.in.size != outsize)
+ pos = ia->read.in.offset - io->offset + outsize;
}
- fuse_aio_complete(io, req->out.h.error, pos);
+ fuse_aio_complete(io, err, pos);
+ fuse_io_free(ia);
}
-static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
- size_t num_bytes, struct fuse_io_priv *io)
+static ssize_t fuse_async_req_send(struct fuse_conn *fc,
+ struct fuse_io_args *ia, size_t num_bytes)
{
+ ssize_t err;
+ struct fuse_io_priv *io = ia->io;
+
spin_lock(&io->lock);
kref_get(&io->refcnt);
io->size += num_bytes;
io->reqs++;
spin_unlock(&io->lock);
- req->io = io;
- req->end = fuse_aio_complete_req;
-
- __fuse_get_request(req);
- fuse_request_send_background(fc, req);
+ ia->ap.args.end = fuse_aio_complete_req;
+ err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL);
+ if (err)
+ fuse_aio_complete_req(fc, &ia->ap.args, err);
return num_bytes;
}
-static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
- loff_t pos, size_t count, fl_owner_t owner)
+static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
+ fl_owner_t owner)
{
- struct file *file = io->iocb->ki_filp;
+ struct file *file = ia->io->iocb->ki_filp;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- fuse_read_fill(req, file, pos, count, FUSE_READ);
+ fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
if (owner != NULL) {
- struct fuse_read_in *inarg = &req->misc.read.in;
-
- inarg->read_flags |= FUSE_READ_LOCKOWNER;
- inarg->lock_owner = fuse_lock_owner_id(fc, owner);
+ ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
+ ia->read.in.lock_owner = fuse_lock_owner_id(fc, owner);
}
- if (io->async)
- return fuse_async_req_send(fc, req, count, io);
+ if (ia->io->async)
+ return fuse_async_req_send(fc, ia, count);
- fuse_request_send(fc, req);
- return req->out.args[0].size;
+ return fuse_simple_request(fc, &ia->ap.args);
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
@@ -696,10 +753,9 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
spin_unlock(&fi->lock);
}
-static void fuse_short_read(struct fuse_req *req, struct inode *inode,
- u64 attr_ver)
+static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
+ struct fuse_args_pages *ap)
{
- size_t num_read = req->out.args[0].size;
struct fuse_conn *fc = get_fuse_conn(inode);
if (fc->writeback_cache) {
@@ -712,28 +768,31 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
int start_idx = num_read >> PAGE_SHIFT;
size_t off = num_read & (PAGE_SIZE - 1);
- for (i = start_idx; i < req->num_pages; i++) {
- zero_user_segment(req->pages[i], off, PAGE_SIZE);
+ for (i = start_idx; i < ap->num_pages; i++) {
+ zero_user_segment(ap->pages[i], off, PAGE_SIZE);
off = 0;
}
} else {
- loff_t pos = page_offset(req->pages[0]) + num_read;
+ loff_t pos = page_offset(ap->pages[0]) + num_read;
fuse_read_update_size(inode, pos, attr_ver);
}
}
static int fuse_do_readpage(struct file *file, struct page *page)
{
- struct kiocb iocb;
- struct fuse_io_priv io;
struct inode *inode = page->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
- size_t num_read;
loff_t pos = page_offset(page);
- size_t count = PAGE_SIZE;
+ struct fuse_page_desc desc = { .length = PAGE_SIZE };
+ struct fuse_io_args ia = {
+ .ap.args.page_zeroing = true,
+ .ap.args.out_pages = true,
+ .ap.num_pages = 1,
+ .ap.pages = &page,
+ .ap.descs = &desc,
+ };
+ ssize_t res;
u64 attr_ver;
- int err;
/*
* Page writeback can extend beyond the lifetime of the
@@ -742,35 +801,21 @@ static int fuse_do_readpage(struct file *file, struct page *page)
*/
fuse_wait_on_page_writeback(inode, page->index);
- req = fuse_get_req(fc, 1);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
attr_ver = fuse_get_attr_version(fc);
- req->out.page_zeroing = 1;
- req->out.argpages = 1;
- req->num_pages = 1;
- req->pages[0] = page;
- req->page_descs[0].length = count;
- init_sync_kiocb(&iocb, file);
- io = (struct fuse_io_priv) FUSE_IO_PRIV_SYNC(&iocb);
- num_read = fuse_send_read(req, &io, pos, count, NULL);
- err = req->out.h.error;
-
- if (!err) {
- /*
- * Short read means EOF. If file size is larger, truncate it
- */
- if (num_read < count)
- fuse_short_read(req, inode, attr_ver);
-
- SetPageUptodate(page);
- }
+ fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
+ res = fuse_simple_request(fc, &ia.ap.args);
+ if (res < 0)
+ return res;
+ /*
+ * Short read means EOF. If file size is larger, truncate it
+ */
+ if (res < desc.length)
+ fuse_short_read(inode, attr_ver, res, &ia.ap);
- fuse_put_request(fc, req);
+ SetPageUptodate(page);
- return err;
+ return 0;
}
static int fuse_readpage(struct file *file, struct page *page)
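
Both converted read paths keep the old rule that a short reply means EOF: when fewer bytes come back than were requested, the remainder of the pages is zero-filled (with writeback caching) or the cached file size is trimmed. A standalone sketch of the zero-fill half, using ordinary buffers instead of struct page (names invented):

#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

/*
 * Zero everything past num_read in an array of page-sized buffers, starting
 * with the partially filled page and clearing all following pages entirely.
 */
static void demo_zero_tail(char pages[][DEMO_PAGE_SIZE], unsigned int npages,
			   size_t num_read)
{
	unsigned int i = num_read / DEMO_PAGE_SIZE;	/* first affected page */
	size_t off = num_read % DEMO_PAGE_SIZE;		/* offset within it */

	for (; i < npages; i++) {
		memset(pages[i] + off, 0, DEMO_PAGE_SIZE - off);
		off = 0;
	}
}

int main(void)
{
	static char pages[2][DEMO_PAGE_SIZE];

	memset(pages, 'x', sizeof(pages));
	demo_zero_tail(pages, 2, 100);			/* short read of 100 bytes */
	printf("%d %d\n", pages[0][99], pages[0][100]);	/* 'x' then 0 */
	return 0;
}
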
@@ -789,15 +834,18 @@ static int fuse_readpage(struct file *file, struct page *page)
return err;
}
-static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_args *args,
+ int err)
{
int i;
- size_t count = req->misc.read.in.size;
- size_t num_read = req->out.args[0].size;
+ struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
+ struct fuse_args_pages *ap = &ia->ap;
+ size_t count = ia->read.in.size;
+ size_t num_read = args->out_args[0].size;
struct address_space *mapping = NULL;
- for (i = 0; mapping == NULL && i < req->num_pages; i++)
- mapping = req->pages[i]->mapping;
+ for (i = 0; mapping == NULL && i < ap->num_pages; i++)
+ mapping = ap->pages[i]->mapping;
if (mapping) {
struct inode *inode = mapping->host;
@@ -805,93 +853,97 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
/*
* Short read means EOF. If file size is larger, truncate it
*/
- if (!req->out.h.error && num_read < count)
- fuse_short_read(req, inode, req->misc.read.attr_ver);
+ if (!err && num_read < count)
+ fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
fuse_invalidate_atime(inode);
}
- for (i = 0; i < req->num_pages; i++) {
- struct page *page = req->pages[i];
- if (!req->out.h.error)
+ for (i = 0; i < ap->num_pages; i++) {
+ struct page *page = ap->pages[i];
+
+ if (!err)
SetPageUptodate(page);
else
SetPageError(page);
unlock_page(page);
put_page(page);
}
- if (req->ff)
- fuse_file_put(req->ff, false, false);
+ if (ia->ff)
+ fuse_file_put(ia->ff, false, false);
+
+ fuse_io_free(ia);
}
-static void fuse_send_readpages(struct fuse_req *req, struct file *file)
+static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- loff_t pos = page_offset(req->pages[0]);
- size_t count = req->num_pages << PAGE_SHIFT;
-
- req->out.argpages = 1;
- req->out.page_zeroing = 1;
- req->out.page_replace = 1;
- fuse_read_fill(req, file, pos, count, FUSE_READ);
- req->misc.read.attr_ver = fuse_get_attr_version(fc);
+ struct fuse_args_pages *ap = &ia->ap;
+ loff_t pos = page_offset(ap->pages[0]);
+ size_t count = ap->num_pages << PAGE_SHIFT;
+ int err;
+
+ ap->args.out_pages = true;
+ ap->args.page_zeroing = true;
+ ap->args.page_replace = true;
+ fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
+ ia->read.attr_ver = fuse_get_attr_version(fc);
if (fc->async_read) {
- req->ff = fuse_file_get(ff);
- req->end = fuse_readpages_end;
- fuse_request_send_background(fc, req);
+ ia->ff = fuse_file_get(ff);
+ ap->args.end = fuse_readpages_end;
+ err = fuse_simple_background(fc, &ap->args, GFP_KERNEL);
+ if (!err)
+ return;
} else {
- fuse_request_send(fc, req);
- fuse_readpages_end(fc, req);
- fuse_put_request(fc, req);
+ err = fuse_simple_request(fc, &ap->args);
}
+ fuse_readpages_end(fc, &ap->args, err);
}
struct fuse_fill_data {
- struct fuse_req *req;
+ struct fuse_io_args *ia;
struct file *file;
struct inode *inode;
- unsigned nr_pages;
+ unsigned int nr_pages;
+ unsigned int max_pages;
};
static int fuse_readpages_fill(void *_data, struct page *page)
{
struct fuse_fill_data *data = _data;
- struct fuse_req *req = data->req;
+ struct fuse_io_args *ia = data->ia;
+ struct fuse_args_pages *ap = &ia->ap;
struct inode *inode = data->inode;
struct fuse_conn *fc = get_fuse_conn(inode);
fuse_wait_on_page_writeback(inode, page->index);
- if (req->num_pages &&
- (req->num_pages == fc->max_pages ||
- (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
- req->pages[req->num_pages - 1]->index + 1 != page->index)) {
- unsigned int nr_alloc = min_t(unsigned int, data->nr_pages,
- fc->max_pages);
- fuse_send_readpages(req, data->file);
- if (fc->async_read)
- req = fuse_get_req_for_background(fc, nr_alloc);
- else
- req = fuse_get_req(fc, nr_alloc);
-
- data->req = req;
- if (IS_ERR(req)) {
+ if (ap->num_pages &&
+ (ap->num_pages == fc->max_pages ||
+ (ap->num_pages + 1) * PAGE_SIZE > fc->max_read ||
+ ap->pages[ap->num_pages - 1]->index + 1 != page->index)) {
+ data->max_pages = min_t(unsigned int, data->nr_pages,
+ fc->max_pages);
+ fuse_send_readpages(ia, data->file);
+ data->ia = ia = fuse_io_alloc(NULL, data->max_pages);
+ if (!ia) {
unlock_page(page);
- return PTR_ERR(req);
+ return -ENOMEM;
}
+ ap = &ia->ap;
}
- if (WARN_ON(req->num_pages >= req->max_pages)) {
+ if (WARN_ON(ap->num_pages >= data->max_pages)) {
unlock_page(page);
- fuse_put_request(fc, req);
+ fuse_io_free(ia);
return -EIO;
}
get_page(page);
- req->pages[req->num_pages] = page;
- req->page_descs[req->num_pages].length = PAGE_SIZE;
- req->num_pages++;
+ ap->pages[ap->num_pages] = page;
+ ap->descs[ap->num_pages].length = PAGE_SIZE;
+ ap->num_pages++;
data->nr_pages--;
return 0;
}
@@ -903,7 +955,6 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_data data;
int err;
- unsigned int nr_alloc = min_t(unsigned int, nr_pages, fc->max_pages);
err = -EIO;
if (is_bad_inode(inode))
@@ -911,21 +962,20 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
data.file = file;
data.inode = inode;
- if (fc->async_read)
- data.req = fuse_get_req_for_background(fc, nr_alloc);
- else
- data.req = fuse_get_req(fc, nr_alloc);
data.nr_pages = nr_pages;
- err = PTR_ERR(data.req);
- if (IS_ERR(data.req))
+ data.max_pages = min_t(unsigned int, nr_pages, fc->max_pages);
+;
+ data.ia = fuse_io_alloc(NULL, data.max_pages);
+ err = -ENOMEM;
+ if (!data.ia)
goto out;
err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
if (!err) {
- if (data.req->num_pages)
- fuse_send_readpages(data.req, file);
+ if (data.ia->ap.num_pages)
+ fuse_send_readpages(data.ia, file);
else
- fuse_put_request(fc, data.req);
+ fuse_io_free(data.ia);
}
out:
return err;
@@ -952,54 +1002,65 @@ static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
return generic_file_read_iter(iocb, to);
}
-static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
- loff_t pos, size_t count)
+static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
+ loff_t pos, size_t count)
{
- struct fuse_write_in *inarg = &req->misc.write.in;
- struct fuse_write_out *outarg = &req->misc.write.out;
+ struct fuse_args *args = &ia->ap.args;
- inarg->fh = ff->fh;
- inarg->offset = pos;
- inarg->size = count;
- req->in.h.opcode = FUSE_WRITE;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 2;
+ ia->write.in.fh = ff->fh;
+ ia->write.in.offset = pos;
+ ia->write.in.size = count;
+ args->opcode = FUSE_WRITE;
+ args->nodeid = ff->nodeid;
+ args->in_numargs = 2;
if (ff->fc->minor < 9)
- req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
+ args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
else
- req->in.args[0].size = sizeof(struct fuse_write_in);
- req->in.args[0].value = inarg;
- req->in.args[1].size = count;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(struct fuse_write_out);
- req->out.args[0].value = outarg;
+ args->in_args[0].size = sizeof(ia->write.in);
+ args->in_args[0].value = &ia->write.in;
+ args->in_args[1].size = count;
+ args->out_numargs = 1;
+ args->out_args[0].size = sizeof(ia->write.out);
+ args->out_args[0].value = &ia->write.out;
+}
+
+static unsigned int fuse_write_flags(struct kiocb *iocb)
+{
+ unsigned int flags = iocb->ki_filp->f_flags;
+
+ if (iocb->ki_flags & IOCB_DSYNC)
+ flags |= O_DSYNC;
+ if (iocb->ki_flags & IOCB_SYNC)
+ flags |= O_SYNC;
+
+ return flags;
}
-static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
- loff_t pos, size_t count, fl_owner_t owner)
+static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
+ size_t count, fl_owner_t owner)
{
- struct kiocb *iocb = io->iocb;
+ struct kiocb *iocb = ia->io->iocb;
struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- struct fuse_write_in *inarg = &req->misc.write.in;
+ struct fuse_write_in *inarg = &ia->write.in;
+ ssize_t err;
- fuse_write_fill(req, ff, pos, count);
- inarg->flags = file->f_flags;
- if (iocb->ki_flags & IOCB_DSYNC)
- inarg->flags |= O_DSYNC;
- if (iocb->ki_flags & IOCB_SYNC)
- inarg->flags |= O_SYNC;
+ fuse_write_args_fill(ia, ff, pos, count);
+ inarg->flags = fuse_write_flags(iocb);
if (owner != NULL) {
inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
inarg->lock_owner = fuse_lock_owner_id(fc, owner);
}
- if (io->async)
- return fuse_async_req_send(fc, req, count, io);
+ if (ia->io->async)
+ return fuse_async_req_send(fc, ia, count);
+
+ err = fuse_simple_request(fc, &ia->ap.args);
+ if (!err && ia->write.out.size > count)
+ err = -EIO;
- fuse_request_send(fc, req);
- return req->misc.write.out.size;
+ return err ?: ia->write.out.size;
}
bool fuse_write_update_size(struct inode *inode, loff_t pos)
@@ -1019,26 +1080,33 @@ bool fuse_write_update_size(struct inode *inode, loff_t pos)
return ret;
}
-static size_t fuse_send_write_pages(struct fuse_req *req, struct kiocb *iocb,
- struct inode *inode, loff_t pos,
- size_t count)
+static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
+ struct kiocb *iocb, struct inode *inode,
+ loff_t pos, size_t count)
{
- size_t res;
- unsigned offset;
- unsigned i;
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
+ struct fuse_args_pages *ap = &ia->ap;
+ struct file *file = iocb->ki_filp;
+ struct fuse_file *ff = file->private_data;
+ struct fuse_conn *fc = ff->fc;
+ unsigned int offset, i;
+ int err;
- for (i = 0; i < req->num_pages; i++)
- fuse_wait_on_page_writeback(inode, req->pages[i]->index);
+ for (i = 0; i < ap->num_pages; i++)
+ fuse_wait_on_page_writeback(inode, ap->pages[i]->index);
- res = fuse_send_write(req, &io, pos, count, NULL);
+ fuse_write_args_fill(ia, ff, pos, count);
+ ia->write.in.flags = fuse_write_flags(iocb);
- offset = req->page_descs[0].offset;
- count = res;
- for (i = 0; i < req->num_pages; i++) {
- struct page *page = req->pages[i];
+ err = fuse_simple_request(fc, &ap->args);
+ if (!err && ia->write.out.size > count)
+ err = -EIO;
- if (!req->out.h.error && !offset && count >= PAGE_SIZE)
+ offset = ap->descs[0].offset;
+ count = ia->write.out.size;
+ for (i = 0; i < ap->num_pages; i++) {
+ struct page *page = ap->pages[i];
+
+ if (!err && !offset && count >= PAGE_SIZE)
SetPageUptodate(page);
if (count > PAGE_SIZE - offset)
@@ -1051,20 +1119,21 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct kiocb *iocb,
put_page(page);
}
- return res;
+ return err;
}
-static ssize_t fuse_fill_write_pages(struct fuse_req *req,
- struct address_space *mapping,
- struct iov_iter *ii, loff_t pos)
+static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
+ struct address_space *mapping,
+ struct iov_iter *ii, loff_t pos,
+ unsigned int max_pages)
{
struct fuse_conn *fc = get_fuse_conn(mapping->host);
unsigned offset = pos & (PAGE_SIZE - 1);
size_t count = 0;
int err;
- req->in.argpages = 1;
- req->page_descs[0].offset = offset;
+ ap->args.in_pages = true;
+ ap->descs[0].offset = offset;
do {
size_t tmp;
@@ -1100,9 +1169,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
}
err = 0;
- req->pages[req->num_pages] = page;
- req->page_descs[req->num_pages].length = tmp;
- req->num_pages++;
+ ap->pages[ap->num_pages] = page;
+ ap->descs[ap->num_pages].length = tmp;
+ ap->num_pages++;
count += tmp;
pos += tmp;
@@ -1113,7 +1182,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
if (!fc->big_writes)
break;
} while (iov_iter_count(ii) && count < fc->max_write &&
- req->num_pages < req->max_pages && offset == 0);
+ ap->num_pages < max_pages && offset == 0);
return count > 0 ? count : err;
}
@@ -1141,27 +1210,27 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
do {
- struct fuse_req *req;
ssize_t count;
+ struct fuse_io_args ia = {};
+ struct fuse_args_pages *ap = &ia.ap;
unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
fc->max_pages);
- req = fuse_get_req(fc, nr_pages);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
+ ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
+ if (!ap->pages) {
+ err = -ENOMEM;
break;
}
- count = fuse_fill_write_pages(req, mapping, ii, pos);
+ count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
if (count <= 0) {
err = count;
} else {
- size_t num_written;
-
- num_written = fuse_send_write_pages(req, iocb, inode,
- pos, count);
- err = req->out.h.error;
+ err = fuse_send_write_pages(&ia, iocb, inode,
+ pos, count);
if (!err) {
+ size_t num_written = ia.write.out.size;
+
res += num_written;
pos += num_written;
@@ -1170,7 +1239,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
err = -EIO;
}
}
- fuse_put_request(fc, req);
+ kfree(ap->pages);
} while (!err && iov_iter_count(ii));
if (res > 0)
@@ -1258,14 +1327,14 @@ out:
return written ? written : err;
}
-static inline void fuse_page_descs_length_init(struct fuse_req *req,
- unsigned index, unsigned nr_pages)
+static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
+ unsigned int index,
+ unsigned int nr_pages)
{
int i;
for (i = index; i < index + nr_pages; i++)
- req->page_descs[i].length = PAGE_SIZE -
- req->page_descs[i].offset;
+ descs[i].length = PAGE_SIZE - descs[i].offset;
}
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
@@ -1279,8 +1348,9 @@ static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
return min(iov_iter_single_seg_count(ii), max_size);
}
-static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
- size_t *nbytesp, int write)
+static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
+ size_t *nbytesp, int write,
+ unsigned int max_pages)
{
size_t nbytes = 0; /* # bytes already packed in req */
ssize_t ret = 0;
@@ -1291,21 +1361,21 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
if (write)
- req->in.args[1].value = (void *) user_addr;
+ ap->args.in_args[1].value = (void *) user_addr;
else
- req->out.args[0].value = (void *) user_addr;
+ ap->args.out_args[0].value = (void *) user_addr;
iov_iter_advance(ii, frag_size);
*nbytesp = frag_size;
return 0;
}
- while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
+ while (nbytes < *nbytesp && ap->num_pages < max_pages) {
unsigned npages;
size_t start;
- ret = iov_iter_get_pages(ii, &req->pages[req->num_pages],
+ ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
*nbytesp - nbytes,
- req->max_pages - req->num_pages,
+ max_pages - ap->num_pages,
&start);
if (ret < 0)
break;
@@ -1316,18 +1386,18 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
ret += start;
npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
- req->page_descs[req->num_pages].offset = start;
- fuse_page_descs_length_init(req, req->num_pages, npages);
+ ap->descs[ap->num_pages].offset = start;
+ fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
- req->num_pages += npages;
- req->page_descs[req->num_pages - 1].length -=
+ ap->num_pages += npages;
+ ap->descs[ap->num_pages - 1].length -=
(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
}
if (write)
- req->in.argpages = 1;
+ ap->args.in_pages = 1;
else
- req->out.argpages = 1;
+ ap->args.out_pages = 1;
*nbytesp = nbytes;
@@ -1349,17 +1419,16 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
pgoff_t idx_from = pos >> PAGE_SHIFT;
pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
ssize_t res = 0;
- struct fuse_req *req;
int err = 0;
+ struct fuse_io_args *ia;
+ unsigned int max_pages;
- if (io->async)
- req = fuse_get_req_for_background(fc, iov_iter_npages(iter,
- fc->max_pages));
- else
- req = fuse_get_req(fc, iov_iter_npages(iter, fc->max_pages));
- if (IS_ERR(req))
- return PTR_ERR(req);
+ max_pages = iov_iter_npages(iter, fc->max_pages);
+ ia = fuse_io_alloc(io, max_pages);
+ if (!ia)
+ return -ENOMEM;
+ ia->io = io;
if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
if (!write)
inode_lock(inode);
@@ -1370,54 +1439,49 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
io->should_dirty = !write && iter_is_iovec(iter);
while (count) {
- size_t nres;
+ ssize_t nres;
fl_owner_t owner = current->files;
size_t nbytes = min(count, nmax);
- err = fuse_get_user_pages(req, iter, &nbytes, write);
+
+ err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
+ max_pages);
if (err && !nbytes)
break;
if (write) {
- if (!capable(CAP_FSETID)) {
- struct fuse_write_in *inarg;
+ if (!capable(CAP_FSETID))
+ ia->write.in.write_flags |= FUSE_WRITE_KILL_PRIV;
- inarg = &req->misc.write.in;
- inarg->write_flags |= FUSE_WRITE_KILL_PRIV;
- }
- nres = fuse_send_write(req, io, pos, nbytes, owner);
+ nres = fuse_send_write(ia, pos, nbytes, owner);
} else {
- nres = fuse_send_read(req, io, pos, nbytes, owner);
+ nres = fuse_send_read(ia, pos, nbytes, owner);
}
- if (!io->async)
- fuse_release_user_pages(req, io->should_dirty);
- if (req->out.h.error) {
- err = req->out.h.error;
- break;
- } else if (nres > nbytes) {
- res = 0;
- err = -EIO;
+ if (!io->async || nres < 0) {
+ fuse_release_user_pages(&ia->ap, io->should_dirty);
+ fuse_io_free(ia);
+ }
+ ia = NULL;
+ if (nres < 0) {
+ err = nres;
break;
}
+ WARN_ON(nres > nbytes);
+
count -= nres;
res += nres;
pos += nres;
if (nres != nbytes)
break;
if (count) {
- fuse_put_request(fc, req);
- if (io->async)
- req = fuse_get_req_for_background(fc,
- iov_iter_npages(iter, fc->max_pages));
- else
- req = fuse_get_req(fc, iov_iter_npages(iter,
- fc->max_pages));
- if (IS_ERR(req))
+ max_pages = iov_iter_npages(iter, fc->max_pages);
+ ia = fuse_io_alloc(io, max_pages);
+ if (!ia)
break;
}
}
- if (!IS_ERR(req))
- fuse_put_request(fc, req);
+ if (ia)
+ fuse_io_free(ia);
if (res > 0)
*ppos = pos;
@@ -1509,45 +1573,53 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
return fuse_direct_write_iter(iocb, from);
}
-static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
+ struct fuse_args_pages *ap = &wpa->ia.ap;
int i;
- for (i = 0; i < req->num_pages; i++)
- __free_page(req->pages[i]);
+ for (i = 0; i < ap->num_pages; i++)
+ __free_page(ap->pages[i]);
+
+ if (wpa->ia.ff)
+ fuse_file_put(wpa->ia.ff, false, false);
- if (req->ff)
- fuse_file_put(req->ff, false, false);
+ kfree(ap->pages);
+ kfree(wpa);
}
-static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_writepage_finish(struct fuse_conn *fc,
+ struct fuse_writepage_args *wpa)
{
- struct inode *inode = req->inode;
+ struct fuse_args_pages *ap = &wpa->ia.ap;
+ struct inode *inode = wpa->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
- list_del(&req->writepages_entry);
- for (i = 0; i < req->num_pages; i++) {
+ list_del(&wpa->writepages_entry);
+ for (i = 0; i < ap->num_pages; i++) {
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(req->pages[i], NR_WRITEBACK_TEMP);
+ dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
}
wake_up(&fi->page_waitq);
}
/* Called under fi->lock, may release and reacquire it */
-static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
- loff_t size)
+static void fuse_send_writepage(struct fuse_conn *fc,
+ struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
- struct fuse_req *aux, *next;
- struct fuse_inode *fi = get_fuse_inode(req->inode);
- struct fuse_write_in *inarg = &req->misc.write.in;
- __u64 data_size = req->num_pages * PAGE_SIZE;
- bool queued;
+ struct fuse_writepage_args *aux, *next;
+ struct fuse_inode *fi = get_fuse_inode(wpa->inode);
+ struct fuse_write_in *inarg = &wpa->ia.write.in;
+ struct fuse_args *args = &wpa->ia.ap.args;
+ __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
+ int err;
+ fi->writectr++;
if (inarg->offset + data_size <= size) {
inarg->size = data_size;
} else if (inarg->offset < size) {
@@ -1557,29 +1629,36 @@ __acquires(fi->lock)
goto out_free;
}
- req->in.args[1].size = inarg->size;
- queued = fuse_request_queue_background(fc, req);
+ args->in_args[1].size = inarg->size;
+ args->force = true;
+ args->nocreds = true;
+
+ err = fuse_simple_background(fc, args, GFP_ATOMIC);
+ if (err == -ENOMEM) {
+ spin_unlock(&fi->lock);
+ err = fuse_simple_background(fc, args, GFP_NOFS | __GFP_NOFAIL);
+ spin_lock(&fi->lock);
+ }
+
/* Fails on broken connection only */
- if (unlikely(!queued))
+ if (unlikely(err))
goto out_free;
- fi->writectr++;
return;
out_free:
- fuse_writepage_finish(fc, req);
+ fi->writectr--;
+ fuse_writepage_finish(fc, wpa);
spin_unlock(&fi->lock);
/* After fuse_writepage_finish() aux request list is private */
- for (aux = req->misc.write.next; aux; aux = next) {
- next = aux->misc.write.next;
- aux->misc.write.next = NULL;
- fuse_writepage_free(fc, aux);
- fuse_put_request(fc, aux);
+ for (aux = wpa->next; aux; aux = next) {
+ next = aux->next;
+ aux->next = NULL;
+ fuse_writepage_free(aux);
}
- fuse_writepage_free(fc, req);
- fuse_put_request(fc, req);
+ fuse_writepage_free(wpa);
spin_lock(&fi->lock);
}
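The hunk above is the first user of the pattern the rest of this series leans on: embed a struct fuse_args in a request-specific container, queue it with fuse_simple_background(), and let the ->end callback recover the container with container_of() when the reply (or an error) comes back. A minimal sketch of that pattern, not part of the patch — the request type, opcode choice and GFP flags below are illustrative only:

#include <linux/slab.h>
#include "fuse_i.h"

/* Sketch only: the fuse_simple_background() + ->end completion pattern. */
struct my_async_args {
	struct fuse_args args;
	struct fuse_write_in in;		/* request-specific payload */
};

static void my_async_end(struct fuse_conn *fc, struct fuse_args *args, int error)
{
	/* The args pointer is embedded in the container; recover and free it. */
	struct my_async_args *ma = container_of(args, struct my_async_args, args);

	kfree(ma);
}

static int my_queue_async(struct fuse_conn *fc, u64 nodeid)
{
	struct my_async_args *ma;
	int err;

	ma = kzalloc(sizeof(*ma), GFP_NOFS);
	if (!ma)
		return -ENOMEM;

	ma->args.opcode = FUSE_WRITE;		/* placeholder opcode */
	ma->args.nodeid = nodeid;
	ma->args.in_numargs = 1;
	ma->args.in_args[0].size = sizeof(ma->in);
	ma->args.in_args[0].value = &ma->in;
	ma->args.force = true;			/* same flags fuse_send_writepage() sets */
	ma->args.nocreds = true;
	ma->args.end = my_async_end;		/* called when the reply arrives */

	err = fuse_simple_background(fc, &ma->args, GFP_NOFS);
	if (err)				/* queueing failed: complete inline */
		my_async_end(fc, &ma->args, err);
	return err;
}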
@@ -1596,29 +1675,34 @@ __acquires(fi->lock)
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
loff_t crop = i_size_read(inode);
- struct fuse_req *req;
+ struct fuse_writepage_args *wpa;
while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
- req = list_entry(fi->queued_writes.next, struct fuse_req, list);
- list_del_init(&req->list);
- fuse_send_writepage(fc, req, crop);
+ wpa = list_entry(fi->queued_writes.next,
+ struct fuse_writepage_args, queue_entry);
+ list_del_init(&wpa->queue_entry);
+ fuse_send_writepage(fc, wpa, crop);
}
}
-static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
+ int error)
{
- struct inode *inode = req->inode;
+ struct fuse_writepage_args *wpa =
+ container_of(args, typeof(*wpa), ia.ap.args);
+ struct inode *inode = wpa->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- mapping_set_error(inode->i_mapping, req->out.h.error);
+ mapping_set_error(inode->i_mapping, error);
spin_lock(&fi->lock);
- while (req->misc.write.next) {
+ while (wpa->next) {
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_write_in *inarg = &req->misc.write.in;
- struct fuse_req *next = req->misc.write.next;
- req->misc.write.next = next->misc.write.next;
- next->misc.write.next = NULL;
- next->ff = fuse_file_get(req->ff);
+ struct fuse_write_in *inarg = &wpa->ia.write.in;
+ struct fuse_writepage_args *next = wpa->next;
+
+ wpa->next = next->next;
+ next->next = NULL;
+ next->ia.ff = fuse_file_get(wpa->ia.ff);
list_add(&next->writepages_entry, &fi->writepages);
/*
@@ -1647,9 +1731,9 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
fuse_send_writepage(fc, next, inarg->offset + inarg->size);
}
fi->writectr--;
- fuse_writepage_finish(fc, req);
+ fuse_writepage_finish(fc, wpa);
spin_unlock(&fi->lock);
- fuse_writepage_free(fc, req);
+ fuse_writepage_free(wpa);
}
static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
@@ -1691,52 +1775,71 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
return err;
}
+static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
+{
+ struct fuse_writepage_args *wpa;
+ struct fuse_args_pages *ap;
+
+ wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
+ if (wpa) {
+ ap = &wpa->ia.ap;
+ ap->num_pages = 0;
+ ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
+ if (!ap->pages) {
+ kfree(wpa);
+ wpa = NULL;
+ }
+ }
+ return wpa;
+
+}
+
static int fuse_writepage_locked(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_req *req;
+ struct fuse_writepage_args *wpa;
+ struct fuse_args_pages *ap;
struct page *tmp_page;
int error = -ENOMEM;
set_page_writeback(page);
- req = fuse_request_alloc_nofs(1);
- if (!req)
+ wpa = fuse_writepage_args_alloc();
+ if (!wpa)
goto err;
+ ap = &wpa->ia.ap;
- /* writeback always goes to bg_queue */
- __set_bit(FR_BACKGROUND, &req->flags);
tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!tmp_page)
goto err_free;
error = -EIO;
- req->ff = fuse_write_file_get(fc, fi);
- if (!req->ff)
+ wpa->ia.ff = fuse_write_file_get(fc, fi);
+ if (!wpa->ia.ff)
goto err_nofile;
- fuse_write_fill(req, req->ff, page_offset(page), 0);
+ fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);
copy_highpage(tmp_page, page);
- req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
- req->misc.write.next = NULL;
- req->in.argpages = 1;
- req->num_pages = 1;
- req->pages[0] = tmp_page;
- req->page_descs[0].offset = 0;
- req->page_descs[0].length = PAGE_SIZE;
- req->end = fuse_writepage_end;
- req->inode = inode;
+ wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
+ wpa->next = NULL;
+ ap->args.in_pages = true;
+ ap->num_pages = 1;
+ ap->pages[0] = tmp_page;
+ ap->descs[0].offset = 0;
+ ap->descs[0].length = PAGE_SIZE;
+ ap->args.end = fuse_writepage_end;
+ wpa->inode = inode;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
spin_lock(&fi->lock);
- list_add(&req->writepages_entry, &fi->writepages);
- list_add_tail(&req->list, &fi->queued_writes);
+ list_add(&wpa->writepages_entry, &fi->writepages);
+ list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fi->lock);
@@ -1747,7 +1850,7 @@ static int fuse_writepage_locked(struct page *page)
err_nofile:
__free_page(tmp_page);
err_free:
- fuse_request_free(req);
+ kfree(wpa);
err:
mapping_set_error(page->mapping, error);
end_page_writeback(page);
@@ -1767,6 +1870,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
return 0;
}
@@ -1777,23 +1881,50 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
}
struct fuse_fill_wb_data {
- struct fuse_req *req;
+ struct fuse_writepage_args *wpa;
struct fuse_file *ff;
struct inode *inode;
struct page **orig_pages;
+ unsigned int max_pages;
};
+static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
+{
+ struct fuse_args_pages *ap = &data->wpa->ia.ap;
+ struct fuse_conn *fc = get_fuse_conn(data->inode);
+ struct page **pages;
+ struct fuse_page_desc *descs;
+ unsigned int npages = min_t(unsigned int,
+ max_t(unsigned int, data->max_pages * 2,
+ FUSE_DEFAULT_MAX_PAGES_PER_REQ),
+ fc->max_pages);
+ WARN_ON(npages <= data->max_pages);
+
+ pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
+ if (!pages)
+ return false;
+
+ memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
+ memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
+ kfree(ap->pages);
+ ap->pages = pages;
+ ap->descs = descs;
+ data->max_pages = npages;
+
+ return true;
+}
+
static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
- struct fuse_req *req = data->req;
+ struct fuse_writepage_args *wpa = data->wpa;
struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- int num_pages = req->num_pages;
+ int num_pages = wpa->ia.ap.num_pages;
int i;
- req->ff = fuse_file_get(data->ff);
+ wpa->ia.ff = fuse_file_get(data->ff);
spin_lock(&fi->lock);
- list_add_tail(&req->list, &fi->queued_writes);
+ list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fi->lock);
@@ -1808,54 +1939,52 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
* this new request onto the auxiliary list, otherwise reuse the existing one by
* copying the new page contents over to the old temporary page.
*/
-static bool fuse_writepage_in_flight(struct fuse_req *new_req,
+static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
struct page *page)
{
- struct fuse_conn *fc = get_fuse_conn(new_req->inode);
- struct fuse_inode *fi = get_fuse_inode(new_req->inode);
- struct fuse_req *tmp;
- struct fuse_req *old_req;
+ struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
+ struct fuse_writepage_args *tmp;
+ struct fuse_writepage_args *old_wpa;
+ struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
- WARN_ON(new_req->num_pages != 0);
+ WARN_ON(new_ap->num_pages != 0);
spin_lock(&fi->lock);
- list_del(&new_req->writepages_entry);
- old_req = fuse_find_writeback(fi, page->index, page->index);
- if (!old_req) {
- list_add(&new_req->writepages_entry, &fi->writepages);
+ list_del(&new_wpa->writepages_entry);
+ old_wpa = fuse_find_writeback(fi, page->index, page->index);
+ if (!old_wpa) {
+ list_add(&new_wpa->writepages_entry, &fi->writepages);
spin_unlock(&fi->lock);
return false;
}
- new_req->num_pages = 1;
- for (tmp = old_req->misc.write.next; tmp; tmp = tmp->misc.write.next) {
+ new_ap->num_pages = 1;
+ for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
pgoff_t curr_index;
- WARN_ON(tmp->inode != new_req->inode);
- curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
+ WARN_ON(tmp->inode != new_wpa->inode);
+ curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
if (curr_index == page->index) {
- WARN_ON(tmp->num_pages != 1);
- WARN_ON(!test_bit(FR_PENDING, &tmp->flags));
- swap(tmp->pages[0], new_req->pages[0]);
+ WARN_ON(tmp->ia.ap.num_pages != 1);
+ swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
break;
}
}
if (!tmp) {
- new_req->misc.write.next = old_req->misc.write.next;
- old_req->misc.write.next = new_req;
+ new_wpa->next = old_wpa->next;
+ old_wpa->next = new_wpa;
}
spin_unlock(&fi->lock);
if (tmp) {
- struct backing_dev_info *bdi = inode_to_bdi(new_req->inode);
+ struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
+ dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
- fuse_writepage_free(fc, new_req);
- fuse_request_free(new_req);
+ fuse_writepage_free(new_wpa);
}
return true;
@@ -1865,7 +1994,8 @@ static int fuse_writepages_fill(struct page *page,
struct writeback_control *wbc, void *_data)
{
struct fuse_fill_wb_data *data = _data;
- struct fuse_req *req = data->req;
+ struct fuse_writepage_args *wpa = data->wpa;
+ struct fuse_args_pages *ap = &wpa->ia.ap;
struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -1875,7 +2005,7 @@ static int fuse_writepages_fill(struct page *page,
if (!data->ff) {
err = -EIO;
- data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+ data->ff = fuse_write_file_get(fc, fi);
if (!data->ff)
goto out_unlock;
}
@@ -1888,16 +2018,16 @@ static int fuse_writepages_fill(struct page *page,
*/
is_writeback = fuse_page_is_writeback(inode, page->index);
- if (req && req->num_pages &&
- (is_writeback || req->num_pages == fc->max_pages ||
- (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
- data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
+ if (wpa && ap->num_pages &&
+ (is_writeback || ap->num_pages == fc->max_pages ||
+ (ap->num_pages + 1) * PAGE_SIZE > fc->max_write ||
+ data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) {
fuse_writepages_send(data);
- data->req = NULL;
- } else if (req && req->num_pages == req->max_pages) {
- if (!fuse_req_realloc_pages(fc, req, GFP_NOFS)) {
+ data->wpa = NULL;
+ } else if (wpa && ap->num_pages == data->max_pages) {
+ if (!fuse_pages_realloc(data)) {
fuse_writepages_send(data);
- req = data->req = NULL;
+ data->wpa = NULL;
}
}
@@ -1915,59 +2045,58 @@ static int fuse_writepages_fill(struct page *page,
* This is ensured by holding the page lock in page_mkwrite() while
* checking fuse_page_is_writeback(). We already hold the page lock
* since clear_page_dirty_for_io() and keep it held until we add the
- * request to the fi->writepages list and increment req->num_pages.
+ * request to the fi->writepages list and increment ap->num_pages.
* After this fuse_page_is_writeback() will indicate that the page is
* under writeback, so we can release the page lock.
*/
- if (data->req == NULL) {
- struct fuse_inode *fi = get_fuse_inode(inode);
-
+ if (data->wpa == NULL) {
err = -ENOMEM;
- req = fuse_request_alloc_nofs(FUSE_REQ_INLINE_PAGES);
- if (!req) {
+ wpa = fuse_writepage_args_alloc();
+ if (!wpa) {
__free_page(tmp_page);
goto out_unlock;
}
+ data->max_pages = 1;
- fuse_write_fill(req, data->ff, page_offset(page), 0);
- req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
- req->misc.write.next = NULL;
- req->in.argpages = 1;
- __set_bit(FR_BACKGROUND, &req->flags);
- req->num_pages = 0;
- req->end = fuse_writepage_end;
- req->inode = inode;
+ ap = &wpa->ia.ap;
+ fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
+ wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
+ wpa->next = NULL;
+ ap->args.in_pages = true;
+ ap->args.end = fuse_writepage_end;
+ ap->num_pages = 0;
+ wpa->inode = inode;
spin_lock(&fi->lock);
- list_add(&req->writepages_entry, &fi->writepages);
+ list_add(&wpa->writepages_entry, &fi->writepages);
spin_unlock(&fi->lock);
- data->req = req;
+ data->wpa = wpa;
}
set_page_writeback(page);
copy_highpage(tmp_page, page);
- req->pages[req->num_pages] = tmp_page;
- req->page_descs[req->num_pages].offset = 0;
- req->page_descs[req->num_pages].length = PAGE_SIZE;
+ ap->pages[ap->num_pages] = tmp_page;
+ ap->descs[ap->num_pages].offset = 0;
+ ap->descs[ap->num_pages].length = PAGE_SIZE;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
err = 0;
- if (is_writeback && fuse_writepage_in_flight(req, page)) {
+ if (is_writeback && fuse_writepage_in_flight(wpa, page)) {
end_page_writeback(page);
- data->req = NULL;
+ data->wpa = NULL;
goto out_unlock;
}
- data->orig_pages[req->num_pages] = page;
+ data->orig_pages[ap->num_pages] = page;
/*
* Protected by fi->lock against concurrent access by
* fuse_page_is_writeback().
*/
spin_lock(&fi->lock);
- req->num_pages++;
+ ap->num_pages++;
spin_unlock(&fi->lock);
out_unlock:
@@ -1989,7 +2118,7 @@ static int fuse_writepages(struct address_space *mapping,
goto out;
data.inode = inode;
- data.req = NULL;
+ data.wpa = NULL;
data.ff = NULL;
err = -ENOMEM;
@@ -2000,9 +2129,9 @@ static int fuse_writepages(struct address_space *mapping,
goto out;
err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
- if (data.req) {
+ if (data.wpa) {
/* Ignore errors if we can write at least one page */
- BUG_ON(!data.req->num_pages);
+ WARN_ON(!data.wpa->ia.ap.num_pages);
fuse_writepages_send(&data);
err = 0;
}
@@ -2222,11 +2351,11 @@ static void fuse_lk_fill(struct fuse_args *args, struct file *file,
inarg->lk.pid = pid;
if (flock)
inarg->lk_flags |= FUSE_LK_FLOCK;
- args->in.h.opcode = opcode;
- args->in.h.nodeid = get_node_id(inode);
- args->in.numargs = 1;
- args->in.args[0].size = sizeof(*inarg);
- args->in.args[0].value = inarg;
+ args->opcode = opcode;
+ args->nodeid = get_node_id(inode);
+ args->in_numargs = 1;
+ args->in_args[0].size = sizeof(*inarg);
+ args->in_args[0].value = inarg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
@@ -2239,9 +2368,9 @@ static int fuse_getlk(struct file *file, struct file_lock *fl)
int err;
fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
err = convert_fuse_file_lock(fc, &outarg.lk, fl);
@@ -2336,14 +2465,14 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
memset(&inarg, 0, sizeof(inarg));
inarg.block = block;
inarg.blocksize = inode->i_sb->s_blocksize;
- args.in.h.opcode = FUSE_BMAP;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_BMAP;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS)
fc->no_bmap = 1;
@@ -2368,14 +2497,14 @@ static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
if (fc->no_lseek)
goto fallback;
- args.in.h.opcode = FUSE_LSEEK;
- args.in.h.nodeid = ff->nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_LSEEK;
+ args.nodeid = ff->nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (err) {
if (err == -ENOSYS) {
@@ -2573,14 +2702,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
.flags = flags
};
struct fuse_ioctl_out outarg;
- struct fuse_req *req = NULL;
- struct page **pages = NULL;
struct iovec *iov_page = NULL;
struct iovec *in_iov = NULL, *out_iov = NULL;
- unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
- size_t in_size, out_size, transferred, c;
+ unsigned int in_iovs = 0, out_iovs = 0, max_pages;
+ size_t in_size, out_size, c;
+ ssize_t transferred;
int err, i;
struct iov_iter ii;
+ struct fuse_args_pages ap = {};
#if BITS_PER_LONG == 32
inarg.flags |= FUSE_IOCTL_32BIT;
@@ -2598,11 +2727,13 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
err = -ENOMEM;
- pages = kcalloc(fc->max_pages, sizeof(pages[0]), GFP_KERNEL);
+ ap.pages = fuse_pages_alloc(fc->max_pages, GFP_KERNEL, &ap.descs);
iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
- if (!pages || !iov_page)
+ if (!ap.pages || !iov_page)
goto out;
+ fuse_page_descs_length_init(ap.descs, 0, fc->max_pages);
+
/*
* If restricted, initialize IO parameters as encoded in @cmd.
* RETRY from server is not allowed.
@@ -2639,56 +2770,44 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
err = -ENOMEM;
if (max_pages > fc->max_pages)
goto out;
- while (num_pages < max_pages) {
- pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (!pages[num_pages])
+ while (ap.num_pages < max_pages) {
+ ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!ap.pages[ap.num_pages])
goto out;
- num_pages++;
+ ap.num_pages++;
}
- req = fuse_get_req(fc, num_pages);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- req = NULL;
- goto out;
- }
- memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
- req->num_pages = num_pages;
- fuse_page_descs_length_init(req, 0, req->num_pages);
/* okay, let's send it to the client */
- req->in.h.opcode = FUSE_IOCTL;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
+ ap.args.opcode = FUSE_IOCTL;
+ ap.args.nodeid = ff->nodeid;
+ ap.args.in_numargs = 1;
+ ap.args.in_args[0].size = sizeof(inarg);
+ ap.args.in_args[0].value = &inarg;
if (in_size) {
- req->in.numargs++;
- req->in.args[1].size = in_size;
- req->in.argpages = 1;
+ ap.args.in_numargs++;
+ ap.args.in_args[1].size = in_size;
+ ap.args.in_pages = true;
err = -EFAULT;
iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size);
- for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) {
- c = copy_page_from_iter(pages[i], 0, PAGE_SIZE, &ii);
+ for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
+ c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
if (c != PAGE_SIZE && iov_iter_count(&ii))
goto out;
}
}
- req->out.numargs = 2;
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- req->out.args[1].size = out_size;
- req->out.argpages = 1;
- req->out.argvar = 1;
+ ap.args.out_numargs = 2;
+ ap.args.out_args[0].size = sizeof(outarg);
+ ap.args.out_args[0].value = &outarg;
+ ap.args.out_args[1].size = out_size;
+ ap.args.out_pages = true;
+ ap.args.out_argvar = true;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- transferred = req->out.args[1].size;
- fuse_put_request(fc, req);
- req = NULL;
- if (err)
+ transferred = fuse_simple_request(fc, &ap.args);
+ err = transferred;
+ if (transferred < 0)
goto out;
/* did it ask for retry? */
@@ -2713,7 +2832,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
goto out;
- vaddr = kmap_atomic(pages[0]);
+ vaddr = kmap_atomic(ap.pages[0]);
err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
transferred, in_iovs + out_iovs,
(flags & FUSE_IOCTL_COMPAT) != 0);
@@ -2741,19 +2860,17 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
err = -EFAULT;
iov_iter_init(&ii, READ, out_iov, out_iovs, transferred);
- for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) {
- c = copy_page_to_iter(pages[i], 0, PAGE_SIZE, &ii);
+ for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
+ c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
if (c != PAGE_SIZE && iov_iter_count(&ii))
goto out;
}
err = 0;
out:
- if (req)
- fuse_put_request(fc, req);
free_page((unsigned long) iov_page);
- while (num_pages)
- __free_page(pages[--num_pages]);
- kfree(pages);
+ while (ap.num_pages)
+ __free_page(ap.pages[--ap.num_pages]);
+ kfree(ap.pages);
return err ? err : outarg.result;
}
@@ -2861,14 +2978,14 @@ __poll_t fuse_file_poll(struct file *file, poll_table *wait)
fuse_register_polled_file(fc, ff);
}
- args.in.h.opcode = FUSE_POLL;
- args.in.h.nodeid = ff->nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_POLL;
+ args.nodeid = ff->nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
@@ -3076,11 +3193,11 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
if (!(mode & FALLOC_FL_KEEP_SIZE))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
- args.in.h.opcode = FUSE_FALLOCATE;
- args.in.h.nodeid = ff->nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
+ args.opcode = FUSE_FALLOCATE;
+ args.nodeid = ff->nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_fallocate = 1;
@@ -3168,14 +3285,14 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (is_unstable)
set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
- args.in.h.opcode = FUSE_COPY_FILE_RANGE;
- args.in.h.nodeid = ff_in->nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_COPY_FILE_RANGE;
+ args.nodeid = ff_in->nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_copy_file_range = 1;
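Every conversion in this file has the same shape: header fields that used to be spelled args.in.h.opcode, args.out.numargs and so on are now flat members of struct fuse_args. A condensed sketch of one synchronous call under the new layout, modelled on the FUSE_LSEEK hunk above (the helper name is illustrative, not part of the patch):

#include "fuse_i.h"

/* Sketch: synchronous request with the flattened fuse_args layout. */
static loff_t example_lseek(struct fuse_conn *fc, struct fuse_file *ff,
			    loff_t offset, int whence)
{
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence,
	};
	struct fuse_lseek_out outarg;
	FUSE_ARGS(args);
	int err;

	args.opcode = FUSE_LSEEK;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;

	err = fuse_simple_request(fc, &args);
	return err ? err : outarg.offset;
}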
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 24dbca777775..aa75e2305b75 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -47,9 +47,6 @@
/** Number of dentries for each connection in the control filesystem */
#define FUSE_CTL_NUM_DENTRIES 5
-/** Number of page pointers embedded in fuse_req */
-#define FUSE_REQ_INLINE_PAGES 1
-
/** List of active connections */
extern struct list_head fuse_conn_list;
@@ -164,17 +161,15 @@ enum {
};
struct fuse_conn;
+struct fuse_release_args;
/** FUSE specific file data */
struct fuse_file {
/** Fuse connection for this file */
struct fuse_conn *fc;
- /*
- * Request reserved for flush and release.
- * Modified under relative fuse_inode::lock.
- */
- struct fuse_req *reserved_req;
+ /* Argument space reserved for release */
+ struct fuse_release_args *release_args;
/** Kernel file handle guaranteed to be unique */
u64 kh;
@@ -229,57 +224,12 @@ struct fuse_in_arg {
const void *value;
};
-/** The request input */
-struct fuse_in {
- /** The request header */
- struct fuse_in_header h;
-
- /** True if the data for the last argument is in req->pages */
- unsigned argpages:1;
-
- /** Number of arguments */
- unsigned numargs;
-
- /** Array of arguments */
- struct fuse_in_arg args[3];
-};
-
/** One output argument of a request */
struct fuse_arg {
unsigned size;
void *value;
};
-/** The request output */
-struct fuse_out {
- /** Header returned from userspace */
- struct fuse_out_header h;
-
- /*
- * The following bitfields are not changed during the request
- * processing
- */
-
- /** Last argument is variable length (can be shorter than
- arg->size) */
- unsigned argvar:1;
-
- /** Last argument is a list of pages to copy data to */
- unsigned argpages:1;
-
- /** Zero partially or not copied pages */
- unsigned page_zeroing:1;
-
- /** Pages may be replaced with new ones */
- unsigned page_replace:1;
-
- /** Number or arguments */
- unsigned numargs;
-
- /** Array of arguments */
- struct fuse_arg args[2];
-};
-
/** FUSE page descriptor */
struct fuse_page_desc {
unsigned int length;
@@ -287,20 +237,28 @@ struct fuse_page_desc {
};
struct fuse_args {
- struct {
- struct {
- uint32_t opcode;
- uint64_t nodeid;
- } h;
- unsigned numargs;
- struct fuse_in_arg args[3];
+ uint64_t nodeid;
+ uint32_t opcode;
+ unsigned short in_numargs;
+ unsigned short out_numargs;
+ bool force:1;
+ bool noreply:1;
+ bool nocreds:1;
+ bool in_pages:1;
+ bool out_pages:1;
+ bool out_argvar:1;
+ bool page_zeroing:1;
+ bool page_replace:1;
+ struct fuse_in_arg in_args[3];
+ struct fuse_arg out_args[2];
+ void (*end)(struct fuse_conn *fc, struct fuse_args *args, int error);
+};
- } in;
- struct {
- unsigned argvar:1;
- unsigned numargs;
- struct fuse_arg args[2];
- } out;
+struct fuse_args_pages {
+ struct fuse_args args;
+ struct page **pages;
+ struct fuse_page_desc *descs;
+ unsigned int num_pages;
};
#define FUSE_ARGS(args) struct fuse_args args = {}
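struct fuse_args_pages replaces the page vector that used to live in fuse_req: the pages, their descriptors and num_pages now sit next to the embedded fuse_args, and in_pages/out_pages say which direction the last argument's data flows. A small sketch of an out-pages request, modelled on the fuse_do_ioctl() conversion earlier in this diff; the helper name and opcode choice are illustrative, and the caller is assumed to have filled ap->pages/descs/num_pages via fuse_pages_alloc():

#include "fuse_i.h"

/* Sketch: last output argument is copied into the caller's page vector. */
static ssize_t example_request_into_pages(struct fuse_conn *fc, u64 nodeid,
					  struct fuse_args_pages *ap,
					  size_t count)
{
	/* ap->pages, ap->descs and ap->num_pages already set up by the caller */
	ap->args.opcode = FUSE_IOCTL;		/* placeholder opcode */
	ap->args.nodeid = nodeid;
	ap->args.out_numargs = 1;
	ap->args.out_args[0].size = count;	/* reply data lands in ap->pages[] */
	ap->args.out_pages = true;
	ap->args.out_argvar = true;		/* server may return less than count */

	/* With out_argvar set, a non-negative return is the actual reply size. */
	return fuse_simple_request(fc, &ap->args);
}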
@@ -373,83 +331,79 @@ struct fuse_req {
/** Entry on the interrupts list */
struct list_head intr_entry;
+ /* Input/output arguments */
+ struct fuse_args *args;
+
/** refcount */
refcount_t count;
/* Request flags, updated with test/set/clear_bit() */
unsigned long flags;
- /** The request input */
- struct fuse_in in;
+ /* The request input header */
+ struct {
+ struct fuse_in_header h;
+ } in;
- /** The request output */
- struct fuse_out out;
+ /* The request output header */
+ struct {
+ struct fuse_out_header h;
+ } out;
/** Used to wake up the task waiting for completion of request*/
wait_queue_head_t waitq;
- /** Data for asynchronous requests */
- union {
- struct {
- struct fuse_release_in in;
- struct inode *inode;
- } release;
- struct fuse_init_in init_in;
- struct fuse_init_out init_out;
- struct cuse_init_in cuse_init_in;
- struct {
- struct fuse_read_in in;
- u64 attr_ver;
- } read;
- struct {
- struct fuse_write_in in;
- struct fuse_write_out out;
- struct fuse_req *next;
- } write;
- struct fuse_notify_retrieve_in retrieve_in;
- } misc;
-
- /** page vector */
- struct page **pages;
-
- /** page-descriptor vector */
- struct fuse_page_desc *page_descs;
-
- /** size of the 'pages' array */
- unsigned max_pages;
-
- /** inline page vector */
- struct page *inline_pages[FUSE_REQ_INLINE_PAGES];
-
- /** inline page-descriptor vector */
- struct fuse_page_desc inline_page_descs[FUSE_REQ_INLINE_PAGES];
-
- /** number of pages in vector */
- unsigned num_pages;
-
- /** File used in the request (or NULL) */
- struct fuse_file *ff;
+#if IS_ENABLED(CONFIG_VIRTIO_FS)
+ /** virtio-fs's physically contiguous buffer for in and out args */
+ void *argbuf;
+#endif
+};
- /** Inode used in the request or NULL */
- struct inode *inode;
+struct fuse_iqueue;
- /** AIO control block */
- struct fuse_io_priv *io;
+/**
+ * Input queue callbacks
+ *
+ * Input queue signalling is device-specific. For example, the /dev/fuse file
+ * uses fiq->waitq and fasync to wake processes that are waiting on queue
+ * readiness. These callbacks allow other device types to respond to input
+ * queue activity.
+ */
+struct fuse_iqueue_ops {
+ /**
+ * Signal that a forget has been queued
+ */
+ void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
+ __releases(fiq->lock);
- /** Link on fi->writepages */
- struct list_head writepages_entry;
+ /**
+ * Signal that an INTERRUPT request has been queued
+ */
+ void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
+ __releases(fiq->lock);
- /** Request completion callback */
- void (*end)(struct fuse_conn *, struct fuse_req *);
+ /**
+ * Signal that a request has been queued
+ */
+ void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
+ __releases(fiq->lock);
- /** Request is stolen from fuse_file->reserved_req */
- struct file *stolen_file;
+ /**
+ * Clean up when fuse_iqueue is destroyed
+ */
+ void (*release)(struct fuse_iqueue *fiq);
};
+/** /dev/fuse input queue operations */
+extern const struct fuse_iqueue_ops fuse_dev_fiq_ops;
+
struct fuse_iqueue {
/** Connection established */
unsigned connected;
+ /** Lock protecting accesses to members of this structure */
+ spinlock_t lock;
+
/** Readers of the connection are waiting on this */
wait_queue_head_t waitq;
@@ -471,6 +425,12 @@ struct fuse_iqueue {
/** O_ASYNC requests */
struct fasync_struct *fasync;
+
+ /** Device-specific callbacks */
+ const struct fuse_iqueue_ops *ops;
+
+ /** Device-specific state */
+ void *priv;
};
#define FUSE_PQ_HASH_BITS 8
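The fuse_iqueue_ops indirection added here is what lets a transport other than /dev/fuse drive the input queue: the queue keeps its lock, its lists and a priv pointer, and the transport only decides how queued requests are signalled. A rough sketch of an alternative transport wiring itself up — every my_* name below is hypothetical; the real /dev/fuse callbacks are fuse_dev_fiq_ops:

#include "fuse_i.h"

struct my_transport;				/* hypothetical transport state */
void my_transport_kick(struct my_transport *t);	/* hypothetical doorbell */

static void my_wake_and_unlock(struct fuse_iqueue *fiq)
	__releases(fiq->lock)
{
	struct my_transport *t = fiq->priv;	/* stashed by fuse_conn_init() */

	spin_unlock(&fiq->lock);
	my_transport_kick(t);			/* tell the transport to drain fiq */
}

static const struct fuse_iqueue_ops my_fiq_ops = {
	.wake_forget_and_unlock		= my_wake_and_unlock,
	.wake_interrupt_and_unlock	= my_wake_and_unlock,
	.wake_pending_and_unlock	= my_wake_and_unlock,
};

static void my_setup_conn(struct fuse_conn *fc, struct user_namespace *user_ns,
			  struct my_transport *t)
{
	/* priv ends up in fc->iq.priv, ops in fc->iq.ops */
	fuse_conn_init(fc, user_ns, &my_fiq_ops, t);
	fc->release = fuse_free_conn;
}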
@@ -504,6 +464,30 @@ struct fuse_dev {
struct list_head entry;
};
+struct fuse_fs_context {
+ int fd;
+ unsigned int rootmode;
+ kuid_t user_id;
+ kgid_t group_id;
+ bool is_bdev:1;
+ bool fd_present:1;
+ bool rootmode_present:1;
+ bool user_id_present:1;
+ bool group_id_present:1;
+ bool default_permissions:1;
+ bool allow_other:1;
+ bool destroy:1;
+ bool no_control:1;
+ bool no_force_umount:1;
+ bool no_mount_options:1;
+ unsigned int max_read;
+ unsigned int blksize;
+ const char *subtype;
+
+ /* fuse_dev pointer to fill in, should contain NULL on entry */
+ void **fudptr;
+};
+
/**
* A Fuse connection.
*
@@ -584,9 +568,6 @@ struct fuse_conn {
/** waitq for blocked connection */
wait_queue_head_t blocked_waitq;
- /** waitq for reserved requests */
- wait_queue_head_t reserved_req_waitq;
-
/** Connection established, cleared on umount, connection
abort and device release */
unsigned connected;
@@ -721,6 +702,21 @@ struct fuse_conn {
/** Does the filesystem support copy_file_range? */
unsigned no_copy_file_range:1;
+ /* Send DESTROY request */
+ unsigned int destroy:1;
+
+ /* Delete dentries that have gone stale */
+ unsigned int delete_stale:1;
+
+ /** Do not create entry in fusectl fs */
+ unsigned int no_control:1;
+
+ /** Do not allow MNT_FORCE umount */
+ unsigned int no_force_umount:1;
+
+ /* Do not show mount options */
+ unsigned int no_mount_options:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -742,9 +738,6 @@ struct fuse_conn {
/** Key for lock owner ID scrambling */
u32 scramble_key[4];
- /** Reserved request for the DESTROY message */
- struct fuse_req *destroy_req;
-
/** Version counter for attribute changes */
atomic64_t attr_version;
@@ -820,14 +813,32 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
struct fuse_forget_link *fuse_alloc_forget(void);
-/* Used by READDIRPLUS */
-void fuse_force_forget(struct file *file, u64 nodeid);
+struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+ unsigned int max,
+ unsigned int *countp);
-/**
+/*
* Initialize READ or READDIR request
*/
-void fuse_read_fill(struct fuse_req *req, struct file *file,
- loff_t pos, size_t count, int opcode);
+struct fuse_io_args {
+ union {
+ struct {
+ struct fuse_read_in in;
+ u64 attr_ver;
+ } read;
+ struct {
+ struct fuse_write_in in;
+ struct fuse_write_out out;
+ } write;
+ };
+ struct fuse_args_pages ap;
+ struct fuse_io_priv *io;
+ struct fuse_file *ff;
+};
+
+void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
+ size_t count, int opcode);
+
/**
* Send OPEN or OPENDIR request
@@ -900,61 +911,16 @@ int fuse_ctl_init(void);
void __exit fuse_ctl_cleanup(void);
/**
- * Allocate a request
- */
-struct fuse_req *fuse_request_alloc(unsigned npages);
-
-struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
-
-bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
- gfp_t flags);
-
-
-/**
- * Free a request
- */
-void fuse_request_free(struct fuse_req *req);
-
-/**
- * Get a request, may fail with -ENOMEM,
- * caller should specify # elements in req->pages[] explicitly
- */
-struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages);
-struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
- unsigned npages);
-
-/*
- * Increment reference count on request
- */
-void __fuse_get_request(struct fuse_req *req);
-
-/**
- * Gets a requests for a file operation, always succeeds
- */
-struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
- struct file *file);
-
-/**
- * Decrement reference count of a request. If count goes to zero free
- * the request.
- */
-void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
-
-/**
- * Send a request (synchronous)
- */
-void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req);
-
-/**
* Simple request sending that does request allocation and freeing
*/
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args);
+int fuse_simple_background(struct fuse_conn *fc, struct fuse_args *args,
+ gfp_t gfp_flags);
/**
- * Send a request in the background
+ * End a finished request
*/
-void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
-bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req);
+void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req);
/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc);
@@ -980,15 +946,33 @@ struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
/**
* Initialize fuse_conn
*/
-void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns);
+void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns,
+ const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv);
/**
* Release reference to fuse_conn
*/
void fuse_conn_put(struct fuse_conn *fc);
-struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc);
+struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc);
+struct fuse_dev *fuse_dev_alloc(void);
+void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc);
void fuse_dev_free(struct fuse_dev *fud);
+void fuse_send_init(struct fuse_conn *fc);
+
+/**
+ * Fill in superblock and initialize fuse connection
+ * @sb: partially-initialized superblock to fill in
+ * @ctx: mount context
+ */
+int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx);
+
+/**
+ * Disassociate fuse connection from superblock and kill the superblock
+ *
+ * Calls kill_anon_super(), do not use with bdev mounts.
+ */
+void fuse_kill_sb_anon(struct super_block *sb);
/**
* Add connection to control filesystem
@@ -1005,6 +989,8 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc);
*/
int fuse_valid_type(int m);
+bool fuse_invalid_attr(struct fuse_attr *attr);
+
/**
* Is current process allowed to perform filesystem operation?
*/
@@ -1093,4 +1079,15 @@ int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type);
/* readdir.c */
int fuse_readdir(struct file *file, struct dir_context *ctx);
+/**
+ * Return the number of bytes in an arguments list
+ */
+unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args);
+
+/**
+ * Get the next unique ID for a request
+ */
+u64 fuse_get_unique(struct fuse_iqueue *fiq);
+void fuse_free_conn(struct fuse_conn *fc);
+
#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4bb885b0f032..16aec32f7f3d 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -15,7 +15,8 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
@@ -59,24 +60,13 @@ MODULE_PARM_DESC(max_user_congthresh,
/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
-struct fuse_mount_data {
- int fd;
- unsigned rootmode;
- kuid_t user_id;
- kgid_t group_id;
- unsigned fd_present:1;
- unsigned rootmode_present:1;
- unsigned user_id_present:1;
- unsigned group_id_present:1;
- unsigned default_permissions:1;
- unsigned allow_other:1;
- unsigned max_read;
- unsigned blksize;
-};
+#ifdef CONFIG_BLOCK
+static struct file_system_type fuseblk_fs_type;
+#endif
struct fuse_forget_link *fuse_alloc_forget(void)
{
- return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);
+ return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
}
static struct inode *fuse_alloc_inode(struct super_block *sb)
@@ -374,19 +364,21 @@ void fuse_unlock_inode(struct inode *inode, bool locked)
static void fuse_umount_begin(struct super_block *sb)
{
- fuse_abort_conn(get_fuse_conn_super(sb));
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+ if (!fc->no_force_umount)
+ fuse_abort_conn(fc);
}
static void fuse_send_destroy(struct fuse_conn *fc)
{
- struct fuse_req *req = fc->destroy_req;
- if (req && fc->conn_init) {
- fc->destroy_req = NULL;
- req->in.h.opcode = FUSE_DESTROY;
- __set_bit(FR_FORCE, &req->flags);
- __clear_bit(FR_BACKGROUND, &req->flags);
- fuse_request_send(fc, req);
- fuse_put_request(fc, req);
+ if (fc->conn_init) {
+ FUSE_ARGS(args);
+
+ args.opcode = FUSE_DESTROY;
+ args.force = true;
+ args.nocreds = true;
+ fuse_simple_request(fc, &args);
}
}
@@ -430,12 +422,12 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
}
memset(&outarg, 0, sizeof(outarg));
- args.in.numargs = 0;
- args.in.h.opcode = FUSE_STATFS;
- args.in.h.nodeid = get_node_id(d_inode(dentry));
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.in_numargs = 0;
+ args.opcode = FUSE_STATFS;
+ args.nodeid = get_node_id(d_inode(dentry));
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
convert_fuse_statfs(buf, &outarg.st);
@@ -443,6 +435,8 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
}
enum {
+ OPT_SOURCE,
+ OPT_SUBTYPE,
OPT_FD,
OPT_ROOTMODE,
OPT_USER_ID,
@@ -454,111 +448,109 @@ enum {
OPT_ERR
};
-static const match_table_t tokens = {
- {OPT_FD, "fd=%u"},
- {OPT_ROOTMODE, "rootmode=%o"},
- {OPT_USER_ID, "user_id=%u"},
- {OPT_GROUP_ID, "group_id=%u"},
- {OPT_DEFAULT_PERMISSIONS, "default_permissions"},
- {OPT_ALLOW_OTHER, "allow_other"},
- {OPT_MAX_READ, "max_read=%u"},
- {OPT_BLKSIZE, "blksize=%u"},
- {OPT_ERR, NULL}
+static const struct fs_parameter_spec fuse_param_specs[] = {
+ fsparam_string ("source", OPT_SOURCE),
+ fsparam_u32 ("fd", OPT_FD),
+ fsparam_u32oct ("rootmode", OPT_ROOTMODE),
+ fsparam_u32 ("user_id", OPT_USER_ID),
+ fsparam_u32 ("group_id", OPT_GROUP_ID),
+ fsparam_flag ("default_permissions", OPT_DEFAULT_PERMISSIONS),
+ fsparam_flag ("allow_other", OPT_ALLOW_OTHER),
+ fsparam_u32 ("max_read", OPT_MAX_READ),
+ fsparam_u32 ("blksize", OPT_BLKSIZE),
+ fsparam_string ("subtype", OPT_SUBTYPE),
+ {}
+};
+
+static const struct fs_parameter_description fuse_fs_parameters = {
+ .name = "fuse",
+ .specs = fuse_param_specs,
};
-static int fuse_match_uint(substring_t *s, unsigned int *res)
+static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- int err = -ENOMEM;
- char *buf = match_strdup(s);
- if (buf) {
- err = kstrtouint(buf, 10, res);
- kfree(buf);
+ struct fs_parse_result result;
+ struct fuse_fs_context *ctx = fc->fs_private;
+ int opt;
+
+ opt = fs_parse(fc, &fuse_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case OPT_SOURCE:
+ if (fc->source)
+ return invalf(fc, "fuse: Multiple sources specified");
+ fc->source = param->string;
+ param->string = NULL;
+ break;
+
+ case OPT_SUBTYPE:
+ if (ctx->subtype)
+ return invalf(fc, "fuse: Multiple subtypes specified");
+ ctx->subtype = param->string;
+ param->string = NULL;
+ return 0;
+
+ case OPT_FD:
+ ctx->fd = result.uint_32;
+ ctx->fd_present = 1;
+ break;
+
+ case OPT_ROOTMODE:
+ if (!fuse_valid_type(result.uint_32))
+ return invalf(fc, "fuse: Invalid rootmode");
+ ctx->rootmode = result.uint_32;
+ ctx->rootmode_present = 1;
+ break;
+
+ case OPT_USER_ID:
+ ctx->user_id = make_kuid(fc->user_ns, result.uint_32);
+ if (!uid_valid(ctx->user_id))
+ return invalf(fc, "fuse: Invalid user_id");
+ ctx->user_id_present = 1;
+ break;
+
+ case OPT_GROUP_ID:
+ ctx->group_id = make_kgid(fc->user_ns, result.uint_32);
+ if (!gid_valid(ctx->group_id))
+ return invalf(fc, "fuse: Invalid group_id");
+ ctx->group_id_present = 1;
+ break;
+
+ case OPT_DEFAULT_PERMISSIONS:
+ ctx->default_permissions = 1;
+ break;
+
+ case OPT_ALLOW_OTHER:
+ ctx->allow_other = 1;
+ break;
+
+ case OPT_MAX_READ:
+ ctx->max_read = result.uint_32;
+ break;
+
+ case OPT_BLKSIZE:
+ if (!ctx->is_bdev)
+ return invalf(fc, "fuse: blksize only supported for fuseblk");
+ ctx->blksize = result.uint_32;
+ break;
+
+ default:
+ return -EINVAL;
}
- return err;
+
+ return 0;
}
-static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev,
- struct user_namespace *user_ns)
+static void fuse_free_fc(struct fs_context *fc)
{
- char *p;
- memset(d, 0, sizeof(struct fuse_mount_data));
- d->max_read = ~0;
- d->blksize = FUSE_DEFAULT_BLKSIZE;
-
- while ((p = strsep(&opt, ",")) != NULL) {
- int token;
- int value;
- unsigned uv;
- substring_t args[MAX_OPT_ARGS];
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case OPT_FD:
- if (match_int(&args[0], &value))
- return 0;
- d->fd = value;
- d->fd_present = 1;
- break;
-
- case OPT_ROOTMODE:
- if (match_octal(&args[0], &value))
- return 0;
- if (!fuse_valid_type(value))
- return 0;
- d->rootmode = value;
- d->rootmode_present = 1;
- break;
-
- case OPT_USER_ID:
- if (fuse_match_uint(&args[0], &uv))
- return 0;
- d->user_id = make_kuid(user_ns, uv);
- if (!uid_valid(d->user_id))
- return 0;
- d->user_id_present = 1;
- break;
-
- case OPT_GROUP_ID:
- if (fuse_match_uint(&args[0], &uv))
- return 0;
- d->group_id = make_kgid(user_ns, uv);
- if (!gid_valid(d->group_id))
- return 0;
- d->group_id_present = 1;
- break;
-
- case OPT_DEFAULT_PERMISSIONS:
- d->default_permissions = 1;
- break;
-
- case OPT_ALLOW_OTHER:
- d->allow_other = 1;
- break;
-
- case OPT_MAX_READ:
- if (match_int(&args[0], &value))
- return 0;
- d->max_read = value;
- break;
-
- case OPT_BLKSIZE:
- if (!is_bdev || match_int(&args[0], &value))
- return 0;
- d->blksize = value;
- break;
-
- default:
- return 0;
- }
- }
+ struct fuse_fs_context *ctx = fc->fs_private;
- if (!d->fd_present || !d->rootmode_present ||
- !d->user_id_present || !d->group_id_present)
- return 0;
-
- return 1;
+ if (ctx) {
+ kfree(ctx->subtype);
+ kfree(ctx);
+ }
}
static int fuse_show_options(struct seq_file *m, struct dentry *root)
@@ -566,6 +558,9 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
struct super_block *sb = root->d_sb;
struct fuse_conn *fc = get_fuse_conn_super(sb);
+ if (fc->no_mount_options)
+ return 0;
+
seq_printf(m, ",user_id=%u", from_kuid_munged(fc->user_ns, fc->user_id));
seq_printf(m, ",group_id=%u", from_kgid_munged(fc->user_ns, fc->group_id));
if (fc->default_permissions)
@@ -579,14 +574,19 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
-static void fuse_iqueue_init(struct fuse_iqueue *fiq)
+static void fuse_iqueue_init(struct fuse_iqueue *fiq,
+ const struct fuse_iqueue_ops *ops,
+ void *priv)
{
memset(fiq, 0, sizeof(struct fuse_iqueue));
+ spin_lock_init(&fiq->lock);
init_waitqueue_head(&fiq->waitq);
INIT_LIST_HEAD(&fiq->pending);
INIT_LIST_HEAD(&fiq->interrupts);
fiq->forget_list_tail = &fiq->forget_list_head;
fiq->connected = 1;
+ fiq->ops = ops;
+ fiq->priv = priv;
}
static void fuse_pqueue_init(struct fuse_pqueue *fpq)
@@ -600,7 +600,8 @@ static void fuse_pqueue_init(struct fuse_pqueue *fpq)
fpq->connected = 1;
}
-void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
+void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns,
+ const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
{
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
@@ -609,8 +610,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
refcount_set(&fc->count, 1);
atomic_set(&fc->dev_count, 1);
init_waitqueue_head(&fc->blocked_waitq);
- init_waitqueue_head(&fc->reserved_req_waitq);
- fuse_iqueue_init(&fc->iq);
+ fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
INIT_LIST_HEAD(&fc->bg_queue);
INIT_LIST_HEAD(&fc->entry);
INIT_LIST_HEAD(&fc->devices);
@@ -633,8 +633,10 @@ EXPORT_SYMBOL_GPL(fuse_conn_init);
void fuse_conn_put(struct fuse_conn *fc)
{
if (refcount_dec_and_test(&fc->count)) {
- if (fc->destroy_req)
- fuse_request_free(fc->destroy_req);
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ if (fiq->ops->release)
+ fiq->ops->release(fiq);
put_pid_ns(fc->pid_ns);
put_user_ns(fc->user_ns);
fc->release(fc);
@@ -822,9 +824,12 @@ static const struct super_operations fuse_super_operations = {
static void sanitize_global_limit(unsigned *limit)
{
+ /*
+ * The default maximum number of async requests is calculated to consume
+ * 1/2^13 of the total memory, assuming 392 bytes per request.
+ */
if (*limit == 0)
- *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) /
- sizeof(struct fuse_req);
+ *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;
if (*limit >= 1 << 16)
*limit = (1 << 16) - 1;
@@ -870,11 +875,19 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
spin_unlock(&fc->bg_lock);
}
-static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
+struct fuse_init_args {
+ struct fuse_args args;
+ struct fuse_init_in in;
+ struct fuse_init_out out;
+};
+
+static void process_init_reply(struct fuse_conn *fc, struct fuse_args *args,
+ int error)
{
- struct fuse_init_out *arg = &req->misc.init_out;
+ struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
+ struct fuse_init_out *arg = &ia->out;
- if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
+ if (error || arg->major != FUSE_KERNEL_VERSION)
fc->conn_error = 1;
else {
unsigned long ra_pages;
@@ -951,18 +964,23 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->max_write = max_t(unsigned, 4096, fc->max_write);
fc->conn_init = 1;
}
+ kfree(ia);
+
fuse_set_initialized(fc);
wake_up_all(&fc->blocked_waitq);
}
-static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_send_init(struct fuse_conn *fc)
{
- struct fuse_init_in *arg = &req->misc.init_in;
+ struct fuse_init_args *ia;
+
+ ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);
- arg->major = FUSE_KERNEL_VERSION;
- arg->minor = FUSE_KERNEL_MINOR_VERSION;
- arg->max_readahead = fc->sb->s_bdi->ra_pages * PAGE_SIZE;
- arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
+ ia->in.major = FUSE_KERNEL_VERSION;
+ ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
+ ia->in.max_readahead = fc->sb->s_bdi->ra_pages * PAGE_SIZE;
+ ia->in.flags |=
+ FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
@@ -971,26 +989,32 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA;
- req->in.h.opcode = FUSE_INIT;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(*arg);
- req->in.args[0].value = arg;
- req->out.numargs = 1;
+ ia->args.opcode = FUSE_INIT;
+ ia->args.in_numargs = 1;
+ ia->args.in_args[0].size = sizeof(ia->in);
+ ia->args.in_args[0].value = &ia->in;
+ ia->args.out_numargs = 1;
/* Variable length argument used for backward compatibility
with interface version < 7.5. Rest of init_out is zeroed
by do_get_request(), so a short reply is not a problem */
- req->out.argvar = 1;
- req->out.args[0].size = sizeof(struct fuse_init_out);
- req->out.args[0].value = &req->misc.init_out;
- req->end = process_init_reply;
- fuse_request_send_background(fc, req);
+ ia->args.out_argvar = 1;
+ ia->args.out_args[0].size = sizeof(ia->out);
+ ia->args.out_args[0].value = &ia->out;
+ ia->args.force = true;
+ ia->args.nocreds = true;
+ ia->args.end = process_init_reply;
+
+ if (fuse_simple_background(fc, &ia->args, GFP_KERNEL) != 0)
+ process_init_reply(fc, &ia->args, -ENOTCONN);
}
+EXPORT_SYMBOL_GPL(fuse_send_init);
-static void fuse_free_conn(struct fuse_conn *fc)
+void fuse_free_conn(struct fuse_conn *fc)
{
WARN_ON(!list_empty(&fc->devices));
kfree_rcu(fc, rcu);
}
+EXPORT_SYMBOL_GPL(fuse_free_conn);
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
@@ -1032,7 +1056,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
return 0;
}
-struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
+struct fuse_dev *fuse_dev_alloc(void)
{
struct fuse_dev *fud;
struct list_head *pq;
@@ -1048,16 +1072,33 @@ struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
}
fud->pq.processing = pq;
- fud->fc = fuse_conn_get(fc);
fuse_pqueue_init(&fud->pq);
+ return fud;
+}
+EXPORT_SYMBOL_GPL(fuse_dev_alloc);
+
+void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
+{
+ fud->fc = fuse_conn_get(fc);
spin_lock(&fc->lock);
list_add_tail(&fud->entry, &fc->devices);
spin_unlock(&fc->lock);
+}
+EXPORT_SYMBOL_GPL(fuse_dev_install);
+
+struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
+{
+ struct fuse_dev *fud;
+ fud = fuse_dev_alloc();
+ if (!fud)
+ return NULL;
+
+ fuse_dev_install(fud, fc);
return fud;
}
-EXPORT_SYMBOL_GPL(fuse_dev_alloc);
+EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);
void fuse_dev_free(struct fuse_dev *fud)
{
@@ -1075,17 +1116,13 @@ void fuse_dev_free(struct fuse_dev *fud)
}
EXPORT_SYMBOL_GPL(fuse_dev_free);
-static int fuse_fill_super(struct super_block *sb, void *data, int silent)
+int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
struct fuse_dev *fud;
- struct fuse_conn *fc;
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
struct inode *root;
- struct fuse_mount_data d;
- struct file *file;
struct dentry *root_dentry;
- struct fuse_req *init_req;
int err;
- int is_bdev = sb->s_bdev != NULL;
err = -EINVAL;
if (sb->s_flags & SB_MANDLOCK)
@@ -1093,19 +1130,19 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
- if (!parse_fuse_opt(data, &d, is_bdev, sb->s_user_ns))
- goto err;
-
- if (is_bdev) {
+ if (ctx->is_bdev) {
#ifdef CONFIG_BLOCK
err = -EINVAL;
- if (!sb_set_blocksize(sb, d.blksize))
+ if (!sb_set_blocksize(sb, ctx->blksize))
goto err;
#endif
} else {
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
}
+
+ sb->s_subtype = ctx->subtype;
+ ctx->subtype = NULL;
sb->s_magic = FUSE_SUPER_MAGIC;
sb->s_op = &fuse_super_operations;
sb->s_xattr = fuse_xattr_handlers;
@@ -1116,19 +1153,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_user_ns != &init_user_ns)
sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
- file = fget(d.fd);
- err = -EINVAL;
- if (!file)
- goto err;
-
- /*
- * Require mount to happen from the same user namespace which
- * opened /dev/fuse to prevent potential attacks.
- */
- if (file->f_op != &fuse_dev_operations ||
- file->f_cred->user_ns != sb->s_user_ns)
- goto err_fput;
-
/*
* If we are not in the initial user namespace posix
* acls must be translated.
@@ -1136,17 +1160,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_user_ns != &init_user_ns)
sb->s_xattr = fuse_no_acl_xattr_handlers;
- fc = kmalloc(sizeof(*fc), GFP_KERNEL);
- err = -ENOMEM;
- if (!fc)
- goto err_fput;
-
- fuse_conn_init(fc, sb->s_user_ns);
- fc->release = fuse_free_conn;
-
- fud = fuse_dev_alloc(fc);
+ fud = fuse_dev_alloc_install(fc);
if (!fud)
- goto err_put_conn;
+ goto err;
fc->dev = sb->s_dev;
fc->sb = sb;
@@ -1159,17 +1175,18 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fc->dont_mask = 1;
sb->s_flags |= SB_POSIXACL;
- fc->default_permissions = d.default_permissions;
- fc->allow_other = d.allow_other;
- fc->user_id = d.user_id;
- fc->group_id = d.group_id;
- fc->max_read = max_t(unsigned, 4096, d.max_read);
-
- /* Used by get_root_inode() */
- sb->s_fs_info = fc;
+ fc->default_permissions = ctx->default_permissions;
+ fc->allow_other = ctx->allow_other;
+ fc->user_id = ctx->user_id;
+ fc->group_id = ctx->group_id;
+ fc->max_read = max_t(unsigned, 4096, ctx->max_read);
+ fc->destroy = ctx->destroy;
+ fc->no_control = ctx->no_control;
+ fc->no_force_umount = ctx->no_force_umount;
+ fc->no_mount_options = ctx->no_mount_options;
err = -ENOMEM;
- root = fuse_get_root_inode(sb, d.rootmode);
+ root = fuse_get_root_inode(sb, ctx->rootmode);
sb->s_d_op = &fuse_root_dentry_operations;
root_dentry = d_make_root(root);
if (!root_dentry)
@@ -1177,20 +1194,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
/* Root dentry doesn't have .d_revalidate */
sb->s_d_op = &fuse_dentry_operations;
- init_req = fuse_request_alloc(0);
- if (!init_req)
- goto err_put_root;
- __set_bit(FR_BACKGROUND, &init_req->flags);
-
- if (is_bdev) {
- fc->destroy_req = fuse_request_alloc(0);
- if (!fc->destroy_req)
- goto err_free_init_req;
- }
-
mutex_lock(&fuse_mutex);
err = -EINVAL;
- if (file->private_data)
+ if (*ctx->fudptr)
goto err_unlock;
err = fuse_ctl_add_conn(fc);
@@ -1199,27 +1205,62 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
list_add_tail(&fc->entry, &fuse_conn_list);
sb->s_root = root_dentry;
- file->private_data = fud;
+ *ctx->fudptr = fud;
mutex_unlock(&fuse_mutex);
+ return 0;
+
+ err_unlock:
+ mutex_unlock(&fuse_mutex);
+ dput(root_dentry);
+ err_dev_free:
+ fuse_dev_free(fud);
+ err:
+ return err;
+}
+EXPORT_SYMBOL_GPL(fuse_fill_super_common);
+
+static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
+{
+ struct fuse_fs_context *ctx = fsc->fs_private;
+ struct file *file;
+ int err;
+ struct fuse_conn *fc;
+
+ err = -EINVAL;
+ file = fget(ctx->fd);
+ if (!file)
+ goto err;
+
+ /*
+ * Require mount to happen from the same user namespace which
+ * opened /dev/fuse to prevent potential attacks.
+ */
+ if ((file->f_op != &fuse_dev_operations) ||
+ (file->f_cred->user_ns != sb->s_user_ns))
+ goto err_fput;
+ ctx->fudptr = &file->private_data;
+
+ fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!fc)
+ goto err_fput;
+
+ fuse_conn_init(fc, sb->s_user_ns, &fuse_dev_fiq_ops, NULL);
+ fc->release = fuse_free_conn;
+ sb->s_fs_info = fc;
+
+ err = fuse_fill_super_common(sb, ctx);
+ if (err)
+ goto err_put_conn;
/*
* atomic_dec_and_test() in fput() provides the necessary
* memory barrier for file->private_data to be visible on all
* CPUs after this
*/
fput(file);
-
- fuse_send_init(fc, init_req);
-
+ fuse_send_init(get_fuse_conn_super(sb));
return 0;
- err_unlock:
- mutex_unlock(&fuse_mutex);
- err_free_init_req:
- fuse_request_free(init_req);
- err_put_root:
- dput(root_dentry);
- err_dev_free:
- fuse_dev_free(fud);
err_put_conn:
fuse_conn_put(fc);
sb->s_fs_info = NULL;
@@ -1229,11 +1270,52 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
return err;
}
-static struct dentry *fuse_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *raw_data)
+static int fuse_get_tree(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
+ struct fuse_fs_context *ctx = fc->fs_private;
+
+ if (!ctx->fd_present || !ctx->rootmode_present ||
+ !ctx->user_id_present || !ctx->group_id_present)
+ return -EINVAL;
+
+#ifdef CONFIG_BLOCK
+ if (ctx->is_bdev)
+ return get_tree_bdev(fc, fuse_fill_super);
+#endif
+
+ return get_tree_nodev(fc, fuse_fill_super);
+}
+
+static const struct fs_context_operations fuse_context_ops = {
+ .free = fuse_free_fc,
+ .parse_param = fuse_parse_param,
+ .get_tree = fuse_get_tree,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int fuse_init_fs_context(struct fs_context *fc)
+{
+ struct fuse_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max_read = ~0;
+ ctx->blksize = FUSE_DEFAULT_BLKSIZE;
+
+#ifdef CONFIG_BLOCK
+ if (fc->fs_type == &fuseblk_fs_type) {
+ ctx->is_bdev = true;
+ ctx->destroy = true;
+ }
+#endif
+
+ fc->fs_private = ctx;
+ fc->ops = &fuse_context_ops;
+ return 0;
}
static void fuse_sb_destroy(struct super_block *sb)
@@ -1241,7 +1323,8 @@ static void fuse_sb_destroy(struct super_block *sb)
struct fuse_conn *fc = get_fuse_conn_super(sb);
if (fc) {
- fuse_send_destroy(fc);
+ if (fc->destroy)
+ fuse_send_destroy(fc);
fuse_abort_conn(fc);
fuse_wait_aborted(fc);
@@ -1252,29 +1335,24 @@ static void fuse_sb_destroy(struct super_block *sb)
}
}
-static void fuse_kill_sb_anon(struct super_block *sb)
+void fuse_kill_sb_anon(struct super_block *sb)
{
fuse_sb_destroy(sb);
kill_anon_super(sb);
}
+EXPORT_SYMBOL_GPL(fuse_kill_sb_anon);
static struct file_system_type fuse_fs_type = {
.owner = THIS_MODULE,
.name = "fuse",
.fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
- .mount = fuse_mount,
+ .init_fs_context = fuse_init_fs_context,
+ .parameters = &fuse_fs_parameters,
.kill_sb = fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
#ifdef CONFIG_BLOCK
-static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *raw_data)
-{
- return mount_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super);
-}
-
static void fuse_kill_sb_blk(struct super_block *sb)
{
fuse_sb_destroy(sb);
@@ -1284,7 +1362,8 @@ static void fuse_kill_sb_blk(struct super_block *sb)
static struct file_system_type fuseblk_fs_type = {
.owner = THIS_MODULE,
.name = "fuseblk",
- .mount = fuse_mount_blk,
+ .init_fs_context = fuse_init_fs_context,
+ .parameters = &fuse_fs_parameters,
.kill_sb = fuse_kill_sb_blk,
.fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
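
Aside: the conversion above splits the old fuse_fill_super() into fuse_fill_super_common(), which works on a struct fuse_fs_context and a ctx->fudptr slot instead of a /dev/fuse file, so transports other than /dev/fuse can reuse the common superblock setup. A minimal sketch of how a transport could wire this up; my_transport and my_fiq_ops are illustrative names, and the sequence simply mirrors fuse_fill_super() above and virtio_fs_fill_super() further down:

    static int my_transport_fill_super(struct super_block *sb,
                                       struct fuse_fs_context *ctx,
                                       struct my_transport *t)
    {
            struct fuse_conn *fc;
            int err;

            fc = kzalloc(sizeof(*fc), GFP_KERNEL);
            if (!fc)
                    return -ENOMEM;

            /* The transport supplies its own input queue ops and private data */
            fuse_conn_init(fc, sb->s_user_ns, &my_fiq_ops, t);
            fc->release = fuse_free_conn;
            sb->s_fs_info = fc;

            /* Tell the common helper where to install the allocated fuse_dev */
            ctx->fudptr = (void **)&t->fud;

            err = fuse_fill_super_common(sb, ctx);
            if (err) {
                    fuse_conn_put(fc);
                    sb->s_fs_info = NULL;
            }
            return err;
    }
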
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index 574d03f8a573..6a40f75a0d25 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -184,7 +184,7 @@ static int fuse_direntplus_link(struct file *file,
if (invalid_nodeid(o->nodeid))
return -EIO;
- if (!fuse_valid_type(o->attr.mode))
+ if (fuse_invalid_attr(&o->attr))
return -EIO;
fc = get_fuse_conn(dir);
@@ -249,6 +249,27 @@ retry:
return 0;
}
+static void fuse_force_forget(struct file *file, u64 nodeid)
+{
+ struct inode *inode = file_inode(file);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_forget_in inarg;
+ FUSE_ARGS(args);
+
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.nlookup = 1;
+ args.opcode = FUSE_FORGET;
+ args.nodeid = nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.force = true;
+ args.noreply = true;
+
+ fuse_simple_request(fc, &args);
+ /* ignore errors */
+}
+
static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
struct dir_context *ctx, u64 attr_version)
{
@@ -295,62 +316,55 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
{
- int plus, err;
- size_t nbytes;
+ int plus;
+ ssize_t res;
struct page *page;
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ struct fuse_io_args ia = {};
+ struct fuse_args_pages *ap = &ia.ap;
+ struct fuse_page_desc desc = { .length = PAGE_SIZE };
u64 attr_version = 0;
bool locked;
- req = fuse_get_req(fc, 1);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
page = alloc_page(GFP_KERNEL);
- if (!page) {
- fuse_put_request(fc, req);
+ if (!page)
return -ENOMEM;
- }
plus = fuse_use_readdirplus(inode, ctx);
- req->out.argpages = 1;
- req->num_pages = 1;
- req->pages[0] = page;
- req->page_descs[0].length = PAGE_SIZE;
+ ap->args.out_pages = 1;
+ ap->num_pages = 1;
+ ap->pages = &page;
+ ap->descs = &desc;
if (plus) {
attr_version = fuse_get_attr_version(fc);
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIRPLUS);
+ fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE,
+ FUSE_READDIRPLUS);
} else {
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIR);
+ fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE,
+ FUSE_READDIR);
}
locked = fuse_lock_inode(inode);
- fuse_request_send(fc, req);
+ res = fuse_simple_request(fc, &ap->args);
fuse_unlock_inode(inode, locked);
- nbytes = req->out.args[0].size;
- err = req->out.h.error;
- fuse_put_request(fc, req);
- if (!err) {
- if (!nbytes) {
+ if (res >= 0) {
+ if (!res) {
struct fuse_file *ff = file->private_data;
if (ff->open_flags & FOPEN_CACHE_DIR)
fuse_readdir_cache_end(file, ctx->pos);
} else if (plus) {
- err = parse_dirplusfile(page_address(page), nbytes,
+ res = parse_dirplusfile(page_address(page), res,
file, ctx, attr_version);
} else {
- err = parse_dirfile(page_address(page), nbytes, file,
+ res = parse_dirfile(page_address(page), res, file,
ctx);
}
}
__free_page(page);
fuse_invalidate_atime(inode);
- return err;
+ return res;
}
enum fuse_parse_result {
@@ -372,11 +386,13 @@ static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff,
for (;;) {
struct fuse_dirent *dirent = addr + offset;
unsigned int nbytes = size - offset;
- size_t reclen = FUSE_DIRENT_SIZE(dirent);
+ size_t reclen;
if (nbytes < FUSE_NAME_OFFSET || !dirent->namelen)
break;
+ reclen = FUSE_DIRENT_SIZE(dirent); /* derefs ->namelen */
+
if (WARN_ON(dirent->namelen > FUSE_NAME_MAX))
return FOUND_ERR;
if (WARN_ON(reclen > nbytes))
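
Aside on the reordering in fuse_parse_cache() above: FUSE_DIRENT_SIZE() dereferences dirent->namelen, so reclen may only be computed after the nbytes check has ruled out a truncated record. Rough layout math, assuming the uapi definitions in include/uapi/linux/fuse.h:

    /*
     * Fixed part of a fuse_dirent: ino(8) + off(8) + namelen(4) + type(4),
     * so FUSE_NAME_OFFSET == 24, and each record is padded to 8 bytes:
     *
     *      FUSE_DIRENT_SIZE(d) == FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + d->namelen)
     *      e.g. a 5-byte name:    FUSE_DIRENT_ALIGN(24 + 5) == 32
     *
     * Reading d->namelen before the "nbytes < FUSE_NAME_OFFSET" check could
     * therefore look past the valid part of the cached page.
     */
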
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
new file mode 100644
index 000000000000..bade74768903
--- /dev/null
+++ b/fs/fuse/virtio_fs.c
@@ -0,0 +1,1254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * virtio-fs: Virtio Filesystem
+ * Copyright (C) 2018 Red Hat, Inc.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_fs.h>
+#include <linux/delay.h>
+#include <linux/fs_context.h>
+#include <linux/highmem.h>
+#include "fuse_i.h"
+
+/* List of virtio-fs device instances and a lock for the list. The lock also
+ * provides mutual exclusion between the device removal and mount paths.
+ */
+static DEFINE_MUTEX(virtio_fs_mutex);
+static LIST_HEAD(virtio_fs_instances);
+
+enum {
+ VQ_HIPRIO,
+ VQ_REQUEST
+};
+
+/* Per-virtqueue state */
+struct virtio_fs_vq {
+ spinlock_t lock;
+ struct virtqueue *vq; /* protected by ->lock */
+ struct work_struct done_work;
+ struct list_head queued_reqs;
+ struct list_head end_reqs; /* End these requests */
+ struct delayed_work dispatch_work;
+ struct fuse_dev *fud;
+ bool connected;
+ long in_flight;
+ struct completion in_flight_zero; /* No inflight requests */
+ char name[24];
+} ____cacheline_aligned_in_smp;
+
+/* A virtio-fs device instance */
+struct virtio_fs {
+ struct kref refcount;
+ struct list_head list; /* on virtio_fs_instances */
+ char *tag;
+ struct virtio_fs_vq *vqs;
+ unsigned int nvqs; /* number of virtqueues */
+ unsigned int num_request_queues; /* number of request queues */
+};
+
+struct virtio_fs_forget_req {
+ struct fuse_in_header ih;
+ struct fuse_forget_in arg;
+};
+
+struct virtio_fs_forget {
+ /* This request can be temporarily queued on the virtqueue */
+ struct list_head list;
+ struct virtio_fs_forget_req req;
+};
+
+static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
+ struct fuse_req *req, bool in_flight);
+
+static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
+{
+ struct virtio_fs *fs = vq->vdev->priv;
+
+ return &fs->vqs[vq->index];
+}
+
+static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
+{
+ return &vq_to_fsvq(vq)->fud->pq;
+}
+
+/* Should be called with fsvq->lock held. */
+static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+ fsvq->in_flight++;
+}
+
+/* Should be called with fsvq->lock held. */
+static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+ WARN_ON(fsvq->in_flight <= 0);
+ fsvq->in_flight--;
+ if (!fsvq->in_flight)
+ complete(&fsvq->in_flight_zero);
+}
+
+static void release_virtio_fs_obj(struct kref *ref)
+{
+ struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
+
+ kfree(vfs->vqs);
+ kfree(vfs);
+}
+
+/* Caller must hold virtio_fs_mutex */
+static void virtio_fs_put(struct virtio_fs *fs)
+{
+ kref_put(&fs->refcount, release_virtio_fs_obj);
+}
+
+static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
+{
+ struct virtio_fs *vfs = fiq->priv;
+
+ mutex_lock(&virtio_fs_mutex);
+ virtio_fs_put(vfs);
+ mutex_unlock(&virtio_fs_mutex);
+}
+
+static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
+{
+ WARN_ON(fsvq->in_flight < 0);
+
+ /* Wait for in-flight requests to finish. */
+ spin_lock(&fsvq->lock);
+ if (fsvq->in_flight) {
+ /* We are holding virtio_fs_mutex. There should not be any
+ * other waiters on this completion.
+ */
+ reinit_completion(&fsvq->in_flight_zero);
+ spin_unlock(&fsvq->lock);
+ wait_for_completion(&fsvq->in_flight_zero);
+ } else {
+ spin_unlock(&fsvq->lock);
+ }
+
+ flush_work(&fsvq->done_work);
+ flush_delayed_work(&fsvq->dispatch_work);
+}
+
+static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+ virtio_fs_drain_queue(fsvq);
+ }
+}
+
+static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
+{
+ /* Provides mutual exclusion between the ->remove and ->kill_sb
+ * paths. We don't want both of them draining the queues at the
+ * same time. The completion logic reinitializes the completion,
+ * so no other thread may be reinitializing it or already waiting
+ * on it.
+ */
+ mutex_lock(&virtio_fs_mutex);
+ virtio_fs_drain_all_queues_locked(fs);
+ mutex_unlock(&virtio_fs_mutex);
+}
+
+static void virtio_fs_start_all_queues(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+ spin_lock(&fsvq->lock);
+ fsvq->connected = true;
+ spin_unlock(&fsvq->lock);
+ }
+}
+
+/* Add a new instance to the list, or return -EEXIST if the tag name already exists */
+static int virtio_fs_add_instance(struct virtio_fs *fs)
+{
+ struct virtio_fs *fs2;
+ bool duplicate = false;
+
+ mutex_lock(&virtio_fs_mutex);
+
+ list_for_each_entry(fs2, &virtio_fs_instances, list) {
+ if (strcmp(fs->tag, fs2->tag) == 0)
+ duplicate = true;
+ }
+
+ if (!duplicate)
+ list_add_tail(&fs->list, &virtio_fs_instances);
+
+ mutex_unlock(&virtio_fs_mutex);
+
+ if (duplicate)
+ return -EEXIST;
+ return 0;
+}
+
+/* Return the virtio_fs with a given tag, or NULL */
+static struct virtio_fs *virtio_fs_find_instance(const char *tag)
+{
+ struct virtio_fs *fs;
+
+ mutex_lock(&virtio_fs_mutex);
+
+ list_for_each_entry(fs, &virtio_fs_instances, list) {
+ if (strcmp(fs->tag, tag) == 0) {
+ kref_get(&fs->refcount);
+ goto found;
+ }
+ }
+
+ fs = NULL; /* not found */
+
+found:
+ mutex_unlock(&virtio_fs_mutex);
+
+ return fs;
+}
+
+static void virtio_fs_free_devs(struct virtio_fs *fs)
+{
+ unsigned int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ struct virtio_fs_vq *fsvq = &fs->vqs[i];
+
+ if (!fsvq->fud)
+ continue;
+
+ fuse_dev_free(fsvq->fud);
+ fsvq->fud = NULL;
+ }
+}
+
+/* Read the filesystem tag name from the virtio config into fs->tag (devm-allocated). */
+static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
+{
+ char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
+ char *end;
+ size_t len;
+
+ virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
+ &tag_buf, sizeof(tag_buf));
+ end = memchr(tag_buf, '\0', sizeof(tag_buf));
+ if (end == tag_buf)
+ return -EINVAL; /* empty tag */
+ if (!end)
+ end = &tag_buf[sizeof(tag_buf)];
+
+ len = end - tag_buf;
+ fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
+ if (!fs->tag)
+ return -ENOMEM;
+ memcpy(fs->tag, tag_buf, len);
+ fs->tag[len] = '\0';
+ return 0;
+}
+
+/* Work function for hiprio completion */
+static void virtio_fs_hiprio_done_work(struct work_struct *work)
+{
+ struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
+ done_work);
+ struct virtqueue *vq = fsvq->vq;
+
+ /* Free completed FUSE_FORGET requests */
+ spin_lock(&fsvq->lock);
+ do {
+ unsigned int len;
+ void *req;
+
+ virtqueue_disable_cb(vq);
+
+ while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
+ kfree(req);
+ dec_in_flight_req(fsvq);
+ }
+ } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
+ spin_unlock(&fsvq->lock);
+}
+
+static void virtio_fs_request_dispatch_work(struct work_struct *work)
+{
+ struct fuse_req *req;
+ struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
+ dispatch_work.work);
+ struct fuse_conn *fc = fsvq->fud->fc;
+ int ret;
+
+ pr_debug("virtio-fs: worker %s called.\n", __func__);
+ while (1) {
+ spin_lock(&fsvq->lock);
+ req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
+ list);
+ if (!req) {
+ spin_unlock(&fsvq->lock);
+ break;
+ }
+
+ list_del_init(&req->list);
+ spin_unlock(&fsvq->lock);
+ fuse_request_end(fc, req);
+ }
+
+ /* Dispatch pending requests */
+ while (1) {
+ spin_lock(&fsvq->lock);
+ req = list_first_entry_or_null(&fsvq->queued_reqs,
+ struct fuse_req, list);
+ if (!req) {
+ spin_unlock(&fsvq->lock);
+ return;
+ }
+ list_del_init(&req->list);
+ spin_unlock(&fsvq->lock);
+
+ ret = virtio_fs_enqueue_req(fsvq, req, true);
+ if (ret < 0) {
+ if (ret == -ENOMEM || ret == -ENOSPC) {
+ spin_lock(&fsvq->lock);
+ list_add_tail(&req->list, &fsvq->queued_reqs);
+ schedule_delayed_work(&fsvq->dispatch_work,
+ msecs_to_jiffies(1));
+ spin_unlock(&fsvq->lock);
+ return;
+ }
+ req->out.h.error = ret;
+ spin_lock(&fsvq->lock);
+ dec_in_flight_req(fsvq);
+ spin_unlock(&fsvq->lock);
+ pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
+ ret);
+ fuse_request_end(fc, req);
+ }
+ }
+}
+
+/*
+ * Returns 1 if the queue is full and the sender should wait a bit before
+ * sending the next request, 0 if it was queued or dropped, or a negative error.
+ */
+static int send_forget_request(struct virtio_fs_vq *fsvq,
+ struct virtio_fs_forget *forget,
+ bool in_flight)
+{
+ struct scatterlist sg;
+ struct virtqueue *vq;
+ int ret = 0;
+ bool notify;
+ struct virtio_fs_forget_req *req = &forget->req;
+
+ spin_lock(&fsvq->lock);
+ if (!fsvq->connected) {
+ if (in_flight)
+ dec_in_flight_req(fsvq);
+ kfree(forget);
+ goto out;
+ }
+
+ sg_init_one(&sg, req, sizeof(*req));
+ vq = fsvq->vq;
+ dev_dbg(&vq->vdev->dev, "%s\n", __func__);
+
+ ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
+ if (ret < 0) {
+ if (ret == -ENOMEM || ret == -ENOSPC) {
+ pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
+ ret);
+ list_add_tail(&forget->list, &fsvq->queued_reqs);
+ schedule_delayed_work(&fsvq->dispatch_work,
+ msecs_to_jiffies(1));
+ if (!in_flight)
+ inc_in_flight_req(fsvq);
+ /* Queue is full */
+ ret = 1;
+ } else {
+ pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
+ ret);
+ kfree(forget);
+ if (in_flight)
+ dec_in_flight_req(fsvq);
+ }
+ goto out;
+ }
+
+ if (!in_flight)
+ inc_in_flight_req(fsvq);
+ notify = virtqueue_kick_prepare(vq);
+ spin_unlock(&fsvq->lock);
+
+ if (notify)
+ virtqueue_notify(vq);
+ return ret;
+out:
+ spin_unlock(&fsvq->lock);
+ return ret;
+}
+
+static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
+{
+ struct virtio_fs_forget *forget;
+ struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
+ dispatch_work.work);
+ pr_debug("virtio-fs: worker %s called.\n", __func__);
+ while (1) {
+ spin_lock(&fsvq->lock);
+ forget = list_first_entry_or_null(&fsvq->queued_reqs,
+ struct virtio_fs_forget, list);
+ if (!forget) {
+ spin_unlock(&fsvq->lock);
+ return;
+ }
+
+ list_del(&forget->list);
+ spin_unlock(&fsvq->lock);
+ if (send_forget_request(fsvq, forget, true))
+ return;
+ }
+}
+
+/* Allocate and copy args into req->argbuf */
+static int copy_args_to_argbuf(struct fuse_req *req)
+{
+ struct fuse_args *args = req->args;
+ unsigned int offset = 0;
+ unsigned int num_in;
+ unsigned int num_out;
+ unsigned int len;
+ unsigned int i;
+
+ num_in = args->in_numargs - args->in_pages;
+ num_out = args->out_numargs - args->out_pages;
+ len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
+ fuse_len_args(num_out, args->out_args);
+
+ req->argbuf = kmalloc(len, GFP_ATOMIC);
+ if (!req->argbuf)
+ return -ENOMEM;
+
+ for (i = 0; i < num_in; i++) {
+ memcpy(req->argbuf + offset,
+ args->in_args[i].value,
+ args->in_args[i].size);
+ offset += args->in_args[i].size;
+ }
+
+ return 0;
+}
+
+/* Copy args out of req->argbuf and then free it */
+static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
+{
+ unsigned int remaining;
+ unsigned int offset;
+ unsigned int num_in;
+ unsigned int num_out;
+ unsigned int i;
+
+ remaining = req->out.h.len - sizeof(req->out.h);
+ num_in = args->in_numargs - args->in_pages;
+ num_out = args->out_numargs - args->out_pages;
+ offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);
+
+ for (i = 0; i < num_out; i++) {
+ unsigned int argsize = args->out_args[i].size;
+
+ if (args->out_argvar &&
+ i == args->out_numargs - 1 &&
+ argsize > remaining) {
+ argsize = remaining;
+ }
+
+ memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
+ offset += argsize;
+
+ if (i != args->out_numargs - 1)
+ remaining -= argsize;
+ }
+
+ /* Store the actual size of the variable-length arg */
+ if (args->out_argvar)
+ args->out_args[args->out_numargs - 1].size = remaining;
+
+ kfree(req->argbuf);
+ req->argbuf = NULL;
+}
+
+/* Work function for request completion */
+static void virtio_fs_requests_done_work(struct work_struct *work)
+{
+ struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
+ done_work);
+ struct fuse_pqueue *fpq = &fsvq->fud->pq;
+ struct fuse_conn *fc = fsvq->fud->fc;
+ struct virtqueue *vq = fsvq->vq;
+ struct fuse_req *req;
+ struct fuse_args_pages *ap;
+ struct fuse_req *next;
+ struct fuse_args *args;
+ unsigned int len, i, thislen;
+ struct page *page;
+ LIST_HEAD(reqs);
+
+ /* Collect completed requests off the virtqueue */
+ spin_lock(&fsvq->lock);
+ do {
+ virtqueue_disable_cb(vq);
+
+ while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
+ spin_lock(&fpq->lock);
+ list_move_tail(&req->list, &reqs);
+ spin_unlock(&fpq->lock);
+ }
+ } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
+ spin_unlock(&fsvq->lock);
+
+ /* End requests */
+ list_for_each_entry_safe(req, next, &reqs, list) {
+ /*
+ * TODO verify that server properly follows FUSE protocol
+ * (oh.uniq, oh.len)
+ */
+ args = req->args;
+ copy_args_from_argbuf(args, req);
+
+ if (args->out_pages && args->page_zeroing) {
+ len = args->out_args[args->out_numargs - 1].size;
+ ap = container_of(args, typeof(*ap), args);
+ for (i = 0; i < ap->num_pages; i++) {
+ thislen = ap->descs[i].length;
+ if (len < thislen) {
+ WARN_ON(ap->descs[i].offset);
+ page = ap->pages[i];
+ zero_user_segment(page, len, thislen);
+ len = 0;
+ } else {
+ len -= thislen;
+ }
+ }
+ }
+
+ spin_lock(&fpq->lock);
+ clear_bit(FR_SENT, &req->flags);
+ list_del_init(&req->list);
+ spin_unlock(&fpq->lock);
+
+ fuse_request_end(fc, req);
+ spin_lock(&fsvq->lock);
+ dec_in_flight_req(fsvq);
+ spin_unlock(&fsvq->lock);
+ }
+}
+
+/* Virtqueue interrupt handler */
+static void virtio_fs_vq_done(struct virtqueue *vq)
+{
+ struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);
+
+ dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);
+
+ schedule_work(&fsvq->done_work);
+}
+
+/* Initialize virtqueues */
+static int virtio_fs_setup_vqs(struct virtio_device *vdev,
+ struct virtio_fs *fs)
+{
+ struct virtqueue **vqs;
+ vq_callback_t **callbacks;
+ const char **names;
+ unsigned int i;
+ int ret = 0;
+
+ virtio_cread(vdev, struct virtio_fs_config, num_request_queues,
+ &fs->num_request_queues);
+ if (fs->num_request_queues == 0)
+ return -EINVAL;
+
+ fs->nvqs = 1 + fs->num_request_queues;
+ fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
+ if (!fs->vqs)
+ return -ENOMEM;
+
+ vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
+ callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
+ GFP_KERNEL);
+ names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
+ if (!vqs || !callbacks || !names) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
+ snprintf(fs->vqs[VQ_HIPRIO].name, sizeof(fs->vqs[VQ_HIPRIO].name),
+ "hiprio");
+ names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
+ INIT_WORK(&fs->vqs[VQ_HIPRIO].done_work, virtio_fs_hiprio_done_work);
+ INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].queued_reqs);
+ INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].end_reqs);
+ INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work,
+ virtio_fs_hiprio_dispatch_work);
+ init_completion(&fs->vqs[VQ_HIPRIO].in_flight_zero);
+ spin_lock_init(&fs->vqs[VQ_HIPRIO].lock);
+
+ /* Initialize the requests virtqueues */
+ for (i = VQ_REQUEST; i < fs->nvqs; i++) {
+ spin_lock_init(&fs->vqs[i].lock);
+ INIT_WORK(&fs->vqs[i].done_work, virtio_fs_requests_done_work);
+ INIT_DELAYED_WORK(&fs->vqs[i].dispatch_work,
+ virtio_fs_request_dispatch_work);
+ INIT_LIST_HEAD(&fs->vqs[i].queued_reqs);
+ INIT_LIST_HEAD(&fs->vqs[i].end_reqs);
+ init_completion(&fs->vqs[i].in_flight_zero);
+ snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name),
+ "requests.%u", i - VQ_REQUEST);
+ callbacks[i] = virtio_fs_vq_done;
+ names[i] = fs->vqs[i].name;
+ }
+
+ ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < fs->nvqs; i++)
+ fs->vqs[i].vq = vqs[i];
+
+ virtio_fs_start_all_queues(fs);
+out:
+ kfree(names);
+ kfree(callbacks);
+ kfree(vqs);
+ if (ret)
+ kfree(fs->vqs);
+ return ret;
+}
+
+/* Free virtqueues (device must already be reset) */
+static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
+ struct virtio_fs *fs)
+{
+ vdev->config->del_vqs(vdev);
+}
+
+static int virtio_fs_probe(struct virtio_device *vdev)
+{
+ struct virtio_fs *fs;
+ int ret;
+
+ fs = kzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ return -ENOMEM;
+ kref_init(&fs->refcount);
+ vdev->priv = fs;
+
+ ret = virtio_fs_read_tag(vdev, fs);
+ if (ret < 0)
+ goto out;
+
+ ret = virtio_fs_setup_vqs(vdev, fs);
+ if (ret < 0)
+ goto out;
+
+ /* TODO vq affinity */
+
+ /* Bring the device online in case the filesystem is mounted and
+ * requests need to be sent before we return.
+ */
+ virtio_device_ready(vdev);
+
+ ret = virtio_fs_add_instance(fs);
+ if (ret < 0)
+ goto out_vqs;
+
+ return 0;
+
+out_vqs:
+ vdev->config->reset(vdev);
+ virtio_fs_cleanup_vqs(vdev, fs);
+
+out:
+ vdev->priv = NULL;
+ kfree(fs);
+ return ret;
+}
+
+static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+ spin_lock(&fsvq->lock);
+ fsvq->connected = false;
+ spin_unlock(&fsvq->lock);
+ }
+}
+
+static void virtio_fs_remove(struct virtio_device *vdev)
+{
+ struct virtio_fs *fs = vdev->priv;
+
+ mutex_lock(&virtio_fs_mutex);
+ /* This device is going away. No one should get a new reference */
+ list_del_init(&fs->list);
+ virtio_fs_stop_all_queues(fs);
+ virtio_fs_drain_all_queues_locked(fs);
+ vdev->config->reset(vdev);
+ virtio_fs_cleanup_vqs(vdev, fs);
+
+ vdev->priv = NULL;
+ /* Put device reference on virtio_fs object */
+ virtio_fs_put(fs);
+ mutex_unlock(&virtio_fs_mutex);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtio_fs_freeze(struct virtio_device *vdev)
+{
+ /* TODO need to save state here */
+ pr_warn("virtio-fs: suspend/resume not yet supported\n");
+ return -EOPNOTSUPP;
+}
+
+static int virtio_fs_restore(struct virtio_device *vdev)
+{
+ /* TODO need to restore state here */
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
+ {},
+};
+
+static const unsigned int feature_table[] = {};
+
+static struct virtio_driver virtio_fs_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .feature_table = feature_table,
+ .feature_table_size = ARRAY_SIZE(feature_table),
+ .probe = virtio_fs_probe,
+ .remove = virtio_fs_remove,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtio_fs_freeze,
+ .restore = virtio_fs_restore,
+#endif
+};
+
+static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
+__releases(fiq->lock)
+{
+ struct fuse_forget_link *link;
+ struct virtio_fs_forget *forget;
+ struct virtio_fs_forget_req *req;
+ struct virtio_fs *fs;
+ struct virtio_fs_vq *fsvq;
+ u64 unique;
+
+ link = fuse_dequeue_forget(fiq, 1, NULL);
+ unique = fuse_get_unique(fiq);
+
+ fs = fiq->priv;
+ fsvq = &fs->vqs[VQ_HIPRIO];
+ spin_unlock(&fiq->lock);
+
+ /* Allocate a buffer for the request */
+ forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
+ req = &forget->req;
+
+ req->ih = (struct fuse_in_header){
+ .opcode = FUSE_FORGET,
+ .nodeid = link->forget_one.nodeid,
+ .unique = unique,
+ .len = sizeof(*req),
+ };
+ req->arg = (struct fuse_forget_in){
+ .nlookup = link->forget_one.nlookup,
+ };
+
+ send_forget_request(fsvq, forget, false);
+ kfree(link);
+}
+
+static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
+__releases(fiq->lock)
+{
+ /*
+ * TODO interrupts.
+ *
+ * Normal fs operations on a local filesystem aren't interruptible.
+ * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
+ * with a lock shared between host and guest.
+ */
+ spin_unlock(&fiq->lock);
+}
+
+/* Return the number of scatter-gather list elements required */
+static unsigned int sg_count_fuse_req(struct fuse_req *req)
+{
+ struct fuse_args *args = req->args;
+ struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
+ unsigned int total_sgs = 1 /* fuse_in_header */;
+
+ if (args->in_numargs - args->in_pages)
+ total_sgs += 1;
+
+ if (args->in_pages)
+ total_sgs += ap->num_pages;
+
+ if (!test_bit(FR_ISREPLY, &req->flags))
+ return total_sgs;
+
+ total_sgs += 1 /* fuse_out_header */;
+
+ if (args->out_numargs - args->out_pages)
+ total_sgs += 1;
+
+ if (args->out_pages)
+ total_sgs += ap->num_pages;
+
+ return total_sgs;
+}
+
+/* Add pages to scatter-gather list and return number of elements used */
+static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
+ struct page **pages,
+ struct fuse_page_desc *page_descs,
+ unsigned int num_pages,
+ unsigned int total_len)
+{
+ unsigned int i;
+ unsigned int this_len;
+
+ for (i = 0; i < num_pages && total_len; i++) {
+ sg_init_table(&sg[i], 1);
+ this_len = min(page_descs[i].length, total_len);
+ sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
+ total_len -= this_len;
+ }
+
+ return i;
+}
+
+/* Add args to scatter-gather list and return number of elements used */
+static unsigned int sg_init_fuse_args(struct scatterlist *sg,
+ struct fuse_req *req,
+ struct fuse_arg *args,
+ unsigned int numargs,
+ bool argpages,
+ void *argbuf,
+ unsigned int *len_used)
+{
+ struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
+ unsigned int total_sgs = 0;
+ unsigned int len;
+
+ len = fuse_len_args(numargs - argpages, args);
+ if (len)
+ sg_init_one(&sg[total_sgs++], argbuf, len);
+
+ if (argpages)
+ total_sgs += sg_init_fuse_pages(&sg[total_sgs],
+ ap->pages, ap->descs,
+ ap->num_pages,
+ args[numargs - 1].size);
+
+ if (len_used)
+ *len_used = len;
+
+ return total_sgs;
+}
+
+/* Add a request to a virtqueue and kick the device */
+static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
+ struct fuse_req *req, bool in_flight)
+{
+ /* requests need at least 4 elements */
+ struct scatterlist *stack_sgs[6];
+ struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
+ struct scatterlist **sgs = stack_sgs;
+ struct scatterlist *sg = stack_sg;
+ struct virtqueue *vq;
+ struct fuse_args *args = req->args;
+ unsigned int argbuf_used = 0;
+ unsigned int out_sgs = 0;
+ unsigned int in_sgs = 0;
+ unsigned int total_sgs;
+ unsigned int i;
+ int ret;
+ bool notify;
+ struct fuse_pqueue *fpq;
+
+ /* Does the sglist fit on the stack? */
+ total_sgs = sg_count_fuse_req(req);
+ if (total_sgs > ARRAY_SIZE(stack_sgs)) {
+ sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
+ sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
+ if (!sgs || !sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* Use a bounce buffer since stack args cannot be mapped */
+ ret = copy_args_to_argbuf(req);
+ if (ret < 0)
+ goto out;
+
+ /* Request elements */
+ sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
+ out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
+ (struct fuse_arg *)args->in_args,
+ args->in_numargs, args->in_pages,
+ req->argbuf, &argbuf_used);
+
+ /* Reply elements */
+ if (test_bit(FR_ISREPLY, &req->flags)) {
+ sg_init_one(&sg[out_sgs + in_sgs++],
+ &req->out.h, sizeof(req->out.h));
+ in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
+ args->out_args, args->out_numargs,
+ args->out_pages,
+ req->argbuf + argbuf_used, NULL);
+ }
+
+ WARN_ON(out_sgs + in_sgs != total_sgs);
+
+ for (i = 0; i < total_sgs; i++)
+ sgs[i] = &sg[i];
+
+ spin_lock(&fsvq->lock);
+
+ if (!fsvq->connected) {
+ spin_unlock(&fsvq->lock);
+ ret = -ENOTCONN;
+ goto out;
+ }
+
+ vq = fsvq->vq;
+ ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
+ if (ret < 0) {
+ spin_unlock(&fsvq->lock);
+ goto out;
+ }
+
+ /* Request successfully sent. */
+ fpq = &fsvq->fud->pq;
+ spin_lock(&fpq->lock);
+ list_add_tail(&req->list, fpq->processing);
+ spin_unlock(&fpq->lock);
+ set_bit(FR_SENT, &req->flags);
+ /* matches barrier in request_wait_answer() */
+ smp_mb__after_atomic();
+
+ if (!in_flight)
+ inc_in_flight_req(fsvq);
+ notify = virtqueue_kick_prepare(vq);
+
+ spin_unlock(&fsvq->lock);
+
+ if (notify)
+ virtqueue_notify(vq);
+
+out:
+ if (ret < 0 && req->argbuf) {
+ kfree(req->argbuf);
+ req->argbuf = NULL;
+ }
+ if (sgs != stack_sgs) {
+ kfree(sgs);
+ kfree(sg);
+ }
+
+ return ret;
+}
+
+static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
+__releases(fiq->lock)
+{
+ unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
+ struct virtio_fs *fs;
+ struct fuse_req *req;
+ struct virtio_fs_vq *fsvq;
+ int ret;
+
+ WARN_ON(list_empty(&fiq->pending));
+ req = list_last_entry(&fiq->pending, struct fuse_req, list);
+ clear_bit(FR_PENDING, &req->flags);
+ list_del_init(&req->list);
+ WARN_ON(!list_empty(&fiq->pending));
+ spin_unlock(&fiq->lock);
+
+ fs = fiq->priv;
+
+ pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
+ __func__, req->in.h.opcode, req->in.h.unique,
+ req->in.h.nodeid, req->in.h.len,
+ fuse_len_args(req->args->out_numargs, req->args->out_args));
+
+ fsvq = &fs->vqs[queue_id];
+ ret = virtio_fs_enqueue_req(fsvq, req, false);
+ if (ret < 0) {
+ if (ret == -ENOMEM || ret == -ENOSPC) {
+ /*
+ * Virtqueue full. Retry submission from worker
+ * context as we might be holding fc->bg_lock.
+ */
+ spin_lock(&fsvq->lock);
+ list_add_tail(&req->list, &fsvq->queued_reqs);
+ inc_in_flight_req(fsvq);
+ schedule_delayed_work(&fsvq->dispatch_work,
+ msecs_to_jiffies(1));
+ spin_unlock(&fsvq->lock);
+ return;
+ }
+ req->out.h.error = ret;
+ pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
+
+ /* Can't end request in submission context. Use a worker */
+ spin_lock(&fsvq->lock);
+ list_add_tail(&req->list, &fsvq->end_reqs);
+ schedule_delayed_work(&fsvq->dispatch_work, 0);
+ spin_unlock(&fsvq->lock);
+ return;
+ }
+}
+
+static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
+ .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
+ .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
+ .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
+ .release = virtio_fs_fiq_release,
+};
+
+static int virtio_fs_fill_super(struct super_block *sb)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+ struct virtio_fs *fs = fc->iq.priv;
+ unsigned int i;
+ int err;
+ struct fuse_fs_context ctx = {
+ .rootmode = S_IFDIR,
+ .default_permissions = 1,
+ .allow_other = 1,
+ .max_read = UINT_MAX,
+ .blksize = 512,
+ .destroy = true,
+ .no_control = true,
+ .no_force_umount = true,
+ .no_mount_options = true,
+ };
+
+ mutex_lock(&virtio_fs_mutex);
+
+ /* After taking the mutex, make sure the virtiofs device is still there.
+ * Though we hold a reference to it, the driver's ->remove() might
+ * already have cleaned up the virtqueues. In that case bail out.
+ */
+ err = -EINVAL;
+ if (list_empty(&fs->list)) {
+ pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
+ goto err;
+ }
+
+ err = -ENOMEM;
+ /* Allocate a fuse_dev for each non-request queue (currently just hiprio) */
+ for (i = 0; i < VQ_REQUEST; i++) {
+ struct virtio_fs_vq *fsvq = &fs->vqs[i];
+
+ fsvq->fud = fuse_dev_alloc();
+ if (!fsvq->fud)
+ goto err_free_fuse_devs;
+ }
+
+ ctx.fudptr = (void **)&fs->vqs[VQ_REQUEST].fud;
+ err = fuse_fill_super_common(sb, &ctx);
+ if (err < 0)
+ goto err_free_fuse_devs;
+
+ fc = fs->vqs[VQ_REQUEST].fud->fc;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ struct virtio_fs_vq *fsvq = &fs->vqs[i];
+
+ if (i == VQ_REQUEST)
+ continue; /* already initialized */
+ fuse_dev_install(fsvq->fud, fc);
+ }
+
+ /* A previous unmount will have stopped all queues. Start them again */
+ virtio_fs_start_all_queues(fs);
+ fuse_send_init(fc);
+ mutex_unlock(&virtio_fs_mutex);
+ return 0;
+
+err_free_fuse_devs:
+ virtio_fs_free_devs(fs);
+err:
+ mutex_unlock(&virtio_fs_mutex);
+ return err;
+}
+
+static void virtio_kill_sb(struct super_block *sb)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+ struct virtio_fs *vfs;
+ struct virtio_fs_vq *fsvq;
+
+ /* If mount failed, we can still be called without any fc */
+ if (!fc)
+ return fuse_kill_sb_anon(sb);
+
+ vfs = fc->iq.priv;
+ fsvq = &vfs->vqs[VQ_HIPRIO];
+
+ /* Stop the forget queue. The destroy request will be sent soon */
+ spin_lock(&fsvq->lock);
+ fsvq->connected = false;
+ spin_unlock(&fsvq->lock);
+ virtio_fs_drain_all_queues(vfs);
+
+ fuse_kill_sb_anon(sb);
+
+ /* fuse_kill_sb_anon() must have sent destroy. Stop all queues,
+ * drain them one more time, and free the fuse devices. Freeing the
+ * fuse devices drops their references on the fuse_conn, which in
+ * turn drops its reference on the virtio_fs object.
+ */
+ virtio_fs_stop_all_queues(vfs);
+ virtio_fs_drain_all_queues(vfs);
+ virtio_fs_free_devs(vfs);
+}
+
+static int virtio_fs_test_super(struct super_block *sb,
+ struct fs_context *fsc)
+{
+ struct fuse_conn *fc = fsc->s_fs_info;
+
+ return fc->iq.priv == get_fuse_conn_super(sb)->iq.priv;
+}
+
+static int virtio_fs_set_super(struct super_block *sb,
+ struct fs_context *fsc)
+{
+ int err;
+
+ err = get_anon_bdev(&sb->s_dev);
+ if (!err)
+ fuse_conn_get(fsc->s_fs_info);
+
+ return err;
+}
+
+static int virtio_fs_get_tree(struct fs_context *fsc)
+{
+ struct virtio_fs *fs;
+ struct super_block *sb;
+ struct fuse_conn *fc;
+ int err;
+
+ /* This gets a reference on the virtio_fs object. The pointer is
+ * installed in fc->iq.priv; when the fuse_conn goes away, the fiq
+ * ->release() callback drops that reference.
+ */
+ fs = virtio_fs_find_instance(fsc->source);
+ if (!fs) {
+ pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
+ return -EINVAL;
+ }
+
+ fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
+ if (!fc) {
+ mutex_lock(&virtio_fs_mutex);
+ virtio_fs_put(fs);
+ mutex_unlock(&virtio_fs_mutex);
+ return -ENOMEM;
+ }
+
+ fuse_conn_init(fc, get_user_ns(current_user_ns()), &virtio_fs_fiq_ops,
+ fs);
+ fc->release = fuse_free_conn;
+ fc->delete_stale = true;
+
+ fsc->s_fs_info = fc;
+ sb = sget_fc(fsc, virtio_fs_test_super, virtio_fs_set_super);
+ fuse_conn_put(fc);
+ if (IS_ERR(sb))
+ return PTR_ERR(sb);
+
+ if (!sb->s_root) {
+ err = virtio_fs_fill_super(sb);
+ if (err) {
+ deactivate_locked_super(sb);
+ return err;
+ }
+
+ sb->s_flags |= SB_ACTIVE;
+ }
+
+ WARN_ON(fsc->root);
+ fsc->root = dget(sb->s_root);
+ return 0;
+}
+
+static const struct fs_context_operations virtio_fs_context_ops = {
+ .get_tree = virtio_fs_get_tree,
+};
+
+static int virtio_fs_init_fs_context(struct fs_context *fsc)
+{
+ fsc->ops = &virtio_fs_context_ops;
+ return 0;
+}
+
+static struct file_system_type virtio_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "virtiofs",
+ .init_fs_context = virtio_fs_init_fs_context,
+ .kill_sb = virtio_kill_sb,
+};
+
+static int __init virtio_fs_init(void)
+{
+ int ret;
+
+ ret = register_virtio_driver(&virtio_fs_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = register_filesystem(&virtio_fs_type);
+ if (ret < 0) {
+ unregister_virtio_driver(&virtio_fs_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(virtio_fs_init);
+
+static void __exit virtio_fs_exit(void)
+{
+ unregister_filesystem(&virtio_fs_type);
+ unregister_virtio_driver(&virtio_fs_driver);
+}
+module_exit(virtio_fs_exit);
+
+MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
+MODULE_DESCRIPTION("Virtio Filesystem");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_FS(KBUILD_MODNAME);
+MODULE_DEVICE_TABLE(virtio, id_table);
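
Aside on the scatter-gather accounting in the new file above: sg_count_fuse_req() sizes the list that virtio_fs_enqueue_req() later fills and hands to virtqueue_add_sgs(). A rough worked example, with a hypothetical request shape (one non-page input argument, the reply delivered through out_pages, FR_ISREPLY set):

    /* Hypothetical READ-like request: */
    total_sgs = 1                   /* fuse_in_header               */
              + 1                   /* the single non-page in arg   */
              + 1                   /* fuse_out_header              */
              + ap->num_pages;      /* reply data pages             */

The same count is what out_sgs + in_sgs adds up to after virtio_fs_enqueue_req() has built the list, which is what the WARN_ON there checks.
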
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index 433717640f78..20d052e08b3b 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -25,15 +25,15 @@ int fuse_setxattr(struct inode *inode, const char *name, const void *value,
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
inarg.flags = flags;
- args.in.h.opcode = FUSE_SETXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 3;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = strlen(name) + 1;
- args.in.args[1].value = name;
- args.in.args[2].size = size;
- args.in.args[2].value = value;
+ args.opcode = FUSE_SETXATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 3;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = strlen(name) + 1;
+ args.in_args[1].value = name;
+ args.in_args[2].size = size;
+ args.in_args[2].value = value;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_setxattr = 1;
@@ -60,22 +60,22 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
- args.in.h.opcode = FUSE_GETXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = strlen(name) + 1;
- args.in.args[1].value = name;
+ args.opcode = FUSE_GETXATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = strlen(name) + 1;
+ args.in_args[1].value = name;
/* This is really two different operations rolled into one */
- args.out.numargs = 1;
+ args.out_numargs = 1;
if (size) {
- args.out.argvar = 1;
- args.out.args[0].size = size;
- args.out.args[0].value = value;
+ args.out_argvar = true;
+ args.out_args[0].size = size;
+ args.out_args[0].value = value;
} else {
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
}
ret = fuse_simple_request(fc, &args);
if (!ret && !size)
@@ -121,20 +121,20 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
- args.in.h.opcode = FUSE_LISTXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
+ args.opcode = FUSE_LISTXATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
/* This is really two different operations rolled into one */
- args.out.numargs = 1;
+ args.out_numargs = 1;
if (size) {
- args.out.argvar = 1;
- args.out.args[0].size = size;
- args.out.args[0].value = list;
+ args.out_argvar = true;
+ args.out_args[0].size = size;
+ args.out_args[0].value = list;
} else {
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
}
ret = fuse_simple_request(fc, &args);
if (!ret && !size)
@@ -157,11 +157,11 @@ int fuse_removexattr(struct inode *inode, const char *name)
if (fc->no_removexattr)
return -EOPNOTSUPP;
- args.in.h.opcode = FUSE_REMOVEXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = strlen(name) + 1;
- args.in.args[0].value = name;
+ args.opcode = FUSE_REMOVEXATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = strlen(name) + 1;
+ args.in_args[0].value = name;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_removexattr = 1;
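
Aside: the size == 0 branch above implements the usual xattr size-probe protocol, where a zero-sized request only reports how large the value is, and a sized request fills the variable-length out argument (out_argvar) and returns the bytes actually copied. A userspace sketch of the two-phase call this supports (read_xattr is an illustrative helper, error handling is trimmed, and the value can of course grow between the two calls):

    #include <stdlib.h>
    #include <sys/xattr.h>

    /* Probe the size first, then fetch; mirrors the size == 0 / out_argvar
     * split in fuse_getxattr() above. */
    static ssize_t read_xattr(const char *path, const char *name, char **out)
    {
            ssize_t size = getxattr(path, name, NULL, 0);   /* size probe */

            if (size < 0)
                    return size;
            *out = malloc(size);
            if (!*out)
                    return -1;
            return getxattr(path, name, *out, size);        /* actual fetch */
    }
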
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index f42048cc5454..9c6df721321a 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -133,7 +133,7 @@ static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- offset = i_size & (PAGE_SIZE-1);
+ offset = i_size & (PAGE_SIZE - 1);
if (page->index == end_index && offset)
zero_user_segment(page, offset, PAGE_SIZE);
@@ -243,7 +243,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct gfs2_sbd *sdp = GFS2_SB(inode);
- unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
+ unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
int i;
int ret;
@@ -497,7 +497,7 @@ static int __gfs2_readpage(void *file, struct page *page)
error = mpage_readpage(page, gfs2_block_map);
}
- if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
+ if (unlikely(gfs2_withdrawn(sdp)))
return -EIO;
return error;
@@ -552,7 +552,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
unsigned size)
{
struct address_space *mapping = ip->i_inode.i_mapping;
- unsigned long index = *pos / PAGE_SIZE;
+ unsigned long index = *pos >> PAGE_SHIFT;
unsigned offset = *pos & (PAGE_SIZE - 1);
unsigned copied = 0;
unsigned amt;
@@ -614,7 +614,7 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
gfs2_glock_dq(&gh);
out_uninit:
gfs2_holder_uninit(&gh);
- if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
+ if (unlikely(gfs2_withdrawn(sdp)))
ret = -EIO;
return ret;
}
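
Aside on the arithmetic changes above: each replacement trades a division for a shift over the same power-of-two quantities. With an illustrative 4096-byte page (PAGE_SHIFT == 12) and 1024-byte blocks (inode->i_blkbits == 10):

    PAGE_SIZE >> inode->i_blkbits   /* 4096 >> 10 == 4, same as PAGE_SIZE / blocksize */
    *pos >> PAGE_SHIFT              /* page index, same as *pos / PAGE_SIZE           */
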
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 79581b9bdebb..08f6fbb3655e 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -390,6 +390,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
return mp->mp_aheight - x - 1;
}
+static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
+{
+ sector_t factor = 1, block = 0;
+ int hgt;
+
+ for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
+ if (hgt < mp->mp_aheight)
+ block += mp->mp_list[hgt] * factor;
+ factor *= sdp->sd_inptrs;
+ }
+ return block;
+}
+
static void release_metapath(struct metapath *mp)
{
int i;
@@ -430,60 +443,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
return ptr - first;
}
-typedef const __be64 *(*gfs2_metadata_walker)(
- struct metapath *mp,
- const __be64 *start, const __be64 *end,
- u64 factor, void *data);
+enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
-#define WALK_STOP ((__be64 *)0)
-#define WALK_NEXT ((__be64 *)1)
+/*
+ * gfs2_metadata_walker - walk an indirect block
+ * @mp: Metapath to indirect block
+ * @ptrs: Number of pointers to look at
+ *
+ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
+ * indirect block to follow.
+ */
+typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
+ unsigned int ptrs);
-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
- u64 len, struct metapath *mp, gfs2_metadata_walker walker,
- void *data)
+/*
+ * gfs2_walk_metadata - walk a tree of indirect blocks
+ * @inode: The inode
+ * @mp: Starting point of walk
+ * @max_len: Maximum number of blocks to walk
+ * @walker: Called during the walk
+ *
+ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
+ * past the end of metadata, and a negative error code otherwise.
+ */
+
+static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
+ u64 max_len, gfs2_metadata_walker walker)
{
- struct metapath clone;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- const __be64 *start, *end, *ptr;
u64 factor = 1;
unsigned int hgt;
- int ret = 0;
+ int ret;
- for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
+ /*
+ * The walk starts in the lowest allocated indirect block, which may be
+ * before the position indicated by @mp. Adjust @max_len accordingly
+ * to avoid a short walk.
+ */
+ for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
+ max_len += mp->mp_list[hgt] * factor;
+ mp->mp_list[hgt] = 0;
factor *= sdp->sd_inptrs;
+ }
for (;;) {
- u64 step;
+ u16 start = mp->mp_list[hgt];
+ enum walker_status status;
+ unsigned int ptrs;
+ u64 len;
/* Walk indirect block. */
- start = metapointer(hgt, mp);
- end = metaend(hgt, mp);
-
- step = (end - start) * factor;
- if (step > len)
- end = start + DIV_ROUND_UP_ULL(len, factor);
-
- ptr = walker(mp, start, end, factor, data);
- if (ptr == WALK_STOP)
+ ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
+ len = ptrs * factor;
+ if (len > max_len)
+ ptrs = DIV_ROUND_UP_ULL(max_len, factor);
+ status = walker(mp, ptrs);
+ switch (status) {
+ case WALK_STOP:
+ return 1;
+ case WALK_FOLLOW:
+ BUG_ON(mp->mp_aheight == mp->mp_fheight);
+ ptrs = mp->mp_list[hgt] - start;
+ len = ptrs * factor;
break;
- if (step >= len)
+ case WALK_CONTINUE:
break;
- len -= step;
- if (ptr != WALK_NEXT) {
- BUG_ON(!*ptr);
- mp->mp_list[hgt] += ptr - start;
- goto fill_up_metapath;
}
+ if (len >= max_len)
+ break;
+ max_len -= len;
+ if (status == WALK_FOLLOW)
+ goto fill_up_metapath;
lower_metapath:
/* Decrease height of metapath. */
- if (mp != &clone) {
- clone_metapath(&clone, mp);
- mp = &clone;
- }
brelse(mp->mp_bh[hgt]);
mp->mp_bh[hgt] = NULL;
+ mp->mp_list[hgt] = 0;
if (!hgt)
break;
hgt--;
@@ -491,10 +528,7 @@ lower_metapath:
/* Advance in metadata tree. */
(mp->mp_list[hgt])++;
- start = metapointer(hgt, mp);
- end = metaend(hgt, mp);
- if (start >= end) {
- mp->mp_list[hgt] = 0;
+ if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
if (!hgt)
break;
goto lower_metapath;
@@ -502,44 +536,36 @@ lower_metapath:
fill_up_metapath:
/* Increase height of metapath. */
- if (mp != &clone) {
- clone_metapath(&clone, mp);
- mp = &clone;
- }
ret = fillup_metapath(ip, mp, ip->i_height - 1);
if (ret < 0)
- break;
+ return ret;
hgt += ret;
for (; ret; ret--)
do_div(factor, sdp->sd_inptrs);
mp->mp_aheight = hgt + 1;
}
- if (mp == &clone)
- release_metapath(mp);
- return ret;
+ return 0;
}
-struct gfs2_hole_walker_args {
- u64 blocks;
-};
-
-static const __be64 *gfs2_hole_walker(struct metapath *mp,
- const __be64 *start, const __be64 *end,
- u64 factor, void *data)
+static enum walker_status gfs2_hole_walker(struct metapath *mp,
+ unsigned int ptrs)
{
- struct gfs2_hole_walker_args *args = data;
- const __be64 *ptr;
+ const __be64 *start, *ptr, *end;
+ unsigned int hgt;
+
+ hgt = mp->mp_aheight - 1;
+ start = metapointer(hgt, mp);
+ end = start + ptrs;
for (ptr = start; ptr < end; ptr++) {
if (*ptr) {
- args->blocks += (ptr - start) * factor;
+ mp->mp_list[hgt] += ptr - start;
if (mp->mp_aheight == mp->mp_fheight)
return WALK_STOP;
- return ptr; /* increase height */
+ return WALK_FOLLOW;
}
}
- args->blocks += (end - start) * factor;
- return WALK_NEXT;
+ return WALK_CONTINUE;
}
/**
@@ -557,12 +583,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
struct metapath *mp, struct iomap *iomap)
{
- struct gfs2_hole_walker_args args = { };
- int ret = 0;
+ struct metapath clone;
+ u64 hole_size;
+ int ret;
- ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
- if (!ret)
- iomap->length = args.blocks << inode->i_blkbits;
+ clone_metapath(&clone, mp);
+ ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
+ if (ret < 0)
+ goto out;
+
+ if (ret == 1)
+ hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
+ else
+ hole_size = len;
+ iomap->length = hole_size << inode->i_blkbits;
+ ret = 0;
+
+out:
+ release_metapath(&clone);
return ret;
}
@@ -1002,11 +1040,16 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
unsigned copied, struct page *page,
struct iomap *iomap)
{
+ struct gfs2_trans *tr = current->journal_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
if (page && !gfs2_is_stuffed(ip))
gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+
+ if (tr->tr_num_buf_new)
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
gfs2_trans_end(sdp);
}
@@ -1022,54 +1065,38 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
- bool unstuff, alloc_required;
+ bool unstuff;
int ret;
- ret = gfs2_write_lock(inode);
- if (ret)
- return ret;
-
unstuff = gfs2_is_stuffed(ip) &&
pos + length > gfs2_max_stuffed_size(ip);
- ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp);
- if (ret)
- goto out_unlock;
-
- alloc_required = unstuff || iomap->type == IOMAP_HOLE;
+ if (unstuff || iomap->type == IOMAP_HOLE) {
+ unsigned int data_blocks, ind_blocks;
+ struct gfs2_alloc_parms ap = {};
+ unsigned int rblocks;
+ struct gfs2_trans *tr;
- if (alloc_required || gfs2_is_jdata(ip))
gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
&ind_blocks);
-
- if (alloc_required) {
- struct gfs2_alloc_parms ap = {
- .target = data_blocks + ind_blocks
- };
-
+ ap.target = data_blocks + ind_blocks;
ret = gfs2_quota_lock_check(ip, &ap);
if (ret)
- goto out_unlock;
+ return ret;
ret = gfs2_inplace_reserve(ip, &ap);
if (ret)
goto out_qunlock;
- }
- rblocks = RES_DINODE + ind_blocks;
- if (gfs2_is_jdata(ip))
- rblocks += data_blocks;
- if (ind_blocks || data_blocks)
- rblocks += RES_STATFS + RES_QUOTA;
- if (inode == sdp->sd_rindex)
- rblocks += 2 * RES_STATFS;
- if (alloc_required)
+ rblocks = RES_DINODE + ind_blocks;
+ if (gfs2_is_jdata(ip))
+ rblocks += data_blocks;
+ if (ind_blocks || data_blocks)
+ rblocks += RES_STATFS + RES_QUOTA;
+ if (inode == sdp->sd_rindex)
+ rblocks += 2 * RES_STATFS;
rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
- if (unstuff || iomap->type == IOMAP_HOLE) {
- struct gfs2_trans *tr;
-
ret = gfs2_trans_begin(sdp, rblocks,
iomap->length >> inode->i_blkbits);
if (ret)
@@ -1099,8 +1126,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
tr = current->journal_info;
if (tr->tr_num_buf_new)
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- else
- gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]);
gfs2_trans_end(sdp);
}
@@ -1112,18 +1137,20 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
out_trans_end:
gfs2_trans_end(sdp);
out_trans_fail:
- if (alloc_required)
- gfs2_inplace_release(ip);
+ gfs2_inplace_release(ip);
out_qunlock:
- if (alloc_required)
- gfs2_quota_unlock(ip);
-out_unlock:
- gfs2_write_unlock(inode);
+ gfs2_quota_unlock(ip);
return ret;
}
+static inline bool gfs2_iomap_need_write_lock(unsigned flags)
+{
+ return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
+}
+
static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, struct iomap *iomap)
+ unsigned flags, struct iomap *iomap,
+ struct iomap *srcmap)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct metapath mp = { .mp_aheight = 1, };
@@ -1132,20 +1159,43 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
iomap->flags |= IOMAP_F_BUFFER_HEAD;
trace_gfs2_iomap_start(ip, pos, length, flags);
- if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
- ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
- } else {
- ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
+ if (gfs2_iomap_need_write_lock(flags)) {
+ ret = gfs2_write_lock(inode);
+ if (ret)
+ goto out;
+ }
- /*
- * Silently fall back to buffered I/O for stuffed files or if
- * we've hot a hole (see gfs2_file_direct_write).
- */
- if ((flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT) &&
- iomap->type != IOMAP_MAPPED)
- ret = -ENOTBLK;
+ ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
+ if (ret)
+ goto out_unlock;
+
+ switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
+ case IOMAP_WRITE:
+ if (flags & IOMAP_DIRECT) {
+ /*
+ * Silently fall back to buffered I/O for stuffed files
+ * or if we've got a hole (see gfs2_file_direct_write).
+ */
+ if (iomap->type != IOMAP_MAPPED)
+ ret = -ENOTBLK;
+ goto out_unlock;
+ }
+ break;
+ case IOMAP_ZERO:
+ if (iomap->type == IOMAP_HOLE)
+ goto out_unlock;
+ break;
+ default:
+ goto out_unlock;
}
+
+ ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
+
+out_unlock:
+ if (ret && gfs2_iomap_need_write_lock(flags))
+ gfs2_write_unlock(inode);
release_metapath(&mp);
+out:
trace_gfs2_iomap_end(ip, iomap, ret);
return ret;
}
@@ -1156,8 +1206,18 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE)
- goto out;
+ switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
+ case IOMAP_WRITE:
+ if (flags & IOMAP_DIRECT)
+ return 0;
+ break;
+ case IOMAP_ZERO:
+ if (iomap->type == IOMAP_HOLE)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
if (!gfs2_is_stuffed(ip))
gfs2_ordered_add_inode(ip);
@@ -1181,11 +1241,17 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
if (ip->i_qadata && ip->i_qadata->qa_qd_num)
gfs2_quota_unlock(ip);
+
+ if (unlikely(!written))
+ goto out_unlock;
+
if (iomap->flags & IOMAP_F_SIZE_CHANGED)
mark_inode_dirty(inode);
- gfs2_write_unlock(inode);
+ set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
-out:
+out_unlock:
+ if (gfs2_iomap_need_write_lock(flags))
+ gfs2_write_unlock(inode);
return 0;
}
@@ -1283,76 +1349,10 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
return ret;
}
-/**
- * gfs2_block_zero_range - Deal with zeroing out data
- *
- * This is partly borrowed from ext3.
- */
static int gfs2_block_zero_range(struct inode *inode, loff_t from,
unsigned int length)
{
- struct address_space *mapping = inode->i_mapping;
- struct gfs2_inode *ip = GFS2_I(inode);
- unsigned long index = from >> PAGE_SHIFT;
- unsigned offset = from & (PAGE_SIZE-1);
- unsigned blocksize, iblock, pos;
- struct buffer_head *bh;
- struct page *page;
- int err;
-
- page = find_or_create_page(mapping, index, GFP_NOFS);
- if (!page)
- return 0;
-
- blocksize = inode->i_sb->s_blocksize;
- iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
-
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
-
- /* Find the buffer that contains "offset" */
- bh = page_buffers(page);
- pos = blocksize;
- while (offset >= pos) {
- bh = bh->b_this_page;
- iblock++;
- pos += blocksize;
- }
-
- err = 0;
-
- if (!buffer_mapped(bh)) {
- gfs2_block_map(inode, iblock, bh, 0);
- /* unmapped? It's a hole - nothing to do */
- if (!buffer_mapped(bh))
- goto unlock;
- }
-
- /* Ok, it's mapped. Make sure it's up-to-date */
- if (PageUptodate(page))
- set_buffer_uptodate(bh);
-
- if (!buffer_uptodate(bh)) {
- err = -EIO;
- ll_rw_block(REQ_OP_READ, 0, 1, &bh);
- wait_on_buffer(bh);
- /* Uhhuh. Read error. Complain and punt. */
- if (!buffer_uptodate(bh))
- goto unlock;
- err = 0;
- }
-
- if (gfs2_is_jdata(ip))
- gfs2_trans_add_data(ip->i_gl, bh);
- else
- gfs2_ordered_add_inode(ip);
-
- zero_user(page, offset, length);
- mark_buffer_dirty(bh);
-unlock:
- unlock_page(page);
- put_page(page);
- return err;
+ return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
}
#define GFS2_JTRUNC_REVOKES 8192
@@ -1633,6 +1633,7 @@ out_unlock:
brelse(dibh);
up_write(&ip->i_rw_mutex);
gfs2_trans_end(sdp);
+ buf_in_tr = false;
}
gfs2_glock_dq_uninit(rd_gh);
cond_resched();
@@ -2140,7 +2141,7 @@ static int do_grow(struct inode *inode, u64 size)
if (error)
goto do_end_trans;
- i_size_write(inode, size);
+ truncate_setsize(inode, size);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
@@ -2440,8 +2441,16 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
struct inode *inode = file_inode(file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ unsigned int blocksize = i_blocksize(inode);
+ loff_t start, end;
int error;
+ start = round_down(offset, blocksize);
+ end = round_up(offset + length, blocksize) - 1;
+ error = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (error)
+ return error;
+
if (gfs2_is_jdata(ip))
error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
GFS2_JTRUNC_REVOKES);
@@ -2455,9 +2464,8 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
if (error)
goto out;
} else {
- unsigned int start_off, end_len, blocksize;
+ unsigned int start_off, end_len;
- blocksize = i_blocksize(inode);
start_off = offset & (blocksize - 1);
end_len = (offset + length) & (blocksize - 1);
if (start_off) {
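
For illustration, a hypothetical worked example of the new block-aligned flush range in __gfs2_punch_hole (values assumed; 4096-byte block size):

/* Illustration only -- not part of the patch.
 *
 *   blocksize = 4096, offset = 5000, length = 3000
 *   start = round_down(5000, 4096)           = 4096
 *   end   = round_up(5000 + 3000, 4096) - 1  = 8191
 *
 *   filemap_write_and_wait_range(inode->i_mapping, 4096, 8191);
 */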
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index a7bb76e9a82b..2e215e8c3c88 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -38,7 +38,7 @@ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
struct inode *inode;
struct gfs2_holder d_gh;
struct gfs2_inode *ip = NULL;
- int error;
+ int error, valid = 0;
int had_lock = 0;
if (flags & LOOKUP_RCU)
@@ -51,53 +51,30 @@ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
if (inode) {
if (is_bad_inode(inode))
- goto invalid;
+ goto out;
ip = GFS2_I(inode);
}
- if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
- goto valid;
+ if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) {
+ valid = 1;
+ goto out;
+ }
had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
if (!had_lock) {
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
if (error)
- goto fail;
- }
-
- error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip);
- switch (error) {
- case 0:
- if (!inode)
- goto invalid_gunlock;
- break;
- case -ENOENT:
- if (!inode)
- goto valid_gunlock;
- goto invalid_gunlock;
- default:
- goto fail_gunlock;
+ goto out;
}
-valid_gunlock:
- if (!had_lock)
- gfs2_glock_dq_uninit(&d_gh);
-valid:
- dput(parent);
- return 1;
+ error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip);
+ valid = inode ? !error : (error == -ENOENT);
-invalid_gunlock:
if (!had_lock)
gfs2_glock_dq_uninit(&d_gh);
-invalid:
+out:
dput(parent);
- return 0;
-
-fail_gunlock:
- gfs2_glock_dq_uninit(&d_gh);
-fail:
- dput(parent);
- return 0;
+ return valid;
}
static int gfs2_dhash(const struct dentry *dentry, struct qstr *str)
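
For illustration, a hypothetical truth table for the simplified result computation valid = inode ? !error : (error == -ENOENT) in gfs2_drevalidate above:

/* Illustration only -- not part of the patch.
 *
 *   gfs2_dir_check() result    dentry positive?   valid
 *   0        (entry found)     yes                1
 *   0        (entry found)     no                 0
 *   -ENOENT  (not found)       yes                0
 *   -ENOENT  (not found)       no                 1
 *   other error                either             0
 */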
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 6f35d19eec25..eb9c0578978f 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1463,8 +1463,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
sort_offset : entries, copied);
out_free:
for(i = 0; i < leaf; i++)
- if (larr[i])
- brelse(larr[i]);
+ brelse(larr[i]);
kvfree(larr);
out:
return error;
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 0ac2dc8564df..5b76480c17c9 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -32,8 +32,7 @@ extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
const struct gfs2_inode *ip, struct gfs2_diradd *da);
static inline void gfs2_dir_no_add(struct gfs2_diradd *da)
{
- if (da->bh)
- brelse(da->bh);
+ brelse(da->bh);
da->bh = NULL;
}
extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 52fa1ef8400b..9d58295ccf7a 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
@@ -354,6 +355,31 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENOTTY;
}
+#ifdef CONFIG_COMPAT
+static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ switch(cmd) {
+ /* These are just misnamed, they actually get/put from/to user an int */
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+ /* Keep this list in sync with gfs2_ioctl */
+ case FITRIM:
+ case FS_IOC_GETFSLABEL:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define gfs2_compat_ioctl NULL
+#endif
+
/**
* gfs2_size_hint - Give a hint to the size of a write request
* @filep: The struct file
@@ -381,27 +407,28 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
/**
* gfs2_allocate_page_backing - Allocate blocks for a write fault
* @page: The (locked) page to allocate backing for
+ * @length: Size of the allocation
*
* We try to allocate all the blocks required for the page in one go. This
* might fail for various reasons, so we keep trying until all the blocks to
* back this page are allocated. If some of the blocks are already allocated,
* that is ok too.
*/
-static int gfs2_allocate_page_backing(struct page *page)
+static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
{
u64 pos = page_offset(page);
- u64 size = PAGE_SIZE;
do {
struct iomap iomap = { };
- if (gfs2_iomap_get_alloc(page->mapping->host, pos, 1, &iomap))
+ if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap))
return -EIO;
- iomap.length = min(iomap.length, size);
- size -= iomap.length;
+ if (length < iomap.length)
+ iomap.length = length;
+ length -= iomap.length;
pos += iomap.length;
- } while (size > 0);
+ } while (length > 0);
return 0;
}
@@ -422,10 +449,10 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_alloc_parms ap = { .aflags = 0, };
- unsigned long last_index;
- u64 pos = page_offset(page);
+ u64 offset = page_offset(page);
unsigned int data_blocks, ind_blocks, rblocks;
struct gfs2_holder gh;
+ unsigned int length;
loff_t size;
int ret;
@@ -435,20 +462,39 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
if (ret)
goto out;
- gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);
-
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
if (ret)
goto out_uninit;
+ /* Check page index against inode size */
+ size = i_size_read(inode);
+ if (offset >= size) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
/* Update file times before taking page lock */
file_update_time(vmf->vma->vm_file);
+ /* page is wholly or partially inside EOF */
+ if (offset > size - PAGE_SIZE)
+ length = offset_in_page(size);
+ else
+ length = PAGE_SIZE;
+
+ gfs2_size_hint(vmf->vma->vm_file, offset, length);
+
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
- if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
+ /*
+ * iomap_writepage / iomap_writepages currently don't support inline
+ * files, so always unstuff here.
+ */
+
+ if (!gfs2_is_stuffed(ip) &&
+ !gfs2_write_alloc_required(ip, offset, length)) {
lock_page(page);
if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
ret = -EAGAIN;
@@ -461,7 +507,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
if (ret)
goto out_unlock;
- gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
+ gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
ap.target = data_blocks + ind_blocks;
ret = gfs2_quota_lock_check(ip, &ap);
if (ret)
@@ -482,13 +528,6 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
goto out_trans_fail;
lock_page(page);
- ret = -EINVAL;
- size = i_size_read(inode);
- last_index = (size - 1) >> PAGE_SHIFT;
- /* Check page index against inode size */
- if (size == 0 || (page->index > last_index))
- goto out_trans_end;
-
ret = -EAGAIN;
/* If truncated, we must retry the operation, we may have raced
* with the glock demotion code.
@@ -501,7 +540,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
if (gfs2_is_stuffed(ip))
ret = gfs2_unstuff_dinode(ip, page);
if (ret == 0)
- ret = gfs2_allocate_page_backing(page);
+ ret = gfs2_allocate_page_backing(page, length);
out_trans_end:
if (ret)
@@ -732,7 +771,8 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
if (ret)
goto out_uninit;
- ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);
+ ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
+ is_sync_kiocb(iocb));
gfs2_glock_dq(&gh);
out_uninit:
@@ -767,7 +807,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
if (offset + len > i_size_read(&ip->i_inode))
goto out;
- ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL);
+ ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
+ is_sync_kiocb(iocb));
out:
gfs2_glock_dq(&gh);
@@ -933,6 +974,7 @@ out:
brelse(dibh);
return error;
}
+
/**
* calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
* blocks, determine how many bytes can be written.
@@ -1049,7 +1091,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
rblocks += data_blocks ? data_blocks : 1;
error = gfs2_trans_begin(sdp, rblocks,
- PAGE_SIZE/sdp->sd_sb.sb_bsize);
+ PAGE_SIZE >> inode->i_blkbits);
if (error)
goto out_trans_fail;
@@ -1065,11 +1107,10 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
gfs2_quota_unlock(ip);
}
- if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
i_size_write(inode, pos + count);
- file_update_time(file);
- mark_inode_dirty(inode);
- }
+ file_update_time(file);
+ mark_inode_dirty(inode);
if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
return vfs_fsync_range(file, pos, pos + count - 1,
@@ -1181,7 +1222,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
cmd = F_SETLK;
fl->fl_type = F_UNLCK;
}
- if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags))) {
+ if (unlikely(gfs2_withdrawn(sdp))) {
if (fl->fl_type == F_UNLCK)
locks_lock_file_wait(file, fl);
return -EIO;
@@ -1294,6 +1335,7 @@ const struct file_operations gfs2_file_fops = {
.write_iter = gfs2_file_write_iter,
.iopoll = iomap_dio_iopoll,
.unlocked_ioctl = gfs2_ioctl,
+ .compat_ioctl = gfs2_compat_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
.release = gfs2_release,
@@ -1309,6 +1351,7 @@ const struct file_operations gfs2_file_fops = {
const struct file_operations gfs2_dir_fops = {
.iterate_shared = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
+ .compat_ioctl = gfs2_compat_ioctl,
.open = gfs2_open,
.release = gfs2_release,
.fsync = gfs2_fsync,
@@ -1325,6 +1368,7 @@ const struct file_operations gfs2_file_fops_nolock = {
.write_iter = gfs2_file_write_iter,
.iopoll = iomap_dio_iopoll,
.unlocked_ioctl = gfs2_ioctl,
+ .compat_ioctl = gfs2_compat_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
.release = gfs2_release,
@@ -1338,6 +1382,7 @@ const struct file_operations gfs2_file_fops_nolock = {
const struct file_operations gfs2_dir_fops_nolock = {
.iterate_shared = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
+ .compat_ioctl = gfs2_compat_ioctl,
.open = gfs2_open,
.release = gfs2_release,
.fsync = gfs2_fsync,
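
For illustration, a hypothetical call flow showing how the new gfs2_compat_ioctl remaps a 32-bit ioctl before handing it to the regular handler:

/* Illustration only -- not part of the patch.
 *
 *   32-bit task: ioctl(fd, FS_IOC32_GETFLAGS, &flags)
 *   kernel:      gfs2_compat_ioctl(filp, FS_IOC32_GETFLAGS, arg)
 *                  cmd = FS_IOC_GETFLAGS;
 *                  return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
 *
 *   FITRIM and FS_IOC_GETFSLABEL pass through unchanged; any other command
 *   returns -ENOIOCTLCMD.
 */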
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index e23fb8b7b020..b7123de7c180 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -305,6 +305,11 @@ static void gfs2_holder_wake(struct gfs2_holder *gh)
clear_bit(HIF_WAIT, &gh->gh_iflags);
smp_mb__after_atomic();
wake_up_bit(&gh->gh_iflags, HIF_WAIT);
+ if (gh->gh_flags & GL_ASYNC) {
+ struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
+
+ wake_up(&sdp->sd_async_glock_wait);
+ }
}
/**
@@ -544,7 +549,7 @@ __acquires(&gl->gl_lockref.lock)
unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
int ret;
- if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) &&
+ if (unlikely(gfs2_withdrawn(sdp)) &&
target != LM_ST_UNLOCKED)
return;
lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
@@ -553,7 +558,14 @@ __acquires(&gl->gl_lockref.lock)
GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
glops->go_inval) {
- set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+ /*
+ * If another process is already doing the invalidate, let that
+ * finish first. The glock state machine will get back to this
+ * holder again later.
+ */
+ if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
+ &gl->gl_flags))
+ return;
do_error(gl, 0); /* Fail queued try locks */
}
gl->gl_req = target;
@@ -581,8 +593,7 @@ __acquires(&gl->gl_lockref.lock)
}
else if (ret) {
fs_err(sdp, "lm_lock ret %d\n", ret);
- GLOCK_BUG_ON(gl, !test_bit(SDF_WITHDRAWN,
- &sdp->sd_flags));
+ GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
}
} else { /* lock_nolock */
finish_xmote(gl, target);
@@ -931,6 +942,17 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
gh->gh_ip = 0;
}
+static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
+ unsigned long start_time)
+{
+ /* Have we waited longer that a second? */
+ if (time_after(jiffies, start_time + HZ)) {
+ /* Lengthen the minimum hold time. */
+ gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
+ GL_GLOCK_MAX_HOLD);
+ }
+}
+
/**
* gfs2_glock_wait - wait on a glock acquisition
* @gh: the glock holder
@@ -940,18 +962,99 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
int gfs2_glock_wait(struct gfs2_holder *gh)
{
- unsigned long time1 = jiffies;
+ unsigned long start_time = jiffies;
might_sleep();
wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
- if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
- /* Lengthen the minimum hold time. */
- gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
- GL_GLOCK_HOLD_INCR,
- GL_GLOCK_MAX_HOLD);
+ gfs2_glock_update_hold_time(gh->gh_gl, start_time);
return gh->gh_error;
}
+static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
+{
+ int i;
+
+ for (i = 0; i < num_gh; i++)
+ if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
+ return 1;
+ return 0;
+}
+
+/**
+ * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
+ * @num_gh: the number of holders in the array
+ * @ghs: the glock holder array
+ *
+ * Returns: 0 on success, meaning all glocks have been granted and are held.
+ * -ESTALE if the request timed out, meaning all glocks were released,
+ * and the caller should retry the operation.
+ */
+
+int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
+{
+ struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
+ int i, ret = 0, timeout = 0;
+ unsigned long start_time = jiffies;
+ bool keep_waiting;
+
+ might_sleep();
+ /*
+ * Total up the (minimum hold time * 2) of all glocks and use that to
+ * determine the max amount of time we should wait.
+ */
+ for (i = 0; i < num_gh; i++)
+ timeout += ghs[i].gh_gl->gl_hold_time << 1;
+
+wait_for_dlm:
+ if (!wait_event_timeout(sdp->sd_async_glock_wait,
+ !glocks_pending(num_gh, ghs), timeout))
+ ret = -ESTALE; /* request timed out. */
+
+ /*
+ * If dlm granted all our requests, we need to adjust the glock
+ * minimum hold time values according to how long we waited.
+ *
+ * If our request timed out, we need to repeatedly release any held
+ * glocks we acquired thus far to allow dlm to acquire the remaining
+ * glocks without deadlocking. We cannot currently cancel outstanding
+ * glock acquisitions.
+ *
+ * The HIF_WAIT bit tells us which requests still need a response from
+ * dlm.
+ *
+ * If dlm sent us any errors, we return the first error we find.
+ */
+ keep_waiting = false;
+ for (i = 0; i < num_gh; i++) {
+ /* Skip holders we have already dequeued below. */
+ if (!gfs2_holder_queued(&ghs[i]))
+ continue;
+ /* Skip holders with a pending DLM response. */
+ if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
+ keep_waiting = true;
+ continue;
+ }
+
+ if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
+ if (ret == -ESTALE)
+ gfs2_glock_dq(&ghs[i]);
+ else
+ gfs2_glock_update_hold_time(ghs[i].gh_gl,
+ start_time);
+ }
+ if (!ret)
+ ret = ghs[i].gh_error;
+ }
+
+ if (keep_waiting)
+ goto wait_for_dlm;
+
+ /*
+ * At this point, we've either acquired all locks or released them all.
+ */
+ return ret;
+}
+
/**
* handle_callback - process a demote request
* @gl: the glock
@@ -1018,9 +1121,9 @@ __acquires(&gl->gl_lockref.lock)
struct gfs2_holder *gh2;
int try_futile = 0;
- BUG_ON(gh->gh_owner_pid == NULL);
+ GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
- BUG();
+ GLOCK_BUG_ON(gl, true);
if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
if (test_bit(GLF_LOCK, &gl->gl_flags))
@@ -1094,7 +1197,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
int error = 0;
- if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
+ if (unlikely(gfs2_withdrawn(sdp)))
return -EIO;
if (test_bit(GLF_LRU, &gl->gl_flags))
@@ -1788,8 +1891,8 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
unsigned long long dtime;
const struct gfs2_holder *gh;
char gflags_buf[32];
- char fs_id_buf[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
memset(fs_id_buf, 0, sizeof(fs_id_buf));
if (fsid && sdp) /* safety precaution */
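
For illustration, a minimal hypothetical caller sketch (not part of the patch) of how GL_ASYNC holders pair with the new gfs2_glock_async_wait(), mirroring the gfs2_rename/gfs2_exchange usage further below:

/* Illustration only -- not part of the patch. */
static int example_lock_two(struct gfs2_inode *ip1, struct gfs2_inode *ip2)
{
	struct gfs2_holder ghs[2];
	int error;

	gfs2_holder_init(ip1->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[0]);
	gfs2_holder_init(ip2->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[1]);

	error = gfs2_glock_nq(&ghs[0]);		/* queues asynchronously */
	if (!error)
		error = gfs2_glock_nq(&ghs[1]);
	if (!error)
		error = gfs2_glock_async_wait(2, ghs);	/* -ESTALE on timeout */

	if (gfs2_holder_queued(&ghs[0]))
		gfs2_glock_dq(&ghs[0]);
	if (gfs2_holder_queued(&ghs[1]))
		gfs2_glock_dq(&ghs[1]);
	gfs2_holder_uninit(&ghs[0]);
	gfs2_holder_uninit(&ghs[1]);
	return error;
}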
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index e4e0bed5257c..b8adaf80e4c5 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -190,6 +190,7 @@ extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
+extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
@@ -260,6 +261,11 @@ static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
return gh->gh_gl;
}
+static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
+{
+ return !list_empty(&gh->gh_list);
+}
+
/**
* glock_set_object - set the gl_object field of a glock
* @gl: the glock
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index ff213690e364..4ede1f18de85 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -350,7 +350,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
be32_to_cpu(str->di_minor));
break;
- };
+ }
i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
@@ -540,7 +540,7 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
gfs2_consist(sdp);
/* Initialize some head of the log stuff */
- if (!test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
+ if (!gfs2_withdrawn(sdp)) {
sdp->sd_log_sequence = head.lh_sequence + 1;
gfs2_log_pointers_init(sdp, head.lh_blkno);
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 7a993d7c022e..5f89c515f5bb 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -584,10 +584,10 @@ struct gfs2_args {
unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */
unsigned int ar_loccookie:1; /* use location based readdir
cookies */
- int ar_commit; /* Commit interval */
- int ar_statfs_quantum; /* The fast statfs interval */
- int ar_quota_quantum; /* The quota interval */
- int ar_statfs_percent; /* The % change to force sync */
+ s32 ar_commit; /* Commit interval */
+ s32 ar_statfs_quantum; /* The fast statfs interval */
+ s32 ar_quota_quantum; /* The quota interval */
+ s32 ar_statfs_percent; /* The % change to force sync */
};
struct gfs2_tune {
@@ -725,6 +725,7 @@ struct gfs2_sbd {
struct gfs2_glock *sd_freeze_gl;
struct work_struct sd_freeze_work;
wait_queue_head_t sd_glock_wait;
+ wait_queue_head_t sd_async_glock_wait;
atomic_t sd_glock_disposal;
struct completion sd_locking_init;
struct completion sd_wdack;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 2e2a8a2fb51d..dafef10b91f1 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -656,7 +656,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
inode->i_rdev = dev;
inode->i_size = size;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- gfs2_set_inode_blocks(inode, 1);
munge_mode_uid_gid(dip, inode);
check_and_update_goal(dip);
ip->i_goal = dip->i_goal;
@@ -712,7 +711,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_trans_begin(sdp, blocks, 0);
if (error)
- goto fail_gunlock2;
+ goto fail_free_inode;
if (blocks > 1) {
ip->i_eattr = ip->i_no_addr + 1;
@@ -723,7 +722,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (error)
- goto fail_gunlock2;
+ goto fail_free_inode;
BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
@@ -732,7 +731,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
goto fail_gunlock2;
glock_set_object(ip->i_iopen_gh.gh_gl, ip);
- gfs2_glock_put(io_gl);
gfs2_set_iop(inode);
insert_inode_hash(inode);
@@ -765,6 +763,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode);
d_instantiate(dentry, inode);
+ /* After instantiate, errors should result in evict which will destroy
+ * both inode and iopen glocks properly. */
if (file) {
file->f_mode |= FMODE_CREATED;
error = finish_open(file, dentry, gfs2_open_common);
@@ -772,15 +772,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
gfs2_glock_dq_uninit(ghs);
gfs2_glock_dq_uninit(ghs + 1);
clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+ gfs2_glock_put(io_gl);
return error;
fail_gunlock3:
glock_clear_object(io_gl, ip);
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- gfs2_glock_put(io_gl);
fail_gunlock2:
- if (io_gl)
- clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+ gfs2_glock_put(io_gl);
fail_free_inode:
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
@@ -1348,7 +1348,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
struct gfs2_inode *ip = GFS2_I(d_inode(odentry));
struct gfs2_inode *nip = NULL;
struct gfs2_sbd *sdp = GFS2_SB(odir);
- struct gfs2_holder ghs[5], r_gh;
+ struct gfs2_holder ghs[4], r_gh, rd_gh;
struct gfs2_rgrpd *nrgd;
unsigned int num_gh;
int dir_rename = 0;
@@ -1357,6 +1357,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
int error;
gfs2_holder_mark_uninitialized(&r_gh);
+ gfs2_holder_mark_uninitialized(&rd_gh);
if (d_really_is_positive(ndentry)) {
nip = GFS2_I(d_inode(ndentry));
if (ip == nip)
@@ -1387,24 +1388,19 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
}
num_gh = 1;
- gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
+ gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs);
if (odip != ndip) {
- gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
+ gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC,
+ ghs + num_gh);
num_gh++;
}
- gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
+ gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
num_gh++;
if (nip) {
- gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
+ gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC,
+ ghs + num_gh);
num_gh++;
- /* grab the resource lock for unlink flag twiddling
- * this is the case of the target file already existing
- * so we unlink before doing the rename
- */
- nrgd = gfs2_blk2rgrpd(sdp, nip->i_no_addr, 1);
- if (nrgd)
- gfs2_holder_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh++);
}
for (x = 0; x < num_gh; x++) {
@@ -1412,6 +1408,25 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (error)
goto out_gunlock;
}
+ error = gfs2_glock_async_wait(num_gh, ghs);
+ if (error)
+ goto out_gunlock;
+
+ if (nip) {
+ /* Grab the resource group glock for unlink flag twiddling.
+ * This is the case where the target dinode already exists
+ * so we unlink before doing the rename.
+ */
+ nrgd = gfs2_blk2rgrpd(sdp, nip->i_no_addr, 1);
+ if (!nrgd) {
+ error = -ENOENT;
+ goto out_gunlock;
+ }
+ error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0,
+ &rd_gh);
+ if (error)
+ goto out_gunlock;
+ }
error = -ENOENT;
if (ip->i_inode.i_nlink == 0)
@@ -1460,7 +1475,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
error = -EEXIST;
default:
goto out_gunlock;
- };
+ }
if (odip != ndip) {
if (!ndip->i_inode.i_nlink) {
@@ -1541,8 +1556,12 @@ out_gunlock_q:
gfs2_quota_unlock(ndip);
out_gunlock:
gfs2_dir_no_add(&da);
+ if (gfs2_holder_initialized(&rd_gh))
+ gfs2_glock_dq_uninit(&rd_gh);
+
while (x--) {
- gfs2_glock_dq(ghs + x);
+ if (gfs2_holder_queued(ghs + x))
+ gfs2_glock_dq(ghs + x);
gfs2_holder_uninit(ghs + x);
}
out_gunlock_r:
@@ -1572,7 +1591,7 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
struct gfs2_inode *oip = GFS2_I(odentry->d_inode);
struct gfs2_inode *nip = GFS2_I(ndentry->d_inode);
struct gfs2_sbd *sdp = GFS2_SB(odir);
- struct gfs2_holder ghs[5], r_gh;
+ struct gfs2_holder ghs[4], r_gh;
unsigned int num_gh;
unsigned int x;
umode_t old_mode = oip->i_inode.i_mode;
@@ -1606,15 +1625,16 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
}
num_gh = 1;
- gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
+ gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs);
if (odip != ndip) {
- gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
+ gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC,
+ ghs + num_gh);
num_gh++;
}
- gfs2_holder_init(oip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
+ gfs2_holder_init(oip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
num_gh++;
- gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
+ gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
num_gh++;
for (x = 0; x < num_gh; x++) {
@@ -1623,6 +1643,10 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
goto out_gunlock;
}
+ error = gfs2_glock_async_wait(num_gh, ghs);
+ if (error)
+ goto out_gunlock;
+
error = -ENOENT;
if (oip->i_inode.i_nlink == 0 || nip->i_inode.i_nlink == 0)
goto out_gunlock;
@@ -1683,7 +1707,8 @@ out_end_trans:
gfs2_trans_end(sdp);
out_gunlock:
while (x--) {
- gfs2_glock_dq(ghs + x);
+ if (gfs2_holder_queued(ghs + x))
+ gfs2_glock_dq(ghs + x);
gfs2_holder_uninit(ghs + x);
}
out_gunlock_r:
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 4361804646d8..7c7197343ee2 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1035,12 +1035,12 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
}
old_size = ls->ls_recover_size;
-
- if (old_size >= max_jid + 1)
+ new_size = old_size;
+ while (new_size < max_jid + 1)
+ new_size += RECOVER_SIZE_INC;
+ if (new_size == old_size)
return 0;
- new_size = old_size + RECOVER_SIZE_INC;
-
submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
if (!submit || !result) {
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 58e237fba565..eb3f2e7b8085 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -31,6 +31,8 @@
#include "dir.h"
#include "trace_gfs2.h"
+static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
+
/**
* gfs2_struct2blk - compute stuff
* @sdp: the filesystem
@@ -159,7 +161,8 @@ restart:
list_for_each_entry_reverse(tr, head, tr_list) {
if (wbc->nr_to_write <= 0)
break;
- if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
+ if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw) &&
+ !gfs2_withdrawn(sdp))
goto restart;
}
spin_unlock(&sdp->sd_ail_lock);
@@ -609,6 +612,14 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
list_add(&bd->bd_list, &sdp->sd_log_revokes);
}
+void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
+{
+ if (atomic_dec_return(&gl->gl_revokes) == 0) {
+ clear_bit(GLF_LFLUSH, &gl->gl_flags);
+ gfs2_glock_queue_put(gl);
+ }
+}
+
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
struct gfs2_trans *tr;
@@ -682,12 +693,16 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
{
struct gfs2_log_header *lh;
u32 hash, crc;
- struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+ struct page *page;
struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
struct timespec64 tv;
struct super_block *sb = sdp->sd_vfs;
u64 dblock;
+ if (gfs2_withdrawn(sdp))
+ goto out;
+
+ page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
lh = page_address(page);
clear_page(lh);
@@ -707,7 +722,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
lh->lh_sec = cpu_to_be64(tv.tv_sec);
if (!list_empty(&jd->extent_list))
- dblock = gfs2_log_bmap(sdp);
+ dblock = gfs2_log_bmap(jd, lblock);
else {
int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
if (gfs2_assert_withdraw(sdp, ret == 0))
@@ -740,6 +755,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
+out:
log_flush_wait(sdp);
}
@@ -768,6 +784,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
sdp->sd_log_flush_head, flags, op_flags);
+ gfs2_log_incr_head(sdp);
if (sdp->sd_log_tail != tail)
log_pull_tail(sdp, tail);
@@ -948,7 +965,7 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
*
*/
-void gfs2_log_shutdown(struct gfs2_sbd *sdp)
+static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 2315fca47a2b..2ff163a8dce1 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -74,9 +74,9 @@ extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
-extern void gfs2_log_shutdown(struct gfs2_sbd *sdp);
extern int gfs2_logd(void *data);
extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
extern void gfs2_write_revokes(struct gfs2_sbd *sdp);
#endif /* __LOG_DOT_H__ */
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 5b17979af539..55fed7daf2b1 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -129,7 +129,7 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
atomic_dec(&sdp->sd_log_pinned);
}
-static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
+void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
(sdp->sd_log_flush_head != sdp->sd_log_head));
@@ -138,18 +138,13 @@ static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
sdp->sd_log_flush_head = 0;
}
-u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
+u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
- unsigned int lbn = sdp->sd_log_flush_head;
struct gfs2_journal_extent *je;
- u64 block;
- list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
- if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
- block = je->dblock + lbn - je->lblock;
- gfs2_log_incr_head(sdp);
- return block;
- }
+ list_for_each_entry(je, &jd->extent_list, list) {
+ if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
+ return je->dblock + lblock - je->lblock;
}
return -1;
@@ -351,8 +346,11 @@ void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
- gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
- gfs2_log_bmap(sdp));
+ u64 dblock;
+
+ dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
+ gfs2_log_incr_head(sdp);
+ gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
}
/**
@@ -369,8 +367,11 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
struct super_block *sb = sdp->sd_vfs;
- gfs2_log_write(sdp, page, sb->s_blocksize, 0,
- gfs2_log_bmap(sdp));
+ u64 dblock;
+
+ dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
+ gfs2_log_incr_head(sdp);
+ gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
}
/**
@@ -882,10 +883,7 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
list_del_init(&bd->bd_list);
gl = bd->bd_gl;
- if (atomic_dec_return(&gl->gl_revokes) == 0) {
- clear_bit(GLF_LFLUSH, &gl->gl_flags);
- gfs2_glock_queue_put(gl);
- }
+ gfs2_glock_remove_revoke(gl);
kmem_cache_free(gfs2_bufdata_cachep, bd);
}
}
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 9c059957a733..9c5e4e491e03 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -18,7 +18,8 @@
~(2 * sizeof(__be64) - 1))
extern const struct gfs2_log_operations *gfs2_log_ops[];
-extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp);
+extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
+extern u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
unsigned size, unsigned offset, u64 blkno);
extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 662ef36c1874..0c3772974030 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -251,7 +251,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
struct buffer_head *bh, *bhs[2];
int num = 0;
- if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags))) {
+ if (unlikely(gfs2_withdrawn(sdp))) {
*bhp = NULL;
return -EIO;
}
@@ -309,7 +309,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
- if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
+ if (unlikely(gfs2_withdrawn(sdp)))
return -EIO;
wait_on_buffer(bh);
@@ -320,7 +320,7 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
gfs2_io_error_bh_wd(sdp, bh);
return -EIO;
}
- if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
+ if (unlikely(gfs2_withdrawn(sdp)))
return -EIO;
return 0;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 4a8e5a7310f0..e8b7b0ce8404 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -21,6 +21,7 @@
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
+#include <linux/fs_parser.h>
#include "gfs2.h"
#include "incore.h"
@@ -87,6 +88,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
gfs2_tune_init(&sdp->sd_tune);
init_waitqueue_head(&sdp->sd_glock_wait);
+ init_waitqueue_head(&sdp->sd_async_glock_wait);
atomic_set(&sdp->sd_glock_disposal, 0);
init_completion(&sdp->sd_locking_init);
init_completion(&sdp->sd_wdack);
@@ -1004,8 +1006,7 @@ hostdata_error:
void gfs2_lm_unmount(struct gfs2_sbd *sdp)
{
const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
- if (likely(!test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) &&
- lm->lm_unmount)
+ if (likely(!gfs2_withdrawn(sdp)) && lm->lm_unmount)
lm->lm_unmount(sdp);
}
@@ -1030,16 +1031,17 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
}
/**
- * fill_super - Read in superblock
+ * gfs2_fill_super - Read in superblock
* @sb: The VFS superblock
- * @data: Mount options
+ * @args: Mount options
* @silent: Don't complain if it's not a GFS2 filesystem
*
- * Returns: errno
+ * Returns: -errno
*/
-
-static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
+static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct gfs2_args *args = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh;
int error;
@@ -1204,161 +1206,418 @@ fail_debug:
return error;
}
-static int set_gfs2_super(struct super_block *s, void *data)
+/**
+ * gfs2_get_tree - Get the GFS2 superblock and root directory
+ * @fc: The filesystem context
+ *
+ * Returns: 0 or -errno on error
+ */
+static int gfs2_get_tree(struct fs_context *fc)
{
- s->s_bdev = data;
- s->s_dev = s->s_bdev->bd_dev;
- s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
+ struct gfs2_args *args = fc->fs_private;
+ struct gfs2_sbd *sdp;
+ int error;
+
+ error = get_tree_bdev(fc, gfs2_fill_super);
+ if (error)
+ return error;
+
+ sdp = fc->root->d_sb->s_fs_info;
+ dput(fc->root);
+ if (args->ar_meta)
+ fc->root = dget(sdp->sd_master_dir);
+ else
+ fc->root = dget(sdp->sd_root_dir);
return 0;
}
-static int test_gfs2_super(struct super_block *s, void *ptr)
+static void gfs2_fc_free(struct fs_context *fc)
{
- struct block_device *bdev = ptr;
- return (bdev == s->s_bdev);
+ struct gfs2_args *args = fc->fs_private;
+
+ kfree(args);
}
-/**
- * gfs2_mount - Get the GFS2 superblock
- * @fs_type: The GFS2 filesystem type
- * @flags: Mount flags
- * @dev_name: The name of the device
- * @data: The mount arguments
- *
- * Q. Why not use get_sb_bdev() ?
- * A. We need to select one of two root directories to mount, independent
- * of whether this is the initial, or subsequent, mount of this sb
- *
- * Returns: 0 or -ve on error
- */
+enum gfs2_param {
+ Opt_lockproto,
+ Opt_locktable,
+ Opt_hostdata,
+ Opt_spectator,
+ Opt_ignore_local_fs,
+ Opt_localflocks,
+ Opt_localcaching,
+ Opt_debug,
+ Opt_upgrade,
+ Opt_acl,
+ Opt_quota,
+ Opt_suiddir,
+ Opt_data,
+ Opt_meta,
+ Opt_discard,
+ Opt_commit,
+ Opt_errors,
+ Opt_statfs_quantum,
+ Opt_statfs_percent,
+ Opt_quota_quantum,
+ Opt_barrier,
+ Opt_rgrplvb,
+ Opt_loccookie,
+};
+
+enum opt_quota {
+ Opt_quota_unset = 0,
+ Opt_quota_off,
+ Opt_quota_account,
+ Opt_quota_on,
+};
+
+static const unsigned int opt_quota_values[] = {
+ [Opt_quota_off] = GFS2_QUOTA_OFF,
+ [Opt_quota_account] = GFS2_QUOTA_ACCOUNT,
+ [Opt_quota_on] = GFS2_QUOTA_ON,
+};
+
+enum opt_data {
+ Opt_data_writeback = GFS2_DATA_WRITEBACK,
+ Opt_data_ordered = GFS2_DATA_ORDERED,
+};
+
+enum opt_errors {
+ Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
+ Opt_errors_panic = GFS2_ERRORS_PANIC,
+};
+
+static const struct fs_parameter_spec gfs2_param_specs[] = {
+ fsparam_string ("lockproto", Opt_lockproto),
+ fsparam_string ("locktable", Opt_locktable),
+ fsparam_string ("hostdata", Opt_hostdata),
+ fsparam_flag ("spectator", Opt_spectator),
+ fsparam_flag ("norecovery", Opt_spectator),
+ fsparam_flag ("ignore_local_fs", Opt_ignore_local_fs),
+ fsparam_flag ("localflocks", Opt_localflocks),
+ fsparam_flag ("localcaching", Opt_localcaching),
+ fsparam_flag_no("debug", Opt_debug),
+ fsparam_flag ("upgrade", Opt_upgrade),
+ fsparam_flag_no("acl", Opt_acl),
+ fsparam_flag_no("suiddir", Opt_suiddir),
+ fsparam_enum ("data", Opt_data),
+ fsparam_flag ("meta", Opt_meta),
+ fsparam_flag_no("discard", Opt_discard),
+ fsparam_s32 ("commit", Opt_commit),
+ fsparam_enum ("errors", Opt_errors),
+ fsparam_s32 ("statfs_quantum", Opt_statfs_quantum),
+ fsparam_s32 ("statfs_percent", Opt_statfs_percent),
+ fsparam_s32 ("quota_quantum", Opt_quota_quantum),
+ fsparam_flag_no("barrier", Opt_barrier),
+ fsparam_flag_no("rgrplvb", Opt_rgrplvb),
+ fsparam_flag_no("loccookie", Opt_loccookie),
+ /* quota can be a flag or an enum so it gets special treatment */
+ __fsparam(fs_param_is_enum, "quota", Opt_quota, fs_param_neg_with_no|fs_param_v_optional),
+ {}
+};
-static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static const struct fs_parameter_enum gfs2_param_enums[] = {
+ { Opt_quota, "off", Opt_quota_off },
+ { Opt_quota, "account", Opt_quota_account },
+ { Opt_quota, "on", Opt_quota_on },
+ { Opt_data, "writeback", Opt_data_writeback },
+ { Opt_data, "ordered", Opt_data_ordered },
+ { Opt_errors, "withdraw", Opt_errors_withdraw },
+ { Opt_errors, "panic", Opt_errors_panic },
+ {}
+};
+
+static const struct fs_parameter_description gfs2_fs_parameters = {
+ .name = "gfs2",
+ .specs = gfs2_param_specs,
+ .enums = gfs2_param_enums,
+};
+
+/* Parse a single mount parameter */
+static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- struct block_device *bdev;
- struct super_block *s;
- fmode_t mode = FMODE_READ | FMODE_EXCL;
- int error;
- struct gfs2_args args;
- struct gfs2_sbd *sdp;
+ struct gfs2_args *args = fc->fs_private;
+ struct fs_parse_result result;
+ int o;
+
+ o = fs_parse(fc, &gfs2_fs_parameters, param, &result);
+ if (o < 0)
+ return o;
+
+ switch (o) {
+ case Opt_lockproto:
+ strlcpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
+ break;
+ case Opt_locktable:
+ strlcpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
+ break;
+ case Opt_hostdata:
+ strlcpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
+ break;
+ case Opt_spectator:
+ args->ar_spectator = 1;
+ break;
+ case Opt_ignore_local_fs:
+ /* Retained for backwards compat only */
+ break;
+ case Opt_localflocks:
+ args->ar_localflocks = 1;
+ break;
+ case Opt_localcaching:
+ /* Retained for backwards compat only */
+ break;
+ case Opt_debug:
+ if (result.boolean && args->ar_errors == GFS2_ERRORS_PANIC)
+ return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
+ args->ar_debug = result.boolean;
+ break;
+ case Opt_upgrade:
+ /* Retained for backwards compat only */
+ break;
+ case Opt_acl:
+ args->ar_posix_acl = result.boolean;
+ break;
+ case Opt_quota:
+ /* The quota option can be a flag or an enum. A non-zero int_32
+ result means that we have an enum index. Otherwise we have
+ to rely on the 'negated' flag to tell us whether 'quota' or
+ 'noquota' was specified. */
+ if (result.negated)
+ args->ar_quota = GFS2_QUOTA_OFF;
+ else if (result.int_32 > 0)
+ args->ar_quota = opt_quota_values[result.int_32];
+ else
+ args->ar_quota = GFS2_QUOTA_ON;
+ break;
+ case Opt_suiddir:
+ args->ar_suiddir = result.boolean;
+ break;
+ case Opt_data:
+ /* The uint_32 result maps directly to GFS2_DATA_* */
+ args->ar_data = result.uint_32;
+ break;
+ case Opt_meta:
+ args->ar_meta = 1;
+ break;
+ case Opt_discard:
+ args->ar_discard = result.boolean;
+ break;
+ case Opt_commit:
+ if (result.int_32 <= 0)
+ return invalf(fc, "gfs2: commit mount option requires a positive numeric argument");
+ args->ar_commit = result.int_32;
+ break;
+ case Opt_statfs_quantum:
+ if (result.int_32 < 0)
+ return invalf(fc, "gfs2: statfs_quantum mount option requires a non-negative numeric argument");
+ args->ar_statfs_quantum = result.int_32;
+ break;
+ case Opt_quota_quantum:
+ if (result.int_32 <= 0)
+ return invalf(fc, "gfs2: quota_quantum mount option requires a positive numeric argument");
+ args->ar_quota_quantum = result.int_32;
+ break;
+ case Opt_statfs_percent:
+ if (result.int_32 < 0 || result.int_32 > 100)
+ return invalf(fc, "gfs2: statfs_percent mount option requires a numeric argument between 0 and 100");
+ args->ar_statfs_percent = result.int_32;
+ break;
+ case Opt_errors:
+ if (args->ar_debug && result.uint_32 == GFS2_ERRORS_PANIC)
+ return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
+ args->ar_errors = result.uint_32;
+ break;
+ case Opt_barrier:
+ args->ar_nobarrier = result.boolean;
+ break;
+ case Opt_rgrplvb:
+ args->ar_rgrplvb = result.boolean;
+ break;
+ case Opt_loccookie:
+ args->ar_loccookie = result.boolean;
+ break;
+ default:
+ return invalf(fc, "gfs2: invalid mount option: %s", param->key);
+ }
+ return 0;
+}
- if (!(flags & SB_RDONLY))
- mode |= FMODE_WRITE;
+static int gfs2_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ struct gfs2_sbd *sdp = sb->s_fs_info;
+ struct gfs2_args *oldargs = &sdp->sd_args;
+ struct gfs2_args *newargs = fc->fs_private;
+ struct gfs2_tune *gt = &sdp->sd_tune;
+ int error = 0;
- bdev = blkdev_get_by_path(dev_name, mode, fs_type);
- if (IS_ERR(bdev))
- return ERR_CAST(bdev);
+ sync_filesystem(sb);
- /*
- * once the super is inserted into the list by sget, s_umount
- * will protect the lockfs code from trying to start a snapshot
- * while we are mounting
- */
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (bdev->bd_fsfreeze_count > 0) {
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- error = -EBUSY;
- goto error_bdev;
+ spin_lock(&gt->gt_spin);
+ oldargs->ar_commit = gt->gt_logd_secs;
+ oldargs->ar_quota_quantum = gt->gt_quota_quantum;
+ if (gt->gt_statfs_slow)
+ oldargs->ar_statfs_quantum = 0;
+ else
+ oldargs->ar_statfs_quantum = gt->gt_statfs_quantum;
+ spin_unlock(&gt->gt_spin);
+
+ if (strcmp(newargs->ar_lockproto, oldargs->ar_lockproto)) {
+ errorf(fc, "gfs2: reconfiguration of locking protocol not allowed");
+ return -EINVAL;
}
- s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- error = PTR_ERR(s);
- if (IS_ERR(s))
- goto error_bdev;
-
- if (s->s_root) {
- /*
- * s_umount nests inside bd_mutex during
- * __invalidate_device(). blkdev_put() acquires
- * bd_mutex and can't be called under s_umount. Drop
- * s_umount temporarily. This is safe as we're
- * holding an active reference.
- */
- up_write(&s->s_umount);
- blkdev_put(bdev, mode);
- down_write(&s->s_umount);
- } else {
- /* s_mode must be set before deactivate_locked_super calls */
- s->s_mode = mode;
+ if (strcmp(newargs->ar_locktable, oldargs->ar_locktable)) {
+ errorf(fc, "gfs2: reconfiguration of lock table not allowed");
+ return -EINVAL;
}
+ if (strcmp(newargs->ar_hostdata, oldargs->ar_hostdata)) {
+ errorf(fc, "gfs2: reconfiguration of host data not allowed");
+ return -EINVAL;
+ }
+ if (newargs->ar_spectator != oldargs->ar_spectator) {
+ errorf(fc, "gfs2: reconfiguration of spectator mode not allowed");
+ return -EINVAL;
+ }
+ if (newargs->ar_localflocks != oldargs->ar_localflocks) {
+ errorf(fc, "gfs2: reconfiguration of localflocks not allowed");
+ return -EINVAL;
+ }
+ if (newargs->ar_meta != oldargs->ar_meta) {
+ errorf(fc, "gfs2: switching between gfs2 and gfs2meta not allowed");
+ return -EINVAL;
+ }
+ if (oldargs->ar_spectator)
+ fc->sb_flags |= SB_RDONLY;
+
+ if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
+ if (fc->sb_flags & SB_RDONLY) {
+ error = gfs2_make_fs_ro(sdp);
+ if (error)
+ errorf(fc, "gfs2: unable to remount read-only");
+ } else {
+ error = gfs2_make_fs_rw(sdp);
+ if (error)
+ errorf(fc, "gfs2: unable to remount read-write");
+ }
+ }
+ sdp->sd_args = *newargs;
- memset(&args, 0, sizeof(args));
- args.ar_quota = GFS2_QUOTA_DEFAULT;
- args.ar_data = GFS2_DATA_DEFAULT;
- args.ar_commit = 30;
- args.ar_statfs_quantum = 30;
- args.ar_quota_quantum = 60;
- args.ar_errors = GFS2_ERRORS_DEFAULT;
-
- error = gfs2_mount_args(&args, data);
- if (error) {
- pr_warn("can't parse mount arguments\n");
- goto error_super;
+ if (sdp->sd_args.ar_posix_acl)
+ sb->s_flags |= SB_POSIXACL;
+ else
+ sb->s_flags &= ~SB_POSIXACL;
+ if (sdp->sd_args.ar_nobarrier)
+ set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
+ else
+ clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
+ spin_lock(&gt->gt_spin);
+ gt->gt_logd_secs = newargs->ar_commit;
+ gt->gt_quota_quantum = newargs->ar_quota_quantum;
+ if (newargs->ar_statfs_quantum) {
+ gt->gt_statfs_slow = 0;
+ gt->gt_statfs_quantum = newargs->ar_statfs_quantum;
+ }
+ else {
+ gt->gt_statfs_slow = 1;
+ gt->gt_statfs_quantum = 30;
}
+ spin_unlock(&gt->gt_spin);
+
+ gfs2_online_uevent(sdp);
+ return error;
+}
+
+static const struct fs_context_operations gfs2_context_ops = {
+ .free = gfs2_fc_free,
+ .parse_param = gfs2_parse_param,
+ .get_tree = gfs2_get_tree,
+ .reconfigure = gfs2_reconfigure,
+};
+
+/* Set up the filesystem mount context */
+static int gfs2_init_fs_context(struct fs_context *fc)
+{
+ struct gfs2_args *args;
+
+ args = kmalloc(sizeof(*args), GFP_KERNEL);
+ if (args == NULL)
+ return -ENOMEM;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ struct gfs2_sbd *sdp = fc->root->d_sb->s_fs_info;
- if (s->s_root) {
- error = -EBUSY;
- if ((flags ^ s->s_flags) & SB_RDONLY)
- goto error_super;
+ *args = sdp->sd_args;
} else {
- snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
- sb_set_blocksize(s, block_size(bdev));
- error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
- if (error)
- goto error_super;
- s->s_flags |= SB_ACTIVE;
- bdev->bd_super = s;
+ memset(args, 0, sizeof(*args));
+ args->ar_quota = GFS2_QUOTA_DEFAULT;
+ args->ar_data = GFS2_DATA_DEFAULT;
+ args->ar_commit = 30;
+ args->ar_statfs_quantum = 30;
+ args->ar_quota_quantum = 60;
+ args->ar_errors = GFS2_ERRORS_DEFAULT;
}
-
- sdp = s->s_fs_info;
- if (args.ar_meta)
- return dget(sdp->sd_master_dir);
- else
- return dget(sdp->sd_root_dir);
-
-error_super:
- deactivate_locked_super(s);
- return ERR_PTR(error);
-error_bdev:
- blkdev_put(bdev, mode);
- return ERR_PTR(error);
+ fc->fs_private = args;
+ fc->ops = &gfs2_context_ops;
+ return 0;
}
-static int set_meta_super(struct super_block *s, void *ptr)
+static int set_meta_super(struct super_block *s, struct fs_context *fc)
{
return -EINVAL;
}
-static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int test_meta_super(struct super_block *s, struct fs_context *fc)
+{
+ return (fc->sget_key == s->s_bdev);
+}
+
+static int gfs2_meta_get_tree(struct fs_context *fc)
{
struct super_block *s;
struct gfs2_sbd *sdp;
struct path path;
int error;
- if (!dev_name || !*dev_name)
- return ERR_PTR(-EINVAL);
+ if (!fc->source || !*fc->source)
+ return -EINVAL;
- error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
+ error = kern_path(fc->source, LOOKUP_FOLLOW, &path);
if (error) {
pr_warn("path_lookup on %s returned error %d\n",
- dev_name, error);
- return ERR_PTR(error);
+ fc->source, error);
+ return error;
}
- s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
- path.dentry->d_sb->s_bdev);
+ fc->fs_type = &gfs2_fs_type;
+ fc->sget_key = path.dentry->d_sb->s_bdev;
+ s = sget_fc(fc, test_meta_super, set_meta_super);
path_put(&path);
if (IS_ERR(s)) {
pr_warn("gfs2 mount does not exist\n");
- return ERR_CAST(s);
+ return PTR_ERR(s);
}
- if ((flags ^ s->s_flags) & SB_RDONLY) {
+ if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
deactivate_locked_super(s);
- return ERR_PTR(-EBUSY);
+ return -EBUSY;
}
sdp = s->s_fs_info;
- return dget(sdp->sd_master_dir);
+ fc->root = dget(sdp->sd_master_dir);
+ return 0;
+}
+
+static const struct fs_context_operations gfs2_meta_context_ops = {
+ .free = gfs2_fc_free,
+ .get_tree = gfs2_meta_get_tree,
+};
+
+static int gfs2_meta_init_fs_context(struct fs_context *fc)
+{
+ int ret = gfs2_init_fs_context(fc);
+
+ if (ret)
+ return ret;
+
+ fc->ops = &gfs2_meta_context_ops;
+ return 0;
}
static void gfs2_kill_sb(struct super_block *sb)
@@ -1382,7 +1641,8 @@ static void gfs2_kill_sb(struct super_block *sb)
struct file_system_type gfs2_fs_type = {
.name = "gfs2",
.fs_flags = FS_REQUIRES_DEV,
- .mount = gfs2_mount,
+ .init_fs_context = gfs2_init_fs_context,
+ .parameters = &gfs2_fs_parameters,
.kill_sb = gfs2_kill_sb,
.owner = THIS_MODULE,
};
@@ -1391,7 +1651,7 @@ MODULE_ALIAS_FS("gfs2");
struct file_system_type gfs2meta_fs_type = {
.name = "gfs2meta",
.fs_flags = FS_REQUIRES_DEV,
- .mount = gfs2_mount_meta,
+ .init_fs_context = gfs2_meta_init_fs_context,
.owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2meta");
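
For illustration, hypothetical examples of how the new fs_context parser (gfs2_parse_param above) maps mount option strings onto struct gfs2_args:

/* Illustration only -- not part of the patch.
 *
 *   -o quota               -> ar_quota = GFS2_QUOTA_ON       (bare flag)
 *   -o noquota             -> ar_quota = GFS2_QUOTA_OFF      (negated flag)
 *   -o quota=account       -> ar_quota = GFS2_QUOTA_ACCOUNT  (enum value)
 *   -o commit=0            -> rejected: requires a positive numeric argument
 *   -o debug,errors=panic  -> rejected: mutually exclusive
 *   -o statfs_percent=150  -> rejected: must be between 0 and 100
 */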
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 69c4b77f127b..e9f93045eb01 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -774,7 +774,7 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
nbytes = sizeof(struct gfs2_quota);
pg_beg = loc >> PAGE_SHIFT;
- pg_off = loc % PAGE_SIZE;
+ pg_off = offset_in_page(loc);
/* If the quota straddles a page boundary, split the write in two */
if ((pg_off + nbytes) > PAGE_SIZE) {
@@ -1273,7 +1273,7 @@ int gfs2_quota_sync(struct super_block *sb, int type)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
- unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
+ unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
unsigned int num_qd;
unsigned int x;
int error = 0;
@@ -1475,7 +1475,7 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
if (error == 0 || error == -EROFS)
return;
- if (!test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
+ if (!gfs2_withdrawn(sdp)) {
fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
sdp->sd_log_error = error;
wake_up(&sdp->sd_logd_waitq);
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index c529f8749a89..85f830e56945 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -263,11 +263,13 @@ static void clean_journal(struct gfs2_jdesc *jd,
u32 lblock = head->lh_blkno;
gfs2_replay_incr_blk(jd, &lblock);
- if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
- sdp->sd_log_flush_head = lblock;
gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0, lblock,
GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY,
REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC);
+ if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) {
+ sdp->sd_log_flush_head = lblock;
+ gfs2_log_incr_head(sdp);
+ }
}
@@ -326,7 +328,7 @@ void gfs2_recover_func(struct work_struct *work)
default:
goto fail;
- };
+ }
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
LM_FLAG_NOEXP | GL_NOCACHE, &ji_gh);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 49ac0a5e74ea..2466bb44a23c 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -2285,7 +2285,7 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
- char fs_id_buf[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
+ char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
(unsigned long long)rgd->rd_addr);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0acc5834f653..68cc7c291a81 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -44,258 +44,6 @@
#include "xattr.h"
#include "lops.h"
-#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
-
-enum {
- Opt_lockproto,
- Opt_locktable,
- Opt_hostdata,
- Opt_spectator,
- Opt_ignore_local_fs,
- Opt_localflocks,
- Opt_localcaching,
- Opt_debug,
- Opt_nodebug,
- Opt_upgrade,
- Opt_acl,
- Opt_noacl,
- Opt_quota_off,
- Opt_quota_account,
- Opt_quota_on,
- Opt_quota,
- Opt_noquota,
- Opt_suiddir,
- Opt_nosuiddir,
- Opt_data_writeback,
- Opt_data_ordered,
- Opt_meta,
- Opt_discard,
- Opt_nodiscard,
- Opt_commit,
- Opt_err_withdraw,
- Opt_err_panic,
- Opt_statfs_quantum,
- Opt_statfs_percent,
- Opt_quota_quantum,
- Opt_barrier,
- Opt_nobarrier,
- Opt_rgrplvb,
- Opt_norgrplvb,
- Opt_loccookie,
- Opt_noloccookie,
- Opt_error,
-};
-
-static const match_table_t tokens = {
- {Opt_lockproto, "lockproto=%s"},
- {Opt_locktable, "locktable=%s"},
- {Opt_hostdata, "hostdata=%s"},
- {Opt_spectator, "spectator"},
- {Opt_spectator, "norecovery"},
- {Opt_ignore_local_fs, "ignore_local_fs"},
- {Opt_localflocks, "localflocks"},
- {Opt_localcaching, "localcaching"},
- {Opt_debug, "debug"},
- {Opt_nodebug, "nodebug"},
- {Opt_upgrade, "upgrade"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_quota_off, "quota=off"},
- {Opt_quota_account, "quota=account"},
- {Opt_quota_on, "quota=on"},
- {Opt_quota, "quota"},
- {Opt_noquota, "noquota"},
- {Opt_suiddir, "suiddir"},
- {Opt_nosuiddir, "nosuiddir"},
- {Opt_data_writeback, "data=writeback"},
- {Opt_data_ordered, "data=ordered"},
- {Opt_meta, "meta"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
- {Opt_commit, "commit=%d"},
- {Opt_err_withdraw, "errors=withdraw"},
- {Opt_err_panic, "errors=panic"},
- {Opt_statfs_quantum, "statfs_quantum=%d"},
- {Opt_statfs_percent, "statfs_percent=%d"},
- {Opt_quota_quantum, "quota_quantum=%d"},
- {Opt_barrier, "barrier"},
- {Opt_nobarrier, "nobarrier"},
- {Opt_rgrplvb, "rgrplvb"},
- {Opt_norgrplvb, "norgrplvb"},
- {Opt_loccookie, "loccookie"},
- {Opt_noloccookie, "noloccookie"},
- {Opt_error, NULL}
-};
-
-/**
- * gfs2_mount_args - Parse mount options
- * @args: The structure into which the parsed options will be written
- * @options: The options to parse
- *
- * Return: errno
- */
-
-int gfs2_mount_args(struct gfs2_args *args, char *options)
-{
- char *o;
- int token;
- substring_t tmp[MAX_OPT_ARGS];
- int rv;
-
- /* Split the options into tokens with the "," character and
- process them */
-
- while (1) {
- o = strsep(&options, ",");
- if (o == NULL)
- break;
- if (*o == '\0')
- continue;
-
- token = match_token(o, tokens, tmp);
- switch (token) {
- case Opt_lockproto:
- match_strlcpy(args->ar_lockproto, &tmp[0],
- GFS2_LOCKNAME_LEN);
- break;
- case Opt_locktable:
- match_strlcpy(args->ar_locktable, &tmp[0],
- GFS2_LOCKNAME_LEN);
- break;
- case Opt_hostdata:
- match_strlcpy(args->ar_hostdata, &tmp[0],
- GFS2_LOCKNAME_LEN);
- break;
- case Opt_spectator:
- args->ar_spectator = 1;
- break;
- case Opt_ignore_local_fs:
- /* Retained for backwards compat only */
- break;
- case Opt_localflocks:
- args->ar_localflocks = 1;
- break;
- case Opt_localcaching:
- /* Retained for backwards compat only */
- break;
- case Opt_debug:
- if (args->ar_errors == GFS2_ERRORS_PANIC) {
- pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
- return -EINVAL;
- }
- args->ar_debug = 1;
- break;
- case Opt_nodebug:
- args->ar_debug = 0;
- break;
- case Opt_upgrade:
- /* Retained for backwards compat only */
- break;
- case Opt_acl:
- args->ar_posix_acl = 1;
- break;
- case Opt_noacl:
- args->ar_posix_acl = 0;
- break;
- case Opt_quota_off:
- case Opt_noquota:
- args->ar_quota = GFS2_QUOTA_OFF;
- break;
- case Opt_quota_account:
- args->ar_quota = GFS2_QUOTA_ACCOUNT;
- break;
- case Opt_quota_on:
- case Opt_quota:
- args->ar_quota = GFS2_QUOTA_ON;
- break;
- case Opt_suiddir:
- args->ar_suiddir = 1;
- break;
- case Opt_nosuiddir:
- args->ar_suiddir = 0;
- break;
- case Opt_data_writeback:
- args->ar_data = GFS2_DATA_WRITEBACK;
- break;
- case Opt_data_ordered:
- args->ar_data = GFS2_DATA_ORDERED;
- break;
- case Opt_meta:
- args->ar_meta = 1;
- break;
- case Opt_discard:
- args->ar_discard = 1;
- break;
- case Opt_nodiscard:
- args->ar_discard = 0;
- break;
- case Opt_commit:
- rv = match_int(&tmp[0], &args->ar_commit);
- if (rv || args->ar_commit <= 0) {
- pr_warn("commit mount option requires a positive numeric argument\n");
- return rv ? rv : -EINVAL;
- }
- break;
- case Opt_statfs_quantum:
- rv = match_int(&tmp[0], &args->ar_statfs_quantum);
- if (rv || args->ar_statfs_quantum < 0) {
- pr_warn("statfs_quantum mount option requires a non-negative numeric argument\n");
- return rv ? rv : -EINVAL;
- }
- break;
- case Opt_quota_quantum:
- rv = match_int(&tmp[0], &args->ar_quota_quantum);
- if (rv || args->ar_quota_quantum <= 0) {
- pr_warn("quota_quantum mount option requires a positive numeric argument\n");
- return rv ? rv : -EINVAL;
- }
- break;
- case Opt_statfs_percent:
- rv = match_int(&tmp[0], &args->ar_statfs_percent);
- if (rv || args->ar_statfs_percent < 0 ||
- args->ar_statfs_percent > 100) {
- pr_warn("statfs_percent mount option requires a numeric argument between 0 and 100\n");
- return rv ? rv : -EINVAL;
- }
- break;
- case Opt_err_withdraw:
- args->ar_errors = GFS2_ERRORS_WITHDRAW;
- break;
- case Opt_err_panic:
- if (args->ar_debug) {
- pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
- return -EINVAL;
- }
- args->ar_errors = GFS2_ERRORS_PANIC;
- break;
- case Opt_barrier:
- args->ar_nobarrier = 0;
- break;
- case Opt_nobarrier:
- args->ar_nobarrier = 1;
- break;
- case Opt_rgrplvb:
- args->ar_rgrplvb = 1;
- break;
- case Opt_norgrplvb:
- args->ar_rgrplvb = 0;
- break;
- case Opt_loccookie:
- args->ar_loccookie = 1;
- break;
- case Opt_noloccookie:
- args->ar_loccookie = 0;
- break;
- case Opt_error:
- default:
- pr_warn("invalid mount option: %s\n", o);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
/**
* gfs2_jindex_free - Clear all the journal index information
* @sdp: The GFS2 superblock
@@ -651,8 +399,7 @@ struct lfcc {
* Returns: errno
*/
-static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
- struct gfs2_holder *freeze_gh)
+static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip;
struct gfs2_jdesc *jd;
@@ -677,7 +424,9 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
}
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
- GL_NOCACHE, freeze_gh);
+ GL_NOCACHE, &sdp->sd_freeze_gh);
+ if (error)
+ goto out;
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
error = gfs2_jdesc_check(jd);
@@ -693,7 +442,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
}
if (error)
- gfs2_glock_dq_uninit(freeze_gh);
+ gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
out:
while (!list_empty(&list)) {
@@ -805,7 +554,7 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
if (!(flags & I_DIRTY_INODE))
return;
- if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
+ if (unlikely(gfs2_withdrawn(sdp)))
return;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
@@ -847,14 +596,14 @@ out:
* Returns: errno
*/
-static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
struct gfs2_holder freeze_gh;
int error;
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE,
&freeze_gh);
- if (error && !test_bit(SDF_WITHDRAWN, &sdp->sd_flags))
+ if (error && !gfs2_withdrawn(sdp))
return error;
flush_workqueue(gfs2_delete_workqueue);
@@ -1013,21 +762,25 @@ static int gfs2_freeze(struct super_block *sb)
if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
goto out;
- if (test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
- error = -EINVAL;
- goto out;
- }
-
for (;;) {
- error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
+ if (gfs2_withdrawn(sdp)) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ error = gfs2_lock_fs_check_clean(sdp);
if (!error)
break;
if (error == -EBUSY)
fs_err(sdp, "waiting for recovery before freeze\n");
- else
+ else if (error == -EIO) {
+ fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
+ "to recovery error.\n");
+ goto out;
+ } else {
fs_err(sdp, "error freezing FS: %d\n", error);
-
+ }
fs_err(sdp, "retrying...\n");
msleep(1000);
}
@@ -1227,84 +980,6 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
}
/**
- * gfs2_remount_fs - called when the FS is remounted
- * @sb: the filesystem
- * @flags: the remount flags
- * @data: extra data passed in (not used right now)
- *
- * Returns: errno
- */
-
-static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
-{
- struct gfs2_sbd *sdp = sb->s_fs_info;
- struct gfs2_args args = sdp->sd_args; /* Default to current settings */
- struct gfs2_tune *gt = &sdp->sd_tune;
- int error;
-
- sync_filesystem(sb);
-
- spin_lock(&gt->gt_spin);
- args.ar_commit = gt->gt_logd_secs;
- args.ar_quota_quantum = gt->gt_quota_quantum;
- if (gt->gt_statfs_slow)
- args.ar_statfs_quantum = 0;
- else
- args.ar_statfs_quantum = gt->gt_statfs_quantum;
- spin_unlock(&gt->gt_spin);
- error = gfs2_mount_args(&args, data);
- if (error)
- return error;
-
- /* Not allowed to change locking details */
- if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
- strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
- strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
- return -EINVAL;
-
- /* Some flags must not be changed */
- if (args_neq(&args, &sdp->sd_args, spectator) ||
- args_neq(&args, &sdp->sd_args, localflocks) ||
- args_neq(&args, &sdp->sd_args, meta))
- return -EINVAL;
-
- if (sdp->sd_args.ar_spectator)
- *flags |= SB_RDONLY;
-
- if ((sb->s_flags ^ *flags) & SB_RDONLY) {
- if (*flags & SB_RDONLY)
- error = gfs2_make_fs_ro(sdp);
- else
- error = gfs2_make_fs_rw(sdp);
- }
-
- sdp->sd_args = args;
- if (sdp->sd_args.ar_posix_acl)
- sb->s_flags |= SB_POSIXACL;
- else
- sb->s_flags &= ~SB_POSIXACL;
- if (sdp->sd_args.ar_nobarrier)
- set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
- else
- clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
- spin_lock(&gt->gt_spin);
- gt->gt_logd_secs = args.ar_commit;
- gt->gt_quota_quantum = args.ar_quota_quantum;
- if (args.ar_statfs_quantum) {
- gt->gt_statfs_slow = 0;
- gt->gt_statfs_quantum = args.ar_statfs_quantum;
- }
- else {
- gt->gt_statfs_slow = 1;
- gt->gt_statfs_quantum = 30;
- }
- spin_unlock(&gt->gt_spin);
-
- gfs2_online_uevent(sdp);
- return error;
-}
-
-/**
* gfs2_drop_inode - Drop an inode (test for remote unlink)
* @inode: The inode to drop
*
@@ -1722,13 +1397,13 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
struct gfs2_inode *ip;
ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
- if (ip) {
- ip->i_flags = 0;
- ip->i_gl = NULL;
- memset(&ip->i_res, 0, sizeof(ip->i_res));
- RB_CLEAR_NODE(&ip->i_res.rs_node);
- ip->i_rahead = 0;
- }
+ if (!ip)
+ return NULL;
+ ip->i_flags = 0;
+ ip->i_gl = NULL;
+ memset(&ip->i_res, 0, sizeof(ip->i_res));
+ RB_CLEAR_NODE(&ip->i_res.rs_node);
+ ip->i_rahead = 0;
return &ip->i_inode;
}
@@ -1748,7 +1423,6 @@ const struct super_operations gfs2_super_ops = {
.freeze_super = gfs2_freeze,
.thaw_super = gfs2_unfreeze,
.statfs = gfs2_statfs,
- .remount_fs = gfs2_remount_fs,
.drop_inode = gfs2_drop_inode,
.show_options = gfs2_show_options,
};
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 9d49eaadb9d9..b8bf811a1305 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -24,8 +24,6 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
-extern int gfs2_mount_args(struct gfs2_args *args, char *data);
-
extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
@@ -33,6 +31,7 @@ extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
struct gfs2_inode **ipp);
extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
+extern int gfs2_make_fs_ro(struct gfs2_sbd *sdp);
extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index dd15b8e4af2c..8ccb68f4ed16 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -118,7 +118,7 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
- unsigned int b = test_bit(SDF_WITHDRAWN, &sdp->sd_flags);
+ unsigned int b = gfs2_withdrawn(sdp);
return snprintf(buf, PAGE_SIZE, "%u\n", b);
}
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 35e3059255fe..9d4227330de4 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -262,6 +262,8 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
list_del_init(&bd->bd_list);
gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
sdp->sd_log_num_revoke--;
+ if (bd->bd_gl)
+ gfs2_glock_remove_revoke(bd->bd_gl);
kmem_cache_free(gfs2_bufdata_cachep, bd);
tr->tr_num_revoke--;
if (--n == 0)
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 83f6c582773a..ec600b487498 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -178,7 +178,7 @@ int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide,
const char *function, char *file, unsigned int line)
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
- char fs_id_buf[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
+ char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
int rv;
sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
@@ -258,7 +258,7 @@ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
const char *function, char *file, unsigned int line,
bool withdraw)
{
- if (!test_bit(SDF_WITHDRAWN, &sdp->sd_flags))
+ if (!gfs2_withdrawn(sdp))
fs_err(sdp,
"fatal: I/O error\n"
" block = %llu\n"
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 4b68b2c1fe56..f2702bc9837c 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -164,6 +164,15 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
return x;
}
+/**
+ * gfs2_withdrawn - test whether the file system is withdrawing or withdrawn
+ * @sdp: the superblock
+ */
+static inline bool gfs2_withdrawn(struct gfs2_sbd *sdp)
+{
+ return test_bit(SDF_WITHDRAWN, &sdp->sd_flags);
+}
+
#define gfs2_tune_get(sdp, field) \
gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
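Editor's note: the hunks above swap open-coded test_bit(SDF_WITHDRAWN, &sdp->sd_flags) checks for the new gfs2_withdrawn() helper. A minimal sketch of the intended call-site pattern follows; gfs2_example_write_path() is a hypothetical caller, not a function in this series.

	static int gfs2_example_write_path(struct gfs2_sbd *sdp)
	{
		/* Refuse new work once the filesystem is withdrawing or withdrawn. */
		if (unlikely(gfs2_withdrawn(sdp)))
			return -EIO;

		/* ... normal metadata/log work continues here ... */
		return 0;
	}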
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index d85230c84ef2..f32f15669996 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -325,4 +325,5 @@ const struct file_operations hpfs_dir_ops =
.release = hpfs_dir_release,
.fsync = hpfs_file_fsync,
.unlocked_ioctl = hpfs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
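Editor's note: the .compat_ioctl = compat_ptr_ioctl wiring here (and in hpfs/file.c below) relies on the generic helper added elsewhere in this series. Roughly, it only converts the compat pointer argument and forwards to ->unlocked_ioctl; a sketch of that behaviour, not necessarily the exact implementation:

	long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		if (!file->f_op->unlocked_ioctl)
			return -ENOIOCTLCMD;

		return file->f_op->unlocked_ioctl(file, cmd,
						  (unsigned long)compat_ptr(arg));
	}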
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 1ecec124e76f..b36abf9cb345 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -215,6 +215,7 @@ const struct file_operations hpfs_file_ops =
.fsync = hpfs_file_fsync,
.splice_read = generic_file_splice_read,
.unlocked_ioctl = hpfs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
const struct inode_operations hpfs_file_iops =
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index ab2e7cc2ff33..1cca83218fb5 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -334,7 +334,7 @@ long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg);
* local time (HPFS) to GMT (Unix)
*/
-static inline time64_t local_to_gmt(struct super_block *s, time32_t t)
+static inline time64_t local_to_gmt(struct super_block *s, time64_t t)
{
extern struct timezone sys_tz;
return t + sys_tz.tz_minuteswest * 60 + hpfs_sb(s)->sb_timeshift;
@@ -343,9 +343,7 @@ static inline time64_t local_to_gmt(struct super_block *s, time32_t t)
static inline time32_t gmt_to_local(struct super_block *s, time64_t t)
{
extern struct timezone sys_tz;
- t = t - sys_tz.tz_minuteswest * 60 - hpfs_sb(s)->sb_timeshift;
-
- return clamp_t(time64_t, t, 0, U32_MAX);
+ return t - sys_tz.tz_minuteswest * 60 - hpfs_sb(s)->sb_timeshift;
}
static inline time32_t local_get_seconds(struct super_block *s)
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 9db6d84f0d62..0a677a9aaf34 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -614,6 +614,8 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
s->s_magic = HPFS_SUPER_MAGIC;
s->s_op = &hpfs_sops;
s->s_d_op = &hpfs_dentry_operations;
+ s->s_time_min = local_to_gmt(s, 0);
+ s->s_time_max = local_to_gmt(s, U32_MAX);
sbi->sb_root = le32_to_cpu(superblock->root);
sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a478df035651..d5c2a3158610 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -440,7 +440,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
u32 hash;
index = page->index;
- hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
+ hash = hugetlb_fault_mutex_hash(mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/*
@@ -644,7 +644,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
addr = index * hpage_size;
/* mutex taken here, fault path and hole punch */
- hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
+ hash = hugetlb_fault_mutex_hash(mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/* See if already present in mapping to avoid alloc/free */
@@ -815,8 +815,11 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
/*
 * File creation. Allocate an inode, and we're done.
*/
-static int hugetlbfs_mknod(struct inode *dir,
- struct dentry *dentry, umode_t mode, dev_t dev)
+static int do_hugetlbfs_mknod(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode,
+ dev_t dev,
+ bool tmpfile)
{
struct inode *inode;
int error = -ENOSPC;
@@ -824,13 +827,23 @@ static int hugetlbfs_mknod(struct inode *dir,
inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
if (inode) {
dir->i_ctime = dir->i_mtime = current_time(dir);
- d_instantiate(dentry, inode);
- dget(dentry); /* Extra count - pin the dentry in core */
+ if (tmpfile) {
+ d_tmpfile(dentry, inode);
+ } else {
+ d_instantiate(dentry, inode);
+ dget(dentry); /* Extra count - pin the dentry in core */
+ }
error = 0;
}
return error;
}
+static int hugetlbfs_mknod(struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev)
+{
+ return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
+}
+
static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
@@ -844,6 +857,12 @@ static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mo
return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
+static int hugetlbfs_tmpfile(struct inode *dir,
+ struct dentry *dentry, umode_t mode)
+{
+ return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
+}
+
static int hugetlbfs_symlink(struct inode *dir,
struct dentry *dentry, const char *symname)
{
@@ -1102,6 +1121,7 @@ static const struct inode_operations hugetlbfs_dir_inode_operations = {
.mknod = hugetlbfs_mknod,
.rename = simple_rename,
.setattr = hugetlbfs_setattr,
+ .tmpfile = hugetlbfs_tmpfile,
};
static const struct inode_operations hugetlbfs_inode_operations = {
@@ -1461,28 +1481,41 @@ static int __init init_hugetlbfs_fs(void)
sizeof(struct hugetlbfs_inode_info),
0, SLAB_ACCOUNT, init_once);
if (hugetlbfs_inode_cachep == NULL)
- goto out2;
+ goto out;
error = register_filesystem(&hugetlbfs_fs_type);
if (error)
- goto out;
+ goto out_free;
+ /* default hstate mount is required */
+ mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
+ if (IS_ERR(mnt)) {
+ error = PTR_ERR(mnt);
+ goto out_unreg;
+ }
+ hugetlbfs_vfsmount[default_hstate_idx] = mnt;
+
+ /* other hstates are optional */
i = 0;
for_each_hstate(h) {
+ if (i == default_hstate_idx)
+ continue;
+
mnt = mount_one_hugetlbfs(h);
- if (IS_ERR(mnt) && i == 0) {
- error = PTR_ERR(mnt);
- goto out;
- }
- hugetlbfs_vfsmount[i] = mnt;
+ if (IS_ERR(mnt))
+ hugetlbfs_vfsmount[i] = NULL;
+ else
+ hugetlbfs_vfsmount[i] = mnt;
i++;
}
return 0;
- out:
+ out_unreg:
+ (void)unregister_filesystem(&hugetlbfs_fs_type);
+ out_free:
kmem_cache_destroy(hugetlbfs_inode_cachep);
- out2:
+ out:
return error;
}
fs_initcall(init_hugetlbfs_fs)
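Editor's note: with the ->tmpfile hook wired up above, O_TMPFILE opens on a hugetlbfs mount now succeed. A small userspace sketch, assuming a hugetlbfs mount at /dev/hugepages and a 2 MiB default huge page size (both are assumptions, not part of this patch):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;	/* one huge page, size is an assumption */
		int fd = open("/dev/hugepages", O_TMPFILE | O_RDWR, 0600);

		if (fd < 0)
			return 1;
		/* Unlinked, huge-page backed file; disappears when fd is closed. */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p != MAP_FAILED)
			munmap(p, len);
		close(fd);
		return 0;
	}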
diff --git a/fs/inode.c b/fs/inode.c
index 0f1e3b563c47..fef457a42882 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -181,6 +181,9 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
mapping->flags = 0;
mapping->wb_err = 0;
atomic_set(&mapping->i_mmap_writable, 0);
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ atomic_set(&mapping->nr_thps, 0);
+#endif
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
mapping->private_data = NULL;
mapping->writeback_index = 0;
@@ -2167,6 +2170,37 @@ struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran)
EXPORT_SYMBOL(timespec64_trunc);
/**
+ * timestamp_truncate - Truncate timespec to a granularity
+ * @t: Timespec
+ * @inode: inode being updated
+ *
+ * Truncate a timespec to the granularity supported by the fs
+ * containing the inode. Always rounds down. The granularity
+ * (sb->s_time_gran) must not be 0 nor greater than a second
+ * (NSEC_PER_SEC, or 10^9 ns).
+ */
+struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ unsigned int gran = sb->s_time_gran;
+
+ t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
+ if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
+ t.tv_nsec = 0;
+
+ /* Avoid division in the common cases 1 ns and 1 s. */
+ if (gran == 1)
+ ; /* nothing */
+ else if (gran == NSEC_PER_SEC)
+ t.tv_nsec = 0;
+ else if (gran > 1 && gran < NSEC_PER_SEC)
+ t.tv_nsec -= t.tv_nsec % gran;
+ else
+ WARN(1, "invalid file time granularity: %u", gran);
+ return t;
+}
+EXPORT_SYMBOL(timestamp_truncate);
+
+/**
* current_time - Return FS time
* @inode: inode.
*
@@ -2187,7 +2221,7 @@ struct timespec64 current_time(struct inode *inode)
return now;
}
- return timespec64_trunc(now, inode->i_sb->s_time_gran);
+ return timestamp_truncate(now, inode);
}
EXPORT_SYMBOL(current_time);
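Editor's note: a rough illustration of the new helper's clamping and granularity handling; the numbers are hypothetical, inode stands in for the inode being stamped, and a filesystem with s_time_gran == NSEC_PER_SEC is assumed.

	struct timespec64 t = { .tv_sec = 1700000000, .tv_nsec = 123456789 };

	/* Second granularity: the sub-second part is simply dropped. */
	t = timestamp_truncate(t, inode);	/* -> { 1700000000, 0 } */

	/*
	 * Out-of-range values are clamped to [s_time_min, s_time_max] and the
	 * nanoseconds zeroed, so on-disk timestamps never overflow.
	 */
	t.tv_sec = S64_MAX;
	t = timestamp_truncate(t, inode);	/* -> { sb->s_time_max, 0 } */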
diff --git a/fs/io-wq.c b/fs/io-wq.c
new file mode 100644
index 000000000000..91b85df0861e
--- /dev/null
+++ b/fs/io-wq.c
@@ -0,0 +1,1094 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic worker thread pool for io_uring
+ *
+ * Copyright (C) 2019 Jens Axboe
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched/signal.h>
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/rculist_nulls.h>
+
+#include "io-wq.h"
+
+#define WORKER_IDLE_TIMEOUT (5 * HZ)
+
+enum {
+ IO_WORKER_F_UP = 1, /* up and active */
+ IO_WORKER_F_RUNNING = 2, /* account as running */
+ IO_WORKER_F_FREE = 4, /* worker on free list */
+ IO_WORKER_F_EXITING = 8, /* worker exiting */
+ IO_WORKER_F_FIXED = 16, /* static idle worker */
+ IO_WORKER_F_BOUND = 32, /* is doing bounded work */
+};
+
+enum {
+ IO_WQ_BIT_EXIT = 0, /* wq exiting */
+ IO_WQ_BIT_CANCEL = 1, /* cancel work on list */
+ IO_WQ_BIT_ERROR = 2, /* error on setup */
+};
+
+enum {
+ IO_WQE_FLAG_STALLED = 1, /* stalled on hash */
+};
+
+/*
+ * One for each thread in a wqe pool
+ */
+struct io_worker {
+ refcount_t ref;
+ unsigned flags;
+ struct hlist_nulls_node nulls_node;
+ struct list_head all_list;
+ struct task_struct *task;
+ wait_queue_head_t wait;
+ struct io_wqe *wqe;
+
+ struct io_wq_work *cur_work;
+ spinlock_t lock;
+
+ struct rcu_head rcu;
+ struct mm_struct *mm;
+ const struct cred *creds;
+ struct files_struct *restore_files;
+};
+
+#if BITS_PER_LONG == 64
+#define IO_WQ_HASH_ORDER 6
+#else
+#define IO_WQ_HASH_ORDER 5
+#endif
+
+struct io_wqe_acct {
+ unsigned nr_workers;
+ unsigned max_workers;
+ atomic_t nr_running;
+};
+
+enum {
+ IO_WQ_ACCT_BOUND,
+ IO_WQ_ACCT_UNBOUND,
+};
+
+/*
+ * Per-node worker thread pool
+ */
+struct io_wqe {
+ struct {
+ spinlock_t lock;
+ struct io_wq_work_list work_list;
+ unsigned long hash_map;
+ unsigned flags;
+ } ____cacheline_aligned_in_smp;
+
+ int node;
+ struct io_wqe_acct acct[2];
+
+ struct hlist_nulls_head free_list;
+ struct hlist_nulls_head busy_list;
+ struct list_head all_list;
+
+ struct io_wq *wq;
+};
+
+/*
+ * Per io_wq state
+ */
+struct io_wq {
+ struct io_wqe **wqes;
+ unsigned long state;
+
+ get_work_fn *get_work;
+ put_work_fn *put_work;
+
+ struct task_struct *manager;
+ struct user_struct *user;
+ struct cred *creds;
+ struct mm_struct *mm;
+ refcount_t refs;
+ struct completion done;
+};
+
+static bool io_worker_get(struct io_worker *worker)
+{
+ return refcount_inc_not_zero(&worker->ref);
+}
+
+static void io_worker_release(struct io_worker *worker)
+{
+ if (refcount_dec_and_test(&worker->ref))
+ wake_up_process(worker->task);
+}
+
+/*
+ * Note: drops the wqe->lock if returning true! The caller must re-acquire
+ * the lock in that case. Some callers need to restart handling if this
+ * happens, so we can't just re-acquire the lock on behalf of the caller.
+ */
+static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
+{
+ bool dropped_lock = false;
+
+ if (worker->creds) {
+ revert_creds(worker->creds);
+ worker->creds = NULL;
+ }
+
+ if (current->files != worker->restore_files) {
+ __acquire(&wqe->lock);
+ spin_unlock_irq(&wqe->lock);
+ dropped_lock = true;
+
+ task_lock(current);
+ current->files = worker->restore_files;
+ task_unlock(current);
+ }
+
+ /*
+ * If we have an active mm, we need to drop the wq lock before unusing
+ * it. If we do, return true and let the caller retry the idle loop.
+ */
+ if (worker->mm) {
+ if (!dropped_lock) {
+ __acquire(&wqe->lock);
+ spin_unlock_irq(&wqe->lock);
+ dropped_lock = true;
+ }
+ __set_current_state(TASK_RUNNING);
+ set_fs(KERNEL_DS);
+ unuse_mm(worker->mm);
+ mmput(worker->mm);
+ worker->mm = NULL;
+ }
+
+ return dropped_lock;
+}
+
+static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
+ struct io_wq_work *work)
+{
+ if (work->flags & IO_WQ_WORK_UNBOUND)
+ return &wqe->acct[IO_WQ_ACCT_UNBOUND];
+
+ return &wqe->acct[IO_WQ_ACCT_BOUND];
+}
+
+static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
+ struct io_worker *worker)
+{
+ if (worker->flags & IO_WORKER_F_BOUND)
+ return &wqe->acct[IO_WQ_ACCT_BOUND];
+
+ return &wqe->acct[IO_WQ_ACCT_UNBOUND];
+}
+
+static void io_worker_exit(struct io_worker *worker)
+{
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+ unsigned nr_workers;
+
+ /*
+ * If we're not at zero, someone else is holding a brief reference
+ * to the worker. Wait for that to go away.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!refcount_dec_and_test(&worker->ref))
+ schedule();
+ __set_current_state(TASK_RUNNING);
+
+ preempt_disable();
+ current->flags &= ~PF_IO_WORKER;
+ if (worker->flags & IO_WORKER_F_RUNNING)
+ atomic_dec(&acct->nr_running);
+ if (!(worker->flags & IO_WORKER_F_BOUND))
+ atomic_dec(&wqe->wq->user->processes);
+ worker->flags = 0;
+ preempt_enable();
+
+ spin_lock_irq(&wqe->lock);
+ hlist_nulls_del_rcu(&worker->nulls_node);
+ list_del_rcu(&worker->all_list);
+ if (__io_worker_unuse(wqe, worker)) {
+ __release(&wqe->lock);
+ spin_lock_irq(&wqe->lock);
+ }
+ acct->nr_workers--;
+ nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
+ spin_unlock_irq(&wqe->lock);
+
+ /* all workers gone, wq exit can proceed */
+ if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
+ complete(&wqe->wq->done);
+
+ kfree_rcu(worker, rcu);
+}
+
+static inline bool io_wqe_run_queue(struct io_wqe *wqe)
+ __must_hold(wqe->lock)
+{
+ if (!wq_list_empty(&wqe->work_list) &&
+ !(wqe->flags & IO_WQE_FLAG_STALLED))
+ return true;
+ return false;
+}
+
+/*
+ * Check head of free list for an available worker. If one isn't available,
+ * caller must wake up the wq manager to create one.
+ */
+static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
+ __must_hold(RCU)
+{
+ struct hlist_nulls_node *n;
+ struct io_worker *worker;
+
+ n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
+ if (is_a_nulls(n))
+ return false;
+
+ worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
+ if (io_worker_get(worker)) {
+ wake_up(&worker->wait);
+ io_worker_release(worker);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * We need a worker. If we find a free one, we're good. If not, and we're
+ * below the max number of workers, wake up the manager to create one.
+ */
+static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
+{
+ bool ret;
+
+ /*
+ * Most likely an attempt to queue unbounded work on an io_wq that
+ * wasn't set up with any unbounded workers.
+ */
+ WARN_ON_ONCE(!acct->max_workers);
+
+ rcu_read_lock();
+ ret = io_wqe_activate_free_worker(wqe);
+ rcu_read_unlock();
+
+ if (!ret && acct->nr_workers < acct->max_workers)
+ wake_up_process(wqe->wq->manager);
+}
+
+static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
+{
+ struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+
+ atomic_inc(&acct->nr_running);
+}
+
+static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
+ __must_hold(wqe->lock)
+{
+ struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+
+ if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
+ io_wqe_wake_worker(wqe, acct);
+}
+
+static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
+{
+ allow_kernel_signal(SIGINT);
+
+ current->flags |= PF_IO_WORKER;
+
+ worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+ worker->restore_files = current->files;
+ io_wqe_inc_running(wqe, worker);
+}
+
+/*
+ * Worker will start processing some work. Move it to the busy list if
+ * it's currently on the free list.
+ */
+static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
+ struct io_wq_work *work)
+ __must_hold(wqe->lock)
+{
+ bool worker_bound, work_bound;
+
+ if (worker->flags & IO_WORKER_F_FREE) {
+ worker->flags &= ~IO_WORKER_F_FREE;
+ hlist_nulls_del_init_rcu(&worker->nulls_node);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
+ }
+
+ /*
+ * If worker is moving from bound to unbound (or vice versa), then
+ * ensure we update the running accounting.
+ */
+ worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
+ work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
+ if (worker_bound != work_bound) {
+ io_wqe_dec_running(wqe, worker);
+ if (work_bound) {
+ worker->flags |= IO_WORKER_F_BOUND;
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
+ wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
+ atomic_dec(&wqe->wq->user->processes);
+ } else {
+ worker->flags &= ~IO_WORKER_F_BOUND;
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
+ wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
+ atomic_inc(&wqe->wq->user->processes);
+ }
+ io_wqe_inc_running(wqe, worker);
+ }
+}
+
+/*
+ * No work, worker going to sleep. Move to freelist, and unuse mm if we
+ * have one attached. Dropping the mm may potentially sleep, so we drop
+ * the lock in that case and return success. Since the caller has to
+ * retry the loop in that case (we changed task state), we don't regrab
+ * the lock if we return success.
+ */
+static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
+ __must_hold(wqe->lock)
+{
+ if (!(worker->flags & IO_WORKER_F_FREE)) {
+ worker->flags |= IO_WORKER_F_FREE;
+ hlist_nulls_del_init_rcu(&worker->nulls_node);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+ }
+
+ return __io_worker_unuse(wqe, worker);
+}
+
+static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
+ __must_hold(wqe->lock)
+{
+ struct io_wq_work_node *node, *prev;
+ struct io_wq_work *work;
+
+ wq_list_for_each(node, prev, &wqe->work_list) {
+ work = container_of(node, struct io_wq_work, list);
+
+ /* not hashed, can run anytime */
+ if (!(work->flags & IO_WQ_WORK_HASHED)) {
+ wq_node_del(&wqe->work_list, node, prev);
+ return work;
+ }
+
+ /* hashed, can run if not already running */
+ *hash = work->flags >> IO_WQ_HASH_SHIFT;
+ if (!(wqe->hash_map & BIT_ULL(*hash))) {
+ wqe->hash_map |= BIT_ULL(*hash);
+ wq_node_del(&wqe->work_list, node, prev);
+ return work;
+ }
+ }
+
+ return NULL;
+}
+
+static void io_worker_handle_work(struct io_worker *worker)
+ __releases(wqe->lock)
+{
+ struct io_wq_work *work, *old_work = NULL, *put_work = NULL;
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wq *wq = wqe->wq;
+
+ do {
+ unsigned hash = -1U;
+
+ /*
+ * If we got some work, mark us as busy. If we didn't, but
+ * the list isn't empty, it means we stalled on hashed work.
+ * Mark us stalled so we don't keep looking for work when we
+ * can't make progress; any work completion or insertion will
+ * clear the stalled flag.
+ */
+ work = io_get_next_work(wqe, &hash);
+ if (work)
+ __io_worker_busy(wqe, worker, work);
+ else if (!wq_list_empty(&wqe->work_list))
+ wqe->flags |= IO_WQE_FLAG_STALLED;
+
+ spin_unlock_irq(&wqe->lock);
+ if (put_work && wq->put_work)
+ wq->put_work(old_work);
+ if (!work)
+ break;
+next:
+ /* flush any pending signals before assigning new work */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ spin_lock_irq(&worker->lock);
+ worker->cur_work = work;
+ spin_unlock_irq(&worker->lock);
+
+ if (work->flags & IO_WQ_WORK_CB)
+ work->func(&work);
+
+ if ((work->flags & IO_WQ_WORK_NEEDS_FILES) &&
+ current->files != work->files) {
+ task_lock(current);
+ current->files = work->files;
+ task_unlock(current);
+ }
+ if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
+ wq->mm && mmget_not_zero(wq->mm)) {
+ use_mm(wq->mm);
+ set_fs(USER_DS);
+ worker->mm = wq->mm;
+ }
+ if (!worker->creds)
+ worker->creds = override_creds(wq->creds);
+ if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
+ work->flags |= IO_WQ_WORK_CANCEL;
+ if (worker->mm)
+ work->flags |= IO_WQ_WORK_HAS_MM;
+
+ if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
+ put_work = work;
+ wq->get_work(work);
+ }
+
+ old_work = work;
+ work->func(&work);
+
+ spin_lock_irq(&worker->lock);
+ worker->cur_work = NULL;
+ spin_unlock_irq(&worker->lock);
+
+ spin_lock_irq(&wqe->lock);
+
+ if (hash != -1U) {
+ wqe->hash_map &= ~BIT_ULL(hash);
+ wqe->flags &= ~IO_WQE_FLAG_STALLED;
+ }
+ if (work && work != old_work) {
+ spin_unlock_irq(&wqe->lock);
+
+ if (put_work && wq->put_work) {
+ wq->put_work(put_work);
+ put_work = NULL;
+ }
+
+ /* dependent work not hashed */
+ hash = -1U;
+ goto next;
+ }
+ } while (1);
+}
+
+static int io_wqe_worker(void *data)
+{
+ struct io_worker *worker = data;
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wq *wq = wqe->wq;
+ DEFINE_WAIT(wait);
+
+ io_worker_start(wqe, worker);
+
+ while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+ prepare_to_wait(&worker->wait, &wait, TASK_INTERRUPTIBLE);
+
+ spin_lock_irq(&wqe->lock);
+ if (io_wqe_run_queue(wqe)) {
+ __set_current_state(TASK_RUNNING);
+ io_worker_handle_work(worker);
+ continue;
+ }
+ /* drops the lock on success, retry */
+ if (__io_worker_idle(wqe, worker)) {
+ __release(&wqe->lock);
+ continue;
+ }
+ spin_unlock_irq(&wqe->lock);
+ if (signal_pending(current))
+ flush_signals(current);
+ if (schedule_timeout(WORKER_IDLE_TIMEOUT))
+ continue;
+ /* timed out, exit unless we're the fixed worker */
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
+ !(worker->flags & IO_WORKER_F_FIXED))
+ break;
+ }
+
+ finish_wait(&worker->wait, &wait);
+
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+ spin_lock_irq(&wqe->lock);
+ if (!wq_list_empty(&wqe->work_list))
+ io_worker_handle_work(worker);
+ else
+ spin_unlock_irq(&wqe->lock);
+ }
+
+ io_worker_exit(worker);
+ return 0;
+}
+
+/*
+ * Called when a worker is scheduled in. Mark us as currently running.
+ */
+void io_wq_worker_running(struct task_struct *tsk)
+{
+ struct io_worker *worker = kthread_data(tsk);
+ struct io_wqe *wqe = worker->wqe;
+
+ if (!(worker->flags & IO_WORKER_F_UP))
+ return;
+ if (worker->flags & IO_WORKER_F_RUNNING)
+ return;
+ worker->flags |= IO_WORKER_F_RUNNING;
+ io_wqe_inc_running(wqe, worker);
+}
+
+/*
+ * Called when a worker is going to sleep. If there are no workers currently
+ * running and we have work pending, wake up a free one or have the manager
+ * set one up.
+ */
+void io_wq_worker_sleeping(struct task_struct *tsk)
+{
+ struct io_worker *worker = kthread_data(tsk);
+ struct io_wqe *wqe = worker->wqe;
+
+ if (!(worker->flags & IO_WORKER_F_UP))
+ return;
+ if (!(worker->flags & IO_WORKER_F_RUNNING))
+ return;
+
+ worker->flags &= ~IO_WORKER_F_RUNNING;
+
+ spin_lock_irq(&wqe->lock);
+ io_wqe_dec_running(wqe, worker);
+ spin_unlock_irq(&wqe->lock);
+}
+
+static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+{
+ struct io_wqe_acct *acct = &wqe->acct[index];
+ struct io_worker *worker;
+
+ worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
+ if (!worker)
+ return false;
+
+ refcount_set(&worker->ref, 1);
+ worker->nulls_node.pprev = NULL;
+ init_waitqueue_head(&worker->wait);
+ worker->wqe = wqe;
+ spin_lock_init(&worker->lock);
+
+ worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
+ "io_wqe_worker-%d/%d", index, wqe->node);
+ if (IS_ERR(worker->task)) {
+ kfree(worker);
+ return false;
+ }
+
+ spin_lock_irq(&wqe->lock);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+ list_add_tail_rcu(&worker->all_list, &wqe->all_list);
+ worker->flags |= IO_WORKER_F_FREE;
+ if (index == IO_WQ_ACCT_BOUND)
+ worker->flags |= IO_WORKER_F_BOUND;
+ if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
+ worker->flags |= IO_WORKER_F_FIXED;
+ acct->nr_workers++;
+ spin_unlock_irq(&wqe->lock);
+
+ if (index == IO_WQ_ACCT_UNBOUND)
+ atomic_inc(&wq->user->processes);
+
+ wake_up_process(worker->task);
+ return true;
+}
+
+static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
+ __must_hold(wqe->lock)
+{
+ struct io_wqe_acct *acct = &wqe->acct[index];
+
+ /* if we have available workers or no work, no need */
+ if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
+ return false;
+ return acct->nr_workers < acct->max_workers;
+}
+
+/*
+ * Manager thread. Tasked with creating new workers, if we need them.
+ */
+static int io_wq_manager(void *data)
+{
+ struct io_wq *wq = data;
+ int workers_to_create = num_possible_nodes();
+ int node;
+
+ /* create fixed workers */
+ refcount_set(&wq->refs, workers_to_create);
+ for_each_node(node) {
+ if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
+ goto err;
+ workers_to_create--;
+ }
+
+ complete(&wq->done);
+
+ while (!kthread_should_stop()) {
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+ bool fork_worker[2] = { false, false };
+
+ spin_lock_irq(&wqe->lock);
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
+ fork_worker[IO_WQ_ACCT_BOUND] = true;
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
+ fork_worker[IO_WQ_ACCT_UNBOUND] = true;
+ spin_unlock_irq(&wqe->lock);
+ if (fork_worker[IO_WQ_ACCT_BOUND])
+ create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
+ if (fork_worker[IO_WQ_ACCT_UNBOUND])
+ create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+
+ return 0;
+err:
+ set_bit(IO_WQ_BIT_ERROR, &wq->state);
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ if (refcount_sub_and_test(workers_to_create, &wq->refs))
+ complete(&wq->done);
+ return 0;
+}
+
+static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
+ struct io_wq_work *work)
+{
+ bool free_worker;
+
+ if (!(work->flags & IO_WQ_WORK_UNBOUND))
+ return true;
+ if (atomic_read(&acct->nr_running))
+ return true;
+
+ rcu_read_lock();
+ free_worker = !hlist_nulls_empty(&wqe->free_list);
+ rcu_read_unlock();
+ if (free_worker)
+ return true;
+
+ if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
+ !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
+ return false;
+
+ return true;
+}
+
+static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
+{
+ struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
+ unsigned long flags;
+
+ /*
+ * Do early check to see if we need a new unbound worker, and if we do,
+ * if we're allowed to do so. This isn't 100% accurate as there's a
+ * gap between this check and incrementing the value, but that's OK.
+ * It's close enough to not be an issue; fork() has the same delay.
+ */
+ if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ return;
+ }
+
+ spin_lock_irqsave(&wqe->lock, flags);
+ wq_list_add_tail(&work->list, &wqe->work_list);
+ wqe->flags &= ~IO_WQE_FLAG_STALLED;
+ spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (!atomic_read(&acct->nr_running))
+ io_wqe_wake_worker(wqe, acct);
+}
+
+void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
+{
+ struct io_wqe *wqe = wq->wqes[numa_node_id()];
+
+ io_wqe_enqueue(wqe, work);
+}
+
+/*
+ * Enqueue work, hashed by some key. Work items that hash to the same value
+ * will not be done in parallel. Used to limit concurrent writes, generally
+ * hashed by inode.
+ */
+void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val)
+{
+ struct io_wqe *wqe = wq->wqes[numa_node_id()];
+ unsigned bit;
+
+ bit = hash_ptr(val, IO_WQ_HASH_ORDER);
+ work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+ io_wqe_enqueue(wqe, work);
+}
+
+static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
+{
+ send_sig(SIGINT, worker->task, 1);
+ return false;
+}
+
+/*
+ * Iterate the passed-in list and call the specified function for each
+ * worker that isn't exiting.
+ */
+static bool io_wq_for_each_worker(struct io_wqe *wqe,
+ bool (*func)(struct io_worker *, void *),
+ void *data)
+{
+ struct io_worker *worker;
+ bool ret = false;
+
+ list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+ if (io_worker_get(worker)) {
+ ret = func(worker, data);
+ io_worker_release(worker);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void io_wq_cancel_all(struct io_wq *wq)
+{
+ int node;
+
+ set_bit(IO_WQ_BIT_CANCEL, &wq->state);
+
+ /*
+ * Browse both lists, as there's a gap between handing work off
+ * to a worker and the worker putting itself on the busy_list.
+ */
+ rcu_read_lock();
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+
+ io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
+ }
+ rcu_read_unlock();
+}
+
+struct io_cb_cancel_data {
+ struct io_wqe *wqe;
+ work_cancel_fn *cancel;
+ void *caller_data;
+};
+
+static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
+{
+ struct io_cb_cancel_data *data = cancel_data;
+ unsigned long flags;
+ bool ret = false;
+
+ /*
+ * Hold the lock to avoid ->cur_work going out of scope, as the
+ * caller may dereference the passed-in work.
+ */
+ spin_lock_irqsave(&worker->lock, flags);
+ if (worker->cur_work &&
+ data->cancel(worker->cur_work, data->caller_data)) {
+ send_sig(SIGINT, worker->task, 1);
+ ret = true;
+ }
+ spin_unlock_irqrestore(&worker->lock, flags);
+
+ return ret;
+}
+
+static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
+ work_cancel_fn *cancel,
+ void *cancel_data)
+{
+ struct io_cb_cancel_data data = {
+ .wqe = wqe,
+ .cancel = cancel,
+ .caller_data = cancel_data,
+ };
+ struct io_wq_work_node *node, *prev;
+ struct io_wq_work *work;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&wqe->lock, flags);
+ wq_list_for_each(node, prev, &wqe->work_list) {
+ work = container_of(node, struct io_wq_work, list);
+
+ if (cancel(work, cancel_data)) {
+ wq_node_del(&wqe->work_list, node, prev);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (found) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ return IO_WQ_CANCEL_OK;
+ }
+
+ rcu_read_lock();
+ found = io_wq_for_each_worker(wqe, io_work_cancel, &data);
+ rcu_read_unlock();
+ return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+}
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+ void *data)
+{
+ enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+ int node;
+
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+
+ ret = io_wqe_cancel_cb_work(wqe, cancel, data);
+ if (ret != IO_WQ_CANCEL_NOTFOUND)
+ break;
+ }
+
+ return ret;
+}
+
+static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
+{
+ struct io_wq_work *work = data;
+ unsigned long flags;
+ bool ret = false;
+
+ if (worker->cur_work != work)
+ return false;
+
+ spin_lock_irqsave(&worker->lock, flags);
+ if (worker->cur_work == work) {
+ send_sig(SIGINT, worker->task, 1);
+ ret = true;
+ }
+ spin_unlock_irqrestore(&worker->lock, flags);
+
+ return ret;
+}
+
+static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
+ struct io_wq_work *cwork)
+{
+ struct io_wq_work_node *node, *prev;
+ struct io_wq_work *work;
+ unsigned long flags;
+ bool found = false;
+
+ cwork->flags |= IO_WQ_WORK_CANCEL;
+
+ /*
+ * First check the pending list; if we're lucky we can just remove it
+ * from there. CANCEL_OK means that the work is returned as-new,
+ * and no completion will be posted for it.
+ */
+ spin_lock_irqsave(&wqe->lock, flags);
+ wq_list_for_each(node, prev, &wqe->work_list) {
+ work = container_of(node, struct io_wq_work, list);
+
+ if (work == cwork) {
+ wq_node_del(&wqe->work_list, node, prev);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (found) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ return IO_WQ_CANCEL_OK;
+ }
+
+ /*
+ * Now check if a free (going busy) or busy worker has the work
+ * currently running. If we find it there, we'll return CANCEL_RUNNING
+ * as an indication that we attempted to signal cancellation. The
+ * completion will run normally in this case.
+ */
+ rcu_read_lock();
+ found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, cwork);
+ rcu_read_unlock();
+ return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+}
+
+enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
+{
+ enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+ int node;
+
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+
+ ret = io_wqe_cancel_work(wqe, cwork);
+ if (ret != IO_WQ_CANCEL_NOTFOUND)
+ break;
+ }
+
+ return ret;
+}
+
+struct io_wq_flush_data {
+ struct io_wq_work work;
+ struct completion done;
+};
+
+static void io_wq_flush_func(struct io_wq_work **workptr)
+{
+ struct io_wq_work *work = *workptr;
+ struct io_wq_flush_data *data;
+
+ data = container_of(work, struct io_wq_flush_data, work);
+ complete(&data->done);
+}
+
+/*
+ * Doesn't wait for previously queued work to finish. When this completes,
+ * it just means that previously queued work was started.
+ */
+void io_wq_flush(struct io_wq *wq)
+{
+ struct io_wq_flush_data data;
+ int node;
+
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+
+ init_completion(&data.done);
+ INIT_IO_WORK(&data.work, io_wq_flush_func);
+ data.work.flags |= IO_WQ_WORK_INTERNAL;
+ io_wqe_enqueue(wqe, &data.work);
+ wait_for_completion(&data.done);
+ }
+}
+
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+{
+ int ret = -ENOMEM, node;
+ struct io_wq *wq;
+
+ wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+ if (!wq)
+ return ERR_PTR(-ENOMEM);
+
+ wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
+ if (!wq->wqes) {
+ kfree(wq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ wq->get_work = data->get_work;
+ wq->put_work = data->put_work;
+
+ /* caller must already hold a reference to this */
+ wq->user = data->user;
+ wq->creds = data->creds;
+
+ for_each_node(node) {
+ struct io_wqe *wqe;
+
+ wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, node);
+ if (!wqe)
+ goto err;
+ wq->wqes[node] = wqe;
+ wqe->node = node;
+ wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
+ atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
+ if (wq->user) {
+ wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
+ task_rlimit(current, RLIMIT_NPROC);
+ }
+ atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
+ wqe->node = node;
+ wqe->wq = wq;
+ spin_lock_init(&wqe->lock);
+ INIT_WQ_LIST(&wqe->work_list);
+ INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
+ INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
+ INIT_LIST_HEAD(&wqe->all_list);
+ }
+
+ init_completion(&wq->done);
+
+ /* caller must have already done mmgrab() on this mm */
+ wq->mm = data->mm;
+
+ wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
+ if (!IS_ERR(wq->manager)) {
+ wake_up_process(wq->manager);
+ wait_for_completion(&wq->done);
+ if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ reinit_completion(&wq->done);
+ return wq;
+ }
+
+ ret = PTR_ERR(wq->manager);
+ complete(&wq->done);
+err:
+ for_each_node(node)
+ kfree(wq->wqes[node]);
+ kfree(wq->wqes);
+ kfree(wq);
+ return ERR_PTR(ret);
+}
+
+static bool io_wq_worker_wake(struct io_worker *worker, void *data)
+{
+ wake_up_process(worker->task);
+ return false;
+}
+
+void io_wq_destroy(struct io_wq *wq)
+{
+ int node;
+
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ if (wq->manager)
+ kthread_stop(wq->manager);
+
+ rcu_read_lock();
+ for_each_node(node)
+ io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
+ rcu_read_unlock();
+
+ wait_for_completion(&wq->done);
+
+ for_each_node(node)
+ kfree(wq->wqes[node]);
+ kfree(wq->wqes);
+ kfree(wq);
+}
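Editor's note: for orientation, a minimal sketch of how a caller drives this pool (io_uring is the only user in this series). my_user and my_creds are stand-ins for state the caller already owns, error handling is trimmed, and the work item must stay alive until its func has run.

	static void example_work_fn(struct io_wq_work **workptr)
	{
		struct io_wq_work *work = *workptr;

		if (work->flags & IO_WQ_WORK_CANCEL) {
			/* cancelled before or while running; skip the real work */
			return;
		}
		/* blocking work runs here, with mm/creds set up by the worker */
	}

	struct io_wq_data data = {
		.mm	= current->mm,	/* caller must have done mmgrab() */
		.user	= my_user,	/* caller already holds a reference */
		.creds	= my_creds,
	};
	struct io_wq *wq = io_wq_create(64, &data);	/* <= 64 bound workers per node */
	struct io_wq_work work;

	INIT_IO_WORK(&work, example_work_fn);
	work.flags |= IO_WQ_WORK_NEEDS_USER;	/* worker attaches data.mm before running */
	io_wq_enqueue(wq, &work);
	/* ... later, after all queued work has run ... */
	io_wq_destroy(wq);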
diff --git a/fs/io-wq.h b/fs/io-wq.h
new file mode 100644
index 000000000000..600e0158cba7
--- /dev/null
+++ b/fs/io-wq.h
@@ -0,0 +1,127 @@
+#ifndef INTERNAL_IO_WQ_H
+#define INTERNAL_IO_WQ_H
+
+struct io_wq;
+
+enum {
+ IO_WQ_WORK_CANCEL = 1,
+ IO_WQ_WORK_HAS_MM = 2,
+ IO_WQ_WORK_HASHED = 4,
+ IO_WQ_WORK_NEEDS_USER = 8,
+ IO_WQ_WORK_NEEDS_FILES = 16,
+ IO_WQ_WORK_UNBOUND = 32,
+ IO_WQ_WORK_INTERNAL = 64,
+ IO_WQ_WORK_CB = 128,
+
+ IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */
+};
+
+enum io_wq_cancel {
+ IO_WQ_CANCEL_OK, /* cancelled before started */
+ IO_WQ_CANCEL_RUNNING, /* found, running, and cancellation attempted */
+ IO_WQ_CANCEL_NOTFOUND, /* work not found */
+};
+
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
+};
+
+static inline void wq_list_add_tail(struct io_wq_work_node *node,
+ struct io_wq_work_list *list)
+{
+ if (!list->first) {
+ list->first = list->last = node;
+ } else {
+ list->last->next = node;
+ list->last = node;
+ }
+}
+
+static inline void wq_node_del(struct io_wq_work_list *list,
+ struct io_wq_work_node *node,
+ struct io_wq_work_node *prev)
+{
+ if (node == list->first)
+ list->first = node->next;
+ if (node == list->last)
+ list->last = prev;
+ if (prev)
+ prev->next = node->next;
+}
+
+#define wq_list_for_each(pos, prv, head) \
+ for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)
+
+#define wq_list_empty(list) ((list)->first == NULL)
+#define INIT_WQ_LIST(list) do { \
+ (list)->first = NULL; \
+ (list)->last = NULL; \
+} while (0)
+
+struct io_wq_work {
+ union {
+ struct io_wq_work_node list;
+ void *data;
+ };
+ void (*func)(struct io_wq_work **);
+ struct files_struct *files;
+ unsigned flags;
+};
+
+#define INIT_IO_WORK(work, _func) \
+ do { \
+ (work)->list.next = NULL; \
+ (work)->func = _func; \
+ (work)->flags = 0; \
+ (work)->files = NULL; \
+ } while (0)
+
+typedef void (get_work_fn)(struct io_wq_work *);
+typedef void (put_work_fn)(struct io_wq_work *);
+
+struct io_wq_data {
+ struct mm_struct *mm;
+ struct user_struct *user;
+ struct cred *creds;
+
+ get_work_fn *get_work;
+ put_work_fn *put_work;
+};
+
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
+void io_wq_destroy(struct io_wq *wq);
+
+void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
+void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
+void io_wq_flush(struct io_wq *wq);
+
+void io_wq_cancel_all(struct io_wq *wq);
+enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
+
+typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+ void *data);
+
+#if defined(CONFIG_IO_WQ)
+extern void io_wq_worker_sleeping(struct task_struct *);
+extern void io_wq_worker_running(struct task_struct *);
+#else
+static inline void io_wq_worker_sleeping(struct task_struct *tsk)
+{
+}
+static inline void io_wq_worker_running(struct task_struct *tsk)
+{
+}
+#endif
+
+static inline bool io_wq_current_is_worker(void)
+{
+ return in_task() && (current->flags & PF_IO_WORKER);
+}
+#endif
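Editor's note: the list helpers above implement a singly linked list with a tail pointer, so removal needs the predecessor node; wq_list_for_each tracks it as it walks. A sketch of the delete-while-scanning pattern the cancellation paths in io-wq.c use; matches_key() is hypothetical.

	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (matches_key(work)) {
			/* unlink using the predecessor recorded by the iterator */
			wq_node_del(&wqe->work_list, node, prev);
			break;	/* iteration is not safe after a removal */
		}
	}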
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e2a66e12fbc6..ec53aa7cdc94 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -56,7 +56,6 @@
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
@@ -70,13 +69,26 @@
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
+#include <linux/highmem.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/io_uring.h>
#include <uapi/linux/io_uring.h>
#include "internal.h"
+#include "io-wq.h"
-#define IORING_MAX_ENTRIES 4096
-#define IORING_MAX_FIXED_FILES 1024
+#define IORING_MAX_ENTRIES 32768
+#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
+
+/*
+ * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
+ */
+#define IORING_FILE_TABLE_SHIFT 9
+#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
+#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
+#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
struct io_uring {
u32 head ____cacheline_aligned_in_smp;
@@ -84,27 +96,29 @@ struct io_uring {
};
/*
- * This data is shared with the application through the mmap at offset
- * IORING_OFF_SQ_RING.
+ * This data is shared with the application through the mmap at offsets
+ * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
*
* The offsets to the member fields are published through struct
* io_sqring_offsets when calling io_uring_setup.
*/
-struct io_sq_ring {
+struct io_rings {
/*
* Head and tail offsets into the ring; the offsets need to be
* masked to get valid indices.
*
- * The kernel controls head and the application controls tail.
+ * The kernel controls head of the sq ring and the tail of the cq ring,
+ * and the application controls tail of the sq ring and the head of the
+ * cq ring.
*/
- struct io_uring r;
+ struct io_uring sq, cq;
/*
- * Bitmask to apply to head and tail offsets (constant, equals
+ * Bitmasks to apply to head and tail offsets (constant, equals
* ring_entries - 1)
*/
- u32 ring_mask;
- /* Ring size (constant, power of 2) */
- u32 ring_entries;
+ u32 sq_ring_mask, cq_ring_mask;
+ /* Ring sizes (constant, power of 2) */
+ u32 sq_ring_entries, cq_ring_entries;
/*
* Number of invalid entries dropped by the kernel due to
* invalid index stored in array
@@ -117,7 +131,7 @@ struct io_sq_ring {
* counter includes all submissions that were dropped reaching
* the new SQ head (and possibly more).
*/
- u32 dropped;
+ u32 sq_dropped;
/*
* Runtime flags
*
@@ -127,43 +141,7 @@ struct io_sq_ring {
* The application needs a full memory barrier before checking
* for IORING_SQ_NEED_WAKEUP after updating the sq tail.
*/
- u32 flags;
- /*
- * Ring buffer of indices into array of io_uring_sqe, which is
- * mmapped by the application using the IORING_OFF_SQES offset.
- *
- * This indirection could e.g. be used to assign fixed
- * io_uring_sqe entries to operations and only submit them to
- * the queue when needed.
- *
- * The kernel modifies neither the indices array nor the entries
- * array.
- */
- u32 array[];
-};
-
-/*
- * This data is shared with the application through the mmap at offset
- * IORING_OFF_CQ_RING.
- *
- * The offsets to the member fields are published through struct
- * io_cqring_offsets when calling io_uring_setup.
- */
-struct io_cq_ring {
- /*
- * Head and tail offsets into the ring; the offsets need to be
- * masked to get valid indices.
- *
- * The application controls head and the kernel tail.
- */
- struct io_uring r;
- /*
- * Bitmask to apply to head and tail offsets (constant, equals
- * ring_entries - 1)
- */
- u32 ring_mask;
- /* Ring size (constant, power of 2) */
- u32 ring_entries;
+ u32 sq_flags;
/*
* Number of completion events lost because the queue was full;
* this should be avoided by the application by making sure
@@ -177,7 +155,7 @@ struct io_cq_ring {
* As completion events come in out of order this counter is not
* ordered with any other data.
*/
- u32 overflow;
+ u32 cq_overflow;
/*
* Ring buffer of completion events.
*
@@ -185,7 +163,7 @@ struct io_cq_ring {
* produced, so the application is allowed to modify pending
* entries.
*/
- struct io_uring_cqe cqes[];
+ struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
struct io_mapped_ubuf {
@@ -195,14 +173,8 @@ struct io_mapped_ubuf {
unsigned int nr_bvecs;
};
-struct async_list {
- spinlock_t lock;
- atomic_t cnt;
- struct list_head list;
-
- struct file *file;
- off_t io_end;
- size_t io_pages;
+struct fixed_file_table {
+ struct file **files;
};
struct io_ring_ctx {
@@ -214,42 +186,50 @@ struct io_ring_ctx {
unsigned int flags;
bool compat;
bool account_mem;
+ bool cq_overflow_flushed;
+ bool drain_next;
- /* SQ ring */
- struct io_sq_ring *sq_ring;
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+ * mmapped by the application using the IORING_OFF_SQES offset.
+ *
+ * This indirection could e.g. be used to assign fixed
+ * io_uring_sqe entries to operations and only submit them to
+ * the queue when needed.
+ *
+ * The kernel modifies neither the indices array nor the entries
+ * array.
+ */
+ u32 *sq_array;
unsigned cached_sq_head;
unsigned sq_entries;
unsigned sq_mask;
unsigned sq_thread_idle;
+ unsigned cached_sq_dropped;
+ atomic_t cached_cq_overflow;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
+ struct list_head timeout_list;
+ struct list_head cq_overflow_list;
+
+ wait_queue_head_t inflight_wait;
} ____cacheline_aligned_in_smp;
+ struct io_rings *rings;
+
/* IO offload */
- struct workqueue_struct *sqo_wq;
+ struct io_wq *io_wq;
struct task_struct *sqo_thread; /* if using sq thread polling */
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
- struct completion sqo_thread_started;
-
- struct {
- /* CQ ring */
- struct io_cq_ring *cq_ring;
- unsigned cached_cq_tail;
- unsigned cq_entries;
- unsigned cq_mask;
- struct wait_queue_head cq_wait;
- struct fasync_struct *cq_fasync;
- struct eventfd_ctx *cq_ev_fd;
- } ____cacheline_aligned_in_smp;
/*
* If used, fixed file set. Writers must ensure that ->refs is dead,
* readers must ensure that ->refs is alive as long as the file* is
* used. Only updated through io_uring_register(2).
*/
- struct file **user_files;
+ struct fixed_file_table *file_table;
unsigned nr_user_files;
/* if used, fixed mapped user buffers */
@@ -258,7 +238,27 @@ struct io_ring_ctx {
struct user_struct *user;
- struct completion ctx_done;
+ struct cred *creds;
+
+ /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
+ struct completion *completions;
+
+ /* if all else fails... */
+ struct io_kiocb *fallback_req;
+
+#if defined(CONFIG_UNIX)
+ struct socket *ring_sock;
+#endif
+
+ struct {
+ unsigned cached_cq_tail;
+ unsigned cq_entries;
+ unsigned cq_mask;
+ atomic_t cq_timeouts;
+ struct wait_queue_head cq_wait;
+ struct fasync_struct *cq_fasync;
+ struct eventfd_ctx *cq_ev_fd;
+ } ____cacheline_aligned_in_smp;
struct {
struct mutex uring_lock;
@@ -275,22 +275,11 @@ struct io_ring_ctx {
* manipulate the list, hence no extra locking is needed there.
*/
struct list_head poll_list;
- struct list_head cancel_list;
- } ____cacheline_aligned_in_smp;
-
- struct async_list pending_async[2];
+ struct rb_root cancel_tree;
-#if defined(CONFIG_UNIX)
- struct socket *ring_sock;
-#endif
-};
-
-struct sqe_submit {
- const struct io_uring_sqe *sqe;
- unsigned short index;
- bool has_user;
- bool needs_lock;
- bool needs_fixed_file;
+ spinlock_t inflight_lock;
+ struct list_head inflight_list;
+ } ____cacheline_aligned_in_smp;
};
/*
@@ -303,7 +292,20 @@ struct io_poll_iocb {
__poll_t events;
bool done;
bool canceled;
- struct wait_queue_entry wait;
+ struct wait_queue_entry *wait;
+};
+
+struct io_timeout_data {
+ struct io_kiocb *req;
+ struct hrtimer timer;
+ struct timespec64 ts;
+ enum hrtimer_mode mode;
+ u32 seq_offset;
+};
+
+struct io_timeout {
+ struct file *file;
+ struct io_timeout_data *data;
};
/*
@@ -317,28 +319,48 @@ struct io_kiocb {
struct file *file;
struct kiocb rw;
struct io_poll_iocb poll;
+ struct io_timeout timeout;
};
- struct sqe_submit submit;
+ const struct io_uring_sqe *sqe;
+ struct file *ring_file;
+ int ring_fd;
+ bool has_user;
+ bool in_async;
+ bool needs_fixed_file;
struct io_ring_ctx *ctx;
- struct list_head list;
+ union {
+ struct list_head list;
+ struct rb_node rb_node;
+ };
struct list_head link_list;
unsigned int flags;
refcount_t refs;
#define REQ_F_NOWAIT 1 /* must not punt to workers */
#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
#define REQ_F_FIXED_FILE 4 /* ctx owns file */
-#define REQ_F_SEQ_PREV 8 /* sequential with previous */
+#define REQ_F_LINK_NEXT 8 /* already grabbed next link */
#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
#define REQ_F_IO_DRAINED 32 /* drain done */
#define REQ_F_LINK 64 /* linked sqes */
-#define REQ_F_FAIL_LINK 128 /* fail rest of links */
+#define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */
+#define REQ_F_FAIL_LINK 256 /* fail rest of links */
+#define REQ_F_DRAIN_LINK 512 /* link should be fully drained */
+#define REQ_F_TIMEOUT 1024 /* timeout request */
+#define REQ_F_ISREG 2048 /* regular file */
+#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
+#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
+#define REQ_F_INFLIGHT 16384 /* on inflight list */
+#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
+#define REQ_F_FREE_SQE 65536 /* free sqe if not async queued */
u64 user_data;
u32 result;
u32 sequence;
- struct work_struct work;
+ struct list_head inflight_entry;
+
+ struct io_wq_work work;
};
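
The REQ_F_* values are distinct power-of-two bits, so compound states can be tested with a single mask compare; req_need_defer() below uses exactly that idiom for "drain requested but not yet performed". A small standalone illustration, with the two values copied from the defines above:

#include <stdio.h>

#define REQ_F_IO_DRAIN		16	/* drain existing IO first */
#define REQ_F_IO_DRAINED	32	/* drain done */

static int drain_pending(unsigned int flags)
{
	/* true only when drain was requested and has not completed yet */
	return (flags & (REQ_F_IO_DRAIN | REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN;
}

int main(void)
{
	printf("%d %d %d\n",
	       drain_pending(0),					/* 0: no drain asked */
	       drain_pending(REQ_F_IO_DRAIN),			/* 1: drain still pending */
	       drain_pending(REQ_F_IO_DRAIN | REQ_F_IO_DRAINED));	/* 0: drain already done */
	return 0;
}
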
#define IO_PLUG_THRESHOLD 2
@@ -364,7 +386,14 @@ struct io_submit_state {
unsigned int ios_left;
};
-static void io_sq_wq_submit_work(struct work_struct *work);
+static void io_wq_submit_work(struct io_wq_work **workptr);
+static void io_cqring_fill_event(struct io_kiocb *req, long res);
+static void __io_free_req(struct io_kiocb *req);
+static void io_put_req(struct io_kiocb *req);
+static void io_double_put_req(struct io_kiocb *req);
+static void __io_double_put_req(struct io_kiocb *req);
+static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
+static void io_queue_linked_timeout(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -387,60 +416,75 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
- complete(&ctx->ctx_done);
+ complete(&ctx->completions[0]);
}
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
struct io_ring_ctx *ctx;
- int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
+ ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
+ if (!ctx->fallback_req)
+ goto err;
+
+ ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
+ if (!ctx->completions)
+ goto err;
+
if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
- PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
- kfree(ctx);
- return NULL;
- }
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+ goto err;
ctx->flags = p->flags;
init_waitqueue_head(&ctx->cq_wait);
- init_completion(&ctx->ctx_done);
- init_completion(&ctx->sqo_thread_started);
+ INIT_LIST_HEAD(&ctx->cq_overflow_list);
+ init_completion(&ctx->completions[0]);
+ init_completion(&ctx->completions[1]);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait);
- for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
- spin_lock_init(&ctx->pending_async[i].lock);
- INIT_LIST_HEAD(&ctx->pending_async[i].list);
- atomic_set(&ctx->pending_async[i].cnt, 0);
- }
spin_lock_init(&ctx->completion_lock);
INIT_LIST_HEAD(&ctx->poll_list);
- INIT_LIST_HEAD(&ctx->cancel_list);
+ ctx->cancel_tree = RB_ROOT;
INIT_LIST_HEAD(&ctx->defer_list);
+ INIT_LIST_HEAD(&ctx->timeout_list);
+ init_waitqueue_head(&ctx->inflight_wait);
+ spin_lock_init(&ctx->inflight_lock);
+ INIT_LIST_HEAD(&ctx->inflight_list);
return ctx;
+err:
+ if (ctx->fallback_req)
+ kmem_cache_free(req_cachep, ctx->fallback_req);
+ kfree(ctx->completions);
+ kfree(ctx);
+ return NULL;
}
-static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool __req_need_defer(struct io_kiocb *req)
{
- if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
- return false;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+ + atomic_read(&ctx->cached_cq_overflow);
+}
- return req->sequence > ctx->cached_cq_tail + ctx->sq_ring->dropped;
+static inline bool req_need_defer(struct io_kiocb *req)
+{
+ if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
+ return __req_need_defer(req);
+
+ return false;
}
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
struct io_kiocb *req;
- if (list_empty(&ctx->defer_list))
- return NULL;
-
- req = list_first_entry(&ctx->defer_list, struct io_kiocb, list);
- if (!io_sequence_defer(ctx, req)) {
+ req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
+ if (req && !req_need_defer(req)) {
list_del_init(&req->list);
return req;
}
@@ -448,13 +492,30 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
return NULL;
}
+static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
+{
+ struct io_kiocb *req;
+
+ req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
+ if (req) {
+ if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+ return NULL;
+ if (!__req_need_defer(req)) {
+ list_del_init(&req->list);
+ return req;
+ }
+ }
+
+ return NULL;
+}
+
static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
- struct io_cq_ring *ring = ctx->cq_ring;
+ struct io_rings *rings = ctx->rings;
- if (ctx->cached_cq_tail != READ_ONCE(ring->r.tail)) {
+ if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
/* order cqe stores with ring update */
- smp_store_release(&ring->r.tail, ctx->cached_cq_tail);
+ smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
if (wq_has_sleeper(&ctx->cq_wait)) {
wake_up_interruptible(&ctx->cq_wait);
@@ -463,21 +524,111 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
}
}
+static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
+{
+ u8 opcode = READ_ONCE(sqe->opcode);
+
+ return !(opcode == IORING_OP_READ_FIXED ||
+ opcode == IORING_OP_WRITE_FIXED);
+}
+
+static inline bool io_prep_async_work(struct io_kiocb *req,
+ struct io_kiocb **link)
+{
+ bool do_hashed = false;
+
+ if (req->sqe) {
+ switch (req->sqe->opcode) {
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ do_hashed = true;
+ /* fall-through */
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_SENDMSG:
+ case IORING_OP_RECVMSG:
+ case IORING_OP_ACCEPT:
+ case IORING_OP_POLL_ADD:
+ case IORING_OP_CONNECT:
+ /*
+ * We know REQ_F_ISREG is not set on some of these
+ * opcodes, but this enables us to keep the check in
+ * just one place.
+ */
+ if (!(req->flags & REQ_F_ISREG))
+ req->work.flags |= IO_WQ_WORK_UNBOUND;
+ break;
+ }
+ if (io_sqe_needs_user(req->sqe))
+ req->work.flags |= IO_WQ_WORK_NEEDS_USER;
+ }
+
+ *link = io_prep_linked_timeout(req);
+ return do_hashed;
+}
+
+static inline void io_queue_async_work(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *link;
+ bool do_hashed;
+
+ do_hashed = io_prep_async_work(req, &link);
+
+ trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
+ req->flags);
+ if (!do_hashed) {
+ io_wq_enqueue(ctx->io_wq, &req->work);
+ } else {
+ io_wq_enqueue_hashed(ctx->io_wq, &req->work,
+ file_inode(req->file));
+ }
+
+ if (link)
+ io_queue_linked_timeout(link);
+}
+
+static void io_kill_timeout(struct io_kiocb *req)
+{
+ int ret;
+
+ ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
+ if (ret != -1) {
+ atomic_inc(&req->ctx->cq_timeouts);
+ list_del_init(&req->list);
+ io_cqring_fill_event(req, 0);
+ io_put_req(req);
+ }
+}
+
+static void io_kill_timeouts(struct io_ring_ctx *ctx)
+{
+ struct io_kiocb *req, *tmp;
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
+ io_kill_timeout(req);
+ spin_unlock_irq(&ctx->completion_lock);
+}
+
static void io_commit_cqring(struct io_ring_ctx *ctx)
{
struct io_kiocb *req;
+ while ((req = io_get_timeout_req(ctx)) != NULL)
+ io_kill_timeout(req);
+
__io_commit_cqring(ctx);
while ((req = io_get_deferred_req(ctx)) != NULL) {
req->flags |= REQ_F_IO_DRAINED;
- queue_work(ctx->sqo_wq, &req->work);
+ io_queue_async_work(req);
}
}
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
- struct io_cq_ring *ring = ctx->cq_ring;
+ struct io_rings *rings = ctx->rings;
unsigned tail;
tail = ctx->cached_cq_tail;
@@ -486,17 +637,84 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
* control dependency is enough as we're using WRITE_ONCE to
* fill the cq entry
*/
- if (tail - READ_ONCE(ring->r.head) == ring->ring_entries)
+ if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
return NULL;
ctx->cached_cq_tail++;
- return &ring->cqes[tail & ctx->cq_mask];
+ return &rings->cqes[tail & ctx->cq_mask];
+}
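
The fullness check above leans on head and tail being free-running 32-bit counters: unsigned subtraction still yields the number of in-flight entries across wraparound, and masking keeps the slot index in range. A standalone sketch with a toy 8-entry ring and made-up counter values near the wrap point:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t entries = 8, mask = entries - 1;
	uint32_t head = UINT32_MAX - 2;		/* counters are never masked themselves... */
	uint32_t tail = head + entries;		/* ...so they may wrap past zero */

	assert(tail - head == entries);		/* unsigned math: ring reports full */
	printf("next slot index: %u\n", (unsigned)(tail & mask));
	return 0;
}
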
+
+static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+ if (waitqueue_active(&ctx->sqo_wait))
+ wake_up(&ctx->sqo_wait);
+ if (ctx->cq_ev_fd)
+ eventfd_signal(ctx->cq_ev_fd, 1);
}
-static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
- long res)
+/* Returns true if there are no backlogged entries after the flush */
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
+ struct io_rings *rings = ctx->rings;
struct io_uring_cqe *cqe;
+ struct io_kiocb *req;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ if (!force) {
+ if (list_empty_careful(&ctx->cq_overflow_list))
+ return true;
+ if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
+ rings->cq_ring_entries))
+ return false;
+ }
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+
+ /* if force is set, the ring is going away. always drop after that */
+ if (force)
+ ctx->cq_overflow_flushed = true;
+
+ cqe = NULL;
+ while (!list_empty(&ctx->cq_overflow_list)) {
+ cqe = io_get_cqring(ctx);
+ if (!cqe && !force)
+ break;
+
+ req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
+ list);
+ list_move(&req->list, &list);
+ if (cqe) {
+ WRITE_ONCE(cqe->user_data, req->user_data);
+ WRITE_ONCE(cqe->res, req->result);
+ WRITE_ONCE(cqe->flags, 0);
+ } else {
+ WRITE_ONCE(ctx->rings->cq_overflow,
+ atomic_inc_return(&ctx->cached_cq_overflow));
+ }
+ }
+
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ io_cqring_ev_posted(ctx);
+
+ while (!list_empty(&list)) {
+ req = list_first_entry(&list, struct io_kiocb, list);
+ list_del(&req->list);
+ io_put_req(req);
+ }
+
+ return cqe != NULL;
+}
+
+static void io_cqring_fill_event(struct io_kiocb *req, long res)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_uring_cqe *cqe;
+
+ trace_io_uring_complete(ctx, req->user_data, res);
/*
* If we can't get a cq entry, userspace overflowed the
@@ -504,46 +722,48 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
* the ring.
*/
cqe = io_get_cqring(ctx);
- if (cqe) {
- WRITE_ONCE(cqe->user_data, ki_user_data);
+ if (likely(cqe)) {
+ WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, 0);
+ } else if (ctx->cq_overflow_flushed) {
+ WRITE_ONCE(ctx->rings->cq_overflow,
+ atomic_inc_return(&ctx->cached_cq_overflow));
} else {
- unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
-
- WRITE_ONCE(ctx->cq_ring->overflow, overflow + 1);
+ refcount_inc(&req->refs);
+ req->result = res;
+ list_add_tail(&req->list, &ctx->cq_overflow_list);
}
}
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
-{
- if (waitqueue_active(&ctx->wait))
- wake_up(&ctx->wait);
- if (waitqueue_active(&ctx->sqo_wait))
- wake_up(&ctx->sqo_wait);
- if (ctx->cq_ev_fd)
- eventfd_signal(ctx->cq_ev_fd, 1);
-}
-
-static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
- long res)
+static void io_cqring_add_event(struct io_kiocb *req, long res)
{
+ struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
- io_cqring_fill_event(ctx, user_data, res);
+ io_cqring_fill_event(req, res);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
}
-static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
+static inline bool io_is_fallback_req(struct io_kiocb *req)
{
- percpu_ref_put_many(&ctx->refs, refs);
+ return req == (struct io_kiocb *)
+ ((unsigned long) req->ctx->fallback_req & ~1UL);
+}
- if (waitqueue_active(&ctx->wait))
- wake_up(&ctx->wait);
+static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
+{
+ struct io_kiocb *req;
+
+ req = ctx->fallback_req;
+ if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
+ return req;
+
+ return NULL;
}
static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
@@ -558,7 +778,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
if (!state) {
req = kmem_cache_alloc(req_cachep, gfp);
if (unlikely(!req))
- goto out;
+ goto fallback;
} else if (!state->free_reqs) {
size_t sz;
int ret;
@@ -573,7 +793,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
if (unlikely(ret <= 0)) {
state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
if (!state->reqs[0])
- goto out;
+ goto fallback;
ret = 1;
}
state->free_reqs = ret - 1;
@@ -585,15 +805,21 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
state->cur_req++;
}
+got_it:
+ req->ring_file = NULL;
req->file = NULL;
req->ctx = ctx;
req->flags = 0;
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
req->result = 0;
+ INIT_IO_WORK(&req->work, io_wq_submit_work);
return req;
-out:
- io_ring_drop_ctx_refs(ctx, 1);
+fallback:
+ req = io_get_fallback_req(ctx);
+ if (req)
+ goto got_it;
+ percpu_ref_put(&ctx->refs);
return NULL;
}
@@ -601,22 +827,63 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
{
if (*nr) {
kmem_cache_free_bulk(req_cachep, *nr, reqs);
- io_ring_drop_ctx_refs(ctx, *nr);
+ percpu_ref_put_many(&ctx->refs, *nr);
*nr = 0;
}
}
static void __io_free_req(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (req->flags & REQ_F_FREE_SQE)
+ kfree(req->sqe);
if (req->file && !(req->flags & REQ_F_FIXED_FILE))
fput(req->file);
- io_ring_drop_ctx_refs(req->ctx, 1);
- kmem_cache_free(req_cachep, req);
+ if (req->flags & REQ_F_INFLIGHT) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ list_del(&req->inflight_entry);
+ if (waitqueue_active(&ctx->inflight_wait))
+ wake_up(&ctx->inflight_wait);
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+ }
+ if (req->flags & REQ_F_TIMEOUT)
+ kfree(req->timeout.data);
+ percpu_ref_put(&ctx->refs);
+ if (likely(!io_is_fallback_req(req)))
+ kmem_cache_free(req_cachep, req);
+ else
+ clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
+}
+
+static bool io_link_cancel_timeout(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret;
+
+ ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
+ if (ret != -1) {
+ io_cqring_fill_event(req, -ECANCELED);
+ io_commit_cqring(ctx);
+ req->flags &= ~REQ_F_LINK;
+ io_put_req(req);
+ return true;
+ }
+
+ return false;
}
-static void io_req_link_next(struct io_kiocb *req)
+static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
+ struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt;
+ bool wake_ev = false;
+
+ /* Already got next link */
+ if (req->flags & REQ_F_LINK_NEXT)
+ return;
/*
* The list should never be empty when we are called here. But could
@@ -624,17 +891,30 @@ static void io_req_link_next(struct io_kiocb *req)
* safe side.
*/
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
- if (nxt) {
- list_del(&nxt->list);
+ while (nxt) {
+ list_del_init(&nxt->list);
+
+ if ((req->flags & REQ_F_LINK_TIMEOUT) &&
+ (nxt->flags & REQ_F_TIMEOUT)) {
+ wake_ev |= io_link_cancel_timeout(nxt);
+ nxt = list_first_entry_or_null(&req->link_list,
+ struct io_kiocb, list);
+ req->flags &= ~REQ_F_LINK_TIMEOUT;
+ continue;
+ }
if (!list_empty(&req->link_list)) {
INIT_LIST_HEAD(&nxt->link_list);
list_splice(&req->link_list, &nxt->link_list);
nxt->flags |= REQ_F_LINK;
}
- INIT_WORK(&nxt->work, io_sq_wq_submit_work);
- queue_work(req->ctx->sqo_wq, &nxt->work);
+ *nxtptr = nxt;
+ break;
}
+
+ req->flags |= REQ_F_LINK_NEXT;
+ if (wake_ev)
+ io_cqring_ev_posted(ctx);
}
/*
@@ -642,33 +922,86 @@ static void io_req_link_next(struct io_kiocb *req)
*/
static void io_fail_links(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *link;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
while (!list_empty(&req->link_list)) {
link = list_first_entry(&req->link_list, struct io_kiocb, list);
- list_del(&link->list);
+ list_del_init(&link->list);
+
+ trace_io_uring_fail_link(req, link);
- io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
- __io_free_req(link);
+ if ((req->flags & REQ_F_LINK_TIMEOUT) &&
+ link->sqe->opcode == IORING_OP_LINK_TIMEOUT) {
+ io_link_cancel_timeout(link);
+ } else {
+ io_cqring_fill_event(link, -ECANCELED);
+ __io_double_put_req(link);
+ }
+ req->flags &= ~REQ_F_LINK_TIMEOUT;
}
+
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ io_cqring_ev_posted(ctx);
}
-static void io_free_req(struct io_kiocb *req)
+static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
+ if (likely(!(req->flags & REQ_F_LINK)))
+ return;
+
/*
* If LINK is set, we have dependent requests in this chain. If we
* didn't fail this request, queue the first one up, moving any other
* dependencies to the next request. In case of failure, fail the rest
* of the chain.
*/
- if (req->flags & REQ_F_LINK) {
- if (req->flags & REQ_F_FAIL_LINK)
- io_fail_links(req);
- else
- io_req_link_next(req);
+ if (req->flags & REQ_F_FAIL_LINK) {
+ io_fail_links(req);
+ } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
+ REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long flags;
+
+ /*
+ * If this is a timeout link, we could be racing with the
+ * timeout timer. Grab the completion lock for this case to
+ * protect against that.
+ */
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ io_req_link_next(req, nxt);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ } else {
+ io_req_link_next(req, nxt);
}
+}
+static void io_free_req(struct io_kiocb *req)
+{
+ struct io_kiocb *nxt = NULL;
+
+ io_req_find_next(req, &nxt);
__io_free_req(req);
+
+ if (nxt)
+ io_queue_async_work(nxt);
+}
+
+/*
+ * Drop reference to request, return next in chain (if there is one) if this
+ * was the last reference to this request.
+ */
+__attribute__((nonnull))
+static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+{
+ io_req_find_next(req, nxtptr);
+
+ if (refcount_dec_and_test(&req->refs))
+ __io_free_req(req);
}
static void io_put_req(struct io_kiocb *req)
@@ -678,6 +1011,51 @@ static void io_put_req(struct io_kiocb *req)
}
/*
+ * Must only be used if we don't need to care about links, usually from
+ * within the completion handling itself.
+ */
+static void __io_double_put_req(struct io_kiocb *req)
+{
+ /* drop both submit and complete references */
+ if (refcount_sub_and_test(2, &req->refs))
+ __io_free_req(req);
+}
+
+static void io_double_put_req(struct io_kiocb *req)
+{
+ /* drop both submit and complete references */
+ if (refcount_sub_and_test(2, &req->refs))
+ io_free_req(req);
+}
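
As a toy model of the two-reference scheme (a request starts with two refs, one dropped once submission is done and one at completion; the double-put helpers above drop both at once when a request is finished inline):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: a plain counter standing in for refcount_t. */
struct toy_req { int refs; };

static bool put(struct toy_req *r, int n)
{
	r->refs -= n;
	return r->refs == 0;	/* caller frees the request when this is true */
}

int main(void)
{
	struct toy_req a = { .refs = 2 };
	printf("complete:    free=%d\n", put(&a, 1));	/* completion drops one ref */
	printf("submit done: free=%d\n", put(&a, 1));	/* submission drops the last ref */

	struct toy_req b = { .refs = 2 };
	printf("double put:  free=%d\n", put(&b, 2));	/* both refs dropped at once */
	return 0;
}
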
+
+static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
+{
+ struct io_rings *rings = ctx->rings;
+
+ /*
+ * noflush == true is from the waitqueue handler, just ensure we wake
+ * up the task, and the next invocation will flush the entries. We
+	 * cannot safely do it from here.
+ */
+ if (noflush && !list_empty(&ctx->cq_overflow_list))
+ return -1U;
+
+ io_cqring_overflow_flush(ctx, false);
+
+ /* See comment at the top of this file */
+ smp_rmb();
+ return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
+}
+
+static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
+{
+ struct io_rings *rings = ctx->rings;
+
+ /* make sure SQ entry isn't read before tail */
+ return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+}
+
+/*
* Find and free completed poll iocbs
*/
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
@@ -692,7 +1070,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, list);
list_del(&req->list);
- io_cqring_fill_event(ctx, req->user_data, req->result);
+ io_cqring_fill_event(req, req->result);
(*nr_events)++;
if (refcount_dec_and_test(&req->refs)) {
@@ -701,8 +1079,9 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
* completions for those, only batch free for fixed
* file and non-linked commands.
*/
- if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
- REQ_F_FIXED_FILE) {
+ if (((req->flags &
+ (REQ_F_FIXED_FILE|REQ_F_LINK|REQ_F_FREE_SQE)) ==
+ REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) {
reqs[to_free++] = req;
if (to_free == ARRAY_SIZE(reqs))
io_free_req_many(ctx, reqs, &to_free);
@@ -769,7 +1148,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
long min)
{
- while (!list_empty(&ctx->poll_list)) {
+ while (!list_empty(&ctx->poll_list) && !need_resched()) {
int ret;
ret = io_do_iopoll(ctx, nr_events, min);
@@ -796,18 +1175,47 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
unsigned int nr_events = 0;
io_iopoll_getevents(ctx, &nr_events, 1);
+
+ /*
+ * Ensure we allow local-to-the-cpu processing to take place,
+ * in this case we need to ensure that we reap all events.
+ */
+ cond_resched();
}
mutex_unlock(&ctx->uring_lock);
}
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
- long min)
+static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ long min)
{
- int ret = 0;
+ int iters = 0, ret = 0;
do {
int tmin = 0;
+ /*
+ * Don't enter poll loop if we already have events pending.
+ * If we do, we can potentially be spinning for commands that
+ * already triggered a CQE (eg in error).
+ */
+ if (io_cqring_events(ctx, false))
+ break;
+
+ /*
+ * If a submit got punted to a workqueue, we can have the
+ * application entering polling for a command before it gets
+ * issued. That app will hold the uring_lock for the duration
+ * of the poll right here, so we need to take a breather every
+ * now and then to ensure that the issue has a chance to add
+ * the poll to the issued list. Otherwise we can spin here
+ * forever, while the workqueue is stuck trying to acquire the
+ * very same mutex.
+ */
+ if (!(++iters & 7)) {
+ mutex_unlock(&ctx->uring_lock);
+ mutex_lock(&ctx->uring_lock);
+ }
+
if (*nr_events < min)
tmin = min - *nr_events;
@@ -820,38 +1228,73 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
return ret;
}
-static void kiocb_end_write(struct kiocb *kiocb)
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ long min)
{
- if (kiocb->ki_flags & IOCB_WRITE) {
- struct inode *inode = file_inode(kiocb->ki_filp);
+ int ret;
- /*
- * Tell lockdep we inherited freeze protection from submission
- * thread.
- */
- if (S_ISREG(inode->i_mode))
- __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
- file_end_write(kiocb->ki_filp);
+ /*
+ * We disallow the app entering submit/complete with polling, but we
+ * still need to lock the ring to prevent racing with polled issue
+ * that got punted to a workqueue.
+ */
+ mutex_lock(&ctx->uring_lock);
+ ret = __io_iopoll_check(ctx, nr_events, min);
+ mutex_unlock(&ctx->uring_lock);
+ return ret;
+}
+
+static void kiocb_end_write(struct io_kiocb *req)
+{
+ /*
+ * Tell lockdep we inherited freeze protection from submission
+ * thread.
+ */
+ if (req->flags & REQ_F_ISREG) {
+ struct inode *inode = file_inode(req->file);
+
+ __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
}
+ file_end_write(req->file);
}
-static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
- kiocb_end_write(kiocb);
+ if (kiocb->ki_flags & IOCB_WRITE)
+ kiocb_end_write(req);
if ((req->flags & REQ_F_LINK) && res != req->result)
req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req->ctx, req->user_data, res);
+ io_cqring_add_event(req, res);
+}
+
+static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+
+ io_complete_rw_common(kiocb, res);
io_put_req(req);
}
+static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
+{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+ struct io_kiocb *nxt = NULL;
+
+ io_complete_rw_common(kiocb, res);
+ io_put_req_find_next(req, &nxt);
+
+ return nxt;
+}
+
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
- kiocb_end_write(kiocb);
+ if (kiocb->ki_flags & IOCB_WRITE)
+ kiocb_end_write(req);
if ((req->flags & REQ_F_LINK) && res != req->result)
req->flags |= REQ_F_FAIL_LINK;
@@ -953,10 +1396,9 @@ static bool io_file_supports_async(struct file *file)
return false;
}
-static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
- bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
{
- const struct io_uring_sqe *sqe = s->sqe;
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw;
unsigned ioprio;
@@ -965,8 +1407,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
if (!req->file)
return -EBADF;
- if (force_nonblock && !io_file_supports_async(req->file))
- force_nonblock = false;
+ if (S_ISREG(file_inode(req->file)->i_mode))
+ req->flags |= REQ_F_ISREG;
+
+ /*
+ * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
+ * we know to async punt it even if it was opened O_NONBLOCK
+ */
+ if (force_nonblock && !io_file_supports_async(req->file)) {
+ req->flags |= REQ_F_MUST_PUNT;
+ return -EAGAIN;
+ }
kiocb->ki_pos = READ_ONCE(sqe->off);
kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
@@ -987,7 +1438,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
return ret;
/* don't allow async punt if RWF_NOWAIT was requested */
- if (kiocb->ki_flags & IOCB_NOWAIT)
+ if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+ (req->file->f_flags & O_NONBLOCK))
req->flags |= REQ_F_NOWAIT;
if (force_nonblock)
@@ -1000,6 +1452,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
kiocb->ki_flags |= IOCB_HIPRI;
kiocb->ki_complete = io_complete_rw_iopoll;
+ req->result = 0;
} else {
if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL;
@@ -1029,9 +1482,18 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
}
}
-static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
- const struct io_uring_sqe *sqe,
- struct iov_iter *iter)
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
+ bool in_async)
+{
+ if (in_async && ret >= 0 && kiocb->ki_complete == io_complete_rw)
+ *nxt = __io_complete_rw(kiocb, ret);
+ else
+ io_rw_done(kiocb, ret);
+}
+
+static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
+ const struct io_uring_sqe *sqe,
+ struct iov_iter *iter)
{
size_t len = READ_ONCE(sqe->len);
struct io_mapped_ubuf *imu;
@@ -1064,16 +1526,49 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
*/
offset = buf_addr - imu->ubuf;
iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
- if (offset)
- iov_iter_advance(iter, offset);
- return 0;
+
+ if (offset) {
+ /*
+ * Don't use iov_iter_advance() here, as it's really slow for
+ * using the latter parts of a big fixed buffer - it iterates
+ * over each segment manually. We can cheat a bit here, because
+ * we know that:
+ *
+ * 1) it's a BVEC iter, we set it up
+ * 2) all bvecs are PAGE_SIZE in size, except potentially the
+ * first and last bvec
+ *
+ * So just find our index, and adjust the iterator afterwards.
+	 * If the offset is within the first bvec (or the whole first
+	 * bvec), just use iov_iter_advance(). This makes it easier
+ * since we can just skip the first segment, which may not
+ * be PAGE_SIZE aligned.
+ */
+ const struct bio_vec *bvec = imu->bvec;
+
+ if (offset <= bvec->bv_len) {
+ iov_iter_advance(iter, offset);
+ } else {
+ unsigned long seg_skip;
+
+ /* skip first vec */
+ offset -= bvec->bv_len;
+ seg_skip = 1 + (offset >> PAGE_SHIFT);
+
+ iter->bvec = bvec + seg_skip;
+ iter->nr_segs -= seg_skip;
+ iter->count -= bvec->bv_len + offset;
+ iter->iov_offset = offset & ~PAGE_MASK;
+ }
+ }
+
+ return len;
}
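
The segment-skip arithmetic above can be sanity-checked in isolation. A standalone sketch with made-up sizes: a 2000-byte unaligned first bvec followed by full 4 KiB pages, and a request that starts 10000 bytes into the fixed buffer:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long first_len = 2000;	/* short, unaligned first segment */
	unsigned long offset = 10000;	/* request offset into the fixed buffer */

	if (offset <= first_len) {
		printf("stay in first bvec, advance %lu bytes\n", offset);
	} else {
		unsigned long seg_skip, in_page;

		offset -= first_len;			/* skip the short first segment */
		seg_skip = 1 + (offset >> PAGE_SHIFT);	/* whole bvecs to jump over */
		in_page = offset & (PAGE_SIZE - 1);	/* byte offset in the landing bvec */

		printf("skip %lu bvecs, start %lu bytes into the next one\n",
		       seg_skip, in_page);		/* prints: skip 2 bvecs, start 3904 ... */
	}
	return 0;
}
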
-static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
- const struct sqe_submit *s, struct iovec **iovec,
- struct iov_iter *iter)
+static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+ struct iovec **iovec, struct iov_iter *iter)
{
- const struct io_uring_sqe *sqe = s->sqe;
+ const struct io_uring_sqe *sqe = req->sqe;
void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
size_t sqe_len = READ_ONCE(sqe->len);
u8 opcode;
@@ -1087,18 +1582,16 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
* flag.
*/
opcode = READ_ONCE(sqe->opcode);
- if (opcode == IORING_OP_READ_FIXED ||
- opcode == IORING_OP_WRITE_FIXED) {
- ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
+ if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
*iovec = NULL;
- return ret;
+ return io_import_fixed(req->ctx, rw, sqe, iter);
}
- if (!s->has_user)
+ if (!req->has_user)
return -EFAULT;
#ifdef CONFIG_COMPAT
- if (ctx->compat)
+ if (req->ctx->compat)
return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
iovec, iter);
#endif
@@ -1107,47 +1600,64 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
}
/*
- * Make a note of the last file/offset/direction we punted to async
- * context. We'll use this information to see if we can piggy back a
- * sequential request onto the previous one, if it's still hasn't been
- * completed by the async worker.
+ * For files that don't have ->read_iter() and ->write_iter(), handle them
+ * by looping over ->read() or ->write() manually.
*/
-static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
+static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
+ struct iov_iter *iter)
{
- struct async_list *async_list = &req->ctx->pending_async[rw];
- struct kiocb *kiocb = &req->rw;
- struct file *filp = kiocb->ki_filp;
- off_t io_end = kiocb->ki_pos + len;
-
- if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
- unsigned long max_pages;
-
- /* Use 8x RA size as a decent limiter for both reads/writes */
- max_pages = filp->f_ra.ra_pages;
- if (!max_pages)
- max_pages = VM_READAHEAD_PAGES;
- max_pages *= 8;
-
- /* If max pages are exceeded, reset the state */
- len >>= PAGE_SHIFT;
- if (async_list->io_pages + len <= max_pages) {
- req->flags |= REQ_F_SEQ_PREV;
- async_list->io_pages += len;
+ ssize_t ret = 0;
+
+ /*
+ * Don't support polled IO through this interface, and we can't
+ * support non-blocking either. For the latter, this just causes
+ * the kiocb to be handled from an async context.
+ */
+ if (kiocb->ki_flags & IOCB_HIPRI)
+ return -EOPNOTSUPP;
+ if (kiocb->ki_flags & IOCB_NOWAIT)
+ return -EAGAIN;
+
+ while (iov_iter_count(iter)) {
+ struct iovec iovec;
+ ssize_t nr;
+
+ if (!iov_iter_is_bvec(iter)) {
+ iovec = iov_iter_iovec(iter);
} else {
- io_end = 0;
- async_list->io_pages = 0;
+ /* fixed buffers import bvec */
+ iovec.iov_base = kmap(iter->bvec->bv_page)
+ + iter->iov_offset;
+ iovec.iov_len = min(iter->count,
+ iter->bvec->bv_len - iter->iov_offset);
}
- }
- /* New file? Reset state. */
- if (async_list->file != filp) {
- async_list->io_pages = 0;
- async_list->file = filp;
+ if (rw == READ) {
+ nr = file->f_op->read(file, iovec.iov_base,
+ iovec.iov_len, &kiocb->ki_pos);
+ } else {
+ nr = file->f_op->write(file, iovec.iov_base,
+ iovec.iov_len, &kiocb->ki_pos);
+ }
+
+ if (iov_iter_is_bvec(iter))
+ kunmap(iter->bvec->bv_page);
+
+ if (nr < 0) {
+ if (!ret)
+ ret = nr;
+ break;
+ }
+ ret += nr;
+ if (nr != iovec.iov_len)
+ break;
+ iov_iter_advance(iter, nr);
}
- async_list->io_end = io_end;
+
+ return ret;
}
-static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
+static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
@@ -1157,17 +1667,15 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count;
ssize_t read_size, ret;
- ret = io_prep_rw(req, s, force_nonblock);
+ ret = io_prep_rw(req, force_nonblock);
if (ret)
return ret;
file = kiocb->ki_filp;
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
- if (unlikely(!file->f_op->read_iter))
- return -EINVAL;
- ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
+ ret = io_import_iovec(READ, req, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1180,7 +1688,11 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
if (!ret) {
ssize_t ret2;
- ret2 = call_read_iter(file, kiocb, &iter);
+ if (file->f_op->read_iter)
+ ret2 = call_read_iter(file, kiocb, &iter);
+ else
+ ret2 = loop_rw_iter(READ, file, kiocb, &iter);
+
/*
* In case of a short read, punt to async. This can happen
* if we have data partially cached. Alternatively we can
@@ -1189,26 +1701,21 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
* need async punt anyway, so it's more efficient to do it
* here.
*/
- if (force_nonblock && ret2 > 0 && ret2 < read_size)
+ if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
+ (req->flags & REQ_F_ISREG) &&
+ ret2 > 0 && ret2 < read_size)
ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
- if (!force_nonblock || ret2 != -EAGAIN) {
- io_rw_done(kiocb, ret2);
- } else {
- /*
- * If ->needs_lock is true, we're already in async
- * context.
- */
- if (!s->needs_lock)
- io_async_list_note(READ, req, iov_count);
+ if (!force_nonblock || ret2 != -EAGAIN)
+ kiocb_done(kiocb, ret2, nxt, req->in_async);
+ else
ret = -EAGAIN;
- }
}
kfree(iovec);
return ret;
}
-static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
+static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
@@ -1218,17 +1725,15 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count;
ssize_t ret;
- ret = io_prep_rw(req, s, force_nonblock);
+ ret = io_prep_rw(req, force_nonblock);
if (ret)
return ret;
file = kiocb->ki_filp;
if (unlikely(!(file->f_mode & FMODE_WRITE)))
return -EBADF;
- if (unlikely(!file->f_op->write_iter))
- return -EINVAL;
- ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
+ ret = io_import_iovec(WRITE, req, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1238,12 +1743,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
iov_count = iov_iter_count(&iter);
ret = -EAGAIN;
- if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
- /* If ->needs_lock is true, we're already in async context. */
- if (!s->needs_lock)
- io_async_list_note(WRITE, req, iov_count);
+ if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
goto out_free;
- }
ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
if (!ret) {
@@ -1256,7 +1757,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
* released so that it doesn't complain about the held lock when
* we return to userspace.
*/
- if (S_ISREG(file_inode(file)->i_mode)) {
+ if (req->flags & REQ_F_ISREG) {
__sb_start_write(file_inode(file)->i_sb,
SB_FREEZE_WRITE, true);
__sb_writers_release(file_inode(file)->i_sb,
@@ -1264,18 +1765,14 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
}
kiocb->ki_flags |= IOCB_WRITE;
- ret2 = call_write_iter(file, kiocb, &iter);
- if (!force_nonblock || ret2 != -EAGAIN) {
- io_rw_done(kiocb, ret2);
- } else {
- /*
- * If ->needs_lock is true, we're already in async
- * context.
- */
- if (!s->needs_lock)
- io_async_list_note(WRITE, req, iov_count);
+ if (file->f_op->write_iter)
+ ret2 = call_write_iter(file, kiocb, &iter);
+ else
+ ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
+ if (!force_nonblock || ret2 != -EAGAIN)
+ kiocb_done(kiocb, ret2, nxt, req->in_async);
+ else
ret = -EAGAIN;
- }
}
out_free:
kfree(iovec);
@@ -1285,15 +1782,14 @@ out_free:
/*
* IORING_OP_NOP just posts a completion event, nothing else.
*/
-static int io_nop(struct io_kiocb *req, u64 user_data)
+static int io_nop(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- long err = 0;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- io_cqring_add_event(ctx, user_data, err);
+ io_cqring_add_event(req, 0);
io_put_req(req);
return 0;
}
@@ -1314,7 +1810,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
loff_t sqe_off = READ_ONCE(sqe->off);
loff_t sqe_len = READ_ONCE(sqe->len);
@@ -1340,8 +1836,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
return 0;
}
@@ -1363,6 +1859,7 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_sync_file_range(struct io_kiocb *req,
const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt,
bool force_nonblock)
{
loff_t sqe_off;
@@ -1386,14 +1883,14 @@ static int io_sync_file_range(struct io_kiocb *req,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
return 0;
}
#if defined(CONFIG_NET)
static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock,
+ struct io_kiocb **nxt, bool force_nonblock,
long (*fn)(struct socket *, struct user_msghdr __user *,
unsigned int))
{
@@ -1422,59 +1919,161 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return ret;
}
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_cqring_add_event(req, ret);
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req_find_next(req, nxt);
return 0;
}
#endif
static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
- return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
+ return io_send_recvmsg(req, sqe, nxt, force_nonblock,
+ __sys_sendmsg_sock);
#else
return -EOPNOTSUPP;
#endif
}
static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
- return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
+ return io_send_recvmsg(req, sqe, nxt, force_nonblock,
+ __sys_recvmsg_sock);
#else
return -EOPNOTSUPP;
#endif
}
+static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt, bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+ struct sockaddr __user *addr;
+ int __user *addr_len;
+ unsigned file_flags;
+ int flags, ret;
+
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->len || sqe->buf_index)
+ return -EINVAL;
+
+ addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
+ addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
+ flags = READ_ONCE(sqe->accept_flags);
+ file_flags = force_nonblock ? O_NONBLOCK : 0;
+
+ ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags);
+ if (ret == -EAGAIN && force_nonblock) {
+ req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
+ return -EAGAIN;
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt, bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+ struct sockaddr __user *addr;
+ unsigned file_flags;
+ int addr_len, ret;
+
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
+ return -EINVAL;
+
+ addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
+ addr_len = READ_ONCE(sqe->addr2);
+ file_flags = force_nonblock ? O_NONBLOCK : 0;
+
+ ret = __sys_connect_file(req->file, addr, addr_len, file_flags);
+ if (ret == -EAGAIN && force_nonblock)
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static inline void io_poll_remove_req(struct io_kiocb *req)
+{
+ if (!RB_EMPTY_NODE(&req->rb_node)) {
+ rb_erase(&req->rb_node, &req->ctx->cancel_tree);
+ RB_CLEAR_NODE(&req->rb_node);
+ }
+}
+
static void io_poll_remove_one(struct io_kiocb *req)
{
struct io_poll_iocb *poll = &req->poll;
spin_lock(&poll->head->lock);
WRITE_ONCE(poll->canceled, true);
- if (!list_empty(&poll->wait.entry)) {
- list_del_init(&poll->wait.entry);
- queue_work(req->ctx->sqo_wq, &req->work);
+ if (!list_empty(&poll->wait->entry)) {
+ list_del_init(&poll->wait->entry);
+ io_queue_async_work(req);
}
spin_unlock(&poll->head->lock);
-
- list_del_init(&req->list);
+ io_poll_remove_req(req);
}
static void io_poll_remove_all(struct io_ring_ctx *ctx)
{
+ struct rb_node *node;
struct io_kiocb *req;
spin_lock_irq(&ctx->completion_lock);
- while (!list_empty(&ctx->cancel_list)) {
- req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
+ while ((node = rb_first(&ctx->cancel_tree)) != NULL) {
+ req = rb_entry(node, struct io_kiocb, rb_node);
io_poll_remove_one(req);
}
spin_unlock_irq(&ctx->completion_lock);
}
+static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+{
+ struct rb_node *p, *parent = NULL;
+ struct io_kiocb *req;
+
+ p = ctx->cancel_tree.rb_node;
+ while (p) {
+ parent = p;
+ req = rb_entry(parent, struct io_kiocb, rb_node);
+ if (sqe_addr < req->user_data) {
+ p = p->rb_left;
+ } else if (sqe_addr > req->user_data) {
+ p = p->rb_right;
+ } else {
+ io_poll_remove_one(req);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
/*
* Find a running poll command that matches one specified in sqe->addr,
* and remove it if found.
@@ -1482,8 +2081,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *poll_req, *next;
- int ret = -ENOENT;
+ int ret;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -1492,37 +2090,48 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
spin_lock_irq(&ctx->completion_lock);
- list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
- if (READ_ONCE(sqe->addr) == poll_req->user_data) {
- io_poll_remove_one(poll_req);
- ret = 0;
- break;
- }
- }
+ ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
spin_unlock_irq(&ctx->completion_lock);
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
+ io_cqring_add_event(req, ret);
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
return 0;
}
-static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
- __poll_t mask)
+static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
req->poll.done = true;
- io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
+ kfree(req->poll.wait);
+ if (error)
+ io_cqring_fill_event(req, error);
+ else
+ io_cqring_fill_event(req, mangle_poll(mask));
io_commit_cqring(ctx);
}
-static void io_poll_complete_work(struct work_struct *work)
+static void io_poll_complete_work(struct io_wq_work **workptr)
{
+ struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_poll_iocb *poll = &req->poll;
struct poll_table_struct pt = { ._key = poll->events };
struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *nxt = NULL;
__poll_t mask = 0;
+ int ret = 0;
- if (!READ_ONCE(poll->canceled))
+ if (work->flags & IO_WQ_WORK_CANCEL) {
+ WRITE_ONCE(poll->canceled, true);
+ ret = -ECANCELED;
+ } else if (READ_ONCE(poll->canceled)) {
+ ret = -ECANCELED;
+ }
+
+ if (ret != -ECANCELED)
mask = vfs_poll(poll->file, &pt) & poll->events;
/*
@@ -1533,24 +2142,28 @@ static void io_poll_complete_work(struct work_struct *work)
* avoid further branches in the fast path.
*/
spin_lock_irq(&ctx->completion_lock);
- if (!mask && !READ_ONCE(poll->canceled)) {
- add_wait_queue(poll->head, &poll->wait);
+ if (!mask && ret != -ECANCELED) {
+ add_wait_queue(poll->head, poll->wait);
spin_unlock_irq(&ctx->completion_lock);
return;
}
- list_del_init(&req->list);
- io_poll_complete(ctx, req, mask);
+ io_poll_remove_req(req);
+ io_poll_complete(req, mask, ret);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
- io_put_req(req);
+
+ if (ret < 0 && req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req_find_next(req, &nxt);
+ if (nxt)
+ *workptr = &nxt->work;
}
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
void *key)
{
- struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
- wait);
+ struct io_poll_iocb *poll = wait->private;
struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
struct io_ring_ctx *ctx = req->ctx;
__poll_t mask = key_to_poll(key);
@@ -1560,17 +2173,24 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (mask && !(mask & poll->events))
return 0;
- list_del_init(&poll->wait.entry);
+ list_del_init(&poll->wait->entry);
+ /*
+ * Run completion inline if we can. We're using trylock here because
+ * we are violating the completion_lock -> poll wq lock ordering.
+ * If we have a link timeout we're going to need the completion_lock
+ * for finalizing the request, mark us as having grabbed that already.
+ */
if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
- list_del(&req->list);
- io_poll_complete(ctx, req, mask);
+ io_poll_remove_req(req);
+ io_poll_complete(req, mask, 0);
+ req->flags |= REQ_F_COMP_LOCKED;
+ io_put_req(req);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
- io_put_req(req);
} else {
- queue_work(ctx->sqo_wq, &req->work);
+ io_queue_async_work(req);
}
return 1;
@@ -1594,10 +2214,30 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
pt->error = 0;
pt->req->poll.head = head;
- add_wait_queue(head, &pt->req->poll.wait);
+ add_wait_queue(head, pt->req->poll.wait);
}
-static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static void io_poll_req_insert(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct rb_node **p = &ctx->cancel_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct io_kiocb *tmp;
+
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct io_kiocb, rb_node);
+ if (req->user_data < tmp->user_data)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&req->rb_node, parent, p);
+ rb_insert_color(&req->rb_node, &ctx->cancel_tree);
+}
+
+static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt)
{
struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx;
@@ -1613,9 +2253,15 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!poll->file)
return -EBADF;
- INIT_WORK(&req->work, io_poll_complete_work);
+ poll->wait = kmalloc(sizeof(*poll->wait), GFP_KERNEL);
+ if (!poll->wait)
+ return -ENOMEM;
+
+ req->sqe = NULL;
+ INIT_IO_WORK(&req->work, io_poll_complete_work);
events = READ_ONCE(sqe->poll_events);
poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
+ RB_CLEAR_NODE(&req->rb_node);
poll->head = NULL;
poll->done = false;
@@ -1627,47 +2273,338 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
/* initialized the list so that we can do list_empty checks */
- INIT_LIST_HEAD(&poll->wait.entry);
- init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+ INIT_LIST_HEAD(&poll->wait->entry);
+ init_waitqueue_func_entry(poll->wait, io_poll_wake);
+ poll->wait->private = poll;
+
+ INIT_LIST_HEAD(&req->list);
mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
spin_lock_irq(&ctx->completion_lock);
if (likely(poll->head)) {
spin_lock(&poll->head->lock);
- if (unlikely(list_empty(&poll->wait.entry))) {
+ if (unlikely(list_empty(&poll->wait->entry))) {
if (ipt.error)
cancel = true;
ipt.error = 0;
mask = 0;
}
if (mask || ipt.error)
- list_del_init(&poll->wait.entry);
+ list_del_init(&poll->wait->entry);
else if (cancel)
WRITE_ONCE(poll->canceled, true);
else if (!poll->done) /* actually waiting for an event */
- list_add_tail(&req->list, &ctx->cancel_list);
+ io_poll_req_insert(req);
spin_unlock(&poll->head->lock);
}
if (mask) { /* no async, we'd stolen it */
ipt.error = 0;
- io_poll_complete(ctx, req, mask);
+ io_poll_complete(req, mask, 0);
}
spin_unlock_irq(&ctx->completion_lock);
if (mask) {
io_cqring_ev_posted(ctx);
- io_put_req(req);
+ io_put_req_find_next(req, nxt);
}
return ipt.error;
}
-static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
+{
+ struct io_timeout_data *data = container_of(timer,
+ struct io_timeout_data, timer);
+ struct io_kiocb *req = data->req;
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long flags;
+
+ atomic_inc(&ctx->cq_timeouts);
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ /*
+ * We could be racing with timeout deletion. If the list is empty,
+ * then timeout lookup already found it and will be handling it.
+ */
+ if (!list_empty(&req->list)) {
+ struct io_kiocb *prev;
+
+ /*
+ * Adjust the reqs sequence before the current one because it
+	 * will consume a slot in the cq_ring and the cq_tail
+ * pointer will be increased, otherwise other timeout reqs may
+ * return in advance without waiting for enough wait_nr.
+ */
+ prev = req;
+ list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
+ prev->sequence++;
+ list_del_init(&req->list);
+ }
+
+ io_cqring_fill_event(req, -ETIME);
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+ io_cqring_ev_posted(ctx);
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req(req);
+ return HRTIMER_NORESTART;
+}
+
+static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+{
+ struct io_kiocb *req;
+ int ret = -ENOENT;
+
+ list_for_each_entry(req, &ctx->timeout_list, list) {
+ if (user_data == req->user_data) {
+ list_del_init(&req->list);
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret == -ENOENT)
+ return ret;
+
+ ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
+ if (ret == -1)
+ return -EALREADY;
+
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_fill_event(req, -ECANCELED);
+ io_put_req(req);
+ return 0;
+}
+
+/*
+ * Remove or update an existing timeout command
+ */
+static int io_timeout_remove(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned flags;
+ int ret;
+
+ if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
+ return -EINVAL;
+ flags = READ_ONCE(sqe->timeout_flags);
+ if (flags)
+ return -EINVAL;
+
+ spin_lock_irq(&ctx->completion_lock);
+ ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
+
+ io_cqring_fill_event(req, ret);
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+ if (ret < 0 && req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req(req);
+ return 0;
+}
+
+static int io_timeout_setup(struct io_kiocb *req)
+{
+ const struct io_uring_sqe *sqe = req->sqe;
+ struct io_timeout_data *data;
+ unsigned flags;
+
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
+ return -EINVAL;
+ flags = READ_ONCE(sqe->timeout_flags);
+ if (flags & ~IORING_TIMEOUT_ABS)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(struct io_timeout_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->req = req;
+ req->timeout.data = data;
+ req->flags |= REQ_F_TIMEOUT;
+
+ if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
+ return -EFAULT;
+
+ if (flags & IORING_TIMEOUT_ABS)
+ data->mode = HRTIMER_MODE_ABS;
+ else
+ data->mode = HRTIMER_MODE_REL;
+
+ hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
+ return 0;
+}
+
+static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ unsigned count;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_timeout_data *data;
+ struct list_head *entry;
+ unsigned span = 0;
+ int ret;
+
+ ret = io_timeout_setup(req);
+ /* common setup allows flags (like links) set, we don't */
+ if (!ret && sqe->flags)
+ ret = -EINVAL;
+ if (ret)
+ return ret;
+
+ /*
+ * sqe->off holds how many events that need to occur for this
+ * timeout event to be satisfied. If it isn't set, then this is
+ * a pure timeout request, sequence isn't used.
+ */
+ count = READ_ONCE(sqe->off);
+ if (!count) {
+ req->flags |= REQ_F_TIMEOUT_NOSEQ;
+ spin_lock_irq(&ctx->completion_lock);
+ entry = ctx->timeout_list.prev;
+ goto add;
+ }
+
+ req->sequence = ctx->cached_sq_head + count - 1;
+ req->timeout.data->seq_offset = count;
+
+ /*
+ * Insertion sort, ensuring the first entry in the list is always
+ * the one we need first.
+ */
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_prev(entry, &ctx->timeout_list) {
+ struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
+ unsigned nxt_sq_head;
+ long long tmp, tmp_nxt;
+ u32 nxt_offset = nxt->timeout.data->seq_offset;
+
+ if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+ continue;
+
+ /*
+ * Since cached_sq_head + count - 1 can overflow, use type long
+ * long to store it.
+ */
+ tmp = (long long)ctx->cached_sq_head + count - 1;
+ nxt_sq_head = nxt->sequence - nxt_offset + 1;
+ tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1;
+
+ /*
+ * cached_sq_head may overflow, but it will never overflow twice
+ * while there is still a valid timeout req pending.
+ */
+ if (ctx->cached_sq_head < nxt_sq_head)
+ tmp += UINT_MAX;
+
+ if (tmp > tmp_nxt)
+ break;
+
+ /*
+ * The sequence of the reqs after the inserted one, and of the
+ * inserted req itself, must be adjusted because each timeout req
+ * consumes a slot.
+ */
+ span++;
+ nxt->sequence++;
+ }
+ req->sequence -= span;
+add:
+ list_add(&req->list, entry);
+ data = req->timeout.data;
+ data->timer.function = io_timeout_fn;
+ hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
+ spin_unlock_irq(&ctx->completion_lock);
+ return 0;
+}
+
+static bool io_cancel_cb(struct io_wq_work *work, void *data)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ return req->user_data == (unsigned long) data;
+}
+
+static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
+{
+ enum io_wq_cancel cancel_ret;
+ int ret = 0;
+
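+ /*
+ * Map the io-wq result: cancelled is success, already running means we
+ * were too late to cancel, not found means it never ran through io-wq.
+ */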
+ cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
+ switch (cancel_ret) {
+ case IO_WQ_CANCEL_OK:
+ ret = 0;
+ break;
+ case IO_WQ_CANCEL_RUNNING:
+ ret = -EALREADY;
+ break;
+ case IO_WQ_CANCEL_NOTFOUND:
+ ret = -ENOENT;
+ break;
+ }
+
+ return ret;
+}
+
+static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
+ struct io_kiocb *req, __u64 sqe_addr,
+ struct io_kiocb **nxt, int success_ret)
+{
+ unsigned long flags;
+ int ret;
+
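+ /* try the io-wq queue first, then pending timeouts, then poll requests */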
+ ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
+ if (ret != -ENOENT) {
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ goto done;
+ }
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ ret = io_timeout_cancel(ctx, sqe_addr);
+ if (ret != -ENOENT)
+ goto done;
+ ret = io_poll_cancel(ctx, sqe_addr);
+done:
+ if (!ret)
+ ret = success_ret;
+ io_cqring_fill_event(req, ret);
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ io_cqring_ev_posted(ctx);
+
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req_find_next(req, nxt);
+}
+
+static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
+ sqe->cancel_flags)
+ return -EINVAL;
+
+ io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0);
+ return 0;
+}
+
+static int io_req_defer(struct io_kiocb *req)
{
struct io_uring_sqe *sqe_copy;
+ struct io_ring_ctx *ctx = req->ctx;
- if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
+ /* Still need to defer if there are pending reqs in the defer list. */
+ if (!req_need_defer(req) && list_empty(&ctx->defer_list))
return 0;
sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -1675,69 +2612,82 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
return -EAGAIN;
spin_lock_irq(&ctx->completion_lock);
- if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
+ if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
spin_unlock_irq(&ctx->completion_lock);
kfree(sqe_copy);
return 0;
}
- memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
- req->submit.sqe = sqe_copy;
+ memcpy(sqe_copy, req->sqe, sizeof(*sqe_copy));
+ req->flags |= REQ_F_FREE_SQE;
+ req->sqe = sqe_copy;
- INIT_WORK(&req->work, io_sq_wq_submit_work);
+ trace_io_uring_defer(ctx, req, req->user_data);
list_add_tail(&req->list, &ctx->defer_list);
spin_unlock_irq(&ctx->completion_lock);
return -EIOCBQUEUED;
}
-static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct sqe_submit *s, bool force_nonblock)
+__attribute__((nonnull))
+static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
int ret, opcode;
+ struct io_ring_ctx *ctx = req->ctx;
- req->user_data = READ_ONCE(s->sqe->user_data);
-
- if (unlikely(s->index >= ctx->sq_entries))
- return -EINVAL;
-
- opcode = READ_ONCE(s->sqe->opcode);
+ opcode = READ_ONCE(req->sqe->opcode);
switch (opcode) {
case IORING_OP_NOP:
- ret = io_nop(req, req->user_data);
+ ret = io_nop(req);
break;
case IORING_OP_READV:
- if (unlikely(s->sqe->buf_index))
+ if (unlikely(req->sqe->buf_index))
return -EINVAL;
- ret = io_read(req, s, force_nonblock);
+ ret = io_read(req, nxt, force_nonblock);
break;
case IORING_OP_WRITEV:
- if (unlikely(s->sqe->buf_index))
+ if (unlikely(req->sqe->buf_index))
return -EINVAL;
- ret = io_write(req, s, force_nonblock);
+ ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_READ_FIXED:
- ret = io_read(req, s, force_nonblock);
+ ret = io_read(req, nxt, force_nonblock);
break;
case IORING_OP_WRITE_FIXED:
- ret = io_write(req, s, force_nonblock);
+ ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_FSYNC:
- ret = io_fsync(req, s->sqe, force_nonblock);
+ ret = io_fsync(req, req->sqe, nxt, force_nonblock);
break;
case IORING_OP_POLL_ADD:
- ret = io_poll_add(req, s->sqe);
+ ret = io_poll_add(req, req->sqe, nxt);
break;
case IORING_OP_POLL_REMOVE:
- ret = io_poll_remove(req, s->sqe);
+ ret = io_poll_remove(req, req->sqe);
break;
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_sync_file_range(req, s->sqe, force_nonblock);
+ ret = io_sync_file_range(req, req->sqe, nxt, force_nonblock);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, s->sqe, force_nonblock);
+ ret = io_sendmsg(req, req->sqe, nxt, force_nonblock);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, s->sqe, force_nonblock);
+ ret = io_recvmsg(req, req->sqe, nxt, force_nonblock);
+ break;
+ case IORING_OP_TIMEOUT:
+ ret = io_timeout(req, req->sqe);
+ break;
+ case IORING_OP_TIMEOUT_REMOVE:
+ ret = io_timeout_remove(req, req->sqe);
+ break;
+ case IORING_OP_ACCEPT:
+ ret = io_accept(req, req->sqe, nxt, force_nonblock);
+ break;
+ case IORING_OP_CONNECT:
+ ret = io_connect(req, req->sqe, nxt, force_nonblock);
+ break;
+ case IORING_OP_ASYNC_CANCEL:
+ ret = io_async_cancel(req, req->sqe, nxt);
break;
default:
ret = -EINVAL;
@@ -1752,177 +2702,76 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return -EAGAIN;
/* workqueue context doesn't hold uring_lock, grab it now */
- if (s->needs_lock)
+ if (req->in_async)
mutex_lock(&ctx->uring_lock);
io_iopoll_req_issued(req);
- if (s->needs_lock)
+ if (req->in_async)
mutex_unlock(&ctx->uring_lock);
}
return 0;
}
-static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
- const struct io_uring_sqe *sqe)
-{
- switch (sqe->opcode) {
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- return &ctx->pending_async[READ];
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- return &ctx->pending_async[WRITE];
- default:
- return NULL;
- }
-}
-
-static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
+static void io_link_work_cb(struct io_wq_work **workptr)
{
- u8 opcode = READ_ONCE(sqe->opcode);
+ struct io_wq_work *work = *workptr;
+ struct io_kiocb *link = work->data;
- return !(opcode == IORING_OP_READ_FIXED ||
- opcode == IORING_OP_WRITE_FIXED);
+ io_queue_linked_timeout(link);
+ work->func = io_wq_submit_work;
}
-static void io_sq_wq_submit_work(struct work_struct *work)
+static void io_wq_submit_work(struct io_wq_work **workptr)
{
+ struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_ring_ctx *ctx = req->ctx;
- struct mm_struct *cur_mm = NULL;
- struct async_list *async_list;
- LIST_HEAD(req_list);
- mm_segment_t old_fs;
- int ret;
+ struct io_kiocb *nxt = NULL;
+ int ret = 0;
- async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
-restart:
- do {
- struct sqe_submit *s = &req->submit;
- const struct io_uring_sqe *sqe = s->sqe;
+ /* Ensure we clear previously set non-block flag */
+ req->rw.ki_flags &= ~IOCB_NOWAIT;
- /* Ensure we clear previously set non-block flag */
- req->rw.ki_flags &= ~IOCB_NOWAIT;
+ if (work->flags & IO_WQ_WORK_CANCEL)
+ ret = -ECANCELED;
- ret = 0;
- if (io_sqe_needs_user(sqe) && !cur_mm) {
- if (!mmget_not_zero(ctx->sqo_mm)) {
- ret = -EFAULT;
- } else {
- cur_mm = ctx->sqo_mm;
- use_mm(cur_mm);
- old_fs = get_fs();
- set_fs(USER_DS);
- }
- }
+ if (!ret) {
+ req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
+ req->in_async = true;
+ do {
+ ret = io_issue_sqe(req, &nxt, false);
+ /*
+ * We can get EAGAIN for polled IO even though we're
+ * forcing a sync submission from here, since we can't
+ * wait for request slots on the block side.
+ */
+ if (ret != -EAGAIN)
+ break;
+ cond_resched();
+ } while (1);
+ }
- if (!ret) {
- s->has_user = cur_mm != NULL;
- s->needs_lock = true;
- do {
- ret = __io_submit_sqe(ctx, req, s, false);
- /*
- * We can get EAGAIN for polled IO even though
- * we're forcing a sync submission from here,
- * since we can't wait for request slots on the
- * block side.
- */
- if (ret != -EAGAIN)
- break;
- cond_resched();
- } while (1);
- }
+ /* drop submission reference */
+ io_put_req(req);
- /* drop submission reference */
+ if (ret) {
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_add_event(req, ret);
io_put_req(req);
-
- if (ret) {
- io_cqring_add_event(ctx, sqe->user_data, ret);
- io_put_req(req);
- }
-
- /* async context always use a copy of the sqe */
- kfree(sqe);
-
- if (!async_list)
- break;
- if (!list_empty(&req_list)) {
- req = list_first_entry(&req_list, struct io_kiocb,
- list);
- list_del(&req->list);
- continue;
- }
- if (list_empty(&async_list->list))
- break;
-
- req = NULL;
- spin_lock(&async_list->lock);
- if (list_empty(&async_list->list)) {
- spin_unlock(&async_list->lock);
- break;
- }
- list_splice_init(&async_list->list, &req_list);
- spin_unlock(&async_list->lock);
-
- req = list_first_entry(&req_list, struct io_kiocb, list);
- list_del(&req->list);
- } while (req);
-
- /*
- * Rare case of racing with a submitter. If we find the count has
- * dropped to zero AND we have pending work items, then restart
- * the processing. This is a tiny race window.
- */
- if (async_list) {
- ret = atomic_dec_return(&async_list->cnt);
- while (!ret && !list_empty(&async_list->list)) {
- spin_lock(&async_list->lock);
- atomic_inc(&async_list->cnt);
- list_splice_init(&async_list->list, &req_list);
- spin_unlock(&async_list->lock);
-
- if (!list_empty(&req_list)) {
- req = list_first_entry(&req_list,
- struct io_kiocb, list);
- list_del(&req->list);
- goto restart;
- }
- ret = atomic_dec_return(&async_list->cnt);
- }
}
- if (cur_mm) {
- set_fs(old_fs);
- unuse_mm(cur_mm);
- mmput(cur_mm);
- }
-}
-
-/*
- * See if we can piggy back onto previously submitted work, that is still
- * running. We currently only allow this if the new request is sequential
- * to the previous one we punted.
- */
-static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
-{
- bool ret = false;
-
- if (!list)
- return false;
- if (!(req->flags & REQ_F_SEQ_PREV))
- return false;
- if (!atomic_read(&list->cnt))
- return false;
+ /* if a dependent link is ready, pass it back */
+ if (!ret && nxt) {
+ struct io_kiocb *link;
- ret = true;
- spin_lock(&list->lock);
- list_add_tail(&req->list, &list->list);
- if (!atomic_read(&list->cnt)) {
- list_del_init(&req->list);
- ret = false;
+ io_prep_async_work(nxt, &link);
+ *workptr = &nxt->work;
+ if (link) {
+ nxt->work.flags |= IO_WQ_WORK_CB;
+ nxt->work.func = io_link_work_cb;
+ nxt->work.data = link;
+ }
}
- spin_unlock(&list->lock);
- return ret;
}
static bool io_op_needs_file(const struct io_uring_sqe *sqe)
@@ -1932,38 +2781,53 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
switch (op) {
case IORING_OP_NOP:
case IORING_OP_POLL_REMOVE:
+ case IORING_OP_TIMEOUT:
+ case IORING_OP_TIMEOUT_REMOVE:
+ case IORING_OP_ASYNC_CANCEL:
+ case IORING_OP_LINK_TIMEOUT:
return false;
default:
return true;
}
}
-static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
- struct io_submit_state *state, struct io_kiocb *req)
+static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
+ int index)
+{
+ struct fixed_file_table *table;
+
+ table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
+ return table->files[index & IORING_FILE_TABLE_MASK];
+}
+
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
int fd;
- flags = READ_ONCE(s->sqe->flags);
- fd = READ_ONCE(s->sqe->fd);
+ flags = READ_ONCE(req->sqe->flags);
+ fd = READ_ONCE(req->sqe->fd);
- if (flags & IOSQE_IO_DRAIN) {
+ if (flags & IOSQE_IO_DRAIN)
req->flags |= REQ_F_IO_DRAIN;
- req->sequence = ctx->cached_sq_head - 1;
- }
- if (!io_op_needs_file(s->sqe))
+ if (!io_op_needs_file(req->sqe))
return 0;
if (flags & IOSQE_FIXED_FILE) {
- if (unlikely(!ctx->user_files ||
+ if (unlikely(!ctx->file_table ||
(unsigned) fd >= ctx->nr_user_files))
return -EBADF;
- req->file = ctx->user_files[fd];
+ fd = array_index_nospec(fd, ctx->nr_user_files);
+ req->file = io_file_from_index(ctx, fd);
+ if (!req->file)
+ return -EBADF;
req->flags |= REQ_F_FIXED_FILE;
} else {
- if (s->needs_fixed_file)
+ if (req->needs_fixed_file)
return -EBADF;
+ trace_io_uring_file_get(ctx, fd);
req->file = io_file_get(state, fd);
if (unlikely(!req->file))
return -EBADF;
@@ -1972,87 +2836,219 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
return 0;
}
-static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s)
+static int io_grab_files(struct io_kiocb *req)
+{
+ int ret = -EBADF;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ rcu_read_lock();
+ spin_lock_irq(&ctx->inflight_lock);
+ /*
+ * We use the f_ops->flush() handler to ensure that we can flush
+ * out work accessing these files if the fd is closed. Check if
+ * the fd has changed since we started down this path, and disallow
+ * this operation if it has.
+ */
+ if (fcheck(req->ring_fd) == req->ring_file) {
+ list_add(&req->inflight_entry, &ctx->inflight_list);
+ req->flags |= REQ_F_INFLIGHT;
+ req->work.files = current->files;
+ ret = 0;
+ }
+ spin_unlock_irq(&ctx->inflight_lock);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+{
+ struct io_timeout_data *data = container_of(timer,
+ struct io_timeout_data, timer);
+ struct io_kiocb *req = data->req;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *prev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+
+ /*
+ * We don't expect the list to be empty; that will only happen if we
+ * race with the completion of the linked work.
+ */
+ if (!list_empty(&req->list)) {
+ prev = list_entry(req->list.prev, struct io_kiocb, link_list);
+ if (refcount_inc_not_zero(&prev->refs)) {
+ list_del_init(&req->list);
+ prev->flags &= ~REQ_F_LINK_TIMEOUT;
+ } else
+ prev = NULL;
+ }
+
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+ if (prev) {
+ if (prev->flags & REQ_F_LINK)
+ prev->flags |= REQ_F_FAIL_LINK;
+ io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
+ -ETIME);
+ io_put_req(prev);
+ } else {
+ io_cqring_add_event(req, -ETIME);
+ io_put_req(req);
+ }
+ return HRTIMER_NORESTART;
+}
+
+static void io_queue_linked_timeout(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ /*
+ * If the list is now empty, then our linked request finished before
+ * we got a chance to set up the timer.
+ */
+ spin_lock_irq(&ctx->completion_lock);
+ if (!list_empty(&req->list)) {
+ struct io_timeout_data *data = req->timeout.data;
+
+ data->timer.function = io_link_timeout_fn;
+ hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
+ data->mode);
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ /* drop submission reference */
+ io_put_req(req);
+}
+
+static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
+{
+ struct io_kiocb *nxt;
+
+ if (!(req->flags & REQ_F_LINK))
+ return NULL;
+
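+ /* a linked timeout, if present, must be the first entry in the link list */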
+ nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
+ if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT)
+ return NULL;
+
+ req->flags |= REQ_F_LINK_TIMEOUT;
+ return nxt;
+}
+
+static void __io_queue_sqe(struct io_kiocb *req)
{
+ struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+ struct io_kiocb *nxt = NULL;
int ret;
- ret = __io_submit_sqe(ctx, req, s, true);
- if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
- struct io_uring_sqe *sqe_copy;
+ ret = io_issue_sqe(req, &nxt, true);
+ if (nxt)
+ io_queue_async_work(nxt);
- sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
- if (sqe_copy) {
- struct async_list *list;
+ /*
+ * We async punt it if the file wasn't marked NOWAIT, or if the file
+ * doesn't support non-blocking read/write attempts
+ */
+ if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
+ (req->flags & REQ_F_MUST_PUNT))) {
+ struct io_uring_sqe *sqe_copy;
- memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
- s->sqe = sqe_copy;
+ sqe_copy = kmemdup(req->sqe, sizeof(*sqe_copy), GFP_KERNEL);
+ if (!sqe_copy)
+ goto err;
- memcpy(&req->submit, s, sizeof(*s));
- list = io_async_list_from_sqe(ctx, s->sqe);
- if (!io_add_to_prev_work(list, req)) {
- if (list)
- atomic_inc(&list->cnt);
- INIT_WORK(&req->work, io_sq_wq_submit_work);
- queue_work(ctx->sqo_wq, &req->work);
- }
+ req->sqe = sqe_copy;
+ req->flags |= REQ_F_FREE_SQE;
- /*
- * Queued up for async execution, worker will release
- * submit reference when the iocb is actually submitted.
- */
- return 0;
+ if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
+ ret = io_grab_files(req);
+ if (ret)
+ goto err;
}
+
+ /*
+ * Queued up for async execution, worker will release
+ * submit reference when the iocb is actually submitted.
+ */
+ io_queue_async_work(req);
+ return;
}
+err:
/* drop submission reference */
io_put_req(req);
+ if (linked_timeout) {
+ if (!ret)
+ io_queue_linked_timeout(linked_timeout);
+ else
+ io_put_req(linked_timeout);
+ }
+
/* and drop final reference, if we failed */
if (ret) {
- io_cqring_add_event(ctx, req->user_data, ret);
+ io_cqring_add_event(req, ret);
if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
}
+}
- return ret;
+static void io_queue_sqe(struct io_kiocb *req)
+{
+ int ret;
+
+ if (unlikely(req->ctx->drain_next)) {
+ req->flags |= REQ_F_IO_DRAIN;
+ req->ctx->drain_next = false;
+ }
+ req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
+
+ ret = io_req_defer(req);
+ if (ret) {
+ if (ret != -EIOCBQUEUED) {
+ io_cqring_add_event(req, ret);
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_double_put_req(req);
+ }
+ } else
+ __io_queue_sqe(req);
}
+static inline void io_queue_link_head(struct io_kiocb *req)
+{
+ if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
+ io_cqring_add_event(req, -ECANCELED);
+ io_double_put_req(req);
+ } else
+ io_queue_sqe(req);
+}
+
+
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
-static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
- struct io_submit_state *state, struct io_kiocb **link)
+static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
+ struct io_kiocb **link)
{
- struct io_uring_sqe *sqe_copy;
- struct io_kiocb *req;
+ struct io_ring_ctx *ctx = req->ctx;
int ret;
+ req->user_data = req->sqe->user_data;
+
/* enforce forwards compatibility on users */
- if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
+ if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) {
ret = -EINVAL;
- goto err;
- }
-
- req = io_get_req(ctx, state);
- if (unlikely(!req)) {
- ret = -EAGAIN;
- goto err;
+ goto err_req;
}
- ret = io_req_set_file(ctx, s, state, req);
+ ret = io_req_set_file(state, req);
if (unlikely(ret)) {
err_req:
- io_free_req(req);
-err:
- io_cqring_add_event(ctx, s->sqe->user_data, ret);
- return;
- }
-
- ret = io_req_defer(ctx, req, s->sqe);
- if (ret) {
- if (ret != -EIOCBQUEUED)
- goto err_req;
+ io_cqring_add_event(req, ret);
+ io_double_put_req(req);
return;
}
@@ -2065,24 +3061,39 @@ err:
*/
if (*link) {
struct io_kiocb *prev = *link;
+ struct io_uring_sqe *sqe_copy;
- sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
+ if (req->sqe->flags & IOSQE_IO_DRAIN)
+ (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
+
+ if (READ_ONCE(req->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
+ ret = io_timeout_setup(req);
+ /* the common setup code allows an offset to be set; we don't */
+ if (!ret && req->sqe->off)
+ ret = -EINVAL;
+ if (ret) {
+ prev->flags |= REQ_F_FAIL_LINK;
+ goto err_req;
+ }
+ }
+
+ sqe_copy = kmemdup(req->sqe, sizeof(*sqe_copy), GFP_KERNEL);
if (!sqe_copy) {
ret = -EAGAIN;
goto err_req;
}
- s->sqe = sqe_copy;
- memcpy(&req->submit, s, sizeof(*s));
+ req->sqe = sqe_copy;
+ req->flags |= REQ_F_FREE_SQE;
+ trace_io_uring_link(ctx, req, prev);
list_add_tail(&req->list, &prev->link_list);
- } else if (s->sqe->flags & IOSQE_IO_LINK) {
+ } else if (req->sqe->flags & IOSQE_IO_LINK) {
req->flags |= REQ_F_LINK;
- memcpy(&req->submit, s, sizeof(*s));
INIT_LIST_HEAD(&req->link_list);
*link = req;
} else {
- io_queue_sqe(ctx, req, s);
+ io_queue_sqe(req);
}
}
@@ -2112,15 +3123,15 @@ static void io_submit_state_start(struct io_submit_state *state,
static void io_commit_sqring(struct io_ring_ctx *ctx)
{
- struct io_sq_ring *ring = ctx->sq_ring;
+ struct io_rings *rings = ctx->rings;
- if (ctx->cached_sq_head != READ_ONCE(ring->r.head)) {
+ if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
/*
* Ensure any loads from the SQEs are done at this point,
* since once we write the new head, the application could
* write new data to them.
*/
- smp_store_release(&ring->r.head, ctx->cached_sq_head);
+ smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}
}
@@ -2132,9 +3143,10 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
* used, it's important that those reads are done through READ_ONCE() to
* prevent a re-load down the line.
*/
-static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
+static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
- struct io_sq_ring *ring = ctx->sq_ring;
+ struct io_rings *rings = ctx->rings;
+ u32 *sq_array = ctx->sq_array;
unsigned head;
/*
@@ -2147,30 +3159,42 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
*/
head = ctx->cached_sq_head;
/* make sure SQ entry isn't read before tail */
- if (head == smp_load_acquire(&ring->r.tail))
+ if (unlikely(head == smp_load_acquire(&rings->sq.tail)))
return false;
- head = READ_ONCE(ring->array[head & ctx->sq_mask]);
- if (head < ctx->sq_entries) {
- s->index = head;
- s->sqe = &ctx->sq_sqes[head];
+ head = READ_ONCE(sq_array[head & ctx->sq_mask]);
+ if (likely(head < ctx->sq_entries)) {
+ /*
+ * All io needs to record the previous position; for LINK vs DRAIN
+ * it can be used to mark the position of the first IO in the
+ * link list.
+ */
+ req->sequence = ctx->cached_sq_head;
+ req->sqe = &ctx->sq_sqes[head];
ctx->cached_sq_head++;
return true;
}
/* drop invalid entries */
ctx->cached_sq_head++;
- ring->dropped++;
+ ctx->cached_sq_dropped++;
+ WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
return false;
}
-static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
- unsigned int nr, bool has_user, bool mm_fault)
+static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
+ struct file *ring_file, int ring_fd,
+ struct mm_struct **mm, bool async)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
- bool prev_was_link = false;
int i, submitted = 0;
+ bool mm_fault = false;
+
+ /* if we have a backlog and couldn't flush it all, return BUSY */
+ if (!list_empty(&ctx->cq_overflow_list) &&
+ !io_cqring_overflow_flush(ctx, false))
+ return -EBUSY;
if (nr > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, ctx, nr);
@@ -2178,68 +3202,99 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
}
for (i = 0; i < nr; i++) {
+ struct io_kiocb *req;
+ unsigned int sqe_flags;
+
+ req = io_get_req(ctx, statep);
+ if (unlikely(!req)) {
+ if (!submitted)
+ submitted = -EAGAIN;
+ break;
+ }
+ if (!io_get_sqring(ctx, req)) {
+ __io_free_req(req);
+ break;
+ }
+
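+ /*
+ * Only grab the submitter's mm (once) if this sqe needs access to
+ * user memory; fixed-buffer requests don't.
+ */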
+ if (io_sqe_needs_user(req->sqe) && !*mm) {
+ mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
+ if (!mm_fault) {
+ use_mm(ctx->sqo_mm);
+ *mm = ctx->sqo_mm;
+ }
+ }
+
+ sqe_flags = req->sqe->flags;
+
+ req->ring_file = ring_file;
+ req->ring_fd = ring_fd;
+ req->has_user = *mm != NULL;
+ req->in_async = async;
+ req->needs_fixed_file = async;
+ trace_io_uring_submit_sqe(ctx, req->sqe->user_data,
+ true, async);
+ io_submit_sqe(req, statep, &link);
+ submitted++;
+
/*
* If previous wasn't linked and we have a linked command,
* that's the end of the chain. Submit the previous link.
*/
- if (!prev_was_link && link) {
- io_queue_sqe(ctx, link, &link->submit);
+ if (!(sqe_flags & IOSQE_IO_LINK) && link) {
+ io_queue_link_head(link);
link = NULL;
}
- prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
-
- if (unlikely(mm_fault)) {
- io_cqring_add_event(ctx, sqes[i].sqe->user_data,
- -EFAULT);
- } else {
- sqes[i].has_user = has_user;
- sqes[i].needs_lock = true;
- sqes[i].needs_fixed_file = true;
- io_submit_sqe(ctx, &sqes[i], statep, &link);
- submitted++;
- }
}
if (link)
- io_queue_sqe(ctx, link, &link->submit);
+ io_queue_link_head(link);
if (statep)
io_submit_state_end(&state);
+ /* Commit SQ ring head once we've consumed and submitted all SQEs */
+ io_commit_sqring(ctx);
+
return submitted;
}
static int io_sq_thread(void *data)
{
- struct sqe_submit sqes[IO_IOPOLL_BATCH];
struct io_ring_ctx *ctx = data;
struct mm_struct *cur_mm = NULL;
+ const struct cred *old_cred;
mm_segment_t old_fs;
DEFINE_WAIT(wait);
unsigned inflight;
unsigned long timeout;
+ int ret;
- complete(&ctx->sqo_thread_started);
+ complete(&ctx->completions[1]);
old_fs = get_fs();
set_fs(USER_DS);
+ old_cred = override_creds(ctx->creds);
- timeout = inflight = 0;
+ ret = timeout = inflight = 0;
while (!kthread_should_park()) {
- bool all_fixed, mm_fault = false;
- int i;
+ unsigned int to_submit;
if (inflight) {
unsigned nr_events = 0;
if (ctx->flags & IORING_SETUP_IOPOLL) {
/*
- * We disallow the app entering submit/complete
- * with polling, but we still need to lock the
- * ring to prevent racing with polled issue
- * that got punted to a workqueue.
+ * inflight is the count of the maximum possible
+ * entries we submitted, but it can be smaller
+ * if we dropped some of them. If we don't have
+ * poll entries available, then we know that we
+ * have nothing left to poll for. Reset the
+ * inflight count to zero in that case.
*/
mutex_lock(&ctx->uring_lock);
- io_iopoll_check(ctx, &nr_events, 0);
+ if (!list_empty(&ctx->poll_list))
+ __io_iopoll_check(ctx, &nr_events, 0);
+ else
+ inflight = 0;
mutex_unlock(&ctx->uring_lock);
} else {
/*
@@ -2254,14 +3309,23 @@ static int io_sq_thread(void *data)
timeout = jiffies + ctx->sq_thread_idle;
}
- if (!io_get_sqring(ctx, &sqes[0])) {
+ to_submit = io_sqring_entries(ctx);
+
+ /*
+ * If submit got -EBUSY, flag us as needing the application
+ * to enter the kernel to reap and flush events.
+ */
+ if (!to_submit || ret == -EBUSY) {
/*
* We're polling. If we're within the defined idle
* period, then let us spin without work before going
- * to sleep.
+ * to sleep. The exception is if we got EBUSY doing
+ * more IO; in that case, wait for the application to
+ * reap events and wake us up.
*/
- if (inflight || !time_after(jiffies, timeout)) {
- cpu_relax();
+ if (inflight ||
+ (!time_after(jiffies, timeout) && ret != -EBUSY)) {
+ cond_resched();
continue;
}
@@ -2281,11 +3345,12 @@ static int io_sq_thread(void *data)
TASK_INTERRUPTIBLE);
/* Tell userspace we may need a wakeup call */
- ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP;
+ ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
/* make sure to read SQ tail after writing flags */
smp_mb();
- if (!io_get_sqring(ctx, &sqes[0])) {
+ to_submit = io_sqring_entries(ctx);
+ if (!to_submit || ret == -EBUSY) {
if (kthread_should_park()) {
finish_wait(&ctx->sqo_wait, &wait);
break;
@@ -2295,39 +3360,18 @@ static int io_sq_thread(void *data)
schedule();
finish_wait(&ctx->sqo_wait, &wait);
- ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
+ ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
continue;
}
finish_wait(&ctx->sqo_wait, &wait);
- ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
- }
-
- i = 0;
- all_fixed = true;
- do {
- if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
- all_fixed = false;
-
- i++;
- if (i == ARRAY_SIZE(sqes))
- break;
- } while (io_get_sqring(ctx, &sqes[i]));
-
- /* Unless all new commands are FIXED regions, grab mm */
- if (!all_fixed && !cur_mm) {
- mm_fault = !mmget_not_zero(ctx->sqo_mm);
- if (!mm_fault) {
- use_mm(ctx->sqo_mm);
- cur_mm = ctx->sqo_mm;
- }
+ ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
}
- inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
- mm_fault);
-
- /* Commit SQ ring head once we've consumed all SQEs */
- io_commit_sqring(ctx);
+ to_submit = min(to_submit, ctx->sq_entries);
+ ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
+ if (ret > 0)
+ inflight += ret;
}
set_fs(old_fs);
@@ -2335,61 +3379,44 @@ static int io_sq_thread(void *data)
unuse_mm(cur_mm);
mmput(cur_mm);
}
+ revert_creds(old_cred);
kthread_parkme();
return 0;
}
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
-{
- struct io_submit_state state, *statep = NULL;
- struct io_kiocb *link = NULL;
- bool prev_was_link = false;
- int i, submit = 0;
-
- if (to_submit > IO_PLUG_THRESHOLD) {
- io_submit_state_start(&state, ctx, to_submit);
- statep = &state;
- }
-
- for (i = 0; i < to_submit; i++) {
- struct sqe_submit s;
-
- if (!io_get_sqring(ctx, &s))
- break;
-
- /*
- * If previous wasn't linked and we have a linked command,
- * that's the end of the chain. Submit the previous link.
- */
- if (!prev_was_link && link) {
- io_queue_sqe(ctx, link, &link->submit);
- link = NULL;
- }
- prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
-
- s.has_user = true;
- s.needs_lock = false;
- s.needs_fixed_file = false;
- submit++;
- io_submit_sqe(ctx, &s, statep, &link);
- }
- io_commit_sqring(ctx);
+struct io_wait_queue {
+ struct wait_queue_entry wq;
+ struct io_ring_ctx *ctx;
+ unsigned to_wait;
+ unsigned nr_timeouts;
+};
- if (link)
- io_queue_sqe(ctx, link, &link->submit);
- if (statep)
- io_submit_state_end(statep);
+static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
+{
+ struct io_ring_ctx *ctx = iowq->ctx;
- return submit;
+ /*
+ * Wake up if we have enough events, or if a timeout occurred since we
+ * started waiting. For timeouts, we always want to return to userspace,
+ * regardless of event count.
+ */
+ return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
+ atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
-static unsigned io_cqring_events(struct io_cq_ring *ring)
+static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
+ int wake_flags, void *key)
{
- /* See comment at the top of this file */
- smp_rmb();
- return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
+ struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
+ wq);
+
+ /* use noflush == true, as we can't safely rely on locking context */
+ if (!io_should_wake(iowq, true))
+ return -1;
+
+ return autoremove_wake_function(curr, mode, wake_flags, key);
}
/*
@@ -2399,10 +3426,19 @@ static unsigned io_cqring_events(struct io_cq_ring *ring)
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
const sigset_t __user *sig, size_t sigsz)
{
- struct io_cq_ring *ring = ctx->cq_ring;
- int ret;
+ struct io_wait_queue iowq = {
+ .wq = {
+ .private = current,
+ .func = io_wake_function,
+ .entry = LIST_HEAD_INIT(iowq.wq.entry),
+ },
+ .ctx = ctx,
+ .to_wait = min_events,
+ };
+ struct io_rings *rings = ctx->rings;
+ int ret = 0;
- if (io_cqring_events(ring) >= min_events)
+ if (io_cqring_events(ctx, false) >= min_events)
return 0;
if (sig) {
@@ -2418,12 +3454,24 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
- ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events);
- restore_saved_sigmask_unless(ret == -ERESTARTSYS);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
+ iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
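+ /* snapshot timeout count so io_should_wake() notices new timeouts while we wait */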
+ trace_io_uring_cqring_wait(ctx, min_events);
+ do {
+ prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
+ TASK_INTERRUPTIBLE);
+ if (io_should_wake(&iowq, false))
+ break;
+ schedule();
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ } while (1);
+ finish_wait(&ctx->wait, &iowq.wq);
+
+ restore_saved_sigmask_unless(ret == -EINTR);
- return READ_ONCE(ring->r.head) == READ_ONCE(ring->r.tail) ? ret : 0;
+ return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
@@ -2439,19 +3487,29 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
#else
int i;
- for (i = 0; i < ctx->nr_user_files; i++)
- fput(ctx->user_files[i]);
+ for (i = 0; i < ctx->nr_user_files; i++) {
+ struct file *file;
+
+ file = io_file_from_index(ctx, i);
+ if (file)
+ fput(file);
+ }
#endif
}
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
- if (!ctx->user_files)
+ unsigned nr_tables, i;
+
+ if (!ctx->file_table)
return -ENXIO;
__io_sqe_files_unregister(ctx);
- kfree(ctx->user_files);
- ctx->user_files = NULL;
+ nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
+ for (i = 0; i < nr_tables; i++)
+ kfree(ctx->file_table[i].files);
+ kfree(ctx->file_table);
+ ctx->file_table = NULL;
ctx->nr_user_files = 0;
return 0;
}
@@ -2459,7 +3517,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{
if (ctx->sqo_thread) {
- wait_for_completion(&ctx->sqo_thread_started);
+ wait_for_completion(&ctx->completions[1]);
/*
* The park is a bit of a work-around, without it we get
* warning spews on shutdown with SQPOLL set and affinity
@@ -2475,9 +3533,9 @@ static void io_finish_async(struct io_ring_ctx *ctx)
{
io_sq_thread_stop(ctx);
- if (ctx->sqo_wq) {
- destroy_workqueue(ctx->sqo_wq);
- ctx->sqo_wq = NULL;
+ if (ctx->io_wq) {
+ io_wq_destroy(ctx->io_wq);
+ ctx->io_wq = NULL;
}
}
@@ -2486,7 +3544,9 @@ static void io_destruct_skb(struct sk_buff *skb)
{
struct io_ring_ctx *ctx = skb->sk->sk_user_data;
- io_finish_async(ctx);
+ if (ctx->io_wq)
+ io_wq_flush(ctx->io_wq);
+
unix_destruct_scm(skb);
}
@@ -2500,7 +3560,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
struct sock *sk = ctx->ring_sock->sk;
struct scm_fp_list *fpl;
struct sk_buff *skb;
- int i;
+ int i, nr_files;
if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
unsigned long inflight = ctx->user->unix_inflight + nr;
@@ -2520,21 +3580,33 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
}
skb->sk = sk;
- skb->destructor = io_destruct_skb;
+ nr_files = 0;
fpl->user = get_uid(ctx->user);
for (i = 0; i < nr; i++) {
- fpl->fp[i] = get_file(ctx->user_files[i + offset]);
- unix_inflight(fpl->user, fpl->fp[i]);
+ struct file *file = io_file_from_index(ctx, i + offset);
+
+ if (!file)
+ continue;
+ fpl->fp[nr_files] = get_file(file);
+ unix_inflight(fpl->user, fpl->fp[nr_files]);
+ nr_files++;
}
- fpl->max = fpl->count = nr;
- UNIXCB(skb).fp = fpl;
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
- skb_queue_head(&sk->sk_receive_queue, skb);
+ if (nr_files) {
+ fpl->max = SCM_MAX_FD;
+ fpl->count = nr_files;
+ UNIXCB(skb).fp = fpl;
+ skb->destructor = io_destruct_skb;
+ refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+ skb_queue_head(&sk->sk_receive_queue, skb);
- for (i = 0; i < nr; i++)
- fput(fpl->fp[i]);
+ for (i = 0; i < nr_files; i++)
+ fput(fpl->fp[i]);
+ } else {
+ kfree_skb(skb);
+ kfree(fpl);
+ }
return 0;
}
@@ -2565,7 +3637,10 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
return 0;
while (total < ctx->nr_user_files) {
- fput(ctx->user_files[total]);
+ struct file *file = io_file_from_index(ctx, total);
+
+ if (file)
+ fput(file);
total++;
}
@@ -2578,33 +3653,79 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
}
#endif
+static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
+ unsigned nr_files)
+{
+ int i;
+
+ for (i = 0; i < nr_tables; i++) {
+ struct fixed_file_table *table = &ctx->file_table[i];
+ unsigned this_files;
+
+ this_files = min(nr_files, IORING_MAX_FILES_TABLE);
+ table->files = kcalloc(this_files, sizeof(struct file *),
+ GFP_KERNEL);
+ if (!table->files)
+ break;
+ nr_files -= this_files;
+ }
+
+ if (i == nr_tables)
+ return 0;
+
+ for (i = 0; i < nr_tables; i++) {
+ struct fixed_file_table *table = &ctx->file_table[i];
+ kfree(table->files);
+ }
+ return 1;
+}
+
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
__s32 __user *fds = (__s32 __user *) arg;
+ unsigned nr_tables;
int fd, ret = 0;
unsigned i;
- if (ctx->user_files)
+ if (ctx->file_table)
return -EBUSY;
if (!nr_args)
return -EINVAL;
if (nr_args > IORING_MAX_FIXED_FILES)
return -EMFILE;
- ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
- if (!ctx->user_files)
+ nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
+ ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
+ GFP_KERNEL);
+ if (!ctx->file_table)
return -ENOMEM;
- for (i = 0; i < nr_args; i++) {
+ if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
+ kfree(ctx->file_table);
+ ctx->file_table = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
+ struct fixed_file_table *table;
+ unsigned index;
+
ret = -EFAULT;
if (copy_from_user(&fd, &fds[i], sizeof(fd)))
break;
+ /* allow sparse sets */
+ if (fd == -1) {
+ ret = 0;
+ continue;
+ }
- ctx->user_files[i] = fget(fd);
+ table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
+ index = i & IORING_FILE_TABLE_MASK;
+ table->files[index] = fget(fd);
ret = -EBADF;
- if (!ctx->user_files[i])
+ if (!table->files[index])
break;
/*
* Don't allow io_uring instances to be registered. If UNIX
@@ -2613,20 +3734,26 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
* handle it just fine, but there's still no point in allowing
* a ring fd as it doesn't support regular read/write anyway.
*/
- if (ctx->user_files[i]->f_op == &io_uring_fops) {
- fput(ctx->user_files[i]);
+ if (table->files[index]->f_op == &io_uring_fops) {
+ fput(table->files[index]);
break;
}
- ctx->nr_user_files++;
ret = 0;
}
if (ret) {
- for (i = 0; i < ctx->nr_user_files; i++)
- fput(ctx->user_files[i]);
+ for (i = 0; i < ctx->nr_user_files; i++) {
+ struct file *file;
- kfree(ctx->user_files);
- ctx->user_files = NULL;
+ file = io_file_from_index(ctx, i);
+ if (file)
+ fput(file);
+ }
+ for (i = 0; i < nr_tables; i++)
+ kfree(ctx->file_table[i].files);
+
+ kfree(ctx->file_table);
+ ctx->file_table = NULL;
ctx->nr_user_files = 0;
return ret;
}
@@ -2638,9 +3765,202 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
+static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
+{
+#if defined(CONFIG_UNIX)
+ struct file *file = io_file_from_index(ctx, index);
+ struct sock *sock = ctx->ring_sock->sk;
+ struct sk_buff_head list, *head = &sock->sk_receive_queue;
+ struct sk_buff *skb;
+ int i;
+
+ __skb_queue_head_init(&list);
+
+ /*
+ * Find the skb that holds this file in its SCM_RIGHTS. When found,
+ * remove this entry and rearrange the file array.
+ */
+ skb = skb_dequeue(head);
+ while (skb) {
+ struct scm_fp_list *fp;
+
+ fp = UNIXCB(skb).fp;
+ for (i = 0; i < fp->count; i++) {
+ int left;
+
+ if (fp->fp[i] != file)
+ continue;
+
+ unix_notinflight(fp->user, fp->fp[i]);
+ left = fp->count - 1 - i;
+ if (left) {
+ memmove(&fp->fp[i], &fp->fp[i + 1],
+ left * sizeof(struct file *));
+ }
+ fp->count--;
+ if (!fp->count) {
+ kfree_skb(skb);
+ skb = NULL;
+ } else {
+ __skb_queue_tail(&list, skb);
+ }
+ fput(file);
+ file = NULL;
+ break;
+ }
+
+ if (!file)
+ break;
+
+ __skb_queue_tail(&list, skb);
+
+ skb = skb_dequeue(head);
+ }
+
+ if (skb_peek(&list)) {
+ spin_lock_irq(&head->lock);
+ while ((skb = __skb_dequeue(&list)) != NULL)
+ __skb_queue_tail(head, skb);
+ spin_unlock_irq(&head->lock);
+ }
+#else
+ fput(io_file_from_index(ctx, index));
+#endif
+}
+
+static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
+ int index)
+{
+#if defined(CONFIG_UNIX)
+ struct sock *sock = ctx->ring_sock->sk;
+ struct sk_buff_head *head = &sock->sk_receive_queue;
+ struct sk_buff *skb;
+
+ /*
+ * See if we can merge this file into an existing skb SCM_RIGHTS
+ * file set. If there's no room, fall back to allocating a new skb
+ * and filling it in.
+ */
+ spin_lock_irq(&head->lock);
+ skb = skb_peek(head);
+ if (skb) {
+ struct scm_fp_list *fpl = UNIXCB(skb).fp;
+
+ if (fpl->count < SCM_MAX_FD) {
+ __skb_unlink(skb, head);
+ spin_unlock_irq(&head->lock);
+ fpl->fp[fpl->count] = get_file(file);
+ unix_inflight(fpl->user, fpl->fp[fpl->count]);
+ fpl->count++;
+ spin_lock_irq(&head->lock);
+ __skb_queue_head(head, skb);
+ } else {
+ skb = NULL;
+ }
+ }
+ spin_unlock_irq(&head->lock);
+
+ if (skb) {
+ fput(file);
+ return 0;
+ }
+
+ return __io_sqe_files_scm(ctx, 1, index);
+#else
+ return 0;
+#endif
+}
+
+static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned nr_args)
+{
+ struct io_uring_files_update up;
+ __s32 __user *fds;
+ int fd, i, err;
+ __u32 done;
+
+ if (!ctx->file_table)
+ return -ENXIO;
+ if (!nr_args)
+ return -EINVAL;
+ if (copy_from_user(&up, arg, sizeof(up)))
+ return -EFAULT;
+ if (check_add_overflow(up.offset, nr_args, &done))
+ return -EOVERFLOW;
+ if (done > ctx->nr_user_files)
+ return -EINVAL;
+
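+ /* each slot at up.offset is replaced in turn; an fd of -1 just clears it */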
+ done = 0;
+ fds = (__s32 __user *) up.fds;
+ while (nr_args) {
+ struct fixed_file_table *table;
+ unsigned index;
+
+ err = 0;
+ if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
+ err = -EFAULT;
+ break;
+ }
+ i = array_index_nospec(up.offset, ctx->nr_user_files);
+ table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
+ index = i & IORING_FILE_TABLE_MASK;
+ if (table->files[index]) {
+ io_sqe_file_unregister(ctx, i);
+ table->files[index] = NULL;
+ }
+ if (fd != -1) {
+ struct file *file;
+
+ file = fget(fd);
+ if (!file) {
+ err = -EBADF;
+ break;
+ }
+ /*
+ * Don't allow io_uring instances to be registered. If
+ * UNIX isn't enabled, then this causes a reference
+ * cycle and this instance can never get freed. If UNIX
+ * is enabled we'll handle it just fine, but there's
+ * still no point in allowing a ring fd as it doesn't
+ * support regular read/write anyway.
+ */
+ if (file->f_op == &io_uring_fops) {
+ fput(file);
+ err = -EBADF;
+ break;
+ }
+ table->files[index] = file;
+ err = io_sqe_file_register(ctx, file, i);
+ if (err)
+ break;
+ }
+ nr_args--;
+ done++;
+ up.offset++;
+ }
+
+ return done ? done : err;
+}
+
+static void io_put_work(struct io_wq_work *work)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ io_put_req(req);
+}
+
+static void io_get_work(struct io_wq_work *work)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ refcount_inc(&req->refs);
+}
+
static int io_sq_offload_start(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
+ struct io_wq_data data;
+ unsigned concurrency;
int ret;
init_waitqueue_head(&ctx->sqo_wait);
@@ -2684,17 +4004,24 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
goto err;
}
- /* Do QD, or 2 * CPUS, whatever is smallest */
- ctx->sqo_wq = alloc_workqueue("io_ring-wq", WQ_UNBOUND | WQ_FREEZABLE,
- min(ctx->sq_entries - 1, 2 * num_online_cpus()));
- if (!ctx->sqo_wq) {
- ret = -ENOMEM;
+ data.mm = ctx->sqo_mm;
+ data.user = ctx->user;
+ data.creds = ctx->creds;
+ data.get_work = io_get_work;
+ data.put_work = io_put_work;
+
+ /* Do QD, or 4 * CPUS, whatever is smallest */
+ concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
+ ctx->io_wq = io_wq_create(concurrency, &data);
+ if (IS_ERR(ctx->io_wq)) {
+ ret = PTR_ERR(ctx->io_wq);
+ ctx->io_wq = NULL;
goto err;
}
return 0;
err:
- io_sq_thread_stop(ctx);
+ io_finish_async(ctx);
mmdrop(ctx->sqo_mm);
ctx->sqo_mm = NULL;
return ret;
@@ -2743,17 +4070,45 @@ static void *io_mem_alloc(size_t size)
return (void *) __get_free_pages(gfp_flags, get_order(size));
}
+static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
+ size_t *sq_offset)
+{
+ struct io_rings *rings;
+ size_t off, sq_array_size;
+
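+ /* the io_rings struct and CQE array come first, then the SQ index array */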
+ off = struct_size(rings, cqes, cq_entries);
+ if (off == SIZE_MAX)
+ return SIZE_MAX;
+
+#ifdef CONFIG_SMP
+ off = ALIGN(off, SMP_CACHE_BYTES);
+ if (off == 0)
+ return SIZE_MAX;
+#endif
+
+ sq_array_size = array_size(sizeof(u32), sq_entries);
+ if (sq_array_size == SIZE_MAX)
+ return SIZE_MAX;
+
+ if (check_add_overflow(off, sq_array_size, &off))
+ return SIZE_MAX;
+
+ if (sq_offset)
+ *sq_offset = off;
+
+ return off;
+}
+
static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
{
- struct io_sq_ring *sq_ring;
- struct io_cq_ring *cq_ring;
- size_t bytes;
+ size_t pages;
- bytes = struct_size(sq_ring, array, sq_entries);
- bytes += array_size(sizeof(struct io_uring_sqe), sq_entries);
- bytes += struct_size(cq_ring, cqes, cq_entries);
+ pages = (size_t)1 << get_order(
+ rings_size(sq_entries, cq_entries, NULL));
+ pages += (size_t)1 << get_order(
+ array_size(sizeof(struct io_uring_sqe), sq_entries));
- return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+ return pages;
}
static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
@@ -2767,7 +4122,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
for (j = 0; j < imu->nr_bvecs; j++)
- put_page(imu->bvec[j].bv_page);
+ put_user_page(imu->bvec[j].bv_page);
if (ctx->account_mem)
io_unaccount_mem(ctx->user, imu->nr_bvecs);
@@ -2911,10 +4266,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
* if we did partial map, or found file backed vmas,
* release any pages we did get
*/
- if (pret > 0) {
- for (j = 0; j < pret; j++)
- put_page(pages[j]);
- }
+ if (pret > 0)
+ put_user_pages(pages, pret);
if (ctx->account_mem)
io_unaccount_mem(ctx->user, nr_pages);
kvfree(imu->bvec);
@@ -3000,15 +4353,17 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
}
#endif
- io_mem_free(ctx->sq_ring);
+ io_mem_free(ctx->rings);
io_mem_free(ctx->sq_sqes);
- io_mem_free(ctx->cq_ring);
percpu_ref_exit(&ctx->refs);
if (ctx->account_mem)
io_unaccount_mem(ctx->user,
ring_pages(ctx->sq_entries, ctx->cq_entries));
free_uid(ctx->user);
+ put_cred(ctx->creds);
+ kfree(ctx->completions);
+ kmem_cache_free(req_cachep, ctx->fallback_req);
kfree(ctx);
}
@@ -3023,10 +4378,10 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
* io_commit_cqring
*/
smp_rmb();
- if (READ_ONCE(ctx->sq_ring->r.tail) - ctx->cached_sq_head !=
- ctx->sq_ring->ring_entries)
+ if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
+ ctx->rings->sq_ring_entries)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
+ if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
@@ -3045,9 +4400,17 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
percpu_ref_kill(&ctx->refs);
mutex_unlock(&ctx->uring_lock);
+ io_kill_timeouts(ctx);
io_poll_remove_all(ctx);
+
+ if (ctx->io_wq)
+ io_wq_cancel_all(ctx->io_wq);
+
io_iopoll_reap_events(ctx);
- wait_for_completion(&ctx->ctx_done);
+ /* if we failed setting up the ctx, we might not have any rings */
+ if (ctx->rings)
+ io_cqring_overflow_flush(ctx, true);
+ wait_for_completion(&ctx->completions[0]);
io_ring_ctx_free(ctx);
}
@@ -3060,37 +4423,123 @@ static int io_uring_release(struct inode *inode, struct file *file)
return 0;
}
-static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+{
+ struct io_kiocb *req;
+ DEFINE_WAIT(wait);
+
+ while (!list_empty_careful(&ctx->inflight_list)) {
+ struct io_kiocb *cancel_req = NULL;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
+ if (req->work.files != files)
+ continue;
+ /* req is being completed, ignore */
+ if (!refcount_inc_not_zero(&req->refs))
+ continue;
+ cancel_req = req;
+ break;
+ }
+ if (cancel_req)
+ prepare_to_wait(&ctx->inflight_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&ctx->inflight_lock);
+
+ /* We need to keep going until we don't find a matching req */
+ if (!cancel_req)
+ break;
+
+ io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
+ io_put_req(cancel_req);
+ schedule();
+ }
+ finish_wait(&ctx->inflight_wait, &wait);
+}
+
+static int io_uring_flush(struct file *file, void *data)
{
- loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
- unsigned long sz = vma->vm_end - vma->vm_start;
struct io_ring_ctx *ctx = file->private_data;
- unsigned long pfn;
+
+ io_uring_cancel_files(ctx, data);
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
+ io_cqring_overflow_flush(ctx, true);
+ io_wq_cancel_all(ctx->io_wq);
+ }
+ return 0;
+}
+
+static void *io_uring_validate_mmap_request(struct file *file,
+ loff_t pgoff, size_t sz)
+{
+ struct io_ring_ctx *ctx = file->private_data;
+ loff_t offset = pgoff << PAGE_SHIFT;
struct page *page;
void *ptr;
switch (offset) {
case IORING_OFF_SQ_RING:
- ptr = ctx->sq_ring;
+ case IORING_OFF_CQ_RING:
+ ptr = ctx->rings;
break;
case IORING_OFF_SQES:
ptr = ctx->sq_sqes;
break;
- case IORING_OFF_CQ_RING:
- ptr = ctx->cq_ring;
- break;
default:
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
page = virt_to_head_page(ptr);
- if (sz > (PAGE_SIZE << compound_order(page)))
- return -EINVAL;
+ if (sz > page_size(page))
+ return ERR_PTR(-EINVAL);
+
+ return ptr;
+}
+
+#ifdef CONFIG_MMU
+
+static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t sz = vma->vm_end - vma->vm_start;
+ unsigned long pfn;
+ void *ptr;
+
+ ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
+#else /* !CONFIG_MMU */
+
+static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
+}
+
+static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
+{
+ return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
+}
+
+static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ void *ptr;
+
+ ptr = io_uring_validate_mmap_request(file, pgoff, len);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ return (unsigned long) ptr;
+}
+
+#endif /* !CONFIG_MMU */
+
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
u32, min_complete, u32, flags, const sigset_t __user *, sig,
size_t, sigsz)
@@ -3121,19 +4570,22 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
* Just return the requested submit count, and wake the thread if
* we were asked to.
*/
+ ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) {
+ if (!list_empty_careful(&ctx->cq_overflow_list))
+ io_cqring_overflow_flush(ctx, false);
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sqo_wait);
submitted = to_submit;
- goto out_ctx;
- }
+ } else if (to_submit) {
+ struct mm_struct *cur_mm;
- ret = 0;
- if (to_submit) {
to_submit = min(to_submit, ctx->sq_entries);
-
mutex_lock(&ctx->uring_lock);
- submitted = io_ring_submit(ctx, to_submit);
+ /* already have mm, so io_submit_sqes() won't try to grab it */
+ cur_mm = ctx->sqo_mm;
+ submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
+ &cur_mm, false);
mutex_unlock(&ctx->uring_lock);
}
if (flags & IORING_ENTER_GETEVENTS) {
@@ -3142,16 +4594,13 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
min_complete = min(min_complete, ctx->cq_entries);
if (ctx->flags & IORING_SETUP_IOPOLL) {
- mutex_lock(&ctx->uring_lock);
ret = io_iopoll_check(ctx, &nr_events, min_complete);
- mutex_unlock(&ctx->uring_lock);
} else {
ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
}
}
-out_ctx:
- io_ring_drop_ctx_refs(ctx, 1);
+ percpu_ref_put(&ctx->refs);
out_fput:
fdput(f);
return submitted ? submitted : ret;
@@ -3159,7 +4608,12 @@ out_fput:
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
+ .flush = io_uring_flush,
.mmap = io_uring_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = io_uring_nommu_get_unmapped_area,
+ .mmap_capabilities = io_uring_nommu_mmap_capabilities,
+#endif
.poll = io_uring_poll,
.fasync = io_uring_fasync,
};
@@ -3167,37 +4621,42 @@ static const struct file_operations io_uring_fops = {
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
- struct io_sq_ring *sq_ring;
- struct io_cq_ring *cq_ring;
- size_t size;
+ struct io_rings *rings;
+ size_t size, sq_array_offset;
- sq_ring = io_mem_alloc(struct_size(sq_ring, array, p->sq_entries));
- if (!sq_ring)
+ size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
+ if (size == SIZE_MAX)
+ return -EOVERFLOW;
+
+ rings = io_mem_alloc(size);
+ if (!rings)
return -ENOMEM;
- ctx->sq_ring = sq_ring;
- sq_ring->ring_mask = p->sq_entries - 1;
- sq_ring->ring_entries = p->sq_entries;
- ctx->sq_mask = sq_ring->ring_mask;
- ctx->sq_entries = sq_ring->ring_entries;
+ ctx->rings = rings;
+ ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
+ rings->sq_ring_mask = p->sq_entries - 1;
+ rings->cq_ring_mask = p->cq_entries - 1;
+ rings->sq_ring_entries = p->sq_entries;
+ rings->cq_ring_entries = p->cq_entries;
+ ctx->sq_mask = rings->sq_ring_mask;
+ ctx->cq_mask = rings->cq_ring_mask;
+ ctx->sq_entries = rings->sq_ring_entries;
+ ctx->cq_entries = rings->cq_ring_entries;
size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
- if (size == SIZE_MAX)
+ if (size == SIZE_MAX) {
+ io_mem_free(ctx->rings);
+ ctx->rings = NULL;
return -EOVERFLOW;
+ }
ctx->sq_sqes = io_mem_alloc(size);
- if (!ctx->sq_sqes)
- return -ENOMEM;
-
- cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
- if (!cq_ring)
+ if (!ctx->sq_sqes) {
+ io_mem_free(ctx->rings);
+ ctx->rings = NULL;
return -ENOMEM;
+ }
- ctx->cq_ring = cq_ring;
- cq_ring->ring_mask = p->cq_entries - 1;
- cq_ring->ring_entries = p->cq_entries;
- ctx->cq_mask = cq_ring->ring_mask;
- ctx->cq_entries = cq_ring->ring_entries;
return 0;
}
@@ -3259,10 +4718,23 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
* Use twice as many entries for the CQ ring. It's possible for the
* application to drive a higher depth than the size of the SQ ring,
* since the sqes are only used at submission time. This allows for
- * some flexibility in overcommitting a bit.
+ * some flexibility in overcommitting a bit. If the application has
+ * set IORING_SETUP_CQSIZE, it will have passed in the desired number
+ * of CQ ring entries manually.
*/
p->sq_entries = roundup_pow_of_two(entries);
- p->cq_entries = 2 * p->sq_entries;
+ if (p->flags & IORING_SETUP_CQSIZE) {
+ /*
+ * If IORING_SETUP_CQSIZE is set, we do the same roundup
+ * to a power-of-two, if it isn't already. We do NOT impose
+ * any cq vs sq ring sizing.
+ */
+ if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES)
+ return -EINVAL;
+ p->cq_entries = roundup_pow_of_two(p->cq_entries);
+ } else {
+ p->cq_entries = 2 * p->sq_entries;
+ }
user = get_uid(current_user());
account_mem = !capable(CAP_IPC_LOCK);
@@ -3287,6 +4759,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
ctx->compat = in_compat_syscall();
ctx->account_mem = account_mem;
ctx->user = user;
+ ctx->creds = prepare_creds();
ret = io_allocate_scq_urings(ctx, p);
if (ret)
@@ -3296,26 +4769,33 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
if (ret)
goto err;
+ memset(&p->sq_off, 0, sizeof(p->sq_off));
+ p->sq_off.head = offsetof(struct io_rings, sq.head);
+ p->sq_off.tail = offsetof(struct io_rings, sq.tail);
+ p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
+ p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
+ p->sq_off.flags = offsetof(struct io_rings, sq_flags);
+ p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
+ p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
+
+ memset(&p->cq_off, 0, sizeof(p->cq_off));
+ p->cq_off.head = offsetof(struct io_rings, cq.head);
+ p->cq_off.tail = offsetof(struct io_rings, cq.tail);
+ p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
+ p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
+ p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
+ p->cq_off.cqes = offsetof(struct io_rings, cqes);
+
+ /*
+ * Install ring fd as the very last thing, so we don't risk someone
+ * having closed it before we finish setup
+ */
ret = io_uring_get_fd(ctx);
if (ret < 0)
goto err;
- memset(&p->sq_off, 0, sizeof(p->sq_off));
- p->sq_off.head = offsetof(struct io_sq_ring, r.head);
- p->sq_off.tail = offsetof(struct io_sq_ring, r.tail);
- p->sq_off.ring_mask = offsetof(struct io_sq_ring, ring_mask);
- p->sq_off.ring_entries = offsetof(struct io_sq_ring, ring_entries);
- p->sq_off.flags = offsetof(struct io_sq_ring, flags);
- p->sq_off.dropped = offsetof(struct io_sq_ring, dropped);
- p->sq_off.array = offsetof(struct io_sq_ring, array);
-
- memset(&p->cq_off, 0, sizeof(p->cq_off));
- p->cq_off.head = offsetof(struct io_cq_ring, r.head);
- p->cq_off.tail = offsetof(struct io_cq_ring, r.tail);
- p->cq_off.ring_mask = offsetof(struct io_cq_ring, ring_mask);
- p->cq_off.ring_entries = offsetof(struct io_cq_ring, ring_entries);
- p->cq_off.overflow = offsetof(struct io_cq_ring, overflow);
- p->cq_off.cqes = offsetof(struct io_cq_ring, cqes);
+ p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP;
+ trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
io_ring_ctx_wait_and_kill(ctx);
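A hedged sketch of the userspace side of the offsets published above: with IORING_FEAT_SINGLE_MMAP the SQ and CQ rings share one mapping, so a single mmap() at IORING_OFF_SQ_RING suffices. The helper below only wires up the SQ tail and array pointers and sizes the mapping for the SQ side to keep the example short.

#include <sys/mman.h>
#include <linux/io_uring.h>

static void *map_sq_ring(int ring_fd, const struct io_uring_params *p,
			 unsigned **sq_tail, unsigned **sq_array)
{
	size_t sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	char *ring;

	ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	if (ring == MAP_FAILED)
		return NULL;

	*sq_tail = (unsigned *)(ring + p->sq_off.tail);
	*sq_array = (unsigned *)(ring + p->sq_off.array);
	return ring;
}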
@@ -3341,7 +4821,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
}
if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
- IORING_SETUP_SQ_AFF))
+ IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
return -EINVAL;
ret = io_uring_create(entries, &p);
@@ -3385,7 +4865,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
* no new references will come in after we've killed the percpu ref.
*/
mutex_unlock(&ctx->uring_lock);
- wait_for_completion(&ctx->ctx_done);
+ wait_for_completion(&ctx->completions[0]);
mutex_lock(&ctx->uring_lock);
switch (opcode) {
@@ -3407,6 +4887,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_sqe_files_unregister(ctx);
break;
+ case IORING_REGISTER_FILES_UPDATE:
+ ret = io_sqe_files_update(ctx, arg, nr_args);
+ break;
case IORING_REGISTER_EVENTFD:
ret = -EINVAL;
if (nr_args != 1)
@@ -3425,7 +4908,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
}
/* bring the ctx back to life */
- reinit_completion(&ctx->ctx_done);
+ reinit_completion(&ctx->completions[0]);
percpu_ref_reinit(&ctx->refs);
return ret;
}
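To illustrate the new opcode wired up above, here is a hypothetical userspace call that swaps one slot of a previously registered file table; struct io_uring_files_update and IORING_REGISTER_FILES_UPDATE come from <linux/io_uring.h>, the slot and fd values are placeholders.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int swap_registered_file(int ring_fd, unsigned slot, int new_fd)
{
	int fds[1] = { new_fd };
	struct io_uring_files_update up = {
		.offset = slot,
		.fds = (unsigned long)fds,
	};

	/* nr_args is the number of fds that ->fds points to */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES_UPDATE, &up, 1);
}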
@@ -3450,6 +4933,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
mutex_lock(&ctx->uring_lock);
ret = __io_uring_register(ctx, opcode, arg, nr_args);
mutex_unlock(&ctx->uring_lock);
+ trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
+ ctx->cq_ev_fd != NULL, ret);
out_fput:
fdput(f);
return ret;
diff --git a/fs/ioctl.c b/fs/ioctl.c
index fef3a6bf7c78..2f5e4e5b97e1 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/capability.h>
+#include <linux/compat.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
@@ -174,10 +175,9 @@ static int fiemap_check_ranges(struct super_block *sb,
return 0;
}
-static int ioctl_fiemap(struct file *filp, unsigned long arg)
+static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
{
struct fiemap fiemap;
- struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
struct fiemap_extent_info fieinfo = { 0, };
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
@@ -244,7 +244,8 @@ fdput:
return ret;
}
-static long ioctl_file_clone_range(struct file *file, void __user *argp)
+static long ioctl_file_clone_range(struct file *file,
+ struct file_clone_range __user *argp)
{
struct file_clone_range args;
@@ -466,7 +467,7 @@ EXPORT_SYMBOL(generic_block_fiemap);
* Only the l_start, l_len and l_whence fields of the 'struct space_resv'
* are used here, the rest are ignored.
*/
-int ioctl_preallocate(struct file *filp, void __user *argp)
+int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
{
struct inode *inode = file_inode(filp);
struct space_resv sr;
@@ -487,9 +488,39 @@ int ioctl_preallocate(struct file *filp, void __user *argp)
return -EINVAL;
}
- return vfs_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
+ return vfs_fallocate(filp, mode | FALLOC_FL_KEEP_SIZE, sr.l_start,
+ sr.l_len);
}
+/* on ia32 l_start is on a 32-bit boundary */
+#if defined CONFIG_COMPAT && defined(CONFIG_X86_64)
+/* just account for different alignment */
+int compat_ioctl_preallocate(struct file *file, int mode,
+ struct space_resv_32 __user *argp)
+{
+ struct inode *inode = file_inode(file);
+ struct space_resv_32 sr;
+
+ if (copy_from_user(&sr, argp, sizeof(sr)))
+ return -EFAULT;
+
+ switch (sr.l_whence) {
+ case SEEK_SET:
+ break;
+ case SEEK_CUR:
+ sr.l_start += file->f_pos;
+ break;
+ case SEEK_END:
+ sr.l_start += i_size_read(inode);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return vfs_fallocate(file, mode | FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
+}
+#endif
+
static int file_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -503,7 +534,12 @@ static int file_ioctl(struct file *filp, unsigned int cmd,
return put_user(i_size_read(inode) - filp->f_pos, p);
case FS_IOC_RESVSP:
case FS_IOC_RESVSP64:
- return ioctl_preallocate(filp, p);
+ return ioctl_preallocate(filp, 0, p);
+ case FS_IOC_UNRESVSP:
+ case FS_IOC_UNRESVSP64:
+ return ioctl_preallocate(filp, FALLOC_FL_PUNCH_HOLE, p);
+ case FS_IOC_ZERO_RANGE:
+ return ioctl_preallocate(filp, FALLOC_FL_ZERO_RANGE, p);
}
return vfs_ioctl(filp, cmd, arg);
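For readers unfamiliar with the legacy XFS ioctls being wired up here, the cases above are roughly equivalent to the following fallocate() calls from userspace (sketch only; the fd and byte range are whatever the application chooses):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

/* FS_IOC_UNRESVSP / FS_IOC_UNRESVSP64: punch a hole, keep i_size */
static int punch_like_unresvsp(int fd, off_t start, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 start, len);
}

/* FS_IOC_ZERO_RANGE: zero the range, keep i_size */
static int zero_like_zero_range(int fd, off_t start, off_t len)
{
	return fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			 start, len);
}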
@@ -584,9 +620,9 @@ static int ioctl_fsthaw(struct file *filp)
return thaw_super(sb);
}
-static int ioctl_file_dedupe_range(struct file *file, void __user *arg)
+static int ioctl_file_dedupe_range(struct file *file,
+ struct file_dedupe_range __user *argp)
{
- struct file_dedupe_range __user *argp = arg;
struct file_dedupe_range *same = NULL;
int ret;
unsigned long size;
@@ -635,7 +671,7 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg)
{
int error = 0;
- int __user *argp = (int __user *)arg;
+ void __user *argp = (void __user *)arg;
struct inode *inode = file_inode(filp);
switch (cmd) {
@@ -674,13 +710,13 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
break;
case FS_IOC_FIEMAP:
- return ioctl_fiemap(filp, arg);
+ return ioctl_fiemap(filp, argp);
case FIGETBSZ:
/* anon_bdev filesystems may not have a block size */
if (!inode->i_sb->s_blocksize)
return -EINVAL;
- return put_user(inode->i_sb->s_blocksize, argp);
+ return put_user(inode->i_sb->s_blocksize, (int __user *)argp);
case FICLONE:
return ioctl_file_clone(filp, arg, 0, 0, 0);
@@ -719,3 +755,37 @@ SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
return ksys_ioctl(fd, cmd, arg);
}
+
+#ifdef CONFIG_COMPAT
+/**
+ * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation
+ *
+ * This is not normally called as a function, but instead set in struct
+ * file_operations as
+ *
+ * .compat_ioctl = compat_ptr_ioctl,
+ *
+ * On most architectures, the compat_ptr_ioctl() just passes all arguments
+ * to the corresponding ->ioctl handler. The exception is arch/s390, where
+ * compat_ptr() clears the top bit of a 32-bit pointer value, so user space
+ * pointers to the second 2GB alias the first 2GB, as is the case for
+ * native 32-bit s390 user space.
+ *
+ * The compat_ptr_ioctl() function must therefore be used only with ioctl
+ * functions that either ignore the argument or pass a pointer to a
+ * compatible data type.
+ *
+ * If any ioctl command handled by fops->unlocked_ioctl passes a plain
+ * integer instead of a pointer, or any of the passed data types
+ * is incompatible between 32-bit and 64-bit architectures, a proper
+ * handler is required instead of compat_ptr_ioctl.
+ */
+long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ if (!file->f_op->unlocked_ioctl)
+ return -ENOIOCTLCMD;
+
+ return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+EXPORT_SYMBOL(compat_ptr_ioctl);
+#endif
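A minimal sketch of the intended use, as described in the comment above; the "mydrv" names are placeholders, and only the .compat_ioctl assignment is the point:

#include <linux/fs.h>
#include <linux/module.h>

static long mydrv_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	/* handle commands whose argument is a pointer to a compatible type */
	(void)argp;
	return -ENOTTY;
}

static const struct file_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= mydrv_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};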
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index 2d165388d952..eef2722d93a1 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -1,15 +1,17 @@
-# SPDX-License-Identifier: GPL-2.0-or-newer
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (c) 2019 Oracle.
# All Rights Reserved.
#
-obj-$(CONFIG_FS_IOMAP) += iomap.o
-iomap-y += \
- apply.o \
- buffered-io.o \
- direct-io.o \
- fiemap.o \
- seek.o
+ccflags-y += -I $(srctree)/$(src) # needed for trace events
+
+obj-$(CONFIG_FS_IOMAP) += iomap.o
+iomap-y += trace.o \
+ apply.o \
+ buffered-io.o \
+ direct-io.o \
+ fiemap.o \
+ seek.o
iomap-$(CONFIG_SWAP) += swapfile.o
diff --git a/fs/iomap/apply.c b/fs/iomap/apply.c
index 54c02aecf3cd..76925b40b5fd 100644
--- a/fs/iomap/apply.c
+++ b/fs/iomap/apply.c
@@ -7,6 +7,7 @@
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
+#include "trace.h"
/*
* Execute an iomap write on a segment of the mapping that spans a
@@ -23,8 +24,12 @@ loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
- struct iomap iomap = { 0 };
+ struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap srcmap = { .type = IOMAP_HOLE };
loff_t written = 0, ret;
+ u64 end;
+
+ trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_);
/*
* Need to map a range from start position for length bytes. This can
@@ -38,7 +43,7 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
* expose transient stale data. If the reserve fails, we can safely
* back out at this point as there is nothing to undo.
*/
- ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
+ ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
if (ret)
return ret;
if (WARN_ON(iomap.offset > pos))
@@ -46,19 +51,34 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
if (WARN_ON(iomap.length == 0))
return -EIO;
+ trace_iomap_apply_dstmap(inode, &iomap);
+ if (srcmap.type != IOMAP_HOLE)
+ trace_iomap_apply_srcmap(inode, &srcmap);
+
/*
* Cut down the length to the one actually provided by the filesystem,
* as it might not be able to give us the whole size that we requested.
*/
- if (iomap.offset + iomap.length < pos + length)
- length = iomap.offset + iomap.length - pos;
+ end = iomap.offset + iomap.length;
+ if (srcmap.type != IOMAP_HOLE)
+ end = min(end, srcmap.offset + srcmap.length);
+ if (pos + length > end)
+ length = end - pos;
/*
- * Now that we have guaranteed that the space allocation will succeed.
+ * Now that we have guaranteed that the space allocation will succeed,
* we can do the copy-in page by page without having to worry about
* failures exposing transient data.
+ *
+ * To support COW operations, we read in data for partially written blocks
+ * from the srcmap if the file system filled it in. In that case the length
+ * needs to be limited to the earlier of the ends of the iomaps.
+ * If the file system did not provide a srcmap we pass in the normal
+ * iomap into the actors so that they don't need to have special
+ * handling for the two cases.
*/
- written = actor(inode, pos, length, data, &iomap);
+ written = actor(inode, pos, length, data, &iomap,
+ srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
/*
* Now the data has been copied, commit the range we've copied. This
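A rough sketch of what a copy-on-write filesystem's ->iomap_begin might do with the new srcmap argument; myfs_range_is_shared and the disk addresses are invented for illustration. The idea is that srcmap describes where the existing data lives, while iomap describes where new writes should go.

#include <linux/iomap.h>

static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	/* invented disk locations, for the sketch only */
	u64 shared_addr = 0x100000, cow_addr = 0x200000;

	if ((flags & IOMAP_WRITE) && myfs_range_is_shared(inode, pos, length)) {
		/* old, shared blocks: partial blocks are read from here */
		srcmap->type = IOMAP_MAPPED;
		srcmap->addr = shared_addr + pos;
		srcmap->offset = pos;
		srcmap->length = length;
		srcmap->bdev = inode->i_sb->s_bdev;

		/* new allocation: buffered writes land here */
		iomap->flags = IOMAP_F_SHARED;
		iomap->addr = cow_addr + pos;
	} else {
		/* not shared: leave srcmap as a hole, the actor reuses iomap */
		iomap->addr = shared_addr + pos;
	}
	iomap->type = IOMAP_MAPPED;
	iomap->offset = pos;
	iomap->length = length;
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}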
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e25901ae3ff4..d33c7bc5ee92 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2018 Christoph Hellwig.
+ * Copyright (C) 2016-2019 Christoph Hellwig.
*/
#include <linux/module.h>
#include <linux/compiler.h>
@@ -12,13 +12,34 @@
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
+#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
+#include "trace.h"
#include "../internal.h"
+/*
+ * Structure allocated for each page when block size < PAGE_SIZE to track
+ * sub-page uptodate status and I/O completions.
+ */
+struct iomap_page {
+ atomic_t read_count;
+ atomic_t write_count;
+ DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+};
+
+static inline struct iomap_page *to_iomap_page(struct page *page)
+{
+ if (page_has_private(page))
+ return (struct iomap_page *)page_private(page);
+ return NULL;
+}
+
+static struct bio_set iomap_ioend_bioset;
+
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
@@ -203,9 +224,17 @@ iomap_read_inline_data(struct inode *inode, struct page *page,
SetPageUptodate(page);
}
+static inline bool iomap_block_needs_zeroing(struct inode *inode,
+ struct iomap *iomap, loff_t pos)
+{
+ return iomap->type != IOMAP_MAPPED ||
+ (iomap->flags & IOMAP_F_NEW) ||
+ pos >= i_size_read(inode);
+}
+
static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+ struct iomap *iomap, struct iomap *srcmap)
{
struct iomap_readpage_ctx *ctx = data;
struct page *page = ctx->cur_page;
@@ -226,7 +255,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (plen == 0)
goto done;
- if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
+ if (iomap_block_needs_zeroing(inode, iomap, pos)) {
zero_user(page, poff, plen);
iomap_set_range_uptodate(page, poff, plen);
goto done;
@@ -293,6 +322,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
unsigned poff;
loff_t ret;
+ trace_iomap_readpage(page->mapping->host, 1);
+
for (poff = 0; poff < PAGE_SIZE; poff += ret) {
ret = iomap_apply(inode, page_offset(page) + poff,
PAGE_SIZE - poff, 0, ops, &ctx,
@@ -351,7 +382,7 @@ iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
struct iomap_readpage_ctx *ctx = data;
loff_t done, ret;
@@ -371,7 +402,7 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
ctx->cur_page_in_bio = false;
}
ret = iomap_readpage_actor(inode, pos + done, length - done,
- ctx, iomap);
+ ctx, iomap, srcmap);
}
return done;
@@ -389,6 +420,8 @@ iomap_readpages(struct address_space *mapping, struct list_head *pages,
loff_t last = page_offset(list_entry(pages->next, struct page, lru));
loff_t length = last - pos + PAGE_SIZE, ret = 0;
+ trace_iomap_readpages(mapping->host, nr_pages);
+
while (length > 0) {
ret = iomap_apply(mapping->host, pos, length, 0, ops,
&ctx, iomap_readpages_actor);
@@ -455,6 +488,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
+ trace_iomap_releasepage(page->mapping->host, page, 0, 0);
+
/*
* mm accommodates an old ext3 case where clean pages might not have had
* the dirty bit cleared. Thus, it can send actual dirty pages to
@@ -470,6 +505,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
+ trace_iomap_invalidatepage(page->mapping->host, page, offset, len);
+
/*
* If we are invalidating the entire page, clear the dirty state from it
* and release it to avoid unnecessary buildup of the LRU.
@@ -511,6 +548,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
+enum {
+ IOMAP_WRITE_F_UNSHARE = (1 << 0),
+};
+
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
@@ -525,19 +566,12 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
}
static int
-iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
- unsigned poff, unsigned plen, unsigned from, unsigned to,
- struct iomap *iomap)
+iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
+ unsigned plen, struct iomap *iomap)
{
struct bio_vec bvec;
struct bio bio;
- if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
- zero_user_segments(page, poff, from, to, poff + plen);
- iomap_set_range_uptodate(page, poff, plen);
- return 0;
- }
-
bio_init(&bio, &bvec, 1);
bio.bi_opf = REQ_OP_READ;
bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
@@ -547,15 +581,15 @@ iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
}
static int
-__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
- struct page *page, struct iomap *iomap)
+__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
+ struct page *page, struct iomap *srcmap)
{
struct iomap_page *iop = iomap_page_create(inode, page);
loff_t block_size = i_blocksize(inode);
loff_t block_start = pos & ~(block_size - 1);
loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
unsigned from = offset_in_page(pos), to = from + len, poff, plen;
- int status = 0;
+ int status;
if (PageUptodate(page))
return 0;
@@ -566,29 +600,39 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
if (plen == 0)
break;
- if ((from > poff && from < poff + plen) ||
- (to > poff && to < poff + plen)) {
- status = iomap_read_page_sync(inode, block_start, page,
- poff, plen, from, to, iomap);
- if (status)
- break;
+ if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
+ (from <= poff || from >= poff + plen) &&
+ (to <= poff || to >= poff + plen))
+ continue;
+
+ if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
+ if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
+ return -EIO;
+ zero_user_segments(page, poff, from, to, poff + plen);
+ iomap_set_range_uptodate(page, poff, plen);
+ continue;
}
+ status = iomap_read_page_sync(block_start, page, poff, plen,
+ srcmap);
+ if (status)
+ return status;
} while ((block_start += plen) < block_end);
- return status;
+ return 0;
}
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, struct iomap *iomap)
+ struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
{
const struct iomap_page_ops *page_ops = iomap->page_ops;
- pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int status = 0;
BUG_ON(pos + len > iomap->offset + iomap->length);
+ if (srcmap != iomap)
+ BUG_ON(pos + len > srcmap->offset + srcmap->length);
if (fatal_signal_pending(current))
return -EINTR;
@@ -599,18 +643,20 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
return status;
}
- page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
+ page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
+ AOP_FLAG_NOFS);
if (!page) {
status = -ENOMEM;
goto out_no_page;
}
- if (iomap->type == IOMAP_INLINE)
- iomap_read_inline_data(inode, page, iomap);
+ if (srcmap->type == IOMAP_INLINE)
+ iomap_read_inline_data(inode, page, srcmap);
else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
- status = __block_write_begin_int(page, pos, len, NULL, iomap);
+ status = __block_write_begin_int(page, pos, len, NULL, srcmap);
else
- status = __iomap_write_begin(inode, pos, len, page, iomap);
+ status = __iomap_write_begin(inode, pos, len, flags, page,
+ srcmap);
if (unlikely(status))
goto out_unlock;
@@ -656,7 +702,7 @@ EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page, struct iomap *iomap)
+ unsigned copied, struct page *page)
{
flush_dcache_page(page);
@@ -696,20 +742,20 @@ iomap_write_end_inline(struct inode *inode, struct page *page,
}
static int
-iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page, struct iomap *iomap)
+iomap_write_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied,
+ struct page *page, struct iomap *iomap, struct iomap *srcmap)
{
const struct iomap_page_ops *page_ops = iomap->page_ops;
loff_t old_size = inode->i_size;
int ret;
- if (iomap->type == IOMAP_INLINE) {
+ if (srcmap->type == IOMAP_INLINE) {
ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
- } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+ } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
page, NULL);
} else {
- ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
+ ret = __iomap_write_end(inode, pos, len, copied, page);
}
/*
@@ -736,12 +782,11 @@ iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+ struct iomap *iomap, struct iomap *srcmap)
{
struct iov_iter *i = data;
long status = 0;
ssize_t written = 0;
- unsigned int flags = AOP_FLAG_NOFS;
do {
struct page *page;
@@ -771,8 +816,8 @@ again:
break;
}
- status = iomap_write_begin(inode, pos, bytes, flags, &page,
- iomap);
+ status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
+ srcmap);
if (unlikely(status))
break;
@@ -783,8 +828,8 @@ again:
flush_dcache_page(page);
- status = iomap_write_end(inode, pos, bytes, copied, page,
- iomap);
+ status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
+ srcmap);
if (unlikely(status < 0))
break;
copied = status;
@@ -835,50 +880,32 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
-static struct page *
-__iomap_read_page(struct inode *inode, loff_t offset)
-{
- struct address_space *mapping = inode->i_mapping;
- struct page *page;
-
- page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
- if (IS_ERR(page))
- return page;
- if (!PageUptodate(page)) {
- put_page(page);
- return ERR_PTR(-EIO);
- }
- return page;
-}
-
static loff_t
-iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap, struct iomap *srcmap)
{
long status = 0;
ssize_t written = 0;
- do {
- struct page *page, *rpage;
- unsigned long offset; /* Offset into pagecache page */
- unsigned long bytes; /* Bytes to write to page */
-
- offset = offset_in_page(pos);
- bytes = min_t(loff_t, PAGE_SIZE - offset, length);
+ /* don't bother with blocks that are not shared to start with */
+ if (!(iomap->flags & IOMAP_F_SHARED))
+ return length;
+ /* don't bother with holes or unwritten extents */
+ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+ return length;
- rpage = __iomap_read_page(inode, pos);
- if (IS_ERR(rpage))
- return PTR_ERR(rpage);
+ do {
+ unsigned long offset = offset_in_page(pos);
+ unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
+ struct page *page;
status = iomap_write_begin(inode, pos, bytes,
- AOP_FLAG_NOFS, &page, iomap);
- put_page(rpage);
+ IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
if (unlikely(status))
return status;
- WARN_ON_ONCE(!PageUptodate(page));
-
- status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
+ status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
+ srcmap);
if (unlikely(status <= 0)) {
if (WARN_ON_ONCE(status == 0))
return -EIO;
@@ -898,14 +925,14 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
int
-iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops)
{
loff_t ret;
while (len) {
ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
- iomap_dirty_actor);
+ iomap_unshare_actor);
if (ret <= 0)
return ret;
pos += ret;
@@ -914,23 +941,22 @@ iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
return 0;
}
-EXPORT_SYMBOL_GPL(iomap_file_dirty);
+EXPORT_SYMBOL_GPL(iomap_file_unshare);
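/*
 * Sketch of a caller (not from this patch): a reflink-capable filesystem
 * would invoke the renamed helper from its FALLOC_FL_UNSHARE_RANGE path,
 * e.g.
 *	error = iomap_file_unshare(inode, offset, len, &myfs_iomap_ops);
 * where myfs_iomap_ops is that filesystem's struct iomap_ops.
 */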
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
- unsigned bytes, struct iomap *iomap)
+ unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
{
struct page *page;
int status;
- status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
- iomap);
+ status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
if (status)
return status;
zero_user(page, offset, bytes);
mark_page_accessed(page);
- return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
+ return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
@@ -942,14 +968,14 @@ static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
bool *did_zero = data;
loff_t written = 0;
int status;
/* already zeroed? we're done. */
- if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
+ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
return count;
do {
@@ -961,7 +987,8 @@ iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
if (IS_DAX(inode))
status = iomap_dax_zero(pos, offset, bytes, iomap);
else
- status = iomap_zero(inode, pos, offset, bytes, iomap);
+ status = iomap_zero(inode, pos, offset, bytes, iomap,
+ srcmap);
if (status < 0)
return status;
@@ -1011,7 +1038,7 @@ EXPORT_SYMBOL_GPL(iomap_truncate_page);
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
struct page *page = data;
int ret;
@@ -1040,20 +1067,19 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
lock_page(page);
size = i_size_read(inode);
- if ((page->mapping != inode->i_mapping) ||
- (page_offset(page) > size)) {
+ offset = page_offset(page);
+ if (page->mapping != inode->i_mapping || offset > size) {
/* We overload EFAULT to mean page got truncated */
ret = -EFAULT;
goto out_unlock;
}
/* page is wholly or partially inside EOF */
- if (((page->index + 1) << PAGE_SHIFT) > size)
+ if (offset > size - PAGE_SIZE)
length = offset_in_page(size);
else
length = PAGE_SIZE;
- offset = page_offset(page);
while (length > 0) {
ret = iomap_apply(inode, offset, length,
IOMAP_WRITE | IOMAP_FAULT, ops, page,
@@ -1071,3 +1097,551 @@ out_unlock:
return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
+
+static void
+iomap_finish_page_writeback(struct inode *inode, struct page *page,
+ int error)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (error) {
+ SetPageError(page);
+ mapping_set_error(inode->i_mapping, -EIO);
+ }
+
+ WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
+ WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
+
+ if (!iop || atomic_dec_and_test(&iop->write_count))
+ end_page_writeback(page);
+}
+
+/*
+ * We're now finished for good with this ioend structure. Update the page
+ * state, release holds on bios, and finally free up memory. Do not use the
+ * ioend after this.
+ */
+static void
+iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+{
+ struct inode *inode = ioend->io_inode;
+ struct bio *bio = &ioend->io_inline_bio;
+ struct bio *last = ioend->io_bio, *next;
+ u64 start = bio->bi_iter.bi_sector;
+ bool quiet = bio_flagged(bio, BIO_QUIET);
+
+ for (bio = &ioend->io_inline_bio; bio; bio = next) {
+ struct bio_vec *bv;
+ struct bvec_iter_all iter_all;
+
+ /*
+ * For the last bio, bi_private points to the ioend, so we
+ * need to explicitly end the iteration here.
+ */
+ if (bio == last)
+ next = NULL;
+ else
+ next = bio->bi_private;
+
+ /* walk each page on bio, ending page IO on them */
+ bio_for_each_segment_all(bv, bio, iter_all)
+ iomap_finish_page_writeback(inode, bv->bv_page, error);
+ bio_put(bio);
+ }
+
+ if (unlikely(error && !quiet)) {
+ printk_ratelimited(KERN_ERR
+"%s: writeback error on inode %lu, offset %lld, sector %llu",
+ inode->i_sb->s_id, inode->i_ino, ioend->io_offset,
+ start);
+ }
+}
+
+void
+iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+{
+ struct list_head tmp;
+
+ list_replace_init(&ioend->io_list, &tmp);
+ iomap_finish_ioend(ioend, error);
+
+ while (!list_empty(&tmp)) {
+ ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
+ list_del_init(&ioend->io_list);
+ iomap_finish_ioend(ioend, error);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_finish_ioends);
+
+/*
+ * We can merge two adjacent ioends if they have the same set of work to do.
+ */
+static bool
+iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
+{
+ if (ioend->io_bio->bi_status != next->io_bio->bi_status)
+ return false;
+ if ((ioend->io_flags & IOMAP_F_SHARED) ^
+ (next->io_flags & IOMAP_F_SHARED))
+ return false;
+ if ((ioend->io_type == IOMAP_UNWRITTEN) ^
+ (next->io_type == IOMAP_UNWRITTEN))
+ return false;
+ if (ioend->io_offset + ioend->io_size != next->io_offset)
+ return false;
+ return true;
+}
+
+void
+iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends,
+ void (*merge_private)(struct iomap_ioend *ioend,
+ struct iomap_ioend *next))
+{
+ struct iomap_ioend *next;
+
+ INIT_LIST_HEAD(&ioend->io_list);
+
+ while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
+ io_list))) {
+ if (!iomap_ioend_can_merge(ioend, next))
+ break;
+ list_move_tail(&next->io_list, &ioend->io_list);
+ ioend->io_size += next->io_size;
+ if (next->io_private && merge_private)
+ merge_private(ioend, next);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
+
+static int
+iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
+ struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
+
+ if (ia->io_offset < ib->io_offset)
+ return -1;
+ if (ia->io_offset > ib->io_offset)
+ return 1;
+ return 0;
+}
+
+void
+iomap_sort_ioends(struct list_head *ioend_list)
+{
+ list_sort(NULL, ioend_list, iomap_ioend_compare);
+}
+EXPORT_SYMBOL_GPL(iomap_sort_ioends);
+
+static void iomap_writepage_end_bio(struct bio *bio)
+{
+ struct iomap_ioend *ioend = bio->bi_private;
+
+ iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
+}
+
+/*
+ * Submit the final bio for an ioend.
+ *
+ * If @error is non-zero, it means that we have a situation where some part of
+ * the submission process has failed after we have marked paged for writeback
+ * and unlocked them. In this situation, we need to fail the bio instead of
+ * submitting it. This typically only happens on a filesystem shutdown.
+ */
+static int
+iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
+ int error)
+{
+ ioend->io_bio->bi_private = ioend;
+ ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
+
+ if (wpc->ops->prepare_ioend)
+ error = wpc->ops->prepare_ioend(ioend, error);
+ if (error) {
+ /*
+ * If we are failing the IO now, just mark the ioend with an
+ * error and finish it. This will run IO completion immediately
+ * as there is only one reference to the ioend at this point in
+ * time.
+ */
+ ioend->io_bio->bi_status = errno_to_blk_status(error);
+ bio_endio(ioend->io_bio);
+ return error;
+ }
+
+ submit_bio(ioend->io_bio);
+ return 0;
+}
+
+static struct iomap_ioend *
+iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
+ loff_t offset, sector_t sector, struct writeback_control *wbc)
+{
+ struct iomap_ioend *ioend;
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
+ bio_set_dev(bio, wpc->iomap.bdev);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
+ bio->bi_write_hint = inode->i_write_hint;
+ wbc_init_bio(wbc, bio);
+
+ ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
+ INIT_LIST_HEAD(&ioend->io_list);
+ ioend->io_type = wpc->iomap.type;
+ ioend->io_flags = wpc->iomap.flags;
+ ioend->io_inode = inode;
+ ioend->io_size = 0;
+ ioend->io_offset = offset;
+ ioend->io_private = NULL;
+ ioend->io_bio = bio;
+ return ioend;
+}
+
+/*
+ * Allocate a new bio, and chain the old bio to the new one.
+ *
+ * Note that we have to perform the chaining in this unintuitive order
+ * so that the bi_private linkage is set up in the right direction for the
+ * traversal in iomap_finish_ioend().
+ */
+static struct bio *
+iomap_chain_bio(struct bio *prev)
+{
+ struct bio *new;
+
+ new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
+ bio_copy_dev(new, prev);/* also copies over blkcg information */
+ new->bi_iter.bi_sector = bio_end_sector(prev);
+ new->bi_opf = prev->bi_opf;
+ new->bi_write_hint = prev->bi_write_hint;
+
+ bio_chain(prev, new);
+ bio_get(prev); /* for iomap_finish_ioend */
+ submit_bio(prev);
+ return new;
+}
+
+static bool
+iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
+ sector_t sector)
+{
+ if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
+ (wpc->ioend->io_flags & IOMAP_F_SHARED))
+ return false;
+ if (wpc->iomap.type != wpc->ioend->io_type)
+ return false;
+ if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
+ return false;
+ if (sector != bio_end_sector(wpc->ioend->io_bio))
+ return false;
+ return true;
+}
+
+/*
+ * Test to see if we have an existing ioend structure that we could append to
+ * first, otherwise finish off the current ioend and start another.
+ */
+static void
+iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
+ struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
+ struct writeback_control *wbc, struct list_head *iolist)
+{
+ sector_t sector = iomap_sector(&wpc->iomap, offset);
+ unsigned len = i_blocksize(inode);
+ unsigned poff = offset & (PAGE_SIZE - 1);
+ bool merged, same_page = false;
+
+ if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
+ if (wpc->ioend)
+ list_add(&wpc->ioend->io_list, iolist);
+ wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
+ }
+
+ merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
+ &same_page);
+ if (iop && !same_page)
+ atomic_inc(&iop->write_count);
+
+ if (!merged) {
+ if (bio_full(wpc->ioend->io_bio, len)) {
+ wpc->ioend->io_bio =
+ iomap_chain_bio(wpc->ioend->io_bio);
+ }
+ bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ }
+
+ wpc->ioend->io_size += len;
+ wbc_account_cgroup_owner(wbc, page, len);
+}
+
+/*
+ * We implement an immediate ioend submission policy here to avoid needing to
+ * chain multiple ioends and hence nest mempool allocations which can violate
+ * forward progress guarantees we need to provide. The current ioend we are
+ * adding blocks to is cached on the writepage context, and if the new block
+ * does not append to the cached ioend it will create a new ioend and cache that
+ * instead.
+ *
+ * If a new ioend is created and cached, the old ioend is returned and queued
+ * locally for submission once the entire page is processed or an error has been
+ * detected. While ioends are submitted immediately after they are completed,
+ * batching optimisations are provided by higher level block plugging.
+ *
+ * At the end of a writeback pass, there will be a cached ioend remaining on the
+ * writepage context that the caller will need to submit.
+ */
+static int
+iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ struct writeback_control *wbc, struct inode *inode,
+ struct page *page, u64 end_offset)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+ struct iomap_ioend *ioend, *next;
+ unsigned len = i_blocksize(inode);
+ u64 file_offset; /* file offset of page */
+ int error = 0, count = 0, i;
+ LIST_HEAD(submit_list);
+
+ WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
+ WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
+
+ /*
+ * Walk through the page to find areas to write back. If we run off the
+ * end of the current map or find the current map invalid, grab a new
+ * one.
+ */
+ for (i = 0, file_offset = page_offset(page);
+ i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
+ i++, file_offset += len) {
+ if (iop && !test_bit(i, iop->uptodate))
+ continue;
+
+ error = wpc->ops->map_blocks(wpc, inode, file_offset);
+ if (error)
+ break;
+ if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
+ continue;
+ if (wpc->iomap.type == IOMAP_HOLE)
+ continue;
+ iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+ &submit_list);
+ count++;
+ }
+
+ WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
+ WARN_ON_ONCE(!PageLocked(page));
+ WARN_ON_ONCE(PageWriteback(page));
+
+ /*
+ * We cannot cancel the ioend directly here on error. We may have
+ * already set other pages under writeback and hence we have to run I/O
+ * completion to mark the error state of the pages under writeback
+ * appropriately.
+ */
+ if (unlikely(error)) {
+ if (!count) {
+ /*
+ * If the current page hasn't been added to ioend, it
+ * won't be affected by I/O completions and we must
+ * discard and unlock it right here.
+ */
+ if (wpc->ops->discard_page)
+ wpc->ops->discard_page(page);
+ ClearPageUptodate(page);
+ unlock_page(page);
+ goto done;
+ }
+
+ /*
+ * If the page was not fully cleaned, we need to ensure that the
+ * higher layers come back to it correctly. That means we need
+ * to keep the page dirty, and for WB_SYNC_ALL writeback we need
+ * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
+ * so another attempt to write this page in this writeback sweep
+ * will be made.
+ */
+ set_page_writeback_keepwrite(page);
+ } else {
+ clear_page_dirty_for_io(page);
+ set_page_writeback(page);
+ }
+
+ unlock_page(page);
+
+ /*
+ * Preserve the original error if there was one, otherwise catch
+ * submission errors here and propagate into subsequent ioend
+ * submissions.
+ */
+ list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
+ int error2;
+
+ list_del_init(&ioend->io_list);
+ error2 = iomap_submit_ioend(wpc, ioend, error);
+ if (error2 && !error)
+ error = error2;
+ }
+
+ /*
+ * We can end up here with no error and nothing to write only if we race
+ * with a partial page truncate on a sub-page block sized filesystem.
+ */
+ if (!count)
+ end_page_writeback(page);
+done:
+ mapping_set_error(page->mapping, error);
+ return error;
+}
+
+/*
+ * Write out a dirty page.
+ *
+ * For delalloc space on the page we need to allocate space and flush it.
+ * For unwritten space on the page we need to start the conversion to
+ * regular allocated space.
+ */
+static int
+iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
+{
+ struct iomap_writepage_ctx *wpc = data;
+ struct inode *inode = page->mapping->host;
+ pgoff_t end_index;
+ u64 end_offset;
+ loff_t offset;
+
+ trace_iomap_writepage(inode, page, 0, 0);
+
+ /*
+ * Refuse to write the page out if we are called from reclaim context.
+ *
+ * This avoids stack overflows when called from deeply used stacks in
+ * random callers for direct reclaim or memcg reclaim. We explicitly
+ * allow reclaim from kswapd as the stack usage there is relatively low.
+ *
+ * This should never happen except in the case of a VM regression so
+ * warn about it.
+ */
+ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
+ PF_MEMALLOC))
+ goto redirty;
+
+ /*
+ * Given that we do not allow direct reclaim to call us, we should
+ * never be called in a recursive filesystem reclaim context.
+ */
+ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
+ goto redirty;
+
+ /*
+ * Is this page beyond the end of the file?
+ *
+ * The page index is less than the end_index, adjust the end_offset
+ * to the highest offset that this page should represent.
+ * -----------------------------------------------------
+ * | file mapping | <EOF> |
+ * -----------------------------------------------------
+ * | Page ... | Page N-2 | Page N-1 | Page N | |
+ * ^--------------------------------^----------|--------
+ * | desired writeback range | see else |
+ * ---------------------------------^------------------|
+ */
+ offset = i_size_read(inode);
+ end_index = offset >> PAGE_SHIFT;
+ if (page->index < end_index)
+ end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
+ else {
+ /*
+ * Check whether the page to write out is beyond or straddles
+ * i_size or not.
+ * -------------------------------------------------------
+ * | file mapping | <EOF> |
+ * -------------------------------------------------------
+ * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
+ * ^--------------------------------^-----------|---------
+ * | | Straddles |
+ * ---------------------------------^-----------|--------|
+ */
+ unsigned offset_into_page = offset & (PAGE_SIZE - 1);
+
+ /*
+ * Skip the page if it is fully outside i_size, e.g. due to a
+ * truncate operation that is in progress. We must redirty the
+ * page so that reclaim stops reclaiming it. Otherwise
+ * iomap_vm_releasepage() is called on it and gets confused.
+ *
+ * Note that end_index is an unsigned long; it would overflow
+ * if the given offset is greater than 16TB on a 32-bit system,
+ * and if we checked whether the page is fully outside i_size
+ * via "if (page->index >= end_index + 1)", "end_index + 1"
+ * would evaluate to 0. The page would then be redirtied and
+ * written out repeatedly, resulting in an infinite loop; the
+ * user program performing this operation would hang. Instead,
+ * we verify this situation by checking whether the page to
+ * write is totally beyond i_size or whether its offset is
+ * just equal to the EOF.
+ */
+ if (page->index > end_index ||
+ (page->index == end_index && offset_into_page == 0))
+ goto redirty;
+
+ /*
+ * The page straddles i_size. It must be zeroed out on each
+ * and every writepage invocation because it may be mmapped.
+ * "A file is mapped in multiples of the page size. For a file
+ * that is not a multiple of the page size, the remaining
+ * memory is zeroed when mapped, and writes to that region are
+ * not written out to the file."
+ */
+ zero_user_segment(page, offset_into_page, PAGE_SIZE);
+
+ /* Adjust the end_offset to the end of file */
+ end_offset = offset;
+ }
+
+ return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
+
+redirty:
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
+ return 0;
+}
+
+int
+iomap_writepage(struct page *page, struct writeback_control *wbc,
+ struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops)
+{
+ int ret;
+
+ wpc->ops = ops;
+ ret = iomap_do_writepage(page, wbc, wpc);
+ if (!wpc->ioend)
+ return ret;
+ return iomap_submit_ioend(wpc, wpc->ioend, ret);
+}
+EXPORT_SYMBOL_GPL(iomap_writepage);
+
+int
+iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
+ struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops)
+{
+ int ret;
+
+ wpc->ops = ops;
+ ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
+ if (!wpc->ioend)
+ return ret;
+ return iomap_submit_ioend(wpc, wpc->ioend, ret);
+}
+EXPORT_SYMBOL_GPL(iomap_writepages);
+
+static int __init iomap_init(void)
+{
+ return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
+ offsetof(struct iomap_ioend, io_inline_bio),
+ BIOSET_NEED_BVECS);
+}
+fs_initcall(iomap_init);
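To show how a filesystem would consume the new writeback machinery added in this file, here is a hypothetical glue layer (all myfs_* names are placeholders): supply a ->map_blocks callback in struct iomap_writeback_ops, keep a struct iomap_writepage_ctx on the stack, and call iomap_writepages() from ->writepages.

#include <linux/iomap.h>

static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	/* fill wpc->iomap with the extent covering @offset */
	return myfs_lookup_extent(inode, offset, &wpc->iomap);
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
};

static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
}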
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 10517cea9682..23837926c0c5 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -24,7 +24,7 @@
struct iomap_dio {
struct kiocb *iocb;
- iomap_dio_end_io_t *end_io;
+ const struct iomap_dio_ops *dops;
loff_t i_size;
loff_t size;
atomic_t ref;
@@ -72,18 +72,14 @@ static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
+ const struct iomap_dio_ops *dops = dio->dops;
struct kiocb *iocb = dio->iocb;
struct inode *inode = file_inode(iocb->ki_filp);
loff_t offset = iocb->ki_pos;
- ssize_t ret;
+ ssize_t ret = dio->error;
- if (dio->end_io) {
- ret = dio->end_io(iocb,
- dio->error ? dio->error : dio->size,
- dio->flags);
- } else {
- ret = dio->error;
- }
+ if (dops && dops->end_io)
+ ret = dops->end_io(iocb, dio->size, ret, dio->flags);
if (likely(!ret)) {
ret = dio->size;
@@ -101,9 +97,9 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
* one is a pretty crazy thing to do, so we don't support it 100%. If
* this invalidation fails, tough, the write still worked...
*
- * And this page cache invalidation has to be after dio->end_io(), as
- * some filesystems convert unwritten extents to real allocations in
- * end_io() when necessary, otherwise a racing buffer read would cache
+ * And this page cache invalidation has to be after ->end_io(), as some
+ * filesystems convert unwritten extents to real allocations in
+ * ->end_io() when necessary, otherwise a racing buffer read would cache
* zeros from unwritten extents.
*/
if (!dio->error &&
@@ -205,12 +201,12 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
unsigned int fs_block_size = i_blocksize(inode), pad;
unsigned int align = iov_iter_alignment(dio->submit.iter);
- struct iov_iter iter;
struct bio *bio;
bool need_zeroout = false;
bool use_fua = false;
int nr_pages, ret = 0;
size_t copied = 0;
+ size_t orig_count;
if ((pos | length | align) & ((1 << blkbits) - 1))
return -EINVAL;
@@ -240,15 +236,18 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
}
/*
- * Operate on a partial iter trimmed to the extent we were called for.
- * We'll update the iter in the dio once we're done with this extent.
+ * Save the original count and trim the iter to just the extent we
+ * are operating on right now. The iter will be re-expanded once
+ * we are done.
*/
- iter = *dio->submit.iter;
- iov_iter_truncate(&iter, length);
+ orig_count = iov_iter_count(dio->submit.iter);
+ iov_iter_truncate(dio->submit.iter, length);
- nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
- if (nr_pages <= 0)
- return nr_pages;
+ nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
+ if (nr_pages <= 0) {
+ ret = nr_pages;
+ goto out;
+ }
if (need_zeroout) {
/* zero out from the start of the block to the write offset */
@@ -261,7 +260,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
size_t n;
if (dio->error) {
iov_iter_revert(dio->submit.iter, copied);
- return 0;
+ copied = ret = 0;
+ goto out;
}
bio = bio_alloc(GFP_KERNEL, nr_pages);
@@ -272,7 +272,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- ret = bio_iov_iter_get_pages(bio, &iter);
+ ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
if (unlikely(ret)) {
/*
* We have to stop part way through an IO. We must fall
@@ -298,13 +298,11 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
bio_set_pages_dirty(bio);
}
- iov_iter_advance(dio->submit.iter, n);
-
dio->size += n;
pos += n;
copied += n;
- nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
+ nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
iomap_dio_submit_bio(dio, iomap, bio);
} while (nr_pages);
@@ -322,7 +320,12 @@ zero_tail:
if (pad)
iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
}
- return copied ? copied : ret;
+out:
+ /* Undo iter limitation to current extent */
+ iov_iter_reexpand(dio->submit.iter, orig_count - copied);
+ if (copied)
+ return copied;
+ return ret;
}
static loff_t
@@ -362,7 +365,7 @@ iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
struct iomap_dio *dio = data;
@@ -396,15 +399,15 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
*/
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ bool wait_for_completion)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = file_inode(iocb->ki_filp);
size_t count = iov_iter_count(iter);
- loff_t pos = iocb->ki_pos, start = pos;
+ loff_t pos = iocb->ki_pos;
loff_t end = iocb->ki_pos + count - 1, ret = 0;
unsigned int flags = IOMAP_DIRECT;
- bool wait_for_completion = is_sync_kiocb(iocb);
struct blk_plug plug;
struct iomap_dio *dio;
@@ -413,6 +416,9 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (!count)
return 0;
+ if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
+ return -EIO;
+
dio = kmalloc(sizeof(*dio), GFP_KERNEL);
if (!dio)
return -ENOMEM;
@@ -421,7 +427,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
atomic_set(&dio->ref, 1);
dio->size = 0;
dio->i_size = i_size_read(inode);
- dio->end_io = end_io;
+ dio->dops = dops;
dio->error = 0;
dio->flags = 0;
@@ -434,7 +440,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (pos >= dio->i_size)
goto out_free_dio;
- if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
+ if (iter_is_iovec(iter))
dio->flags |= IOMAP_DIO_DIRTY;
} else {
flags |= IOMAP_WRITE;
@@ -455,14 +461,14 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
}
if (iocb->ki_flags & IOCB_NOWAIT) {
- if (filemap_range_has_page(mapping, start, end)) {
+ if (filemap_range_has_page(mapping, pos, end)) {
ret = -EAGAIN;
goto out_free_dio;
}
flags |= IOMAP_NOWAIT;
}
- ret = filemap_write_and_wait_range(mapping, start, end);
+ ret = filemap_write_and_wait_range(mapping, pos, end);
if (ret)
goto out_free_dio;
@@ -473,7 +479,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
* pretty crazy thing to do, so we don't support it 100%.
*/
ret = invalidate_inode_pages2_range(mapping,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT);
+ pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
if (ret)
dio_warn_stale_pagecache(iocb->ki_filp);
ret = 0;
@@ -501,8 +507,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
}
pos += ret;
- if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
+ if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
+ /*
+ * We only report that we've read data up to i_size.
+ * Revert iter to a state corresponding to that as
+ * some callers (such as splice code) rely on it.
+ */
+ iov_iter_revert(iter, pos - dio->i_size);
break;
+ }
} while ((count = iov_iter_count(iter)) > 0);
blk_finish_plug(&plug);
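For callers of the direct I/O code, the interface change above means the old end_io function pointer becomes a const struct iomap_dio_ops and wait_for_completion is now passed explicitly. A hypothetical conversion (myfs_* names are placeholders) might look like this:

static int myfs_dio_end_io(struct kiocb *iocb, ssize_t size, int error,
		unsigned flags)
{
	if (error)
		return error;
	/* e.g. convert unwritten extents covering the completed range here */
	return 0;
}

static const struct iomap_dio_ops myfs_dio_ops = {
	.end_io = myfs_dio_end_io,
};

static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	return iomap_dio_rw(iocb, to, &myfs_iomap_ops, &myfs_dio_ops,
			    is_sync_kiocb(iocb));
}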
diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c
index f26fdd36e383..bccf305ea9ce 100644
--- a/fs/iomap/fiemap.c
+++ b/fs/iomap/fiemap.c
@@ -44,7 +44,7 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+ struct iomap *iomap, struct iomap *srcmap)
{
struct fiemap_ctx *ctx = data;
loff_t ret = length;
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(iomap_fiemap);
static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
sector_t *bno = data, addr;
@@ -133,12 +133,16 @@ iomap_bmap(struct address_space *mapping, sector_t bno,
struct inode *inode = mapping->host;
loff_t pos = bno << inode->i_blkbits;
unsigned blocksize = i_blocksize(inode);
+ int ret;
if (filemap_write_and_wait(mapping))
return 0;
bno = 0;
- iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
+ ret = iomap_apply(inode, pos, blocksize, 0, ops, &bno,
+ iomap_bmap_actor);
+ if (ret)
+ return 0;
return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index c04bad4b2b43..89f61d93c0bc 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -119,7 +119,7 @@ out:
static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
switch (iomap->type) {
case IOMAP_UNWRITTEN:
@@ -165,7 +165,7 @@ EXPORT_SYMBOL_GPL(iomap_seek_hole);
static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
switch (iomap->type) {
case IOMAP_HOLE:
diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
index 152a230f668d..a648dbf6991e 100644
--- a/fs/iomap/swapfile.c
+++ b/fs/iomap/swapfile.c
@@ -76,7 +76,8 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
* distinction between written and unwritten extents.
*/
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
- loff_t count, void *data, struct iomap *iomap)
+ loff_t count, void *data, struct iomap *iomap,
+ struct iomap *srcmap)
{
struct iomap_swapfile_info *isi = data;
int error;
diff --git a/fs/iomap/trace.c b/fs/iomap/trace.c
new file mode 100644
index 000000000000..da217246b1a9
--- /dev/null
+++ b/fs/iomap/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Christoph Hellwig
+ */
+#include <linux/iomap.h>
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
new file mode 100644
index 000000000000..6dc227b8c47e
--- /dev/null
+++ b/fs/iomap/trace.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2009-2019 Christoph Hellwig
+ *
+ * NOTE: none of these tracepoints shall be considered a stable kernel ABI
+ * as they can change at any time.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iomap
+
+#if !defined(_IOMAP_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IOMAP_TRACE_H
+
+#include <linux/tracepoint.h>
+
+struct inode;
+
+DECLARE_EVENT_CLASS(iomap_readpage_class,
+ TP_PROTO(struct inode *inode, int nr_pages),
+ TP_ARGS(inode, nr_pages),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(int, nr_pages)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->nr_pages = nr_pages;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->nr_pages)
+)
+
+#define DEFINE_READPAGE_EVENT(name) \
+DEFINE_EVENT(iomap_readpage_class, name, \
+ TP_PROTO(struct inode *inode, int nr_pages), \
+ TP_ARGS(inode, nr_pages))
+DEFINE_READPAGE_EVENT(iomap_readpage);
+DEFINE_READPAGE_EVENT(iomap_readpages);
+
+DECLARE_EVENT_CLASS(iomap_page_class,
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
+ unsigned int len),
+ TP_ARGS(inode, page, off, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(pgoff_t, pgoff)
+ __field(loff_t, size)
+ __field(unsigned long, offset)
+ __field(unsigned int, length)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgoff = page_offset(page);
+ __entry->size = i_size_read(inode);
+ __entry->offset = off;
+ __entry->length = len;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
+ "length %x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->pgoff,
+ __entry->size,
+ __entry->offset,
+ __entry->length)
+)
+
+#define DEFINE_PAGE_EVENT(name) \
+DEFINE_EVENT(iomap_page_class, name, \
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
+ unsigned int len), \
+ TP_ARGS(inode, page, off, len))
+DEFINE_PAGE_EVENT(iomap_writepage);
+DEFINE_PAGE_EVENT(iomap_releasepage);
+DEFINE_PAGE_EVENT(iomap_invalidatepage);
+
+#define IOMAP_TYPE_STRINGS \
+ { IOMAP_HOLE, "HOLE" }, \
+ { IOMAP_DELALLOC, "DELALLOC" }, \
+ { IOMAP_MAPPED, "MAPPED" }, \
+ { IOMAP_UNWRITTEN, "UNWRITTEN" }, \
+ { IOMAP_INLINE, "INLINE" }
+
+#define IOMAP_FLAGS_STRINGS \
+ { IOMAP_WRITE, "WRITE" }, \
+ { IOMAP_ZERO, "ZERO" }, \
+ { IOMAP_REPORT, "REPORT" }, \
+ { IOMAP_FAULT, "FAULT" }, \
+ { IOMAP_DIRECT, "DIRECT" }, \
+ { IOMAP_NOWAIT, "NOWAIT" }
+
+#define IOMAP_F_FLAGS_STRINGS \
+ { IOMAP_F_NEW, "NEW" }, \
+ { IOMAP_F_DIRTY, "DIRTY" }, \
+ { IOMAP_F_SHARED, "SHARED" }, \
+ { IOMAP_F_MERGED, "MERGED" }, \
+ { IOMAP_F_BUFFER_HEAD, "BH" }, \
+ { IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" }
+
+DECLARE_EVENT_CLASS(iomap_class,
+ TP_PROTO(struct inode *inode, struct iomap *iomap),
+ TP_ARGS(inode, iomap),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(u64, addr)
+ __field(loff_t, offset)
+ __field(u64, length)
+ __field(u16, type)
+ __field(u16, flags)
+ __field(dev_t, bdev)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->addr = iomap->addr;
+ __entry->offset = iomap->offset;
+ __entry->length = iomap->length;
+ __entry->type = iomap->type;
+ __entry->flags = iomap->flags;
+ __entry->bdev = iomap->bdev ? iomap->bdev->bd_dev : 0;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr %lld offset %lld "
+ "length %llu type %s flags %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ MAJOR(__entry->bdev), MINOR(__entry->bdev),
+ __entry->addr,
+ __entry->offset,
+ __entry->length,
+ __print_symbolic(__entry->type, IOMAP_TYPE_STRINGS),
+ __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS))
+)
+
+#define DEFINE_IOMAP_EVENT(name) \
+DEFINE_EVENT(iomap_class, name, \
+ TP_PROTO(struct inode *inode, struct iomap *iomap), \
+ TP_ARGS(inode, iomap))
+DEFINE_IOMAP_EVENT(iomap_apply_dstmap);
+DEFINE_IOMAP_EVENT(iomap_apply_srcmap);
+
+TRACE_EVENT(iomap_apply,
+ TP_PROTO(struct inode *inode, loff_t pos, loff_t length,
+ unsigned int flags, const void *ops, void *actor,
+ unsigned long caller),
+ TP_ARGS(inode, pos, length, flags, ops, actor, caller),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(loff_t, pos)
+ __field(loff_t, length)
+ __field(unsigned int, flags)
+ __field(const void *, ops)
+ __field(void *, actor)
+ __field(unsigned long, caller)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pos = pos;
+ __entry->length = length;
+ __entry->flags = flags;
+ __entry->ops = ops;
+ __entry->actor = actor;
+ __entry->caller = caller;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx pos %lld length %lld flags %s (0x%x) "
+ "ops %ps caller %pS actor %ps",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->pos,
+ __entry->length,
+ __print_flags(__entry->flags, "|", IOMAP_FLAGS_STRINGS),
+ __entry->flags,
+ __entry->ops,
+ (void *)__entry->caller,
+ __entry->actor)
+);
+
+#endif /* _IOMAP_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
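As a hedged illustration (not part of the patch): once fs/iomap/trace.c defines CREATE_TRACE_POINTS and includes this header, each DEFINE_*_EVENT above expands into a callable trace_<name>() helper. A minimal sketch of a call site, with a hypothetical wrapper function, might look like this:

/* Illustrative only; the wrapper function below is not from the patch. */
#include "trace.h"

static void example_trace_readahead(struct inode *inode, int nr_pages)
{
	/* Generated by DEFINE_READPAGE_EVENT(iomap_readpages) above. */
	trace_iomap_readpages(inode, nr_pages);
}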
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 85a9093769a9..35768a63fb1d 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -10,7 +10,7 @@
*
* The following files are helpful:
*
- * Documentation/filesystems/nfs/Exporting
+ * Documentation/filesystems/nfs/exporting.rst
* fs/exportfs/expfs.c.
*/
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 9e30d8703735..62c0462dc89f 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -30,6 +30,9 @@
#include "isofs.h"
#include "zisofs.h"
+/* max tz offset is 13 hours */
+#define MAX_TZ_OFFSET (52*15*60)
+
#define BEQUIET
static int isofs_hashi(const struct dentry *parent, struct qstr *qstr);
@@ -801,6 +804,10 @@ root_found:
*/
s->s_maxbytes = 0x80000000000LL;
+ /* ECMA-119 timestamp from 1900/1/1 with tz offset */
+ s->s_time_min = mktime64(1900, 1, 1, 0, 0, 0) - MAX_TZ_OFFSET;
+ s->s_time_max = mktime64(U8_MAX+1900, 12, 31, 23, 59, 59) + MAX_TZ_OFFSET;
+
/* Set this for reference. Its not currently used except on write
which we don't have .. */
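A brief aside on the bound used above: the ECMA-119 long-form timestamp stores the year as a single byte offset from 1900 (0..255) and the timezone as a signed count of 15-minute intervals, nominally -48..+52, so the widest offset is 52 * 15 minutes = 13 hours. A standalone sketch of the same arithmetic (illustrative, not part of the patch):

#include <stdio.h>

int main(void)
{
	long max_tz_offset = 52 * 15 * 60;	/* seconds; matches MAX_TZ_OFFSET */

	printf("max tz offset: %ld s = %ld h\n",
	       max_tz_offset, max_tz_offset / 3600);	/* 46800 s = 13 h */
	return 0;
}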
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index a1909066bde6..8fff6677a5da 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -110,7 +110,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
int nblocks, space_left;
/* assert_spin_locked(&journal->j_state_lock); */
- nblocks = jbd2_space_needed(journal);
+ nblocks = journal->j_max_transaction_buffers;
while (jbd2_log_space_left(journal) < nblocks) {
write_unlock(&journal->j_state_lock);
mutex_lock_io(&journal->j_checkpoint_mutex);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 132fb92098c7..7f0b362b3842 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -482,10 +482,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (jh->b_committed_data) {
struct buffer_head *bh = jh2bh(jh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
jbd2_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
}
jbd2_journal_refile_buffer(journal, jh);
}
@@ -560,8 +560,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
stats.run.rs_logging = jiffies;
stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
stats.run.rs_logging);
- stats.run.rs_blocks =
- atomic_read(&commit_transaction->t_outstanding_credits);
+ stats.run.rs_blocks = commit_transaction->t_nr_buffers;
stats.run.rs_blocks_logged = 0;
J_ASSERT(commit_transaction->t_nr_buffers <=
@@ -642,8 +641,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
/*
* start_this_handle() uses t_outstanding_credits to determine
- * the free space in the log, but this counter is changed
- * by jbd2_journal_next_log_block() also.
+ * the free space in the log.
*/
atomic_dec(&commit_transaction->t_outstanding_credits);
@@ -727,7 +725,6 @@ start_journal_io:
submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
}
cond_resched();
- stats.run.rs_blocks_logged += bufs;
/* Force a new descriptor to be generated next
time round the loop. */
@@ -814,6 +811,7 @@ start_journal_io:
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;
/*
* The list contains temporary buffer heads created by
@@ -859,6 +857,7 @@ start_journal_io:
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
@@ -880,6 +879,7 @@ start_journal_io:
}
if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
+ stats.run.rs_blocks_logged++;
if (jbd2_has_feature_async_commit(journal) &&
journal->j_flags & JBD2_BARRIER) {
blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
@@ -888,6 +888,9 @@ start_journal_io:
if (err)
jbd2_journal_abort(journal, err);
+ WARN_ON_ONCE(
+ atomic_read(&commit_transaction->t_outstanding_credits) < 0);
+
/*
* Now disk caches for filesystem device are flushed so we are safe to
* erase checkpointed transactions from the log by updating journal
@@ -918,6 +921,7 @@ restart_loop:
transaction_t *cp_transaction;
struct buffer_head *bh;
int try_to_free = 0;
+ bool drop_ref;
jh = commit_transaction->t_forget;
spin_unlock(&journal->j_list_lock);
@@ -927,7 +931,7 @@ restart_loop:
* done with it.
*/
get_bh(bh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
/*
@@ -1022,8 +1026,10 @@ restart_loop:
try_to_free = 1;
}
JBUFFER_TRACE(jh, "refile or unfile buffer");
- __jbd2_journal_refile_buffer(jh);
- jbd_unlock_bh_state(bh);
+ drop_ref = __jbd2_journal_refile_buffer(jh);
+ spin_unlock(&jh->b_state_lock);
+ if (drop_ref)
+ jbd2_journal_put_journal_head(jh);
if (try_to_free)
release_buffer_page(bh); /* Drops bh reference */
else
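The jbd_lock_bh_state()/jbd_unlock_bh_state() conversions in this and the following files all follow the same pattern: journal_head state is now serialized by the journal_head's own b_state_lock (initialised in journal_alloc_journal_head() below) instead of the buffer_head bit lock. A hedged sketch of the new pattern, with a placeholder critical section:

/* Illustrative only; the helper and its caller are not from the patch. */
static void example_modify_jh_state(struct journal_head *jh)
{
	spin_lock(&jh->b_state_lock);
	/* ... examine or update jh->b_transaction, jh->b_jlist, etc. ... */
	spin_unlock(&jh->b_state_lock);
}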
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 953990eb70a9..5e408ee24a1a 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -89,8 +89,6 @@ EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
EXPORT_SYMBOL(jbd2_journal_invalidatepage);
EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
EXPORT_SYMBOL(jbd2_journal_force_commit);
-EXPORT_SYMBOL(jbd2_journal_inode_add_write);
-EXPORT_SYMBOL(jbd2_journal_inode_add_wait);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
@@ -365,7 +363,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
/* keep subsequent assertions sane */
atomic_set(&new_bh->b_count, 1);
- jbd_lock_bh_state(bh_in);
+ spin_lock(&jh_in->b_state_lock);
repeat:
/*
* If a new transaction has already done a buffer copy-out, then
@@ -407,13 +405,13 @@ repeat:
if (need_copy_out && !done_copy_out) {
char *tmp;
- jbd_unlock_bh_state(bh_in);
+ spin_unlock(&jh_in->b_state_lock);
tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
if (!tmp) {
brelse(new_bh);
return -ENOMEM;
}
- jbd_lock_bh_state(bh_in);
+ spin_lock(&jh_in->b_state_lock);
if (jh_in->b_frozen_data) {
jbd2_free(tmp, bh_in->b_size);
goto repeat;
@@ -466,7 +464,7 @@ repeat:
__jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
spin_unlock(&journal->j_list_lock);
set_buffer_shadow(bh_in);
- jbd_unlock_bh_state(bh_in);
+ spin_unlock(&jh_in->b_state_lock);
return do_escape | (done_copy_out << 1);
}
@@ -842,6 +840,7 @@ jbd2_journal_get_descriptor_buffer(transaction_t *transaction, int type)
bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
if (!bh)
return NULL;
+ atomic_dec(&transaction->t_outstanding_credits);
lock_buffer(bh);
memset(bh->b_data, 0, journal->j_blocksize);
header = (journal_header_t *)bh->b_data;
@@ -1100,6 +1099,16 @@ static void jbd2_stats_proc_exit(journal_t *journal)
remove_proc_entry(journal->j_devname, proc_jbd2_stats);
}
+/* Minimum size of descriptor tag */
+static int jbd2_min_tag_size(void)
+{
+ /*
+ * A tag with 32-bit block numbers does not use the last four bytes of
+ * the structure.
+ */
+ return sizeof(journal_block_tag_t) - 4;
+}
+
/*
* Management for journal control blocks: functions to create and
* destroy journal_t structures, and to initialise and read existing
@@ -1158,7 +1167,8 @@ static journal_t *journal_init_common(struct block_device *bdev,
journal->j_fs_dev = fs_dev;
journal->j_blk_offset = start;
journal->j_maxlen = len;
- n = journal->j_blocksize / sizeof(journal_block_tag_t);
+ /* We need enough buffers to write out a full descriptor block. */
+ n = journal->j_blocksize / jbd2_min_tag_size();
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
GFP_KERNEL);
@@ -1490,6 +1500,21 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
}
EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
+static int journal_revoke_records_per_block(journal_t *journal)
+{
+ int record_size;
+ int space = journal->j_blocksize - sizeof(jbd2_journal_revoke_header_t);
+
+ if (jbd2_has_feature_64bit(journal))
+ record_size = 8;
+ else
+ record_size = 4;
+
+ if (jbd2_journal_has_csum_v2or3(journal))
+ space -= sizeof(struct jbd2_journal_block_tail);
+ return space / record_size;
+}
+
/*
* Read the superblock for a given journal, performing initial
* validation of the format.
@@ -1598,6 +1623,8 @@ static int journal_get_superblock(journal_t *journal)
sizeof(sb->s_uuid));
}
+ journal->j_revoke_records_per_block =
+ journal_revoke_records_per_block(journal);
set_buffer_verified(bh);
return 0;
@@ -1918,6 +1945,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
sb->s_feature_ro_compat |= cpu_to_be32(ro);
sb->s_feature_incompat |= cpu_to_be32(incompat);
unlock_buffer(journal->j_sb_buffer);
+ journal->j_revoke_records_per_block =
+ journal_revoke_records_per_block(journal);
return 1;
#undef COMPAT_FEATURE_ON
@@ -1948,6 +1977,8 @@ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat,
sb->s_feature_compat &= ~cpu_to_be32(compat);
sb->s_feature_ro_compat &= ~cpu_to_be32(ro);
sb->s_feature_incompat &= ~cpu_to_be32(incompat);
+ journal->j_revoke_records_per_block =
+ journal_revoke_records_per_block(journal);
}
EXPORT_SYMBOL(jbd2_journal_clear_features);
@@ -2412,6 +2443,8 @@ static struct journal_head *journal_alloc_journal_head(void)
ret = kmem_cache_zalloc(jbd2_journal_head_cache,
GFP_NOFS | __GFP_NOFAIL);
}
+ if (ret)
+ spin_lock_init(&ret->b_state_lock);
return ret;
}
@@ -2531,17 +2564,23 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
J_ASSERT_BH(bh, buffer_jbd(bh));
J_ASSERT_BH(bh, jh2bh(jh) == bh);
BUFFER_TRACE(bh, "remove journal_head");
+
+ /* Unlink before dropping the lock */
+ bh->b_private = NULL;
+ jh->b_bh = NULL; /* debug, really */
+ clear_buffer_jbd(bh);
+}
+
+static void journal_release_journal_head(struct journal_head *jh, size_t b_size)
+{
if (jh->b_frozen_data) {
printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
- jbd2_free(jh->b_frozen_data, bh->b_size);
+ jbd2_free(jh->b_frozen_data, b_size);
}
if (jh->b_committed_data) {
printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
- jbd2_free(jh->b_committed_data, bh->b_size);
+ jbd2_free(jh->b_committed_data, b_size);
}
- bh->b_private = NULL;
- jh->b_bh = NULL; /* debug, really */
- clear_buffer_jbd(bh);
journal_free_journal_head(jh);
}
@@ -2559,9 +2598,11 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
if (!jh->b_jcount) {
__journal_remove_journal_head(bh);
jbd_unlock_bh_journal_head(bh);
+ journal_release_journal_head(jh, bh->b_size);
__brelse(bh);
- } else
+ } else {
jbd_unlock_bh_journal_head(bh);
+ }
}
/*
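To make the new per-block revoke accounting concrete, here is a small userspace sketch mirroring journal_revoke_records_per_block() above; the 16-byte revoke header and 4-byte block tail sizes are assumptions based on the usual jbd2 on-disk layout, and the numbers are illustrative:

#include <stdio.h>

static int revoke_records_per_block(int blocksize, int has_64bit, int has_csum)
{
	int record_size = has_64bit ? 8 : 4;		/* block number width */
	int space = blocksize - 16;			/* revoke header */

	if (has_csum)
		space -= 4;				/* block tail checksum */
	return space / record_size;
}

int main(void)
{
	/* e.g. 4 KiB journal blocks, 64-bit block numbers, metadata checksums */
	printf("%d revoke records per block\n",
	       revoke_records_per_block(4096, 1, 1));	/* prints 509 */
	return 0;
}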
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 69b9bc329964..fa608788b93d 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -371,6 +371,11 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
}
#endif
+ if (WARN_ON_ONCE(handle->h_revoke_credits <= 0)) {
+ if (!bh_in)
+ brelse(bh);
+ return -EIO;
+ }
/* We really ought not ever to revoke twice in a row without
first having the revoke cancelled: it's illegal to free a
block twice without allocating it in between! */
@@ -391,6 +396,7 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
__brelse(bh);
}
}
+ handle->h_revoke_credits--;
jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in);
err = insert_revoke_hash(journal, blocknr,
@@ -638,10 +644,8 @@ static void flush_descriptor(journal_t *journal,
{
jbd2_journal_revoke_header_t *header;
- if (is_journal_aborted(journal)) {
- put_bh(descriptor);
+ if (is_journal_aborted(journal))
return;
- }
header = (jbd2_journal_revoke_header_t *)descriptor->b_data;
header->r_count = cpu_to_be32(offset);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 990e7b5062e7..27b9f9dee434 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -63,6 +63,28 @@ void jbd2_journal_free_transaction(transaction_t *transaction)
}
/*
+ * Base number of descriptor blocks we reserve for each transaction.
+ */
+static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
+{
+ int tag_space = journal->j_blocksize - sizeof(journal_header_t);
+ int tags_per_block;
+
+ /* Subtract UUID */
+ tag_space -= 16;
+ if (jbd2_journal_has_csum_v2or3(journal))
+ tag_space -= sizeof(struct jbd2_journal_block_tail);
+ /* Commit code leaves a slack space of 16 bytes at the end of the block */
+ tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
+ /*
+ * Revoke descriptors are accounted separately so we need to reserve
+ * space for commit block and normal transaction descriptor blocks.
+ */
+ return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
+ tags_per_block);
+}
+
+/*
* jbd2_get_transaction: obtain a new transaction_t object.
*
* Simply initialise a new transaction. Initialize it in
@@ -88,7 +110,9 @@ static void jbd2_get_transaction(journal_t *journal,
spin_lock_init(&transaction->t_handle_lock);
atomic_set(&transaction->t_updates, 0);
atomic_set(&transaction->t_outstanding_credits,
+ jbd2_descriptor_blocks_per_trans(journal) +
atomic_read(&journal->j_reserved_credits));
+ atomic_set(&transaction->t_outstanding_revokes, 0);
atomic_set(&transaction->t_handle_count, 0);
INIT_LIST_HEAD(&transaction->t_inode_list);
INIT_LIST_HEAD(&transaction->t_private_list);
@@ -258,12 +282,13 @@ static int add_transaction_credits(journal_t *journal, int blocks,
* *before* starting to dirty potentially checkpointed buffers
* in the new transaction.
*/
- if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
+ if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) {
atomic_sub(total, &t->t_outstanding_credits);
read_unlock(&journal->j_state_lock);
jbd2_might_wait_for_commit(journal);
write_lock(&journal->j_state_lock);
- if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
+ if (jbd2_log_space_left(journal) <
+ journal->j_max_transaction_buffers)
__jbd2_log_wait_for_space(journal);
write_unlock(&journal->j_state_lock);
return 1;
@@ -299,12 +324,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
gfp_t gfp_mask)
{
transaction_t *transaction, *new_transaction = NULL;
- int blocks = handle->h_buffer_credits;
+ int blocks = handle->h_total_credits;
int rsv_blocks = 0;
unsigned long ts = jiffies;
if (handle->h_rsv_handle)
- rsv_blocks = handle->h_rsv_handle->h_buffer_credits;
+ rsv_blocks = handle->h_rsv_handle->h_total_credits;
/*
* Limit the number of reserved credits to 1/2 of maximum transaction
@@ -405,6 +430,7 @@ repeat:
update_t_max_wait(transaction, ts);
handle->h_transaction = transaction;
handle->h_requested_credits = blocks;
+ handle->h_revoke_credits_requested = handle->h_revoke_credits;
handle->h_start_jiffies = jiffies;
atomic_inc(&transaction->t_updates);
atomic_inc(&transaction->t_handle_count);
@@ -431,15 +457,15 @@ static handle_t *new_handle(int nblocks)
handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
if (!handle)
return NULL;
- handle->h_buffer_credits = nblocks;
+ handle->h_total_credits = nblocks;
handle->h_ref = 1;
return handle;
}
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
- gfp_t gfp_mask, unsigned int type,
- unsigned int line_no)
+ int revoke_records, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no)
{
handle_t *handle = journal_current_handle();
int err;
@@ -453,6 +479,8 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
return handle;
}
+ nblocks += DIV_ROUND_UP(revoke_records,
+ journal->j_revoke_records_per_block);
handle = new_handle(nblocks);
if (!handle)
return ERR_PTR(-ENOMEM);
@@ -468,6 +496,7 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
rsv_handle->h_journal = journal;
handle->h_rsv_handle = rsv_handle;
}
+ handle->h_revoke_credits = revoke_records;
err = start_this_handle(journal, handle, gfp_mask);
if (err < 0) {
@@ -508,16 +537,21 @@ EXPORT_SYMBOL(jbd2__journal_start);
*/
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
- return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);
+ return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
-void jbd2_journal_free_reserved(handle_t *handle)
+static void __jbd2_journal_unreserve_handle(handle_t *handle)
{
journal_t *journal = handle->h_journal;
WARN_ON(!handle->h_reserved);
- sub_reserved_credits(journal, handle->h_buffer_credits);
+ sub_reserved_credits(journal, handle->h_total_credits);
+}
+
+void jbd2_journal_free_reserved(handle_t *handle)
+{
+ __jbd2_journal_unreserve_handle(handle);
jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);
@@ -569,6 +603,9 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
}
handle->h_type = type;
handle->h_line_no = line_no;
+ trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
+ handle->h_transaction->t_tid, type,
+ line_no, handle->h_total_credits);
return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
@@ -577,6 +614,7 @@ EXPORT_SYMBOL(jbd2_journal_start_reserved);
* int jbd2_journal_extend() - extend buffer credits.
* @handle: handle to 'extend'
* @nblocks: nr blocks to try to extend by.
+ * @revoke_records: number of revoke records to try to extend by.
*
* Some transactions, such as large extends and truncates, can be done
* atomically all at once or in several stages. The operation requests
@@ -593,7 +631,7 @@ EXPORT_SYMBOL(jbd2_journal_start_reserved);
* return code < 0 implies an error
* return code > 0 implies normal transaction-full status.
*/
-int jbd2_journal_extend(handle_t *handle, int nblocks)
+int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal;
@@ -615,6 +653,12 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
goto error_out;
}
+ nblocks += DIV_ROUND_UP(
+ handle->h_revoke_credits_requested + revoke_records,
+ journal->j_revoke_records_per_block) -
+ DIV_ROUND_UP(
+ handle->h_revoke_credits_requested,
+ journal->j_revoke_records_per_block);
spin_lock(&transaction->t_handle_lock);
wanted = atomic_add_return(nblocks,
&transaction->t_outstanding_credits);
@@ -626,22 +670,16 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
goto unlock;
}
- if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
- jbd2_log_space_left(journal)) {
- jbd_debug(3, "denied handle %p %d blocks: "
- "insufficient log space\n", handle, nblocks);
- atomic_sub(nblocks, &transaction->t_outstanding_credits);
- goto unlock;
- }
-
trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
transaction->t_tid,
handle->h_type, handle->h_line_no,
- handle->h_buffer_credits,
+ handle->h_total_credits,
nblocks);
- handle->h_buffer_credits += nblocks;
+ handle->h_total_credits += nblocks;
handle->h_requested_credits += nblocks;
+ handle->h_revoke_credits += revoke_records;
+ handle->h_revoke_credits_requested += revoke_records;
result = 0;
jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
@@ -652,11 +690,55 @@ error_out:
return result;
}
+static void stop_this_handle(handle_t *handle)
+{
+ transaction_t *transaction = handle->h_transaction;
+ journal_t *journal = transaction->t_journal;
+ int revokes;
+
+ J_ASSERT(journal_current_handle() == handle);
+ J_ASSERT(atomic_read(&transaction->t_updates) > 0);
+ current->journal_info = NULL;
+ /*
+ * Subtract necessary revoke descriptor blocks from handle credits. We
+ * take care to account only for revoke descriptor blocks the
+ * transaction will really need, as large sequences of transactions with
+ * small numbers of revokes are relatively common.
+ */
+ revokes = handle->h_revoke_credits_requested - handle->h_revoke_credits;
+ if (revokes) {
+ int t_revokes, revoke_descriptors;
+ int rr_per_blk = journal->j_revoke_records_per_block;
+
+ WARN_ON_ONCE(DIV_ROUND_UP(revokes, rr_per_blk)
+ > handle->h_total_credits);
+ t_revokes = atomic_add_return(revokes,
+ &transaction->t_outstanding_revokes);
+ revoke_descriptors =
+ DIV_ROUND_UP(t_revokes, rr_per_blk) -
+ DIV_ROUND_UP(t_revokes - revokes, rr_per_blk);
+ handle->h_total_credits -= revoke_descriptors;
+ }
+ atomic_sub(handle->h_total_credits,
+ &transaction->t_outstanding_credits);
+ if (handle->h_rsv_handle)
+ __jbd2_journal_unreserve_handle(handle->h_rsv_handle);
+ if (atomic_dec_and_test(&transaction->t_updates))
+ wake_up(&journal->j_wait_updates);
+
+ rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
+ /*
+ * Scope of the GFP_NOFS context is over here and so we can restore the
+ * original alloc context.
+ */
+ memalloc_nofs_restore(handle->saved_alloc_context);
+}
/**
* int jbd2_journal_restart() - restart a handle .
* @handle: handle to restart
* @nblocks: nr credits requested
+ * @revoke_records: number of revoke record credits requested
* @gfp_mask: memory allocation flags (for start_this_handle)
*
* Restart a handle for a multi-transaction filesystem
@@ -669,56 +751,48 @@ error_out:
* credits. We preserve reserved handle if there's any attached to the
* passed in handle.
*/
-int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
+int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
+ gfp_t gfp_mask)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal;
tid_t tid;
- int need_to_start, ret;
+ int need_to_start;
+ int ret;
/* If we've had an abort of any type, don't even think about
* actually doing the restart! */
if (is_handle_aborted(handle))
return 0;
journal = transaction->t_journal;
+ tid = transaction->t_tid;
/*
* First unlink the handle from its current transaction, and start the
* commit on that.
*/
- J_ASSERT(atomic_read(&transaction->t_updates) > 0);
- J_ASSERT(journal_current_handle() == handle);
-
- read_lock(&journal->j_state_lock);
- spin_lock(&transaction->t_handle_lock);
- atomic_sub(handle->h_buffer_credits,
- &transaction->t_outstanding_credits);
- if (handle->h_rsv_handle) {
- sub_reserved_credits(journal,
- handle->h_rsv_handle->h_buffer_credits);
- }
- if (atomic_dec_and_test(&transaction->t_updates))
- wake_up(&journal->j_wait_updates);
- tid = transaction->t_tid;
- spin_unlock(&transaction->t_handle_lock);
+ jbd_debug(2, "restarting handle %p\n", handle);
+ stop_this_handle(handle);
handle->h_transaction = NULL;
- current->journal_info = NULL;
- jbd_debug(2, "restarting handle %p\n", handle);
+ /*
+ * TODO: If we use READ_ONCE / WRITE_ONCE for j_commit_request we can
+ * get rid of pointless j_state_lock traffic like this.
+ */
+ read_lock(&journal->j_state_lock);
need_to_start = !tid_geq(journal->j_commit_request, tid);
read_unlock(&journal->j_state_lock);
if (need_to_start)
jbd2_log_start_commit(journal, tid);
-
- rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
- handle->h_buffer_credits = nblocks;
- /*
- * Restore the original nofs context because the journal restart
- * is basically the same thing as journal stop and start.
- * start_this_handle will start a new nofs context.
- */
- memalloc_nofs_restore(handle->saved_alloc_context);
+ handle->h_total_credits = nblocks +
+ DIV_ROUND_UP(revoke_records,
+ journal->j_revoke_records_per_block);
+ handle->h_revoke_credits = revoke_records;
ret = start_this_handle(journal, handle, gfp_mask);
+ trace_jbd2_handle_restart(journal->j_fs_dev->bd_dev,
+ ret ? 0 : handle->h_transaction->t_tid,
+ handle->h_type, handle->h_line_no,
+ handle->h_total_credits);
return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);
@@ -726,7 +800,7 @@ EXPORT_SYMBOL(jbd2__journal_restart);
int jbd2_journal_restart(handle_t *handle, int nblocks)
{
- return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
+ return jbd2__journal_restart(handle, nblocks, 0, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
@@ -876,7 +950,7 @@ repeat:
start_lock = jiffies;
lock_buffer(bh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
/* If it takes too long to lock the buffer, trace it */
time_lock = jbd2_time_diff(start_lock, jiffies);
@@ -926,7 +1000,7 @@ repeat:
error = -EROFS;
if (is_handle_aborted(handle)) {
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
goto out;
}
error = 0;
@@ -990,7 +1064,7 @@ repeat:
*/
if (buffer_shadow(bh)) {
JBUFFER_TRACE(jh, "on shadow: sleep");
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
goto repeat;
}
@@ -1011,7 +1085,7 @@ repeat:
JBUFFER_TRACE(jh, "generate frozen data");
if (!frozen_buffer) {
JBUFFER_TRACE(jh, "allocate memory for buffer");
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
GFP_NOFS | __GFP_NOFAIL);
goto repeat;
@@ -1030,7 +1104,7 @@ attach_next:
jh->b_next_transaction = transaction;
done:
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
/*
* If we are about to journal a buffer, then any revoke pending on it is
@@ -1169,7 +1243,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
* that case: the transaction must have deleted the buffer for it to be
* reused here.
*/
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
jh->b_transaction == NULL ||
(jh->b_transaction == journal->j_committing_transaction &&
@@ -1204,7 +1278,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
jh->b_next_transaction = transaction;
spin_unlock(&journal->j_list_lock);
}
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
/*
* akpm: I added this. ext3_alloc_branch can pick up new indirect
@@ -1272,13 +1346,13 @@ repeat:
committed_data = jbd2_alloc(jh2bh(jh)->b_size,
GFP_NOFS|__GFP_NOFAIL);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
if (!jh->b_committed_data) {
/* Copy out the current buffer contents into the
* preserved, committed copy. */
JBUFFER_TRACE(jh, "generate b_committed data");
if (!committed_data) {
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
goto repeat;
}
@@ -1286,7 +1360,7 @@ repeat:
committed_data = NULL;
memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
}
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
out:
jbd2_journal_put_journal_head(jh);
if (unlikely(committed_data))
@@ -1387,16 +1461,16 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
*/
if (jh->b_transaction != transaction &&
jh->b_next_transaction != transaction) {
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
J_ASSERT_JH(jh, jh->b_transaction == transaction ||
jh->b_next_transaction == transaction);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
}
if (jh->b_modified == 1) {
/* If it's in our transaction it must be in BJ_Metadata list. */
if (jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata) {
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
if (jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata)
pr_err("JBD2: assertion failure: h_type=%u "
@@ -1406,13 +1480,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
jh->b_jlist);
J_ASSERT_JH(jh, jh->b_transaction != transaction ||
jh->b_jlist == BJ_Metadata);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
}
goto out;
}
journal = transaction->t_journal;
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
if (jh->b_modified == 0) {
/*
@@ -1420,12 +1494,12 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* of the transaction. This needs to be done
* once a transaction -bzzz
*/
- if (handle->h_buffer_credits <= 0) {
+ if (WARN_ON_ONCE(jbd2_handle_buffer_credits(handle) <= 0)) {
ret = -ENOSPC;
goto out_unlock_bh;
}
jh->b_modified = 1;
- handle->h_buffer_credits--;
+ handle->h_total_credits--;
}
/*
@@ -1498,7 +1572,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
__jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
spin_unlock(&journal->j_list_lock);
out_unlock_bh:
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
out:
JBUFFER_TRACE(jh, "exit");
return ret;
@@ -1536,18 +1610,20 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
BUFFER_TRACE(bh, "entry");
- jbd_lock_bh_state(bh);
+ jh = jbd2_journal_grab_journal_head(bh);
+ if (!jh) {
+ __bforget(bh);
+ return 0;
+ }
- if (!buffer_jbd(bh))
- goto not_jbd;
- jh = bh2jh(bh);
+ spin_lock(&jh->b_state_lock);
/* Critical error: attempting to delete a bitmap buffer, maybe?
* Don't do any jbd operations, and return an error. */
if (!J_EXPECT_JH(jh, !jh->b_committed_data,
"inconsistent data on disk")) {
err = -EIO;
- goto not_jbd;
+ goto drop;
}
/* keep track of whether or not this transaction modified us */
@@ -1595,10 +1671,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
} else {
__jbd2_journal_unfile_buffer(jh);
- if (!buffer_jbd(bh)) {
- spin_unlock(&journal->j_list_lock);
- goto not_jbd;
- }
+ jbd2_journal_put_journal_head(jh);
}
spin_unlock(&journal->j_list_lock);
} else if (jh->b_transaction) {
@@ -1640,7 +1713,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
if (!jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "belongs to none transaction");
spin_unlock(&journal->j_list_lock);
- goto not_jbd;
+ goto drop;
}
/*
@@ -1650,7 +1723,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
if (!buffer_dirty(bh)) {
__jbd2_journal_remove_checkpoint(jh);
spin_unlock(&journal->j_list_lock);
- goto not_jbd;
+ goto drop;
}
/*
@@ -1663,20 +1736,15 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
spin_unlock(&journal->j_list_lock);
}
-
- jbd_unlock_bh_state(bh);
- __brelse(bh);
drop:
+ __brelse(bh);
+ spin_unlock(&jh->b_state_lock);
+ jbd2_journal_put_journal_head(jh);
if (drop_reserve) {
/* no need to reserve log space for this block -bzzz */
- handle->h_buffer_credits++;
+ handle->h_total_credits++;
}
return err;
-
-not_jbd:
- jbd_unlock_bh_state(bh);
- __bforget(bh);
- goto drop;
}
/**
@@ -1703,45 +1771,34 @@ int jbd2_journal_stop(handle_t *handle)
tid_t tid;
pid_t pid;
+ if (--handle->h_ref > 0) {
+ jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
+ handle->h_ref);
+ if (is_handle_aborted(handle))
+ return -EIO;
+ return 0;
+ }
if (!transaction) {
/*
- * Handle is already detached from the transaction so
- * there is nothing to do other than decrease a refcount,
- * or free the handle if refcount drops to zero
+ * Handle is already detached from the transaction so there is
+ * nothing to do other than free the handle.
*/
- if (--handle->h_ref > 0) {
- jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
- handle->h_ref);
- return err;
- } else {
- if (handle->h_rsv_handle)
- jbd2_free_handle(handle->h_rsv_handle);
- goto free_and_exit;
- }
+ memalloc_nofs_restore(handle->saved_alloc_context);
+ goto free_and_exit;
}
journal = transaction->t_journal;
-
- J_ASSERT(journal_current_handle() == handle);
+ tid = transaction->t_tid;
if (is_handle_aborted(handle))
err = -EIO;
- else
- J_ASSERT(atomic_read(&transaction->t_updates) > 0);
-
- if (--handle->h_ref > 0) {
- jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
- handle->h_ref);
- return err;
- }
jbd_debug(4, "Handle %p going down\n", handle);
trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
- transaction->t_tid,
- handle->h_type, handle->h_line_no,
+ tid, handle->h_type, handle->h_line_no,
jiffies - handle->h_start_jiffies,
handle->h_sync, handle->h_requested_credits,
(handle->h_requested_credits -
- handle->h_buffer_credits));
+ handle->h_total_credits));
/*
* Implement synchronous transaction batching. If the handle
@@ -1801,19 +1858,13 @@ int jbd2_journal_stop(handle_t *handle)
if (handle->h_sync)
transaction->t_synchronous_commit = 1;
- current->journal_info = NULL;
- atomic_sub(handle->h_buffer_credits,
- &transaction->t_outstanding_credits);
/*
* If the handle is marked SYNC, we need to set another commit
- * going! We also want to force a commit if the current
- * transaction is occupying too much of the log, or if the
- * transaction is too old now.
+ * going! We also want to force a commit if the transaction is too
+ * old now.
*/
if (handle->h_sync ||
- (atomic_read(&transaction->t_outstanding_credits) >
- journal->j_max_transaction_buffers) ||
time_after_eq(jiffies, transaction->t_expires)) {
/* Do this even for aborted journals: an abort still
* completes the commit thread, it just doesn't write
@@ -1822,7 +1873,7 @@ int jbd2_journal_stop(handle_t *handle)
jbd_debug(2, "transaction too old, requesting commit for "
"handle %p\n", handle);
/* This is non-blocking */
- jbd2_log_start_commit(journal, transaction->t_tid);
+ jbd2_log_start_commit(journal, tid);
/*
* Special case: JBD2_SYNC synchronous updates require us
@@ -1833,31 +1884,19 @@ int jbd2_journal_stop(handle_t *handle)
}
/*
- * Once we drop t_updates, if it goes to zero the transaction
- * could start committing on us and eventually disappear. So
- * once we do this, we must not dereference transaction
- * pointer again.
+ * Once stop_this_handle() drops t_updates, the transaction could start
+ * committing on us and eventually disappear. So we must not
+ * dereference transaction pointer again after calling
+ * stop_this_handle().
*/
- tid = transaction->t_tid;
- if (atomic_dec_and_test(&transaction->t_updates)) {
- wake_up(&journal->j_wait_updates);
- if (journal->j_barrier_count)
- wake_up(&journal->j_wait_transaction_locked);
- }
-
- rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
+ stop_this_handle(handle);
if (wait_for_commit)
err = jbd2_log_wait_commit(journal, tid);
- if (handle->h_rsv_handle)
- jbd2_journal_free_reserved(handle->h_rsv_handle);
free_and_exit:
- /*
- * Scope of the GFP_NOFS context is over here and so we can restore the
- * original alloc context.
- */
- memalloc_nofs_restore(handle->saved_alloc_context);
+ if (handle->h_rsv_handle)
+ jbd2_free_handle(handle->h_rsv_handle);
jbd2_free_handle(handle);
return err;
}
@@ -1875,7 +1914,7 @@ free_and_exit:
*
* j_list_lock is held.
*
- * jbd_lock_bh_state(jh2bh(jh)) is held.
+ * jh->b_state_lock is held.
*/
static inline void
@@ -1899,7 +1938,7 @@ __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
*
* Called with j_list_lock held, and the journal may not be locked.
*
- * jbd_lock_bh_state(jh2bh(jh)) is held.
+ * jh->b_state_lock is held.
*/
static inline void
@@ -1931,7 +1970,7 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
transaction_t *transaction;
struct buffer_head *bh = jh2bh(jh);
- J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+ lockdep_assert_held(&jh->b_state_lock);
transaction = jh->b_transaction;
if (transaction)
assert_spin_locked(&transaction->t_journal->j_list_lock);
@@ -1968,17 +2007,15 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
}
/*
- * Remove buffer from all transactions.
+ * Remove buffer from all transactions. The caller is responsible for dropping
+ * the jh reference that belonged to the transaction.
*
* Called with bh_state lock and j_list_lock
- *
- * jh and bh may be already freed when this function returns.
*/
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = NULL;
- jbd2_journal_put_journal_head(jh);
}
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
@@ -1987,18 +2024,19 @@ void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
/* Get reference so that buffer cannot be freed before we unlock it */
get_bh(bh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
spin_lock(&journal->j_list_lock);
__jbd2_journal_unfile_buffer(jh);
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
+ jbd2_journal_put_journal_head(jh);
__brelse(bh);
}
/*
* Called from jbd2_journal_try_to_free_buffers().
*
- * Called under jbd_lock_bh_state(bh)
+ * Called under jh->b_state_lock
*/
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
@@ -2085,10 +2123,10 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
if (!jh)
continue;
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
__journal_try_to_free_buffer(journal, bh);
+ spin_unlock(&jh->b_state_lock);
jbd2_journal_put_journal_head(jh);
- jbd_unlock_bh_state(bh);
if (buffer_jbd(bh))
goto busy;
} while ((bh = bh->b_this_page) != head);
@@ -2109,7 +2147,7 @@ busy:
*
* Called under j_list_lock.
*
- * Called under jbd_lock_bh_state(bh).
+ * Called under jh->b_state_lock.
*/
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
@@ -2130,6 +2168,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
} else {
JBUFFER_TRACE(jh, "on running transaction");
__jbd2_journal_unfile_buffer(jh);
+ jbd2_journal_put_journal_head(jh);
}
return may_free;
}
@@ -2196,18 +2235,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
* holding the page lock. --sct
*/
- if (!buffer_jbd(bh))
+ jh = jbd2_journal_grab_journal_head(bh);
+ if (!jh)
goto zap_buffer_unlocked;
/* OK, we have data buffer in journaled mode */
write_lock(&journal->j_state_lock);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
spin_lock(&journal->j_list_lock);
- jh = jbd2_journal_grab_journal_head(bh);
- if (!jh)
- goto zap_buffer_no_jh;
-
/*
* We cannot remove the buffer from checkpoint lists until the
* transaction adding inode to orphan list (let's call it T)
@@ -2286,10 +2322,10 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
* for commit and try again.
*/
if (partial_page) {
- jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
+ jbd2_journal_put_journal_head(jh);
return -EBUSY;
}
/*
@@ -2301,10 +2337,10 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh))
jh->b_next_transaction = journal->j_running_transaction;
- jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
+ jbd2_journal_put_journal_head(jh);
return 0;
} else {
/* Good, the buffer belongs to the running transaction.
@@ -2328,11 +2364,10 @@ zap_buffer:
* here.
*/
jh->b_modified = 0;
- jbd2_journal_put_journal_head(jh);
-zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
+ jbd2_journal_put_journal_head(jh);
zap_buffer_unlocked:
clear_buffer_dirty(bh);
J_ASSERT_BH(bh, !buffer_jbddirty(bh));
@@ -2419,7 +2454,7 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
int was_dirty = 0;
struct buffer_head *bh = jh2bh(jh);
- J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+ lockdep_assert_held(&jh->b_state_lock);
assert_spin_locked(&transaction->t_journal->j_list_lock);
J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
@@ -2481,11 +2516,11 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
void jbd2_journal_file_buffer(struct journal_head *jh,
transaction_t *transaction, int jlist)
{
- jbd_lock_bh_state(jh2bh(jh));
+ spin_lock(&jh->b_state_lock);
spin_lock(&transaction->t_journal->j_list_lock);
__jbd2_journal_file_buffer(jh, transaction, jlist);
spin_unlock(&transaction->t_journal->j_list_lock);
- jbd_unlock_bh_state(jh2bh(jh));
+ spin_unlock(&jh->b_state_lock);
}
/*
@@ -2495,23 +2530,25 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
* buffer on that transaction's metadata list.
*
* Called under j_list_lock
- * Called under jbd_lock_bh_state(jh2bh(jh))
+ * Called under jh->b_state_lock
*
- * jh and bh may be already free when this function returns
+ * When this function returns true, there's no next transaction to refile to
+ * and the caller has to drop the jh reference through
+ * jbd2_journal_put_journal_head().
*/
-void __jbd2_journal_refile_buffer(struct journal_head *jh)
+bool __jbd2_journal_refile_buffer(struct journal_head *jh)
{
int was_dirty, jlist;
struct buffer_head *bh = jh2bh(jh);
- J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+ lockdep_assert_held(&jh->b_state_lock);
if (jh->b_transaction)
assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
/* If the buffer is now unused, just drop it. */
if (jh->b_next_transaction == NULL) {
__jbd2_journal_unfile_buffer(jh);
- return;
+ return true;
}
/*
@@ -2539,6 +2576,7 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
if (was_dirty)
set_buffer_jbddirty(bh);
+ return false;
}
/*
@@ -2549,16 +2587,15 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
*/
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
- struct buffer_head *bh = jh2bh(jh);
+ bool drop;
- /* Get reference so that buffer cannot be freed before we unlock it */
- get_bh(bh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
spin_lock(&journal->j_list_lock);
- __jbd2_journal_refile_buffer(jh);
- jbd_unlock_bh_state(bh);
+ drop = __jbd2_journal_refile_buffer(jh);
+ spin_unlock(&jh->b_state_lock);
spin_unlock(&journal->j_list_lock);
- __brelse(bh);
+ if (drop)
+ jbd2_journal_put_journal_head(jh);
}
/*
@@ -2619,18 +2656,6 @@ done:
return 0;
}
-int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
-{
- return jbd2_journal_file_inode(handle, jinode,
- JI_WRITE_DATA | JI_WAIT_DATA, 0, LLONG_MAX);
-}
-
-int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
-{
- return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, 0,
- LLONG_MAX);
-}
-
int jbd2_journal_inode_ranged_write(handle_t *handle,
struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
{
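For readers following the credit changes: handles now carry h_total_credits (buffer plus revoke-descriptor credits) and h_revoke_credits, and callers pass the expected number of revoke records when starting, extending, or restarting a handle. A hedged sketch of a caller using the new signatures; the credit counts are made up, not taken from the patch:

/* Illustrative caller; error handling trimmed to the essentials. */
static int example_start_with_revokes(journal_t *journal)
{
	handle_t *handle;
	int err = 0, ret;

	/* 8 buffer credits, no reserved handle, room for 2 revoke records. */
	handle = jbd2__journal_start(journal, 8, 0, 2, GFP_NOFS, 0, 0);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... metadata updates and revokes consume the credits here ... */

	/* Out of room: either extend in place or restart on a new transaction. */
	if (jbd2_journal_extend(handle, 4, 1) != 0)
		err = jbd2__journal_restart(handle, 12, 3, GFP_NOFS);

	ret = jbd2_journal_stop(handle);
	return err ? err : ret;
}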
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 8a20ddd25f2d..ab8cdd9e9325 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
@@ -184,7 +185,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
truncate_setsize(inode, iattr->ia_size);
inode->i_blocks = (inode->i_size + 511) >> 9;
- }
+ }
return 0;
}
@@ -391,7 +392,7 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
jffs2_do_setattr(inode, &iattr);
}
-int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
+int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
@@ -409,10 +410,10 @@ int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
mutex_unlock(&c->alloc_sem);
}
- if (!(*flags & SB_RDONLY))
+ if (!(fc->sb_flags & SB_RDONLY))
jffs2_start_garbage_collect_thread(c);
- *flags |= SB_NOATIME;
+ fc->sb_flags |= SB_NOATIME;
return 0;
}
@@ -509,7 +510,7 @@ static int calculate_inocache_hashsize(uint32_t flash_size)
return hashsize;
}
-int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
+int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct jffs2_sb_info *c;
struct inode *root_i;
@@ -524,11 +525,11 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
if (c->mtd->type == MTD_NANDFLASH) {
- pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
+ errorf(fc, "Cannot operate on NAND flash unless jffs2 NAND support is compiled in");
return -EINVAL;
}
if (c->mtd->type == MTD_DATAFLASH) {
- pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
+ errorf(fc, "Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in");
return -EINVAL;
}
#endif
@@ -542,12 +543,12 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
*/
if ((c->sector_size * blocks) != c->flash_size) {
c->flash_size = c->sector_size * blocks;
- pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
- c->flash_size / 1024);
+ infof(fc, "Flash size not aligned to erasesize, reducing to %dKiB",
+ c->flash_size / 1024);
}
if (c->flash_size < 5*c->sector_size) {
- pr_err("Too few erase blocks (%d)\n",
+ errorf(fc, "Too few erase blocks (%d)",
c->flash_size / c->sector_size);
return -EINVAL;
}
@@ -590,6 +591,9 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = JFFS2_SUPER_MAGIC;
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
+
if (!sb_rdonly(sb))
jffs2_start_garbage_collect_thread(c);
return 0;
@@ -678,33 +682,6 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
return JFFS2_INODE_INFO(inode);
}
-unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
- struct jffs2_inode_info *f,
- unsigned long offset,
- unsigned long *priv)
-{
- struct inode *inode = OFNI_EDONI_2SFFJ(f);
- struct page *pg;
-
- pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
- jffs2_do_readpage_unlock, inode);
- if (IS_ERR(pg))
- return (void *)pg;
-
- *priv = (unsigned long)pg;
- return kmap(pg);
-}
-
-void jffs2_gc_release_page(struct jffs2_sb_info *c,
- unsigned char *ptr,
- unsigned long *priv)
-{
- struct page *pg = (void *)*priv;
-
- kunmap(pg);
- put_page(pg);
-}
-
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
int ret = 0;
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 9ed0f26cf023..373b3b7c9f44 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -1165,12 +1165,13 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
uint32_t start, uint32_t end)
{
+ struct inode *inode = OFNI_EDONI_2SFFJ(f);
struct jffs2_full_dnode *new_fn;
struct jffs2_raw_inode ri;
uint32_t alloclen, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
- unsigned long pg;
+ struct page *page;
unsigned char *pg_ptr;
memset(&ri, 0, sizeof(ri));
@@ -1325,15 +1326,18 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
* end up here trying to GC the *same* page that jffs2_write_begin() is
* trying to write out, read_cache_page() will not deadlock. */
mutex_unlock(&f->sem);
- pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
- mutex_lock(&f->sem);
-
- if (IS_ERR(pg_ptr)) {
+ page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
+ jffs2_do_readpage_unlock, inode);
+ if (IS_ERR(page)) {
pr_warn("read_cache_page() returned error: %ld\n",
- PTR_ERR(pg_ptr));
- return PTR_ERR(pg_ptr);
+ PTR_ERR(page));
+ mutex_lock(&f->sem);
+ return PTR_ERR(page);
}
+ pg_ptr = kmap(page);
+ mutex_lock(&f->sem);
+
offset = start;
while(offset < orig_end) {
uint32_t datalen;
@@ -1396,6 +1400,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
}
}
- jffs2_gc_release_page(c, pg_ptr, &pg);
+ kunmap(page);
+ put_page(page);
return ret;
}
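The helper pair removed above is replaced by the standard pagecache idiom; a hedged sketch of that idiom in isolation (the mapping, index and filler are placeholders, not from the patch):

static int example_peek_cached_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	unsigned char *ptr;

	/* A NULL filler falls back to the mapping's ->readpage. */
	page = read_cache_page(mapping, index, NULL, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);

	ptr = kmap(page);
	/* ... read the data through ptr ... */
	kunmap(page);
	put_page(page);
	return 0;
}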
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index bd3d5f0ddc34..ef1cfa61549e 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -172,8 +172,8 @@ void jffs2_dirty_inode(struct inode *inode, int flags);
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode,
struct jffs2_raw_inode *ri);
int jffs2_statfs (struct dentry *, struct kstatfs *);
-int jffs2_do_remount_fs(struct super_block *, int *, char *);
-int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
+int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc);
+int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc);
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
struct jffs2_inode_info *f);
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
@@ -183,9 +183,6 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
struct jffs2_inode_info *f,
unsigned long offset,
unsigned long *priv);
-void jffs2_gc_release_page(struct jffs2_sb_info *c,
- unsigned char *pg,
- unsigned long *priv);
void jffs2_flash_cleanup(struct jffs2_sb_info *c);
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 90431dd613b8..5f7e284e0df3 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -527,8 +527,11 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
err = jffs2_fill_scan_buf(c, sumptr,
jeb->offset + c->sector_size - sumlen,
sumlen - buf_len);
- if (err)
+ if (err) {
+ if (sumlen > buf_size)
+ kfree(sumptr);
return err;
+ }
}
}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index af4aa6599473..0e6406c4f362 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -19,7 +19,8 @@
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/mount.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/jffs2.h>
#include <linux/pagemap.h>
#include <linux/mtd/super.h>
@@ -157,96 +158,75 @@ static const struct export_operations jffs2_export_ops = {
/*
* JFFS2 mount options.
*
+ * Opt_source: The source device
* Opt_override_compr: override default compressor
* Opt_rp_size: size of reserved pool in KiB
- * Opt_err: just end of array marker
*/
enum {
Opt_override_compr,
Opt_rp_size,
- Opt_err,
};
-static const match_table_t tokens = {
- {Opt_override_compr, "compr=%s"},
- {Opt_rp_size, "rp_size=%u"},
- {Opt_err, NULL},
+static const struct fs_parameter_spec jffs2_param_specs[] = {
+ fsparam_enum ("compr", Opt_override_compr),
+ fsparam_u32 ("rp_size", Opt_rp_size),
+ {}
};
-static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
-{
- substring_t args[MAX_OPT_ARGS];
- char *p, *name;
- unsigned int opt;
-
- if (!data)
- return 0;
-
- while ((p = strsep(&data, ","))) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_override_compr:
- name = match_strdup(&args[0]);
-
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "none"))
- c->mount_opts.compr = JFFS2_COMPR_MODE_NONE;
+static const struct fs_parameter_enum jffs2_param_enums[] = {
+ { Opt_override_compr, "none", JFFS2_COMPR_MODE_NONE },
#ifdef CONFIG_JFFS2_LZO
- else if (!strcmp(name, "lzo"))
- c->mount_opts.compr = JFFS2_COMPR_MODE_FORCELZO;
+ { Opt_override_compr, "lzo", JFFS2_COMPR_MODE_FORCELZO },
#endif
#ifdef CONFIG_JFFS2_ZLIB
- else if (!strcmp(name, "zlib"))
- c->mount_opts.compr =
- JFFS2_COMPR_MODE_FORCEZLIB;
+ { Opt_override_compr, "zlib", JFFS2_COMPR_MODE_FORCEZLIB },
#endif
- else {
- pr_err("Error: unknown compressor \"%s\"\n",
- name);
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- c->mount_opts.override_compr = true;
- break;
- case Opt_rp_size:
- if (match_int(&args[0], &opt))
- return -EINVAL;
- opt *= 1024;
- if (opt > c->mtd->size) {
- pr_warn("Too large reserve pool specified, max "
- "is %llu KB\n", c->mtd->size / 1024);
- return -EINVAL;
- }
- c->mount_opts.rp_size = opt;
- break;
- default:
- pr_err("Error: unrecognized mount option '%s' or missing value\n",
- p);
- return -EINVAL;
- }
+ {}
+};
+
+const struct fs_parameter_description jffs2_fs_parameters = {
+ .name = "jffs2",
+ .specs = jffs2_param_specs,
+ .enums = jffs2_param_enums,
+};
+
+static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct fs_parse_result result;
+ struct jffs2_sb_info *c = fc->s_fs_info;
+ int opt;
+
+ opt = fs_parse(fc, &jffs2_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_override_compr:
+ c->mount_opts.compr = result.uint_32;
+ c->mount_opts.override_compr = true;
+ break;
+ case Opt_rp_size:
+ if (result.uint_32 > UINT_MAX / 1024)
+ return invalf(fc, "jffs2: rp_size unrepresentable");
+ opt = result.uint_32 * 1024;
+ if (opt > c->mtd->size)
+ return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
+ c->mtd->size / 1024);
+ c->mount_opts.rp_size = opt;
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
-static int jffs2_remount_fs(struct super_block *sb, int *flags, char *data)
+static int jffs2_reconfigure(struct fs_context *fc)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- int err;
+ struct super_block *sb = fc->root->d_sb;
sync_filesystem(sb);
- err = jffs2_parse_options(c, data);
- if (err)
- return -EINVAL;
-
- return jffs2_do_remount_fs(sb, flags, data);
+ return jffs2_do_remount_fs(sb, fc);
}
static const struct super_operations jffs2_super_operations =
@@ -255,7 +235,6 @@ static const struct super_operations jffs2_super_operations =
.free_inode = jffs2_free_inode,
.put_super = jffs2_put_super,
.statfs = jffs2_statfs,
- .remount_fs = jffs2_remount_fs,
.evict_inode = jffs2_evict_inode,
.dirty_inode = jffs2_dirty_inode,
.show_options = jffs2_show_options,
@@ -265,26 +244,16 @@ static const struct super_operations jffs2_super_operations =
/*
* fill in the superblock
*/
-static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
+static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc)
{
- struct jffs2_sb_info *c;
- int ret;
+ struct jffs2_sb_info *c = sb->s_fs_info;
jffs2_dbg(1, "jffs2_get_sb_mtd():"
" New superblock for device %d (\"%s\")\n",
sb->s_mtd->index, sb->s_mtd->name);
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return -ENOMEM;
-
c->mtd = sb->s_mtd;
c->os_priv = sb;
- sb->s_fs_info = c;
-
- ret = jffs2_parse_options(c, data);
- if (ret)
- return -EINVAL;
/* Initialize JFFS2 superblock locks, the further initialization will
* be done later */
@@ -302,15 +271,37 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
sb->s_flags |= SB_POSIXACL;
#endif
- ret = jffs2_do_fill_super(sb, data, silent);
- return ret;
+ return jffs2_do_fill_super(sb, fc);
}
-static struct dentry *jffs2_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int jffs2_get_tree(struct fs_context *fc)
{
- return mount_mtd(fs_type, flags, dev_name, data, jffs2_fill_super);
+ return get_tree_mtd(fc, jffs2_fill_super);
+}
+
+static void jffs2_free_fc(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations jffs2_context_ops = {
+ .free = jffs2_free_fc,
+ .parse_param = jffs2_parse_param,
+ .get_tree = jffs2_get_tree,
+ .reconfigure = jffs2_reconfigure,
+};
+
+static int jffs2_init_fs_context(struct fs_context *fc)
+{
+ struct jffs2_sb_info *ctx;
+
+ ctx = kzalloc(sizeof(struct jffs2_sb_info), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ fc->s_fs_info = ctx;
+ fc->ops = &jffs2_context_ops;
+ return 0;
}
static void jffs2_put_super (struct super_block *sb)
@@ -347,7 +338,8 @@ static void jffs2_kill_sb(struct super_block *sb)
static struct file_system_type jffs2_fs_type = {
.owner = THIS_MODULE,
.name = "jffs2",
- .mount = jffs2_mount,
+ .init_fs_context = jffs2_init_fs_context,
+ .parameters = &jffs2_fs_parameters,
.kill_sb = jffs2_kill_sb,
};
MODULE_ALIAS_FS("jffs2");
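One practical consequence of the fs_context conversion above: option strings such as compr=lzo or rp_size=64 are now tokenised by the VFS and delivered to jffs2_parse_param() one parameter at a time, with fs_parse() resolving enum and u32 values via jffs2_param_specs/jffs2_param_enums. The rp_size path also gained an explicit overflow guard before scaling KiB to bytes; a standalone sketch of why that guard matters (the value is illustrative):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int kib = 4500000;	/* hypothetical rp_size= value in KiB */

	if (kib > UINT_MAX / 1024)
		printf("rejected: %u KiB cannot be represented in bytes\n", kib);
	else
		printf("rp_size = %u bytes\n", kib * 1024);
	return 0;
}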
diff --git a/fs/jfs/Kconfig b/fs/jfs/Kconfig
index 22a273bd4648..05cb0e8e4382 100644
--- a/fs/jfs/Kconfig
+++ b/fs/jfs/Kconfig
@@ -5,7 +5,7 @@ config JFS_FS
select CRC32
help
This is a port of IBM's Journaled Filesystem. More information is
- available in the file <file:Documentation/filesystems/jfs.txt>.
+ available in the file <file:Documentation/admin-guide/jfs.rst>.
If you do not intend to use the JFS filesystem, say N.
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index f4e10cb9f734..b2dc4d1f9dcc 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -503,6 +503,8 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = sbi;
sb->s_max_links = JFS_LINK_MAX;
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
sbi->sb = sb;
sbi->uid = INVALID_UID;
sbi->gid = INVALID_GID;
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index a387534c9577..9d96e6871e1a 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -137,6 +137,9 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
if (kn_from == kn_to)
return strlcpy(buf, "/", buflen);
+ if (!buf)
+ return -EINVAL;
+
common = kernfs_common_ancestor(kn_from, kn_to);
if (WARN_ON(!common))
return -EINVAL;
@@ -144,8 +147,7 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
depth_to = kernfs_depth(common, kn_to);
depth_from = kernfs_depth(common, kn_from);
- if (buf)
- buf[0] = '\0';
+ buf[0] = '\0';
for (i = 0; i < depth_from; i++)
len += strlcpy(buf + len, parent_str,
@@ -430,19 +432,18 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
*/
void kernfs_put_active(struct kernfs_node *kn)
{
- struct kernfs_root *root = kernfs_root(kn);
int v;
if (unlikely(!kn))
return;
if (kernfs_lockdep(kn))
- rwsem_release(&kn->dep_map, 1, _RET_IP_);
+ rwsem_release(&kn->dep_map, _RET_IP_);
v = atomic_dec_return(&kn->active);
if (likely(v != KN_DEACTIVATED_BIAS))
return;
- wake_up_all(&root->deactivate_waitq);
+ wake_up_all(&kernfs_root(kn)->deactivate_waitq);
}
/**
@@ -475,7 +476,7 @@ static void kernfs_drain(struct kernfs_node *kn)
if (kernfs_lockdep(kn)) {
lock_acquired(&kn->dep_map, _RET_IP_);
- rwsem_release(&kn->dep_map, 1, _RET_IP_);
+ rwsem_release(&kn->dep_map, _RET_IP_);
}
kernfs_drain_open_files(kn);
@@ -507,10 +508,6 @@ void kernfs_put(struct kernfs_node *kn)
struct kernfs_node *parent;
struct kernfs_root *root;
- /*
- * kernfs_node is freed with ->count 0, kernfs_find_and_get_node_by_ino
- * depends on this to filter reused stale node
- */
if (!kn || !atomic_dec_and_test(&kn->count))
return;
root = kernfs_root(kn);
@@ -535,7 +532,7 @@ void kernfs_put(struct kernfs_node *kn)
kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
}
spin_lock(&kernfs_idr_lock);
- idr_remove(&root->ino_idr, kn->id.ino);
+ idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
spin_unlock(&kernfs_idr_lock);
kmem_cache_free(kernfs_node_cache, kn);
@@ -620,8 +617,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
unsigned flags)
{
struct kernfs_node *kn;
- u32 gen;
- int cursor;
+ u32 id_highbits;
int ret;
name = kstrdup_const(name, GFP_KERNEL);
@@ -634,23 +630,19 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
idr_preload(GFP_KERNEL);
spin_lock(&kernfs_idr_lock);
- cursor = idr_get_cursor(&root->ino_idr);
ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
- if (ret >= 0 && ret < cursor)
- root->next_generation++;
- gen = root->next_generation;
+ if (ret >= 0 && ret < root->last_id_lowbits)
+ root->id_highbits++;
+ id_highbits = root->id_highbits;
+ root->last_id_lowbits = ret;
spin_unlock(&kernfs_idr_lock);
idr_preload_end();
if (ret < 0)
goto err_out2;
- kn->id.ino = ret;
- kn->id.generation = gen;
- /*
- * set ino first. This RELEASE is paired with atomic_inc_not_zero in
- * kernfs_find_and_get_node_by_ino
- */
- atomic_set_release(&kn->count, 1);
+ kn->id = (u64)id_highbits << 32 | ret;
+
+ atomic_set(&kn->count, 1);
atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
RB_CLEAR_NODE(&kn->rb);
@@ -679,7 +671,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
return kn;
err_out3:
- idr_remove(&root->ino_idr, kn->id.ino);
+ idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
err_out2:
kmem_cache_free(kernfs_node_cache, kn);
err_out1:
@@ -704,50 +696,52 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
}
/*
- * kernfs_find_and_get_node_by_ino - get kernfs_node from inode number
+ * kernfs_find_and_get_node_by_id - get kernfs_node from node id
* @root: the kernfs root
- * @ino: inode number
+ * @id: the target node id
+ *
+ * @id's lower 32bits encode the ino and its upper 32bits the gen. If
+ * the gen portion is zero, all generations are matched.
*
* RETURNS:
* NULL on failure. Return a kernfs node with reference counter incremented
*/
-struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root,
- unsigned int ino)
+struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+ u64 id)
{
struct kernfs_node *kn;
+ ino_t ino = kernfs_id_ino(id);
+ u32 gen = kernfs_id_gen(id);
+
+ spin_lock(&kernfs_idr_lock);
- rcu_read_lock();
- kn = idr_find(&root->ino_idr, ino);
+ kn = idr_find(&root->ino_idr, (u32)ino);
if (!kn)
- goto out;
+ goto err_unlock;
- /*
- * Since kernfs_node is freed in RCU, it's possible an old node for ino
- * is freed, but reused before RCU grace period. But a freed node (see
- * kernfs_put) or an incompletedly initialized node (see
- * __kernfs_new_node) should have 'count' 0. We can use this fact to
- * filter out such node.
- */
- if (!atomic_inc_not_zero(&kn->count)) {
- kn = NULL;
- goto out;
+ if (sizeof(ino_t) >= sizeof(u64)) {
+ /* we looked up with the low 32bits, compare the whole */
+ if (kernfs_ino(kn) != ino)
+ goto err_unlock;
+ } else {
+ /* 0 matches all generations */
+ if (unlikely(gen && kernfs_gen(kn) != gen))
+ goto err_unlock;
}
/*
- * The node could be a new node or a reused node. If it's a new node,
- * we are ok. If it's reused because of RCU (because of
- * SLAB_TYPESAFE_BY_RCU), the __kernfs_new_node always sets its 'ino'
- * before 'count'. So if 'count' is uptodate, 'ino' should be uptodate,
- * hence we can use 'ino' to filter stale node.
+ * ACTIVATED is protected with kernfs_mutex but it was clear when
+ * @kn was added to idr and we just want to see it set. No need to
+ * grab kernfs_mutex.
*/
- if (kn->id.ino != ino)
- goto out;
- rcu_read_unlock();
+ if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
+ !atomic_inc_not_zero(&kn->count)))
+ goto err_unlock;
+ spin_unlock(&kernfs_idr_lock);
return kn;
-out:
- rcu_read_unlock();
- kernfs_put(kn);
+err_unlock:
+ spin_unlock(&kernfs_idr_lock);
return NULL;
}
@@ -961,7 +955,17 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
idr_init(&root->ino_idr);
INIT_LIST_HEAD(&root->supers);
- root->next_generation = 1;
+
+ /*
+ * On 64bit ino setups, id is ino. On 32bit, low 32bits are ino.
+ * High bits are the generation. The starting value for both ino and
+ * generation is 1. Initialize upper 32bit allocation
+ * accordingly.
+ */
+ if (sizeof(ino_t) >= sizeof(u64))
+ root->id_highbits = 0;
+ else
+ root->id_highbits = 1;
kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO,
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
@@ -1677,7 +1681,7 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
const char *name = pos->name;
unsigned int type = dt_type(pos);
int len = strlen(name);
- ino_t ino = pos->id.ino;
+ ino_t ino = kernfs_ino(pos);
ctx->pos = pos->hash;
file->private_data = pos;
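
The dir.c changes above fold the inode number and generation into a single 64-bit kn->id: the low 32 bits come from the cyclic idr allocation and the high 32 bits from root->id_highbits (left at 0 when ino_t is already 64-bit). A minimal sketch of that packing follows, using hypothetical helper names; the in-tree kernfs_ino()/kernfs_gen()/kernfs_id_ino()/kernfs_id_gen() accessors referenced in the hunks may differ in detail:

/* Sketch of the id layout used in __kernfs_new_node() above. */
#include <stdint.h>

static inline uint64_t sketch_pack_id(uint32_t ino_lowbits, uint32_t id_highbits)
{
	return (uint64_t)id_highbits << 32 | ino_lowbits;
}

static inline uint32_t sketch_id_ino(uint64_t id)
{
	return (uint32_t)id;		/* low 32 bits: idr-allocated ino */
}

static inline uint32_t sketch_id_gen(uint64_t id)
{
	return (uint32_t)(id >> 32);	/* high 32 bits: generation (0 matches any on lookup) */
}
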
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e8c792b49616..34366db3620d 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -892,7 +892,7 @@ repeat:
* have the matching @file available. Look up the inodes
* and generate the events manually.
*/
- inode = ilookup(info->sb, kn->id.ino);
+ inode = ilookup(info->sb, kernfs_ino(kn));
if (!inode)
continue;
@@ -901,7 +901,7 @@ repeat:
if (parent) {
struct inode *p_inode;
- p_inode = ilookup(info->sb, parent->id.ino);
+ p_inode = ilookup(info->sb, kernfs_ino(parent));
if (p_inode) {
fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
inode, FSNOTIFY_EVENT_INODE, &name, 0);
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index f3f3984cce80..eac277c63d42 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -158,12 +158,11 @@ static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
static inline void set_inode_attr(struct inode *inode,
struct kernfs_iattrs *attrs)
{
- struct super_block *sb = inode->i_sb;
inode->i_uid = attrs->ia_uid;
inode->i_gid = attrs->ia_gid;
- inode->i_atime = timespec64_trunc(attrs->ia_atime, sb->s_time_gran);
- inode->i_mtime = timespec64_trunc(attrs->ia_mtime, sb->s_time_gran);
- inode->i_ctime = timespec64_trunc(attrs->ia_ctime, sb->s_time_gran);
+ inode->i_atime = timestamp_truncate(attrs->ia_atime, inode);
+ inode->i_mtime = timestamp_truncate(attrs->ia_mtime, inode);
+ inode->i_ctime = timestamp_truncate(attrs->ia_ctime, inode);
}
static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
@@ -202,7 +201,7 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
inode->i_private = kn;
inode->i_mapping->a_ops = &kernfs_aops;
inode->i_op = &kernfs_iops;
- inode->i_generation = kn->id.generation;
+ inode->i_generation = kernfs_gen(kn);
set_default_inode_attr(inode, kn->mode);
kernfs_refresh_inode(kn, inode);
@@ -248,7 +247,7 @@ struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
{
struct inode *inode;
- inode = iget_locked(sb, kn->id.ino);
+ inode = iget_locked(sb, kernfs_ino(kn));
if (inode && (inode->i_state & I_NEW))
kernfs_init_inode(kn, inode);
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 02ce570a9a3c..2f3c51d55261 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -109,8 +109,6 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
const char *name, umode_t mode,
kuid_t uid, kgid_t gid,
unsigned flags);
-struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root,
- unsigned int ino);
/*
* file.c
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 6c12fac2c287..4d31503abaee 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -53,63 +53,85 @@ const struct super_operations kernfs_sops = {
.show_path = kernfs_sop_show_path,
};
-/*
- * Similar to kernfs_fh_get_inode, this one gets kernfs node from inode
- * number and generation
- */
-struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root,
- const union kernfs_node_id *id)
+static int kernfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent)
{
- struct kernfs_node *kn;
+ struct kernfs_node *kn = inode->i_private;
- kn = kernfs_find_and_get_node_by_ino(root, id->ino);
- if (!kn)
- return NULL;
- if (kn->id.generation != id->generation) {
- kernfs_put(kn);
- return NULL;
+ if (*max_len < 2) {
+ *max_len = 2;
+ return FILEID_INVALID;
}
- return kn;
+
+ *max_len = 2;
+ *(u64 *)fh = kn->id;
+ return FILEID_KERNFS;
}
-static struct inode *kernfs_fh_get_inode(struct super_block *sb,
- u64 ino, u32 generation)
+static struct dentry *__kernfs_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type, bool get_parent)
{
struct kernfs_super_info *info = kernfs_info(sb);
- struct inode *inode;
struct kernfs_node *kn;
+ struct inode *inode;
+ u64 id;
- if (ino == 0)
- return ERR_PTR(-ESTALE);
+ if (fh_len < 2)
+ return NULL;
+
+ switch (fh_type) {
+ case FILEID_KERNFS:
+ id = *(u64 *)fid;
+ break;
+ case FILEID_INO32_GEN:
+ case FILEID_INO32_GEN_PARENT:
+ /*
+ * blk_log_action() exposes the "LOW32,HIGH32" pair without a
+ * type, and userland can call us with a generic fid
+ * constructed from them. Combine them back into the ID. See
+ * blk_log_action().
+ */
+ id = ((u64)fid->i32.gen << 32) | fid->i32.ino;
+ break;
+ default:
+ return NULL;
+ }
- kn = kernfs_find_and_get_node_by_ino(info->root, ino);
+ kn = kernfs_find_and_get_node_by_id(info->root, id);
if (!kn)
return ERR_PTR(-ESTALE);
+
+ if (get_parent) {
+ struct kernfs_node *parent;
+
+ parent = kernfs_get_parent(kn);
+ kernfs_put(kn);
+ kn = parent;
+ if (!kn)
+ return ERR_PTR(-ESTALE);
+ }
+
inode = kernfs_get_inode(sb, kn);
kernfs_put(kn);
if (!inode)
return ERR_PTR(-ESTALE);
- if (generation && inode->i_generation != generation) {
- /* we didn't find the right inode.. */
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
- return inode;
+ return d_obtain_alias(inode);
}
-static struct dentry *kernfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
- int fh_len, int fh_type)
+static struct dentry *kernfs_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
{
- return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
- kernfs_fh_get_inode);
+ return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, false);
}
-static struct dentry *kernfs_fh_to_parent(struct super_block *sb, struct fid *fid,
- int fh_len, int fh_type)
+static struct dentry *kernfs_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
{
- return generic_fh_to_parent(sb, fid, fh_len, fh_type,
- kernfs_fh_get_inode);
+ return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, true);
}
static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
@@ -120,6 +142,7 @@ static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
}
static const struct export_operations kernfs_export_ops = {
+ .encode_fh = kernfs_encode_fh,
.fh_to_dentry = kernfs_fh_to_dentry,
.fh_to_parent = kernfs_fh_to_parent,
.get_parent = kernfs_get_parent_dentry,
@@ -363,18 +386,9 @@ void kernfs_kill_sb(struct super_block *sb)
void __init kernfs_init(void)
{
-
- /*
- * the slab is freed in RCU context, so kernfs_find_and_get_node_by_ino
- * can access the slab lock free. This could introduce stale nodes,
- * please see how kernfs_find_and_get_node_by_ino filters out stale
- * nodes.
- */
kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
sizeof(struct kernfs_node),
- 0,
- SLAB_PANIC | SLAB_TYPESAFE_BY_RCU,
- NULL);
+ 0, SLAB_PANIC, NULL);
/* Creates slab cache for kernfs inode attributes */
kernfs_iattrs_cache = kmem_cache_create("kernfs_iattrs_cache",
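
Since the new encode_fh above stores the 64-bit node id in a two-word handle, and the fh_to_dentry path also accepts a generic FILEID_INO32_GEN fid built from the "LOW32,HIGH32" pair, a rough userspace sketch of turning such an id back into an open file could look like the following (illustrative only: open_kernfs_node() is a made-up helper, open_by_handle_at() needs CAP_DAC_READ_SEARCH, and mount_fd must refer to the kernfs-backed filesystem):

/* Build a FILEID_INO32_GEN handle from a 64-bit kernfs node id. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#ifndef FILEID_INO32_GEN
#define FILEID_INO32_GEN 1	/* see include/linux/exportfs.h */
#endif

static int open_kernfs_node(int mount_fd, uint64_t id)
{
	uint32_t pair[2] = { (uint32_t)id, (uint32_t)(id >> 32) }; /* ino, gen */
	struct file_handle *fh;
	int fd;

	fh = malloc(sizeof(*fh) + sizeof(pair));
	if (!fh)
		return -1;
	fh->handle_bytes = sizeof(pair);
	fh->handle_type = FILEID_INO32_GEN;
	memcpy(fh->f_handle, pair, sizeof(pair));

	fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
	free(fh);
	return fd;
}
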
diff --git a/fs/libfs.c b/fs/libfs.c
index c9b2850c0f7c..1463b038ffc4 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -89,58 +89,45 @@ int dcache_dir_close(struct inode *inode, struct file *file)
EXPORT_SYMBOL(dcache_dir_close);
/* parent is locked at least shared */
-static struct dentry *next_positive(struct dentry *parent,
- struct list_head *from,
- int count)
+/*
+ * Returns an element of siblings' list.
+ * We are looking for the <count>th positive dentry after <p>; if
+ * found, the dentry is grabbed and returned to the caller.
+ * If no such element exists, NULL is returned.
+ */
+static struct dentry *scan_positives(struct dentry *cursor,
+ struct list_head *p,
+ loff_t count,
+ struct dentry *last)
{
- unsigned *seq = &parent->d_inode->i_dir_seq, n;
- struct dentry *res;
- struct list_head *p;
- bool skipped;
- int i;
+ struct dentry *dentry = cursor->d_parent, *found = NULL;
-retry:
- i = count;
- skipped = false;
- n = smp_load_acquire(seq) & ~1;
- res = NULL;
- rcu_read_lock();
- for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+ spin_lock(&dentry->d_lock);
+ while ((p = p->next) != &dentry->d_subdirs) {
struct dentry *d = list_entry(p, struct dentry, d_child);
- if (!simple_positive(d)) {
- skipped = true;
- } else if (!--i) {
- res = d;
- break;
+ // we must at least skip cursors, to avoid livelocks
+ if (d->d_flags & DCACHE_DENTRY_CURSOR)
+ continue;
+ if (simple_positive(d) && !--count) {
+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+ if (simple_positive(d))
+ found = dget_dlock(d);
+ spin_unlock(&d->d_lock);
+ if (likely(found))
+ break;
+ count = 1;
+ }
+ if (need_resched()) {
+ list_move(&cursor->d_child, p);
+ p = &cursor->d_child;
+ spin_unlock(&dentry->d_lock);
+ cond_resched();
+ spin_lock(&dentry->d_lock);
}
}
- rcu_read_unlock();
- if (skipped) {
- smp_rmb();
- if (unlikely(*seq != n))
- goto retry;
- }
- return res;
-}
-
-static void move_cursor(struct dentry *cursor, struct list_head *after)
-{
- struct dentry *parent = cursor->d_parent;
- unsigned n, *seq = &parent->d_inode->i_dir_seq;
- spin_lock(&parent->d_lock);
- for (;;) {
- n = *seq;
- if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
- break;
- cpu_relax();
- }
- __list_del(cursor->d_child.prev, cursor->d_child.next);
- if (after)
- list_add(&cursor->d_child, after);
- else
- list_add_tail(&cursor->d_child, &parent->d_subdirs);
- smp_store_release(seq, n + 2);
- spin_unlock(&parent->d_lock);
+ spin_unlock(&dentry->d_lock);
+ dput(last);
+ return found;
}
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
@@ -158,17 +145,25 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
return -EINVAL;
}
if (offset != file->f_pos) {
+ struct dentry *cursor = file->private_data;
+ struct dentry *to = NULL;
+
+ inode_lock_shared(dentry->d_inode);
+
+ if (offset > 2)
+ to = scan_positives(cursor, &dentry->d_subdirs,
+ offset - 2, NULL);
+ spin_lock(&dentry->d_lock);
+ if (to)
+ list_move(&cursor->d_child, &to->d_child);
+ else
+ list_del_init(&cursor->d_child);
+ spin_unlock(&dentry->d_lock);
+ dput(to);
+
file->f_pos = offset;
- if (file->f_pos >= 2) {
- struct dentry *cursor = file->private_data;
- struct dentry *to;
- loff_t n = file->f_pos - 2;
-
- inode_lock_shared(dentry->d_inode);
- to = next_positive(dentry, &dentry->d_subdirs, n);
- move_cursor(cursor, to ? &to->d_child : NULL);
- inode_unlock_shared(dentry->d_inode);
- }
+
+ inode_unlock_shared(dentry->d_inode);
}
return offset;
}
@@ -190,25 +185,35 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *cursor = file->private_data;
- struct list_head *p = &cursor->d_child;
- struct dentry *next;
- bool moved = false;
+ struct list_head *anchor = &dentry->d_subdirs;
+ struct dentry *next = NULL;
+ struct list_head *p;
if (!dir_emit_dots(file, ctx))
return 0;
if (ctx->pos == 2)
- p = &dentry->d_subdirs;
- while ((next = next_positive(dentry, p, 1)) != NULL) {
+ p = anchor;
+ else if (!list_empty(&cursor->d_child))
+ p = &cursor->d_child;
+ else
+ return 0;
+
+ while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
d_inode(next)->i_ino, dt_type(d_inode(next))))
break;
- moved = true;
- p = &next->d_child;
ctx->pos++;
+ p = &next->d_child;
}
- if (moved)
- move_cursor(cursor, p);
+ spin_lock(&dentry->d_lock);
+ if (next)
+ list_move_tail(&cursor->d_child, &next->d_child);
+ else
+ list_del_init(&cursor->d_child);
+ spin_unlock(&dentry->d_lock);
+ dput(next);
+
return 0;
}
EXPORT_SYMBOL(dcache_readdir);
@@ -468,8 +473,7 @@ EXPORT_SYMBOL(simple_write_begin);
/**
* simple_write_end - .write_end helper for non-block-device FSes
- * @available: See .write_end of address_space_operations
- * @file: "
+ * @file: See .write_end of address_space_operations
* @mapping: "
* @pos: "
* @len: "
diff --git a/fs/locks.c b/fs/locks.c
index 686eae21daf6..6970f55daf54 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -212,6 +212,7 @@ struct file_lock_list_struct {
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
+
/*
* The blocked_hash is used to find POSIX lock loops for deadlock detection.
* It is protected by blocked_lock_lock.
@@ -1592,7 +1593,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
ctx = smp_load_acquire(&inode->i_flctx);
if (!ctx) {
WARN_ON_ONCE(1);
- return error;
+ goto free_lock;
}
percpu_down_read(&file_rwsem);
@@ -1672,6 +1673,7 @@ out:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
+free_lock:
locks_free_lock(new_fl);
return error;
}
@@ -1990,6 +1992,64 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
}
EXPORT_SYMBOL(generic_setlease);
+#if IS_ENABLED(CONFIG_SRCU)
+/*
+ * Kernel subsystems can register to be notified on any attempt to set
+ * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
+ * to close files that it may have cached when there is an attempt to set a
+ * conflicting lease.
+ */
+static struct srcu_notifier_head lease_notifier_chain;
+
+static inline void
+lease_notifier_chain_init(void)
+{
+ srcu_init_notifier_head(&lease_notifier_chain);
+}
+
+static inline void
+setlease_notifier(long arg, struct file_lock *lease)
+{
+ if (arg != F_UNLCK)
+ srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
+}
+
+int lease_register_notifier(struct notifier_block *nb)
+{
+ return srcu_notifier_chain_register(&lease_notifier_chain, nb);
+}
+EXPORT_SYMBOL_GPL(lease_register_notifier);
+
+void lease_unregister_notifier(struct notifier_block *nb)
+{
+ srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
+}
+EXPORT_SYMBOL_GPL(lease_unregister_notifier);
+
+#else /* !IS_ENABLED(CONFIG_SRCU) */
+static inline void
+lease_notifier_chain_init(void)
+{
+}
+
+static inline void
+setlease_notifier(long arg, struct file_lock *lease)
+{
+}
+
+int lease_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lease_register_notifier);
+
+void lease_unregister_notifier(struct notifier_block *nb)
+{
+}
+EXPORT_SYMBOL_GPL(lease_unregister_notifier);
+
+#endif /* IS_ENABLED(CONFIG_SRCU) */
+
/**
* vfs_setlease - sets a lease on an open file
* @filp: file pointer
@@ -2010,6 +2070,8 @@ EXPORT_SYMBOL(generic_setlease);
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
+ if (lease)
+ setlease_notifier(arg, *lease);
if (filp->f_op->setlease)
return filp->f_op->setlease(filp, arg, lease, priv);
else
@@ -2784,10 +2846,10 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
: (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
} else {
- seq_printf(f, "%s ",
- (lease_breaking(fl))
- ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
- : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
+ int type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
+
+ seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
+ (type == F_RDLCK) ? "READ" : "UNLCK");
}
if (inode) {
/* userspace relies on this representation of dev_t */
@@ -2923,6 +2985,7 @@ static int __init filelock_init(void)
INIT_HLIST_HEAD(&fll->hlist);
}
+ lease_notifier_chain_init();
return 0;
}
core_initcall(filelock_init);
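
The lease notifier chain added above gives other kernel subsystems a callback whenever a process attempts to set a lease (F_UNLCK attempts are filtered out before the chain runs, and the requested struct file_lock is passed as the notifier data). A hedged sketch of a hypothetical consumer module, modelled on the nfsd use case the comment describes; the callback and module names are made up, and the registration helpers are assumed to be declared in linux/fs.h:

/* Hypothetical consumer of lease_register_notifier()/lease_unregister_notifier(). */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/notifier.h>

static int demo_lease_attempt(struct notifier_block *nb,
			      unsigned long arg, void *data)
{
	struct file_lock *lease = data;

	/* arg is F_RDLCK or F_WRLCK; react to a conflicting lease request here. */
	pr_info("lease attempt, type %lu, file %pD\n", arg, lease->fl_file);
	return NOTIFY_OK;
}

static struct notifier_block demo_lease_nb = {
	.notifier_call = demo_lease_attempt,
};

static int __init demo_lease_init(void)
{
	return lease_register_notifier(&demo_lease_nb);
}

static void __exit demo_lease_exit(void)
{
	lease_unregister_notifier(&demo_lease_nb);
}

module_init(demo_lease_init);
module_exit(demo_lease_exit);
MODULE_LICENSE("GPL");
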
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index f96073f25432..7cb5fd38eb14 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -277,6 +277,8 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
/* set up enough so that it can read an inode */
s->s_op = &minix_sops;
+ s->s_time_min = 0;
+ s->s_time_max = U32_MAX;
root_inode = minix_iget(s, MINIX_ROOT_INO);
if (IS_ERR(root_inode)) {
ret = PTR_ERR(root_inode);
diff --git a/fs/namei.c b/fs/namei.c
index 209c51a5226c..2dda552bcf7a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -596,14 +596,12 @@ static void terminate_walk(struct nameidata *nd)
path_put(&nd->path);
for (i = 0; i < nd->depth; i++)
path_put(&nd->stack[i].link);
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+ if (nd->flags & LOOKUP_ROOT_GRABBED) {
path_put(&nd->root);
- nd->root.mnt = NULL;
+ nd->flags &= ~LOOKUP_ROOT_GRABBED;
}
} else {
nd->flags &= ~LOOKUP_RCU;
- if (!(nd->flags & LOOKUP_ROOT))
- nd->root.mnt = NULL;
rcu_read_unlock();
}
nd->depth = 0;
@@ -641,6 +639,14 @@ static bool legitimize_links(struct nameidata *nd)
return true;
}
+static bool legitimize_root(struct nameidata *nd)
+{
+ if (!nd->root.mnt || (nd->flags & LOOKUP_ROOT))
+ return true;
+ nd->flags |= LOOKUP_ROOT_GRABBED;
+ return legitimize_path(nd, &nd->root, nd->root_seq);
+}
+
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
@@ -671,23 +677,18 @@ static int unlazy_walk(struct nameidata *nd)
nd->flags &= ~LOOKUP_RCU;
if (unlikely(!legitimize_links(nd)))
- goto out2;
- if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
goto out1;
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
- if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq)))
- goto out;
- }
+ if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
+ goto out;
+ if (unlikely(!legitimize_root(nd)))
+ goto out;
rcu_read_unlock();
BUG_ON(nd->inode != parent->d_inode);
return 0;
-out2:
+out1:
nd->path.mnt = NULL;
nd->path.dentry = NULL;
-out1:
- if (!(nd->flags & LOOKUP_ROOT))
- nd->root.mnt = NULL;
out:
rcu_read_unlock();
return -ECHILD;
@@ -727,23 +728,14 @@ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned se
*/
if (unlikely(!lockref_get_not_dead(&dentry->d_lockref)))
goto out;
- if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) {
- rcu_read_unlock();
- dput(dentry);
- goto drop_root_mnt;
- }
+ if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
+ goto out_dput;
/*
* Sequence counts matched. Now make sure that the root is
* still valid and get it if required.
*/
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
- if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) {
- rcu_read_unlock();
- dput(dentry);
- return -ECHILD;
- }
- }
-
+ if (unlikely(!legitimize_root(nd)))
+ goto out_dput;
rcu_read_unlock();
return 0;
@@ -753,9 +745,10 @@ out1:
nd->path.dentry = NULL;
out:
rcu_read_unlock();
-drop_root_mnt:
- if (!(nd->flags & LOOKUP_ROOT))
- nd->root.mnt = NULL;
+ return -ECHILD;
+out_dput:
+ rcu_read_unlock();
+ dput(dentry);
return -ECHILD;
}
@@ -819,6 +812,7 @@ static void set_root(struct nameidata *nd)
} while (read_seqcount_retry(&fs->seq, seq));
} else {
get_fs_root(fs, &nd->root);
+ nd->flags |= LOOKUP_ROOT_GRABBED;
}
}
@@ -931,7 +925,7 @@ static inline int may_follow_link(struct nameidata *nd)
return -ECHILD;
audit_inode(nd->name, nd->stack[0].link.dentry, 0);
- audit_log_link_denied("follow_link");
+ audit_log_path_denied(AUDIT_ANOM_LINK, "follow_link");
return -EACCES;
}
@@ -999,7 +993,7 @@ static int may_linkat(struct path *link)
if (safe_hardlink_source(inode) || inode_owner_or_capable(inode))
return 0;
- audit_log_link_denied("linkat");
+ audit_log_path_denied(AUDIT_ANOM_LINK, "linkat");
return -EPERM;
}
@@ -1037,6 +1031,10 @@ static int may_create_in_sticky(struct dentry * const dir,
(dir->d_inode->i_mode & 0020 &&
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
+ const char *operation = S_ISFIFO(inode->i_mode) ?
+ "sticky_create_fifo" :
+ "sticky_create_regular";
+ audit_log_path_denied(AUDIT_ANOM_CREAT, operation);
return -EACCES;
}
return 0;
@@ -1735,8 +1733,6 @@ static int pick_link(struct nameidata *nd, struct path *link,
nd->flags &= ~LOOKUP_RCU;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
- if (!(nd->flags & LOOKUP_ROOT))
- nd->root.mnt = NULL;
rcu_read_unlock();
} else if (likely(unlazy_walk(nd)) == 0)
error = nd_alloc_stack(nd);
@@ -2350,7 +2346,7 @@ int filename_lookup(int dfd, struct filename *name, unsigned flags,
retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
if (likely(!retval))
- audit_inode(name, path->dentry, flags & LOOKUP_PARENT);
+ audit_inode(name, path->dentry, 0);
restore_nameidata();
putname(name);
return retval;
@@ -2391,7 +2387,7 @@ static struct filename *filename_parentat(int dfd, struct filename *name,
if (likely(!retval)) {
*last = nd.last;
*type = nd.last_type;
- audit_inode(name, parent->dentry, LOOKUP_PARENT);
+ audit_inode(name, parent->dentry, AUDIT_INODE_PARENT);
} else {
putname(name);
name = ERR_PTR(retval);
@@ -2718,7 +2714,7 @@ filename_mountpoint(int dfd, struct filename *name, struct path *path,
if (unlikely(error == -ESTALE))
error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
if (likely(!error))
- audit_inode(name, path->dentry, flags & LOOKUP_NO_EVAL);
+ audit_inode(name, path->dentry, AUDIT_INODE_NOEVAL);
restore_nameidata();
putname(name);
return error;
@@ -3299,7 +3295,7 @@ static int do_last(struct nameidata *nd,
if (error)
return error;
- audit_inode(nd->name, dir, LOOKUP_PARENT);
+ audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
/* trailing slashes? */
if (unlikely(nd->last.name[nd->last.len]))
return -EISDIR;
diff --git a/fs/namespace.c b/fs/namespace.c
index 6464ea4acba9..2adfe7b166a3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1463,7 +1463,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
disconnect = disconnect_mount(p, how);
-
if (mnt_has_parent(p)) {
mnt_add_count(p->mnt_parent, -1);
if (!disconnect) {
@@ -1471,10 +1470,11 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
} else {
umount_mnt(p);
- hlist_add_head(&p->mnt_umount, &unmounted);
}
}
change_mnt_propagation(p, MS_PRIVATE);
+ if (disconnect)
+ hlist_add_head(&p->mnt_umount, &unmounted);
}
}
@@ -1643,13 +1643,18 @@ static inline bool may_mount(void)
return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
+#ifdef CONFIG_MANDATORY_FILE_LOCKING
static inline bool may_mandlock(void)
{
-#ifndef CONFIG_MANDATORY_FILE_LOCKING
- return false;
-#endif
return capable(CAP_SYS_ADMIN);
}
+#else
+static inline bool may_mandlock(void)
+{
+ pr_warn("VFS: \"mand\" mount option not supported");
+ return false;
+}
+#endif
/*
* Now umount can handle mount points as well as block devices.
@@ -1675,8 +1680,6 @@ int ksys_umount(char __user *name, int flags)
if (!(flags & UMOUNT_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
- lookup_flags |= LOOKUP_NO_EVAL;
-
retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
if (retval)
goto out;
@@ -2463,6 +2466,28 @@ static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
unlock_mount_hash();
}
+static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
+{
+ struct super_block *sb = mnt->mnt_sb;
+
+ if (!__mnt_is_readonly(mnt) &&
+ (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
+ char *buf = (char *)__get_free_page(GFP_KERNEL);
+ char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
+ struct tm tm;
+
+ time64_to_tm(sb->s_time_max, 0, &tm);
+
+ pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
+ sb->s_type->name,
+ is_mounted(mnt) ? "remounted" : "mounted",
+ mntpath,
+ tm.tm_year+1900, (unsigned long long)sb->s_time_max);
+
+ free_page((unsigned long)buf);
+ }
+}
+
/*
* Handle reconfiguration of the mountpoint only without alteration of the
* superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
@@ -2488,6 +2513,9 @@ static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
if (ret == 0)
set_mount_attributes(mnt, mnt_flags);
up_write(&sb->s_umount);
+
+ mnt_warn_timestamp_expiry(path, &mnt->mnt);
+
return ret;
}
@@ -2528,6 +2556,9 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
}
up_write(&sb->s_umount);
}
+
+ mnt_warn_timestamp_expiry(path, &mnt->mnt);
+
put_fs_context(fc);
return err;
}
@@ -2735,6 +2766,8 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
if (IS_ERR(mnt))
return PTR_ERR(mnt);
+ mnt_warn_timestamp_expiry(mountpoint, mnt);
+
error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
if (error < 0)
mntput(mnt);
@@ -2768,8 +2801,6 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
put_filesystem(type);
return -EINVAL;
}
- } else {
- subtype = "";
}
}
@@ -2994,7 +3025,7 @@ void *copy_mount_options(const void __user * data)
* the remainder of the page.
*/
/* copy_from_user cannot cross TASK_SIZE ! */
- size = TASK_SIZE - (unsigned long)data;
+ size = TASK_SIZE - (unsigned long)untagged_addr(data);
if (size > PAGE_SIZE)
size = PAGE_SIZE;
@@ -3046,7 +3077,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
return -EINVAL;
/* ... and get the mountpoint */
- retval = user_path(dir_name, &path);
+ retval = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
if (retval)
return retval;
@@ -3593,11 +3624,13 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
if (!may_mount())
return -EPERM;
- error = user_path_dir(new_root, &new);
+ error = user_path_at(AT_FDCWD, new_root,
+ LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
if (error)
goto out0;
- error = user_path_dir(put_old, &old);
+ error = user_path_at(AT_FDCWD, put_old,
+ LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
if (error)
goto out1;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 0ff3facf81da..af549d70ec50 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -53,6 +53,16 @@ nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
return false;
}
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
+{
+ struct nfs_delegation *delegation;
+
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (nfs4_is_valid_delegation(delegation, 0))
+ return delegation;
+ return NULL;
+}
+
static int
nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
{
@@ -153,7 +163,7 @@ again:
/* Block nfs4_proc_unlck */
mutex_lock(&sp->so_delegreturn_mutex);
seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
- err = nfs4_open_delegation_recall(ctx, state, stateid, type);
+ err = nfs4_open_delegation_recall(ctx, state, stateid);
if (!err)
err = nfs_delegation_claim_locks(state, stateid);
if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
@@ -1046,6 +1056,22 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
nfs4_schedule_state_manager(clp);
}
+static void
+nfs_delegation_test_free_expired(struct inode *inode,
+ nfs4_stateid *stateid,
+ const struct cred *cred)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
+ int status;
+
+ if (!cred)
+ return;
+ status = ops->test_and_free_expired(server, stateid, cred);
+ if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
+ nfs_remove_bad_delegation(inode, stateid);
+}
+
/**
* nfs_reap_expired_delegations - reap expired delegations
* @clp: nfs_client to process
@@ -1057,7 +1083,6 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
*/
void nfs_reap_expired_delegations(struct nfs_client *clp)
{
- const struct nfs4_minor_version_ops *ops = clp->cl_mvops;
struct nfs_delegation *delegation;
struct nfs_server *server;
struct inode *inode;
@@ -1088,11 +1113,7 @@ restart:
nfs4_stateid_copy(&stateid, &delegation->stateid);
clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
rcu_read_unlock();
- if (cred != NULL &&
- ops->test_and_free_expired(server, &stateid, cred) < 0) {
- nfs_revoke_delegation(inode, &stateid);
- nfs_inode_find_state_and_recover(inode, &stateid);
- }
+ nfs_delegation_test_free_expired(inode, &stateid, cred);
put_cred(cred);
if (nfs4_server_rebooted(clp)) {
nfs_inode_mark_test_expired_delegation(server,inode);
@@ -1170,7 +1191,7 @@ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
if (delegation != NULL &&
nfs4_stateid_match_other(dst, &delegation->stateid)) {
dst->seqid = delegation->stateid.seqid;
- return ret;
+ ret = true;
}
rcu_read_unlock();
out:
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 5799777df5ec..8b14d441e699 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -63,11 +63,12 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
/* NFSv4 delegation-related procedures */
int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync);
-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
int nfs4_have_delegation(struct inode *inode, fmode_t flags);
int nfs4_check_delegation(struct inode *inode, fmode_t flags);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8d501093660f..e180033e35cf 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
nfs_file_set_open_context(file, ctx);
else
- err = -ESTALE;
+ err = -EOPENSTALE;
out:
return err;
}
@@ -1669,10 +1669,8 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
#endif /* CONFIG_NFSV4 */
-/*
- * Code common to create, mkdir, and mknod.
- */
-int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
+struct dentry *
+nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -1680,13 +1678,10 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
struct inode *dir = d_inode(parent);
struct inode *inode;
struct dentry *d;
- int error = -EACCES;
+ int error;
d_drop(dentry);
- /* We may have been initialized further down */
- if (d_really_is_positive(dentry))
- goto out;
if (fhandle->size == 0) {
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, NULL);
if (error)
@@ -1702,18 +1697,32 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
}
inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
d = d_splice_alias(inode, dentry);
- if (IS_ERR(d)) {
- error = PTR_ERR(d);
- goto out_error;
- }
- dput(d);
out:
dput(parent);
- return 0;
+ return d;
out_error:
nfs_mark_for_revalidate(dir);
- dput(parent);
- return error;
+ d = ERR_PTR(error);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(nfs_add_or_obtain);
+
+/*
+ * Code common to create, mkdir, and mknod.
+ */
+int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr,
+ struct nfs4_label *label)
+{
+ struct dentry *d;
+
+ d = nfs_add_or_obtain(dentry, fhandle, fattr, label);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+
+ /* Callers don't care */
+ dput(d);
+ return 0;
}
EXPORT_SYMBOL_GPL(nfs_instantiate);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0cb442406168..040a50fd9bf3 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -64,13 +64,6 @@
static struct kmem_cache *nfs_direct_cachep;
-/*
- * This represents a set of asynchronous requests that we're waiting on
- */
-struct nfs_direct_mirror {
- ssize_t count;
-};
-
struct nfs_direct_req {
struct kref kref; /* release manager */
@@ -84,9 +77,6 @@ struct nfs_direct_req {
atomic_t io_count; /* i/os we're waiting for */
spinlock_t lock; /* protect completion state */
- struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
- int mirror_count;
-
loff_t io_start; /* Start offset for I/O */
ssize_t count, /* bytes actually processed */
max_count, /* max expected count */
@@ -123,32 +113,42 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
}
static void
-nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+ const struct nfs_pgio_header *hdr,
+ ssize_t dreq_len)
{
- int i;
- ssize_t count;
+ if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
+ test_bit(NFS_IOHDR_EOF, &hdr->flags)))
+ return;
+ if (dreq->max_count >= dreq_len) {
+ dreq->max_count = dreq_len;
+ if (dreq->count > dreq_len)
+ dreq->count = dreq_len;
+
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+ dreq->error = hdr->error;
+ else /* Clear outstanding error if this is EOF */
+ dreq->error = 0;
+ }
+}
- WARN_ON_ONCE(dreq->count >= dreq->max_count);
+static void
+nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+ const struct nfs_pgio_header *hdr)
+{
+ loff_t hdr_end = hdr->io_start + hdr->good_bytes;
+ ssize_t dreq_len = 0;
- if (dreq->mirror_count == 1) {
- dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
- dreq->count += hdr->good_bytes;
- } else {
- /* mirrored writes */
- count = dreq->mirrors[hdr->pgio_mirror_idx].count;
- if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
- count = hdr->io_start + hdr->good_bytes - dreq->io_start;
- dreq->mirrors[hdr->pgio_mirror_idx].count = count;
- }
- /* update the dreq->count by finding the minimum agreed count from all
- * mirrors */
- count = dreq->mirrors[0].count;
+ if (hdr_end > dreq->io_start)
+ dreq_len = hdr_end - dreq->io_start;
- for (i = 1; i < dreq->mirror_count; i++)
- count = min(count, dreq->mirrors[i].count);
+ nfs_direct_handle_truncated(dreq, hdr, dreq_len);
- dreq->count = count;
- }
+ if (dreq_len > dreq->max_count)
+ dreq_len = dreq->max_count;
+
+ if (dreq->count < dreq_len)
+ dreq->count = dreq_len;
}
/*
@@ -293,18 +293,6 @@ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}
-static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
- struct nfs_pageio_descriptor *pgio,
- struct nfs_page *req)
-{
- int mirror_count = 1;
-
- if (pgio->pg_ops->pg_get_mirror_count)
- mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
-
- dreq->mirror_count = mirror_count;
-}
-
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
struct nfs_direct_req *dreq;
@@ -319,7 +307,6 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
INIT_LIST_HEAD(&dreq->mds_cinfo.list);
dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */
INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
- dreq->mirror_count = 1;
spin_lock_init(&dreq->lock);
return dreq;
@@ -401,15 +388,13 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
unsigned long bytes = 0;
struct nfs_direct_req *dreq = hdr->dreq;
- if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
- goto out_put;
-
spin_lock(&dreq->lock);
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
- dreq->error = hdr->error;
- else
- nfs_direct_good_bytes(dreq, hdr);
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ spin_unlock(&dreq->lock);
+ goto out_put;
+ }
+ nfs_direct_count_bytes(dreq, hdr);
spin_unlock(&dreq->lock);
while (!list_empty(&hdr->pages)) {
@@ -640,29 +625,22 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
LIST_HEAD(reqs);
struct nfs_commit_info cinfo;
LIST_HEAD(failed);
- int i;
nfs_init_cinfo_from_dreq(&cinfo, dreq);
nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
dreq->count = 0;
+ dreq->max_count = 0;
+ list_for_each_entry(req, &reqs, wb_list)
+ dreq->max_count += req->wb_bytes;
dreq->verf.committed = NFS_INVALID_STABLE_HOW;
nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
- for (i = 0; i < dreq->mirror_count; i++)
- dreq->mirrors[i].count = 0;
get_dreq(dreq);
nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
- req = nfs_list_entry(reqs.next);
- nfs_direct_setup_mirroring(dreq, &desc, req);
- if (desc.pg_error < 0) {
- list_splice_init(&reqs, &failed);
- goto out_failed;
- }
-
list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
/* Bump the transmission count */
req->wb_nio++;
@@ -680,7 +658,6 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
}
nfs_pageio_complete(&desc);
-out_failed:
while (!list_empty(&failed)) {
req = nfs_list_entry(failed.next);
nfs_list_remove_request(req);
@@ -782,17 +759,16 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
bool request_commit = false;
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
- if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
- goto out_put;
-
nfs_init_cinfo_from_dreq(&cinfo, dreq);
spin_lock(&dreq->lock);
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ spin_unlock(&dreq->lock);
+ goto out_put;
+ }
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
- dreq->error = hdr->error;
- if (dreq->error == 0) {
- nfs_direct_good_bytes(dreq, hdr);
+ nfs_direct_count_bytes(dreq, hdr);
+ if (hdr->good_bytes != 0) {
if (nfs_write_need_commit(hdr)) {
if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
request_commit = true;
@@ -914,7 +890,6 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
break;
}
- nfs_direct_setup_mirroring(dreq, &desc, req);
if (desc.pg_error < 0) {
nfs_free_request(req);
result = desc.pg_error;
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 3cb073c50fa6..c9b605f6c9cb 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -1164,6 +1164,7 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
.owner = THIS_MODULE,
+ .flags = PNFS_LAYOUTGET_ON_OPEN,
.max_layoutget_response = 4096, /* 1 page or so... */
.alloc_layout_hdr = filelayout_alloc_layout_hdr,
.free_layout_hdr = filelayout_free_layout_hdr,
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index b04e20d28162..5657b7f2611f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -8,6 +8,7 @@
*/
#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
@@ -928,7 +929,9 @@ retry:
pgm = &pgio->pg_mirrors[0];
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
- pgio->pg_maxretrans = io_maxretrans;
+ if (NFS_SERVER(pgio->pg_inode)->flags &
+ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+ pgio->pg_maxretrans = io_maxretrans;
return;
out_nolseg:
if (pgio->pg_error < 0)
@@ -940,6 +943,7 @@ out_mds:
pgio->pg_lseg);
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
+ pgio->pg_maxretrans = 0;
nfs_pageio_reset_read_mds(pgio);
}
@@ -1000,7 +1004,9 @@ retry:
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
}
- pgio->pg_maxretrans = io_maxretrans;
+ if (NFS_SERVER(pgio->pg_inode)->flags &
+ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+ pgio->pg_maxretrans = io_maxretrans;
return;
out_mds:
@@ -1010,6 +1016,7 @@ out_mds:
pgio->pg_lseg);
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
+ pgio->pg_maxretrans = 0;
nfs_pageio_reset_write_mds(pgio);
}
@@ -1148,8 +1155,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
break;
- case -EAGAIN:
- return -NFS4ERR_RESET_TO_PNFS;
/* Invalidate Layout errors */
case -NFS4ERR_PNFS_NO_LAYOUT:
case -ESTALE: /* mapped NFS4ERR_STALE */
@@ -1210,7 +1215,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
case -EBADHANDLE:
case -ELOOP:
case -ENOSPC:
- case -EAGAIN:
break;
case -EJUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
@@ -1445,16 +1449,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
ff_layout_read_prepare_common(task, hdr);
}
-static void
-ff_layout_io_prepare_transmit(struct rpc_task *task,
- void *data)
-{
- struct nfs_pgio_header *hdr = data;
-
- if (!pnfs_is_valid_lseg(hdr->lseg))
- rpc_exit(task, -EAGAIN);
-}
-
static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
struct nfs_pgio_header *hdr = data;
@@ -1740,7 +1734,6 @@ static void ff_layout_commit_release(void *data)
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
.rpc_call_prepare = ff_layout_read_prepare_v3,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_read_call_done,
.rpc_count_stats = ff_layout_read_count_stats,
.rpc_release = ff_layout_read_release,
@@ -1748,7 +1741,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
.rpc_call_prepare = ff_layout_read_prepare_v4,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_read_call_done,
.rpc_count_stats = ff_layout_read_count_stats,
.rpc_release = ff_layout_read_release,
@@ -1756,7 +1748,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
.rpc_call_prepare = ff_layout_write_prepare_v3,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_write_call_done,
.rpc_count_stats = ff_layout_write_count_stats,
.rpc_release = ff_layout_write_release,
@@ -1764,7 +1755,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
.rpc_call_prepare = ff_layout_write_prepare_v4,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_write_call_done,
.rpc_count_stats = ff_layout_write_count_stats,
.rpc_release = ff_layout_write_release,
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 53507aa96b0b..3800ab6f08fa 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -114,6 +114,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
struct rb_node **p, *parent;
int diff;
+ nfss->fscache_key = NULL;
+ nfss->fscache = NULL;
+ if (!(nfss->options & NFS_OPTION_FSCACHE))
+ return;
if (!uniq) {
uniq = "";
ulen = 1;
@@ -226,10 +230,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
void nfs_fscache_init_inode(struct inode *inode)
{
struct nfs_fscache_inode_auxdata auxdata;
+ struct nfs_server *nfss = NFS_SERVER(inode);
struct nfs_inode *nfsi = NFS_I(inode);
nfsi->fscache = NULL;
- if (!S_ISREG(inode->i_mode))
+ if (!(nfss->fscache && S_ISREG(inode->i_mode)))
return;
memset(&auxdata, 0, sizeof(auxdata));
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 25a75e40d91d..ad041cfbf9ec 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -182,7 +182,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
*/
static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
- if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
+ if (server->fscache)
return "yes";
return "no ";
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 8a1758200b57..2a03bfeec10a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1403,12 +1403,22 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
return 0;
+ if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+ /* Only a mounted-on-fileid? Just exit */
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+ return 0;
/* Has the inode gone and changed behind our back? */
- if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
+ } else if (nfsi->fileid != fattr->fileid) {
+ /* Is this perhaps the mounted-on fileid? */
+ if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+ nfsi->fileid == fattr->mounted_on_fileid)
+ return 0;
return -ESTALE;
+ }
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
return -ESTALE;
+
if (!nfs_file_has_buffered_writers(nfsi)) {
/* Verify a few of the more important attributes */
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
@@ -1768,18 +1778,6 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
-static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
- struct nfs_fattr *fattr)
-{
- bool ret1 = true, ret2 = true;
-
- if (fattr->valid & NFS_ATTR_FATTR_FILEID)
- ret1 = (nfsi->fileid == fattr->fileid);
- if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
- ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
- return ret1 || ret2;
-}
-
/*
* Many nfs protocol calls return the new file attributes after
* an operation. Here we update the inode to reflect the state
@@ -1810,7 +1808,16 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_display_fhandle_hash(NFS_FH(inode)),
atomic_read(&inode->i_count), fattr->valid);
- if (!nfs_fileid_valid(nfsi, fattr)) {
+ if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+ /* Only a mounted-on-fileid? Just exit */
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+ return 0;
+ /* Has the inode gone and changed behind our back? */
+ } else if (nfsi->fileid != fattr->fileid) {
+ /* Is this perhaps the mounted-on fileid? */
+ if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+ nfsi->fileid == fattr->mounted_on_fileid)
+ return 0;
printk(KERN_ERR "NFS: server %s error: fileid changed\n"
"fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
NFS_SERVER(inode)->nfs_client->cl_hostname,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index a2346a2f8361..447a3c17fa8e 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -16,14 +16,6 @@ extern const struct export_operations nfs_export_ops;
struct nfs_string;
-/* Maximum number of readahead requests
- * FIXME: this should really be a sysctl so that users may tune it to suit
- * their needs. People that do NFS over a slow network, might for
- * instance want to reduce it to something closer to 1 for improved
- * interactive response.
- */
-#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
-
static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
{
if (!nfs_fsid_equal(&NFS_SB(parent)->fsid, &fattr->fsid))
@@ -775,3 +767,13 @@ static inline bool nfs_error_is_fatal(int err)
}
}
+static inline bool nfs_error_is_fatal_on_server(int err)
+{
+ switch (err) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return false;
+ }
+ return nfs_error_is_fatal(err);
+}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index a3ad2d46fd42..9eb2f1a503ab 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -279,15 +279,17 @@ static struct nfs3_createdata *nfs3_alloc_createdata(void)
return data;
}
-static int nfs3_do_create(struct inode *dir, struct dentry *dentry, struct nfs3_createdata *data)
+static struct dentry *
+nfs3_do_create(struct inode *dir, struct dentry *dentry, struct nfs3_createdata *data)
{
int status;
status = rpc_call_sync(NFS_CLIENT(dir), &data->msg, 0);
nfs_post_op_update_inode(dir, data->res.dir_attr);
- if (status == 0)
- status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, NULL);
- return status;
+ if (status != 0)
+ return ERR_PTR(status);
+
+ return nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr, NULL);
}
static void nfs3_free_createdata(struct nfs3_createdata *data)
@@ -304,6 +306,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
+ struct dentry *d_alias;
int status = -ENOMEM;
dprintk("NFS call create %pd\n", dentry);
@@ -330,7 +333,8 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
goto out;
for (;;) {
- status = nfs3_do_create(dir, dentry, data);
+ d_alias = nfs3_do_create(dir, dentry, data);
+ status = PTR_ERR_OR_ZERO(d_alias);
if (status != -ENOTSUPP)
break;
@@ -355,6 +359,9 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
if (status != 0)
goto out_release_acls;
+ if (d_alias)
+ dentry = d_alias;
+
/* When we created the file with exclusive semantics, make
* sure we set the attributes afterwards. */
if (data->arg.create.createmode == NFS3_CREATE_EXCLUSIVE) {
@@ -372,11 +379,13 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
nfs_post_op_update_inode(d_inode(dentry), data->res.fattr);
dprintk("NFS reply setattr (post-create): %d\n", status);
if (status != 0)
- goto out_release_acls;
+ goto out_dput;
}
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
+out_dput:
+ dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
@@ -504,6 +513,7 @@ nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
unsigned int len, struct iattr *sattr)
{
struct nfs3_createdata *data;
+ struct dentry *d_alias;
int status = -ENOMEM;
if (len > NFS3_MAXPATHLEN)
@@ -522,7 +532,11 @@ nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
data->arg.symlink.pathlen = len;
data->arg.symlink.sattr = sattr;
- status = nfs3_do_create(dir, dentry, data);
+ d_alias = nfs3_do_create(dir, dentry, data);
+ status = PTR_ERR_OR_ZERO(d_alias);
+
+ if (status == 0)
+ dput(d_alias);
nfs3_free_createdata(data);
out:
@@ -535,6 +549,7 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
+ struct dentry *d_alias;
int status = -ENOMEM;
dprintk("NFS call mkdir %pd\n", dentry);
@@ -553,12 +568,18 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
data->arg.mkdir.len = dentry->d_name.len;
data->arg.mkdir.sattr = sattr;
- status = nfs3_do_create(dir, dentry, data);
+ d_alias = nfs3_do_create(dir, dentry, data);
+ status = PTR_ERR_OR_ZERO(d_alias);
+
if (status != 0)
goto out_release_acls;
+ if (d_alias)
+ dentry = d_alias;
+
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
+ dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
@@ -660,6 +681,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
+ struct dentry *d_alias;
int status = -ENOMEM;
dprintk("NFS call mknod %pd %u:%u\n", dentry,
@@ -698,12 +720,17 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
goto out;
}
- status = nfs3_do_create(dir, dentry, data);
+ d_alias = nfs3_do_create(dir, dentry, data);
+ status = PTR_ERR_OR_ZERO(d_alias);
if (status != 0)
goto out_release_acls;
+ if (d_alias)
+ dentry = d_alias;
+
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
+ dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
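
A minimal sketch (not part of the patch) of the caller-side pattern the hunks above adopt now that nfs3_do_create() returns a dentry or ERR_PTR() instead of a status; the per-operation argument setup is elided and the function name is only illustrative:

static int example_nfs3_create_caller(struct inode *dir, struct dentry *dentry,
				      struct nfs3_createdata *data)
{
	struct dentry *d_alias;
	int status;

	d_alias = nfs3_do_create(dir, dentry, data);
	status = PTR_ERR_OR_ZERO(d_alias);
	if (status != 0)
		return status;

	/* The server may have handed back a different alias for the inode */
	if (d_alias)
		dentry = d_alias;

	/* ... post-create work (setattr, ACLs, ...) against 'dentry' ... */

	dput(d_alias);		/* dput(NULL) is a no-op */
	return status;
}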
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index d778dad9a75e..16b2e5cc3e94 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -465,7 +465,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_purge_state_owners(struct nfs_server *);
+extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
+extern void nfs4_free_state_owners(struct list_head *head);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, fmode_t);
@@ -490,8 +491,6 @@ extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern int nfs4_select_rw_stateid(struct nfs4_state *, fmode_t,
const struct nfs_lock_context *, nfs4_stateid *,
const struct cred **);
-extern bool nfs4_refresh_open_stateid(nfs4_stateid *dst,
- struct nfs4_state *state);
extern bool nfs4_copy_open_stateid(nfs4_stateid *dst,
struct nfs4_state *state);
@@ -573,6 +572,15 @@ static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stat
return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0;
}
+static inline void nfs4_stateid_seqid_inc(nfs4_stateid *s1)
+{
+ u32 seqid = be32_to_cpu(s1->seqid);
+
+ if (++seqid == 0)
+ ++seqid;
+ s1->seqid = cpu_to_be32(seqid);
+}
+
static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
{
return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
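
An illustrative note on the nfs4_stateid_seqid_inc() helper added above: the increment deliberately skips zero on 32-bit wraparound, since a zero seqid is treated specially in NFSv4.x stateids. A standalone sketch of the same arithmetic:

static inline u32 example_seqid_inc(u32 seqid)
{
	if (++seqid == 0)	/* wrapped past 0xffffffff */
		++seqid;	/* skip the reserved value zero */
	return seqid;		/* so 0xffffffff increments to 1 */
}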
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 616393a01c06..da6204025a2d 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -758,9 +758,12 @@ out:
static void nfs4_destroy_server(struct nfs_server *server)
{
+ LIST_HEAD(freeme);
+
nfs_server_return_all_delegations(server);
unset_pnfs_layoutdriver(server);
- nfs4_purge_state_owners(server);
+ nfs4_purge_state_owners(server, &freeme);
+ nfs4_free_state_owners(&freeme);
}
/*
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 96db471ca2e5..339663d04bf8 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
switch (err) {
- case -EPERM:
- case -EACCES:
- case -EDQUOT:
- case -ENOSPC:
- case -EROFS:
- goto out_put_ctx;
default:
+ goto out_put_ctx;
+ case -ENOENT:
+ case -ESTALE:
+ case -EISDIR:
+ case -ENOTDIR:
+ case -ELOOP:
goto out_drop;
}
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 39896afc6edf..caacf5e7f5e1 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1073,14 +1073,26 @@ static const struct rpc_call_ops nfs40_call_sync_ops = {
.rpc_call_done = nfs40_call_sync_done,
};
+static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
+{
+ int ret;
+ struct rpc_task *task;
+
+ task = rpc_run_task(task_setup);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ ret = task->tk_status;
+ rpc_put_task(task);
+ return ret;
+}
+
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res)
{
- int ret;
- struct rpc_task *task;
struct nfs_client *clp = server->nfs_client;
struct nfs4_call_sync_data data = {
.seq_server = server,
@@ -1094,14 +1106,7 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
.callback_data = &data
};
- task = rpc_run_task(&task_setup);
- if (IS_ERR(task))
- ret = PTR_ERR(task);
- else {
- ret = task->tk_status;
- rpc_put_task(task);
- }
- return ret;
+ return nfs4_call_sync_custom(&task_setup);
}
int nfs4_call_sync(struct rpc_clnt *clnt,
@@ -1435,8 +1440,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
return 0;
if ((delegation->type & fmode) != fmode)
return 0;
- if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
- return 0;
switch (claim) {
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_FH:
@@ -1683,6 +1686,14 @@ static void nfs_state_set_open_stateid(struct nfs4_state *state,
write_sequnlock(&state->seqlock);
}
+static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
+{
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+ clear_bit(NFS_OPEN_STATE, &state->flags);
+}
+
static void nfs_state_set_delegation(struct nfs4_state *state,
const nfs4_stateid *deleg_stateid,
fmode_t fmode)
@@ -1797,7 +1808,6 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
struct nfs4_state *state = opendata->state;
- struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *delegation;
int open_mode = opendata->o_arg.open_flags;
fmode_t fmode = opendata->o_arg.fmode;
@@ -1814,7 +1824,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
}
spin_unlock(&state->owner->so_lock);
rcu_read_lock();
- delegation = rcu_dereference(nfsi->delegation);
+ delegation = nfs4_get_valid_delegation(state->inode);
if (!can_open_delegated(delegation, fmode, claim)) {
rcu_read_unlock();
break;
@@ -1907,8 +1917,9 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
if (data->o_res.delegation_type != 0)
nfs4_opendata_check_deleg(data, state);
update:
- update_open_stateid(state, &data->o_res.stateid, NULL,
- data->o_arg.fmode);
+ if (!update_open_stateid(state, &data->o_res.stateid,
+ NULL, data->o_arg.fmode))
+ return ERR_PTR(-EAGAIN);
refcount_inc(&state->count);
return state;
@@ -1973,8 +1984,11 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
if (data->o_res.delegation_type != 0)
nfs4_opendata_check_deleg(data, state);
- update_open_stateid(state, &data->o_res.stateid, NULL,
- data->o_arg.fmode);
+ if (!update_open_stateid(state, &data->o_res.stateid,
+ NULL, data->o_arg.fmode)) {
+ nfs4_put_open_state(state);
+ state = ERR_PTR(-EAGAIN);
+ }
out:
nfs_release_seqid(data->o_arg.seqid);
return state;
@@ -2074,13 +2088,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
{
int ret;
- /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
- clear_bit(NFS_O_RDWR_STATE, &state->flags);
- clear_bit(NFS_O_WRONLY_STATE, &state->flags);
- clear_bit(NFS_O_RDONLY_STATE, &state->flags);
/* memory barrier prior to reading state->n_* */
- clear_bit(NFS_DELEGATED_STATE, &state->flags);
- clear_bit(NFS_OPEN_STATE, &state->flags);
smp_rmb();
ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
if (ret != 0)
@@ -2156,6 +2164,8 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
ctx = nfs4_state_find_open_context(state);
if (IS_ERR(ctx))
return -EAGAIN;
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ nfs_state_clear_open_state_flags(state);
ret = nfs4_do_open_reclaim(ctx, state);
put_nfs_open_context(ctx);
return ret;
@@ -2171,18 +2181,17 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
case -ENOENT:
case -EAGAIN:
case -ESTALE:
+ case -ETIMEDOUT:
break;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
- set_bit(NFS_DELEGATED_STATE, &state->flags);
nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
return -EAGAIN;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
- set_bit(NFS_DELEGATED_STATE, &state->flags);
/* Don't recall a delegation if it was lost */
nfs4_schedule_lease_recovery(server->nfs_client);
return -EAGAIN;
@@ -2203,7 +2212,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
return -EAGAIN;
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
- set_bit(NFS_DELEGATED_STATE, &state->flags);
ssleep(1);
return -EAGAIN;
case -ENOMEM:
@@ -2219,8 +2227,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
}
int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
- struct nfs4_state *state, const nfs4_stateid *stateid,
- fmode_t type)
+ struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_opendata *opendata;
@@ -2231,20 +2238,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
if (IS_ERR(opendata))
return PTR_ERR(opendata);
nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
- nfs_state_clear_delegation(state);
- switch (type & (FMODE_READ|FMODE_WRITE)) {
- case FMODE_READ|FMODE_WRITE:
- case FMODE_WRITE:
+ if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
if (err)
- break;
+ goto out;
+ }
+ if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
if (err)
- break;
- /* Fall through */
- case FMODE_READ:
+ goto out;
+ }
+ if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
err = nfs4_open_recover_helper(opendata, FMODE_READ);
+ if (err)
+ goto out;
}
+ nfs_state_clear_delegation(state);
+out:
nfs4_opendata_put(opendata);
return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
}
@@ -2358,7 +2368,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
data->o_arg.open_flags, claim))
goto out_no_action;
rcu_read_lock();
- delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
+ delegation = nfs4_get_valid_delegation(data->state->inode);
if (can_open_delegated(delegation, data->o_arg.fmode, claim))
goto unlock_no_action;
rcu_read_unlock();
@@ -2492,6 +2502,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
if (!ctx) {
nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
data->is_recover = true;
+ task_setup_data.flags |= RPC_TASK_TIMEOUT;
} else {
nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
pnfs_lgopen_prepare(data, ctx);
@@ -2698,6 +2709,7 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
{
/* NFSv4.0 doesn't allow for delegation recovery on open expire */
nfs40_clear_delegation_stateid(state);
+ nfs_state_clear_open_state_flags(state);
return nfs4_open_expired(sp, state);
}
@@ -2740,13 +2752,13 @@ out_free:
return -NFS4ERR_EXPIRED;
}
-static void nfs41_check_delegation_stateid(struct nfs4_state *state)
+static int nfs41_check_delegation_stateid(struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
nfs4_stateid stateid;
struct nfs_delegation *delegation;
const struct cred *cred = NULL;
- int status;
+ int status, ret = NFS_OK;
/* Get the delegation credential for use by test/free_stateid */
rcu_read_lock();
@@ -2754,20 +2766,15 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
if (delegation == NULL) {
rcu_read_unlock();
nfs_state_clear_delegation(state);
- return;
+ return NFS_OK;
}
nfs4_stateid_copy(&stateid, &delegation->stateid);
- if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
- rcu_read_unlock();
- nfs_state_clear_delegation(state);
- return;
- }
if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
&delegation->flags)) {
rcu_read_unlock();
- return;
+ return NFS_OK;
}
if (delegation->cred)
@@ -2777,9 +2784,24 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
trace_nfs4_test_delegation_stateid(state, NULL, status);
if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
nfs_finish_clear_delegation_stateid(state, &stateid);
+ else
+ ret = status;
- if (delegation->cred)
- put_cred(cred);
+ put_cred(cred);
+ return ret;
+}
+
+static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
+{
+ nfs4_stateid tmp;
+
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
+ nfs4_copy_delegation_stateid(state->inode, state->state,
+ &tmp, NULL) &&
+ nfs4_stateid_match_other(&state->stateid, &tmp))
+ nfs_state_set_delegation(state, &tmp, state->state);
+ else
+ nfs_state_clear_delegation(state);
}
/**
@@ -2849,21 +2871,12 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
const struct cred *cred = state->owner->so_cred;
int status;
- if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) {
- if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) {
- if (nfs4_have_delegation(state->inode, state->state))
- return NFS_OK;
- return -NFS4ERR_OPENMODE;
- }
+ if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
return -NFS4ERR_BAD_STATEID;
- }
status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
trace_nfs4_test_open_stateid(state, NULL, status);
if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
- clear_bit(NFS_O_RDONLY_STATE, &state->flags);
- clear_bit(NFS_O_WRONLY_STATE, &state->flags);
- clear_bit(NFS_O_RDWR_STATE, &state->flags);
- clear_bit(NFS_OPEN_STATE, &state->flags);
+ nfs_state_clear_open_state_flags(state);
stateid->type = NFS4_INVALID_STATEID_TYPE;
return status;
}
@@ -2876,7 +2889,11 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
{
int status;
- nfs41_check_delegation_stateid(state);
+ status = nfs41_check_delegation_stateid(state);
+ if (status != NFS_OK)
+ return status;
+ nfs41_delegation_recover_stateid(state);
+
status = nfs41_check_expired_locks(state);
if (status != NFS_OK)
return status;
@@ -3201,7 +3218,7 @@ static int _nfs4_do_setattr(struct inode *inode,
if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
/* Use that stateid */
- } else if (ctx != NULL) {
+ } else if (ctx != NULL && ctx->state) {
struct nfs_lock_context *l_ctx;
if (!nfs4_valid_open_stateid(ctx->state))
return -EBADF;
@@ -3293,6 +3310,75 @@ nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
return pnfs_wait_on_layoutreturn(inode, task);
}
+/*
+ * Update the seqid of an open stateid
+ */
+static void nfs4_sync_open_stateid(nfs4_stateid *dst,
+ struct nfs4_state *state)
+{
+ __be32 seqid_open;
+ u32 dst_seqid;
+ int seq;
+
+ for (;;) {
+ if (!nfs4_valid_open_stateid(state))
+ break;
+ seq = read_seqbegin(&state->seqlock);
+ if (!nfs4_state_match_open_stateid_other(state, dst)) {
+ nfs4_stateid_copy(dst, &state->open_stateid);
+ if (read_seqretry(&state->seqlock, seq))
+ continue;
+ break;
+ }
+ seqid_open = state->open_stateid.seqid;
+ if (read_seqretry(&state->seqlock, seq))
+ continue;
+
+ dst_seqid = be32_to_cpu(dst->seqid);
+ if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
+ dst->seqid = seqid_open;
+ break;
+ }
+}
+
+/*
+ * Update the seqid of an open stateid after receiving
+ * NFS4ERR_OLD_STATEID
+ */
+static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
+ struct nfs4_state *state)
+{
+ __be32 seqid_open;
+ u32 dst_seqid;
+ bool ret;
+ int seq;
+
+ for (;;) {
+ ret = false;
+ if (!nfs4_valid_open_stateid(state))
+ break;
+ seq = read_seqbegin(&state->seqlock);
+ if (!nfs4_state_match_open_stateid_other(state, dst)) {
+ if (read_seqretry(&state->seqlock, seq))
+ continue;
+ break;
+ }
+ seqid_open = state->open_stateid.seqid;
+ if (read_seqretry(&state->seqlock, seq))
+ continue;
+
+ dst_seqid = be32_to_cpu(dst->seqid);
+ if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) >= 0)
+ dst->seqid = cpu_to_be32(dst_seqid + 1);
+ else
+ dst->seqid = seqid_open;
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
struct nfs4_closedata {
struct inode *inode;
struct nfs4_state *state;
@@ -3343,32 +3429,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
/* Handle Layoutreturn errors */
- if (calldata->arg.lr_args && task->tk_status != 0) {
- switch (calldata->res.lr_ret) {
- default:
- calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
- break;
- case 0:
- calldata->arg.lr_args = NULL;
- calldata->res.lr_res = NULL;
- break;
- case -NFS4ERR_OLD_STATEID:
- if (nfs4_layoutreturn_refresh_stateid(&calldata->arg.lr_args->stateid,
- &calldata->arg.lr_args->range,
- calldata->inode))
- goto lr_restart;
- /* Fallthrough */
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_DELEG_REVOKED:
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
- case -NFS4ERR_WRONG_CRED:
- calldata->arg.lr_args = NULL;
- calldata->res.lr_res = NULL;
- goto lr_restart;
- }
- }
+ if (pnfs_roc_done(task, calldata->inode,
+ &calldata->arg.lr_args,
+ &calldata->res.lr_res,
+ &calldata->res.lr_ret) == -EAGAIN)
+ goto out_restart;
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
@@ -3388,7 +3453,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
break;
case -NFS4ERR_OLD_STATEID:
/* Did we race with OPEN? */
- if (nfs4_refresh_open_stateid(&calldata->arg.stateid,
+ if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
state))
goto out_restart;
goto out_release;
@@ -3400,7 +3465,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
task->tk_msg.rpc_cred);
/* Fallthrough */
case -NFS4ERR_BAD_STATEID:
- break;
+ if (calldata->arg.fmode == 0)
+ break;
+ /* Fallthrough */
default:
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
@@ -3415,8 +3482,6 @@ out_release:
nfs_refresh_inode(calldata->inode, &calldata->fattr);
dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
return;
-lr_restart:
- calldata->res.lr_ret = 0;
out_restart:
task->tk_status = 0;
rpc_restart_call_prepare(task);
@@ -3457,8 +3522,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
} else if (is_rdwr)
calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
- if (!nfs4_valid_open_stateid(state) ||
- !nfs4_refresh_open_stateid(&calldata->arg.stateid, state))
+ nfs4_sync_open_stateid(&calldata->arg.stateid, state);
+ if (!nfs4_valid_open_stateid(state))
call_close = 0;
spin_unlock(&state->owner->so_lock);
@@ -6003,7 +6068,6 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
.rpc_resp = res,
.rpc_cred = cred,
};
- struct rpc_task *task;
struct rpc_task_setup task_setup_data = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
@@ -6036,17 +6100,13 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
dprintk("NFS call setclientid auth=%s, '%s'\n",
clp->cl_rpcclient->cl_auth->au_ops->au_name,
clp->cl_owner_id);
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task)) {
- status = PTR_ERR(task);
- goto out;
- }
- status = task->tk_status;
+
+ status = nfs4_call_sync_custom(&task_setup_data);
if (setclientid.sc_cred) {
+ kfree(clp->cl_acceptor);
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
put_rpccred(setclientid.sc_cred);
}
- rpc_put_task(task);
out:
trace_nfs4_setclientid(clp, status);
dprintk("NFS reply setclientid: %d\n", status);
@@ -6114,32 +6174,11 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
/* Handle Layoutreturn errors */
- if (data->args.lr_args && task->tk_status != 0) {
- switch(data->res.lr_ret) {
- default:
- data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
- break;
- case 0:
- data->args.lr_args = NULL;
- data->res.lr_res = NULL;
- break;
- case -NFS4ERR_OLD_STATEID:
- if (nfs4_layoutreturn_refresh_stateid(&data->args.lr_args->stateid,
- &data->args.lr_args->range,
- data->inode))
- goto lr_restart;
- /* Fallthrough */
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_DELEG_REVOKED:
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
- case -NFS4ERR_WRONG_CRED:
- data->args.lr_args = NULL;
- data->res.lr_res = NULL;
- goto lr_restart;
- }
- }
+ if (pnfs_roc_done(task, data->inode,
+ &data->args.lr_args,
+ &data->res.lr_res,
+ &data->res.lr_ret) == -EAGAIN)
+ goto out_restart;
switch (task->tk_status) {
case 0:
@@ -6177,8 +6216,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
}
data->rpc_status = task->tk_status;
return;
-lr_restart:
- data->res.lr_ret = 0;
out_restart:
task->tk_status = 0;
rpc_restart_call_prepare(task);
@@ -6371,6 +6408,42 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
return err;
}
+/*
+ * Update the seqid of a lock stateid after receiving
+ * NFS4ERR_OLD_STATEID
+ */
+static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
+ struct nfs4_lock_state *lsp)
+{
+ struct nfs4_state *state = lsp->ls_state;
+ bool ret = false;
+
+ spin_lock(&state->state_lock);
+ if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
+ goto out;
+ if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
+ nfs4_stateid_seqid_inc(dst);
+ else
+ dst->seqid = lsp->ls_stateid.seqid;
+ ret = true;
+out:
+ spin_unlock(&state->state_lock);
+ return ret;
+}
+
+static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
+ struct nfs4_lock_state *lsp)
+{
+ struct nfs4_state *state = lsp->ls_state;
+ bool ret;
+
+ spin_lock(&state->state_lock);
+ ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
+ nfs4_stateid_copy(dst, &lsp->ls_stateid);
+ spin_unlock(&state->state_lock);
+ return ret;
+}
+
struct nfs4_unlockdata {
struct nfs_locku_args arg;
struct nfs_locku_res res;
@@ -6388,7 +6461,8 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *p;
- struct inode *inode = lsp->ls_state->inode;
+ struct nfs4_state *state = lsp->ls_state;
+ struct inode *inode = state->inode;
p = kzalloc(sizeof(*p), GFP_NOFS);
if (p == NULL)
@@ -6404,6 +6478,9 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
locks_init_lock(&p->fl);
locks_copy_lock(&p->fl, fl);
p->server = NFS_SERVER(inode);
+ spin_lock(&state->state_lock);
+ nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
+ spin_unlock(&state->state_lock);
return p;
}
@@ -6442,10 +6519,14 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
task->tk_msg.rpc_cred);
/* Fall through */
case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
- if (!nfs4_stateid_match(&calldata->arg.stateid,
- &calldata->lsp->ls_stateid))
+ if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
+ calldata->lsp))
+ rpc_restart_call_prepare(task);
+ break;
+ case -NFS4ERR_OLD_STATEID:
+ if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
+ calldata->lsp))
rpc_restart_call_prepare(task);
break;
default:
@@ -6468,7 +6549,6 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
goto out_wait;
- nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
goto out_no_action;
@@ -7630,6 +7710,8 @@ int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
{
int status;
+ struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
+ struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
struct nfs4_secinfo_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
@@ -7642,26 +7724,37 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct
.rpc_argp = &args,
.rpc_resp = &res,
};
- struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
+ struct nfs4_call_sync_data data = {
+ .seq_server = NFS_SERVER(dir),
+ .seq_args = &args.seq_args,
+ .seq_res = &res.seq_res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clnt,
+ .rpc_message = &msg,
+ .callback_ops = clp->cl_mvops->call_sync_ops,
+ .callback_data = &data,
+ .flags = RPC_TASK_NO_ROUND_ROBIN,
+ };
const struct cred *cred = NULL;
if (use_integrity) {
- clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
- cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
+ clnt = clp->cl_rpcclient;
+ task_setup.rpc_client = clnt;
+
+ cred = nfs4_get_clid_cred(clp);
msg.rpc_cred = cred;
}
dprintk("NFS call secinfo %s\n", name->name);
- nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
- NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
+ nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
+ status = nfs4_call_sync_custom(&task_setup);
- status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
- &res.seq_res, RPC_TASK_NO_ROUND_ROBIN);
dprintk("NFS reply secinfo: %d\n", status);
put_cred(cred);
-
return status;
}
@@ -8329,7 +8422,6 @@ static const struct rpc_call_ops nfs4_get_lease_time_ops = {
int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
- struct rpc_task *task;
struct nfs4_get_lease_time_args args;
struct nfs4_get_lease_time_res res = {
.lr_fsinfo = fsinfo,
@@ -8351,17 +8443,9 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
.callback_data = &data,
.flags = RPC_TASK_TIMEOUT,
};
- int status;
nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
- task = rpc_run_task(&task_setup);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- status = task->tk_status;
- rpc_put_task(task);
- return status;
+ return nfs4_call_sync_custom(&task_setup);
}
#ifdef CONFIG_NFS_V4_1
@@ -8830,7 +8914,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
const struct cred *cred)
{
struct nfs4_reclaim_complete_data *calldata;
- struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
.rpc_cred = cred,
@@ -8839,7 +8922,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_reclaim_complete_call_ops,
- .flags = RPC_TASK_ASYNC | RPC_TASK_NO_ROUND_ROBIN,
+ .flags = RPC_TASK_NO_ROUND_ROBIN,
};
int status = -ENOMEM;
@@ -8854,15 +8937,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task)) {
- status = PTR_ERR(task);
- goto out;
- }
- status = rpc_wait_for_completion_task(task);
- if (status == 0)
- status = task->tk_status;
- rpc_put_task(task);
+ status = nfs4_call_sync_custom(&task_setup_data);
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
@@ -9088,10 +9163,19 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
if (!nfs41_sequence_process(task, &lrp->res.seq_res))
return;
+ /*
+ * Was there an RPC level error? Assume the call succeeded,
+ * and that we need to release the layout
+ */
+ if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
+ lrp->res.lrs_present = 0;
+ return;
+ }
+
server = NFS_SERVER(lrp->args.inode);
switch (task->tk_status) {
case -NFS4ERR_OLD_STATEID:
- if (nfs4_layoutreturn_refresh_stateid(&lrp->args.stateid,
+ if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
&lrp->args.range,
lrp->args.inode))
goto out_restart;
@@ -9347,18 +9431,32 @@ _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = &res,
};
struct rpc_clnt *clnt = server->client;
+ struct nfs4_call_sync_data data = {
+ .seq_server = server,
+ .seq_args = &args.seq_args,
+ .seq_res = &res.seq_res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
+ .callback_data = &data,
+ .flags = RPC_TASK_NO_ROUND_ROBIN,
+ };
const struct cred *cred = NULL;
int status;
if (use_integrity) {
clnt = server->nfs_client->cl_rpcclient;
+ task_setup.rpc_client = clnt;
+
cred = nfs4_get_clid_cred(server->nfs_client);
msg.rpc_cred = cred;
}
dprintk("--> %s\n", __func__);
- status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
- &res.seq_res, RPC_TASK_NO_ROUND_ROBIN);
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
+ status = nfs4_call_sync_custom(&task_setup);
dprintk("<-- %s status=%d\n", __func__, status);
put_cred(cred);
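
The nfs4_call_sync_custom() helper introduced above centralizes the rpc_run_task()/tk_status/rpc_put_task() sequence that setclientid, get_lease_time, reclaim_complete and the secinfo paths previously open-coded. A minimal sketch of a synchronous caller; the message and callback names are placeholders, not symbols from this patch:

static int example_sync_call(struct rpc_clnt *clnt,
			     struct rpc_message *msg,
			     const struct rpc_call_ops *ops,
			     void *callback_data)
{
	struct rpc_task_setup task_setup = {
		.rpc_client	= clnt,
		.rpc_message	= msg,
		.callback_ops	= ops,
		.callback_data	= callback_data,
		.flags		= RPC_TASK_NO_ROUND_ROBIN,
	};

	/* Runs the task, captures tk_status and drops the task reference */
	return nfs4_call_sync_custom(&task_setup);
}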
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9afd051a4876..0c6d53dc3672 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -624,24 +624,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
/**
* nfs4_purge_state_owners - Release all cached state owners
* @server: nfs_server with cached state owners to release
+ * @head: resulting list of state owners
*
* Called at umount time. Remaining state owners will be on
* the LRU with ref count of zero.
+ * Note that the state owners are not freed, but are added
+ * to the list @head, which can later be used as an argument
+ * to nfs4_free_state_owners.
*/
-void nfs4_purge_state_owners(struct nfs_server *server)
+void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp, *tmp;
- LIST_HEAD(doomed);
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
- list_move(&sp->so_lru, &doomed);
+ list_move(&sp->so_lru, head);
nfs4_remove_state_owner_locked(sp);
}
spin_unlock(&clp->cl_lock);
+}
- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+/**
+ * nfs4_free_state_owners - Release all cached state owners
+ * @head: resulting list of state owners
+ *
+ * Frees a list of state owners that was generated by
+ * nfs4_purge_state_owners
+ */
+void nfs4_free_state_owners(struct list_head *head)
+{
+ struct nfs4_state_owner *sp, *tmp;
+
+ list_for_each_entry_safe(sp, tmp, head, so_lru) {
list_del(&sp->so_lru);
nfs4_free_state_owner(sp);
}
@@ -1000,22 +1015,6 @@ out:
return ret;
}
-bool nfs4_refresh_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
-{
- bool ret;
- int seq;
-
- do {
- ret = false;
- seq = read_seqbegin(&state->seqlock);
- if (nfs4_state_match_open_stateid_other(state, dst)) {
- dst->seqid = state->open_stateid.seqid;
- ret = true;
- }
- } while (read_seqretry(&state->seqlock, seq));
- return ret;
-}
-
bool nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
bool ret;
@@ -1463,7 +1462,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
nfs4_schedule_state_manager(clp);
}
-static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
+static void nfs4_state_mark_open_context_bad(struct nfs4_state *state, int err)
{
struct inode *inode = state->inode;
struct nfs_inode *nfsi = NFS_I(inode);
@@ -1474,6 +1473,8 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
if (ctx->state != state)
continue;
set_bit(NFS_CONTEXT_BAD, &ctx->flags);
+ pr_warn("NFSv4: state recovery failed for open file %pd2, "
+ "error = %d\n", ctx->dentry, err);
}
rcu_read_unlock();
}
@@ -1481,7 +1482,7 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
{
set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags);
- nfs4_state_mark_open_context_bad(state);
+ nfs4_state_mark_open_context_bad(state, error);
}
@@ -1512,6 +1513,7 @@ restart:
switch (status) {
case 0:
break;
+ case -ETIMEDOUT:
case -ESTALE:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
@@ -1605,6 +1607,7 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
struct nfs4_state *state;
+ unsigned int loop = 0;
int status = 0;
/* Note: we rely on the sp->so_states list being ordered
@@ -1631,8 +1634,10 @@ restart:
switch (status) {
default:
- if (status >= 0)
+ if (status >= 0) {
+ loop = 0;
break;
+ }
printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
/* Fall through */
case -ENOENT:
@@ -1646,6 +1651,10 @@ restart:
break;
case -EAGAIN:
ssleep(1);
+ if (loop++ < 10) {
+ set_bit(ops->state_flag_bit, &state->flags);
+ break;
+ }
/* Fall through */
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
@@ -1658,11 +1667,13 @@ restart:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_NO_GRACE:
nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
+ /* Fall through */
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -ETIMEDOUT:
goto out_err;
}
nfs4_put_open_state(state);
@@ -1856,12 +1867,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
struct nfs4_state_owner *sp;
struct nfs_server *server;
struct rb_node *pos;
+ LIST_HEAD(freeme);
int status = 0;
restart:
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
- nfs4_purge_state_owners(server);
+ nfs4_purge_state_owners(server, &freeme);
spin_lock(&clp->cl_lock);
for (pos = rb_first(&server->state_owners);
pos != NULL;
@@ -1890,6 +1902,7 @@ restart:
spin_unlock(&clp->cl_lock);
}
rcu_read_unlock();
+ nfs4_free_state_owners(&freeme);
return 0;
}
@@ -1945,7 +1958,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
return -EPERM;
case -EACCES:
case -NFS4ERR_DELAY:
- case -ETIMEDOUT:
case -EAGAIN:
ssleep(1);
break;
@@ -2067,8 +2079,10 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
}
status = nfs4_begin_drain_session(clp);
- if (status != 0)
- return status;
+ if (status != 0) {
+ result = status;
+ goto out;
+ }
status = nfs4_replace_transport(server, locations);
if (status != 0) {
@@ -2574,7 +2588,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
}
/* Now recover expired state... */
- if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
+ if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
section = "reclaim nograce";
status = nfs4_do_reclaim(clp,
clp->cl_mvops->nograce_recovery_ops);
@@ -2582,6 +2596,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
continue;
if (status < 0)
goto out_error;
+ clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
}
nfs4_end_drain_session(clp);
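
Splitting the old purge routine into nfs4_purge_state_owners(), which only moves the doomed owners onto a caller-supplied list under clp->cl_lock, and nfs4_free_state_owners() lets callers such as nfs4_destroy_server() and nfs4_do_reclaim() defer the actual freeing until their locks and RCU read section are dropped. A condensed sketch of the two-phase pattern:

static void example_purge_server(struct nfs_server *server)
{
	LIST_HEAD(freeme);

	/* Phase 1: detach the unused state owners (spinlock held inside) */
	nfs4_purge_state_owners(server, &freeme);

	/* ... drop rcu_read_lock()/other locks held by the caller ... */

	/* Phase 2: free them where sleeping is allowed */
	nfs4_free_state_owners(&freeme);
}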
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 46a8d636d151..ab07db0f07cd 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1174,7 +1174,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
} else
*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
- if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+ if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
*p++ = cpu_to_be32(label->lfs);
*p++ = cpu_to_be32(label->pi);
*p++ = cpu_to_be32(label->len);
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 976d4089e267..361cc10d6f95 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -207,7 +207,6 @@ TRACE_DEFINE_ENUM(LOOKUP_PARENT);
TRACE_DEFINE_ENUM(LOOKUP_REVAL);
TRACE_DEFINE_ENUM(LOOKUP_RCU);
TRACE_DEFINE_ENUM(LOOKUP_NO_REVAL);
-TRACE_DEFINE_ENUM(LOOKUP_NO_EVAL);
TRACE_DEFINE_ENUM(LOOKUP_OPEN);
TRACE_DEFINE_ENUM(LOOKUP_CREATE);
TRACE_DEFINE_ENUM(LOOKUP_EXCL);
@@ -226,7 +225,6 @@ TRACE_DEFINE_ENUM(LOOKUP_DOWN);
{ LOOKUP_REVAL, "REVAL" }, \
{ LOOKUP_RCU, "RCU" }, \
{ LOOKUP_NO_REVAL, "NO_REVAL" }, \
- { LOOKUP_NO_EVAL, "NO_EVAL" }, \
{ LOOKUP_OPEN, "OPEN" }, \
{ LOOKUP_CREATE, "CREATE" }, \
{ LOOKUP_EXCL, "EXCL" }, \
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ed4e1b07447b..20b3717cd7ca 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
}
hdr->res.fattr = &hdr->fattr;
- hdr->res.count = count;
+ hdr->res.count = 0;
hdr->res.eof = 0;
hdr->res.verf = &hdr->verf;
nfs_fattr_init(&hdr->fattr);
@@ -1251,20 +1251,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
- LIST_HEAD(failed);
+ LIST_HEAD(pages);
desc->pg_io_completion = hdr->io_completion;
desc->pg_dreq = hdr->dreq;
- while (!list_empty(&hdr->pages)) {
- struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ list_splice_init(&hdr->pages, &pages);
+ while (!list_empty(&pages)) {
+ struct nfs_page *req = nfs_list_entry(pages.next);
if (!nfs_pageio_add_request(desc, req))
- nfs_list_move_request(req, &failed);
+ break;
}
nfs_pageio_complete(desc);
- if (!list_empty(&failed)) {
- list_move(&failed, &hdr->pages);
- return desc->pg_error < 0 ? desc->pg_error : -EIO;
+ if (!list_empty(&pages)) {
+ int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
+ hdr->completion_ops->error_cleanup(&pages, err);
+ nfs_set_pgio_error(hdr, err, hdr->io_start);
+ return err;
}
return 0;
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 75bd5b552ba4..bb80034a7661 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -359,9 +359,10 @@ pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
}
/*
- * Update the seqid of a layout stateid
+ * Update the seqid of a layout stateid after receiving
+ * NFS4ERR_OLD_STATEID
*/
-bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode)
{
@@ -377,7 +378,15 @@ bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
spin_lock(&inode->i_lock);
lo = NFS_I(inode)->layout;
- if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
+ if (lo && pnfs_layout_is_valid(lo) &&
+ nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
+ /* Is our call using the most recent seqid? If so, bump it */
+ if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
+ nfs4_stateid_seqid_inc(dst);
+ ret = true;
+ goto out;
+ }
+ /* Try to update the seqid to the most recent */
err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
if (err != -EBUSY) {
dst->seqid = lo->plh_stateid.seqid;
@@ -385,6 +394,7 @@ bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
ret = true;
}
}
+out:
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
return ret;
@@ -1440,6 +1450,52 @@ out_noroc:
return false;
}
+int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+ struct nfs4_layoutreturn_args **argpp,
+ struct nfs4_layoutreturn_res **respp,
+ int *ret)
+{
+ struct nfs4_layoutreturn_args *arg = *argpp;
+ int retval = -EAGAIN;
+
+ if (!arg)
+ return 0;
+ /* Handle Layoutreturn errors */
+ switch (*ret) {
+ case 0:
+ retval = 0;
+ break;
+ case -NFS4ERR_NOMATCHING_LAYOUT:
+ /* Was there an RPC level error? If not, retry */
+ if (task->tk_rpc_status == 0)
+ break;
+ /* If the call was not sent, let caller handle it */
+ if (!RPC_WAS_SENT(task))
+ return 0;
+ /*
+ * Otherwise, assume the call succeeded and
+ * that we need to release the layout
+ */
+ *ret = 0;
+ (*respp)->lrs_present = 0;
+ retval = 0;
+ break;
+ case -NFS4ERR_DELAY:
+ /* Let the caller handle the retry */
+ *ret = -NFS4ERR_NOMATCHING_LAYOUT;
+ return 0;
+ case -NFS4ERR_OLD_STATEID:
+ if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
+ &arg->range, inode))
+ break;
+ *ret = -NFS4ERR_NOMATCHING_LAYOUT;
+ return -EAGAIN;
+ }
+ *argpp = NULL;
+ *respp = NULL;
+ return retval;
+}
+
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
int ret)
@@ -1449,10 +1505,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
const nfs4_stateid *res_stateid = NULL;
struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
- if (ret == 0) {
- arg_stateid = &args->stateid;
+ switch (ret) {
+ case -NFS4ERR_NOMATCHING_LAYOUT:
+ break;
+ case 0:
if (res->lrs_present)
res_stateid = &res->stateid;
+ /* Fallthrough */
+ default:
+ arg_stateid = &args->stateid;
}
pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
res_stateid);
@@ -1903,12 +1964,6 @@ lookup_again:
goto out_unlock;
}
- if (!nfs4_valid_open_stateid(ctx->state)) {
- trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
- PNFS_UPDATE_LAYOUT_INVALID_OPEN);
- goto out_unlock;
- }
-
/*
* Choose a stateid for the LAYOUTGET. If we don't have a layout
* stateid, or it has been invalidated, then we must use the open
@@ -1939,6 +1994,7 @@ lookup_again:
iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
NULL, &stateid, NULL);
if (status != 0) {
+ lseg = ERR_PTR(status);
trace_pnfs_update_layout(ino, pos, count,
iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_INVALID_OPEN);
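
pnfs_roc_done() above takes over the layoutreturn error handling that nfs4_close_done() and nfs4_delegreturn_done() used to duplicate: it clears the layoutreturn arguments once they are no longer needed and returns -EAGAIN when the caller should restart the RPC. A condensed sketch of a ->rpc_call_done consumer, with illustrative names:

static void example_lr_done(struct rpc_task *task, struct inode *inode,
			    struct nfs4_layoutreturn_args **lr_args,
			    struct nfs4_layoutreturn_res **lr_res,
			    int *lr_ret)
{
	/* -EAGAIN means: stateid refreshed, restart this RPC */
	if (pnfs_roc_done(task, inode, lr_args, lr_res, lr_ret) == -EAGAIN) {
		task->tk_status = 0;
		rpc_restart_call_prepare(task);
	}
}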
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index f15609c003d8..f8a38065c7e4 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -261,7 +261,7 @@ int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
bool is_recall);
int pnfs_destroy_layouts_byclid(struct nfs_client *clp,
bool is_recall);
-bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode);
void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo);
@@ -282,6 +282,10 @@ bool pnfs_roc(struct inode *ino,
struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
const struct cred *cred);
+int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+ struct nfs4_layoutreturn_args **argpp,
+ struct nfs4_layoutreturn_res **respp,
+ int *ret);
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
int ret);
@@ -701,6 +705,15 @@ pnfs_roc(struct inode *ino,
return false;
}
+static inline int
+pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+ struct nfs4_layoutreturn_args **argpp,
+ struct nfs4_layoutreturn_res **respp,
+ int *ret)
+{
+ return 0;
+}
+
static inline void
pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
@@ -785,7 +798,7 @@ static inline void nfs4_pnfs_v3_ds_connect_unload(void)
{
}
-static inline bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+static inline bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode)
{
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index c0046c348910..82af4809b869 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -627,11 +627,16 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
/* Add this address as an alias */
rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
rpc_clnt_test_and_add_xprt, NULL);
- } else
- clp = get_v3_ds_connect(mds_srv,
- (struct sockaddr *)&da->da_addr,
- da->da_addrlen, IPPROTO_TCP,
- timeo, retrans);
+ continue;
+ }
+ clp = get_v3_ds_connect(mds_srv,
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP,
+ timeo, retrans);
+ if (IS_ERR(clp))
+ continue;
+ clp->cl_rpcclient->cl_softerr = 0;
+ clp->cl_rpcclient->cl_softrtry = 0;
}
if (IS_ERR(clp)) {
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 5552fa8b6e12..0f7288b94633 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
/* Emulate the eof flag, which isn't normally needed in NFSv2
* as it is guaranteed to always return the file attributes
*/
- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+ if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
hdr->res.eof = 1;
}
return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
+ hdr->res.count = hdr->args.count;
nfs_writeback_update_inode(hdr);
+ }
return 0;
}
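
The NFSv2 eof emulation above gains a second condition: a read that requested a non-zero count but got zero bytes back is also reported as eof, presumably so the reader does not loop when the cached file size is stale. A standalone sketch of the predicate (names are illustrative):

static inline bool example_nfs2_eof(u32 requested, u32 returned,
				    u64 offset, u64 cached_size)
{
	/* Nothing came back for a non-zero request, or the read reached
	 * the (cached) file size: either way, emulate the eof flag. */
	return (returned == 0 && requested > 0) ||
	       offset + returned >= cached_size;
}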
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c19841c82b6a..cfe0b586eadd 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
-static void nfs_readpage_release(struct nfs_page *req)
+static void nfs_readpage_release(struct nfs_page *req, int error)
{
struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
+ struct page *page = req->wb_page;
dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
(long long)req_offset(req));
+ if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
+ SetPageError(page);
if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
- if (PageUptodate(req->wb_page))
- nfs_readpage_to_fscache(inode, req->wb_page, 0);
+ struct address_space *mapping = page_file_mapping(page);
- unlock_page(req->wb_page);
+ if (PageUptodate(page))
+ nfs_readpage_to_fscache(inode, page, 0);
+ else if (!PageError(page) && !PagePrivate(page))
+ generic_error_remove_page(mapping, page);
+ unlock_page(page);
}
nfs_release_request(req);
}
@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
&nfs_async_read_completion_ops);
if (!nfs_pageio_add_request(&pgio, new)) {
nfs_list_remove_request(new);
- nfs_readpage_release(new);
+ nfs_readpage_release(new, pgio.pg_error);
}
nfs_pageio_complete(&pgio);
@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
unsigned long bytes = 0;
+ int error;
if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
goto out;
@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
zero_user_segment(page, start, end);
}
}
+ error = 0;
bytes += req->wb_bytes;
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
if (bytes <= hdr->good_bytes)
nfs_page_group_set_uptodate(req);
+ else {
+ error = hdr->error;
+ xchg(&nfs_req_openctx(req)->error, error);
+ }
} else
nfs_page_group_set_uptodate(req);
nfs_list_remove_request(req);
- nfs_readpage_release(req);
+ nfs_readpage_release(req, error);
}
out:
hdr->release(hdr);
@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_readpage_release(req);
+ nfs_readpage_release(req, error);
}
}
@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
goto out;
}
+ xchg(&ctx->error, 0);
error = nfs_readpage_async(ctx, inode, page);
-
+ if (!error) {
+ error = wait_on_page_locked_killable(page);
+ if (!PageUptodate(page) && !error)
+ error = xchg(&ctx->error, 0);
+ }
out:
put_nfs_open_context(ctx);
return error;
@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
zero_user_segment(page, len, PAGE_SIZE);
if (!nfs_pageio_add_request(desc->pgio, new)) {
nfs_list_remove_request(new);
- nfs_readpage_release(new);
error = desc->pgio->pg_error;
+ nfs_readpage_release(new, error);
goto out;
}
return 0;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 628631e2e34f..a84df7d63403 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2260,6 +2260,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
data->acdirmin != nfss->acdirmin / HZ ||
data->acdirmax != nfss->acdirmax / HZ ||
data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
+ (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
data->nfs_server.port != nfss->port ||
data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
!rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
@@ -2381,6 +2382,15 @@ void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
sb->s_flags |= SB_POSIXACL;
sb->s_time_gran = 1;
sb->s_export_op = &nfs_export_ops;
+ } else
+ sb->s_time_gran = 1000;
+
+ if (server->nfs_client->rpc_ops->version != 4) {
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
+ } else {
+ sb->s_time_min = S64_MIN;
+ sb->s_time_max = S64_MAX;
}
nfs_initialise_sb(sb);
@@ -2401,7 +2411,6 @@ static void nfs_clone_super(struct super_block *sb,
sb->s_maxbytes = old_sb->s_maxbytes;
sb->s_xattr = old_sb->s_xattr;
sb->s_op = old_sb->s_op;
- sb->s_time_gran = 1;
sb->s_export_op = old_sb->s_export_op;
if (server->nfs_client->rpc_ops->version != 2) {
@@ -2409,6 +2418,16 @@ static void nfs_clone_super(struct super_block *sb,
* so ourselves when necessary.
*/
sb->s_flags |= SB_POSIXACL;
+ sb->s_time_gran = 1;
+ } else
+ sb->s_time_gran = 1000;
+
+ if (server->nfs_client->rpc_ops->version != 4) {
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
+ } else {
+ sb->s_time_min = S64_MIN;
+ sb->s_time_max = S64_MAX;
}
nfs_initialise_sb(sb);
@@ -2626,6 +2645,13 @@ int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
}
EXPORT_SYMBOL_GPL(nfs_clone_sb_security);
+static void nfs_set_readahead(struct backing_dev_info *bdi,
+ unsigned long iomax_pages)
+{
+ bdi->ra_pages = VM_READAHEAD_PAGES;
+ bdi->io_pages = iomax_pages;
+}
+
struct dentry *nfs_fs_mount_common(struct nfs_server *server,
int flags, const char *dev_name,
struct nfs_mount_info *mount_info,
@@ -2668,7 +2694,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
mntroot = ERR_PTR(error);
goto error_splat_super;
}
- s->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD;
+ nfs_set_readahead(s->s_bdi, server->rpages);
server->super = s;
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 92d9cadc6102..52cab65f91cf 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
struct inode *inode);
@@ -591,23 +592,13 @@ release_request:
static void nfs_write_error(struct nfs_page *req, int error)
{
+ nfs_set_pageerror(page_file_mapping(req->wb_page));
nfs_mapping_set_error(req->wb_page, error);
+ nfs_inode_remove_request(req);
nfs_end_page_writeback(req);
nfs_release_request(req);
}
-static bool
-nfs_error_is_fatal_on_server(int err)
-{
- switch (err) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- return false;
- }
- return nfs_error_is_fatal(err);
-}
-
/*
* Find an associated nfs write request, and prepare to flush it out
* May return an error if the user signalled nfs_wait_on_request().
@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
struct page *page)
{
- struct address_space *mapping;
struct nfs_page *req;
int ret = 0;
@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
/* If there is a fatal error that covers this write, just exit */
- ret = 0;
- mapping = page_file_mapping(page);
- if (test_bit(AS_ENOSPC, &mapping->flags) ||
- test_bit(AS_EIO, &mapping->flags))
+ ret = pgio->pg_error;
+ if (nfs_error_is_fatal_on_server(ret))
goto out_launder;
+ ret = 0;
if (!nfs_pageio_add_request(pgio, req)) {
ret = pgio->pg_error;
/*
@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
} else
ret = -EAGAIN;
nfs_redirty_request(req);
+ pgio->pg_error = 0;
} else
nfs_add_stats(page_file_mapping(page)->host,
NFSIOS_WRITEPAGES, 1);
@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
ret = nfs_page_async_flush(pgio, page);
if (ret == -EAGAIN) {
redirty_page_for_writepage(wbc, page);
- ret = 0;
+ ret = AOP_WRITEPAGE_ACTIVATE;
}
return ret;
}
@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
nfs_pageio_init_write(&pgio, inode, 0,
false, &nfs_async_write_completion_ops);
err = nfs_do_writepage(page, wbc, &pgio);
+ pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
if (err < 0)
return err;
- if (pgio.pg_error < 0)
+ if (nfs_error_is_fatal(pgio.pg_error))
return pgio.pg_error;
return 0;
}
@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
int ret;
ret = nfs_writepage_locked(page, wbc);
- unlock_page(page);
+ if (ret != AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
return ret;
}
@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
int ret;
ret = nfs_do_writepage(page, wbc, data);
- unlock_page(page);
+ if (ret != AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
return ret;
}
@@ -733,13 +726,14 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
&nfs_async_write_completion_ops);
pgio.pg_io_completion = ioc;
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
+ pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
nfs_io_completion_put(ioc);
if (err < 0)
goto out_err;
err = pgio.pg_error;
- if (err < 0)
+ if (nfs_error_is_fatal(err))
goto out_err;
return 0;
out_err:
@@ -792,7 +786,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_page *head;
- atomic_long_dec(&nfsi->nrequests);
if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
head = req->wb_head;
@@ -805,8 +798,10 @@ static void nfs_inode_remove_request(struct nfs_page *req)
spin_unlock(&mapping->private_lock);
}
- if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+ if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
nfs_release_request(req);
+ atomic_long_dec(&nfsi->nrequests);
+ }
}
static void
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index d25f6bbe7006..10cefb0c07c7 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -3,6 +3,7 @@ config NFSD
tristate "NFS server support"
depends on INET
depends on FILE_LOCKING
+ depends on FSNOTIFY
select LOCKD
select SUNRPC
select EXPORTFS
@@ -147,7 +148,7 @@ config NFSD_V4_SECURITY_LABEL
config NFSD_FAULT_INJECTION
bool "NFS server manual fault injection"
- depends on NFSD_V4 && DEBUG_KERNEL && DEBUG_FS
+ depends on NFSD_V4 && DEBUG_KERNEL && DEBUG_FS && BROKEN
help
This option enables support for manually injecting faults
into the NFS server. This is intended to be used for
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index 2bfb58eefad1..6a40b1afe703 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -11,7 +11,8 @@ obj-$(CONFIG_NFSD) += nfsd.o
nfsd-y += trace.o
nfsd-y += nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
- export.o auth.o lockd.o nfscache.o nfsxdr.o stats.o
+ export.o auth.o lockd.o nfscache.o nfsxdr.o \
+ stats.o filecache.o
nfsd-$(CONFIG_NFSD_FAULT_INJECTION) += fault_inject.o
nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o
diff --git a/fs/nfsd/acl.h b/fs/nfsd/acl.h
index 4cd7c69a6cb9..ba14d2f4b64f 100644
--- a/fs/nfsd/acl.h
+++ b/fs/nfsd/acl.h
@@ -39,14 +39,6 @@ struct nfs4_acl;
struct svc_fh;
struct svc_rqst;
-/*
- * Maximum ACL we'll accept from a client; chosen (somewhat
- * arbitrarily) so that kmalloc'ing the ACL shouldn't require a
- * high-order allocation. This allows 204 ACEs on x86_64:
- */
-#define NFS4_ACL_MAX ((PAGE_SIZE - sizeof(struct nfs4_acl)) \
- / sizeof(struct nfs4_ace))
-
int nfs4_acl_bytes(int entries);
int nfs4_acl_get_whotype(char *, u32);
__be32 nfs4_acl_write_who(struct xdr_stream *xdr, int who);
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 66d4c55eb48e..9bbaa671c079 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -15,6 +15,7 @@
#include "blocklayoutxdr.h"
#include "pnfs.h"
+#include "filecache.h"
#define NFSDDBG_FACILITY NFSDDBG_PNFS
@@ -404,7 +405,7 @@ static void
nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls)
{
struct nfs4_client *clp = ls->ls_stid.sc_client;
- struct block_device *bdev = ls->ls_file->f_path.mnt->mnt_sb->s_bdev;
+ struct block_device *bdev = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_bdev;
bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
nfsd4_scsi_pr_key(clp), 0, true);
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index baa01956a5b3..15422c951fd1 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -22,6 +22,7 @@
#include "nfsfh.h"
#include "netns.h"
#include "pnfs.h"
+#include "filecache.h"
#define NFSDDBG_FACILITY NFSDDBG_EXPORT
@@ -232,6 +233,17 @@ static struct cache_head *expkey_alloc(void)
return NULL;
}
+static void expkey_flush(void)
+{
+ /*
+ * Take the nfsd_mutex here to ensure that the file cache is not
+ * destroyed while we're in the middle of flushing.
+ */
+ mutex_lock(&nfsd_mutex);
+ nfsd_file_cache_purge(current->nsproxy->net_ns);
+ mutex_unlock(&nfsd_mutex);
+}
+
static const struct cache_detail svc_expkey_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPKEY_HASHMAX,
@@ -244,6 +256,7 @@ static const struct cache_detail svc_expkey_cache_template = {
.init = expkey_init,
.update = expkey_update,
.alloc = expkey_alloc,
+ .flush = expkey_flush,
};
static int
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
new file mode 100644
index 000000000000..ef55e9b1cd4e
--- /dev/null
+++ b/fs/nfsd/filecache.c
@@ -0,0 +1,934 @@
+/*
+ * Open file cache.
+ *
+ * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
+ */
+
+#include <linux/hash.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/list_lru.h>
+#include <linux/fsnotify_backend.h>
+#include <linux/fsnotify.h>
+#include <linux/seq_file.h>
+
+#include "vfs.h"
+#include "nfsd.h"
+#include "nfsfh.h"
+#include "netns.h"
+#include "filecache.h"
+#include "trace.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_FH
+
+/* FIXME: dynamically size this for the machine somehow? */
+#define NFSD_FILE_HASH_BITS 12
+#define NFSD_FILE_HASH_SIZE (1 << NFSD_FILE_HASH_BITS)
+#define NFSD_LAUNDRETTE_DELAY (2 * HZ)
+
+#define NFSD_FILE_LRU_RESCAN (0)
+#define NFSD_FILE_SHUTDOWN (1)
+#define NFSD_FILE_LRU_THRESHOLD (4096UL)
+#define NFSD_FILE_LRU_LIMIT (NFSD_FILE_LRU_THRESHOLD << 2)
+
+/* We only care about NFSD_MAY_READ/WRITE for this cache */
+#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)
+
+struct nfsd_fcache_bucket {
+ struct hlist_head nfb_head;
+ spinlock_t nfb_lock;
+ unsigned int nfb_count;
+ unsigned int nfb_maxcount;
+};
+
+static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
+
+static struct kmem_cache *nfsd_file_slab;
+static struct kmem_cache *nfsd_file_mark_slab;
+static struct nfsd_fcache_bucket *nfsd_file_hashtbl;
+static struct list_lru nfsd_file_lru;
+static long nfsd_file_lru_flags;
+static struct fsnotify_group *nfsd_file_fsnotify_group;
+static atomic_long_t nfsd_filecache_count;
+static struct delayed_work nfsd_filecache_laundrette;
+
+enum nfsd_file_laundrette_ctl {
+ NFSD_FILE_LAUNDRETTE_NOFLUSH = 0,
+ NFSD_FILE_LAUNDRETTE_MAY_FLUSH
+};
+
+static void
+nfsd_file_schedule_laundrette(enum nfsd_file_laundrette_ctl ctl)
+{
+ long count = atomic_long_read(&nfsd_filecache_count);
+
+ if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
+ return;
+
+ /* Be more aggressive about scanning if over the threshold */
+ if (count > NFSD_FILE_LRU_THRESHOLD)
+ mod_delayed_work(system_wq, &nfsd_filecache_laundrette, 0);
+ else
+ schedule_delayed_work(&nfsd_filecache_laundrette, NFSD_LAUNDRETTE_DELAY);
+
+ if (ctl == NFSD_FILE_LAUNDRETTE_NOFLUSH)
+ return;
+
+ /* ...and don't delay flushing if we're out of control */
+ if (count >= NFSD_FILE_LRU_LIMIT)
+ flush_delayed_work(&nfsd_filecache_laundrette);
+}
+
+static void
+nfsd_file_slab_free(struct rcu_head *rcu)
+{
+ struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);
+
+ put_cred(nf->nf_cred);
+ kmem_cache_free(nfsd_file_slab, nf);
+}
+
+static void
+nfsd_file_mark_free(struct fsnotify_mark *mark)
+{
+ struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
+ nfm_mark);
+
+ kmem_cache_free(nfsd_file_mark_slab, nfm);
+}
+
+static struct nfsd_file_mark *
+nfsd_file_mark_get(struct nfsd_file_mark *nfm)
+{
+ if (!atomic_inc_not_zero(&nfm->nfm_ref))
+ return NULL;
+ return nfm;
+}
+
+static void
+nfsd_file_mark_put(struct nfsd_file_mark *nfm)
+{
+ if (atomic_dec_and_test(&nfm->nfm_ref)) {
+ fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
+ fsnotify_put_mark(&nfm->nfm_mark);
+ }
+}
+
+static struct nfsd_file_mark *
+nfsd_file_mark_find_or_create(struct nfsd_file *nf)
+{
+ int err;
+ struct fsnotify_mark *mark;
+ struct nfsd_file_mark *nfm = NULL, *new;
+ struct inode *inode = nf->nf_inode;
+
+ do {
+ mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
+ mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
+ nfsd_file_fsnotify_group);
+ if (mark) {
+ nfm = nfsd_file_mark_get(container_of(mark,
+ struct nfsd_file_mark,
+ nfm_mark));
+ mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
+ fsnotify_put_mark(mark);
+ if (likely(nfm))
+ break;
+ } else
+ mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
+
+ /* allocate a new nfm */
+ new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
+ if (!new)
+ return NULL;
+ fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
+ new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
+ atomic_set(&new->nfm_ref, 1);
+
+ err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);
+
+ /*
+ * If the add was successful, then return the object.
+ * Otherwise, we need to put the reference we hold on the
+ * nfm_mark. The fsnotify code will take a reference and put
+ * it on failure, so we can't just free it directly. It's also
+ * not safe to call fsnotify_destroy_mark on it as the
+ * mark->group will be NULL. Thus, we can't let the nfm_ref
+ * counter drive the destruction at this point.
+ */
+ if (likely(!err))
+ nfm = new;
+ else
+ fsnotify_put_mark(&new->nfm_mark);
+ } while (unlikely(err == -EEXIST));
+
+ return nfm;
+}
+
+static struct nfsd_file *
+nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
+ struct net *net)
+{
+ struct nfsd_file *nf;
+
+ nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
+ if (nf) {
+ INIT_HLIST_NODE(&nf->nf_node);
+ INIT_LIST_HEAD(&nf->nf_lru);
+ nf->nf_file = NULL;
+ nf->nf_cred = get_current_cred();
+ nf->nf_net = net;
+ nf->nf_flags = 0;
+ nf->nf_inode = inode;
+ nf->nf_hashval = hashval;
+ atomic_set(&nf->nf_ref, 1);
+ nf->nf_may = may & NFSD_FILE_MAY_MASK;
+ if (may & NFSD_MAY_NOT_BREAK_LEASE) {
+ if (may & NFSD_MAY_WRITE)
+ __set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
+ if (may & NFSD_MAY_READ)
+ __set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
+ }
+ nf->nf_mark = NULL;
+ trace_nfsd_file_alloc(nf);
+ }
+ return nf;
+}
+
+static bool
+nfsd_file_free(struct nfsd_file *nf)
+{
+ bool flush = false;
+
+ trace_nfsd_file_put_final(nf);
+ if (nf->nf_mark)
+ nfsd_file_mark_put(nf->nf_mark);
+ if (nf->nf_file) {
+ get_file(nf->nf_file);
+ filp_close(nf->nf_file, NULL);
+ fput(nf->nf_file);
+ flush = true;
+ }
+ call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
+ return flush;
+}
+
+static bool
+nfsd_file_check_writeback(struct nfsd_file *nf)
+{
+ struct file *file = nf->nf_file;
+ struct address_space *mapping;
+
+ if (!file || !(file->f_mode & FMODE_WRITE))
+ return false;
+ mapping = file->f_mapping;
+ return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
+ mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
+}
+
+static int
+nfsd_file_check_write_error(struct nfsd_file *nf)
+{
+ struct file *file = nf->nf_file;
+
+ if (!file || !(file->f_mode & FMODE_WRITE))
+ return 0;
+ return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
+}
+
+static bool
+nfsd_file_in_use(struct nfsd_file *nf)
+{
+ return nfsd_file_check_writeback(nf) ||
+ nfsd_file_check_write_error(nf);
+}
+
+static void
+nfsd_file_do_unhash(struct nfsd_file *nf)
+{
+ lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
+
+ trace_nfsd_file_unhash(nf);
+
+ if (nfsd_file_check_write_error(nf))
+ nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
+ --nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
+ hlist_del_rcu(&nf->nf_node);
+ if (!list_empty(&nf->nf_lru))
+ list_lru_del(&nfsd_file_lru, &nf->nf_lru);
+ atomic_long_dec(&nfsd_filecache_count);
+}
+
+static bool
+nfsd_file_unhash(struct nfsd_file *nf)
+{
+ if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
+ nfsd_file_do_unhash(nf);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Return true if the file was unhashed.
+ */
+static bool
+nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
+{
+ lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
+
+ trace_nfsd_file_unhash_and_release_locked(nf);
+ if (!nfsd_file_unhash(nf))
+ return false;
+ /* keep final reference for nfsd_file_lru_dispose */
+ if (atomic_add_unless(&nf->nf_ref, -1, 1))
+ return true;
+
+ list_add(&nf->nf_lru, dispose);
+ return true;
+}
+
+static int
+nfsd_file_put_noref(struct nfsd_file *nf)
+{
+ int count;
+ trace_nfsd_file_put(nf);
+
+ count = atomic_dec_return(&nf->nf_ref);
+ if (!count) {
+ WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
+ nfsd_file_free(nf);
+ }
+ return count;
+}
+
+void
+nfsd_file_put(struct nfsd_file *nf)
+{
+ bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
+ bool unused = !nfsd_file_in_use(nf);
+
+ set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+ if (nfsd_file_put_noref(nf) == 1 && is_hashed && unused)
+ nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_MAY_FLUSH);
+}
+
+struct nfsd_file *
+nfsd_file_get(struct nfsd_file *nf)
+{
+ if (likely(atomic_inc_not_zero(&nf->nf_ref)))
+ return nf;
+ return NULL;
+}
+
+static void
+nfsd_file_dispose_list(struct list_head *dispose)
+{
+ struct nfsd_file *nf;
+
+ while (!list_empty(dispose)) {
+ nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
+ list_del(&nf->nf_lru);
+ nfsd_file_put_noref(nf);
+ }
+}
+
+static void
+nfsd_file_dispose_list_sync(struct list_head *dispose)
+{
+ bool flush = false;
+ struct nfsd_file *nf;
+
+ while (!list_empty(dispose)) {
+ nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
+ list_del(&nf->nf_lru);
+ if (!atomic_dec_and_test(&nf->nf_ref))
+ continue;
+ if (nfsd_file_free(nf))
+ flush = true;
+ }
+ if (flush)
+ flush_delayed_fput();
+}
+
+/*
+ * Note this can deadlock with nfsd_file_cache_purge.
+ */
+static enum lru_status
+nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
+ spinlock_t *lock, void *arg)
+ __releases(lock)
+ __acquires(lock)
+{
+ struct list_head *head = arg;
+ struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
+
+ /*
+ * Do a lockless refcount check. The hashtable holds one reference, so
+ * we look to see if anything else has a reference, or if any have
+ * been put since the shrinker last ran. Those don't get unhashed and
+ * released.
+ *
+ * Note that in the put path, we set the flag and then decrement the
+ * counter. Here we check the counter and then test and clear the flag.
+ * That order is deliberate to ensure that we can do this locklessly.
+ */
+ if (atomic_read(&nf->nf_ref) > 1)
+ goto out_skip;
+
+ /*
+ * Don't throw out files that are still undergoing I/O or
+ * that have uncleared errors pending.
+ */
+ if (nfsd_file_check_writeback(nf))
+ goto out_skip;
+
+ if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
+ goto out_rescan;
+
+ if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
+ goto out_skip;
+
+ list_lru_isolate_move(lru, &nf->nf_lru, head);
+ return LRU_REMOVED;
+out_rescan:
+ set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags);
+out_skip:
+ return LRU_SKIP;
+}
+
+static void
+nfsd_file_lru_dispose(struct list_head *head)
+{
+ while (!list_empty(head)) {
+ struct nfsd_file *nf = list_first_entry(head,
+ struct nfsd_file, nf_lru);
+ list_del_init(&nf->nf_lru);
+ spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
+ nfsd_file_do_unhash(nf);
+ spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
+ nfsd_file_put_noref(nf);
+ }
+}
+
+static unsigned long
+nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
+{
+ return list_lru_count(&nfsd_file_lru);
+}
+
+static unsigned long
+nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
+{
+ LIST_HEAD(head);
+ unsigned long ret;
+
+ ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head);
+ nfsd_file_lru_dispose(&head);
+ return ret;
+}
+
+static struct shrinker nfsd_file_shrinker = {
+ .scan_objects = nfsd_file_lru_scan,
+ .count_objects = nfsd_file_lru_count,
+ .seeks = 1,
+};
+
+static void
+__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
+ struct list_head *dispose)
+{
+ struct nfsd_file *nf;
+ struct hlist_node *tmp;
+
+ spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
+ hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
+ if (inode == nf->nf_inode)
+ nfsd_file_unhash_and_release_locked(nf, dispose);
+ }
+ spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
+}
+
+/**
+ * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
+ * @inode: inode of the file to attempt to remove
+ *
+ * Walk the whole hash bucket, looking for any files that correspond to "inode".
+ * If any do, then unhash them and put the hashtable reference to them and
+ * destroy any that had their last reference put. Also ensure that any of the
+ * resulting fputs have their final __fput done as well.
+ */
+void
+nfsd_file_close_inode_sync(struct inode *inode)
+{
+ unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
+ NFSD_FILE_HASH_BITS);
+ LIST_HEAD(dispose);
+
+ __nfsd_file_close_inode(inode, hashval, &dispose);
+ trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
+ nfsd_file_dispose_list_sync(&dispose);
+}
+
+/**
+ * nfsd_file_close_inode - attempt to forcibly close a nfsd_file
+ * @inode: inode of the file to attempt to remove
+ *
+ * Walk the whole hash bucket, looking for any files that correspond to "inode".
+ * If any do, then unhash them and put the hashtable reference to them and
+ * destroy any that had their last reference put.
+ */
+static void
+nfsd_file_close_inode(struct inode *inode)
+{
+ unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
+ NFSD_FILE_HASH_BITS);
+ LIST_HEAD(dispose);
+
+ __nfsd_file_close_inode(inode, hashval, &dispose);
+ trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
+ nfsd_file_dispose_list(&dispose);
+}
+
+/**
+ * nfsd_file_delayed_close - close unused nfsd_files
+ * @work: dummy
+ *
+ * Walk the LRU list and close any entries that have not been used since
+ * the last scan.
+ *
+ * Note this can deadlock with nfsd_file_cache_purge.
+ */
+static void
+nfsd_file_delayed_close(struct work_struct *work)
+{
+ LIST_HEAD(head);
+
+ list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX);
+
+ if (test_and_clear_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags))
+ nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_NOFLUSH);
+
+ if (!list_empty(&head)) {
+ nfsd_file_lru_dispose(&head);
+ flush_delayed_fput();
+ }
+}
+
+static int
+nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
+ void *data)
+{
+ struct file_lock *fl = data;
+
+ /* Only close files for F_SETLEASE leases */
+ if (fl->fl_flags & FL_LEASE)
+ nfsd_file_close_inode_sync(file_inode(fl->fl_file));
+ return 0;
+}
+
+static struct notifier_block nfsd_file_lease_notifier = {
+ .notifier_call = nfsd_file_lease_notifier_call,
+};
+
+static int
+nfsd_file_fsnotify_handle_event(struct fsnotify_group *group,
+ struct inode *inode,
+ u32 mask, const void *data, int data_type,
+ const struct qstr *file_name, u32 cookie,
+ struct fsnotify_iter_info *iter_info)
+{
+ trace_nfsd_file_fsnotify_handle_event(inode, mask);
+
+ /* Should be no marks on non-regular files */
+ if (!S_ISREG(inode->i_mode)) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ /* don't close files if this was not the last link */
+ if (mask & FS_ATTRIB) {
+ if (inode->i_nlink)
+ return 0;
+ }
+
+ nfsd_file_close_inode(inode);
+ return 0;
+}
+
+static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
+ .handle_event = nfsd_file_fsnotify_handle_event,
+ .free_mark = nfsd_file_mark_free,
+};
+
+int
+nfsd_file_cache_init(void)
+{
+ int ret = -ENOMEM;
+ unsigned int i;
+
+ clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
+
+ if (nfsd_file_hashtbl)
+ return 0;
+
+ nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
+ sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
+ if (!nfsd_file_hashtbl) {
+ pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
+ goto out_err;
+ }
+
+ nfsd_file_slab = kmem_cache_create("nfsd_file",
+ sizeof(struct nfsd_file), 0, 0, NULL);
+ if (!nfsd_file_slab) {
+ pr_err("nfsd: unable to create nfsd_file_slab\n");
+ goto out_err;
+ }
+
+ nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
+ sizeof(struct nfsd_file_mark), 0, 0, NULL);
+ if (!nfsd_file_mark_slab) {
+ pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
+ goto out_err;
+ }
+
+ ret = list_lru_init(&nfsd_file_lru);
+ if (ret) {
+ pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
+ goto out_err;
+ }
+
+ ret = register_shrinker(&nfsd_file_shrinker);
+ if (ret) {
+ pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
+ goto out_lru;
+ }
+
+ ret = lease_register_notifier(&nfsd_file_lease_notifier);
+ if (ret) {
+ pr_err("nfsd: unable to register lease notifier: %d\n", ret);
+ goto out_shrinker;
+ }
+
+ nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
+ if (IS_ERR(nfsd_file_fsnotify_group)) {
+ pr_err("nfsd: unable to create fsnotify group: %ld\n",
+ PTR_ERR(nfsd_file_fsnotify_group));
+ nfsd_file_fsnotify_group = NULL;
+ goto out_notifier;
+ }
+
+ for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
+ INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
+ spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
+ }
+
+ INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_delayed_close);
+out:
+ return ret;
+out_notifier:
+ lease_unregister_notifier(&nfsd_file_lease_notifier);
+out_shrinker:
+ unregister_shrinker(&nfsd_file_shrinker);
+out_lru:
+ list_lru_destroy(&nfsd_file_lru);
+out_err:
+ kmem_cache_destroy(nfsd_file_slab);
+ nfsd_file_slab = NULL;
+ kmem_cache_destroy(nfsd_file_mark_slab);
+ nfsd_file_mark_slab = NULL;
+ kfree(nfsd_file_hashtbl);
+ nfsd_file_hashtbl = NULL;
+ goto out;
+}
+
+/*
+ * Note this can deadlock with nfsd_file_lru_cb.
+ */
+void
+nfsd_file_cache_purge(struct net *net)
+{
+ unsigned int i;
+ struct nfsd_file *nf;
+ struct hlist_node *next;
+ LIST_HEAD(dispose);
+ bool del;
+
+ if (!nfsd_file_hashtbl)
+ return;
+
+ for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
+ struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];
+
+ spin_lock(&nfb->nfb_lock);
+ hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
+ if (net && nf->nf_net != net)
+ continue;
+ del = nfsd_file_unhash_and_release_locked(nf, &dispose);
+
+ /*
+ * Deadlock detected! Something marked this entry as
+ * unhashed, but hasn't removed it from the hash list.
+ */
+ WARN_ON_ONCE(!del);
+ }
+ spin_unlock(&nfb->nfb_lock);
+ nfsd_file_dispose_list(&dispose);
+ }
+}
+
+void
+nfsd_file_cache_shutdown(void)
+{
+ LIST_HEAD(dispose);
+
+ set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
+
+ lease_unregister_notifier(&nfsd_file_lease_notifier);
+ unregister_shrinker(&nfsd_file_shrinker);
+ /*
+ * make sure all callers of nfsd_file_lru_cb are done before
+ * calling nfsd_file_cache_purge
+ */
+ cancel_delayed_work_sync(&nfsd_filecache_laundrette);
+ nfsd_file_cache_purge(NULL);
+ list_lru_destroy(&nfsd_file_lru);
+ rcu_barrier();
+ fsnotify_put_group(nfsd_file_fsnotify_group);
+ nfsd_file_fsnotify_group = NULL;
+ kmem_cache_destroy(nfsd_file_slab);
+ nfsd_file_slab = NULL;
+ fsnotify_wait_marks_destroyed();
+ kmem_cache_destroy(nfsd_file_mark_slab);
+ nfsd_file_mark_slab = NULL;
+ kfree(nfsd_file_hashtbl);
+ nfsd_file_hashtbl = NULL;
+}
+
+static bool
+nfsd_match_cred(const struct cred *c1, const struct cred *c2)
+{
+ int i;
+
+ if (!uid_eq(c1->fsuid, c2->fsuid))
+ return false;
+ if (!gid_eq(c1->fsgid, c2->fsgid))
+ return false;
+ if (c1->group_info == NULL || c2->group_info == NULL)
+ return c1->group_info == c2->group_info;
+ if (c1->group_info->ngroups != c2->group_info->ngroups)
+ return false;
+ for (i = 0; i < c1->group_info->ngroups; i++) {
+ if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
+ return false;
+ }
+ return true;
+}
+
+static struct nfsd_file *
+nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
+ unsigned int hashval, struct net *net)
+{
+ struct nfsd_file *nf;
+ unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
+
+ hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
+ nf_node) {
+ if ((need & nf->nf_may) != need)
+ continue;
+ if (nf->nf_inode != inode)
+ continue;
+ if (nf->nf_net != net)
+ continue;
+ if (!nfsd_match_cred(nf->nf_cred, current_cred()))
+ continue;
+ if (nfsd_file_get(nf) != NULL)
+ return nf;
+ }
+ return NULL;
+}
+
+/**
+ * nfsd_file_is_cached - are there any cached open files for this inode?
+ * @inode: inode of the file to check
+ *
+ * Scan the hashtable for open files that match this inode. Returns true if there
+ * are any, and false if not.
+ */
+bool
+nfsd_file_is_cached(struct inode *inode)
+{
+ bool ret = false;
+ struct nfsd_file *nf;
+ unsigned int hashval;
+
+ hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
+ nf_node) {
+ if (inode == nf->nf_inode) {
+ ret = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ trace_nfsd_file_is_cached(inode, hashval, (int)ret);
+ return ret;
+}
+
+__be32
+nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf)
+{
+ __be32 status;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_file *nf, *new;
+ struct inode *inode;
+ unsigned int hashval;
+
+ /* FIXME: skip this if fh_dentry is already set? */
+ status = fh_verify(rqstp, fhp, S_IFREG,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ if (status != nfs_ok)
+ return status;
+
+ inode = d_inode(fhp->fh_dentry);
+ hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
+retry:
+ rcu_read_lock();
+ nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
+ rcu_read_unlock();
+ if (nf)
+ goto wait_for_construction;
+
+ new = nfsd_file_alloc(inode, may_flags, hashval, net);
+ if (!new) {
+ trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
+ NULL, nfserr_jukebox);
+ return nfserr_jukebox;
+ }
+
+ spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
+ nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
+ if (nf == NULL)
+ goto open_file;
+ spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
+ nfsd_file_slab_free(&new->nf_rcu);
+
+wait_for_construction:
+ wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
+
+ /* Did construction of this file fail? */
+ if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
+ nfsd_file_put_noref(nf);
+ goto retry;
+ }
+
+ this_cpu_inc(nfsd_file_cache_hits);
+
+ if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
+ bool write = (may_flags & NFSD_MAY_WRITE);
+
+ if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
+ (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
+ status = nfserrno(nfsd_open_break_lease(
+ file_inode(nf->nf_file), may_flags));
+ if (status == nfs_ok) {
+ clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
+ if (write)
+ clear_bit(NFSD_FILE_BREAK_WRITE,
+ &nf->nf_flags);
+ }
+ }
+ }
+out:
+ if (status == nfs_ok) {
+ *pnf = nf;
+ } else {
+ nfsd_file_put(nf);
+ nf = NULL;
+ }
+
+ trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
+ return status;
+open_file:
+ nf = new;
+ /* Take reference for the hashtable */
+ atomic_inc(&nf->nf_ref);
+ __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
+ __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
+ list_lru_add(&nfsd_file_lru, &nf->nf_lru);
+ hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
+ ++nfsd_file_hashtbl[hashval].nfb_count;
+ nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
+ nfsd_file_hashtbl[hashval].nfb_count);
+ spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
+ atomic_long_inc(&nfsd_filecache_count);
+
+ nf->nf_mark = nfsd_file_mark_find_or_create(nf);
+ if (nf->nf_mark)
+ status = nfsd_open_verified(rqstp, fhp, S_IFREG,
+ may_flags, &nf->nf_file);
+ else
+ status = nfserr_jukebox;
+ /*
+ * If construction failed, or we raced with a call to unlink()
+ * then unhash.
+ */
+ if (status != nfs_ok || inode->i_nlink == 0) {
+ bool do_free;
+ spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
+ do_free = nfsd_file_unhash(nf);
+ spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
+ if (do_free)
+ nfsd_file_put_noref(nf);
+ }
+ clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
+ goto out;
+}
+
+/*
+ * Note that fields may be added, removed or reordered in the future. Programs
+ * scraping this file for info should test the labels to ensure they're
+ * getting the correct field.
+ */
+static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
+{
+ unsigned int i, count = 0, longest = 0;
+ unsigned long hits = 0;
+
+ /*
+ * No need for spinlocks here since we're not terribly interested in
+ * accuracy. We do take the nfsd_mutex simply to ensure that we
+ * don't end up racing with server shutdown.
+ */
+ mutex_lock(&nfsd_mutex);
+ if (nfsd_file_hashtbl) {
+ for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
+ count += nfsd_file_hashtbl[i].nfb_count;
+ longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
+ }
+ }
+ mutex_unlock(&nfsd_mutex);
+
+ for_each_possible_cpu(i)
+ hits += per_cpu(nfsd_file_cache_hits, i);
+
+ seq_printf(m, "total entries: %u\n", count);
+ seq_printf(m, "longest chain: %u\n", longest);
+ seq_printf(m, "cache hits: %lu\n", hits);
+ return 0;
+}
+
+int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nfsd_file_cache_stats_show, NULL);
+}
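
The hunks that follow convert nfsd's NFSv4 code from bare struct file pointers to this cache. A minimal sketch of the caller pattern the new API implies, using only the interfaces added above; the surrounding nfsd operation and its error handling are illustrative, not part of this patch:

/*
 * Sketch: acquire a cached open file for read access, use the
 * underlying struct file, then drop the cache reference.
 */
static __be32 example_read_op(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
	struct nfsd_file *nf;
	__be32 status;

	status = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (status != nfs_ok)
		return status;
	/* nf->nf_file behaves like any other open struct file here */
	status = nfserrno(vfs_fsync(nf->nf_file, 0));	/* placeholder use */
	nfsd_file_put(nf);	/* may kick the laundrette for unused entries */
	return status;
}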
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
new file mode 100644
index 000000000000..851d9abf54c2
--- /dev/null
+++ b/fs/nfsd/filecache.h
@@ -0,0 +1,61 @@
+#ifndef _FS_NFSD_FILECACHE_H
+#define _FS_NFSD_FILECACHE_H
+
+#include <linux/fsnotify_backend.h>
+
+/*
+ * This is the fsnotify_mark container that nfsd attaches to the files that it
+ * is holding open. Note that we have a separate refcount here aside from the
+ * one in the fsnotify_mark. We only want a single fsnotify_mark attached to
+ * the inode, and for each nfsd_file to hold a reference to it.
+ *
+ * The fsnotify_mark is itself refcounted, but that's not sufficient to tell us
+ * how to put that reference. If there are still outstanding nfsd_files that
+ * reference the mark, then we would want to call fsnotify_put_mark on it.
+ * If there were not, then we'd need to call fsnotify_destroy_mark. Since we
+ * can't really tell the difference, we use the nfm_mark to keep track of how
+ * many nfsd_files hold references to the mark. When that counter goes to zero
+ * then we know to call fsnotify_destroy_mark on it.
+ */
+struct nfsd_file_mark {
+ struct fsnotify_mark nfm_mark;
+ atomic_t nfm_ref;
+};
+
+/*
+ * A representation of a file that has been opened by knfsd. These are hashed
+ * in the hashtable by inode pointer value. Note that this object doesn't
+ * hold a reference to the inode by itself, so the nf_inode pointer should
+ * never be dereferenced, only used for comparison.
+ */
+struct nfsd_file {
+ struct hlist_node nf_node;
+ struct list_head nf_lru;
+ struct rcu_head nf_rcu;
+ struct file *nf_file;
+ const struct cred *nf_cred;
+ struct net *nf_net;
+#define NFSD_FILE_HASHED (0)
+#define NFSD_FILE_PENDING (1)
+#define NFSD_FILE_BREAK_READ (2)
+#define NFSD_FILE_BREAK_WRITE (3)
+#define NFSD_FILE_REFERENCED (4)
+ unsigned long nf_flags;
+ struct inode *nf_inode;
+ unsigned int nf_hashval;
+ atomic_t nf_ref;
+ unsigned char nf_may;
+ struct nfsd_file_mark *nf_mark;
+};
+
+int nfsd_file_cache_init(void);
+void nfsd_file_cache_purge(struct net *);
+void nfsd_file_cache_shutdown(void);
+void nfsd_file_put(struct nfsd_file *nf);
+struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
+void nfsd_file_close_inode_sync(struct inode *inode);
+bool nfsd_file_is_cached(struct inode *inode);
+__be32 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **nfp);
+int nfsd_file_cache_stats_open(struct inode *, struct file *);
+#endif /* _FS_NFSD_FILECACHE_H */
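
The NFSD_FILE_PENDING bit above acts as a construction barrier: the inserting thread opens the file while PENDING is set, and racing lookups sleep on the bit, then re-check NFSD_FILE_HASHED to learn whether construction succeeded. A condensed restatement of that hand-off, distilled from nfsd_file_acquire() in the new filecache.c (abbreviated for illustration, not additional patch content):

/* constructing side: publish, open, then release waiters */
__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
/* ... hash the entry, drop the bucket lock, open nf->nf_file ... */
clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
smp_mb__after_atomic();
wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);

/* racing lookup: wait, then check whether construction stuck */
wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
	nfsd_file_put_noref(nf);	/* failed or unhashed: retry lookup */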
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index bdfe5bcb3dcd..9a4ef815fb8c 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -104,6 +104,7 @@ struct nfsd_net {
/* Time of server startup */
struct timespec64 nfssvc_boot;
+ seqlock_t boot_lock;
/*
* Max number of connections this nfsd container will allow. Defaults
@@ -179,4 +180,7 @@ struct nfsd_net {
extern void nfsd_netns_free_versions(struct nfsd_net *nn);
extern unsigned int nfsd_net_id;
+
+void nfsd_copy_boot_verifier(__be32 verf[2], struct nfsd_net *nn);
+void nfsd_reset_boot_verifier(struct nfsd_net *nn);
#endif /* __NFSD_NETNS_H__ */
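
boot_lock and the two helpers declared here are implemented elsewhere in the series (nfssvc.c, which is not part of this excerpt). For orientation, a plausible sketch of the seqlock pairing they imply; the real implementation may differ in detail:

void nfsd_copy_boot_verifier(__be32 verf[2], struct nfsd_net *nn)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&nn->boot_lock);
		verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
		verf[1] = (__force __be32)nn->nfssvc_boot.tv_nsec;
	} while (read_seqretry(&nn->boot_lock, seq));
}

void nfsd_reset_boot_verifier(struct nfsd_net *nn)
{
	write_seqlock(&nn->boot_lock);
	ktime_get_real_ts64(&nn->nfssvc_boot);
	write_sequnlock(&nn->boot_lock);
}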
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 9bc32af4e2da..cea68d8411ac 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -172,13 +172,8 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
nfserr = nfsd_read(rqstp, &resp->fh,
argp->offset,
rqstp->rq_vec, argp->vlen,
- &resp->count);
- if (nfserr == 0) {
- struct inode *inode = d_inode(resp->fh.fh_dentry);
- resp->eof = nfsd_eof_on_read(cnt, resp->count, argp->offset,
- inode->i_size);
- }
-
+ &resp->count,
+ &resp->eof);
RETURN_STATUS(nfserr);
}
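
nfsd_read() now returns the eof indication itself rather than having each protocol version recompute it from the inode size (the old nfsd_eof_on_read() logic removed above). The corresponding check presumably moves into the common read path in vfs.c, which is not shown in this excerpt; roughly, something like:

/* Illustrative only: how the shared read path could fill *eof. */
static inline void nfsd_read_set_eof(struct file *file, loff_t offset,
				     unsigned long count, u32 *eof)
{
	*eof = (offset + count) >= i_size_read(file_inode(file));
}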
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index fcf31822c74c..86e5658651f1 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -27,6 +27,7 @@ static u32 nfs3_ftypes[] = {
NF3SOCK, NF3BAD, NF3LNK, NF3BAD,
};
+
/*
* XDR functions for basic NFS types
*/
@@ -751,14 +752,16 @@ nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p)
{
struct nfsd3_writeres *resp = rqstp->rq_resp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ __be32 verf[2];
p = encode_wcc_data(rqstp, p, &resp->fh);
if (resp->status == 0) {
*p++ = htonl(resp->count);
*p++ = htonl(resp->committed);
/* unique identifier, y2038 overflow can be ignored */
- *p++ = htonl((u32)nn->nfssvc_boot.tv_sec);
- *p++ = htonl(nn->nfssvc_boot.tv_nsec);
+ nfsd_copy_boot_verifier(verf, nn);
+ *p++ = verf[0];
+ *p++ = verf[1];
}
return xdr_ressize_check(rqstp, p);
}
@@ -1125,13 +1128,15 @@ nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p)
{
struct nfsd3_commitres *resp = rqstp->rq_resp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ __be32 verf[2];
p = encode_wcc_data(rqstp, p, &resp->fh);
/* Write verifier */
if (resp->status == 0) {
/* unique identifier, y2038 overflow can be ignored */
- *p++ = htonl((u32)nn->nfssvc_boot.tv_sec);
- *p++ = htonl(nn->nfssvc_boot.tv_nsec);
+ nfsd_copy_boot_verifier(verf, nn);
+ *p++ = verf[0];
+ *p++ = verf[1];
}
return xdr_ressize_check(rqstp, p);
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 397eb7820929..524111420b48 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -512,11 +512,9 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
if (unlikely(status))
return status;
- if (cb != NULL) {
- status = decode_cb_sequence4res(xdr, cb);
- if (unlikely(status || cb->cb_seq_status))
- return status;
- }
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
}
@@ -604,11 +602,10 @@ static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
if (unlikely(status))
return status;
- if (cb) {
- status = decode_cb_sequence4res(xdr, cb);
- if (unlikely(status || cb->cb_seq_status))
- return status;
- }
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
+
return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
}
#endif /* CONFIG_NFSD_PNFS */
@@ -663,11 +660,10 @@ static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
if (unlikely(status))
return status;
- if (cb) {
- status = decode_cb_sequence4res(xdr, cb);
- if (unlikely(status || cb->cb_seq_status))
- return status;
- }
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
+
return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
}
@@ -759,11 +755,10 @@ static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp,
if (unlikely(status))
return status;
- if (cb) {
- status = decode_cb_sequence4res(xdr, cb);
- if (unlikely(status || cb->cb_seq_status))
- return status;
- }
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
+
return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
}
/*
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index a79e24b79095..2681c70283ce 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -169,8 +169,8 @@ nfsd4_free_layout_stateid(struct nfs4_stid *stid)
spin_unlock(&fp->fi_lock);
if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
- vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
- fput(ls->ls_file);
+ vfs_setlease(ls->ls_file->nf_file, F_UNLCK, NULL, (void **)&ls);
+ nfsd_file_put(ls->ls_file);
if (ls->ls_recalled)
atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);
@@ -197,7 +197,7 @@ nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
fl->fl_end = OFFSET_MAX;
fl->fl_owner = ls;
fl->fl_pid = current->tgid;
- fl->fl_file = ls->ls_file;
+ fl->fl_file = ls->ls_file->nf_file;
status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
if (status) {
@@ -236,13 +236,13 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
NFSPROC4_CLNT_CB_LAYOUT);
if (parent->sc_type == NFS4_DELEG_STID)
- ls->ls_file = get_file(fp->fi_deleg_file);
+ ls->ls_file = nfsd_file_get(fp->fi_deleg_file);
else
ls->ls_file = find_any_file(fp);
BUG_ON(!ls->ls_file);
if (nfsd4_layout_setlease(ls)) {
- fput(ls->ls_file);
+ nfsd_file_put(ls->ls_file);
put_nfs4_file(fp);
kmem_cache_free(nfs4_layout_stateid_cache, ls);
return NULL;
@@ -626,7 +626,7 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
argv[0] = (char *)nfsd_recall_failed;
argv[1] = addr_str;
- argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
+ argv[2] = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_id;
argv[3] = NULL;
error = call_usermodehelper(nfsd_recall_failed, argv, envp,
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 8beda999e134..4e3e77b76411 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -568,17 +568,11 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
{
- __be32 verf[2];
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ __be32 *verf = (__be32 *)verifier->data;
- /*
- * This is opaque to client, so no need to byte-swap. Use
- * __force to keep sparse happy. y2038 time_t overflow is
- * irrelevant in this usage.
- */
- verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
- verf[1] = (__force __be32)nn->nfssvc_boot.tv_nsec;
- memcpy(verifier->data, verf, sizeof(verifier->data));
+ BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
+
+ nfsd_copy_boot_verifier(verf, net_generic(net, nfsd_net_id));
}
static __be32
@@ -761,7 +755,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_read *read = &u->read;
__be32 status;
- read->rd_filp = NULL;
+ read->rd_nf = NULL;
if (read->rd_offset >= OFFSET_MAX)
return nfserr_inval;
@@ -782,7 +776,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* check stateid */
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&read->rd_stateid, RD_STATE,
- &read->rd_filp, &read->rd_tmp_file);
+ &read->rd_nf);
if (status) {
dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
goto out;
@@ -798,8 +792,8 @@ out:
static void
nfsd4_read_release(union nfsd4_op_u *u)
{
- if (u->read.rd_filp)
- fput(u->read.rd_filp);
+ if (u->read.rd_nf)
+ nfsd_file_put(u->read.rd_nf);
trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp,
u->read.rd_offset, u->read.rd_length);
}
@@ -954,7 +948,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
status = nfs4_preprocess_stateid_op(rqstp, cstate,
&cstate->current_fh, &setattr->sa_stateid,
- WR_STATE, NULL, NULL);
+ WR_STATE, NULL);
if (status) {
dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
return status;
@@ -993,7 +987,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct nfsd4_write *write = &u->write;
stateid_t *stateid = &write->wr_stateid;
- struct file *filp = NULL;
+ struct nfsd_file *nf = NULL;
__be32 status = nfs_ok;
unsigned long cnt;
int nvecs;
@@ -1005,7 +999,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
trace_nfsd_write_start(rqstp, &cstate->current_fh,
write->wr_offset, cnt);
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
- stateid, WR_STATE, &filp, NULL);
+ stateid, WR_STATE, &nf);
if (status) {
dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
return status;
@@ -1018,10 +1012,10 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&write->wr_head, write->wr_buflen);
WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
- status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
+ status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf->nf_file,
write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
write->wr_how_written);
- fput(filp);
+ nfsd_file_put(nf);
write->wr_bytes_written = cnt;
trace_nfsd_write_done(rqstp, &cstate->current_fh,
@@ -1031,8 +1025,8 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static __be32
nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- stateid_t *src_stateid, struct file **src,
- stateid_t *dst_stateid, struct file **dst)
+ stateid_t *src_stateid, struct nfsd_file **src,
+ stateid_t *dst_stateid, struct nfsd_file **dst)
{
__be32 status;
@@ -1040,22 +1034,22 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_nofilehandle;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
- src_stateid, RD_STATE, src, NULL);
+ src_stateid, RD_STATE, src);
if (status) {
dprintk("NFSD: %s: couldn't process src stateid!\n", __func__);
goto out;
}
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
- dst_stateid, WR_STATE, dst, NULL);
+ dst_stateid, WR_STATE, dst);
if (status) {
dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__);
goto out_put_src;
}
/* fix up for NFS-specific error code */
- if (!S_ISREG(file_inode(*src)->i_mode) ||
- !S_ISREG(file_inode(*dst)->i_mode)) {
+ if (!S_ISREG(file_inode((*src)->nf_file)->i_mode) ||
+ !S_ISREG(file_inode((*dst)->nf_file)->i_mode)) {
status = nfserr_wrong_type;
goto out_put_dst;
}
@@ -1063,9 +1057,9 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
out:
return status;
out_put_dst:
- fput(*dst);
+ nfsd_file_put(*dst);
out_put_src:
- fput(*src);
+ nfsd_file_put(*src);
goto out;
}
@@ -1074,7 +1068,7 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_clone *clone = &u->clone;
- struct file *src, *dst;
+ struct nfsd_file *src, *dst;
__be32 status;
status = nfsd4_verify_copy(rqstp, cstate, &clone->cl_src_stateid, &src,
@@ -1082,11 +1076,11 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
- status = nfsd4_clone_file_range(src, clone->cl_src_pos,
- dst, clone->cl_dst_pos, clone->cl_count);
+ status = nfsd4_clone_file_range(src->nf_file, clone->cl_src_pos,
+ dst->nf_file, clone->cl_dst_pos, clone->cl_count);
- fput(dst);
- fput(src);
+ nfsd_file_put(dst);
+ nfsd_file_put(src);
out:
return status;
}
@@ -1176,8 +1170,9 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
do {
if (kthread_should_stop())
break;
- bytes_copied = nfsd_copy_file_range(copy->file_src, src_pos,
- copy->file_dst, dst_pos, bytes_total);
+ bytes_copied = nfsd_copy_file_range(copy->nf_src->nf_file,
+ src_pos, copy->nf_dst->nf_file, dst_pos,
+ bytes_total);
if (bytes_copied <= 0)
break;
bytes_total -= bytes_copied;
@@ -1204,8 +1199,8 @@ static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
status = nfs_ok;
}
- fput(copy->file_src);
- fput(copy->file_dst);
+ nfsd_file_put(copy->nf_src);
+ nfsd_file_put(copy->nf_dst);
return status;
}
@@ -1218,16 +1213,16 @@ static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
memcpy(&dst->cp_res, &src->cp_res, sizeof(src->cp_res));
memcpy(&dst->fh, &src->fh, sizeof(src->fh));
dst->cp_clp = src->cp_clp;
- dst->file_dst = get_file(src->file_dst);
- dst->file_src = get_file(src->file_src);
+ dst->nf_dst = nfsd_file_get(src->nf_dst);
+ dst->nf_src = nfsd_file_get(src->nf_src);
memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid));
}
static void cleanup_async_copy(struct nfsd4_copy *copy)
{
nfs4_free_cp_state(copy);
- fput(copy->file_dst);
- fput(copy->file_src);
+ nfsd_file_put(copy->nf_dst);
+ nfsd_file_put(copy->nf_src);
spin_lock(&copy->cp_clp->async_lock);
list_del(&copy->copies);
spin_unlock(&copy->cp_clp->async_lock);
@@ -1264,8 +1259,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_copy *async_copy = NULL;
status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid,
- &copy->file_src, &copy->cp_dst_stateid,
- &copy->file_dst);
+ &copy->nf_src, &copy->cp_dst_stateid,
+ &copy->nf_dst);
if (status)
goto out;
@@ -1347,21 +1342,21 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_fallocate *fallocate, int flags)
{
__be32 status;
- struct file *file;
+ struct nfsd_file *nf;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&fallocate->falloc_stateid,
- WR_STATE, &file, NULL);
+ WR_STATE, &nf);
if (status != nfs_ok) {
dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
return status;
}
- status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
+ status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, nf->nf_file,
fallocate->falloc_offset,
fallocate->falloc_length,
flags);
- fput(file);
+ nfsd_file_put(nf);
return status;
}
static __be32
@@ -1406,11 +1401,11 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_seek *seek = &u->seek;
int whence;
__be32 status;
- struct file *file;
+ struct nfsd_file *nf;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&seek->seek_stateid,
- RD_STATE, &file, NULL);
+ RD_STATE, &nf);
if (status) {
dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
return status;
@@ -1432,14 +1427,14 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* Note: This call does change file->f_pos, but nothing in NFSD
 * should ever use file->f_pos.
*/
- seek->seek_pos = vfs_llseek(file, seek->seek_offset, whence);
+ seek->seek_pos = vfs_llseek(nf->nf_file, seek->seek_offset, whence);
if (seek->seek_pos < 0)
status = nfserrno(seek->seek_pos);
- else if (seek->seek_pos >= i_size_read(file_inode(file)))
+ else if (seek->seek_pos >= i_size_read(file_inode(nf->nf_file)))
seek->seek_eof = true;
out:
- fput(file);
+ nfsd_file_put(nf);
return status;
}
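
All of the stateid-based operations above now take a struct nfsd_file from the open-file cache instead of a bare struct file, and the extra "tmp_file" out-parameter disappears. The prototype implied by these call sites (the declaration itself lives in nfsd's state headers, outside this excerpt) would be approximately:

__be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
		stateid_t *stateid, int flags, struct nfsd_file **nfp);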
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 87679557d0d6..cdc75ad4438b 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -59,8 +59,13 @@ struct nfsd4_client_tracking_ops {
void (*remove)(struct nfs4_client *);
int (*check)(struct nfs4_client *);
void (*grace_done)(struct nfsd_net *);
+ uint8_t version;
+ size_t msglen;
};
+static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops;
+static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2;
+
/* Globals */
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
@@ -173,6 +178,7 @@ __nfsd4_create_reclaim_record_grace(struct nfs4_client *clp,
const char *dname, int len, struct nfsd_net *nn)
{
struct xdr_netobj name;
+ struct xdr_netobj princhash = { .len = 0, .data = NULL };
struct nfs4_client_reclaim *crp;
name.data = kmemdup(dname, len, GFP_KERNEL);
@@ -182,7 +188,7 @@ __nfsd4_create_reclaim_record_grace(struct nfs4_client *clp,
return;
}
name.len = len;
- crp = nfs4_client_to_reclaim(name, nn);
+ crp = nfs4_client_to_reclaim(name, princhash, nn);
if (!crp) {
kfree(name.data);
return;
@@ -482,6 +488,7 @@ static int
load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
{
struct xdr_netobj name;
+ struct xdr_netobj princhash = { .len = 0, .data = NULL };
if (child->d_name.len != HEXDIR_LEN - 1) {
printk("%s: illegal name %pd in recovery directory\n",
@@ -496,7 +503,7 @@ load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
goto out;
}
name.len = HEXDIR_LEN;
- if (!nfs4_client_to_reclaim(name, nn))
+ if (!nfs4_client_to_reclaim(name, princhash, nn))
kfree(name.data);
out:
return 0;
@@ -718,6 +725,8 @@ static const struct nfsd4_client_tracking_ops nfsd4_legacy_tracking_ops = {
.remove = nfsd4_remove_clid_dir,
.check = nfsd4_check_legacy_client,
.grace_done = nfsd4_recdir_purge_old,
+ .version = 1,
+ .msglen = 0,
};
/* Globals */
@@ -731,25 +740,32 @@ struct cld_net {
struct list_head cn_list;
unsigned int cn_xid;
bool cn_has_legacy;
+ struct crypto_shash *cn_tfm;
};
struct cld_upcall {
struct list_head cu_list;
struct cld_net *cu_net;
struct completion cu_done;
- struct cld_msg cu_msg;
+ union {
+ struct cld_msg_hdr cu_hdr;
+ struct cld_msg cu_msg;
+ struct cld_msg_v2 cu_msg_v2;
+ } cu_u;
};
static int
-__cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
+__cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg)
{
int ret;
struct rpc_pipe_msg msg;
- struct cld_upcall *cup = container_of(cmsg, struct cld_upcall, cu_msg);
+ struct cld_upcall *cup = container_of(cmsg, struct cld_upcall, cu_u);
+ struct nfsd_net *nn = net_generic(pipe->dentry->d_sb->s_fs_info,
+ nfsd_net_id);
memset(&msg, 0, sizeof(msg));
msg.data = cmsg;
- msg.len = sizeof(*cmsg);
+ msg.len = nn->client_tracking_ops->msglen;
ret = rpc_queue_upcall(pipe, &msg);
if (ret < 0) {
@@ -765,7 +781,7 @@ out:
}
static int
-cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
+cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg)
{
int ret;
@@ -781,11 +797,11 @@ cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
}
static ssize_t
-__cld_pipe_inprogress_downcall(const struct cld_msg __user *cmsg,
+__cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
struct nfsd_net *nn)
{
- uint8_t cmd;
- struct xdr_netobj name;
+ uint8_t cmd, princhashlen;
+ struct xdr_netobj name, princhash = { .len = 0, .data = NULL };
uint16_t namelen;
struct cld_net *cn = nn->cld_net;
@@ -794,22 +810,48 @@ __cld_pipe_inprogress_downcall(const struct cld_msg __user *cmsg,
return -EFAULT;
}
if (cmd == Cld_GraceStart) {
- if (get_user(namelen, &cmsg->cm_u.cm_name.cn_len))
- return -EFAULT;
- name.data = memdup_user(&cmsg->cm_u.cm_name.cn_id, namelen);
- if (IS_ERR_OR_NULL(name.data))
- return -EFAULT;
- name.len = namelen;
+ if (nn->client_tracking_ops->version >= 2) {
+ const struct cld_clntinfo __user *ci;
+
+ ci = &cmsg->cm_u.cm_clntinfo;
+ if (get_user(namelen, &ci->cc_name.cn_len))
+ return -EFAULT;
+ name.data = memdup_user(&ci->cc_name.cn_id, namelen);
+ if (IS_ERR_OR_NULL(name.data))
+ return -EFAULT;
+ name.len = namelen;
+ if (get_user(princhashlen, &ci->cc_princhash.cp_len)) {
+ kfree(name.data);
+ return -EFAULT;
+ }
+ if (princhashlen > 0) {
+ princhash.data = memdup_user(
+ &ci->cc_princhash.cp_data,
+ princhashlen);
+ if (IS_ERR_OR_NULL(princhash.data)) {
+ kfree(name.data);
+ return -EFAULT;
+ }
+ princhash.len = princhashlen;
+ } else
+ princhash.len = 0;
+ } else {
+ const struct cld_name __user *cnm;
+
+ cnm = &cmsg->cm_u.cm_name;
+ if (get_user(namelen, &cnm->cn_len))
+ return -EFAULT;
+ name.data = memdup_user(&cnm->cn_id, namelen);
+ if (IS_ERR_OR_NULL(name.data))
+ return -EFAULT;
+ name.len = namelen;
+ }
if (name.len > 5 && memcmp(name.data, "hash:", 5) == 0) {
name.len = name.len - 5;
memmove(name.data, name.data + 5, name.len);
cn->cn_has_legacy = true;
}
- if (!nfs4_client_to_reclaim(name, nn)) {
+ if (!nfs4_client_to_reclaim(name, princhash, nn)) {
kfree(name.data);
+ kfree(princhash.data);
return -EFAULT;
}
- return sizeof(*cmsg);
+ return nn->client_tracking_ops->msglen;
}
return -EFAULT;
}
@@ -818,21 +860,22 @@ static ssize_t
cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
struct cld_upcall *tmp, *cup;
- struct cld_msg __user *cmsg = (struct cld_msg __user *)src;
+ struct cld_msg_hdr __user *hdr = (struct cld_msg_hdr __user *)src;
+ struct cld_msg_v2 __user *cmsg = (struct cld_msg_v2 __user *)src;
uint32_t xid;
struct nfsd_net *nn = net_generic(file_inode(filp)->i_sb->s_fs_info,
nfsd_net_id);
struct cld_net *cn = nn->cld_net;
int16_t status;
- if (mlen != sizeof(*cmsg)) {
+ if (mlen != nn->client_tracking_ops->msglen) {
dprintk("%s: got %zu bytes, expected %zu\n", __func__, mlen,
- sizeof(*cmsg));
+ nn->client_tracking_ops->msglen);
return -EINVAL;
}
/* copy just the xid so we can try to find that */
- if (copy_from_user(&xid, &cmsg->cm_xid, sizeof(xid)) != 0) {
+ if (copy_from_user(&xid, &hdr->cm_xid, sizeof(xid)) != 0) {
dprintk("%s: error when copying xid from userspace", __func__);
return -EFAULT;
}
@@ -842,7 +885,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
* list (for -EINPROGRESS, we just want to make sure the xid is
* valid, not remove the upcall from the list)
*/
- if (get_user(status, &cmsg->cm_status)) {
+ if (get_user(status, &hdr->cm_status)) {
dprintk("%s: error when copying status from userspace", __func__);
return -EFAULT;
}
@@ -851,7 +894,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
cup = NULL;
spin_lock(&cn->cn_lock);
list_for_each_entry(tmp, &cn->cn_list, cu_list) {
- if (get_unaligned(&tmp->cu_msg.cm_xid) == xid) {
+ if (get_unaligned(&tmp->cu_u.cu_hdr.cm_xid) == xid) {
cup = tmp;
if (status != -EINPROGRESS)
list_del_init(&cup->cu_list);
@@ -869,7 +912,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (status == -EINPROGRESS)
return __cld_pipe_inprogress_downcall(cmsg, nn);
- if (copy_from_user(&cup->cu_msg, src, mlen) != 0)
+ if (copy_from_user(&cup->cu_u.cu_msg_v2, src, mlen) != 0)
return -EFAULT;
complete(&cup->cu_done);
@@ -881,7 +924,7 @@ cld_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
struct cld_msg *cmsg = msg->data;
struct cld_upcall *cup = container_of(cmsg, struct cld_upcall,
- cu_msg);
+ cu_u.cu_msg);
/* errno >= 0 means we got a downcall */
if (msg->errno >= 0)
@@ -1007,14 +1050,17 @@ nfsd4_remove_cld_pipe(struct net *net)
nfsd4_cld_unregister_net(net, cn->cn_pipe);
rpc_destroy_pipe_data(cn->cn_pipe);
+ if (cn->cn_tfm)
+ crypto_free_shash(cn->cn_tfm);
kfree(nn->cld_net);
nn->cld_net = NULL;
}
static struct cld_upcall *
-alloc_cld_upcall(struct cld_net *cn)
+alloc_cld_upcall(struct nfsd_net *nn)
{
struct cld_upcall *new, *tmp;
+ struct cld_net *cn = nn->cld_net;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
@@ -1024,20 +1070,20 @@ alloc_cld_upcall(struct cld_net *cn)
restart_search:
spin_lock(&cn->cn_lock);
list_for_each_entry(tmp, &cn->cn_list, cu_list) {
- if (tmp->cu_msg.cm_xid == cn->cn_xid) {
+ if (tmp->cu_u.cu_msg.cm_xid == cn->cn_xid) {
cn->cn_xid++;
spin_unlock(&cn->cn_lock);
goto restart_search;
}
}
init_completion(&new->cu_done);
- new->cu_msg.cm_vers = CLD_UPCALL_VERSION;
- put_unaligned(cn->cn_xid++, &new->cu_msg.cm_xid);
+ new->cu_u.cu_msg.cm_vers = nn->client_tracking_ops->version;
+ put_unaligned(cn->cn_xid++, &new->cu_u.cu_msg.cm_xid);
new->cu_net = cn;
list_add(&new->cu_list, &cn->cn_list);
spin_unlock(&cn->cn_lock);
- dprintk("%s: allocated xid %u\n", __func__, new->cu_msg.cm_xid);
+ dprintk("%s: allocated xid %u\n", __func__, new->cu_u.cu_msg.cm_xid);
return new;
}
@@ -1066,20 +1112,20 @@ nfsd4_cld_create(struct nfs4_client *clp)
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
- cup = alloc_cld_upcall(cn);
+ cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
- cup->cu_msg.cm_cmd = Cld_Create;
- cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
- memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
+ cup->cu_u.cu_msg.cm_cmd = Cld_Create;
+ cup->cu_u.cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
+ memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
clp->cl_name.len);
- ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
if (!ret) {
- ret = cup->cu_msg.cm_status;
+ ret = cup->cu_u.cu_msg.cm_status;
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
@@ -1092,6 +1138,75 @@ out_err:
/* Ask daemon to create a new record */
static void
+nfsd4_cld_create_v2(struct nfs4_client *clp)
+{
+ int ret;
+ struct cld_upcall *cup;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+ struct cld_net *cn = nn->cld_net;
+ struct cld_msg_v2 *cmsg;
+ struct crypto_shash *tfm = cn->cn_tfm;
+ struct xdr_netobj cksum;
+ char *principal = NULL;
+ SHASH_DESC_ON_STACK(desc, tfm);
+
+ /* Don't upcall if it's already stored */
+ if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
+ return;
+
+ cup = alloc_cld_upcall(nn);
+ if (!cup) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ cmsg = &cup->cu_u.cu_msg_v2;
+ cmsg->cm_cmd = Cld_Create;
+ cmsg->cm_u.cm_clntinfo.cc_name.cn_len = clp->cl_name.len;
+ memcpy(cmsg->cm_u.cm_clntinfo.cc_name.cn_id, clp->cl_name.data,
+ clp->cl_name.len);
+ if (clp->cl_cred.cr_raw_principal)
+ principal = clp->cl_cred.cr_raw_principal;
+ else if (clp->cl_cred.cr_principal)
+ principal = clp->cl_cred.cr_principal;
+ if (principal) {
+ desc->tfm = tfm;
+ cksum.len = crypto_shash_digestsize(tfm);
+ cksum.data = kmalloc(cksum.len, GFP_KERNEL);
+ if (cksum.data == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = crypto_shash_digest(desc, principal, strlen(principal),
+ cksum.data);
+ shash_desc_zero(desc);
+ if (ret) {
+ kfree(cksum.data);
+ goto out;
+ }
+ cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = cksum.len;
+ memcpy(cmsg->cm_u.cm_clntinfo.cc_princhash.cp_data,
+ cksum.data, cksum.len);
+ kfree(cksum.data);
+ } else
+ cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = 0;
+
+ ret = cld_pipe_upcall(cn->cn_pipe, cmsg);
+ if (!ret) {
+ ret = cmsg->cm_status;
+ set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
+ }
+
+out:
+ free_cld_upcall(cup);
+out_err:
+ if (ret)
+ pr_err("NFSD: Unable to create client record on stable storage: %d\n",
+ ret);
+}
+
+/* Ask daemon to remove a client record */
+static void
nfsd4_cld_remove(struct nfs4_client *clp)
{
int ret;
@@ -1103,20 +1218,20 @@ nfsd4_cld_remove(struct nfs4_client *clp)
if (!test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
- cup = alloc_cld_upcall(cn);
+ cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
- cup->cu_msg.cm_cmd = Cld_Remove;
- cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
- memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
+ cup->cu_u.cu_msg.cm_cmd = Cld_Remove;
+ cup->cu_u.cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
+ memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
clp->cl_name.len);
- ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
if (!ret) {
- ret = cup->cu_msg.cm_status;
+ ret = cup->cu_u.cu_msg.cm_status;
clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
@@ -1145,21 +1260,21 @@ nfsd4_cld_check_v0(struct nfs4_client *clp)
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return 0;
- cup = alloc_cld_upcall(cn);
+ cup = alloc_cld_upcall(nn);
if (!cup) {
printk(KERN_ERR "NFSD: Unable to check client record on "
"stable storage: %d\n", -ENOMEM);
return -ENOMEM;
}
- cup->cu_msg.cm_cmd = Cld_Check;
- cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
- memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
+ cup->cu_u.cu_msg.cm_cmd = Cld_Check;
+ cup->cu_u.cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
+ memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
clp->cl_name.len);
- ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
if (!ret) {
- ret = cup->cu_msg.cm_status;
+ ret = cup->cu_u.cu_msg.cm_status;
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
@@ -1217,22 +1332,95 @@ found:
}
static int
+nfsd4_cld_check_v2(struct nfs4_client *clp)
+{
+ struct nfs4_client_reclaim *crp;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+ struct cld_net *cn = nn->cld_net;
+ int status;
+ char dname[HEXDIR_LEN];
+ struct xdr_netobj name;
+ struct crypto_shash *tfm = cn->cn_tfm;
+ struct xdr_netobj cksum;
+ char *principal = NULL;
+ SHASH_DESC_ON_STACK(desc, tfm);
+
+ /* did we already find that this client is stable? */
+ if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
+ return 0;
+
+ /* look for it in the reclaim hashtable otherwise */
+ crp = nfsd4_find_reclaim_client(clp->cl_name, nn);
+ if (crp)
+ goto found;
+
+ if (cn->cn_has_legacy) {
+ status = nfs4_make_rec_clidname(dname, &clp->cl_name);
+ if (status)
+ return -ENOENT;
+
+ name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
+ if (!name.data) {
+ dprintk("%s: failed to allocate memory for name.data\n",
+ __func__);
+ return -ENOENT;
+ }
+ name.len = HEXDIR_LEN;
+ crp = nfsd4_find_reclaim_client(name, nn);
+ kfree(name.data);
+ if (crp)
+ goto found;
+
+ }
+ return -ENOENT;
+found:
+ if (crp->cr_princhash.len) {
+ if (clp->cl_cred.cr_raw_principal)
+ principal = clp->cl_cred.cr_raw_principal;
+ else if (clp->cl_cred.cr_principal)
+ principal = clp->cl_cred.cr_principal;
+ if (principal == NULL)
+ return -ENOENT;
+ desc->tfm = tfm;
+ cksum.len = crypto_shash_digestsize(tfm);
+ cksum.data = kmalloc(cksum.len, GFP_KERNEL);
+ if (cksum.data == NULL)
+ return -ENOENT;
+ status = crypto_shash_digest(desc, principal, strlen(principal),
+ cksum.data);
+ shash_desc_zero(desc);
+ if (status) {
+ kfree(cksum.data);
+ return -ENOENT;
+ }
+ if (memcmp(crp->cr_princhash.data, cksum.data,
+ crp->cr_princhash.len)) {
+ kfree(cksum.data);
+ return -ENOENT;
+ }
+ kfree(cksum.data);
+ }
+ crp->cr_clp = clp;
+ return 0;
+}
+
+static int
nfsd4_cld_grace_start(struct nfsd_net *nn)
{
int ret;
struct cld_upcall *cup;
struct cld_net *cn = nn->cld_net;
- cup = alloc_cld_upcall(cn);
+ cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
- cup->cu_msg.cm_cmd = Cld_GraceStart;
- ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ cup->cu_u.cu_msg.cm_cmd = Cld_GraceStart;
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
if (!ret)
- ret = cup->cu_msg.cm_status;
+ ret = cup->cu_u.cu_msg.cm_status;
free_cld_upcall(cup);
out_err:
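
Note: the v2 create and check upcalls above both reduce the client's principal to a fixed-size value by hashing it with the SHA-256 shash allocated later in nfsd4_cld_tracking_init(). A minimal, self-contained sketch of that one-shot digest pattern follows; the helper name and error handling are illustrative only and are not part of the patch.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hash @principal with SHA-256; caller kfree()s *out on success. */
static int principal_digest(const char *principal, u8 **out, unsigned int *outlen)
{
	struct crypto_shash *tfm;
	u8 *digest;
	int ret;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	digest = kmalloc(crypto_shash_digestsize(tfm), GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, principal, strlen(principal),
					  digest);
		shash_desc_zero(desc);
	}

	if (ret) {
		kfree(digest);
		goto out_free_tfm;
	}
	*out = digest;
	*outlen = crypto_shash_digestsize(tfm);
out_free_tfm:
	crypto_free_shash(tfm);
	return ret;
}
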
@@ -1250,17 +1438,17 @@ nfsd4_cld_grace_done_v0(struct nfsd_net *nn)
struct cld_upcall *cup;
struct cld_net *cn = nn->cld_net;
- cup = alloc_cld_upcall(cn);
+ cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
- cup->cu_msg.cm_cmd = Cld_GraceDone;
- cup->cu_msg.cm_u.cm_gracetime = (int64_t)nn->boot_time;
- ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone;
+ cup->cu_u.cu_msg.cm_u.cm_gracetime = (int64_t)nn->boot_time;
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
if (!ret)
- ret = cup->cu_msg.cm_status;
+ ret = cup->cu_u.cu_msg.cm_status;
free_cld_upcall(cup);
out_err:
@@ -1279,16 +1467,16 @@ nfsd4_cld_grace_done(struct nfsd_net *nn)
struct cld_upcall *cup;
struct cld_net *cn = nn->cld_net;
- cup = alloc_cld_upcall(cn);
+ cup = alloc_cld_upcall(nn);
if (!cup) {
ret = -ENOMEM;
goto out_err;
}
- cup->cu_msg.cm_cmd = Cld_GraceDone;
- ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
+ cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone;
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
if (!ret)
- ret = cup->cu_msg.cm_status;
+ ret = cup->cu_u.cu_msg.cm_status;
free_cld_upcall(cup);
out_err:
@@ -1337,6 +1525,53 @@ cld_running(struct nfsd_net *nn)
}
static int
+nfsd4_cld_get_version(struct nfsd_net *nn)
+{
+ int ret = 0;
+ struct cld_upcall *cup;
+ struct cld_net *cn = nn->cld_net;
+ uint8_t version;
+
+ cup = alloc_cld_upcall(nn);
+ if (!cup) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ cup->cu_u.cu_msg.cm_cmd = Cld_GetVersion;
+ ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
+ if (!ret) {
+ ret = cup->cu_u.cu_msg.cm_status;
+ if (ret)
+ goto out_free;
+ version = cup->cu_u.cu_msg.cm_u.cm_version;
+ dprintk("%s: userspace returned version %u\n",
+ __func__, version);
+ if (version < 1)
+ version = 1;
+ else if (version > CLD_UPCALL_VERSION)
+ version = CLD_UPCALL_VERSION;
+
+ switch (version) {
+ case 1:
+ nn->client_tracking_ops = &nfsd4_cld_tracking_ops;
+ break;
+ case 2:
+ nn->client_tracking_ops = &nfsd4_cld_tracking_ops_v2;
+ break;
+ default:
+ break;
+ }
+ }
+out_free:
+ free_cld_upcall(cup);
+out_err:
+ if (ret)
+ dprintk("%s: Unable to get version from userspace: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static int
nfsd4_cld_tracking_init(struct net *net)
{
int status;
@@ -1351,6 +1586,11 @@ nfsd4_cld_tracking_init(struct net *net)
status = __nfsd4_init_cld_pipe(net);
if (status)
goto err_shutdown;
+ nn->cld_net->cn_tfm = crypto_alloc_shash("sha256", 0, 0);
+ if (IS_ERR(nn->cld_net->cn_tfm)) {
+ status = PTR_ERR(nn->cld_net->cn_tfm);
+ goto err_remove;
+ }
/*
* rpc pipe upcalls take 30 seconds to time out, so we don't want to
@@ -1368,10 +1608,14 @@ nfsd4_cld_tracking_init(struct net *net)
goto err_remove;
}
+ status = nfsd4_cld_get_version(nn);
+ if (status == -EOPNOTSUPP)
+ pr_warn("NFSD: nfsdcld GetVersion upcall failed. Please upgrade nfsdcld.\n");
+
status = nfsd4_cld_grace_start(nn);
if (status) {
if (status == -EOPNOTSUPP)
- printk(KERN_WARNING "NFSD: Please upgrade nfsdcld.\n");
+ pr_warn("NFSD: nfsdcld GraceStart upcall failed. Please upgrade nfsdcld.\n");
nfs4_release_reclaim(nn);
goto err_remove;
} else
@@ -1403,6 +1647,8 @@ static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v0 = {
.remove = nfsd4_cld_remove,
.check = nfsd4_cld_check_v0,
.grace_done = nfsd4_cld_grace_done_v0,
+ .version = 1,
+ .msglen = sizeof(struct cld_msg),
};
/* For newer nfsdcld's */
@@ -1413,6 +1659,20 @@ static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops = {
.remove = nfsd4_cld_remove,
.check = nfsd4_cld_check,
.grace_done = nfsd4_cld_grace_done,
+ .version = 1,
+ .msglen = sizeof(struct cld_msg),
+};
+
+/* v2 create/check ops include the principal, if available */
+static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2 = {
+ .init = nfsd4_cld_tracking_init,
+ .exit = nfsd4_cld_tracking_exit,
+ .create = nfsd4_cld_create_v2,
+ .remove = nfsd4_cld_remove,
+ .check = nfsd4_cld_check_v2,
+ .grace_done = nfsd4_cld_grace_done,
+ .version = 2,
+ .msglen = sizeof(struct cld_msg_v2),
};
/* upcall via usermodehelper */
@@ -1760,6 +2020,8 @@ static const struct nfsd4_client_tracking_ops nfsd4_umh_tracking_ops = {
.remove = nfsd4_umh_cltrack_remove,
.check = nfsd4_umh_cltrack_check,
.grace_done = nfsd4_umh_cltrack_grace_done,
+ .version = 1,
+ .msglen = 0,
};
int
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 7857942c5ca6..c65aeaa812d4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -50,6 +50,7 @@
#include "netns.h"
#include "pnfs.h"
+#include "filecache.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -429,18 +430,18 @@ put_nfs4_file(struct nfs4_file *fi)
}
}
-static struct file *
+static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
if (f->fi_fds[oflag])
- return get_file(f->fi_fds[oflag]);
+ return nfsd_file_get(f->fi_fds[oflag]);
return NULL;
}
-static struct file *
+static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
- struct file *ret;
+ struct nfsd_file *ret;
lockdep_assert_held(&f->fi_lock);
@@ -450,10 +451,10 @@ find_writeable_file_locked(struct nfs4_file *f)
return ret;
}
-static struct file *
+static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
- struct file *ret;
+ struct nfsd_file *ret;
spin_lock(&f->fi_lock);
ret = find_writeable_file_locked(f);
@@ -462,9 +463,10 @@ find_writeable_file(struct nfs4_file *f)
return ret;
}
-static struct file *find_readable_file_locked(struct nfs4_file *f)
+static struct nfsd_file *
+find_readable_file_locked(struct nfs4_file *f)
{
- struct file *ret;
+ struct nfsd_file *ret;
lockdep_assert_held(&f->fi_lock);
@@ -474,10 +476,10 @@ static struct file *find_readable_file_locked(struct nfs4_file *f)
return ret;
}
-static struct file *
+static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
- struct file *ret;
+ struct nfsd_file *ret;
spin_lock(&f->fi_lock);
ret = find_readable_file_locked(f);
@@ -486,10 +488,10 @@ find_readable_file(struct nfs4_file *f)
return ret;
}
-struct file *
+struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
- struct file *ret;
+ struct nfsd_file *ret;
spin_lock(&f->fi_lock);
ret = __nfs4_get_fd(f, O_RDWR);
@@ -590,17 +592,17 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
might_lock(&fp->fi_lock);
if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
- struct file *f1 = NULL;
- struct file *f2 = NULL;
+ struct nfsd_file *f1 = NULL;
+ struct nfsd_file *f2 = NULL;
swap(f1, fp->fi_fds[oflag]);
if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
swap(f2, fp->fi_fds[O_RDWR]);
spin_unlock(&fp->fi_lock);
if (f1)
- fput(f1);
+ nfsd_file_put(f1);
if (f2)
- fput(f2);
+ nfsd_file_put(f2);
}
}
@@ -933,25 +935,25 @@ nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
static void put_deleg_file(struct nfs4_file *fp)
{
- struct file *filp = NULL;
+ struct nfsd_file *nf = NULL;
spin_lock(&fp->fi_lock);
if (--fp->fi_delegees == 0)
- swap(filp, fp->fi_deleg_file);
+ swap(nf, fp->fi_deleg_file);
spin_unlock(&fp->fi_lock);
- if (filp)
- fput(filp);
+ if (nf)
+ nfsd_file_put(nf);
}
static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
struct nfs4_file *fp = dp->dl_stid.sc_file;
- struct file *filp = fp->fi_deleg_file;
+ struct nfsd_file *nf = fp->fi_deleg_file;
WARN_ON_ONCE(!fp->fi_delegees);
- vfs_setlease(filp, F_UNLCK, NULL, (void **)&dp);
+ vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
put_deleg_file(fp);
}
@@ -1289,11 +1291,14 @@ static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
struct nfs4_ol_stateid *stp = openlockstateid(stid);
struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
- struct file *file;
+ struct nfsd_file *nf;
- file = find_any_file(stp->st_stid.sc_file);
- if (file)
- filp_close(file, (fl_owner_t)lo);
+ nf = find_any_file(stp->st_stid.sc_file);
+ if (nf) {
+ get_file(nf->nf_file);
+ filp_close(nf->nf_file, (fl_owner_t)lo);
+ nfsd_file_put(nf);
+ }
nfs4_free_ol_stateid(stid);
}
@@ -1563,21 +1568,39 @@ static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
* re-negotiate active sessions and reduce their slot usage to make
* room for new connections. For now we just fail the create session.
*/
-static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
+static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
u32 slotsize = slot_bytes(ca);
u32 num = ca->maxreqs;
unsigned long avail, total_avail;
+ unsigned int scale_factor;
spin_lock(&nfsd_drc_lock);
- total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
+ if (nfsd_drc_max_mem > nfsd_drc_mem_used)
+ total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
+ else
+ /* We have handed out more space than we chose in
+ * set_max_drc() to allow. That isn't really a
+ * problem as long as that doesn't make us think we
+ * have lots more due to integer overflow.
+ */
+ total_avail = 0;
avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
/*
- * Never use more than a third of the remaining memory,
- * unless it's the only way to give this client a slot:
+ * Never use more than a fraction of the remaining memory,
+ * unless it's the only way to give this client a slot.
+ * The chosen fraction is either 1/8 or 1/number of threads,
+ * whichever is smaller. This ensures there are adequate
+ * slots to support multiple clients per thread.
+ * Give the client one slot even if that would require
+ * over-allocation--it is better than failure.
*/
- avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
+ scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
+
+ avail = clamp_t(unsigned long, avail, slotsize,
+ total_avail/scale_factor);
num = min_t(int, num, avail / slotsize);
+ num = max_t(int, num, 1);
nfsd_drc_mem_used += num * slotsize;
spin_unlock(&nfsd_drc_lock);
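
Note: to give a concrete feel for the arithmetic above: with 32 nfsd threads the scale factor is max(8, 32) = 32, so a new session may claim at most 1/32 of the remaining DRC memory (after the NFSD_MAX_MEM_PER_SESSION cap), yet it always gets at least one slot even if that overshoots the budget. A compact restatement of the calculation using the same kernel min/max/clamp helpers; the function name is illustrative only, and NFSD_MAX_MEM_PER_SESSION comes from fs/nfsd/state.h.

#include <linux/kernel.h>

static unsigned int drc_slots_for_session(unsigned long total_avail,
					  u32 slotsize, u32 wanted,
					  unsigned int nrthreads)
{
	unsigned int scale_factor = max_t(unsigned int, 8, nrthreads);
	unsigned long avail;
	unsigned int num;

	/* never more than one session's worth, never more than is left */
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/* at least one slot, at most 1/scale_factor of what remains */
	avail = clamp_t(unsigned long, avail, slotsize,
			total_avail / scale_factor);
	num = min_t(unsigned int, wanted, avail / slotsize);
	return max_t(unsigned int, num, 1);
}
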
@@ -2323,9 +2346,9 @@ static void states_stop(struct seq_file *s, void *v)
spin_unlock(&clp->cl_lock);
}
-static void nfs4_show_superblock(struct seq_file *s, struct file *f)
+static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
- struct inode *inode = file_inode(f);
+ struct inode *inode = f->nf_inode;
seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
MAJOR(inode->i_sb->s_dev),
@@ -2343,7 +2366,7 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_ol_stateid *ols;
struct nfs4_file *nf;
- struct file *file;
+ struct nfsd_file *file;
struct nfs4_stateowner *oo;
unsigned int access, deny;
@@ -2370,7 +2393,7 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
- fput(file);
+ nfsd_file_put(file);
return 0;
}
@@ -2379,7 +2402,7 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_ol_stateid *ols;
struct nfs4_file *nf;
- struct file *file;
+ struct nfsd_file *file;
struct nfs4_stateowner *oo;
ols = openlockstateid(st);
@@ -2401,7 +2424,7 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
- fput(file);
+ nfsd_file_put(file);
return 0;
}
@@ -2410,7 +2433,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_delegation *ds;
struct nfs4_file *nf;
- struct file *file;
+ struct nfsd_file *file;
ds = delegstateid(st);
nf = st->sc_file;
@@ -2433,7 +2456,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_layout_stateid *ls;
- struct file *file;
+ struct nfsd_file *file;
ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
file = ls->ls_file;
@@ -3169,10 +3192,10 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs
* performance. When short on memory we therefore prefer to
* decrease number of slots instead of their size. Clients that
* request larger slots than they need will get poor results:
+ * Note that we always allow at least one slot, because our
+ * accounting is soft and provides no guarantees either way.
*/
- ca->maxreqs = nfsd4_get_drc_mem(ca);
- if (!ca->maxreqs)
- return nfserr_jukebox;
+ ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
return nfs_ok;
}
@@ -4651,7 +4674,7 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
struct nfsd4_open *open)
{
- struct file *filp = NULL;
+ struct nfsd_file *nf = NULL;
__be32 status;
int oflag = nfs4_access_to_omode(open->op_share_access);
int access = nfs4_access_to_access(open->op_share_access);
@@ -4687,18 +4710,18 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
if (!fp->fi_fds[oflag]) {
spin_unlock(&fp->fi_lock);
- status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
+ status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
if (status)
goto out_put_access;
spin_lock(&fp->fi_lock);
if (!fp->fi_fds[oflag]) {
- fp->fi_fds[oflag] = filp;
- filp = NULL;
+ fp->fi_fds[oflag] = nf;
+ nf = NULL;
}
}
spin_unlock(&fp->fi_lock);
- if (filp)
- fput(filp);
+ if (nf)
+ nfsd_file_put(nf);
status = nfsd4_truncate(rqstp, cur_fh, open);
if (status)
@@ -4767,7 +4790,7 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
fl->fl_end = OFFSET_MAX;
fl->fl_owner = (fl_owner_t)dp;
fl->fl_pid = current->tgid;
- fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file;
+ fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
return fl;
}
@@ -4777,7 +4800,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
{
int status = 0;
struct nfs4_delegation *dp;
- struct file *filp;
+ struct nfsd_file *nf;
struct file_lock *fl;
/*
@@ -4788,8 +4811,8 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
if (fp->fi_had_conflict)
return ERR_PTR(-EAGAIN);
- filp = find_readable_file(fp);
- if (!filp) {
+ nf = find_readable_file(fp);
+ if (!nf) {
/* We should always have a readable file here */
WARN_ON_ONCE(1);
return ERR_PTR(-EBADF);
@@ -4799,17 +4822,17 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
if (nfs4_delegation_exists(clp, fp))
status = -EAGAIN;
else if (!fp->fi_deleg_file) {
- fp->fi_deleg_file = filp;
+ fp->fi_deleg_file = nf;
/* increment early to prevent fi_deleg_file from being
* cleared */
fp->fi_delegees = 1;
- filp = NULL;
+ nf = NULL;
} else
fp->fi_delegees++;
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
- if (filp)
- fput(filp);
+ if (nf)
+ nfsd_file_put(nf);
if (status)
return ERR_PTR(status);
@@ -4822,7 +4845,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
if (!fl)
goto out_clnt_odstate;
- status = vfs_setlease(fp->fi_deleg_file, fl->fl_type, &fl, NULL);
+ status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
if (fl)
locks_free_lock(fl);
if (status)
@@ -4842,7 +4865,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
return dp;
out_unlock:
- vfs_setlease(fp->fi_deleg_file, F_UNLCK, NULL, (void **)&dp);
+ vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_stid(&dp->dl_stid);
@@ -5513,7 +5536,7 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
return nfs_ok;
}
-static struct file *
+static struct nfsd_file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
if (!s)
@@ -5523,7 +5546,7 @@ nfs4_find_file(struct nfs4_stid *s, int flags)
case NFS4_DELEG_STID:
if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
return NULL;
- return get_file(s->sc_file->fi_deleg_file);
+ return nfsd_file_get(s->sc_file->fi_deleg_file);
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
if (flags & RD_STATE)
@@ -5549,32 +5572,28 @@ nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
- struct file **filpp, bool *tmp_file, int flags)
+ struct nfsd_file **nfp, int flags)
{
int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
- struct file *file;
+ struct nfsd_file *nf;
__be32 status;
- file = nfs4_find_file(s, flags);
- if (file) {
+ nf = nfs4_find_file(s, flags);
+ if (nf) {
status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
acc | NFSD_MAY_OWNER_OVERRIDE);
if (status) {
- fput(file);
- return status;
+ nfsd_file_put(nf);
+ goto out;
}
-
- *filpp = file;
} else {
- status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
+ status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
if (status)
return status;
-
- if (tmp_file)
- *tmp_file = true;
}
-
- return 0;
+ *nfp = nf;
+out:
+ return status;
}
/*
@@ -5583,7 +5602,7 @@ nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
- stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
+ stateid_t *stateid, int flags, struct nfsd_file **nfp)
{
struct inode *ino = d_inode(fhp->fh_dentry);
struct net *net = SVC_NET(rqstp);
@@ -5591,10 +5610,8 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
struct nfs4_stid *s = NULL;
__be32 status;
- if (filpp)
- *filpp = NULL;
- if (tmp_file)
- *tmp_file = false;
+ if (nfp)
+ *nfp = NULL;
if (grace_disallows_io(net, ino))
return nfserr_grace;
@@ -5631,8 +5648,8 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
status = nfs4_check_fh(fhp, s);
done:
- if (!status && filpp)
- status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
+ if (status == nfs_ok && nfp)
+ status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
out:
if (s)
nfs4_put_stid(s);
@@ -6392,7 +6409,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *lock_stp = NULL;
struct nfs4_ol_stateid *open_stp = NULL;
struct nfs4_file *fp;
- struct file *filp = NULL;
+ struct nfsd_file *nf = NULL;
struct nfsd4_blocked_lock *nbl = NULL;
struct file_lock *file_lock = NULL;
struct file_lock *conflock = NULL;
@@ -6474,8 +6491,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* Fallthrough */
case NFS4_READ_LT:
spin_lock(&fp->fi_lock);
- filp = find_readable_file_locked(fp);
- if (filp)
+ nf = find_readable_file_locked(fp);
+ if (nf)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
spin_unlock(&fp->fi_lock);
fl_type = F_RDLCK;
@@ -6486,8 +6503,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* Fallthrough */
case NFS4_WRITE_LT:
spin_lock(&fp->fi_lock);
- filp = find_writeable_file_locked(fp);
- if (filp)
+ nf = find_writeable_file_locked(fp);
+ if (nf)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
spin_unlock(&fp->fi_lock);
fl_type = F_WRLCK;
@@ -6497,7 +6514,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
- if (!filp) {
+ if (!nf) {
status = nfserr_openmode;
goto out;
}
@@ -6513,7 +6530,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
file_lock->fl_type = fl_type;
file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
file_lock->fl_pid = current->tgid;
- file_lock->fl_file = filp;
+ file_lock->fl_file = nf->nf_file;
file_lock->fl_flags = fl_flags;
file_lock->fl_lmops = &nfsd_posix_mng_ops;
file_lock->fl_start = lock->lk_offset;
@@ -6535,7 +6552,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
spin_unlock(&nn->blocked_locks_lock);
}
- err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
+ err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
switch (err) {
case 0: /* success! */
nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
@@ -6570,8 +6587,8 @@ out:
}
free_blocked_lock(nbl);
}
- if (filp)
- fput(filp);
+ if (nf)
+ nfsd_file_put(nf);
if (lock_stp) {
/* Bump seqid manually if the 4.0 replay owner is openowner */
if (cstate->replay_owner &&
@@ -6606,11 +6623,11 @@ out:
*/
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
- struct file *file;
- __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
+ struct nfsd_file *nf;
+ __be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
if (!err) {
- err = nfserrno(vfs_test_lock(file, lock));
- fput(file);
+ err = nfserrno(vfs_test_lock(nf->nf_file, lock));
+ nfsd_file_put(nf);
}
return err;
}
@@ -6698,7 +6715,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct nfsd4_locku *locku = &u->locku;
struct nfs4_ol_stateid *stp;
- struct file *filp = NULL;
+ struct nfsd_file *nf = NULL;
struct file_lock *file_lock = NULL;
__be32 status;
int err;
@@ -6716,8 +6733,8 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&stp, nn);
if (status)
goto out;
- filp = find_any_file(stp->st_stid.sc_file);
- if (!filp) {
+ nf = find_any_file(stp->st_stid.sc_file);
+ if (!nf) {
status = nfserr_lock_range;
goto put_stateid;
}
@@ -6725,13 +6742,13 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (!file_lock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
status = nfserr_jukebox;
- goto fput;
+ goto put_file;
}
file_lock->fl_type = F_UNLCK;
file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
file_lock->fl_pid = current->tgid;
- file_lock->fl_file = filp;
+ file_lock->fl_file = nf->nf_file;
file_lock->fl_flags = FL_POSIX;
file_lock->fl_lmops = &nfsd_posix_mng_ops;
file_lock->fl_start = locku->lu_offset;
@@ -6740,14 +6757,14 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
locku->lu_length);
nfs4_transform_lock_offset(file_lock);
- err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
+ err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
if (err) {
dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
goto out_nfserr;
}
nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
-fput:
- fput(filp);
+put_file:
+ nfsd_file_put(nf);
put_stateid:
mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
@@ -6759,7 +6776,7 @@ out:
out_nfserr:
status = nfserrno(err);
- goto fput;
+ goto put_file;
}
/*
@@ -6772,17 +6789,17 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
struct file_lock *fl;
int status = false;
- struct file *filp = find_any_file(fp);
+ struct nfsd_file *nf = find_any_file(fp);
struct inode *inode;
struct file_lock_context *flctx;
- if (!filp) {
+ if (!nf) {
/* Any valid lock stateid should have some sort of access */
WARN_ON_ONCE(1);
return status;
}
- inode = locks_inode(filp);
+ inode = locks_inode(nf->nf_file);
flctx = inode->i_flctx;
if (flctx && !list_empty_careful(&flctx->flc_posix)) {
@@ -6795,7 +6812,7 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
}
spin_unlock(&flctx->flc_lock);
}
- fput(filp);
+ nfsd_file_put(nf);
return status;
}
@@ -6888,7 +6905,8 @@ nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
* will be freed in nfs4_remove_reclaim_record in the normal case).
*/
struct nfs4_client_reclaim *
-nfs4_client_to_reclaim(struct xdr_netobj name, struct nfsd_net *nn)
+nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
+ struct nfsd_net *nn)
{
unsigned int strhashval;
struct nfs4_client_reclaim *crp;
@@ -6901,6 +6919,8 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct nfsd_net *nn)
list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
crp->cr_name.data = name.data;
crp->cr_name.len = name.len;
+ crp->cr_princhash.data = princhash.data;
+ crp->cr_princhash.len = princhash.len;
crp->cr_clp = NULL;
nn->reclaim_str_hashtbl_size++;
}
@@ -6912,6 +6932,7 @@ nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
list_del(&crp->cr_strhash);
kfree(crp->cr_name.data);
+ kfree(crp->cr_princhash.data);
kfree(crp);
nn->reclaim_str_hashtbl_size--;
}
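
Note: the bulk of the nfs4state.c changes above follow one conversion pattern: wherever a bare struct file was taken from open/delegation state or opened ad hoc with nfsd_open(), the code now holds a struct nfsd_file from the new file cache and releases it with nfsd_file_put() instead of fput(). A hypothetical caller, shown only to make the acquire/use/put contract explicit (the helper itself is not in the patch):

#include <linux/fs.h>
#include "filecache.h"
#include "vfs.h"

static __be32 sync_one_file(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
	struct nfsd_file *nf;
	__be32 err;

	/* verifies the handle and returns a cached open file */
	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_WRITE, &nf);
	if (err)
		return err;

	err = nfserrno(vfs_fsync(nf->nf_file, 0));

	/* drop the cache reference; never fput(nf->nf_file) directly */
	nfsd_file_put(nf);
	return err;
}
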
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 442811809f3d..533d0fc3c96b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -49,6 +49,7 @@
#include "cache.h"
#include "netns.h"
#include "pnfs.h"
+#include "filecache.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
@@ -203,6 +204,13 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
return p;
}
+static unsigned int compoundargs_bytes_left(struct nfsd4_compoundargs *argp)
+{
+ unsigned int this = (char *)argp->end - (char *)argp->p;
+
+ return this + argp->pagelen;
+}
+
static int zero_clientid(clientid_t *clid)
{
return (clid->cl_boot == 0) && (clid->cl_id == 0);
@@ -211,10 +219,10 @@ static int zero_clientid(clientid_t *clid)
/**
* svcxdr_tmpalloc - allocate memory to be freed after compound processing
* @argp: NFSv4 compound argument structure
- * @p: pointer to be freed (with kfree())
+ * @len: length of buffer to allocate
*
- * Marks @p to be freed when processing the compound operation
- * described in @argp finishes.
+ * Allocates a buffer of size @len to be freed when processing the compound
+ * operation described in @argp finishes.
*/
static void *
svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, u32 len)
@@ -347,7 +355,12 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
READ_BUF(4); len += 4;
nace = be32_to_cpup(p++);
- if (nace > NFS4_ACL_MAX)
+ if (nace > compoundargs_bytes_left(argp)/20)
+ /*
+ * Even with 4-byte names there wouldn't be
+ * space for that many aces; something fishy is
+ * going on:
+ */
return nfserr_fbig;
*acl = svcxdr_tmpalloc(argp, nfs4_acl_bytes(nace));
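
Note: the divisor of 20 in the new sanity check is a lower bound on the XDR size of a single nfsace4, as the new comment hints: three 4-byte words for type, flag and access mask, a 4-byte opaque length, and at least 4 (padded) bytes of "who" data. Spelled out as an editor's illustration, not part of the patch:

/* Minimum wire size of one nfsace4 (XDR is 4-byte aligned). */
#define NFS4_ACE_MIN_XDR_BYTES \
	(4 /* type */ + 4 /* flag */ + 4 /* access mask */ + \
	 4 /* who length */ + 4 /* who data, padded */)	/* == 20 */
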
@@ -1418,7 +1431,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
struct nfsd4_create_session *sess)
{
DECODE_HEAD;
- u32 dummy;
READ_BUF(16);
COPYMEM(&sess->clientid, 8);
@@ -1427,7 +1439,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
/* Fore channel attrs */
READ_BUF(28);
- dummy = be32_to_cpup(p++); /* headerpadsz is always 0 */
+ p++; /* headerpadsz is always 0 */
sess->fore_channel.maxreq_sz = be32_to_cpup(p++);
sess->fore_channel.maxresp_sz = be32_to_cpup(p++);
sess->fore_channel.maxresp_cached = be32_to_cpup(p++);
@@ -1444,7 +1456,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
/* Back channel attrs */
READ_BUF(28);
- dummy = be32_to_cpup(p++); /* headerpadsz is always 0 */
+ p++; /* headerpadsz is always 0 */
sess->back_channel.maxreq_sz = be32_to_cpup(p++);
sess->back_channel.maxresp_sz = be32_to_cpup(p++);
sess->back_channel.maxresp_cached = be32_to_cpup(p++);
@@ -1736,7 +1748,6 @@ static __be32
nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
{
DECODE_HEAD;
- unsigned int tmp;
status = nfsd4_decode_stateid(argp, &copy->cp_src_stateid);
if (status)
@@ -1751,7 +1762,7 @@ nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
p = xdr_decode_hyper(p, &copy->cp_count);
p++; /* ca_consecutive: we always do consecutive copies */
copy->cp_synchronous = be32_to_cpup(p++);
- tmp = be32_to_cpup(p); /* Source server list not supported */
+ /* tmp = be32_to_cpup(p); Source server list not supported */
DECODE_TAIL;
}
@@ -3217,9 +3228,8 @@ nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
if (!p)
return nfserr_resource;
encode_cinfo(p, &create->cr_cinfo);
- nfserr = nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
+ return nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
create->cr_bmval[1], create->cr_bmval[2]);
- return 0;
}
static __be32
@@ -3462,7 +3472,7 @@ static __be32 nfsd4_encode_splice_read(
len = maxcount;
nfserr = nfsd_splice_read(read->rd_rqstp, read->rd_fhp,
- file, read->rd_offset, &maxcount);
+ file, read->rd_offset, &maxcount, &eof);
read->rd_length = maxcount;
if (nfserr) {
/*
@@ -3474,9 +3484,6 @@ static __be32 nfsd4_encode_splice_read(
return nfserr;
}
- eof = nfsd_eof_on_read(len, maxcount, read->rd_offset,
- d_inode(read->rd_fhp->fh_dentry)->i_size);
-
*(p++) = htonl(eof);
*(p++) = htonl(maxcount);
@@ -3547,15 +3554,13 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
len = maxcount;
nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, file, read->rd_offset,
- resp->rqstp->rq_vec, read->rd_vlen, &maxcount);
+ resp->rqstp->rq_vec, read->rd_vlen, &maxcount,
+ &eof);
read->rd_length = maxcount;
if (nfserr)
return nfserr;
xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3));
- eof = nfsd_eof_on_read(len, maxcount, read->rd_offset,
- d_inode(read->rd_fhp->fh_dentry)->i_size);
-
tmp = htonl(eof);
write_bytes_to_xdr_buf(xdr->buf, starting_len , &tmp, 4);
tmp = htonl(maxcount);
@@ -3574,11 +3579,14 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
{
unsigned long maxcount;
struct xdr_stream *xdr = &resp->xdr;
- struct file *file = read->rd_filp;
+ struct file *file;
int starting_len = xdr->buf->len;
- struct raparms *ra = NULL;
__be32 *p;
+ if (nfserr)
+ return nfserr;
+ file = read->rd_nf->nf_file;
+
p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
if (!p) {
WARN_ON_ONCE(test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags));
@@ -3596,18 +3604,12 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
(xdr->buf->buflen - xdr->buf->len));
maxcount = min_t(unsigned long, maxcount, read->rd_length);
- if (read->rd_tmp_file)
- ra = nfsd_init_raparms(file);
-
if (file->f_op->splice_read &&
test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
- if (ra)
- nfsd_put_raparams(file, ra);
-
if (nfserr)
xdr_truncate_encode(xdr, starting_len);
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 26ad75ae2be0..96352ab7bd81 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -571,7 +571,7 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
*/
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
- struct nfsd_net *nn = v;
+ struct nfsd_net *nn = m->private;
seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
seq_printf(m, "num entries: %u\n",
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 13c548733860..11b42c523f04 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1171,13 +1171,17 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
return inode;
}
-static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
{
struct inode *inode;
inode = nfsd_get_inode(dir->i_sb, mode);
if (!inode)
return -ENOMEM;
+ if (ncl) {
+ inode->i_private = ncl;
+ kref_get(&ncl->cl_ref);
+ }
d_add(dentry, inode);
inc_nlink(dir);
fsnotify_mkdir(dir, dentry);
@@ -1194,17 +1198,14 @@ static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *nc
dentry = d_alloc_name(parent, name);
if (!dentry)
goto out_err;
- ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600);
+ ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
if (ret)
goto out_err;
- if (ncl) {
- d_inode(dentry)->i_private = ncl;
- kref_get(&ncl->cl_ref);
- }
out:
inode_unlock(dir);
return dentry;
out_err:
+ dput(dentry);
dentry = ERR_PTR(ret);
goto out;
}
@@ -1214,11 +1215,9 @@ static void clear_ncl(struct inode *inode)
struct nfsdfs_client *ncl = inode->i_private;
inode->i_private = NULL;
- synchronize_rcu();
kref_put(&ncl->cl_ref, ncl->cl_release);
}
-
static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode)
{
struct nfsdfs_client *nc = inode->i_private;
@@ -1232,9 +1231,9 @@ struct nfsdfs_client *get_nfsdfs_client(struct inode *inode)
{
struct nfsdfs_client *nc;
- rcu_read_lock();
+ inode_lock_shared(inode);
nc = __get_nfsdfs_client(inode);
- rcu_read_unlock();
+ inode_unlock_shared(inode);
return nc;
}
/* from __rpc_unlink */
@@ -1386,8 +1385,7 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
static int nfsd_fs_get_tree(struct fs_context *fc)
{
- fc->s_fs_info = get_net(fc->net_ns);
- return vfs_get_super(fc, vfs_get_keyed_super, nfsd_fill_super);
+ return get_tree_keyed(fc, nfsd_fill_super, get_net(fc->net_ns));
}
static void nfsd_fs_free_fc(struct fs_context *fc)
@@ -1478,6 +1476,7 @@ static __net_init int nfsd_init_net(struct net *net)
atomic_set(&nn->ntf_refcnt, 0);
init_waitqueue_head(&nn->ntf_wq);
+ seqlock_init(&nn->boot_lock);
mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
if (IS_ERR(mnt)) {
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 0d20fd161225..c83ddac22f38 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -172,6 +172,7 @@ nfsd_proc_read(struct svc_rqst *rqstp)
struct nfsd_readargs *argp = rqstp->rq_argp;
struct nfsd_readres *resp = rqstp->rq_resp;
__be32 nfserr;
+ u32 eof;
dprintk("nfsd: READ %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
@@ -195,7 +196,8 @@ nfsd_proc_read(struct svc_rqst *rqstp)
nfserr = nfsd_read(rqstp, fh_copy(&resp->fh, &argp->fh),
argp->offset,
rqstp->rq_vec, argp->vlen,
- &resp->count);
+ &resp->count,
+ &eof);
if (nfserr) return nfserr;
return fh_getattr(&resp->fh, &resp->stat);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 18d94ea984ba..fdf7ed4bd5dd 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -27,6 +27,7 @@
#include "cache.h"
#include "vfs.h"
#include "netns.h"
+#include "filecache.h"
#define NFSDDBG_FACILITY NFSDDBG_SVC
@@ -313,22 +314,17 @@ static int nfsd_startup_generic(int nrservs)
if (nfsd_users++)
return 0;
- /*
- * Readahead param cache - will no-op if it already exists.
- * (Note therefore results will be suboptimal if number of
- * threads is modified after nfsd start.)
- */
- ret = nfsd_racache_init(2*nrservs);
+ ret = nfsd_file_cache_init();
if (ret)
goto dec_users;
ret = nfs4_state_start();
if (ret)
- goto out_racache;
+ goto out_file_cache;
return 0;
-out_racache:
- nfsd_racache_shutdown();
+out_file_cache:
+ nfsd_file_cache_shutdown();
dec_users:
nfsd_users--;
return ret;
@@ -340,7 +336,7 @@ static void nfsd_shutdown_generic(void)
return;
nfs4_state_shutdown();
- nfsd_racache_shutdown();
+ nfsd_file_cache_shutdown();
}
static bool nfsd_needs_lockd(struct nfsd_net *nn)
@@ -348,6 +344,35 @@ static bool nfsd_needs_lockd(struct nfsd_net *nn)
return nfsd_vers(nn, 2, NFSD_TEST) || nfsd_vers(nn, 3, NFSD_TEST);
}
+void nfsd_copy_boot_verifier(__be32 verf[2], struct nfsd_net *nn)
+{
+ int seq = 0;
+
+ do {
+ read_seqbegin_or_lock(&nn->boot_lock, &seq);
+ /*
+ * This is opaque to client, so no need to byte-swap. Use
+ * __force to keep sparse happy. y2038 time_t overflow is
+ * irrelevant in this usage
+ */
+ verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
+ verf[1] = (__force __be32)nn->nfssvc_boot.tv_nsec;
+ } while (need_seqretry(&nn->boot_lock, seq));
+ done_seqretry(&nn->boot_lock, seq);
+}
+
+static void nfsd_reset_boot_verifier_locked(struct nfsd_net *nn)
+{
+ ktime_get_real_ts64(&nn->nfssvc_boot);
+}
+
+void nfsd_reset_boot_verifier(struct nfsd_net *nn)
+{
+ write_seqlock(&nn->boot_lock);
+ nfsd_reset_boot_verifier_locked(nn);
+ write_sequnlock(&nn->boot_lock);
+}
+
static int nfsd_startup_net(int nrservs, struct net *net, const struct cred *cred)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -391,6 +416,7 @@ static void nfsd_shutdown_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ nfsd_file_cache_purge(net);
nfs4_state_shutdown_net(net);
if (nn->lockd_up) {
lockd_down(net);
@@ -599,7 +625,7 @@ int nfsd_create_serv(struct net *net)
#endif
}
atomic_inc(&nn->ntf_refcnt);
- ktime_get_real_ts64(&nn->nfssvc_boot); /* record boot time */
+ nfsd_reset_boot_verifier(nn);
return 0;
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 5dbd16946e8e..46f56afb6cb8 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -378,6 +378,7 @@ struct nfs4_client_reclaim {
struct list_head cr_strhash; /* hash by cr_name */
struct nfs4_client *cr_clp; /* pointer to associated clp */
struct xdr_netobj cr_name; /* recovery dir name */
+ struct xdr_netobj cr_princhash;
};
/* A reasonable value for REPLAY_ISIZE was estimated as follows:
@@ -506,7 +507,7 @@ struct nfs4_file {
};
struct list_head fi_clnt_odstate;
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
- struct file * fi_fds[3];
+ struct nfsd_file *fi_fds[3];
/*
* Each open or lock stateid contributes 0-4 to the counts
* below depending on which bits are set in st_access_bitmap:
@@ -516,7 +517,7 @@ struct nfs4_file {
*/
atomic_t fi_access[2];
u32 fi_share_deny;
- struct file *fi_deleg_file;
+ struct nfsd_file *fi_deleg_file;
int fi_delegees;
struct knfsd_fh fi_fhandle;
bool fi_had_conflict;
@@ -565,7 +566,7 @@ struct nfs4_layout_stateid {
spinlock_t ls_lock;
struct list_head ls_layouts;
u32 ls_layout_type;
- struct file *ls_file;
+ struct nfsd_file *ls_file;
struct nfsd4_callback ls_recall;
stateid_t ls_recall_sid;
bool ls_recalled;
@@ -616,7 +617,7 @@ struct nfsd4_copy;
extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
- stateid_t *stateid, int flags, struct file **filp, bool *tmp_file);
+ stateid_t *stateid, int flags, struct nfsd_file **filp);
__be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
stateid_t *stateid, unsigned char typemask,
struct nfs4_stid **s, struct nfsd_net *nn);
@@ -645,7 +646,7 @@ extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfsd4_shutdown_copy(struct nfs4_client *clp);
extern void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp);
extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(struct xdr_netobj name,
- struct nfsd_net *nn);
+ struct xdr_netobj princhash, struct nfsd_net *nn);
extern bool nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn);
struct nfs4_file *find_file(struct knfsd_fh *fh);
@@ -657,7 +658,7 @@ static inline void get_nfs4_file(struct nfs4_file *fi)
{
refcount_inc(&fi->fi_ref);
}
-struct file *find_any_file(struct nfs4_file *f);
+struct nfsd_file *find_any_file(struct nfs4_file *f);
/* grace period management */
void nfsd4_end_grace(struct nfsd_net *nn);
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 80933e4334d8..ffc78a0e28b2 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -126,6 +126,8 @@ DEFINE_NFSD_ERR_EVENT(read_err);
DEFINE_NFSD_ERR_EVENT(write_err);
#include "state.h"
+#include "filecache.h"
+#include "vfs.h"
DECLARE_EVENT_CLASS(nfsd_stateid_class,
TP_PROTO(stateid_t *stp),
@@ -164,6 +166,144 @@ DEFINE_STATEID_EVENT(layout_recall_done);
DEFINE_STATEID_EVENT(layout_recall_fail);
DEFINE_STATEID_EVENT(layout_recall_release);
+#define show_nf_flags(val) \
+ __print_flags(val, "|", \
+ { 1 << NFSD_FILE_HASHED, "HASHED" }, \
+ { 1 << NFSD_FILE_PENDING, "PENDING" }, \
+ { 1 << NFSD_FILE_BREAK_READ, "BREAK_READ" }, \
+ { 1 << NFSD_FILE_BREAK_WRITE, "BREAK_WRITE" }, \
+ { 1 << NFSD_FILE_REFERENCED, "REFERENCED"})
+
+/* FIXME: This should probably be fleshed out in the future. */
+#define show_nf_may(val) \
+ __print_flags(val, "|", \
+ { NFSD_MAY_READ, "READ" }, \
+ { NFSD_MAY_WRITE, "WRITE" }, \
+ { NFSD_MAY_NOT_BREAK_LEASE, "NOT_BREAK_LEASE" })
+
+DECLARE_EVENT_CLASS(nfsd_file_class,
+ TP_PROTO(struct nfsd_file *nf),
+ TP_ARGS(nf),
+ TP_STRUCT__entry(
+ __field(unsigned int, nf_hashval)
+ __field(void *, nf_inode)
+ __field(int, nf_ref)
+ __field(unsigned long, nf_flags)
+ __field(unsigned char, nf_may)
+ __field(struct file *, nf_file)
+ ),
+ TP_fast_assign(
+ __entry->nf_hashval = nf->nf_hashval;
+ __entry->nf_inode = nf->nf_inode;
+ __entry->nf_ref = atomic_read(&nf->nf_ref);
+ __entry->nf_flags = nf->nf_flags;
+ __entry->nf_may = nf->nf_may;
+ __entry->nf_file = nf->nf_file;
+ ),
+ TP_printk("hash=0x%x inode=0x%p ref=%d flags=%s may=%s file=%p",
+ __entry->nf_hashval,
+ __entry->nf_inode,
+ __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ show_nf_may(__entry->nf_may),
+ __entry->nf_file)
+)
+
+#define DEFINE_NFSD_FILE_EVENT(name) \
+DEFINE_EVENT(nfsd_file_class, name, \
+ TP_PROTO(struct nfsd_file *nf), \
+ TP_ARGS(nf))
+
+DEFINE_NFSD_FILE_EVENT(nfsd_file_alloc);
+DEFINE_NFSD_FILE_EVENT(nfsd_file_put_final);
+DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash);
+DEFINE_NFSD_FILE_EVENT(nfsd_file_put);
+DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash_and_release_locked);
+
+TRACE_EVENT(nfsd_file_acquire,
+ TP_PROTO(struct svc_rqst *rqstp, unsigned int hash,
+ struct inode *inode, unsigned int may_flags,
+ struct nfsd_file *nf, __be32 status),
+
+ TP_ARGS(rqstp, hash, inode, may_flags, nf, status),
+
+ TP_STRUCT__entry(
+ __field(__be32, xid)
+ __field(unsigned int, hash)
+ __field(void *, inode)
+ __field(unsigned int, may_flags)
+ __field(int, nf_ref)
+ __field(unsigned long, nf_flags)
+ __field(unsigned char, nf_may)
+ __field(struct file *, nf_file)
+ __field(__be32, status)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = rqstp->rq_xid;
+ __entry->hash = hash;
+ __entry->inode = inode;
+ __entry->may_flags = may_flags;
+ __entry->nf_ref = nf ? atomic_read(&nf->nf_ref) : 0;
+ __entry->nf_flags = nf ? nf->nf_flags : 0;
+ __entry->nf_may = nf ? nf->nf_may : 0;
+ __entry->nf_file = nf ? nf->nf_file : NULL;
+ __entry->status = status;
+ ),
+
+ TP_printk("xid=0x%x hash=0x%x inode=0x%p may_flags=%s ref=%d nf_flags=%s nf_may=%s nf_file=0x%p status=%u",
+ be32_to_cpu(__entry->xid), __entry->hash, __entry->inode,
+ show_nf_may(__entry->may_flags), __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ show_nf_may(__entry->nf_may), __entry->nf_file,
+ be32_to_cpu(__entry->status))
+);
+
+DECLARE_EVENT_CLASS(nfsd_file_search_class,
+ TP_PROTO(struct inode *inode, unsigned int hash, int found),
+ TP_ARGS(inode, hash, found),
+ TP_STRUCT__entry(
+ __field(struct inode *, inode)
+ __field(unsigned int, hash)
+ __field(int, found)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->hash = hash;
+ __entry->found = found;
+ ),
+ TP_printk("hash=0x%x inode=0x%p found=%d", __entry->hash,
+ __entry->inode, __entry->found)
+);
+
+#define DEFINE_NFSD_FILE_SEARCH_EVENT(name) \
+DEFINE_EVENT(nfsd_file_search_class, name, \
+ TP_PROTO(struct inode *inode, unsigned int hash, int found), \
+ TP_ARGS(inode, hash, found))
+
+DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_close_inode_sync);
+DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_close_inode);
+DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_is_cached);
+
+TRACE_EVENT(nfsd_file_fsnotify_handle_event,
+ TP_PROTO(struct inode *inode, u32 mask),
+ TP_ARGS(inode, mask),
+ TP_STRUCT__entry(
+ __field(struct inode *, inode)
+ __field(unsigned int, nlink)
+ __field(umode_t, mode)
+ __field(u32, mask)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->nlink = inode->i_nlink;
+ __entry->mode = inode->i_mode;
+ __entry->mask = mask;
+ ),
+ TP_printk("inode=0x%p nlink=%u mode=0%ho mask=0x%x", __entry->inode,
+ __entry->nlink, __entry->mode, __entry->mask)
+);
+
#endif /* _NFSD_TRACE_H */
#undef TRACE_INCLUDE_PATH
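
Note: each TRACE_EVENT/DEFINE_EVENT above generates a trace_<name>() function with the TP_PROTO signature; the call sites live in the new filecache code, which this listing does not show. A representative (assumed) call site, using whatever rqstp/hashval/inode/nf the caller has in scope, would look like:

	trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
	trace_nfsd_file_put(nf);
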
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c85783e536d5..bd0a385df3fc 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -44,38 +44,11 @@
#include "nfsd.h"
#include "vfs.h"
+#include "filecache.h"
#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_FILEOP
-
-/*
- * This is a cache of readahead params that help us choose the proper
- * readahead strategy. Initially, we set all readahead parameters to 0
- * and let the VFS handle things.
- * If you increase the number of cached files very much, you'll need to
- * add a hash table here.
- */
-struct raparms {
- struct raparms *p_next;
- unsigned int p_count;
- ino_t p_ino;
- dev_t p_dev;
- int p_set;
- struct file_ra_state p_ra;
- unsigned int p_hindex;
-};
-
-struct raparm_hbucket {
- struct raparms *pb_head;
- spinlock_t pb_lock;
-} ____cacheline_aligned_in_smp;
-
-#define RAPARM_HASH_BITS 4
-#define RAPARM_HASH_SIZE (1<<RAPARM_HASH_BITS)
-#define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1)
-static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE];
-
/*
* Called from nfsd_lookup and encode_dirent. Check if we have crossed
* a mount point.
@@ -699,7 +672,7 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
}
#endif /* CONFIG_NFSD_V3 */
-static int nfsd_open_break_lease(struct inode *inode, int access)
+int nfsd_open_break_lease(struct inode *inode, int access)
{
unsigned int mode;
@@ -715,8 +688,8 @@ static int nfsd_open_break_lease(struct inode *inode, int access)
* and additional flags.
* N.B. After this call fhp needs an fh_put
*/
-__be32
-nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+static __be32
+__nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int may_flags, struct file **filp)
{
struct path path;
@@ -726,25 +699,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
__be32 err;
int host_err = 0;
- validate_process_creds();
-
- /*
- * If we get here, then the client has already done an "open",
- * and (hopefully) checked permission - so allow OWNER_OVERRIDE
- * in case a chmod has now revoked permission.
- *
- * Arguably we should also allow the owner override for
- * directories, but we never have and it doesn't seem to have
- * caused anyone a problem. If we were to change this, note
- * also that our filldir callbacks would need a variant of
- * lookup_one_len that doesn't check permissions.
- */
- if (type == S_IFREG)
- may_flags |= NFSD_MAY_OWNER_OVERRIDE;
- err = fh_verify(rqstp, fhp, type, may_flags);
- if (err)
- goto out;
-
path.mnt = fhp->fh_export->ex_path.mnt;
path.dentry = fhp->fh_dentry;
inode = d_inode(path.dentry);
@@ -798,67 +752,46 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
out_nfserr:
err = nfserrno(host_err);
out:
- validate_process_creds();
return err;
}
-struct raparms *
-nfsd_init_raparms(struct file *file)
+__be32
+nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ int may_flags, struct file **filp)
{
- struct inode *inode = file_inode(file);
- dev_t dev = inode->i_sb->s_dev;
- ino_t ino = inode->i_ino;
- struct raparms *ra, **rap, **frap = NULL;
- int depth = 0;
- unsigned int hash;
- struct raparm_hbucket *rab;
-
- hash = jhash_2words(dev, ino, 0xfeedbeef) & RAPARM_HASH_MASK;
- rab = &raparm_hash[hash];
-
- spin_lock(&rab->pb_lock);
- for (rap = &rab->pb_head; (ra = *rap); rap = &ra->p_next) {
- if (ra->p_ino == ino && ra->p_dev == dev)
- goto found;
- depth++;
- if (ra->p_count == 0)
- frap = rap;
- }
- depth = nfsdstats.ra_size;
- if (!frap) {
- spin_unlock(&rab->pb_lock);
- return NULL;
- }
- rap = frap;
- ra = *frap;
- ra->p_dev = dev;
- ra->p_ino = ino;
- ra->p_set = 0;
- ra->p_hindex = hash;
-found:
- if (rap != &rab->pb_head) {
- *rap = ra->p_next;
- ra->p_next = rab->pb_head;
- rab->pb_head = ra;
- }
- ra->p_count++;
- nfsdstats.ra_depth[depth*10/nfsdstats.ra_size]++;
- spin_unlock(&rab->pb_lock);
+ __be32 err;
- if (ra->p_set)
- file->f_ra = ra->p_ra;
- return ra;
+ validate_process_creds();
+ /*
+ * If we get here, then the client has already done an "open",
+ * and (hopefully) checked permission - so allow OWNER_OVERRIDE
+ * in case a chmod has now revoked permission.
+ *
+ * Arguably we should also allow the owner override for
+ * directories, but we never have and it doesn't seem to have
+ * caused anyone a problem. If we were to change this, note
+ * also that our filldir callbacks would need a variant of
+ * lookup_one_len that doesn't check permissions.
+ */
+ if (type == S_IFREG)
+ may_flags |= NFSD_MAY_OWNER_OVERRIDE;
+ err = fh_verify(rqstp, fhp, type, may_flags);
+ if (!err)
+ err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
+ validate_process_creds();
+ return err;
}
-void nfsd_put_raparams(struct file *file, struct raparms *ra)
+__be32
+nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ int may_flags, struct file **filp)
{
- struct raparm_hbucket *rab = &raparm_hash[ra->p_hindex];
+ __be32 err;
- spin_lock(&rab->pb_lock);
- ra->p_ra = file->f_ra;
- ra->p_set = 1;
- ra->p_count--;
- spin_unlock(&rab->pb_lock);
+ validate_process_creds();
+ err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
+ validate_process_creds();
+ return err;
}
/*
@@ -901,12 +834,23 @@ static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
}
+static u32 nfsd_eof_on_read(struct file *file, loff_t offset, ssize_t len,
+ size_t expected)
+{
+ if (expected != 0 && len == 0)
+ return 1;
+ if (offset+len >= i_size_read(file_inode(file)))
+ return 1;
+ return 0;
+}
+
static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct file *file, loff_t offset,
- unsigned long *count, int host_err)
+ unsigned long *count, u32 *eof, ssize_t host_err)
{
if (host_err >= 0) {
nfsdstats.io_read += host_err;
+ *eof = nfsd_eof_on_read(file, offset, host_err, *count);
*count = host_err;
fsnotify_access(file);
trace_nfsd_read_io_done(rqstp, fhp, offset, *count);
@@ -918,7 +862,8 @@ static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
__be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct file *file, loff_t offset, unsigned long *count)
+ struct file *file, loff_t offset, unsigned long *count,
+ u32 *eof)
{
struct splice_desc sd = {
.len = 0,
@@ -926,25 +871,27 @@ __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
.pos = offset,
.u.data = rqstp,
};
- int host_err;
+ ssize_t host_err;
trace_nfsd_read_splice(rqstp, fhp, offset, *count);
rqstp->rq_next_page = rqstp->rq_respages + 1;
host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
- return nfsd_finish_read(rqstp, fhp, file, offset, count, host_err);
+ return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}
__be32 nfsd_readv(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct file *file, loff_t offset,
- struct kvec *vec, int vlen, unsigned long *count)
+ struct kvec *vec, int vlen, unsigned long *count,
+ u32 *eof)
{
struct iov_iter iter;
- int host_err;
+ loff_t ppos = offset;
+ ssize_t host_err;
trace_nfsd_read_vector(rqstp, fhp, offset, *count);
iov_iter_kvec(&iter, READ, vec, vlen, *count);
- host_err = vfs_iter_read(file, &iter, &offset, 0);
- return nfsd_finish_read(rqstp, fhp, file, offset, count, host_err);
+ host_err = vfs_iter_read(file, &iter, &ppos, 0);
+ return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}
/*
@@ -1025,8 +972,12 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
nfsdstats.io_write += *cnt;
fsnotify_modify(file);
- if (stable && use_wgather)
+ if (stable && use_wgather) {
host_err = wait_for_concurrent_writes(file);
+ if (host_err < 0)
+ nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
+ nfsd_net_id));
+ }
out_nfserr:
if (host_err >= 0) {
@@ -1047,27 +998,25 @@ out_nfserr:
* N.B. After this call fhp needs an fh_put
*/
__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
- loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
+ loff_t offset, struct kvec *vec, int vlen, unsigned long *count,
+ u32 *eof)
{
+ struct nfsd_file *nf;
struct file *file;
- struct raparms *ra;
__be32 err;
trace_nfsd_read_start(rqstp, fhp, offset, *count);
- err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
+ err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
if (err)
return err;
- ra = nfsd_init_raparms(file);
-
+ file = nf->nf_file;
if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &rqstp->rq_flags))
- err = nfsd_splice_read(rqstp, fhp, file, offset, count);
+ err = nfsd_splice_read(rqstp, fhp, file, offset, count, eof);
else
- err = nfsd_readv(rqstp, fhp, file, offset, vec, vlen, count);
+ err = nfsd_readv(rqstp, fhp, file, offset, vec, vlen, count, eof);
- if (ra)
- nfsd_put_raparams(file, ra);
- fput(file);
+ nfsd_file_put(nf);
trace_nfsd_read_done(rqstp, fhp, offset, *count);
@@ -1083,17 +1032,18 @@ __be32
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
struct kvec *vec, int vlen, unsigned long *cnt, int stable)
{
- struct file *file = NULL;
- __be32 err = 0;
+ struct nfsd_file *nf;
+ __be32 err;
trace_nfsd_write_start(rqstp, fhp, offset, *cnt);
- err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file);
+ err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_WRITE, &nf);
if (err)
goto out;
- err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen, cnt, stable);
- fput(file);
+ err = nfsd_vfs_write(rqstp, fhp, nf->nf_file, offset, vec,
+ vlen, cnt, stable);
+ nfsd_file_put(nf);
out:
trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
return err;
@@ -1113,9 +1063,9 @@ __be32
nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
loff_t offset, unsigned long count)
{
- struct file *file;
- loff_t end = LLONG_MAX;
- __be32 err = nfserr_inval;
+ struct nfsd_file *nf;
+ loff_t end = LLONG_MAX;
+ __be32 err = nfserr_inval;
if (offset < 0)
goto out;
@@ -1125,20 +1075,27 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
}
- err = nfsd_open(rqstp, fhp, S_IFREG,
- NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file);
+ err = nfsd_file_acquire(rqstp, fhp,
+ NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &nf);
if (err)
goto out;
if (EX_ISSYNC(fhp->fh_export)) {
- int err2 = vfs_fsync_range(file, offset, end, 0);
+ int err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
- if (err2 != -EINVAL)
- err = nfserrno(err2);
- else
+ switch (err2) {
+ case 0:
+ break;
+ case -EINVAL:
err = nfserr_notsupp;
+ break;
+ default:
+ err = nfserrno(err2);
+ nfsd_reset_boot_verifier(net_generic(nf->nf_net,
+ nfsd_net_id));
+ }
}
- fput(file);
+ nfsd_file_put(nf);
out:
return err;
}
@@ -1659,6 +1616,26 @@ out_nfserr:
goto out_unlock;
}
+static void
+nfsd_close_cached_files(struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+
+ if (inode && S_ISREG(inode->i_mode))
+ nfsd_file_close_inode_sync(inode);
+}
+
+static bool
+nfsd_has_cached_files(struct dentry *dentry)
+{
+ bool ret = false;
+ struct inode *inode = d_inode(dentry);
+
+ if (inode && S_ISREG(inode->i_mode))
+ ret = nfsd_file_is_cached(inode);
+ return ret;
+}
+
/*
* Rename a file
* N.B. After this call _both_ ffhp and tfhp need an fh_put
@@ -1671,6 +1648,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
struct inode *fdir, *tdir;
__be32 err;
int host_err;
+ bool has_cached = false;
err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE);
if (err)
@@ -1689,6 +1667,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
goto out;
+retry:
host_err = fh_want_write(ffhp);
if (host_err) {
err = nfserrno(host_err);
@@ -1728,11 +1707,16 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
goto out_dput_new;
- host_err = vfs_rename(fdir, odentry, tdir, ndentry, NULL, 0);
- if (!host_err) {
- host_err = commit_metadata(tfhp);
- if (!host_err)
- host_err = commit_metadata(ffhp);
+ if (nfsd_has_cached_files(ndentry)) {
+ has_cached = true;
+ goto out_dput_old;
+ } else {
+ host_err = vfs_rename(fdir, odentry, tdir, ndentry, NULL, 0);
+ if (!host_err) {
+ host_err = commit_metadata(tfhp);
+ if (!host_err)
+ host_err = commit_metadata(ffhp);
+ }
}
out_dput_new:
dput(ndentry);
@@ -1745,12 +1729,26 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
* as that would do the wrong thing if the two directories
* were the same, so again we do it by hand.
*/
- fill_post_wcc(ffhp);
- fill_post_wcc(tfhp);
+ if (!has_cached) {
+ fill_post_wcc(ffhp);
+ fill_post_wcc(tfhp);
+ }
unlock_rename(tdentry, fdentry);
ffhp->fh_locked = tfhp->fh_locked = false;
fh_drop_write(ffhp);
+ /*
+ * If the target dentry has cached open files, then we need to try to
+ * close them prior to doing the rename. Flushing delayed fput
+ * shouldn't be done with locks held however, so we delay it until this
+ * point and then reattempt the whole shebang.
+ */
+ if (has_cached) {
+ has_cached = false;
+ nfsd_close_cached_files(ndentry);
+ dput(ndentry);
+ goto retry;
+ }
out:
return err;
}
@@ -1797,10 +1795,13 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (!type)
type = d_inode(rdentry)->i_mode & S_IFMT;
- if (type != S_IFDIR)
+ if (type != S_IFDIR) {
+ nfsd_close_cached_files(rdentry);
host_err = vfs_unlink(dirp, rdentry, NULL);
- else
+ } else {
host_err = vfs_rmdir(dirp, rdentry);
+ }
+
if (!host_err)
host_err = commit_metadata(fhp);
dput(rdentry);
@@ -2074,63 +2075,3 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
return err? nfserrno(err) : 0;
}
-
-void
-nfsd_racache_shutdown(void)
-{
- struct raparms *raparm, *last_raparm;
- unsigned int i;
-
- dprintk("nfsd: freeing readahead buffers.\n");
-
- for (i = 0; i < RAPARM_HASH_SIZE; i++) {
- raparm = raparm_hash[i].pb_head;
- while(raparm) {
- last_raparm = raparm;
- raparm = raparm->p_next;
- kfree(last_raparm);
- }
- raparm_hash[i].pb_head = NULL;
- }
-}
-/*
- * Initialize readahead param cache
- */
-int
-nfsd_racache_init(int cache_size)
-{
- int i;
- int j = 0;
- int nperbucket;
- struct raparms **raparm = NULL;
-
-
- if (raparm_hash[0].pb_head)
- return 0;
- nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
- nperbucket = max(2, nperbucket);
- cache_size = nperbucket * RAPARM_HASH_SIZE;
-
- dprintk("nfsd: allocating %d readahead buffers.\n", cache_size);
-
- for (i = 0; i < RAPARM_HASH_SIZE; i++) {
- spin_lock_init(&raparm_hash[i].pb_lock);
-
- raparm = &raparm_hash[i].pb_head;
- for (j = 0; j < nperbucket; j++) {
- *raparm = kzalloc(sizeof(struct raparms), GFP_KERNEL);
- if (!*raparm)
- goto out_nomem;
- raparm = &(*raparm)->p_next;
- }
- *raparm = NULL;
- }
-
- nfsdstats.ra_size = cache_size;
- return 0;
-
-out_nomem:
- dprintk("nfsd: kmalloc failed, freeing readahead buffers\n");
- nfsd_racache_shutdown();
- return -ENOMEM;
-}
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index db351247892d..a13fd9d7e1f5 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -40,8 +40,6 @@
typedef int (*nfsd_filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
/* nfsd/vfs.c */
-int nfsd_racache_init(int);
-void nfsd_racache_shutdown(void);
int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
struct svc_export **expp);
__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
@@ -75,18 +73,23 @@ __be32 do_nfsd_create(struct svc_rqst *, struct svc_fh *,
__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
loff_t, unsigned long);
#endif /* CONFIG_NFSD_V3 */
+int nfsd_open_break_lease(struct inode *, int);
__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
int, struct file **);
-struct raparms;
+__be32 nfsd_open_verified(struct svc_rqst *, struct svc_fh *, umode_t,
+ int, struct file **);
__be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct file *file, loff_t offset,
- unsigned long *count);
+ unsigned long *count,
+ u32 *eof);
__be32 nfsd_readv(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct file *file, loff_t offset,
struct kvec *vec, int vlen,
- unsigned long *count);
+ unsigned long *count,
+ u32 *eof);
__be32 nfsd_read(struct svc_rqst *, struct svc_fh *,
- loff_t, struct kvec *, int, unsigned long *);
+ loff_t, struct kvec *, int, unsigned long *,
+ u32 *eof);
__be32 nfsd_write(struct svc_rqst *, struct svc_fh *, loff_t,
struct kvec *, int, unsigned long *, int);
__be32 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
@@ -115,9 +118,6 @@ __be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
struct dentry *, int);
-struct raparms *nfsd_init_raparms(struct file *file);
-void nfsd_put_raparams(struct file *file, struct raparms *ra);
-
static inline int fh_want_write(struct svc_fh *fh)
{
int ret;
@@ -152,23 +152,4 @@ static inline int nfsd_create_is_exclusive(int createmode)
|| createmode == NFS4_CREATE_EXCLUSIVE4_1;
}
-static inline bool nfsd_eof_on_read(long requested, long read,
- loff_t offset, loff_t size)
-{
- /* We assume a short read means eof: */
- if (requested > read)
- return true;
- /*
- * A non-short read might also reach end of file. The spec
- * still requires us to set eof in that case.
- *
- * Further operations may have modified the file size since
- * the read, so the following check is not atomic with the read.
- * We've only seen that cause a problem for a client in the case
- * where the read returned a count of 0 without setting eof.
- * That case was fixed by the addition of the above check.
- */
- return (offset + read >= size);
-}
-
#endif /* LINUX_NFSD_VFS_H */
diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h
index 2cb29e961a76..99ff9f403ff1 100644
--- a/fs/nfsd/xdr3.h
+++ b/fs/nfsd/xdr3.h
@@ -151,7 +151,7 @@ struct nfsd3_readres {
__be32 status;
struct svc_fh fh;
unsigned long count;
- int eof;
+ __u32 eof;
};
struct nfsd3_writeres {
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index d64c870f998a..f4737d66ee98 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -273,15 +273,14 @@ struct nfsd4_open_downgrade {
struct nfsd4_read {
- stateid_t rd_stateid; /* request */
- u64 rd_offset; /* request */
- u32 rd_length; /* request */
- int rd_vlen;
- struct file *rd_filp;
- bool rd_tmp_file;
+ stateid_t rd_stateid; /* request */
+ u64 rd_offset; /* request */
+ u32 rd_length; /* request */
+ int rd_vlen;
+ struct nfsd_file *rd_nf;
- struct svc_rqst *rd_rqstp; /* response */
- struct svc_fh * rd_fhp; /* response */
+ struct svc_rqst *rd_rqstp; /* response */
+ struct svc_fh *rd_fhp; /* response */
};
struct nfsd4_readdir {
@@ -538,8 +537,8 @@ struct nfsd4_copy {
struct nfs4_client *cp_clp;
- struct file *file_src;
- struct file *file_dst;
+ struct nfsd_file *nf_src;
+ struct nfsd_file *nf_dst;
stateid_t cp_stateid;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 91b9dac6b2cc..4ba73dbf3e8d 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -1354,6 +1354,7 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case NILFS_IOCTL_SYNC:
case NILFS_IOCTL_RESIZE:
case NILFS_IOCTL_SET_ALLOC_RANGE:
+ case FITRIM:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index c03758c91481..7a42c2ebe28d 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -13,6 +13,7 @@
#include <linux/sched/signal.h>
#include <linux/dnotify.h>
#include <linux/init.h>
+#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/fdtable.h>
@@ -279,6 +280,17 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
goto out_err;
}
+ /*
+ * convert the userspace DN_* "arg" to the internal FS_*
+ * defined in fsnotify
+ */
+ mask = convert_arg(arg);
+
+ error = security_path_notify(&filp->f_path, mask,
+ FSNOTIFY_OBJ_TYPE_INODE);
+ if (error)
+ goto out_err;
+
/* expect most fcntl to add new rather than augment old */
dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL);
if (!dn) {
@@ -293,9 +305,6 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
goto out_err;
}
- /* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
- mask = convert_arg(arg);
-
/* set up the new_fsn_mark and new_dn_mark */
new_fsn_mark = &new_dn_mark->fsn_mark;
fsnotify_init_mark(new_fsn_mark, dnotify_group);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 91006f47e420..0aa362b88550 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -523,12 +523,13 @@ static const struct file_operations fanotify_fops = {
.fasync = NULL,
.release = fanotify_release,
.unlocked_ioctl = fanotify_ioctl,
- .compat_ioctl = fanotify_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
static int fanotify_find_path(int dfd, const char __user *filename,
- struct path *path, unsigned int flags)
+ struct path *path, unsigned int flags, __u64 mask,
+ unsigned int obj_type)
{
int ret;
@@ -567,8 +568,15 @@ static int fanotify_find_path(int dfd, const char __user *filename,
/* you can only watch an inode if you have read permissions on it */
ret = inode_permission(path->dentry->d_inode, MAY_READ);
+ if (ret) {
+ path_put(path);
+ goto out;
+ }
+
+ ret = security_path_notify(path, mask, obj_type);
if (ret)
path_put(path);
+
out:
return ret;
}
@@ -947,6 +955,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
__kernel_fsid_t __fsid, *fsid = NULL;
u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS;
unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
+ unsigned int obj_type;
int ret;
pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
@@ -961,8 +970,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
switch (mark_type) {
case FAN_MARK_INODE:
+ obj_type = FSNOTIFY_OBJ_TYPE_INODE;
+ break;
case FAN_MARK_MOUNT:
+ obj_type = FSNOTIFY_OBJ_TYPE_VFSMOUNT;
+ break;
case FAN_MARK_FILESYSTEM:
+ obj_type = FSNOTIFY_OBJ_TYPE_SB;
break;
default:
return -EINVAL;
@@ -1030,7 +1044,8 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
goto fput_and_out;
}
- ret = fanotify_find_path(dfd, pathname, &path, flags);
+ ret = fanotify_find_path(dfd, pathname, &path, flags,
+ (mask & ALL_FSNOTIFY_EVENTS), obj_type);
if (ret)
goto fput_and_out;
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 1e2bfd26b352..ef83f4020554 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -50,7 +50,7 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
f.handle.handle_bytes = sizeof(f.pad);
size = f.handle.handle_bytes >> 2;
- ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
+ ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, NULL);
if ((ret == FILEID_INVALID) || (ret < 0)) {
WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
return;
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 2ecef6155fc0..3e77b728a22b 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -381,8 +381,6 @@ out:
}
EXPORT_SYMBOL_GPL(fsnotify);
-extern struct kmem_cache *fsnotify_mark_connector_cachep;
-
static __init int fsnotify_init(void)
{
int ret;
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 5a00121fb219..ff2063ec6b0f 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -54,8 +54,6 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
{
fsnotify_destroy_marks(&sb->s_fsnotify_marks);
}
-/* Wait until all marks queued for destruction are destroyed */
-extern void fsnotify_wait_marks_destroyed(void);
/*
* update the dentry->d_flags of all of inode's children to indicate if inode cares
@@ -67,4 +65,6 @@ extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
extern struct fsnotify_event_holder *fsnotify_alloc_event_holder(void);
extern void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder);
+extern struct kmem_cache *fsnotify_mark_connector_cachep;
+
#endif /* __FS_NOTIFY_FSNOTIFY_H_ */
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 0391190305cc..133f723aca07 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -108,6 +108,7 @@ void fsnotify_put_group(struct fsnotify_group *group)
if (refcount_dec_and_test(&group->refcnt))
fsnotify_final_destroy_group(group);
}
+EXPORT_SYMBOL_GPL(fsnotify_put_group);
/*
* Create a new fsnotify_group and hold a reference for the group returned.
@@ -137,6 +138,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
return group;
}
+EXPORT_SYMBOL_GPL(fsnotify_alloc_group);
int fsnotify_fasync(int fd, struct file *file, int on)
{
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 0b815178126e..107537a543fd 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -30,6 +30,7 @@
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
+#include <linux/security.h>
#include "inotify.h"
#include "../fdinfo.h"
@@ -331,7 +332,8 @@ static const struct file_operations inotify_fops = {
/*
* find_inode - resolve a user-given path to a specific inode
*/
-static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
+static int inotify_find_inode(const char __user *dirname, struct path *path,
+ unsigned int flags, __u64 mask)
{
int error;
@@ -340,8 +342,15 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error;
/* you can only watch an inode if you have read permissions on it */
error = inode_permission(path->dentry->d_inode, MAY_READ);
+ if (error) {
+ path_put(path);
+ return error;
+ }
+ error = security_path_notify(path, mask,
+ FSNOTIFY_OBJ_TYPE_INODE);
if (error)
path_put(path);
+
return error;
}
@@ -733,7 +742,8 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
if (mask & IN_ONLYDIR)
flags |= LOOKUP_DIRECTORY;
- ret = inotify_find_inode(pathname, &path, flags);
+ ret = inotify_find_inode(pathname, &path, flags,
+ (mask & IN_ALL_EVENTS));
if (ret)
goto fput_and_out;
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 99ddd126f6f0..1d96216dffd1 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -276,6 +276,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
queue_delayed_work(system_unbound_wq, &reaper_work,
FSNOTIFY_REAPER_DELAY);
}
+EXPORT_SYMBOL_GPL(fsnotify_put_mark);
/*
* Get mark reference when we found the mark via lockless traversal of object
@@ -430,6 +431,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
mutex_unlock(&group->mark_mutex);
fsnotify_free_mark(mark);
}
+EXPORT_SYMBOL_GPL(fsnotify_destroy_mark);
/*
* Sorting function for lists of fsnotify marks.
@@ -685,6 +687,7 @@ int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp,
mutex_unlock(&group->mark_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(fsnotify_add_mark);
/*
* Given a list of marks, find the mark associated with given group. If found
@@ -711,6 +714,7 @@ struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
spin_unlock(&conn->lock);
return NULL;
}
+EXPORT_SYMBOL_GPL(fsnotify_find_mark);
/* Clear any marks in a group with given type mask */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
@@ -809,6 +813,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
mark->group = group;
WRITE_ONCE(mark->connector, NULL);
}
+EXPORT_SYMBOL_GPL(fsnotify_init_mark);
/*
* Destroy all marks in destroy_list, waits for SRCU period to finish before
@@ -837,3 +842,4 @@ void fsnotify_wait_marks_destroyed(void)
{
flush_delayed_work(&reaper_work);
}
+EXPORT_SYMBOL_GPL(fsnotify_wait_marks_destroyed);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 8baa34baf548..6c7388430ad3 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2899,15 +2899,18 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
ia_valid |= ATTR_MTIME | ATTR_CTIME;
}
}
- if (ia_valid & ATTR_ATIME)
- vi->i_atime = timespec64_trunc(attr->ia_atime,
- vi->i_sb->s_time_gran);
- if (ia_valid & ATTR_MTIME)
- vi->i_mtime = timespec64_trunc(attr->ia_mtime,
- vi->i_sb->s_time_gran);
- if (ia_valid & ATTR_CTIME)
- vi->i_ctime = timespec64_trunc(attr->ia_ctime,
- vi->i_sb->s_time_gran);
+ if (ia_valid & ATTR_ATIME) {
+ vi->i_atime = timestamp_truncate(attr->ia_atime,
+ vi);
+ }
+ if (ia_valid & ATTR_MTIME) {
+ vi->i_mtime = timestamp_truncate(attr->ia_mtime,
+ vi);
+ }
+ if (ia_valid & ATTR_CTIME) {
+ vi->i_ctime = timestamp_truncate(attr->ia_ctime,
+ vi);
+ }
mark_inode_dirty(vi);
out:
return err;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 20c841a906f2..3aac5c917afe 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -71,7 +71,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
}
/* Read, map, and pin the page. */
page = ntfs_map_page(mft_vi->i_mapping, index);
- if (likely(!IS_ERR(page))) {
+ if (!IS_ERR(page)) {
/* Catch multi sector transfer fixup errors. */
if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
ofs)))) {
@@ -154,7 +154,7 @@ MFT_RECORD *map_mft_record(ntfs_inode *ni)
mutex_lock(&ni->mrec_lock);
m = map_mft_record_page(ni);
- if (likely(!IS_ERR(m)))
+ if (!IS_ERR(m))
return m;
mutex_unlock(&ni->mrec_lock);
@@ -271,7 +271,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
m = map_mft_record(ni);
/* map_mft_record() has incremented this on success. */
atomic_dec(&ni->count);
- if (likely(!IS_ERR(m))) {
+ if (!IS_ERR(m)) {
/* Verify the sequence number. */
if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
ntfs_debug("Done 1.");
@@ -1303,7 +1303,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mftbmp_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
- if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+ if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft bitmap attribute.");
@@ -1734,7 +1734,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
read_unlock_irqrestore(&mft_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mft_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
- if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+ if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mft_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft data attribute.");
@@ -1776,7 +1776,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
do {
rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE,
true);
- if (likely(!IS_ERR(rl2)))
+ if (!IS_ERR(rl2))
break;
if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
ntfs_error(vol->sb, "Failed to allocate the minimal "
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 2d3cc9e3395d..4e6a44bc654c 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -115,7 +115,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
dent_ino = MREF(mref);
ntfs_debug("Found inode 0x%lx. Calling ntfs_iget.", dent_ino);
dent_inode = ntfs_iget(vol->sb, dent_ino);
- if (likely(!IS_ERR(dent_inode))) {
+ if (!IS_ERR(dent_inode)) {
/* Consistency check. */
if (is_bad_inode(dent_inode) || MSEQNO(mref) ==
NTFS_I(dent_inode)->seq_no ||
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 508744a93180..97932fb5179c 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -951,7 +951,7 @@ mpa_err:
}
/* Now combine the new and old runlists checking for overlaps. */
old_rl = ntfs_runlists_merge(old_rl, rl);
- if (likely(!IS_ERR(old_rl)))
+ if (!IS_ERR(old_rl))
return old_rl;
ntfs_free(rl);
ntfs_error(vol->sb, "Failed to merge runlists.");
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 29621d40f448..7dc3bc604f78 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1475,7 +1475,7 @@ not_enabled:
kfree(name);
/* Get the inode. */
tmp_ino = ntfs_iget(vol->sb, MREF(mref));
- if (unlikely(IS_ERR(tmp_ino) || is_bad_inode(tmp_ino))) {
+ if (IS_ERR(tmp_ino) || unlikely(is_bad_inode(tmp_ino))) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(vol->sb, "Failed to load $UsnJrnl.");
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 3e7da392aa6f..bb981ec76456 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -327,8 +327,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
- if (IS_ERR(acl) || !acl)
- return PTR_ERR(acl);
+ if (IS_ERR_OR_NULL(acl))
+ return PTR_ERR_OR_ZERO(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (ret)
return ret;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 0c335b51043d..88534eb0e7c2 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -2288,9 +2288,9 @@ static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
int ret = 0;
int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
- if (handle->h_buffer_credits < credits)
+ if (jbd2_handle_buffer_credits(handle) < credits)
ret = ocfs2_extend_trans(handle,
- credits - handle->h_buffer_credits);
+ credits - jbd2_handle_buffer_credits(handle));
return ret;
}
@@ -2367,7 +2367,7 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
struct ocfs2_path *right_path,
struct ocfs2_path **ret_left_path)
{
- int ret, start, orig_credits = handle->h_buffer_credits;
+ int ret, start, orig_credits = jbd2_handle_buffer_credits(handle);
u32 cpos;
struct ocfs2_path *left_path = NULL;
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
@@ -3148,7 +3148,7 @@ static int ocfs2_rotate_tree_left(handle_t *handle,
struct ocfs2_path *path,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
- int ret, orig_credits = handle->h_buffer_credits;
+ int ret, orig_credits = jbd2_handle_buffer_credits(handle);
struct ocfs2_path *tmp_path = NULL, *restart_path = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
@@ -3386,8 +3386,8 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
right_path);
ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
- handle->h_buffer_credits,
- right_path);
+ jbd2_handle_buffer_credits(handle),
+ right_path);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3548,8 +3548,8 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
right_path);
ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
- handle->h_buffer_credits,
- left_path);
+ jbd2_handle_buffer_credits(handle),
+ left_path);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3623,7 +3623,7 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
le16_to_cpu(el->l_next_free_rec) == 1) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
right_path);
if (ret) {
mlog_errno(ret);
@@ -3669,7 +3669,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -3725,7 +3725,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -3755,7 +3755,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -3799,7 +3799,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
if (ctxt->c_split_covers_rec) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -5358,7 +5358,7 @@ static int ocfs2_truncate_rec(handle_t *handle,
if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -5427,8 +5427,8 @@ static int ocfs2_truncate_rec(handle_t *handle,
}
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
- path);
+ jbd2_handle_buffer_credits(handle),
+ path);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5993,6 +5993,7 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
struct buffer_head *data_alloc_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
+ struct ocfs2_journal *journal = osb->journal;
BUG_ON(inode_trylock(tl_inode));
@@ -6013,6 +6014,20 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
goto out;
}
+ /* Appending the truncate log (TA) and flushing the truncate log (TF) are
+ * two separate transactions. They can both be committed but not
+ * checkpointed. If a crash occurs then, both transactions will be
+ * replayed with several clusters already released to the global bitmap.
+ * Then the truncate log will be replayed, resulting in a cluster double free.
+ */
+ jbd2_journal_lock_updates(journal->j_journal);
+ status = jbd2_journal_flush(journal->j_journal);
+ jbd2_journal_unlock_updates(journal->j_journal);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out;
+ }
+
data_alloc_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
@@ -6792,6 +6807,8 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
struct page *page, int zero, u64 *phys)
{
int ret, partial = 0;
+ loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
+ loff_t length = to - from;
ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
if (ret)
@@ -6811,7 +6828,8 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
if (ret < 0)
mlog_errno(ret);
else if (ocfs2_should_order_data(inode)) {
- ret = ocfs2_jbd2_file_inode(handle, inode);
+ ret = ocfs2_jbd2_inode_add_write(handle, inode,
+ start_byte, length);
if (ret < 0)
mlog_errno(ret);
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index a4c905d6b575..3a67a6518ddf 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -11,7 +11,6 @@
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
-#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
@@ -942,7 +941,8 @@ static void ocfs2_write_failure(struct inode *inode,
if (tmppage && page_has_buffers(tmppage)) {
if (ocfs2_should_order_data(inode))
- ocfs2_jbd2_file_inode(wc->w_handle, inode);
+ ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
+ user_pos, user_len);
block_commit_write(tmppage, from, to);
}
@@ -2023,8 +2023,14 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
}
if (page_has_buffers(tmppage)) {
- if (handle && ocfs2_should_order_data(inode))
- ocfs2_jbd2_file_inode(handle, inode);
+ if (handle && ocfs2_should_order_data(inode)) {
+ loff_t start_byte =
+ ((loff_t)tmppage->index << PAGE_SHIFT) +
+ from;
+ loff_t length = to - from;
+ ocfs2_jbd2_inode_add_write(handle, inode,
+ start_byte, length);
+ }
block_commit_write(tmppage, from, to);
}
}
@@ -2042,7 +2048,8 @@ out_write_size:
inode->i_mtime = inode->i_ctime = current_time(inode);
di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
- ocfs2_update_inode_fsync_trans(handle, inode, 1);
+ if (handle)
+ ocfs2_update_inode_fsync_trans(handle, inode, 1);
}
if (handle)
ocfs2_journal_dirty(handle, wc->w_di_bh);
@@ -2139,13 +2146,30 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
struct ocfs2_dio_write_ctxt *dwc = NULL;
struct buffer_head *di_bh = NULL;
u64 p_blkno;
- loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
+ unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
+ loff_t pos = iblock << i_blkbits;
+ sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
unsigned len, total_len = bh_result->b_size;
int ret = 0, first_get_block = 0;
len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
len = min(total_len, len);
+ /*
+ * bh_result->b_size is counted in get_more_blocks according to the write
+ * "pos" and "end"; we need to map twice to return different buffer states:
+ * 1. area within the file size: do not set NEW;
+ * 2. area beyond the file size: set NEW.
+ *
+ * iblock endblk
+ * |--------|---------|---------|---------
+ * |<-------area in file------->|
+ */
+
+ if ((iblock <= endblk) &&
+ ((iblock + ((len - 1) >> i_blkbits)) > endblk))
+ len = (endblk - iblock + 1) << i_blkbits;
+
mlog(0, "get block of %lu at %llu:%u req %u\n",
inode->i_ino, pos, len, total_len);
@@ -2229,6 +2253,9 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
if (desc->c_needs_zero)
set_buffer_new(bh_result);
+ if (iblock > endblk)
+ set_buffer_new(bh_result);
+
/* May sleep in end_io. It should not happen in a irq context. So defer
* it to dio work queue. */
set_buffer_defer_completion(bh_result);
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index 429e6a8359a5..eaf042feaf5e 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -231,14 +231,6 @@ static int blockcheck_u64_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(blockcheck_fops, blockcheck_u64_get, NULL, "%llu\n");
-static struct dentry *blockcheck_debugfs_create(const char *name,
- struct dentry *parent,
- u64 *value)
-{
- return debugfs_create_file(name, S_IFREG | S_IRUSR, parent, value,
- &blockcheck_fops);
-}
-
static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
{
if (stats) {
@@ -250,16 +242,20 @@ static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
static void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
struct dentry *parent)
{
- stats->b_debug_dir = debugfs_create_dir("blockcheck", parent);
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("blockcheck", parent);
+ stats->b_debug_dir = dir;
+
+ debugfs_create_file("blocks_checked", S_IFREG | S_IRUSR, dir,
+ &stats->b_check_count, &blockcheck_fops);
- blockcheck_debugfs_create("blocks_checked", stats->b_debug_dir,
- &stats->b_check_count);
+ debugfs_create_file("checksums_failed", S_IFREG | S_IRUSR, dir,
+ &stats->b_failure_count, &blockcheck_fops);
- blockcheck_debugfs_create("checksums_failed", stats->b_debug_dir,
- &stats->b_failure_count);
+ debugfs_create_file("ecc_recoveries", S_IFREG | S_IRUSR, dir,
+ &stats->b_recover_count, &blockcheck_fops);
- blockcheck_debugfs_create("ecc_recoveries", stats->b_debug_dir,
- &stats->b_recover_count);
}
#else
static inline void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index f1b613327ac8..a368350d4c27 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -225,10 +225,6 @@ struct o2hb_region {
unsigned int hr_region_num;
struct dentry *hr_debug_dir;
- struct dentry *hr_debug_livenodes;
- struct dentry *hr_debug_regnum;
- struct dentry *hr_debug_elapsed_time;
- struct dentry *hr_debug_pinned;
struct o2hb_debug_buf *hr_db_livenodes;
struct o2hb_debug_buf *hr_db_regnum;
struct o2hb_debug_buf *hr_db_elapsed_time;
@@ -1394,21 +1390,20 @@ void o2hb_exit(void)
kfree(o2hb_db_failedregions);
}
-static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir,
- struct o2hb_debug_buf **db, int db_len,
- int type, int size, int len, void *data)
+static void o2hb_debug_create(const char *name, struct dentry *dir,
+ struct o2hb_debug_buf **db, int db_len, int type,
+ int size, int len, void *data)
{
*db = kmalloc(db_len, GFP_KERNEL);
if (!*db)
- return NULL;
+ return;
(*db)->db_type = type;
(*db)->db_size = size;
(*db)->db_len = len;
(*db)->db_data = data;
- return debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db,
- &o2hb_debug_fops);
+ debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db, &o2hb_debug_fops);
}
static void o2hb_debug_init(void)
@@ -1525,11 +1520,7 @@ static void o2hb_region_release(struct config_item *item)
kfree(reg->hr_slots);
- debugfs_remove(reg->hr_debug_livenodes);
- debugfs_remove(reg->hr_debug_regnum);
- debugfs_remove(reg->hr_debug_elapsed_time);
- debugfs_remove(reg->hr_debug_pinned);
- debugfs_remove(reg->hr_debug_dir);
+ debugfs_remove_recursive(reg->hr_debug_dir);
kfree(reg->hr_db_livenodes);
kfree(reg->hr_db_regnum);
kfree(reg->hr_db_elapsed_time);
@@ -1988,69 +1979,33 @@ static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group
: NULL;
}
-static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
+static void o2hb_debug_region_init(struct o2hb_region *reg,
+ struct dentry *parent)
{
- int ret = -ENOMEM;
+ struct dentry *dir;
- reg->hr_debug_dir =
- debugfs_create_dir(config_item_name(&reg->hr_item), dir);
- if (!reg->hr_debug_dir) {
- mlog_errno(ret);
- goto bail;
- }
+ dir = debugfs_create_dir(config_item_name(&reg->hr_item), parent);
+ reg->hr_debug_dir = dir;
- reg->hr_debug_livenodes =
- o2hb_debug_create(O2HB_DEBUG_LIVENODES,
- reg->hr_debug_dir,
- &(reg->hr_db_livenodes),
- sizeof(*(reg->hr_db_livenodes)),
- O2HB_DB_TYPE_REGION_LIVENODES,
- sizeof(reg->hr_live_node_bitmap),
- O2NM_MAX_NODES, reg);
- if (!reg->hr_debug_livenodes) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_LIVENODES, dir, &(reg->hr_db_livenodes),
+ sizeof(*(reg->hr_db_livenodes)),
+ O2HB_DB_TYPE_REGION_LIVENODES,
+ sizeof(reg->hr_live_node_bitmap), O2NM_MAX_NODES,
+ reg);
- reg->hr_debug_regnum =
- o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER,
- reg->hr_debug_dir,
- &(reg->hr_db_regnum),
- sizeof(*(reg->hr_db_regnum)),
- O2HB_DB_TYPE_REGION_NUMBER,
- 0, O2NM_MAX_NODES, reg);
- if (!reg->hr_debug_regnum) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER, dir, &(reg->hr_db_regnum),
+ sizeof(*(reg->hr_db_regnum)),
+ O2HB_DB_TYPE_REGION_NUMBER, 0, O2NM_MAX_NODES, reg);
- reg->hr_debug_elapsed_time =
- o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME,
- reg->hr_debug_dir,
- &(reg->hr_db_elapsed_time),
- sizeof(*(reg->hr_db_elapsed_time)),
- O2HB_DB_TYPE_REGION_ELAPSED_TIME,
- 0, 0, reg);
- if (!reg->hr_debug_elapsed_time) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME, dir,
+ &(reg->hr_db_elapsed_time),
+ sizeof(*(reg->hr_db_elapsed_time)),
+ O2HB_DB_TYPE_REGION_ELAPSED_TIME, 0, 0, reg);
- reg->hr_debug_pinned =
- o2hb_debug_create(O2HB_DEBUG_REGION_PINNED,
- reg->hr_debug_dir,
- &(reg->hr_db_pinned),
- sizeof(*(reg->hr_db_pinned)),
- O2HB_DB_TYPE_REGION_PINNED,
- 0, 0, reg);
- if (!reg->hr_debug_pinned) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, dir, &(reg->hr_db_pinned),
+ sizeof(*(reg->hr_db_pinned)),
+ O2HB_DB_TYPE_REGION_PINNED, 0, 0, reg);
- ret = 0;
-bail:
- return ret;
}
static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
@@ -2106,11 +2061,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
if (ret)
goto unregister_handler;
- ret = o2hb_debug_region_init(reg, o2hb_debug_dir);
- if (ret) {
- config_item_put(&reg->hr_item);
- goto unregister_handler;
- }
+ o2hb_debug_region_init(reg, o2hb_debug_dir);
return &reg->hr_item;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 784426dee56c..bdef72c0f099 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3636,7 +3636,7 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
int i, j, num_used;
u32 major_hash;
struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
- struct ocfs2_dx_entry_list *orig_list, *new_list, *tmp_list;
+ struct ocfs2_dx_entry_list *orig_list, *tmp_list;
struct ocfs2_dx_entry *dx_entry;
tmp_list = &tmp_dx_leaf->dl_list;
@@ -3645,7 +3645,6 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
orig_list = &orig_dx_leaf->dl_list;
new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
- new_list = &new_dx_leaf->dl_list;
num_used = le16_to_cpu(orig_list->de_num_used);
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 69a429b625cc..aaf24548b02a 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -142,7 +142,6 @@ struct dlm_ctxt
atomic_t res_tot_count;
atomic_t res_cur_count;
- struct dlm_debug_ctxt *dlm_debug_ctxt;
struct dentry *dlm_debugfs_subroot;
/* NOTE: Next three are protected by dlm_domain_lock */
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index a4b58ba99927..4d0b452012b2 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -853,67 +853,34 @@ static const struct file_operations debug_state_fops = {
/* files in subroot */
void dlm_debug_init(struct dlm_ctxt *dlm)
{
- struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
-
/* for dumping dlm_ctxt */
- dc->debug_state_dentry = debugfs_create_file(DLM_DEBUGFS_DLM_STATE,
- S_IFREG|S_IRUSR,
- dlm->dlm_debugfs_subroot,
- dlm, &debug_state_fops);
+ debugfs_create_file(DLM_DEBUGFS_DLM_STATE, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm, &debug_state_fops);
/* for dumping lockres */
- dc->debug_lockres_dentry =
- debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE,
- S_IFREG|S_IRUSR,
- dlm->dlm_debugfs_subroot,
- dlm, &debug_lockres_fops);
+ debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm, &debug_lockres_fops);
/* for dumping mles */
- dc->debug_mle_dentry = debugfs_create_file(DLM_DEBUGFS_MLE_STATE,
- S_IFREG|S_IRUSR,
- dlm->dlm_debugfs_subroot,
- dlm, &debug_mle_fops);
+ debugfs_create_file(DLM_DEBUGFS_MLE_STATE, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm, &debug_mle_fops);
/* for dumping lockres on the purge list */
- dc->debug_purgelist_dentry =
- debugfs_create_file(DLM_DEBUGFS_PURGE_LIST,
- S_IFREG|S_IRUSR,
- dlm->dlm_debugfs_subroot,
- dlm, &debug_purgelist_fops);
-}
-
-void dlm_debug_shutdown(struct dlm_ctxt *dlm)
-{
- struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
-
- if (dc) {
- debugfs_remove(dc->debug_purgelist_dentry);
- debugfs_remove(dc->debug_mle_dentry);
- debugfs_remove(dc->debug_lockres_dentry);
- debugfs_remove(dc->debug_state_dentry);
- kfree(dc);
- dc = NULL;
- }
+ debugfs_create_file(DLM_DEBUGFS_PURGE_LIST, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm,
+ &debug_purgelist_fops);
}
/* subroot - domain dir */
-int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
{
- dlm->dlm_debug_ctxt = kzalloc(sizeof(struct dlm_debug_ctxt),
- GFP_KERNEL);
- if (!dlm->dlm_debug_ctxt) {
- mlog_errno(-ENOMEM);
- return -ENOMEM;
- }
-
dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name,
dlm_debugfs_root);
- return 0;
}
void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
{
- debugfs_remove(dlm->dlm_debugfs_subroot);
+ debugfs_remove_recursive(dlm->dlm_debugfs_subroot);
}
/* debugfs root */
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index 7d0c7c9013ce..f8fd8680a4b6 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -14,13 +14,6 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle);
#ifdef CONFIG_DEBUG_FS
-struct dlm_debug_ctxt {
- struct dentry *debug_state_dentry;
- struct dentry *debug_lockres_dentry;
- struct dentry *debug_mle_dentry;
- struct dentry *debug_purgelist_dentry;
-};
-
struct debug_lockres {
int dl_len;
char *dl_buf;
@@ -29,9 +22,8 @@ struct debug_lockres {
};
void dlm_debug_init(struct dlm_ctxt *dlm);
-void dlm_debug_shutdown(struct dlm_ctxt *dlm);
-int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm);
+void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm);
void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm);
void dlm_create_debugfs_root(void);
@@ -42,12 +34,8 @@ void dlm_destroy_debugfs_root(void);
static inline void dlm_debug_init(struct dlm_ctxt *dlm)
{
}
-static inline void dlm_debug_shutdown(struct dlm_ctxt *dlm)
-{
-}
-static inline int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+static inline void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
{
- return 0;
}
static inline void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
{
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7338b5d4647c..ee6f459f9770 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -387,7 +387,6 @@ static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
{
dlm_unregister_domain_handlers(dlm);
- dlm_debug_shutdown(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
dlm_destroy_dlm_worker(dlm);
@@ -1938,7 +1937,6 @@ bail:
if (status) {
dlm_unregister_domain_handlers(dlm);
- dlm_debug_shutdown(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
dlm_destroy_dlm_worker(dlm);
@@ -1992,9 +1990,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
dlm->key = key;
dlm->node_num = o2nm_this_node();
- ret = dlm_create_debugfs_subroot(dlm);
- if (ret < 0)
- goto leave;
+ dlm_create_debugfs_subroot(dlm);
spin_lock_init(&dlm->spinlock);
spin_lock_init(&dlm->master_lock);
@@ -2056,6 +2052,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
mlog(0, "context init: refcount %u\n",
kref_read(&dlm->dlm_refs));
+ ret = 0;
leave:
if (ret < 0 && dlm) {
if (dlm->master_hash)
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index e78657742bd8..3883633e82eb 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -90,7 +90,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
enum dlm_status status;
int actions = 0;
int in_use;
- u8 owner;
+ u8 owner;
+ int recovery_wait = 0;
mlog(0, "master_node = %d, valblk = %d\n", master_node,
flags & LKM_VALBLK);
@@ -193,9 +194,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
}
if (flags & LKM_CANCEL)
lock->cancel_pending = 0;
- else
- lock->unlock_pending = 0;
-
+ else {
+ if (!lock->unlock_pending)
+ recovery_wait = 1;
+ else
+ lock->unlock_pending = 0;
+ }
}
/* get an extra ref on lock. if we are just switching
@@ -229,6 +233,17 @@ leave:
spin_unlock(&res->spinlock);
wake_up(&res->wq);
+ if (recovery_wait) {
+ spin_lock(&res->spinlock);
+ /* The unlock request will directly succeed after the owner dies,
+ * and the lock is already removed from the grant list. We have to
+ * wait for RECOVERING to finish or we miss the chance to purge it,
+ * since the removal is much faster than the RECOVERING process.
+ */
+ __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
+ spin_unlock(&res->spinlock);
+ }
+
/* let the caller's final dlm_lock_put handle the actual kfree */
if (actions & DLM_UNLOCK_FREE_LOCK) {
/* this should always be coupled with list removal */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 14207234fa3d..1c4c51f3df60 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1687,7 +1687,7 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
spin_unlock_irqrestore(&lockres->l_lock, flags);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (lockres->l_lockdep_map.key != NULL)
- rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
+ rwsem_release(&lockres->l_lockdep_map, caller_ip);
#endif
}
@@ -2508,9 +2508,7 @@ bail:
ocfs2_inode_unlock(inode, ex);
}
- if (local_bh)
- brelse(local_bh);
-
+ brelse(local_bh);
return status;
}
@@ -2593,8 +2591,7 @@ int ocfs2_inode_lock_atime(struct inode *inode,
*level = 1;
if (ocfs2_should_update_atime(inode, vfsmnt))
ocfs2_update_inode_atime(inode, bh);
- if (bh)
- brelse(bh);
+ brelse(bh);
} else
*level = 0;
@@ -3012,8 +3009,6 @@ struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
kref_init(&dlm_debug->d_refcnt);
INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
- dlm_debug->d_locking_state = NULL;
- dlm_debug->d_locking_filter = NULL;
dlm_debug->d_filter_secs = 0;
out:
return dlm_debug;
@@ -3282,27 +3277,19 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{
struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
- dlm_debug->d_locking_state = debugfs_create_file("locking_state",
- S_IFREG|S_IRUSR,
- osb->osb_debug_root,
- osb,
- &ocfs2_dlm_debug_fops);
+ debugfs_create_file("locking_state", S_IFREG|S_IRUSR,
+ osb->osb_debug_root, osb, &ocfs2_dlm_debug_fops);
- dlm_debug->d_locking_filter = debugfs_create_u32("locking_filter",
- 0600,
- osb->osb_debug_root,
- &dlm_debug->d_filter_secs);
+ debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
+ &dlm_debug->d_filter_secs);
}
static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
{
struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
- if (dlm_debug) {
- debugfs_remove(dlm_debug->d_locking_state);
- debugfs_remove(dlm_debug->d_locking_filter);
+ if (dlm_debug)
ocfs2_put_dlm_debug(dlm_debug);
- }
}
int ocfs2_dlm_init(struct ocfs2_super *osb)
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index e66a249fe07c..e3e2d1b2af51 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -590,8 +590,7 @@ int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
*extent_flags = rec->e_flags;
}
out:
- if (eb_bh)
- brelse(eb_bh);
+ brelse(eb_bh);
return ret;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 4435df3e5adb..9876db52913a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -706,7 +706,9 @@ leave:
* Thus, we need to explicitly order the zeroed pages.
*/
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
- struct buffer_head *di_bh)
+ struct buffer_head *di_bh,
+ loff_t start_byte,
+ loff_t length)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle = NULL;
@@ -722,7 +724,7 @@ static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
goto out;
}
- ret = ocfs2_jbd2_file_inode(handle, inode);
+ ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -761,7 +763,9 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
BUG_ON(abs_from & (inode->i_blkbits - 1));
- handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
+ handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
+ abs_from,
+ abs_to - abs_from);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
@@ -1226,6 +1230,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
if (IS_ERR(transfer_to[USRQUOTA])) {
status = PTR_ERR(transfer_to[USRQUOTA]);
+ transfer_to[USRQUOTA] = NULL;
goto bail_unlock;
}
}
@@ -1235,6 +1240,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
if (IS_ERR(transfer_to[GRPQUOTA])) {
status = PTR_ERR(transfer_to[GRPQUOTA]);
+ transfer_to[GRPQUOTA] = NULL;
goto bail_unlock;
}
}
@@ -2092,54 +2098,89 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
return 0;
}
-static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
- struct file *file,
- loff_t pos, size_t count,
- int *meta_level)
+static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
+ struct buffer_head **di_bh,
+ int meta_level,
+ int overwrite_io,
+ int write_sem,
+ int wait)
{
- int ret;
- struct buffer_head *di_bh = NULL;
- u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
- u32 clusters =
- ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+ int ret = 0;
- ret = ocfs2_inode_lock(inode, &di_bh, 1);
- if (ret) {
- mlog_errno(ret);
+ if (wait)
+ ret = ocfs2_inode_lock(inode, NULL, meta_level);
+ else
+ ret = ocfs2_try_inode_lock(inode,
+ overwrite_io ? NULL : di_bh, meta_level);
+ if (ret < 0)
goto out;
+
+ if (wait) {
+ if (write_sem)
+ down_write(&OCFS2_I(inode)->ip_alloc_sem);
+ else
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+ } else {
+ if (write_sem)
+ ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+ else
+ ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+
+ if (!ret) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
}
- *meta_level = 1;
+ return ret;
- ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
- if (ret)
- mlog_errno(ret);
+out_unlock:
+ brelse(*di_bh);
+ ocfs2_inode_unlock(inode, meta_level);
out:
- brelse(di_bh);
return ret;
}
+static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
+ struct buffer_head **di_bh,
+ int meta_level,
+ int write_sem)
+{
+ if (write_sem)
+ up_write(&OCFS2_I(inode)->ip_alloc_sem);
+ else
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ brelse(*di_bh);
+ *di_bh = NULL;
+
+ if (meta_level >= 0)
+ ocfs2_inode_unlock(inode, meta_level);
+}
+
static int ocfs2_prepare_inode_for_write(struct file *file,
loff_t pos, size_t count, int wait)
{
int ret = 0, meta_level = 0, overwrite_io = 0;
+ int write_sem = 0;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = d_inode(dentry);
struct buffer_head *di_bh = NULL;
- loff_t end;
+ u32 cpos;
+ u32 clusters;
/*
* We start with a read level meta lock and only jump to an ex
* if we need to make modifications here.
*/
for(;;) {
- if (wait)
- ret = ocfs2_inode_lock(inode, NULL, meta_level);
- else
- ret = ocfs2_try_inode_lock(inode,
- overwrite_io ? NULL : &di_bh, meta_level);
+ ret = ocfs2_inode_lock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ overwrite_io,
+ write_sem,
+ wait);
if (ret < 0) {
- meta_level = -1;
if (ret != -EAGAIN)
mlog_errno(ret);
goto out;
@@ -2151,15 +2192,8 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
*/
if (!wait && !overwrite_io) {
overwrite_io = 1;
- if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
- ret = -EAGAIN;
- goto out_unlock;
- }
ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
- brelse(di_bh);
- di_bh = NULL;
- up_read(&OCFS2_I(inode)->ip_alloc_sem);
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
@@ -2178,7 +2212,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
* set inode->i_size at the end of a write. */
if (should_remove_suid(dentry)) {
if (meta_level == 0) {
- ocfs2_inode_unlock(inode, meta_level);
+ ocfs2_inode_unlock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ write_sem);
meta_level = 1;
continue;
}
@@ -2190,22 +2227,34 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
}
}
- end = pos + count;
-
ret = ocfs2_check_range_for_refcount(inode, pos, count);
if (ret == 1) {
- ocfs2_inode_unlock(inode, meta_level);
- meta_level = -1;
-
- ret = ocfs2_prepare_inode_for_refcount(inode,
- file,
- pos,
- count,
- &meta_level);
+ ocfs2_inode_unlock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ write_sem);
+ ret = ocfs2_inode_lock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ overwrite_io,
+ 1,
+ wait);
+ write_sem = 1;
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+ clusters =
+ ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
}
if (ret < 0) {
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
goto out_unlock;
}
@@ -2216,10 +2265,10 @@ out_unlock:
trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
pos, count, wait);
- brelse(di_bh);
-
- if (meta_level >= 0)
- ocfs2_inode_unlock(inode, meta_level);
+ ocfs2_inode_unlock_for_extent_tree(inode,
+ &di_bh,
+ meta_level,
+ write_sem);
out:
return ret;
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 7ad9d6590818..7c9dfd50c1c1 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -534,7 +534,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
*/
mlog_bug_on_msg(!!(fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) !=
!!(args->fi_flags & OCFS2_FI_FLAG_SYSFILE),
- "Inode %llu: system file state is ambigous\n",
+ "Inode %llu: system file state is ambiguous\n",
(unsigned long long)args->fi_blkno);
if (S_ISCHR(le16_to_cpu(fe->i_mode)) ||
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index d6f7b299eb23..89984172fc4a 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -283,7 +283,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
if (inode_alloc)
inode_lock(inode_alloc);
- if (o2info_coherent(&fi->ifi_req)) {
+ if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
status = ocfs2_inode_lock(inode_alloc, &bh, 0);
if (status < 0) {
mlog_errno(status);
@@ -985,6 +985,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return -EFAULT;
return ocfs2_info_handle(inode, &info, 1);
+ case FITRIM:
case OCFS2_IOC_MOVE_EXT:
break;
default:
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 930e3d388579..1afe57f425a0 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -217,7 +217,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
/* At this point, we know that no more recovery threads can be
* launched, so wait for any recovery completion work to
* complete. */
- flush_workqueue(osb->ocfs2_wq);
+ if (osb->ocfs2_wq)
+ flush_workqueue(osb->ocfs2_wq);
/*
* Now that recovery is shut down, and the osb is about to be
@@ -419,14 +420,14 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
if (!nblocks)
return 0;
- old_nblocks = handle->h_buffer_credits;
+ old_nblocks = jbd2_handle_buffer_credits(handle);
trace_ocfs2_extend_trans(old_nblocks, nblocks);
#ifdef CONFIG_OCFS2_DEBUG_FS
status = 1;
#else
- status = jbd2_journal_extend(handle, nblocks);
+ status = jbd2_journal_extend(handle, nblocks, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -460,13 +461,13 @@ int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
BUG_ON(!handle);
- old_nblks = handle->h_buffer_credits;
+ old_nblks = jbd2_handle_buffer_credits(handle);
trace_ocfs2_allocate_extend_trans(old_nblks, thresh);
if (old_nblks < thresh)
return 0;
- status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA);
+ status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index c0fe6ed08ab1..3103ba7f97a2 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -144,7 +144,6 @@ static inline void ocfs2_ci_set_new(struct ocfs2_super *osb,
void ocfs2_orphan_scan_init(struct ocfs2_super *osb);
void ocfs2_orphan_scan_start(struct ocfs2_super *osb);
void ocfs2_orphan_scan_stop(struct ocfs2_super *osb);
-void ocfs2_orphan_scan_exit(struct ocfs2_super *osb);
void ocfs2_complete_recovery(struct work_struct *work);
void ocfs2_wait_for_recovery(struct ocfs2_super *osb);
@@ -232,8 +231,8 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
* ocfs2_journal_access_*() unless you intend to
* manage the checksum by hand.
* ocfs2_journal_dirty - Mark a journalled buffer as having dirty data.
- * ocfs2_jbd2_file_inode - Mark an inode so that its data goes out before
- * the current handle commits.
+ * ocfs2_jbd2_inode_add_write - Mark an inode with range so that its data goes
+ * out before the current handle commits.
*/
/* You must always start_trans with a number of buffs > 0, but it's
@@ -441,7 +440,7 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
* previous dirblock update in the free list */
static inline int ocfs2_link_credits(struct super_block *sb)
{
- return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
+ return 2 * OCFS2_INODE_UPDATE_CREDITS + 4 +
ocfs2_quota_trans_credits(sb);
}
@@ -575,37 +574,12 @@ static inline int ocfs2_calc_bg_discontig_credits(struct super_block *sb)
return ocfs2_extent_recs_per_gd(sb);
}
-static inline int ocfs2_calc_tree_trunc_credits(struct super_block *sb,
- unsigned int clusters_to_del,
- struct ocfs2_dinode *fe,
- struct ocfs2_extent_list *last_el)
+static inline int ocfs2_jbd2_inode_add_write(handle_t *handle, struct inode *inode,
+ loff_t start_byte, loff_t length)
{
- /* for dinode + all headers in this pass + update to next leaf */
- u16 next_free = le16_to_cpu(last_el->l_next_free_rec);
- u16 tree_depth = le16_to_cpu(fe->id2.i_list.l_tree_depth);
- int credits = 1 + tree_depth + 1;
- int i;
-
- i = next_free - 1;
- BUG_ON(i < 0);
-
- /* We may be deleting metadata blocks, so metadata alloc dinode +
- one desc. block for each possible delete. */
- if (tree_depth && next_free == 1 &&
- ocfs2_rec_clusters(last_el, &last_el->l_recs[i]) == clusters_to_del)
- credits += 1 + tree_depth;
-
- /* update to the truncate log. */
- credits += OCFS2_TRUNCATE_LOG_UPDATE;
-
- credits += ocfs2_quota_trans_credits(sb);
-
- return credits;
-}
-
-static inline int ocfs2_jbd2_file_inode(handle_t *handle, struct inode *inode)
-{
- return jbd2_journal_inode_add_write(handle, &OCFS2_I(inode)->ip_jinode);
+ return jbd2_journal_inode_ranged_write(handle,
+ &OCFS2_I(inode)->ip_jinode,
+ start_byte, length);
}
static inline int ocfs2_begin_ordered_truncate(struct inode *inode,
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 158e5af767fd..720e9f94957e 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -377,7 +377,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
struct ocfs2_dinode *alloc = NULL;
cancel_delayed_work(&osb->la_enable_wq);
- flush_workqueue(osb->ocfs2_wq);
+ if (osb->ocfs2_wq)
+ flush_workqueue(osb->ocfs2_wq);
if (osb->local_alloc_state == OCFS2_LA_UNUSED)
goto out;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 6f8e1c4fdb9c..8ea51cf27b97 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -2486,7 +2486,6 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
struct inode *inode = NULL;
struct inode *orphan_dir = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
- struct ocfs2_dinode *di = NULL;
handle_t *handle = NULL;
char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
struct buffer_head *parent_di_bh = NULL;
@@ -2552,7 +2551,6 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
goto leave;
}
- di = (struct ocfs2_dinode *)new_di_bh->b_data;
status = ocfs2_orphan_add(osb, handle, inode, new_di_bh, orphan_name,
&orphan_insert, orphan_dir, false);
if (status < 0) {
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index fddbbd60f434..9150cfa4df7d 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -223,8 +223,6 @@ struct ocfs2_orphan_scan {
struct ocfs2_dlm_debug {
struct kref d_refcnt;
- struct dentry *d_locking_state;
- struct dentry *d_locking_filter;
u32 d_filter_secs;
struct list_head d_lockres_tracking;
};
@@ -401,7 +399,6 @@ struct ocfs2_super
struct ocfs2_dlm_debug *osb_dlm_debug;
struct dentry *osb_debug_root;
- struct dentry *osb_ctxt;
wait_queue_head_t recovery_event;
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 7a922190a8c7..eda83487c9ec 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -728,7 +728,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
- if (atomic_read(&dquot->dq_count) > 1)
+ if (dquot_is_busy(dquot))
goto out;
/* Running from downconvert thread? Postpone quota processing to wq */
if (current == osb->dc_task) {
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 69c21a3843af..4180c3ef0a68 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1252,6 +1252,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
int nr)
{
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ struct journal_head *jh;
int ret;
if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
@@ -1260,13 +1261,14 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
if (!buffer_jbd(bg_bh))
return 1;
- jbd_lock_bh_state(bg_bh);
- bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
+ jh = bh2jh(bg_bh);
+ spin_lock(&jh->b_state_lock);
+ bg = (struct ocfs2_group_desc *) jh->b_committed_data;
if (bg)
ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
else
ret = 1;
- jbd_unlock_bh_state(bg_bh);
+ spin_unlock(&jh->b_state_lock);
return ret;
}
@@ -2387,6 +2389,7 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
int status;
unsigned int tmp;
struct ocfs2_group_desc *undo_bg = NULL;
+ struct journal_head *jh;
/* The caller got this descriptor from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
@@ -2405,10 +2408,10 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
goto bail;
}
+ jh = bh2jh(group_bh);
if (undo_fn) {
- jbd_lock_bh_state(group_bh);
- undo_bg = (struct ocfs2_group_desc *)
- bh2jh(group_bh)->b_committed_data;
+ spin_lock(&jh->b_state_lock);
+ undo_bg = (struct ocfs2_group_desc *) jh->b_committed_data;
BUG_ON(!undo_bg);
}
@@ -2423,7 +2426,7 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
le16_add_cpu(&bg->bg_free_bits_count, num_bits);
if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
if (undo_fn)
- jbd_unlock_bh_state(group_bh);
+ spin_unlock(&jh->b_state_lock);
return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
(unsigned long long)le64_to_cpu(bg->bg_blkno),
le16_to_cpu(bg->bg_bits),
@@ -2432,7 +2435,7 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
}
if (undo_fn)
- jbd_unlock_bh_state(group_bh);
+ spin_unlock(&jh->b_state_lock);
ocfs2_journal_dirty(handle, group_bh);
bail:
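These hunks drop the jbd_lock_bh_state() wrappers in favour of taking the journal_head's own b_state_lock directly. The access pattern reduces to the sketch below; bh2jh() maps a buffer_head to its journal_head as in the hunks above, and the helper name is illustrative:

/* Sketch: inspect committed data under the journal_head state lock. */
static bool bit_set_in_committed(struct buffer_head *bh, int nr)
{
	struct journal_head *jh = bh2jh(bh);
	struct ocfs2_group_desc *bg;
	bool set = false;

	spin_lock(&jh->b_state_lock);
	bg = (struct ocfs2_group_desc *)jh->b_committed_data;
	if (bg)
		set = ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
	spin_unlock(&jh->b_state_lock);
	return set;
}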
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8b2f39506648..05dd68ade293 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -926,8 +926,8 @@ static int ocfs2_enable_quotas(struct ocfs2_super *osb)
status = -ENOENT;
goto out_quota_off;
}
- status = dquot_enable(inode[type], type, QFMT_OCFS2,
- DQUOT_USAGE_ENABLED);
+ status = dquot_load_quota_inode(inode[type], type, QFMT_OCFS2,
+ DQUOT_USAGE_ENABLED);
if (status < 0)
goto out_quota_off;
}
@@ -1080,10 +1080,8 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
ocfs2_debugfs_root);
- osb->osb_ctxt = debugfs_create_file("fs_state", S_IFREG|S_IRUSR,
- osb->osb_debug_root,
- osb,
- &ocfs2_osb_debug_fops);
+ debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root,
+ osb, &ocfs2_osb_debug_fops);
if (ocfs2_meta_ecc(osb))
ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats,
@@ -1861,8 +1859,6 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
kset_unregister(osb->osb_dev_kset);
- debugfs_remove(osb->osb_ctxt);
-
/* Orphan scan should be stopped as early as possible */
ocfs2_orphan_scan_stop(osb);
@@ -1918,7 +1914,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
ocfs2_dlm_shutdown(osb, hangup_needed);
ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
- debugfs_remove(osb->osb_debug_root);
+ debugfs_remove_recursive(osb->osb_debug_root);
if (hangup_needed)
ocfs2_cluster_hangup(osb->uuid_str, strlen(osb->uuid_str));
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 385f3aaa2448..90c830e3758e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
int low_bucket = 0, bucket, high_bucket;
struct ocfs2_xattr_bucket *search;
- u32 last_hash;
u64 blkno, lower_blkno = 0;
search = ocfs2_xattr_bucket_new(inode);
@@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
if (xh->xh_count)
xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
- last_hash = le32_to_cpu(xe->xe_name_hash);
-
/* record lower_blkno which may be the insert place. */
lower_blkno = blkno;
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index 08226a835ec3..b76ec6b88ded 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -478,6 +478,10 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_maxbytes = 0xffffffff;
+ sb->s_time_gran = NSEC_PER_MSEC;
+ sb->s_time_min = 0;
+ sb->s_time_max = U64_MAX / MSEC_PER_SEC;
+
sb_set_blocksize(sb, 0x200);
bh = sb_bread(sb, 0);
diff --git a/fs/open.c b/fs/open.c
index b5b80469b93d..b62f5c0923a8 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -374,6 +374,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode)
override_cred->cap_permitted;
}
+ /*
+ * The new set of credentials can *only* be used in
+ * task-synchronous circumstances, and does not need
+ * RCU freeing, unless somebody then takes a separate
+ * reference to it.
+ *
+ * NOTE! This is _only_ true because this credential
+ * is used purely for override_creds() that installs
+ * it as the subjective cred. Other threads will be
+ * accessing ->real_cred, not the subjective cred.
+ *
+ * If somebody _does_ make a copy of this (using the
+ * 'get_current_cred()' function), that will clear the
+ * non_rcu field, because now that other user may be
+ * expecting RCU freeing. But normal thread-synchronous
+ * cred accesses will keep things non-RCU.
+ */
+ override_cred->non_rcu = 1;
+
old_cred = override_creds(override_cred);
retry:
res = user_path_at(dfd, filename, lookup_flags, &path);
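The comment above describes credentials that are only ever installed as the subjective cred via override_creds(), so they can skip RCU freeing. A minimal sketch of that pattern using the standard cred API (the wrapper and its callback are illustrative, not part of this patch):

/* Sketch: short-lived override cred that never needs RCU freeing. */
static int with_temp_creds(int (*fn)(void *), void *arg)
{
	const struct cred *old;
	struct cred *cred = prepare_creds();
	int ret;

	if (!cred)
		return -ENOMEM;
	cred->non_rcu = 1;	/* only ever used via override_creds() */
	old = override_creds(cred);
	ret = fn(arg);
	revert_creds(old);
	put_cred(cred);
	return ret;
}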
@@ -757,7 +776,7 @@ static int do_dentry_open(struct file *f,
f->f_mode |= FMODE_ATOMIC_POS;
f->f_op = fops_get(inode->i_fop);
- if (unlikely(WARN_ON(!f->f_op))) {
+ if (WARN_ON(!f->f_op)) {
error = -ENODEV;
goto cleanup_all;
}
@@ -799,6 +818,14 @@ static int do_dentry_open(struct file *f,
if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
return -EINVAL;
}
+
+ /*
+ * XXX: Huge page cache doesn't support writing yet. Drop all page
+ * cache for this file before processing writes.
+ */
+ if ((f->f_mode & FMODE_WRITE) && filemap_nr_thps(inode->i_mapping))
+ truncate_pagecache(inode, 0);
+
return 0;
cleanup_all:
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 960f9a3c012d..a5612abc0936 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -555,7 +555,7 @@ static int orangefs_fsync(struct file *file,
* Change the file pointer position for an instance of an open file.
*
* \note If .llseek is overridden, we must acquire lock as described in
- * Documentation/filesystems/Locking.
+ * Documentation/filesystems/locking.rst.
*
* Future upgrade could support SEEK_DATA and SEEK_HOLE but would
* require much changes to the FS
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 0c337d8bdaab..efb12197da18 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -940,7 +940,7 @@ out:
int orangefs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
- int ret = -ENOENT;
+ int ret;
struct inode *inode = path->dentry->d_inode;
gossip_debug(GOSSIP_INODE_DEBUG,
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 1dd710e5f376..3e7cf3d0a494 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -224,7 +224,7 @@ static int orangefs_symlink(struct inode *dir,
struct orangefs_object_kref ref;
struct inode *inode;
struct iattr iattr;
- int mode = 755;
+ int mode = 0755;
int ret;
gossip_debug(GOSSIP_NAME_DEBUG, "%s: called\n", __func__);
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 572dd29fbd54..34a6c99fa29b 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -246,7 +246,7 @@ struct orangefs_read_options {
extern struct orangefs_stats orangefs_stats;
/*
- * NOTE: See Documentation/filesystems/porting for information
+ * NOTE: See Documentation/filesystems/porting.rst for information
* on implementing FOO_I and properly accessing fs private data
*/
static inline struct orangefs_inode_s *ORANGEFS_I(struct inode *inode)
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index cb8ec1f65c03..73c9775215b3 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -227,9 +227,8 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
/* Encode an upper or lower file handle */
fh = ovl_encode_real_fh(enc_lower ? ovl_dentry_lower(dentry) :
ovl_dentry_upper(dentry), !enc_lower);
- err = PTR_ERR(fh);
if (IS_ERR(fh))
- goto fail;
+ return PTR_ERR(fh);
err = -EOVERFLOW;
if (fh->len > buflen)
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 7663aeb85fa3..bc14781886bf 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -383,7 +383,8 @@ static bool ovl_can_list(const char *s)
return true;
/* Never list trusted.overlay, list other trusted for superuser only */
- return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+ return !ovl_is_private_xattr(s) &&
+ ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
}
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 28a2d12a1029..a8279280e88d 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -66,6 +66,7 @@ struct ovl_fs {
bool workdir_locked;
/* Traps in ovl inode cache */
struct inode *upperdir_trap;
+ struct inode *workbasedir_trap;
struct inode *workdir_trap;
struct inode *indexdir_trap;
/* Inode numbers in all layers do not use the high xino_bits */
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index b368e2e102fa..afbcb116a7f1 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -212,6 +212,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
{
unsigned i;
+ iput(ofs->workbasedir_trap);
iput(ofs->indexdir_trap);
iput(ofs->workdir_trap);
iput(ofs->upperdir_trap);
@@ -1003,6 +1004,25 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
return 0;
}
+/*
+ * Determine how we treat concurrent use of upperdir/workdir based on the
+ * index feature. This is papering over mount leaks of container runtimes,
+ * for example, an old overlay mount is leaked and now its upperdir is
+ * attempted to be used as a lower layer in a new overlay mount.
+ */
+static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
+{
+ if (ofs->config.index) {
+ pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
+ name);
+ return -EBUSY;
+ } else {
+ pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
+ name);
+ return 0;
+ }
+}
+
static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
struct path *upperpath)
{
@@ -1040,14 +1060,12 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
ofs->upper_mnt = upper_mnt;
- err = -EBUSY;
if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
ofs->upperdir_locked = true;
- } else if (ofs->config.index) {
- pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
- goto out;
} else {
- pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+ err = ovl_report_in_use(ofs, "upperdir");
+ if (err)
+ goto out;
}
err = 0;
@@ -1157,16 +1175,19 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
ofs->workbasedir = dget(workpath.dentry);
- err = -EBUSY;
if (ovl_inuse_trylock(ofs->workbasedir)) {
ofs->workdir_locked = true;
- } else if (ofs->config.index) {
- pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
- goto out;
} else {
- pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+ err = ovl_report_in_use(ofs, "workdir");
+ if (err)
+ goto out;
}
+ err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap,
+ "workdir");
+ if (err)
+ goto out;
+
err = ovl_make_workdir(sb, ofs, &workpath);
out:
@@ -1313,16 +1334,16 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
if (err < 0)
goto out;
- err = -EBUSY;
- if (ovl_is_inuse(stack[i].dentry)) {
- pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
- goto out;
- }
-
err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
if (err)
goto out;
+ if (ovl_is_inuse(stack[i].dentry)) {
+ err = ovl_report_in_use(ofs, "lowerdir");
+ if (err)
+ goto out;
+ }
+
mnt = clone_private_mount(&stack[i]);
err = PTR_ERR(mnt);
if (IS_ERR(mnt)) {
@@ -1469,8 +1490,8 @@ out_err:
* - another layer of this overlayfs instance
* - upper/work dir of any overlayfs instance
*/
-static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
- const char *name)
+static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
+ struct dentry *dentry, const char *name)
{
struct dentry *next = dentry, *parent;
int err = 0;
@@ -1482,13 +1503,11 @@ static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
/* Walk back ancestors to root (inclusive) looking for traps */
while (!err && parent != next) {
- if (ovl_is_inuse(parent)) {
- err = -EBUSY;
- pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
- name);
- } else if (ovl_lookup_trap_inode(sb, parent)) {
+ if (ovl_lookup_trap_inode(sb, parent)) {
err = -ELOOP;
pr_err("overlayfs: overlapping %s path\n", name);
+ } else if (ovl_is_inuse(parent)) {
+ err = ovl_report_in_use(ofs, name);
}
next = parent;
parent = dget_parent(next);
@@ -1509,7 +1528,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
int i, err;
if (ofs->upper_mnt) {
- err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
+ err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root,
+ "upperdir");
if (err)
return err;
@@ -1520,13 +1540,14 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
* workbasedir. In that case, we already have their traps in
* inode cache and we will catch that case on lookup.
*/
- err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
+ err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
if (err)
return err;
}
for (i = 0; i < ofs->numlower; i++) {
- err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
+ err = ovl_check_layer(sb, ofs,
+ ofs->lower_layers[i].mnt->mnt_root,
"lowerdir");
if (err)
return err;
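The refactor above funnels every in-use check through ovl_report_in_use(), so the trylock sites share one policy: hard -EBUSY when the index feature is on, a warning otherwise. A condensed sketch of that pattern, reusing only the helpers shown in these hunks (the wrapper itself is illustrative):

/* Sketch: take the exclusive mark if free, otherwise report per policy. */
static int lock_or_report(struct ovl_fs *ofs, struct dentry *d,
			  const char *name, bool *locked)
{
	if (ovl_inuse_trylock(d)) {
		*locked = true;
		return 0;
	}
	/* -EBUSY with index=on, warning only with index=off */
	return ovl_report_in_use(ofs, name);
}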
diff --git a/fs/pipe.c b/fs/pipe.c
index 8a2ab2f974bd..b901c8eefafd 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -43,10 +43,12 @@ unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
/*
- * We use a start+len construction, which provides full use of the
- * allocated memory.
- * -- Florian Coosmann (FGC)
- *
+ * We use head and tail indices that aren't masked off, except at the point of
+ * dereference, but rather they're allowed to wrap naturally. This means there
+ * isn't a dead spot in the buffer, but the ring has to be a power of two and
+ * <= 2^31.
+ * -- David Howells 2019-09-23.
+ *
* Reads with count = 0 should always return 0.
* -- Julian Bradfield 1999-06-07.
*
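The head/tail scheme described in this comment relies on unmasked indices compared by subtraction, with masking applied only when dereferencing the buffer array. The helpers referenced throughout the rest of this file behave roughly like the sketch below (the real definitions live alongside this rework in include/linux/pipe_fs_i.h):

/* Sketch of the ring helpers used below; indices wrap naturally. */
static inline bool pipe_empty(unsigned int head, unsigned int tail)
{
	return head == tail;
}

static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
{
	return head - tail;	/* well-defined modulo 2^32 */
}

static inline bool pipe_full(unsigned int head, unsigned int tail,
			     unsigned int limit)
{
	return pipe_occupancy(head, tail) >= limit;
}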
@@ -285,10 +287,12 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
ret = 0;
__pipe_lock(pipe);
for (;;) {
- int bufs = pipe->nrbufs;
- if (bufs) {
- int curbuf = pipe->curbuf;
- struct pipe_buffer *buf = pipe->bufs + curbuf;
+ unsigned int head = pipe->head;
+ unsigned int tail = pipe->tail;
+ unsigned int mask = pipe->ring_size - 1;
+
+ if (!pipe_empty(head, tail)) {
+ struct pipe_buffer *buf = &pipe->bufs[tail & mask];
size_t chars = buf->len;
size_t written;
int error;
@@ -320,18 +324,27 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
}
if (!buf->len) {
+ bool wake;
pipe_buf_release(pipe, buf);
- curbuf = (curbuf + 1) & (pipe->buffers - 1);
- pipe->curbuf = curbuf;
- pipe->nrbufs = --bufs;
+ spin_lock_irq(&pipe->wait.lock);
+ tail++;
+ pipe->tail = tail;
do_wakeup = 1;
+ wake = head - (tail - 1) == pipe->max_usage / 2;
+ if (wake)
+ wake_up_locked_poll(
+ &pipe->wait, EPOLLOUT | EPOLLWRNORM);
+ spin_unlock_irq(&pipe->wait.lock);
+ if (wake)
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
+ if (!pipe_empty(head, tail)) /* More to do? */
+ continue;
}
- if (bufs) /* More to do? */
- continue;
+
if (!pipe->writers)
break;
if (!pipe->waiting_writers) {
@@ -352,17 +365,13 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
ret = -ERESTARTSYS;
break;
}
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
- kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
- }
pipe_wait(pipe);
}
__pipe_unlock(pipe);
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
+ wake_up_interruptible_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
@@ -380,6 +389,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
+ unsigned int head;
ssize_t ret = 0;
int do_wakeup = 0;
size_t total_len = iov_iter_count(from);
@@ -397,12 +407,13 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
+ head = pipe->head;
+
/* We try to merge small writes */
chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
- if (pipe->nrbufs && chars != 0) {
- int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
- (pipe->buffers - 1);
- struct pipe_buffer *buf = pipe->bufs + lastbuf;
+ if (!pipe_empty(head, pipe->tail) && chars != 0) {
+ unsigned int mask = pipe->ring_size - 1;
+ struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
int offset = buf->offset + buf->len;
if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
@@ -423,18 +434,17 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
}
for (;;) {
- int bufs;
-
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
- bufs = pipe->nrbufs;
- if (bufs < pipe->buffers) {
- int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
- struct pipe_buffer *buf = pipe->bufs + newbuf;
+
+ head = pipe->head;
+ if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
+ unsigned int mask = pipe->ring_size - 1;
+ struct pipe_buffer *buf = &pipe->bufs[head & mask];
struct page *page = pipe->tmp_page;
int copied;
@@ -446,38 +456,64 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
}
pipe->tmp_page = page;
}
+
+ /* Allocate a slot in the ring in advance and attach an
+ * empty buffer. If we fault or otherwise fail to use
+ * it, either the reader will consume it or it'll still
+ * be there for the next write.
+ */
+ spin_lock_irq(&pipe->wait.lock);
+
+ head = pipe->head;
+ if (pipe_full(head, pipe->tail, pipe->max_usage)) {
+ spin_unlock_irq(&pipe->wait.lock);
+ continue;
+ }
+
+ pipe->head = head + 1;
+
/* Always wake up, even if the copy fails. Otherwise
* we lock up (O_NONBLOCK-)readers that sleep due to
* syscall merging.
* FIXME! Is this really true?
*/
- do_wakeup = 1;
- copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
- if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
- ret += copied;
+ wake_up_locked_poll(
+ &pipe->wait, EPOLLIN | EPOLLRDNORM);
+
+ spin_unlock_irq(&pipe->wait.lock);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
/* Insert it into the buffer array */
+ buf = &pipe->bufs[head & mask];
buf->page = page;
buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
- buf->len = copied;
+ buf->len = 0;
buf->flags = 0;
if (is_packetized(filp)) {
buf->ops = &packet_pipe_buf_ops;
buf->flags = PIPE_BUF_FLAG_PACKET;
}
- pipe->nrbufs = ++bufs;
pipe->tmp_page = NULL;
+ copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
+ if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+ ret += copied;
+ buf->offset = 0;
+ buf->len = copied;
+
if (!iov_iter_count(from))
break;
}
- if (bufs < pipe->buffers)
+
+ if (!pipe_full(head, pipe->tail, pipe->max_usage))
continue;
+
+ /* Wait for buffer space to become available. */
if (filp->f_flags & O_NONBLOCK) {
if (!ret)
ret = -EAGAIN;
@@ -488,11 +524,6 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
ret = -ERESTARTSYS;
break;
}
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- do_wakeup = 0;
- }
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
@@ -500,7 +531,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
out:
__pipe_unlock(pipe);
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
+ wake_up_interruptible_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
@@ -515,17 +546,19 @@ out:
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe = filp->private_data;
- int count, buf, nrbufs;
+ int count, head, tail, mask;
switch (cmd) {
case FIONREAD:
__pipe_lock(pipe);
count = 0;
- buf = pipe->curbuf;
- nrbufs = pipe->nrbufs;
- while (--nrbufs >= 0) {
- count += pipe->bufs[buf].len;
- buf = (buf+1) & (pipe->buffers - 1);
+ head = pipe->head;
+ tail = pipe->tail;
+ mask = pipe->ring_size - 1;
+
+ while (tail != head) {
+ count += pipe->bufs[tail & mask].len;
+ tail++;
}
__pipe_unlock(pipe);
@@ -541,21 +574,23 @@ pipe_poll(struct file *filp, poll_table *wait)
{
__poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
- int nrbufs;
+ unsigned int head = READ_ONCE(pipe->head);
+ unsigned int tail = READ_ONCE(pipe->tail);
poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
- nrbufs = pipe->nrbufs;
mask = 0;
if (filp->f_mode & FMODE_READ) {
- mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
+ if (!pipe_empty(head, tail))
+ mask |= EPOLLIN | EPOLLRDNORM;
if (!pipe->writers && filp->f_version != pipe->w_counter)
mask |= EPOLLHUP;
}
if (filp->f_mode & FMODE_WRITE) {
- mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
+ if (!pipe_full(head, tail, pipe->max_usage))
+ mask |= EPOLLOUT | EPOLLWRNORM;
/*
* Most Unices do not set EPOLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
@@ -679,7 +714,8 @@ struct pipe_inode_info *alloc_pipe_info(void)
if (pipe->bufs) {
init_waitqueue_head(&pipe->wait);
pipe->r_counter = pipe->w_counter = 1;
- pipe->buffers = pipe_bufs;
+ pipe->max_usage = pipe_bufs;
+ pipe->ring_size = pipe_bufs;
pipe->user = user;
mutex_init(&pipe->mutex);
return pipe;
@@ -697,9 +733,9 @@ void free_pipe_info(struct pipe_inode_info *pipe)
{
int i;
- (void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
+ (void) account_pipe_buffers(pipe->user, pipe->ring_size, 0);
free_uid(pipe->user);
- for (i = 0; i < pipe->buffers; i++) {
+ for (i = 0; i < pipe->ring_size; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
pipe_buf_release(pipe, buf);
@@ -793,6 +829,8 @@ int create_pipe_files(struct file **res, int flags)
}
res[0]->private_data = inode->i_pipe;
res[1] = f;
+ stream_open(inode, res[0]);
+ stream_open(inode, res[1]);
return 0;
}
@@ -880,7 +918,7 @@ SYSCALL_DEFINE1(pipe, int __user *, fildes)
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
- int cur = *cnt;
+ int cur = *cnt;
while (cur == *cnt) {
pipe_wait(pipe);
@@ -931,9 +969,9 @@ static int fifo_open(struct inode *inode, struct file *filp)
__pipe_lock(pipe);
/* We can only do regular read/write on fifos */
- filp->f_mode &= (FMODE_READ | FMODE_WRITE);
+ stream_open(inode, filp);
- switch (filp->f_mode) {
+ switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
case FMODE_READ:
/*
* O_RDONLY
@@ -955,7 +993,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
}
}
break;
-
+
case FMODE_WRITE:
/*
* O_WRONLY
@@ -975,7 +1013,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
goto err_wr;
}
break;
-
+
case FMODE_READ | FMODE_WRITE:
/*
* O_RDWR
@@ -1054,14 +1092,14 @@ unsigned int round_pipe_size(unsigned long size)
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
struct pipe_buffer *bufs;
- unsigned int size, nr_pages;
+ unsigned int size, nr_slots, head, tail, mask, n;
unsigned long user_bufs;
long ret = 0;
size = round_pipe_size(arg);
- nr_pages = size >> PAGE_SHIFT;
+ nr_slots = size >> PAGE_SHIFT;
- if (!nr_pages)
+ if (!nr_slots)
return -EINVAL;
/*
@@ -1071,13 +1109,13 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
* Decreasing the pipe capacity is always permitted, even
* if the user is currently over a limit.
*/
- if (nr_pages > pipe->buffers &&
+ if (nr_slots > pipe->ring_size &&
size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
return -EPERM;
- user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
+ user_bufs = account_pipe_buffers(pipe->user, pipe->ring_size, nr_slots);
- if (nr_pages > pipe->buffers &&
+ if (nr_slots > pipe->ring_size &&
(too_many_pipe_buffers_hard(user_bufs) ||
too_many_pipe_buffers_soft(user_bufs)) &&
is_unprivileged_user()) {
@@ -1086,17 +1124,21 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
}
/*
- * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
- * expect a lot of shrink+grow operations, just free and allocate
- * again like we would do for growing. If the pipe currently
+ * We can shrink the pipe, if arg is greater than the ring occupancy.
+ * Since we don't expect a lot of shrink+grow operations, just free and
+ * allocate again like we would do for growing. If the pipe currently
* contains more buffers than arg, then return busy.
*/
- if (nr_pages < pipe->nrbufs) {
+ mask = pipe->ring_size - 1;
+ head = pipe->head;
+ tail = pipe->tail;
+ n = pipe_occupancy(pipe->head, pipe->tail);
+ if (nr_slots < n) {
ret = -EBUSY;
goto out_revert_acct;
}
- bufs = kcalloc(nr_pages, sizeof(*bufs),
+ bufs = kcalloc(nr_slots, sizeof(*bufs),
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (unlikely(!bufs)) {
ret = -ENOMEM;
@@ -1105,33 +1147,38 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
/*
* The pipe array wraps around, so just start the new one at zero
- * and adjust the indexes.
+ * and adjust the indices.
*/
- if (pipe->nrbufs) {
- unsigned int tail;
- unsigned int head;
-
- tail = pipe->curbuf + pipe->nrbufs;
- if (tail < pipe->buffers)
- tail = 0;
- else
- tail &= (pipe->buffers - 1);
-
- head = pipe->nrbufs - tail;
- if (head)
- memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
- if (tail)
- memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
+ if (n > 0) {
+ unsigned int h = head & mask;
+ unsigned int t = tail & mask;
+ if (h > t) {
+ memcpy(bufs, pipe->bufs + t,
+ n * sizeof(struct pipe_buffer));
+ } else {
+ unsigned int tsize = pipe->ring_size - t;
+ if (h > 0)
+ memcpy(bufs + tsize, pipe->bufs,
+ h * sizeof(struct pipe_buffer));
+ memcpy(bufs, pipe->bufs + t,
+ tsize * sizeof(struct pipe_buffer));
+ }
}
- pipe->curbuf = 0;
+ head = n;
+ tail = 0;
+
kfree(pipe->bufs);
pipe->bufs = bufs;
- pipe->buffers = nr_pages;
- return nr_pages * PAGE_SIZE;
+ pipe->ring_size = nr_slots;
+ pipe->max_usage = nr_slots;
+ pipe->tail = tail;
+ pipe->head = head;
+ wake_up_interruptible_all(&pipe->wait);
+ return pipe->max_usage * PAGE_SIZE;
out_revert_acct:
- (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
+ (void) account_pipe_buffers(pipe->user, nr_slots, pipe->ring_size);
return ret;
}
@@ -1161,7 +1208,7 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
ret = pipe_set_size(pipe, arg);
break;
case F_GETPIPE_SZ:
- ret = pipe->buffers * PAGE_SIZE;
+ ret = pipe->max_usage * PAGE_SIZE;
break;
default:
ret = -EINVAL;
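With the resize path above, F_SETPIPE_SZ rounds the requested size up to a power-of-two slot count and F_GETPIPE_SZ now reports max_usage * PAGE_SIZE. A small userspace sketch exercising both fcntls (illustrative only; the printed values depend on pipe-max-size limits and privileges):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds))
		return 1;
	/* Ask for 1 MiB of buffer; the kernel rounds to a power of two. */
	int set = fcntl(fds[1], F_SETPIPE_SZ, 1 << 20);
	int get = fcntl(fds[1], F_GETPIPE_SZ, 0);
	printf("set=%d get=%d\n", set, get);
	close(fds[0]);
	close(fds[1]);
	return 0;
}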
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index cb5629bd5fff..733881a6387b 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -42,8 +42,8 @@ config PROC_VMCORE
bool "/proc/vmcore support"
depends on PROC_FS && CRASH_DUMP
default y
- help
- Exports the dump image of crashed kernel in ELF format.
+ help
+ Exports the dump image of crashed kernel in ELF format.
config PROC_VMCORE_DEVICE_DUMP
bool "Device Hardware/Firmware Log Collection"
@@ -72,7 +72,7 @@ config PROC_SYSCTL
a recompile of the kernel or reboot of the system. The primary
interface is through /proc/sys. If you say Y here a tree of
modifiable sysctl entries will be generated beneath the
- /proc/sys directory. They are explained in the files
+ /proc/sys directory. They are explained in the files
in <file:Documentation/admin-guide/sysctl/>. Note that enabling this
option will enlarge the kernel by at least 8 KB.
@@ -88,7 +88,7 @@ config PROC_PAGE_MONITOR
Various /proc files exist to monitor process memory utilization:
/proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap,
/proc/kpagecount, and /proc/kpageflags. Disabling these
- interfaces will reduce the size of the kernel by approximately 4kb.
+ interfaces will reduce the size of the kernel by approximately 4kb.
config PROC_CHILDREN
bool "Include /proc/<pid>/task/<tid>/children file"
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 46dcb6f0eccf..5efaf3708ec6 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -533,7 +533,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
nice = task_nice(task);
/* convert nsec -> ticks */
- start_time = nsec_to_clock_t(task->real_start_time);
+ start_time = nsec_to_clock_t(task->start_boottime);
seq_put_decimal_ull(m, "", pid_nr_ns(pid, ns));
seq_puts(m, " (");
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 64e9ee1b129e..074e9585c699 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -138,8 +138,12 @@ static int proc_getattr(const struct path *path, struct kstat *stat,
{
struct inode *inode = d_inode(path->dentry);
struct proc_dir_entry *de = PDE(inode);
- if (de && de->nlink)
- set_nlink(inode, de->nlink);
+ if (de) {
+ nlink_t nlink = READ_ONCE(de->nlink);
+ if (nlink > 0) {
+ set_nlink(inode, nlink);
+ }
+ }
generic_fillattr(inode, stat);
return 0;
@@ -159,7 +163,6 @@ static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
{
const char *cp = name, *next;
struct proc_dir_entry *de;
- unsigned int len;
de = *ret;
if (!de)
@@ -170,13 +173,12 @@ static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
if (!next)
break;
- len = next - cp;
- de = pde_subdir_find(de, cp, len);
+ de = pde_subdir_find(de, cp, next - cp);
if (!de) {
WARN(1, "name '%s'\n", name);
return -ENOENT;
}
- cp += len + 1;
+ cp = next + 1;
}
*residual = cp;
*ret = de;
@@ -362,6 +364,7 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
write_unlock(&proc_subdir_lock);
goto out_free_inum;
}
+ dir->nlink++;
write_unlock(&proc_subdir_lock);
return dp;
@@ -472,10 +475,7 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
ent->data = data;
ent->proc_fops = &proc_dir_operations;
ent->proc_iops = &proc_dir_inode_operations;
- parent->nlink++;
ent = proc_register(parent, ent);
- if (!ent)
- parent->nlink--;
}
return ent;
}
@@ -505,10 +505,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
ent->data = NULL;
ent->proc_fops = NULL;
ent->proc_iops = NULL;
- parent->nlink++;
ent = proc_register(parent, ent);
- if (!ent)
- parent->nlink--;
}
return ent;
}
@@ -666,8 +663,12 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
len = strlen(fn);
de = pde_subdir_find(parent, fn, len);
- if (de)
+ if (de) {
rb_erase(&de->subdir_node, &parent->subdir);
+ if (S_ISDIR(de->mode)) {
+ parent->nlink--;
+ }
+ }
write_unlock(&proc_subdir_lock);
if (!de) {
WARN(1, "name '%s'\n", name);
@@ -676,9 +677,6 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
proc_entry_rundown(de);
- if (S_ISDIR(de->mode))
- parent->nlink--;
- de->nlink = 0;
WARN(pde_subdir_first(de),
"%s: removing non-empty directory '%s/%s', leaking at least '%s'\n",
__func__, de->parent->name, de->name, pde_subdir_first(de)->name);
@@ -714,13 +712,12 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
de = next;
continue;
}
- write_unlock(&proc_subdir_lock);
-
- proc_entry_rundown(de);
next = de->parent;
if (S_ISDIR(de->mode))
next->nlink--;
- de->nlink = 0;
+ write_unlock(&proc_subdir_lock);
+
+ proc_entry_rundown(de);
if (de == root)
break;
pde_put(de);
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index cd0c8d5ce9a1..0f3b557c9b77 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -197,8 +197,8 @@ extern ssize_t proc_simple_write(struct file *, const char __user *, size_t, lof
* inode.c
*/
struct pde_opener {
- struct file *file;
struct list_head lh;
+ struct file *file;
bool closing;
struct completion *c;
} __randomize_layout;
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index f5834488b67d..e2ed8e08cc7a 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -31,6 +31,7 @@
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
+#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"
@@ -545,9 +546,14 @@ out:
static int open_kcore(struct inode *inode, struct file *filp)
{
+ int ret = security_locked_down(LOCKDOWN_KCORE);
+
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (ret)
+ return ret;
+
filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!filp->private_data)
return -ENOMEM;
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 465ea0153b2a..8c1f1bb1a5ce 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -8,7 +8,6 @@
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/percpu.h>
-#include <linux/quicklist.h>
#include <linux/seq_file.h>
#include <linux/swap.h>
#include <linux/vmstat.h>
@@ -106,9 +105,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
global_zone_page_state(NR_KERNEL_STACK_KB));
show_val_kb(m, "PageTables: ",
global_zone_page_state(NR_PAGETABLE));
-#ifdef CONFIG_QUICKLIST
- show_val_kb(m, "Quicklists: ", quicklist_total_size());
-#endif
show_val_kb(m, "NFS_Unstable: ",
global_node_page_state(NR_UNSTABLE_NFS));
@@ -136,6 +132,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
show_val_kb(m, "ShmemPmdMapped: ",
global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
+ show_val_kb(m, "FileHugePages: ",
+ global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
+ show_val_kb(m, "FilePmdMapped: ",
+ global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
#endif
#ifdef CONFIG_CMA
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 544d1ee15aee..7c952ee732e6 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
return -EINVAL;
while (count > 0) {
- if (pfn_valid(pfn))
- ppage = pfn_to_page(pfn);
- else
- ppage = NULL;
+ /*
+ * TODO: ZONE_DEVICE support requires to identify
+ * memmaps that were actually initialized.
+ */
+ ppage = pfn_to_online_page(pfn);
+
if (!ppage || PageSlab(ppage) || page_has_type(ppage))
pcount = 0;
else
@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
return -EINVAL;
while (count > 0) {
- if (pfn_valid(pfn))
- ppage = pfn_to_page(pfn);
- else
- ppage = NULL;
+ /*
+ * TODO: ZONE_DEVICE support requires to identify
+ * memmaps that were actually initialized.
+ */
+ ppage = pfn_to_online_page(pfn);
if (put_user(stable_page_flags(ppage), out)) {
ret = -EFAULT;
@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
return -EINVAL;
while (count > 0) {
- if (pfn_valid(pfn))
- ppage = pfn_to_page(pfn);
- else
- ppage = NULL;
+ /*
+ * TODO: ZONE_DEVICE support requires to identify
+ * memmaps that were actually initialized.
+ */
+ ppage = pfn_to_online_page(pfn);
if (ppage)
ino = page_cgroup_ino(ppage);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 33f72d1b92cc..0b7c8dffc9ae 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -157,8 +157,7 @@ static int proc_get_tree(struct fs_context *fc)
{
struct proc_fs_context *ctx = fc->fs_private;
- fc->s_fs_info = ctx->pid_ns;
- return vfs_get_super(fc, vfs_get_keyed_super, proc_fill_super);
+ return get_tree_keyed(fc, proc_fill_super, ctx->pid_ns);
}
static void proc_fs_context_free(struct fs_context *fc)
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 80c305f206bb..37bdbec5b402 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -120,20 +120,23 @@ static int show_stat(struct seq_file *p, void *v)
getboottime64(&boottime);
for_each_possible_cpu(i) {
- struct kernel_cpustat *kcs = &kcpustat_cpu(i);
-
- user += kcs->cpustat[CPUTIME_USER];
- nice += kcs->cpustat[CPUTIME_NICE];
- system += kcs->cpustat[CPUTIME_SYSTEM];
- idle += get_idle_time(kcs, i);
- iowait += get_iowait_time(kcs, i);
- irq += kcs->cpustat[CPUTIME_IRQ];
- softirq += kcs->cpustat[CPUTIME_SOFTIRQ];
- steal += kcs->cpustat[CPUTIME_STEAL];
- guest += kcs->cpustat[CPUTIME_GUEST];
- guest_nice += kcs->cpustat[CPUTIME_GUEST_NICE];
- sum += kstat_cpu_irqs_sum(i);
- sum += arch_irq_stat_cpu(i);
+ struct kernel_cpustat kcpustat;
+ u64 *cpustat = kcpustat.cpustat;
+
+ kcpustat_cpu_fetch(&kcpustat, i);
+
+ user += cpustat[CPUTIME_USER];
+ nice += cpustat[CPUTIME_NICE];
+ system += cpustat[CPUTIME_SYSTEM];
+ idle += get_idle_time(&kcpustat, i);
+ iowait += get_iowait_time(&kcpustat, i);
+ irq += cpustat[CPUTIME_IRQ];
+ softirq += cpustat[CPUTIME_SOFTIRQ];
+ steal += cpustat[CPUTIME_STEAL];
+ guest += cpustat[CPUTIME_GUEST];
+ guest_nice += cpustat[CPUTIME_GUEST_NICE];
+ sum += kstat_cpu_irqs_sum(i);
+ sum += arch_irq_stat_cpu(i);
for (j = 0; j < NR_SOFTIRQS; j++) {
unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
@@ -157,19 +160,22 @@ static int show_stat(struct seq_file *p, void *v)
seq_putc(p, '\n');
for_each_online_cpu(i) {
- struct kernel_cpustat *kcs = &kcpustat_cpu(i);
+ struct kernel_cpustat kcpustat;
+ u64 *cpustat = kcpustat.cpustat;
+
+ kcpustat_cpu_fetch(&kcpustat, i);
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
- user = kcs->cpustat[CPUTIME_USER];
- nice = kcs->cpustat[CPUTIME_NICE];
- system = kcs->cpustat[CPUTIME_SYSTEM];
- idle = get_idle_time(kcs, i);
- iowait = get_iowait_time(kcs, i);
- irq = kcs->cpustat[CPUTIME_IRQ];
- softirq = kcs->cpustat[CPUTIME_SOFTIRQ];
- steal = kcs->cpustat[CPUTIME_STEAL];
- guest = kcs->cpustat[CPUTIME_GUEST];
- guest_nice = kcs->cpustat[CPUTIME_GUEST_NICE];
+ user = cpustat[CPUTIME_USER];
+ nice = cpustat[CPUTIME_NICE];
+ system = cpustat[CPUTIME_SYSTEM];
+ idle = get_idle_time(&kcpustat, i);
+ iowait = get_iowait_time(&kcpustat, i);
+ irq = cpustat[CPUTIME_IRQ];
+ softirq = cpustat[CPUTIME_SOFTIRQ];
+ steal = cpustat[CPUTIME_STEAL];
+ guest = cpustat[CPUTIME_GUEST];
+ guest_nice = cpustat[CPUTIME_GUEST_NICE];
seq_printf(p, "cpu%d", i);
seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
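The change above snapshots the per-CPU cpustat into a local struct via kcpustat_cpu_fetch() instead of dereferencing the per-CPU data live. A condensed sketch of the new access pattern, assuming only the accessor and fields used in this hunk (declarations come from <linux/kernel_stat.h>; the helper name is illustrative):

/* Sketch: read one CPU's user time via a local snapshot. */
static u64 snapshot_user_time(int cpu)
{
	struct kernel_cpustat kcpustat;

	kcpustat_cpu_fetch(&kcpustat, cpu);
	return kcpustat.cpustat[CPUTIME_USER];
}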
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 731642e0f5a0..9442631fd4af 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/mm.h>
+#include <linux/pagewalk.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
@@ -417,6 +417,7 @@ struct mem_size_stats {
unsigned long lazyfree;
unsigned long anonymous_thp;
unsigned long shmem_thp;
+ unsigned long file_thp;
unsigned long swap;
unsigned long shared_hugetlb;
unsigned long private_hugetlb;
@@ -461,7 +462,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
static void smaps_account(struct mem_size_stats *mss, struct page *page,
bool compound, bool young, bool dirty, bool locked)
{
- int i, nr = compound ? 1 << compound_order(page) : 1;
+ int i, nr = compound ? compound_nr(page) : 1;
unsigned long size = nr * PAGE_SIZE;
/*
@@ -513,7 +514,9 @@ static int smaps_pte_hole(unsigned long addr, unsigned long end,
return 0;
}
-#endif
+#else
+#define smaps_pte_hole NULL
+#endif /* CONFIG_SHMEM */
static void smaps_pte_entry(pte_t *pte, unsigned long addr,
struct mm_walk *walk)
@@ -586,7 +589,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
else if (is_zone_device_page(page))
/* pass */;
else
- VM_BUG_ON_PAGE(1, page);
+ mss->file_thp += HPAGE_PMD_SIZE;
smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
}
#else
@@ -729,21 +732,24 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
}
return 0;
}
+#else
+#define smaps_hugetlb_range NULL
#endif /* HUGETLB_PAGE */
+static const struct mm_walk_ops smaps_walk_ops = {
+ .pmd_entry = smaps_pte_range,
+ .hugetlb_entry = smaps_hugetlb_range,
+};
+
+static const struct mm_walk_ops smaps_shmem_walk_ops = {
+ .pmd_entry = smaps_pte_range,
+ .hugetlb_entry = smaps_hugetlb_range,
+ .pte_hole = smaps_pte_hole,
+};
+
static void smap_gather_stats(struct vm_area_struct *vma,
struct mem_size_stats *mss)
{
- struct mm_walk smaps_walk = {
- .pmd_entry = smaps_pte_range,
-#ifdef CONFIG_HUGETLB_PAGE
- .hugetlb_entry = smaps_hugetlb_range,
-#endif
- .mm = vma->vm_mm,
- };
-
- smaps_walk.private = mss;
-
#ifdef CONFIG_SHMEM
/* In case of smaps_rollup, reset the value from previous vma */
mss->check_shmem_swap = false;
@@ -765,12 +771,13 @@ static void smap_gather_stats(struct vm_area_struct *vma,
mss->swap += shmem_swapped;
} else {
mss->check_shmem_swap = true;
- smaps_walk.pte_hole = smaps_pte_hole;
+ walk_page_vma(vma, &smaps_shmem_walk_ops, mss);
+ return;
}
}
#endif
/* mmap_sem is held in m_start */
- walk_page_vma(vma, &smaps_walk);
+ walk_page_vma(vma, &smaps_walk_ops, mss);
}
#define SEQ_PUT_DEC(str, val) \
@@ -803,6 +810,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
+ SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp);
SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
mss->private_hugetlb >> 10, 7);
@@ -1118,6 +1126,11 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
return 0;
}
+static const struct mm_walk_ops clear_refs_walk_ops = {
+ .pmd_entry = clear_refs_pte_range,
+ .test_walk = clear_refs_test_walk,
+};
+
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -1151,12 +1164,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
struct clear_refs_private cp = {
.type = type,
};
- struct mm_walk clear_refs_walk = {
- .pmd_entry = clear_refs_pte_range,
- .test_walk = clear_refs_test_walk,
- .mm = mm,
- .private = &cp,
- };
if (type == CLEAR_REFS_MM_HIWATER_RSS) {
if (down_write_killable(&mm->mmap_sem)) {
@@ -1217,7 +1224,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
0, NULL, mm, 0, -1UL);
mmu_notifier_invalidate_range_start(&range);
}
- walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
+ walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
+ &cp);
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb, 0, -1);
@@ -1489,8 +1497,16 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
return err;
}
+#else
+#define pagemap_hugetlb_range NULL
#endif /* HUGETLB_PAGE */
+static const struct mm_walk_ops pagemap_ops = {
+ .pmd_entry = pagemap_pmd_range,
+ .pte_hole = pagemap_pte_hole,
+ .hugetlb_entry = pagemap_hugetlb_range,
+};
+
/*
* /proc/pid/pagemap - an array mapping virtual pages to pfns
*
@@ -1522,7 +1538,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
{
struct mm_struct *mm = file->private_data;
struct pagemapread pm;
- struct mm_walk pagemap_walk = {};
unsigned long src;
unsigned long svpfn;
unsigned long start_vaddr;
@@ -1550,14 +1565,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!pm.buffer)
goto out_mm;
- pagemap_walk.pmd_entry = pagemap_pmd_range;
- pagemap_walk.pte_hole = pagemap_pte_hole;
-#ifdef CONFIG_HUGETLB_PAGE
- pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
-#endif
- pagemap_walk.mm = mm;
- pagemap_walk.private = &pm;
-
src = *ppos;
svpfn = src / PM_ENTRY_BYTES;
start_vaddr = svpfn << PAGE_SHIFT;
@@ -1586,7 +1593,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
ret = down_read_killable(&mm->mmap_sem);
if (ret)
goto out_free;
- ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+ ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
up_read(&mm->mmap_sem);
start_vaddr = end;
@@ -1798,6 +1805,11 @@ static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
}
#endif
+static const struct mm_walk_ops show_numa_ops = {
+ .hugetlb_entry = gather_hugetlb_stats,
+ .pmd_entry = gather_pte_stats,
+};
+
/*
* Display pages allocated per node and memory policy via /proc.
*/
@@ -1809,12 +1821,6 @@ static int show_numa_map(struct seq_file *m, void *v)
struct numa_maps *md = &numa_priv->md;
struct file *file = vma->vm_file;
struct mm_struct *mm = vma->vm_mm;
- struct mm_walk walk = {
- .hugetlb_entry = gather_hugetlb_stats,
- .pmd_entry = gather_pte_stats,
- .private = md,
- .mm = mm,
- };
struct mempolicy *pol;
char buffer[64];
int nid;
@@ -1848,7 +1854,7 @@ static int show_numa_map(struct seq_file *m, void *v)
seq_puts(m, " huge");
/* mmap_sem is held by m_start */
- walk_page_vma(vma, &walk);
+ walk_page_vma(vma, &show_numa_ops, md);
if (!md->pages)
goto out;
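These task_mmu.c hunks move the per-walk callbacks out of the on-stack struct mm_walk into shared const struct mm_walk_ops tables, with the mm and private pointer passed at call time. A minimal sketch of the new calling convention (the counting callback and helper are illustrative, not from this patch):

#include <linux/pagewalk.h>

static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops count_walk_ops = {
	.pte_entry = count_present_pte,
};

/* Caller holds mmap_sem for read, as the call sites above do. */
static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	walk_page_range(mm, start, end, &count_walk_ops, &count);
	return count;
}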
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 7bcc92add72c..7b13988796e1 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -104,9 +104,9 @@ static int pfn_is_ram(unsigned long pfn)
}
/* Reads a page from the oldmem device from given offset. */
-static ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted)
+ssize_t read_from_oldmem(char *buf, size_t count,
+ u64 *ppos, int userbuf,
+ bool encrypted)
{
unsigned long pfn, offset;
size_t nr_bytes;
@@ -170,7 +170,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
*/
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
- return read_from_oldmem(buf, count, ppos, 0, sev_active());
+ return read_from_oldmem(buf, count, ppos, 0, false);
}
/*
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index e16fb8f2049e..273ee82d8aa9 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -88,7 +88,7 @@ static inline void mangle(struct seq_file *m, const char *s)
static void show_type(struct seq_file *m, struct super_block *sb)
{
mangle(m, sb->s_type->name);
- if (sb->s_subtype && sb->s_subtype[0]) {
+ if (sb->s_subtype) {
seq_putc(m, '.');
mangle(m, sb->s_subtype);
}
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 3d7024662d29..d896457e7c11 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -793,7 +793,7 @@ static void pstore_timefunc(struct timer_list *unused)
jiffies + msecs_to_jiffies(pstore_update_ms));
}
-void __init pstore_choose_compression(void)
+static void __init pstore_choose_compression(void)
{
const struct pstore_zbackend *step;
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 2bb3468fc93a..8caff834f002 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -144,6 +144,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
(time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
&header_length) == 3) {
+ time->tv_nsec *= 1000;
if (data_type == 'C')
*compressed = true;
else
@@ -151,6 +152,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
(time64_t *)&time->tv_sec, &time->tv_nsec,
&header_length) == 2) {
+ time->tv_nsec *= 1000;
*compressed = false;
} else {
time->tv_sec = 0;
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 922d083bbc7c..e8da1cde87b9 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -201,6 +201,8 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
s->s_op = &qnx4_sops;
s->s_magic = QNX4_SUPER_MAGIC;
s->s_flags |= SB_RDONLY; /* Yup, read-only yet */
+ s->s_time_min = 0;
+ s->s_time_max = U32_MAX;
/* Check the superblock signature. Since the qnx4 code is
dangerous, we should leave as quickly as possible
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 0f8b0ff1ba43..345db56c98fd 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -429,6 +429,8 @@ mmi_success:
s->s_op = &qnx6_sops;
s->s_magic = QNX6_SUPER_MAGIC;
s->s_flags |= SB_RDONLY; /* Yup, read-only yet */
+ s->s_time_min = 0;
+ s->s_time_max = U32_MAX;
/* ease the later tree level calculations */
sbi = QNX6_SB(s);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index be9c471cdbc8..4639d53e96a3 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -497,7 +497,7 @@ int dquot_release(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
- if (atomic_read(&dquot->dq_count) > 1)
+ if (dquot_is_busy(dquot))
goto out_dqlock;
if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
@@ -595,7 +595,6 @@ int dquot_scan_active(struct super_block *sb,
/* Now we have active dquot so we can just increase use count */
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
- dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
old_dquot = dquot;
/*
@@ -623,7 +622,7 @@ EXPORT_SYMBOL(dquot_scan_active);
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
- struct list_head *dirty;
+ struct list_head dirty;
struct dquot *dquot;
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
@@ -637,9 +636,10 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
if (!sb_has_quota_active(sb, cnt))
continue;
spin_lock(&dq_list_lock);
- dirty = &dqopt->info[cnt].dqi_dirty_list;
- while (!list_empty(dirty)) {
- dquot = list_first_entry(dirty, struct dquot,
+ /* Move list away to avoid livelock. */
+ list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
+ while (!list_empty(&dirty)) {
+ dquot = list_first_entry(&dirty, struct dquot,
dq_dirty);
WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
@@ -649,7 +649,6 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
* use count */
dqgrab(dquot);
spin_unlock(&dq_list_lock);
- dqstats_inc(DQST_LOOKUPS);
err = sb->dq_op->write_dquot(dquot);
if (err) {
/*
@@ -2162,14 +2161,29 @@ int dquot_file_open(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(dquot_file_open);
+static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct inode *inode = dqopt->files[type];
+
+ if (!inode)
+ return;
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ inode_lock(inode);
+ inode->i_flags &= ~S_NOQUOTA;
+ inode_unlock(inode);
+ }
+ dqopt->files[type] = NULL;
+ iput(inode);
+}
+
/*
* Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
*/
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
- int cnt, ret = 0;
+ int cnt;
struct quota_info *dqopt = sb_dqopt(sb);
- struct inode *toputinode[MAXQUOTAS];
/* s_umount should be held in exclusive mode */
if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
@@ -2191,7 +2205,6 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- toputinode[cnt] = NULL;
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_loaded(sb, cnt))
@@ -2211,8 +2224,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
dqopt->flags &= ~dquot_state_flag(
DQUOT_SUSPENDED, cnt);
spin_unlock(&dq_state_lock);
- iput(dqopt->files[cnt]);
- dqopt->files[cnt] = NULL;
+ vfs_cleanup_quota_inode(sb, cnt);
continue;
}
spin_unlock(&dq_state_lock);
@@ -2234,10 +2246,6 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
if (dqopt->ops[cnt]->free_file_info)
dqopt->ops[cnt]->free_file_info(sb, cnt);
put_quota_format(dqopt->info[cnt].dqi_format);
-
- toputinode[cnt] = dqopt->files[cnt];
- if (!sb_has_quota_loaded(sb, cnt))
- dqopt->files[cnt] = NULL;
dqopt->info[cnt].dqi_flags = 0;
dqopt->info[cnt].dqi_igrace = 0;
dqopt->info[cnt].dqi_bgrace = 0;
@@ -2259,32 +2267,22 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
* must also discard the blockdev buffers so that we see the
* changes done by userspace on the next quotaon() */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- /* This can happen when suspending quotas on remount-ro... */
- if (toputinode[cnt] && !sb_has_quota_loaded(sb, cnt)) {
- inode_lock(toputinode[cnt]);
- toputinode[cnt]->i_flags &= ~S_NOQUOTA;
- truncate_inode_pages(&toputinode[cnt]->i_data, 0);
- inode_unlock(toputinode[cnt]);
- mark_inode_dirty_sync(toputinode[cnt]);
+ if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
+ inode_lock(dqopt->files[cnt]);
+ truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+ inode_unlock(dqopt->files[cnt]);
}
if (sb->s_bdev)
invalidate_bdev(sb->s_bdev);
put_inodes:
+ /* We are done when suspending quotas */
+ if (flags & DQUOT_SUSPENDED)
+ return 0;
+
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (toputinode[cnt]) {
- /* On remount RO, we keep the inode pointer so that we
- * can reenable quota on the subsequent remount RW. We
- * have to check 'flags' variable and not use sb_has_
- * function because another quotaon / quotaoff could
- * change global state before we got here. We refuse
- * to suspend quotas when there is pending delete on
- * the quota file... */
- if (!(flags & DQUOT_SUSPENDED))
- iput(toputinode[cnt]);
- else if (!toputinode[cnt]->i_nlink)
- ret = -EBUSY;
- }
- return ret;
+ if (!sb_has_quota_loaded(sb, cnt))
+ vfs_cleanup_quota_inode(sb, cnt);
+ return 0;
}
EXPORT_SYMBOL(dquot_disable);
@@ -2299,28 +2297,52 @@ EXPORT_SYMBOL(dquot_quota_off);
* Turn quotas on on a device
*/
-/*
- * Helper function to turn quotas on when we already have the inode of
- * quota file and no quota information is loaded.
- */
-static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+static int vfs_setup_quota_inode(struct inode *inode, int type)
+{
+ struct super_block *sb = inode->i_sb;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (!S_ISREG(inode->i_mode))
+ return -EACCES;
+ if (IS_RDONLY(inode))
+ return -EROFS;
+ if (sb_has_quota_loaded(sb, type))
+ return -EBUSY;
+
+ dqopt->files[type] = igrab(inode);
+ if (!dqopt->files[type])
+ return -EIO;
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ /* We don't want quota and atime on quota files (deadlocks
+ * possible) Also nobody should write to the file - we use
+ * special IO operations which ignore the immutable bit. */
+ inode_lock(inode);
+ inode->i_flags |= S_NOQUOTA;
+ inode_unlock(inode);
+ /*
+ * When S_NOQUOTA is set, remove dquot references as no more
+ * references can be added
+ */
+ __dquot_drop(inode);
+ }
+ return 0;
+}
+
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
unsigned int flags)
{
struct quota_format_type *fmt = find_quota_format(format_id);
- struct super_block *sb = inode->i_sb;
struct quota_info *dqopt = sb_dqopt(sb);
int error;
+ /* Just unsuspend quotas? */
+ BUG_ON(flags & DQUOT_SUSPENDED);
+ /* s_umount should be held in exclusive mode */
+ if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
+ up_read(&sb->s_umount);
+
if (!fmt)
return -ESRCH;
- if (!S_ISREG(inode->i_mode)) {
- error = -EACCES;
- goto out_fmt;
- }
- if (IS_RDONLY(inode)) {
- error = -EROFS;
- goto out_fmt;
- }
if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
(type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
error = -EINVAL;
@@ -2352,27 +2374,9 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
invalidate_bdev(sb->s_bdev);
}
- if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
- /* We don't want quota and atime on quota files (deadlocks
- * possible) Also nobody should write to the file - we use
- * special IO operations which ignore the immutable bit. */
- inode_lock(inode);
- inode->i_flags |= S_NOQUOTA;
- inode_unlock(inode);
- /*
- * When S_NOQUOTA is set, remove dquot references as no more
- * references can be added
- */
- __dquot_drop(inode);
- }
-
- error = -EIO;
- dqopt->files[type] = igrab(inode);
- if (!dqopt->files[type])
- goto out_file_flags;
error = -EINVAL;
if (!fmt->qf_ops->check_quota_file(sb, type))
- goto out_file_init;
+ goto out_fmt;
dqopt->ops[type] = fmt->qf_ops;
dqopt->info[type].dqi_format = fmt;
@@ -2380,7 +2384,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
error = dqopt->ops[type]->read_file_info(sb, type);
if (error < 0)
- goto out_file_init;
+ goto out_fmt;
if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
spin_lock(&dq_data_lock);
dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
@@ -2395,24 +2399,36 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
dquot_disable(sb, type, flags);
return error;
-out_file_init:
- dqopt->files[type] = NULL;
- iput(inode);
-out_file_flags:
- inode_lock(inode);
- inode->i_flags &= ~S_NOQUOTA;
- inode_unlock(inode);
out_fmt:
put_quota_format(fmt);
return error;
}
+EXPORT_SYMBOL(dquot_load_quota_sb);
+
+/*
+ * More powerful function for turning on quotas on a given quota inode,
+ * allowing individual quota flags to be set
+ */
+int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
+ unsigned int flags)
+{
+ int err;
+
+ err = vfs_setup_quota_inode(inode, type);
+ if (err < 0)
+ return err;
+ err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
+ if (err < 0)
+ vfs_cleanup_quota_inode(inode->i_sb, type);
+ return err;
+}
+EXPORT_SYMBOL(dquot_load_quota_inode);
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
- struct inode *inode;
int ret = 0, cnt;
unsigned int flags;
@@ -2426,8 +2442,6 @@ int dquot_resume(struct super_block *sb, int type)
if (!sb_has_quota_suspended(sb, cnt))
continue;
- inode = dqopt->files[cnt];
- dqopt->files[cnt] = NULL;
spin_lock(&dq_state_lock);
flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED,
@@ -2436,9 +2450,10 @@ int dquot_resume(struct super_block *sb, int type)
spin_unlock(&dq_state_lock);
flags = dquot_generic_flag(flags, cnt);
- ret = vfs_load_quota_inode(inode, cnt,
- dqopt->info[cnt].dqi_fmt_id, flags);
- iput(inode);
+ ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
+ flags);
+ if (ret < 0)
+ vfs_cleanup_quota_inode(sb, cnt);
}
return ret;
@@ -2455,7 +2470,7 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id,
if (path->dentry->d_sb != sb)
error = -EXDEV;
else
- error = vfs_load_quota_inode(d_inode(path->dentry), type,
+ error = dquot_load_quota_inode(d_inode(path->dentry), type,
format_id, DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED);
return error;
@@ -2463,41 +2478,6 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id,
EXPORT_SYMBOL(dquot_quota_on);
/*
- * More powerful function for turning on quotas allowing setting
- * of individual quota flags
- */
-int dquot_enable(struct inode *inode, int type, int format_id,
- unsigned int flags)
-{
- struct super_block *sb = inode->i_sb;
-
- /* Just unsuspend quotas? */
- BUG_ON(flags & DQUOT_SUSPENDED);
- /* s_umount should be held in exclusive mode */
- if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
- up_read(&sb->s_umount);
-
- if (!flags)
- return 0;
- /* Just updating flags needed? */
- if (sb_has_quota_loaded(sb, type)) {
- if (flags & DQUOT_USAGE_ENABLED &&
- sb_has_quota_usage_enabled(sb, type))
- return -EBUSY;
- if (flags & DQUOT_LIMITS_ENABLED &&
- sb_has_quota_limits_enabled(sb, type))
- return -EBUSY;
- spin_lock(&dq_state_lock);
- sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
- spin_unlock(&dq_state_lock);
- return 0;
- }
-
- return vfs_load_quota_inode(inode, type, format_id, flags);
-}
-EXPORT_SYMBOL(dquot_enable);
-
-/*
 * This function is used when a filesystem needs to initialize quotas
* during mount time.
*/
@@ -2518,7 +2498,7 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
error = security_quota_on(dentry);
if (!error)
- error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
+ error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
out:
@@ -2543,13 +2523,17 @@ static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
if (!(flags & qtype_enforce_flag(type)))
continue;
/* Can't enforce without accounting */
- if (!sb_has_quota_usage_enabled(sb, type))
- return -EINVAL;
- ret = dquot_enable(dqopt->files[type], type,
- dqopt->info[type].dqi_fmt_id,
- DQUOT_LIMITS_ENABLED);
- if (ret < 0)
+ if (!sb_has_quota_usage_enabled(sb, type)) {
+ ret = -EINVAL;
goto out_err;
+ }
+ if (sb_has_quota_limits_enabled(sb, type)) {
+ ret = -EBUSY;
+ goto out_err;
+ }
+ spin_lock(&dq_state_lock);
+ dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
+ spin_unlock(&dq_state_lock);
}
return 0;
out_err:
@@ -2599,10 +2583,12 @@ static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
out_err:
/* Backout enforcement disabling we already did */
for (type--; type >= 0; type--) {
- if (flags & qtype_enforce_flag(type))
- dquot_enable(dqopt->files[type], type,
- dqopt->info[type].dqi_fmt_id,
- DQUOT_LIMITS_ENABLED);
+ if (flags & qtype_enforce_flag(type)) {
+ spin_lock(&dq_state_lock);
+ dqopt->flags |=
+ dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
+ spin_unlock(&dq_state_lock);
+ }
}
return ret;
}
@@ -2731,7 +2717,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
if (check_blim) {
if (!dm->dqb_bsoftlimit ||
- dm->dqb_curspace + dm->dqb_rsvspace < dm->dqb_bsoftlimit) {
+ dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
} else if (!(di->d_fieldmask & QC_SPC_TIMER))
@@ -2740,7 +2726,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
}
if (check_ilim) {
if (!dm->dqb_isoftlimit ||
- dm->dqb_curinodes < dm->dqb_isoftlimit) {
+ dm->dqb_curinodes <= dm->dqb_isoftlimit) {
dm->dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
} else if (!(di->d_fieldmask & QC_INO_TIMER))
@@ -2800,8 +2786,10 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state)
tstate->flags |= QCI_LIMITS_ENFORCED;
tstate->spc_timelimit = mi->dqi_bgrace;
tstate->ino_timelimit = mi->dqi_igrace;
- tstate->ino = dqopt->files[type]->i_ino;
- tstate->blocks = dqopt->files[type]->i_blocks;
+ if (dqopt->files[type]) {
+ tstate->ino = dqopt->files[type]->i_ino;
+ tstate->blocks = dqopt->files[type]->i_blocks;
+ }
tstate->nextents = 1; /* We don't know... */
spin_unlock(&dq_data_lock);
}
@@ -2860,68 +2848,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
static int do_proc_dqstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int type = (int *)table->data - dqstats.stat;
+ unsigned int type = (unsigned long *)table->data - dqstats.stat;
+ s64 value = percpu_counter_sum(&dqstats.counter[type]);
+
+ /* Filter negative values for non-monotonic counters */
+ if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
+ type == DQST_FREE_DQUOTS))
+ value = 0;
/* Update global table */
- dqstats.stat[type] =
- percpu_counter_sum_positive(&dqstats.counter[type]);
- return proc_dointvec(table, write, buffer, lenp, ppos);
+ dqstats.stat[type] = value;
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static struct ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
.data = &dqstats.stat[DQST_LOOKUPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "drops",
.data = &dqstats.stat[DQST_DROPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "reads",
.data = &dqstats.stat[DQST_READS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "writes",
.data = &dqstats.stat[DQST_WRITES],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "cache_hits",
.data = &dqstats.stat[DQST_CACHE_HITS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "allocated_dquots",
.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "free_dquots",
.data = &dqstats.stat[DQST_FREE_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "syncs",
.data = &dqstats.stat[DQST_SYNCS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
@@ -2983,11 +2976,7 @@ static int __init dquot_init(void)
/* Find power-of-two hlist_heads which can fit into allocation */
nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
- dq_hash_bits = 0;
- do {
- dq_hash_bits++;
- } while (nr_hash >> dq_hash_bits);
- dq_hash_bits--;
+ dq_hash_bits = ilog2(nr_hash);
nr_hash = 1UL << dq_hash_bits;
dq_hash_mask = nr_hash - 1;
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index cb13fb76dbee..5444d3c4d93f 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -60,8 +60,6 @@ static int quota_sync_all(int type)
{
int ret;
- if (type >= MAXQUOTAS)
- return -EINVAL;
ret = security_quotactl(Q_SYNC, type, 0, NULL);
if (!ret)
iterate_supers(quota_sync_one, &type);
@@ -686,8 +684,6 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
{
int ret;
- if (type >= MAXQUOTAS)
- return -EINVAL;
type = array_index_nospec(type, MAXQUOTAS);
/*
* Quota not supported on this fs? Check this before s_quota_types
@@ -831,6 +827,9 @@ int kernel_quotactl(unsigned int cmd, const char __user *special,
cmds = cmd >> SUBCMDSHIFT;
type = cmd & SUBCMDMASK;
+ if (type >= MAXQUOTAS)
+ return -EINVAL;
+
/*
* As a special case Q_SYNC can be called without a specific device.
* It will iterate all superblocks that have quota enabled and call
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index c740e5572eb8..cd92e5fa0062 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -217,7 +217,6 @@ static const struct quota_format_ops v1_format_ops = {
.check_quota_file = v1_check_quota_file,
.read_file_info = v1_read_file_info,
.write_file_info = v1_write_file_info,
- .free_file_info = NULL,
.read_dqblk = v1_read_dqblk,
.commit_dqblk = v1_commit_dqblk,
};
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 733c6b4193dc..d82636e8eb65 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -36,6 +36,8 @@
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include "internal.h"
struct ramfs_mount_opts {
@@ -175,62 +177,52 @@ static const struct super_operations ramfs_ops = {
.show_options = ramfs_show_options,
};
-enum {
+enum ramfs_param {
Opt_mode,
- Opt_err
};
-static const match_table_t tokens = {
- {Opt_mode, "mode=%o"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec ramfs_param_specs[] = {
+ fsparam_u32oct("mode", Opt_mode),
+ {}
};
-static int ramfs_parse_options(char *data, struct ramfs_mount_opts *opts)
+const struct fs_parameter_description ramfs_fs_parameters = {
+ .name = "ramfs",
+ .specs = ramfs_param_specs,
+};
+
+static int ramfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- substring_t args[MAX_OPT_ARGS];
- int option;
- int token;
- char *p;
-
- opts->mode = RAMFS_DEFAULT_MODE;
-
- while ((p = strsep(&data, ",")) != NULL) {
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_mode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->mode = option & S_IALLUGO;
- break;
+ struct fs_parse_result result;
+ struct ramfs_fs_info *fsi = fc->s_fs_info;
+ int opt;
+
+ opt = fs_parse(fc, &ramfs_fs_parameters, param, &result);
+ if (opt < 0) {
/*
* We might like to report bad mount options here;
* but traditionally ramfs has ignored all mount options,
* and as it is used as a !CONFIG_SHMEM simple substitute
* for tmpfs, better continue to ignore other mount options.
*/
- }
+ if (opt == -ENOPARAM)
+ opt = 0;
+ return opt;
+ }
+
+ switch (opt) {
+ case Opt_mode:
+ fsi->mount_opts.mode = result.uint_32 & S_IALLUGO;
+ break;
}
return 0;
}
-int ramfs_fill_super(struct super_block *sb, void *data, int silent)
+static int ramfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
- struct ramfs_fs_info *fsi;
+ struct ramfs_fs_info *fsi = sb->s_fs_info;
struct inode *inode;
- int err;
-
- fsi = kzalloc(sizeof(struct ramfs_fs_info), GFP_KERNEL);
- sb->s_fs_info = fsi;
- if (!fsi)
- return -ENOMEM;
-
- err = ramfs_parse_options(data, &fsi->mount_opts);
- if (err)
- return err;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_SIZE;
@@ -247,10 +239,34 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-struct dentry *ramfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ramfs_get_tree(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, data, ramfs_fill_super);
+ return get_tree_nodev(fc, ramfs_fill_super);
+}
+
+static void ramfs_free_fc(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations ramfs_context_ops = {
+ .free = ramfs_free_fc,
+ .parse_param = ramfs_parse_param,
+ .get_tree = ramfs_get_tree,
+};
+
+int ramfs_init_fs_context(struct fs_context *fc)
+{
+ struct ramfs_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(*fsi), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ fsi->mount_opts.mode = RAMFS_DEFAULT_MODE;
+ fc->s_fs_info = fsi;
+ fc->ops = &ramfs_context_ops;
+ return 0;
}
static void ramfs_kill_sb(struct super_block *sb)
@@ -261,7 +277,8 @@ static void ramfs_kill_sb(struct super_block *sb)
static struct file_system_type ramfs_fs_type = {
.name = "ramfs",
- .mount = ramfs_mount,
+ .init_fs_context = ramfs_init_fs_context,
+ .parameters = &ramfs_fs_parameters,
.kill_sb = ramfs_kill_sb,
.fs_flags = FS_USERNS_MOUNT,
};
diff --git a/fs/read_write.c b/fs/read_write.c
index 1f5088dec566..5bbf587f5bc1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1811,10 +1811,7 @@ static int generic_remap_check_len(struct inode *inode_in,
return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
}
-/*
- * Read a page's worth of file data into the page cache. Return the page
- * locked.
- */
+/* Read a page's worth of file data into the page cache. */
static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
{
struct page *page;
@@ -1826,11 +1823,33 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
put_page(page);
return ERR_PTR(-EIO);
}
- lock_page(page);
return page;
}
/*
+ * Lock two pages, ensuring that we lock in offset order if the pages are from
+ * the same file.
+ */
+static void vfs_lock_two_pages(struct page *page1, struct page *page2)
+{
+ /* Always lock in order of increasing index. */
+ if (page1->index > page2->index)
+ swap(page1, page2);
+
+ lock_page(page1);
+ if (page1 != page2)
+ lock_page(page2);
+}
+
+/* Unlock two pages, being careful not to unlock the same page twice. */
+static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
+{
+ unlock_page(page1);
+ if (page1 != page2)
+ unlock_page(page2);
+}
+
+/*
* Compare extents of two files to see if they are the same.
* Caller must have locked both inodes to prevent write races.
*/
@@ -1867,10 +1886,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
dest_page = vfs_dedupe_get_page(dest, destoff);
if (IS_ERR(dest_page)) {
error = PTR_ERR(dest_page);
- unlock_page(src_page);
put_page(src_page);
goto out_error;
}
+
+ vfs_lock_two_pages(src_page, dest_page);
+
+ /*
+ * Now that we've locked both pages, make sure they're still
+ * mapped to the file data we're interested in. If not,
+ * someone is invalidating pages on us and we lose.
+ */
+ if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
+ src_page->mapping != src->i_mapping ||
+ dest_page->mapping != dest->i_mapping) {
+ same = false;
+ goto unlock;
+ }
+
src_addr = kmap_atomic(src_page);
dest_addr = kmap_atomic(dest_page);
@@ -1882,8 +1915,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
kunmap_atomic(dest_addr);
kunmap_atomic(src_addr);
- unlock_page(dest_page);
- unlock_page(src_page);
+unlock:
+ vfs_unlock_two_pages(src_page, dest_page);
put_page(dest_page);
put_page(src_page);
diff --git a/fs/readdir.c b/fs/readdir.c
index 2f6a4534e0df..d26d5ea4de7b 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -20,9 +20,23 @@
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/compat.h>
-
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
+/*
+ * Note the "unsafe_put_user() semantics: we goto a
+ * label for errors.
+ */
+#define unsafe_copy_dirent_name(_dst, _src, _len, label) do { \
+ char __user *dst = (_dst); \
+ const char *src = (_src); \
+ size_t len = (_len); \
+ unsafe_put_user(0, dst+len, label); \
+ unsafe_copy_to_user(dst, src, len, label); \
+} while (0)
+
+
int iterate_dir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
@@ -65,6 +79,40 @@ out:
EXPORT_SYMBOL(iterate_dir);
/*
+ * POSIX says that a dirent name cannot contain NULL or a '/'.
+ *
+ * It's not 100% clear what we should really do in this case.
+ * The filesystem is clearly corrupted, but returning a hard
+ * error means that you now don't see any of the other names
+ * either, so that isn't a perfect alternative.
+ *
+ * And if you return an error, what error do you use? Several
+ * filesystems seem to have decided on EUCLEAN being the error
+ * code for EFSCORRUPTED, and that may be the error to use. Or
+ * just EIO, which is perhaps more obvious to users.
+ *
+ * In order to see the other file names in the directory, the
+ * caller might want to make this a "soft" error: skip the
+ * entry, and return the error at the end instead.
+ *
+ * Note that this should likely do a "memchr(name, 0, len)"
+ * check too, since that would be filesystem corruption as
+ * well. However, that case can't actually confuse user space,
+ * which has to do a strlen() on the name anyway to find the
+ * filename length, and the above "soft error" worry means
+ * that it's probably better left alone until we have that
+ * issue clarified.
+ */
+static int verify_dirent_name(const char *name, int len)
+{
+ if (!len)
+ return -EIO;
+ if (memchr(name, '/', len))
+ return -EIO;
+ return 0;
+}
+
+/*
* Traditional linux readdir() handling..
*
* "count=1" is a special case, meaning that the buffer is one
@@ -173,6 +221,9 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
sizeof(long));
+ buf->error = verify_dirent_name(name, namlen);
+ if (unlikely(buf->error))
+ return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
@@ -182,28 +233,31 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
return -EOVERFLOW;
}
dirent = buf->previous;
- if (dirent) {
- if (signal_pending(current))
- return -EINTR;
- if (__put_user(offset, &dirent->d_off))
- goto efault;
- }
- dirent = buf->current_dir;
- if (__put_user(d_ino, &dirent->d_ino))
- goto efault;
- if (__put_user(reclen, &dirent->d_reclen))
- goto efault;
- if (copy_to_user(dirent->d_name, name, namlen))
- goto efault;
- if (__put_user(0, dirent->d_name + namlen))
- goto efault;
- if (__put_user(d_type, (char __user *) dirent + reclen - 1))
+ if (dirent && signal_pending(current))
+ return -EINTR;
+
+ /*
+ * Note! This range-checks 'previous' (which may be NULL).
+ * The real range was checked in getdents
+ */
+ if (!user_access_begin(dirent, sizeof(*dirent)))
goto efault;
+ if (dirent)
+ unsafe_put_user(offset, &dirent->d_off, efault_end);
+ dirent = buf->current_dir;
+ unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
+ unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
+ unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
+ unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
+ user_access_end();
+
buf->previous = dirent;
dirent = (void __user *)dirent + reclen;
buf->current_dir = dirent;
buf->count -= reclen;
return 0;
+efault_end:
+ user_access_end();
efault:
buf->error = -EFAULT;
return -EFAULT;
@@ -259,34 +313,38 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
sizeof(u64));
+ buf->error = verify_dirent_name(name, namlen);
+ if (unlikely(buf->error))
+ return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
dirent = buf->previous;
- if (dirent) {
- if (signal_pending(current))
- return -EINTR;
- if (__put_user(offset, &dirent->d_off))
- goto efault;
- }
- dirent = buf->current_dir;
- if (__put_user(ino, &dirent->d_ino))
- goto efault;
- if (__put_user(0, &dirent->d_off))
- goto efault;
- if (__put_user(reclen, &dirent->d_reclen))
- goto efault;
- if (__put_user(d_type, &dirent->d_type))
- goto efault;
- if (copy_to_user(dirent->d_name, name, namlen))
- goto efault;
- if (__put_user(0, dirent->d_name + namlen))
+ if (dirent && signal_pending(current))
+ return -EINTR;
+
+ /*
+ * Note! This range-checks 'previous' (which may be NULL).
+ * The real range was checked in getdents
+ */
+ if (!user_access_begin(dirent, sizeof(*dirent)))
goto efault;
+ if (dirent)
+ unsafe_put_user(offset, &dirent->d_off, efault_end);
+ dirent = buf->current_dir;
+ unsafe_put_user(ino, &dirent->d_ino, efault_end);
+ unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
+ unsafe_put_user(d_type, &dirent->d_type, efault_end);
+ unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
+ user_access_end();
+
buf->previous = dirent;
dirent = (void __user *)dirent + reclen;
buf->current_dir = dirent;
buf->count -= reclen;
return 0;
+efault_end:
+ user_access_end();
efault:
buf->error = -EFAULT;
return -EFAULT;
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 9c02d96d3a42..4075e41408b4 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -239,10 +239,8 @@ static int balance_leaf_when_delete_left(struct tree_balance *tb)
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int item_pos = PATH_LAST_POSITION(tb->tb_path);
struct buffer_info bi;
int n;
- struct item_head *ih;
RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
"vs- 12000: level: wrong FR %z", tb->FR[0]);
@@ -251,7 +249,6 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
"PAP-12010: tree can not be empty");
- ih = item_head(tbS0, item_pos);
buffer_info_init_tbS0(tb, &bi);
/* Delete or truncate the item */
@@ -298,7 +295,6 @@ static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
/* part of new item falls into L[0] */
int new_item_len, shift;
- int version;
ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
@@ -317,8 +313,6 @@ static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
min_t(int, tb->zeroes_num, ih_item_len(ih)));
- version = ih_version(ih);
-
/*
* Calculate key component, item length and body to
* insert into S[0]
@@ -632,7 +626,6 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
int n = B_NR_ITEMS(tbS0);
struct buffer_info bi;
- int ret;
/* new item or part of it doesn't fall into R[0] */
if (n - tb->rnum[0] >= tb->item_pos) {
@@ -646,13 +639,11 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {
loff_t old_key_comp, old_len, r_zeroes_number;
const char *r_body;
- int version, shift;
+ int shift;
loff_t offset;
leaf_shift_right(tb, tb->rnum[0] - 1, -1);
- version = ih_version(ih);
-
/* Remember key component and item length */
old_key_comp = le_ih_k_offset(ih);
old_len = ih_item_len(ih);
@@ -698,7 +689,7 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
/* whole new item falls into R[0] */
/* Shift rnum[0]-1 items to R[0] */
- ret = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
+ leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
/* Insert new item into R[0] */
buffer_info_init_right(tb, &bi);
@@ -950,14 +941,12 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
int old_key_comp, old_len, r_zeroes_number;
const char *r_body;
- int version;
/* Move snum[i]-1 items from S[0] to S_new[i] */
leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
tb->S_new[i]);
/* Remember key component and item length */
- version = ih_version(ih);
old_key_comp = le_ih_k_offset(ih);
old_len = ih_item_len(ih);
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 843aadcc123c..84cf8bdbec9c 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -38,16 +38,10 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
BUG_ON(!S_ISREG(inode->i_mode));
- if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
+ if (!atomic_dec_and_mutex_lock(&REISERFS_I(inode)->openers,
+ &REISERFS_I(inode)->tailpack))
return 0;
- mutex_lock(&REISERFS_I(inode)->tailpack);
-
- if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
- mutex_unlock(&REISERFS_I(inode)->tailpack);
- return 0;
- }
-
/* fast out for when nothing needs to be done */
if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
!tail_has_to_be_packed(inode)) &&
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 6b0ddb2a9091..117092224111 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -376,7 +376,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
int to, int to_bytes, short *snum012, int flow)
{
int i;
- int cur_free;
int units;
struct virtual_node *vn = tb->tb_vn;
int total_node_size, max_node_size, current_item_size;
@@ -438,7 +437,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
/* leaf level */
needed_nodes = 1;
total_node_size = 0;
- cur_free = max_node_size;
/* start from 'from'-th item */
start_item = from;
@@ -1734,14 +1732,12 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
* and Fh is its father.
*/
struct buffer_head *Sh, *Fh;
- int maxsize, ret;
+ int ret;
int lfree, rfree /* free space in L and R */ ;
Sh = PATH_H_PBUFFER(tb->tb_path, h);
Fh = PATH_H_PPARENT(tb->tb_path, h);
- maxsize = MAX_CHILD_SIZE(Sh);
-
/*
* using tb->insert_size[h], which is negative in this case,
* create_virtual_node calculates:
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 132ec4406ed0..6419e6dacc39 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2097,6 +2097,15 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
goto out_inserted_sd;
}
+ /*
+ * Mark it private if we're creating the privroot
+ * or something under it.
+ */
+ if (IS_PRIVATE(dir) || dentry == REISERFS_SB(sb)->priv_root) {
+ inode->i_flags |= S_PRIVATE;
+ inode->i_opflags &= ~IOP_XATTR;
+ }
+
if (reiserfs_posixacl(inode->i_sb)) {
reiserfs_write_unlock(inode->i_sb);
retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
@@ -2111,8 +2120,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
reiserfs_warning(inode->i_sb, "jdm-13090",
"ACLs aren't enabled in the fs, "
"but vfs thinks they are!");
- } else if (IS_PRIVATE(dir))
- inode->i_flags |= S_PRIVATE;
+ }
if (security->name) {
reiserfs_write_unlock(inode->i_sb);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 4517a1394c6f..4b3e3e73b512 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -891,7 +891,6 @@ static int flush_older_commits(struct super_block *s,
struct list_head *entry;
unsigned int trans_id = jl->j_trans_id;
unsigned int other_trans_id;
- unsigned int first_trans_id;
find_first:
/*
@@ -914,8 +913,6 @@ find_first:
return 0;
}
- first_trans_id = first_jl->j_trans_id;
-
entry = &first_jl->j_list;
while (1) {
other_jl = JOURNAL_LIST_ENTRY(entry);
@@ -1351,7 +1348,7 @@ static int flush_journal_list(struct super_block *s,
struct reiserfs_journal_list *jl, int flushall)
{
struct reiserfs_journal_list *pjl;
- struct reiserfs_journal_cnode *cn, *last;
+ struct reiserfs_journal_cnode *cn;
int count;
int was_jwait = 0;
int was_dirty = 0;
@@ -1509,7 +1506,6 @@ static int flush_journal_list(struct super_block *s,
b_blocknr, __func__);
}
free_cnode:
- last = cn;
cn = cn->next;
if (saved_bh) {
/*
@@ -1792,7 +1788,6 @@ static int flush_used_journal_lists(struct super_block *s,
{
unsigned long len = 0;
unsigned long cur_len;
- int ret;
int i;
int limit = 256;
struct reiserfs_journal_list *tjl;
@@ -1829,9 +1824,9 @@ static int flush_used_journal_lists(struct super_block *s,
* transactions, but only bother if we've actually spanned
* across multiple lists
*/
- if (flush_jl != jl) {
- ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
- }
+ if (flush_jl != jl)
+ kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
+
flush_journal_list(s, flush_jl, 1);
put_journal_list(s, flush_jl);
put_journal_list(s, jl);
@@ -1911,7 +1906,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
struct super_block *sb, int error)
{
struct reiserfs_transaction_handle myth;
- int flushed = 0;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
/*
@@ -1933,7 +1927,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
1);
journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
do_journal_end(&myth, FLUSH_ALL);
- flushed = 1;
}
}
@@ -3444,9 +3437,8 @@ static int remove_from_transaction(struct super_block *sb,
if (cn == journal->j_last) {
journal->j_last = cn->prev;
}
- if (bh)
- remove_journal_hash(sb, journal->j_hash_table, NULL,
- bh->b_blocknr, 0);
+ remove_journal_hash(sb, journal->j_hash_table, NULL,
+ bh->b_blocknr, 0);
clear_buffer_journaled(bh); /* don't log this one */
if (!already_cleaned) {
@@ -3988,7 +3980,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
struct buffer_head *c_bh; /* commit bh */
struct buffer_head *d_bh; /* desc bh */
int cur_write_start = 0; /* start index of current log write */
- int old_start;
int i;
int flush;
int wait_on_commit;
@@ -4245,7 +4236,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
journal->j_num_work_lists++;
/* reset journal values for the next transaction */
- old_start = journal->j_start;
journal->j_start =
(journal->j_start + journal->j_len +
2) % SB_ONDISK_JOURNAL_SIZE(sb);
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index f5cebd70d903..7f868569d4d0 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -1322,7 +1322,7 @@ void leaf_paste_entries(struct buffer_info *bi,
char *item;
struct reiserfs_de_head *deh;
char *insert_point;
- int i, old_entry_num;
+ int i;
struct buffer_head *bh = bi->bi_bh;
if (new_entry_count == 0)
@@ -1362,7 +1362,6 @@ void leaf_paste_entries(struct buffer_info *bi,
put_deh_location(&deh[i],
deh_location(&deh[i]) + paste_size);
- old_entry_num = ih_entry_count(ih);
put_ih_entry_count(ih, ih_entry_count(ih) + new_entry_count);
/* prepare space for pasted records */
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 97f3fc4fdd79..959a066b7bb0 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -377,10 +377,13 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
/*
* Propagate the private flag so we know we're
- * in the priv tree
+ * in the priv tree. Also clear IOP_XATTR
+ * since we don't have xattrs on xattr files.
*/
- if (IS_PRIVATE(dir))
+ if (IS_PRIVATE(dir)) {
inode->i_flags |= S_PRIVATE;
+ inode->i_opflags &= ~IOP_XATTR;
+ }
}
reiserfs_write_unlock(dir->i_sb);
if (retval == IO_ERROR) {
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
index 415d66ca87d1..34baf5c0f265 100644
--- a/fs/reiserfs/objectid.c
+++ b/fs/reiserfs/objectid.c
@@ -183,13 +183,12 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
int old_max = sb_oid_maxsize(disk_sb);
struct reiserfs_super_block_v1 *disk_sb_v1;
- __le32 *objectid_map, *new_objectid_map;
+ __le32 *objectid_map;
int i;
disk_sb_v1 =
(struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
objectid_map = (__le32 *) (disk_sb_v1 + 1);
- new_objectid_map = (__le32 *) (disk_sb + 1);
if (cur_size > new_size) {
/*
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 9fed1c05f1f4..500f2000eb41 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -746,9 +746,6 @@ static void check_leaf_block_head(struct buffer_head *bh)
static void check_internal_block_head(struct buffer_head *bh)
{
- struct block_head *blkh;
-
- blkh = B_BLK_HEAD(bh);
if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT))
reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh);
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index e5ca9ed79e54..726580114d55 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -1168,6 +1168,8 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
return bmap_nr > ((1LL << 16) - 1);
}
+extern const struct xattr_handler *reiserfs_xattr_handlers[];
+
/*
 * this describes the key version of all items (except stat data) that the
 * object consists of
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 0037aea97d39..da9ebe33882b 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -593,7 +593,6 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
struct buffer_head *bh;
struct path_element *last_element;
int node_level, retval;
- int right_neighbor_of_leaf_node;
int fs_gen;
struct buffer_head *reada_bh[SEARCH_BY_KEY_READA];
b_blocknr_t reada_blocks[SEARCH_BY_KEY_READA];
@@ -614,8 +613,6 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
pathrelse(search_path);
- right_neighbor_of_leaf_node = 0;
-
/*
* With each iteration of this loop we search through the items in the
 * current node, and calculate the next current node (next path element)
@@ -701,7 +698,6 @@ io_error:
*/
block_number = SB_ROOT_BLOCK(sb);
expected_level = -1;
- right_neighbor_of_leaf_node = 0;
/* repeat search from the root */
continue;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index ab028ea0e561..3244037b1286 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1976,6 +1976,9 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
goto error_unlocked;
}
+ s->s_time_min = 0;
+ s->s_time_max = U32_MAX;
+
rs = SB_DISK_SUPER_BLOCK(s);
/*
* Let's do basic sanity check to verify that underlying device is not
@@ -2046,6 +2049,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (replay_only(s))
goto error_unlocked;
+ s->s_xattr = reiserfs_xattr_handlers;
+
if (bdev_read_only(s->s_bdev) && !sb_rdonly(s)) {
SWARN(silent, s, "clm-7000",
"Detected readonly device, marking FS readonly");
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index b5b26d8a192c..62b40df36c98 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -122,13 +122,13 @@ static struct dentry *open_xa_root(struct super_block *sb, int flags)
struct dentry *xaroot;
if (d_really_is_negative(privroot))
- return ERR_PTR(-ENODATA);
+ return ERR_PTR(-EOPNOTSUPP);
inode_lock_nested(d_inode(privroot), I_MUTEX_XATTR);
xaroot = dget(REISERFS_SB(sb)->xattr_root);
if (!xaroot)
- xaroot = ERR_PTR(-ENODATA);
+ xaroot = ERR_PTR(-EOPNOTSUPP);
else if (d_really_is_negative(xaroot)) {
int err = -ENODATA;
@@ -619,6 +619,10 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
int error, error2;
size_t jbegin_count = reiserfs_xattr_nblocks(inode, buffer_size);
+ /* Check before we start a transaction and then do nothing. */
+ if (!d_really_is_positive(REISERFS_SB(inode->i_sb)->priv_root))
+ return -EOPNOTSUPP;
+
if (!(flags & XATTR_REPLACE))
jbegin_count += reiserfs_xattr_jcreate_nblocks(inode);
@@ -841,8 +845,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
if (d_really_is_negative(dentry))
return -EINVAL;
- if (!dentry->d_sb->s_xattr ||
- get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
+ if (get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
return -EOPNOTSUPP;
dir = open_xa_dir(d_inode(dentry), XATTR_REPLACE);
@@ -882,6 +885,7 @@ static int create_privroot(struct dentry *dentry)
}
d_inode(dentry)->i_flags |= S_PRIVATE;
+ d_inode(dentry)->i_opflags &= ~IOP_XATTR;
reiserfs_info(dentry->d_sb, "Created %s - reserved for xattr "
"storage.\n", PRIVROOT_NAME);
@@ -895,7 +899,7 @@ static int create_privroot(struct dentry *dentry) { return 0; }
#endif
/* Actual operations that are exported to VFS-land */
-static const struct xattr_handler *reiserfs_xattr_handlers[] = {
+const struct xattr_handler *reiserfs_xattr_handlers[] = {
#ifdef CONFIG_REISERFS_FS_XATTR
&reiserfs_xattr_user_handler,
&reiserfs_xattr_trusted_handler,
@@ -966,8 +970,10 @@ int reiserfs_lookup_privroot(struct super_block *s)
if (!IS_ERR(dentry)) {
REISERFS_SB(s)->priv_root = dentry;
d_set_d_op(dentry, &xattr_lookup_poison_ops);
- if (d_really_is_positive(dentry))
+ if (d_really_is_positive(dentry)) {
d_inode(dentry)->i_flags |= S_PRIVATE;
+ d_inode(dentry)->i_opflags &= ~IOP_XATTR;
+ }
} else
err = PTR_ERR(dentry);
inode_unlock(d_inode(s->s_root));
@@ -996,7 +1002,6 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
}
if (d_really_is_positive(privroot)) {
- s->s_xattr = reiserfs_xattr_handlers;
inode_lock(d_inode(privroot));
if (!REISERFS_SB(s)->xattr_root) {
struct dentry *dentry;
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index aa9380bac196..05f666794561 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -320,10 +320,8 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
* would be useless since permissions are ignored, and a pain because
* it introduces locking cycles
*/
- if (IS_PRIVATE(dir)) {
- inode->i_flags |= S_PRIVATE;
+ if (IS_PRIVATE(inode))
goto apply_umask;
- }
err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
if (err)
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 7d580f7c3f1d..e582d001f792 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -65,7 +65,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/statfs.h>
@@ -423,10 +423,10 @@ static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
/*
* remounting must involve read-only
*/
-static int romfs_remount(struct super_block *sb, int *flags, char *data)
+static int romfs_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- *flags |= SB_RDONLY;
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_RDONLY;
return 0;
}
@@ -434,7 +434,6 @@ static const struct super_operations romfs_super_ops = {
.alloc_inode = romfs_alloc_inode,
.free_inode = romfs_free_inode,
.statfs = romfs_statfs,
- .remount_fs = romfs_remount,
};
/*
@@ -457,7 +456,7 @@ static __u32 romfs_checksum(const void *data, int size)
/*
* fill in the superblock
*/
-static int romfs_fill_super(struct super_block *sb, void *data, int silent)
+static int romfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct romfs_super_block *rsb;
struct inode *root;
@@ -478,6 +477,8 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_maxbytes = 0xFFFFFFFF;
sb->s_magic = ROMFS_MAGIC;
sb->s_flags |= SB_RDONLY | SB_NOATIME;
+ sb->s_time_min = 0;
+ sb->s_time_max = 0;
sb->s_op = &romfs_super_ops;
#ifdef CONFIG_ROMFS_ON_MTD
@@ -504,8 +505,8 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
if (rsb->word0 != ROMSB_WORD0 || rsb->word1 != ROMSB_WORD1 ||
img_size < ROMFH_SIZE) {
- if (!silent)
- pr_warn("VFS: Can't find a romfs filesystem on dev %s.\n",
+ if (!(fc->sb_flags & SB_SILENT))
+ errorf(fc, "VFS: Can't find a romfs filesystem on dev %s.\n",
sb->s_id);
goto error_rsb_inval;
}
@@ -518,7 +519,7 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
storage = sb->s_mtd ? "MTD" : "the block layer";
len = strnlen(rsb->name, ROMFS_MAXFN);
- if (!silent)
+ if (!(fc->sb_flags & SB_SILENT))
pr_notice("Mounting image '%*.*s' through %s\n",
(unsigned) len, (unsigned) len, rsb->name, storage);
@@ -548,23 +549,34 @@ error_rsb:
/*
* get a superblock for mounting
*/
-static struct dentry *romfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int romfs_get_tree(struct fs_context *fc)
{
- struct dentry *ret = ERR_PTR(-EINVAL);
+ int ret = -EINVAL;
#ifdef CONFIG_ROMFS_ON_MTD
- ret = mount_mtd(fs_type, flags, dev_name, data, romfs_fill_super);
+ ret = get_tree_mtd(fc, romfs_fill_super);
#endif
#ifdef CONFIG_ROMFS_ON_BLOCK
- if (ret == ERR_PTR(-EINVAL))
- ret = mount_bdev(fs_type, flags, dev_name, data,
- romfs_fill_super);
+ if (ret == -EINVAL)
+ ret = get_tree_bdev(fc, romfs_fill_super);
#endif
return ret;
}
+static const struct fs_context_operations romfs_context_ops = {
+ .get_tree = romfs_get_tree,
+ .reconfigure = romfs_reconfigure,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int romfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &romfs_context_ops;
+ return 0;
+}
+
/*
* destroy a romfs superblock in the appropriate manner
*/
@@ -587,7 +599,7 @@ static void romfs_kill_sb(struct super_block *sb)
static struct file_system_type romfs_fs_type = {
.owner = THIS_MODULE,
.name = "romfs",
- .mount = romfs_mount,
+ .init_fs_context = romfs_init_fs_context,
.kill_sb = romfs_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
diff --git a/fs/select.c b/fs/select.c
index 53a0c149f528..11d0285d46b7 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -321,7 +321,7 @@ static int poll_select_finish(struct timespec64 *end_time,
switch (pt_type) {
case PT_TIMEVAL:
{
- struct timeval rtv;
+ struct __kernel_old_timeval rtv;
if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
memset(&rtv, 0, sizeof(rtv));
@@ -698,10 +698,10 @@ out_nofds:
}
static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timeval __user *tvp)
+ fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
{
struct timespec64 end_time, *to = NULL;
- struct timeval tv;
+ struct __kernel_old_timeval tv;
int ret;
if (tvp) {
@@ -720,7 +720,7 @@ static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
}
SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
- fd_set __user *, exp, struct timeval __user *, tvp)
+ fd_set __user *, exp, struct __kernel_old_timeval __user *, tvp)
{
return kern_select(n, inp, outp, exp, tvp);
}
@@ -810,7 +810,7 @@ SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *,
struct sel_arg_struct {
unsigned long n;
fd_set __user *inp, *outp, *exp;
- struct timeval __user *tvp;
+ struct __kernel_old_timeval __user *tvp;
};
SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 04f09689cd6d..1600034a929b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
}
if (seq_has_overflowed(m))
goto Eoverflow;
+ p = m->op->next(m, p, &m->index);
if (pos + m->count > offset) {
m->from = offset - pos;
m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
}
pos += m->count;
m->count = 0;
- p = m->op->next(m, p, &m->index);
if (pos == offset)
break;
}
diff --git a/fs/splice.c b/fs/splice.c
index 98412721f056..f2400ce7d528 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
struct splice_pipe_desc *spd)
{
unsigned int spd_pages = spd->nr_pages;
+ unsigned int tail = pipe->tail;
+ unsigned int head = pipe->head;
+ unsigned int mask = pipe->ring_size - 1;
int ret = 0, page_nr = 0;
if (!spd_pages)
@@ -196,9 +199,8 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
goto out;
}
- while (pipe->nrbufs < pipe->buffers) {
- int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- struct pipe_buffer *buf = pipe->bufs + newbuf;
+ while (!pipe_full(head, tail, pipe->max_usage)) {
+ struct pipe_buffer *buf = &pipe->bufs[head & mask];
buf->page = spd->pages[page_nr];
buf->offset = spd->partial[page_nr].offset;
@@ -207,7 +209,8 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
buf->ops = spd->ops;
buf->flags = 0;
- pipe->nrbufs++;
+ head++;
+ pipe->head = head;
page_nr++;
ret += buf->len;
@@ -228,17 +231,19 @@ EXPORT_SYMBOL_GPL(splice_to_pipe);
ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
+ unsigned int head = pipe->head;
+ unsigned int tail = pipe->tail;
+ unsigned int mask = pipe->ring_size - 1;
int ret;
if (unlikely(!pipe->readers)) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
- } else if (pipe->nrbufs == pipe->buffers) {
+ } else if (pipe_full(head, tail, pipe->max_usage)) {
ret = -EAGAIN;
} else {
- int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- pipe->bufs[newbuf] = *buf;
- pipe->nrbufs++;
+ pipe->bufs[head & mask] = *buf;
+ pipe->head = head + 1;
return buf->len;
}
pipe_buf_release(pipe, buf);
@@ -252,14 +257,14 @@ EXPORT_SYMBOL(add_to_pipe);
*/
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
- unsigned int buffers = READ_ONCE(pipe->buffers);
+ unsigned int max_usage = READ_ONCE(pipe->max_usage);
- spd->nr_pages_max = buffers;
- if (buffers <= PIPE_DEF_BUFFERS)
+ spd->nr_pages_max = max_usage;
+ if (max_usage <= PIPE_DEF_BUFFERS)
return 0;
- spd->pages = kmalloc_array(buffers, sizeof(struct page *), GFP_KERNEL);
- spd->partial = kmalloc_array(buffers, sizeof(struct partial_page),
+ spd->pages = kmalloc_array(max_usage, sizeof(struct page *), GFP_KERNEL);
+ spd->partial = kmalloc_array(max_usage, sizeof(struct partial_page),
GFP_KERNEL);
if (spd->pages && spd->partial)
@@ -298,10 +303,11 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
{
struct iov_iter to;
struct kiocb kiocb;
- int idx, ret;
+ unsigned int i_head;
+ int ret;
iov_iter_pipe(&to, READ, pipe, len);
- idx = to.idx;
+ i_head = to.head;
init_sync_kiocb(&kiocb, in);
kiocb.ki_pos = *ppos;
ret = call_read_iter(in, &kiocb, &to);
@@ -309,7 +315,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
*ppos = kiocb.ki_pos;
file_accessed(in);
} else if (ret < 0) {
- to.idx = idx;
+ to.head = i_head;
to.iov_offset = 0;
iov_iter_advance(&to, 0); /* to free what was emitted */
/*
@@ -370,11 +376,12 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
struct iov_iter to;
struct page **pages;
unsigned int nr_pages;
+ unsigned int mask;
size_t offset, base, copied = 0;
ssize_t res;
int i;
- if (pipe->nrbufs == pipe->buffers)
+ if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
return -EAGAIN;
/*
@@ -400,8 +407,9 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
}
}
- pipe->bufs[to.idx].offset = offset;
- pipe->bufs[to.idx].len -= offset;
+ mask = pipe->ring_size - 1;
+ pipe->bufs[to.head & mask].offset = offset;
+ pipe->bufs[to.head & mask].len -= offset;
for (i = 0; i < nr_pages; i++) {
size_t this_len = min_t(size_t, len, PAGE_SIZE - offset);
@@ -443,7 +451,8 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
- if (sd->len < sd->total_len && pipe->nrbufs > 1)
+ if (sd->len < sd->total_len &&
+ pipe_occupancy(pipe->head, pipe->tail) > 1)
more |= MSG_SENDPAGE_NOTLAST;
return file->f_op->sendpage(file, buf->page, buf->offset,
@@ -481,10 +490,13 @@ static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
splice_actor *actor)
{
+ unsigned int head = pipe->head;
+ unsigned int tail = pipe->tail;
+ unsigned int mask = pipe->ring_size - 1;
int ret;
- while (pipe->nrbufs) {
- struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
+ while (!pipe_empty(tail, head)) {
+ struct pipe_buffer *buf = &pipe->bufs[tail & mask];
sd->len = buf->len;
if (sd->len > sd->total_len)
@@ -511,8 +523,8 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
if (!buf->len) {
pipe_buf_release(pipe, buf);
- pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
- pipe->nrbufs--;
+ tail++;
+ pipe->tail = tail;
if (pipe->files)
sd->need_wakeup = true;
}
@@ -543,7 +555,7 @@ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_des
if (signal_pending(current))
return -ERESTARTSYS;
- while (!pipe->nrbufs) {
+ while (pipe_empty(pipe->head, pipe->tail)) {
if (!pipe->writers)
return 0;
@@ -686,7 +698,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
.pos = *ppos,
.u.file = out,
};
- int nbufs = pipe->buffers;
+ int nbufs = pipe->max_usage;
struct bio_vec *array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
ssize_t ret;
@@ -699,16 +711,19 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
splice_from_pipe_begin(&sd);
while (sd.total_len) {
struct iov_iter from;
+ unsigned int head = pipe->head;
+ unsigned int tail = pipe->tail;
+ unsigned int mask = pipe->ring_size - 1;
size_t left;
- int n, idx;
+ int n;
ret = splice_from_pipe_next(pipe, &sd);
if (ret <= 0)
break;
- if (unlikely(nbufs < pipe->buffers)) {
+ if (unlikely(nbufs < pipe->max_usage)) {
kfree(array);
- nbufs = pipe->buffers;
+ nbufs = pipe->max_usage;
array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
if (!array) {
@@ -719,16 +734,13 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
/* build the vector */
left = sd.total_len;
- for (n = 0, idx = pipe->curbuf; left && n < pipe->nrbufs; n++, idx++) {
- struct pipe_buffer *buf = pipe->bufs + idx;
+ for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++, n++) {
+ struct pipe_buffer *buf = &pipe->bufs[tail & mask];
size_t this_len = buf->len;
if (this_len > left)
this_len = left;
- if (idx == pipe->buffers - 1)
- idx = -1;
-
ret = pipe_buf_confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA)
@@ -752,14 +764,15 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
*ppos = sd.pos;
/* dismiss the fully eaten buffers, adjust the partial one */
+ tail = pipe->tail;
while (ret) {
- struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
+ struct pipe_buffer *buf = &pipe->bufs[tail & mask];
if (ret >= buf->len) {
ret -= buf->len;
buf->len = 0;
pipe_buf_release(pipe, buf);
- pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
- pipe->nrbufs--;
+ tail++;
+ pipe->tail = tail;
if (pipe->files)
sd.need_wakeup = true;
} else {
@@ -942,15 +955,17 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
sd->flags &= ~SPLICE_F_NONBLOCK;
more = sd->flags & SPLICE_F_MORE;
- WARN_ON_ONCE(pipe->nrbufs != 0);
+ WARN_ON_ONCE(!pipe_empty(pipe->head, pipe->tail));
while (len) {
+ unsigned int p_space;
size_t read_len;
loff_t pos = sd->pos, prev_pos = pos;
 /* Don't try to read more than the pipe has space for. */
- read_len = min_t(size_t, len,
- (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT);
+ p_space = pipe->max_usage -
+ pipe_occupancy(pipe->head, pipe->tail);
+ read_len = min_t(size_t, len, p_space << PAGE_SHIFT);
ret = do_splice_to(in, &pos, pipe, read_len, flags);
if (unlikely(ret <= 0))
goto out_release;
@@ -989,7 +1004,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
}
done:
- pipe->nrbufs = pipe->curbuf = 0;
+ pipe->tail = pipe->head = 0;
file_accessed(in);
return bytes;
@@ -998,8 +1013,8 @@ out_release:
* If we did an incomplete transfer we must release
* the pipe buffers in question:
*/
- for (i = 0; i < pipe->buffers; i++) {
- struct pipe_buffer *buf = pipe->bufs + i;
+ for (i = 0; i < pipe->ring_size; i++) {
+ struct pipe_buffer *buf = &pipe->bufs[i];
if (buf->ops)
pipe_buf_release(pipe, buf);
@@ -1075,7 +1090,7 @@ static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
- if (pipe->nrbufs != pipe->buffers)
+ if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage))
return 0;
if (flags & SPLICE_F_NONBLOCK)
return -EAGAIN;
@@ -1180,8 +1195,15 @@ static long do_splice(struct file *in, loff_t __user *off_in,
pipe_lock(opipe);
ret = wait_for_space(opipe, flags);
- if (!ret)
+ if (!ret) {
+ unsigned int p_space;
+
+ /* Don't try to read more than the pipe has space for. */
+ p_space = opipe->max_usage - pipe_occupancy(opipe->head, opipe->tail);
+ len = min_t(size_t, len, p_space << PAGE_SHIFT);
+
ret = do_splice_to(in, &offset, opipe, len, flags);
+ }
pipe_unlock(opipe);
if (ret > 0)
wakeup_pipe_readers(opipe);
@@ -1442,16 +1464,16 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
int ret;
/*
- * Check ->nrbufs without the inode lock first. This function
+ * Check the pipe occupancy without the inode lock first. This function
* is speculative anyways, so missing one is ok.
*/
- if (pipe->nrbufs)
+ if (!pipe_empty(pipe->head, pipe->tail))
return 0;
ret = 0;
pipe_lock(pipe);
- while (!pipe->nrbufs) {
+ while (pipe_empty(pipe->head, pipe->tail)) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -1480,16 +1502,16 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
int ret;
/*
- * Check ->nrbufs without the inode lock first. This function
+ * Check pipe occupancy without the inode lock first. This function
* is speculative anyways, so missing one is ok.
*/
- if (pipe->nrbufs < pipe->buffers)
+ if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
return 0;
ret = 0;
pipe_lock(pipe);
- while (pipe->nrbufs >= pipe->buffers) {
+ while (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
@@ -1520,7 +1542,10 @@ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
- int ret = 0, nbuf;
+ unsigned int i_head, o_head;
+ unsigned int i_tail, o_tail;
+ unsigned int i_mask, o_mask;
+ int ret = 0;
bool input_wakeup = false;
@@ -1540,7 +1565,14 @@ retry:
*/
pipe_double_lock(ipipe, opipe);
+ i_tail = ipipe->tail;
+ i_mask = ipipe->ring_size - 1;
+ o_head = opipe->head;
+ o_mask = opipe->ring_size - 1;
+
do {
+ size_t o_len;
+
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
@@ -1548,14 +1580,18 @@ retry:
break;
}
- if (!ipipe->nrbufs && !ipipe->writers)
+ i_head = ipipe->head;
+ o_tail = opipe->tail;
+
+ if (pipe_empty(i_head, i_tail) && !ipipe->writers)
break;
/*
* Cannot make any progress, because either the input
* pipe is empty or the output pipe is full.
*/
- if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
+ if (pipe_empty(i_head, i_tail) ||
+ pipe_full(o_head, o_tail, opipe->max_usage)) {
/* Already processed some buffers, break */
if (ret)
break;
@@ -1575,9 +1611,8 @@ retry:
goto retry;
}
- ibuf = ipipe->bufs + ipipe->curbuf;
- nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
- obuf = opipe->bufs + nbuf;
+ ibuf = &ipipe->bufs[i_tail & i_mask];
+ obuf = &opipe->bufs[o_head & o_mask];
if (len >= ibuf->len) {
/*
@@ -1585,10 +1620,12 @@ retry:
*/
*obuf = *ibuf;
ibuf->ops = NULL;
- opipe->nrbufs++;
- ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
- ipipe->nrbufs--;
+ i_tail++;
+ ipipe->tail = i_tail;
input_wakeup = true;
+ o_len = obuf->len;
+ o_head++;
+ opipe->head = o_head;
} else {
/*
* Get a reference to this pipe buffer,
@@ -1610,12 +1647,14 @@ retry:
pipe_buf_mark_unmergeable(obuf);
obuf->len = len;
- opipe->nrbufs++;
- ibuf->offset += obuf->len;
- ibuf->len -= obuf->len;
+ ibuf->offset += len;
+ ibuf->len -= len;
+ o_len = len;
+ o_head++;
+ opipe->head = o_head;
}
- ret += obuf->len;
- len -= obuf->len;
+ ret += o_len;
+ len -= o_len;
} while (len);
pipe_unlock(ipipe);
@@ -1641,7 +1680,10 @@ static int link_pipe(struct pipe_inode_info *ipipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
- int ret = 0, i = 0, nbuf;
+ unsigned int i_head, o_head;
+ unsigned int i_tail, o_tail;
+ unsigned int i_mask, o_mask;
+ int ret = 0;
/*
* Potential ABBA deadlock, work around it by ordering lock
@@ -1650,6 +1692,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
*/
pipe_double_lock(ipipe, opipe);
+ i_tail = ipipe->tail;
+ i_mask = ipipe->ring_size - 1;
+ o_head = opipe->head;
+ o_mask = opipe->ring_size - 1;
+
do {
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
@@ -1658,15 +1705,19 @@ static int link_pipe(struct pipe_inode_info *ipipe,
break;
}
+ i_head = ipipe->head;
+ o_tail = opipe->tail;
+
/*
- * If we have iterated all input buffers or ran out of
+ * If we have iterated all input buffers or run out of
* output room, break.
*/
- if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
+ if (pipe_empty(i_head, i_tail) ||
+ pipe_full(o_head, o_tail, opipe->max_usage))
break;
- ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
- nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
+ ibuf = &ipipe->bufs[i_tail & i_mask];
+ obuf = &opipe->bufs[o_head & o_mask];
/*
* Get a reference to this pipe buffer,
@@ -1678,7 +1729,6 @@ static int link_pipe(struct pipe_inode_info *ipipe,
break;
}
- obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
/*
@@ -1691,11 +1741,12 @@ static int link_pipe(struct pipe_inode_info *ipipe,
if (obuf->len > len)
obuf->len = len;
-
- opipe->nrbufs++;
ret += obuf->len;
len -= obuf->len;
- i++;
+
+ o_head++;
+ opipe->head = o_head;
+ i_tail++;
} while (len);
/*
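
Note on the fs/splice.c hunks above: the old curbuf/nrbufs accounting is replaced by free-running head/tail indices and the pipe_empty()/pipe_full()/pipe_occupancy() helpers from include/linux/pipe_fs_i.h. The sketch below only restates their arithmetic for illustration; it is not part of this patch and assumes, as the hunks do, that head and tail are unsigned counters that only ever increase and that ring_size is a power of two.

	static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
	{
		/* Unsigned subtraction gives the number of occupied slots
		 * even after the counters wrap around. */
		return head - tail;
	}

	static inline bool pipe_empty(unsigned int head, unsigned int tail)
	{
		return !pipe_occupancy(head, tail);
	}

	static inline bool pipe_full(unsigned int head, unsigned int tail,
				     unsigned int limit)
	{
		return pipe_occupancy(head, tail) >= limit;
	}

	/* A slot is addressed by masking the free-running index, e.g.:
	 *	struct pipe_buffer *buf = &pipe->bufs[tail & (pipe->ring_size - 1)];
	 */
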
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index effa638d6d85..0cc4ceec0562 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/mutex.h>
@@ -36,26 +37,27 @@
static struct file_system_type squashfs_fs_type;
static const struct super_operations squashfs_super_ops;
-static const struct squashfs_decompressor *supported_squashfs_filesystem(short
- major, short minor, short id)
+static const struct squashfs_decompressor *supported_squashfs_filesystem(
+ struct fs_context *fc,
+ short major, short minor, short id)
{
const struct squashfs_decompressor *decompressor;
if (major < SQUASHFS_MAJOR) {
- ERROR("Major/Minor mismatch, older Squashfs %d.%d "
- "filesystems are unsupported\n", major, minor);
+ errorf(fc, "Major/Minor mismatch, older Squashfs %d.%d "
+ "filesystems are unsupported", major, minor);
return NULL;
} else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) {
- ERROR("Major/Minor mismatch, trying to mount newer "
- "%d.%d filesystem\n", major, minor);
- ERROR("Please update your kernel\n");
+ errorf(fc, "Major/Minor mismatch, trying to mount newer "
+ "%d.%d filesystem", major, minor);
+ errorf(fc, "Please update your kernel");
return NULL;
}
decompressor = squashfs_lookup_decompressor(id);
if (!decompressor->supported) {
- ERROR("Filesystem uses \"%s\" compression. This is not "
- "supported\n", decompressor->name);
+ errorf(fc, "Filesystem uses \"%s\" compression. This is not supported",
+ decompressor->name);
return NULL;
}
@@ -63,7 +65,7 @@ static const struct squashfs_decompressor *supported_squashfs_filesystem(short
}
-static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
+static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct squashfs_sb_info *msblk;
struct squashfs_super_block *sblk = NULL;
@@ -98,7 +100,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk));
if (IS_ERR(sblk)) {
- ERROR("unable to read squashfs_super_block\n");
+ errorf(fc, "unable to read squashfs_super_block");
err = PTR_ERR(sblk);
sblk = NULL;
goto failed_mount;
@@ -109,14 +111,15 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
/* Check it is a SQUASHFS superblock */
sb->s_magic = le32_to_cpu(sblk->s_magic);
if (sb->s_magic != SQUASHFS_MAGIC) {
- if (!silent)
- ERROR("Can't find a SQUASHFS superblock on %pg\n",
- sb->s_bdev);
+ if (!(fc->sb_flags & SB_SILENT))
+ errorf(fc, "Can't find a SQUASHFS superblock on %pg",
+ sb->s_bdev);
goto failed_mount;
}
/* Check the MAJOR & MINOR versions and lookup compression type */
msblk->decompressor = supported_squashfs_filesystem(
+ fc,
le16_to_cpu(sblk->s_major),
le16_to_cpu(sblk->s_minor),
le16_to_cpu(sblk->compression));
@@ -133,15 +136,15 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
/* Check block size for sanity */
msblk->block_size = le32_to_cpu(sblk->block_size);
if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE)
- goto failed_mount;
+ goto insanity;
/*
* Check the system page size is not larger than the filesystem
* block size (by default 128K). This is currently not supported.
*/
if (PAGE_SIZE > msblk->block_size) {
- ERROR("Page size > filesystem block size (%d). This is "
- "currently not supported!\n", msblk->block_size);
+ errorf(fc, "Page size > filesystem block size (%d). This is "
+ "currently not supported!", msblk->block_size);
goto failed_mount;
}
@@ -152,12 +155,12 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
/* Check that block_size and block_log match */
if (msblk->block_size != (1 << msblk->block_log))
- goto failed_mount;
+ goto insanity;
/* Check the root inode for sanity */
root_inode = le64_to_cpu(sblk->root_inode);
if (SQUASHFS_INODE_OFFSET(root_inode) > SQUASHFS_METADATA_SIZE)
- goto failed_mount;
+ goto insanity;
msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
@@ -183,6 +186,8 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
(u64) le64_to_cpu(sblk->id_table_start));
sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
sb->s_flags |= SB_RDONLY;
sb->s_op = &squashfs_super_ops;
@@ -197,7 +202,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
msblk->read_page = squashfs_cache_init("data",
squashfs_max_decompressors(), msblk->block_size);
if (msblk->read_page == NULL) {
- ERROR("Failed to allocate read_page block\n");
+ errorf(fc, "Failed to allocate read_page block");
goto failed_mount;
}
@@ -205,7 +210,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
if (IS_ERR(msblk->stream)) {
err = PTR_ERR(msblk->stream);
msblk->stream = NULL;
- goto failed_mount;
+ goto insanity;
}
/* Handle xattrs */
@@ -220,7 +225,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
if (IS_ERR(msblk->xattr_id_table)) {
- ERROR("unable to read xattr id index table\n");
+ errorf(fc, "unable to read xattr id index table");
err = PTR_ERR(msblk->xattr_id_table);
msblk->xattr_id_table = NULL;
if (err != -ENOTSUPP)
@@ -234,7 +239,7 @@ allocate_id_index_table:
le64_to_cpu(sblk->id_table_start), next_table,
le16_to_cpu(sblk->no_ids));
if (IS_ERR(msblk->id_table)) {
- ERROR("unable to read id index table\n");
+ errorf(fc, "unable to read id index table");
err = PTR_ERR(msblk->id_table);
msblk->id_table = NULL;
goto failed_mount;
@@ -250,7 +255,7 @@ allocate_id_index_table:
msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
lookup_table_start, next_table, msblk->inodes);
if (IS_ERR(msblk->inode_lookup_table)) {
- ERROR("unable to read inode lookup table\n");
+ errorf(fc, "unable to read inode lookup table");
err = PTR_ERR(msblk->inode_lookup_table);
msblk->inode_lookup_table = NULL;
goto failed_mount;
@@ -275,7 +280,7 @@ handle_fragments:
msblk->fragment_index = squashfs_read_fragment_index_table(sb,
le64_to_cpu(sblk->fragment_table_start), next_table, fragments);
if (IS_ERR(msblk->fragment_index)) {
- ERROR("unable to read fragment index table\n");
+ errorf(fc, "unable to read fragment index table");
err = PTR_ERR(msblk->fragment_index);
msblk->fragment_index = NULL;
goto failed_mount;
@@ -286,13 +291,13 @@ check_directory_table:
/* Sanity check directory_table */
if (msblk->directory_table > next_table) {
err = -EINVAL;
- goto failed_mount;
+ goto insanity;
}
/* Sanity check inode_table */
if (msblk->inode_table >= msblk->directory_table) {
err = -EINVAL;
- goto failed_mount;
+ goto insanity;
}
/* allocate root */
@@ -321,6 +326,8 @@ check_directory_table:
kfree(sblk);
return 0;
+insanity:
+ errorf(fc, "squashfs image failed sanity check");
failed_mount:
squashfs_cache_delete(msblk->block_cache);
squashfs_cache_delete(msblk->fragment_cache);
@@ -336,6 +343,28 @@ failed_mount:
return err;
}
+static int squashfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, squashfs_fill_super);
+}
+
+static int squashfs_reconfigure(struct fs_context *fc)
+{
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_RDONLY;
+ return 0;
+}
+
+static const struct fs_context_operations squashfs_context_ops = {
+ .get_tree = squashfs_get_tree,
+ .reconfigure = squashfs_reconfigure,
+};
+
+static int squashfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &squashfs_context_ops;
+ return 0;
+}
static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
@@ -358,14 +387,6 @@ static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
-static int squashfs_remount(struct super_block *sb, int *flags, char *data)
-{
- sync_filesystem(sb);
- *flags |= SB_RDONLY;
- return 0;
-}
-
-
static void squashfs_put_super(struct super_block *sb)
{
if (sb->s_fs_info) {
@@ -384,14 +405,6 @@ static void squashfs_put_super(struct super_block *sb)
}
}
-
-static struct dentry *squashfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_bdev(fs_type, flags, dev_name, data, squashfs_fill_super);
-}
-
-
static struct kmem_cache *squashfs_inode_cachep;
@@ -468,7 +481,7 @@ static void squashfs_free_inode(struct inode *inode)
static struct file_system_type squashfs_fs_type = {
.owner = THIS_MODULE,
.name = "squashfs",
- .mount = squashfs_mount,
+ .init_fs_context = squashfs_init_fs_context,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV
};
@@ -479,7 +492,6 @@ static const struct super_operations squashfs_super_ops = {
.free_inode = squashfs_free_inode,
.statfs = squashfs_statfs,
.put_super = squashfs_put_super,
- .remount_fs = squashfs_remount
};
module_init(init_squashfs_fs);
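
The squashfs conversion above routes diagnostics through errorf() on the fs_context, so they can be retrieved by userspace via the new mount API instead of only appearing in the kernel log. The sketch below is illustrative only: it assumes a kernel and libc headers new enough to expose the fsopen()/fsconfig()/fsmount()/move_mount() syscall numbers (Linux 5.2+), and /dev/loop0 and /mnt are hypothetical example paths.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/mount.h>	/* FSCONFIG_*, MOVE_MOUNT_* */

	int main(void)
	{
		char msg[256];
		ssize_t n;
		int fsfd, mntfd;

		fsfd = syscall(__NR_fsopen, "squashfs", 0);
		if (fsfd < 0)
			return 1;

		syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/loop0", 0);
		if (syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
			/* Messages queued with errorf()/warnf() are readable from
			 * the context fd, each prefixed with "e " or "w ". */
			while ((n = read(fsfd, msg, sizeof(msg) - 1)) > 0) {
				msg[n] = '\0';
				fprintf(stderr, "%s\n", msg);
			}
			return 1;
		}

		mntfd = syscall(__NR_fsmount, fsfd, 0, 0);
		syscall(__NR_move_mount, mntfd, "", AT_FDCWD, "/mnt",
			MOVE_MOUNT_F_EMPTY_PATH);
		return 0;
	}
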
diff --git a/fs/statfs.c b/fs/statfs.c
index eea7af6f2f22..2616424012ea 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -318,19 +318,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
{
struct compat_statfs64 buf;
- if (sizeof(ubuf->f_bsize) == 4) {
- if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
- kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
- return -EOVERFLOW;
- /* f_files and f_ffree may be -1; it's okay
- * to stuff that into 32 bits */
- if (kbuf->f_files != 0xffffffffffffffffULL
- && (kbuf->f_files & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- if (kbuf->f_ffree != 0xffffffffffffffffULL
- && (kbuf->f_ffree & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- }
+
+ if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
+ return -EOVERFLOW;
+
memset(&buf, 0, sizeof(struct compat_statfs64));
buf.f_type = kbuf->f_type;
buf.f_bsize = kbuf->f_bsize;
diff --git a/fs/super.c b/fs/super.c
index 113c58f19425..cfadab2cbf35 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -32,6 +32,7 @@
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
+#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
@@ -257,6 +258,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
s->s_maxbytes = MAX_NON_LFS;
s->s_op = &default_op;
s->s_time_gran = 1000000000;
+ s->s_time_min = TIME64_MIN;
+ s->s_time_max = TIME64_MAX;
s->cleancache_poolid = CLEANCACHE_NO_POOL;
s->s_shrink.seeks = DEFAULT_SEEKS;
@@ -290,6 +293,7 @@ static void __put_super(struct super_block *s)
WARN_ON(s->s_inode_lru.node);
WARN_ON(!list_empty(&s->s_mounts));
security_sb_free(s);
+ fscrypt_sb_free(s);
put_user_ns(s->s_user_ns);
kfree(s->s_subtype);
call_rcu(&s->rcu, destroy_super_rcu);
@@ -478,13 +482,10 @@ EXPORT_SYMBOL(generic_shutdown_super);
bool mount_capable(struct fs_context *fc)
{
- struct user_namespace *user_ns = fc->global ? &init_user_ns
- : fc->user_ns;
-
if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
return capable(CAP_SYS_ADMIN);
else
- return ns_capable(user_ns, CAP_SYS_ADMIN);
+ return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}
/**
@@ -1163,9 +1164,11 @@ int vfs_get_super(struct fs_context *fc,
{
int (*test)(struct super_block *, struct fs_context *);
struct super_block *sb;
+ int err;
switch (keying) {
case vfs_get_single_super:
+ case vfs_get_single_reconf_super:
test = test_single_super;
break;
case vfs_get_keyed_super:
@@ -1183,18 +1186,29 @@ int vfs_get_super(struct fs_context *fc,
return PTR_ERR(sb);
if (!sb->s_root) {
- int err = fill_super(sb, fc);
- if (err) {
- deactivate_locked_super(sb);
- return err;
- }
+ err = fill_super(sb, fc);
+ if (err)
+ goto error;
sb->s_flags |= SB_ACTIVE;
+ fc->root = dget(sb->s_root);
+ } else {
+ fc->root = dget(sb->s_root);
+ if (keying == vfs_get_single_reconf_super) {
+ err = reconfigure_super(fc);
+ if (err < 0) {
+ dput(fc->root);
+ fc->root = NULL;
+ goto error;
+ }
+ }
}
- BUG_ON(fc->root);
- fc->root = dget(sb->s_root);
return 0;
+
+error:
+ deactivate_locked_super(sb);
+ return err;
}
EXPORT_SYMBOL(vfs_get_super);
@@ -1214,7 +1228,26 @@ int get_tree_single(struct fs_context *fc,
}
EXPORT_SYMBOL(get_tree_single);
+int get_tree_single_reconf(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc))
+{
+ return vfs_get_super(fc, vfs_get_single_reconf_super, fill_super);
+}
+EXPORT_SYMBOL(get_tree_single_reconf);
+
+int get_tree_keyed(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc),
+ void *key)
+{
+ fc->s_fs_info = key;
+ return vfs_get_super(fc, vfs_get_keyed_super, fill_super);
+}
+EXPORT_SYMBOL(get_tree_keyed);
+
#ifdef CONFIG_BLOCK
+
static int set_bdev_super(struct super_block *s, void *data)
{
s->s_bdev = data;
@@ -1224,6 +1257,102 @@ static int set_bdev_super(struct super_block *s, void *data)
return 0;
}
+static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
+{
+ return set_bdev_super(s, fc->sget_key);
+}
+
+static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
+{
+ return s->s_bdev == fc->sget_key;
+}
+
+/**
+ * get_tree_bdev - Get a superblock based on a single block device
+ * @fc: The filesystem context holding the parameters
+ * @fill_super: Helper to initialise a new superblock
+ */
+int get_tree_bdev(struct fs_context *fc,
+ int (*fill_super)(struct super_block *,
+ struct fs_context *))
+{
+ struct block_device *bdev;
+ struct super_block *s;
+ fmode_t mode = FMODE_READ | FMODE_EXCL;
+ int error = 0;
+
+ if (!(fc->sb_flags & SB_RDONLY))
+ mode |= FMODE_WRITE;
+
+ if (!fc->source)
+ return invalf(fc, "No source specified");
+
+ bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
+ if (IS_ERR(bdev)) {
+ errorf(fc, "%s: Can't open blockdev", fc->source);
+ return PTR_ERR(bdev);
+ }
+
+ /* Once the superblock is inserted into the list by sget_fc(), s_umount
+ * will protect the lockfs code from trying to start a snapshot while
+ * we are mounting
+ */
+ mutex_lock(&bdev->bd_fsfreeze_mutex);
+ if (bdev->bd_fsfreeze_count > 0) {
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ blkdev_put(bdev, mode);
+ warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
+ return -EBUSY;
+ }
+
+ fc->sb_flags |= SB_NOSEC;
+ fc->sget_key = bdev;
+ s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ if (IS_ERR(s)) {
+ blkdev_put(bdev, mode);
+ return PTR_ERR(s);
+ }
+
+ if (s->s_root) {
+ /* Don't summarily change the RO/RW state. */
+ if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
+ warnf(fc, "%pg: Can't mount, would change RO state", bdev);
+ deactivate_locked_super(s);
+ blkdev_put(bdev, mode);
+ return -EBUSY;
+ }
+
+ /*
+ * s_umount nests inside bd_mutex during
+ * __invalidate_device(). blkdev_put() acquires
+ * bd_mutex and can't be called under s_umount. Drop
+ * s_umount temporarily. This is safe as we're
+ * holding an active reference.
+ */
+ up_write(&s->s_umount);
+ blkdev_put(bdev, mode);
+ down_write(&s->s_umount);
+ } else {
+ s->s_mode = mode;
+ snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
+ sb_set_blocksize(s, block_size(bdev));
+ error = fill_super(s, fc);
+ if (error) {
+ deactivate_locked_super(s);
+ return error;
+ }
+
+ s->s_flags |= SB_ACTIVE;
+ bdev->bd_super = s;
+ }
+
+ BUG_ON(fc->root);
+ fc->root = dget(s->s_root);
+ return 0;
+}
+EXPORT_SYMBOL(get_tree_bdev);
+
static int test_bdev_super(struct super_block *s, void *data)
{
return (void *)s->s_bdev == data;
@@ -1429,11 +1558,6 @@ int vfs_get_tree(struct fs_context *fc)
sb = fc->root->d_sb;
WARN_ON(!sb->s_bdi);
- if (fc->subtype && !sb->s_subtype) {
- sb->s_subtype = fc->subtype;
- fc->subtype = NULL;
- }
-
/*
* Write barrier is for super_cache_count(). We place it before setting
* SB_BORN as the data dependency between the two functions is the
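
For orientation, get_tree_keyed() and get_tree_single_reconf() added above are thin wrappers around vfs_get_super(). A hedged illustration of a consumer of the keyed variant follows; none of it is in the patch, and "examplefs" and its key are hypothetical names invented for the sketch.

	#include <linux/fs.h>
	#include <linux/fs_context.h>

	/* Stand-in for whatever object (e.g. a namespace) superblocks are keyed on. */
	static void *examplefs_key;

	/* Placeholder; a real fill_super would set up s_op and the root inode. */
	static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
	{
		return -ENOSYS;
	}

	static int examplefs_get_tree(struct fs_context *fc)
	{
		/*
		 * get_tree_keyed() stores the key in fc->s_fs_info; vfs_get_super()
		 * then reuses an existing superblock whose s_fs_info matches it,
		 * or calls fill_super for a fresh one.
		 */
		return get_tree_keyed(fc, examplefs_fill_super, examplefs_key);
	}

	static const struct fs_context_operations examplefs_context_ops = {
		.get_tree	= examplefs_get_tree,
	};
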
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index d788b1daa7eb..cc8e2ed155c8 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -368,7 +368,8 @@ static int sysv_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_block_base = 0;
mutex_init(&sbi->s_lock);
sb->s_fs_info = sbi;
-
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
sb_set_blocksize(sb, BLOCK_SIZE);
for (i = 0; i < ARRAY_SIZE(flavours) && !size; i++) {
@@ -487,6 +488,8 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_type = FSTYPE_V7;
mutex_init(&sbi->s_lock);
sb->s_fs_info = sbi;
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
sb_set_blocksize(sb, 512);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 6a6fc8aa1de7..ac7f59a58f94 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -302,11 +302,11 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
static void timerfd_show(struct seq_file *m, struct file *file)
{
struct timerfd_ctx *ctx = file->private_data;
- struct itimerspec t;
+ struct timespec64 value, interval;
spin_lock_irq(&ctx->wqh.lock);
- t.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
- t.it_interval = ktime_to_timespec(ctx->tintv);
+ value = ktime_to_timespec64(timerfd_get_remaining(ctx));
+ interval = ktime_to_timespec64(ctx->tintv);
spin_unlock_irq(&ctx->wqh.lock);
seq_printf(m,
@@ -318,10 +318,10 @@ static void timerfd_show(struct seq_file *m, struct file *file)
ctx->clockid,
(unsigned long long)ctx->ticks,
ctx->settime_flags,
- (unsigned long long)t.it_value.tv_sec,
- (unsigned long long)t.it_value.tv_nsec,
- (unsigned long long)t.it_interval.tv_sec,
- (unsigned long long)t.it_interval.tv_nsec);
+ (unsigned long long)value.tv_sec,
+ (unsigned long long)value.tv_nsec,
+ (unsigned long long)interval.tv_sec,
+ (unsigned long long)interval.tv_nsec);
}
#else
#define timerfd_show NULL
@@ -471,7 +471,11 @@ static int do_timerfd_settime(int ufd, int flags,
break;
}
spin_unlock_irq(&ctx->wqh.lock);
- cpu_relax();
+
+ if (isalarm(ctx))
+ hrtimer_cancel_wait_running(&ctx->t.alarm.timer);
+ else
+ hrtimer_cancel_wait_running(&ctx->t.tmr);
}
/*
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index eeeae0475da9..0caa151cae4e 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -16,6 +16,7 @@
#include <linux/namei.h>
#include <linux/tracefs.h>
#include <linux/fsnotify.h>
+#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/magic.h>
@@ -390,6 +391,9 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *dentry;
struct inode *inode;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (!(mode & S_IFMT))
mode |= S_IFREG;
BUG_ON(!S_ISREG(mode));
diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
index d9af2de9084a..8cdbd53d780c 100644
--- a/fs/ubifs/auth.c
+++ b/fs/ubifs/auth.c
@@ -479,8 +479,10 @@ int __ubifs_node_verify_hmac(const struct ubifs_info *c, const void *node,
return -ENOMEM;
err = ubifs_node_calc_hmac(c, node, len, ofs_hmac, hmac);
- if (err)
+ if (err) {
+ kfree(hmac);
return err;
+ }
err = crypto_memneq(hmac, node + ofs_hmac, hmac_len);
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 80d7301ab76d..c0b84e960b20 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -51,7 +51,7 @@
static void shrink_liability(struct ubifs_info *c, int nr_to_write)
{
down_read(&c->vfs_sb->s_umount);
- writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE);
+ writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE);
up_read(&c->vfs_sb->s_umount);
}
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index a5f10d79e0dd..0f5a480fe264 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2737,18 +2737,6 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u,
struct dentry *dent = file->f_path.dentry;
int val;
- /*
- * TODO: this is racy - the file-system might have already been
- * unmounted and we'd oops in this case. The plan is to fix it with
- * help of 'iterate_supers_type()' which we should have in v3.0: when
- * a debugfs opened, we rember FS's UUID in file->private_data. Then
- * whenever we access the FS via a debugfs file, we iterate all UBIFS
- * superblocks and fine the one with the same UUID, and take the
- * locking right.
- *
- * The other way to go suggested by Al Viro is to create a separate
- * 'ubifs-debug' file-system instead.
- */
if (file->f_path.dentry == d->dfs_dump_lprops) {
ubifs_dump_lprops(c);
return count;
@@ -2817,7 +2805,6 @@ void dbg_debugfs_init_fs(struct ubifs_info *c)
c->vi.ubi_num, c->vi.vol_id);
if (n == UBIFS_DFS_DIR_LEN) {
/* The array size is too small */
- fname = UBIFS_DFS_DIR_NAME;
return;
}
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 400970d740bb..cd52585c8f4f 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1078,15 +1078,18 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if (attr->ia_valid & ATTR_ATIME)
- inode->i_atime = timespec64_trunc(attr->ia_atime,
- inode->i_sb->s_time_gran);
- if (attr->ia_valid & ATTR_MTIME)
- inode->i_mtime = timespec64_trunc(attr->ia_mtime,
- inode->i_sb->s_time_gran);
- if (attr->ia_valid & ATTR_CTIME)
- inode->i_ctime = timespec64_trunc(attr->ia_ctime,
- inode->i_sb->s_time_gran);
+ if (attr->ia_valid & ATTR_ATIME) {
+ inode->i_atime = timestamp_truncate(attr->ia_atime,
+ inode);
+ }
+ if (attr->ia_valid & ATTR_MTIME) {
+ inode->i_mtime = timestamp_truncate(attr->ia_mtime,
+ inode);
+ }
+ if (attr->ia_valid & ATTR_CTIME) {
+ inode->i_ctime = timestamp_truncate(attr->ia_ctime,
+ inode);
+ }
if (attr->ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
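
do_attr_changes() above now clamps timestamps with timestamp_truncate(), which honours the per-superblock s_time_min/s_time_max limits that several filesystems in this series (squashfs, sysv, ufs, ...) start setting. The sketch below is a rough paraphrase of that helper for illustration only; the authoritative version lives in fs/inode.c and may differ in detail.

	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/time64.h>

	struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
	{
		struct super_block *sb = inode->i_sb;
		unsigned int gran = sb->s_time_gran;

		/* Clamp to the range the on-disk format can represent. */
		t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);

		/* Round down to the filesystem's timestamp granularity. */
		if (gran == NSEC_PER_SEC)
			t.tv_nsec = 0;
		else if (gran > 1)
			t.tv_nsec -= t.tv_nsec % gran;

		return t;
	}
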
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 034ad14710d1..5dc5abca11c7 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -185,6 +185,21 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC_GET_ENCRYPTION_POLICY:
return fscrypt_ioctl_get_policy(file, (void __user *)arg);
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ return fscrypt_ioctl_get_policy_ex(file, (void __user *)arg);
+
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ return fscrypt_ioctl_add_key(file, (void __user *)arg);
+
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ return fscrypt_ioctl_remove_key(file, (void __user *)arg);
+
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ return fscrypt_ioctl_remove_key_all_users(file,
+ (void __user *)arg);
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
+ return fscrypt_ioctl_get_key_status(file, (void __user *)arg);
+
default:
return -ENOTTY;
}
@@ -202,6 +217,11 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case FS_IOC_SET_ENCRYPTION_POLICY:
case FS_IOC_GET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 4fd9683b8245..388fe8f5dc51 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -503,7 +503,7 @@ static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
{
if (c->double_hash)
- dent->cookie = prandom_u32();
+ dent->cookie = (__force __le32) prandom_u32();
else
dent->cookie = 0;
}
@@ -899,7 +899,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
fname_name(&nm) = xent->name;
fname_len(&nm) = le16_to_cpu(xent->nlen);
- xino = ubifs_iget(c->vfs_sb, xent->inum);
+ xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum));
if (IS_ERR(xino)) {
err = PTR_ERR(xino);
ubifs_err(c, "dead directory entry '%s', error %d",
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index b52624e28fa1..54d6db61106f 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,6 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
{
if (orph->del) {
- spin_unlock(&c->orphan_lock);
dbg_gen("deleted twice ino %lu", orph->inum);
return;
}
@@ -138,7 +137,6 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
orph->del = 1;
orph->dnext = c->orph_dnext;
c->orph_dnext = orph;
- spin_unlock(&c->orphan_lock);
dbg_gen("delete later ino %lu", orph->inum);
return;
}
@@ -633,12 +631,17 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
ino_t inum;
int i, n, err, first = 1;
+ ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
+ if (!ino)
+ return -ENOMEM;
+
list_for_each_entry(snod, &sleb->nodes, list) {
if (snod->type != UBIFS_ORPH_NODE) {
ubifs_err(c, "invalid node type %d in orphan area at %d:%d",
snod->type, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_free;
}
orph = snod->node;
@@ -665,20 +668,18 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d",
cmt_no, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_free;
}
dbg_rcvry("out of date LEB %d", sleb->lnum);
*outofdate = 1;
- return 0;
+ err = 0;
+ goto out_free;
}
if (first)
first = 0;
- ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
- if (!ino)
- return -ENOMEM;
-
n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
for (i = 0; i < n; i++) {
union ubifs_key key1, key2;
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index a551eb3e9b89..2b7c04bf8983 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -184,7 +184,7 @@ static int create_default_filesystem(struct ubifs_info *c)
if (err)
goto out;
} else {
- sup->hash_algo = 0xffff;
+ sup->hash_algo = cpu_to_le16(0xffff);
}
sup->ch.node_type = UBIFS_SB_NODE;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 2c0803b0ac3a..5e1e8ec0589e 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -318,6 +318,16 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
return err;
}
+static int ubifs_drop_inode(struct inode *inode)
+{
+ int drop = generic_drop_inode(inode);
+
+ if (!drop)
+ drop = fscrypt_drop_inode(inode);
+
+ return drop;
+}
+
static void ubifs_evict_inode(struct inode *inode)
{
int err;
@@ -609,6 +619,10 @@ static int init_constants_early(struct ubifs_info *c)
c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
if (c->max_bu_buf_len > c->leb_size)
c->max_bu_buf_len = c->leb_size;
+
+ /* Log is ready, preserve one LEB for commits. */
+ c->min_log_bytes = c->leb_size;
+
return 0;
}
@@ -1990,6 +2004,7 @@ const struct super_operations ubifs_super_operations = {
.free_inode = ubifs_free_inode,
.put_super = ubifs_put_super,
.write_inode = ubifs_write_inode,
+ .drop_inode = ubifs_drop_inode,
.evict_inode = ubifs_evict_inode,
.statfs = ubifs_statfs,
.dirty_inode = ubifs_dirty_inode,
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index a384a0f9ff32..234be1c4dc87 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -212,7 +212,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
/**
* layout_leb_in_gaps - layout index nodes using in-the-gaps method.
* @c: UBIFS file-system description object
- * @p: return LEB number here
+ * @p: return LEB number in @c->gap_lebs[p]
*
* This function lays out new index nodes for dirty znodes using in-the-gaps
* method of TNC commit.
@@ -221,7 +221,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
* This function returns the number of index nodes written into the gaps, or a
* negative error code on failure.
*/
-static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
+static int layout_leb_in_gaps(struct ubifs_info *c, int p)
{
struct ubifs_scan_leb *sleb;
struct ubifs_scan_node *snod;
@@ -236,7 +236,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
* filled, however we do not check there at present.
*/
return lnum; /* Error code */
- *p = lnum;
+ c->gap_lebs[p] = lnum;
dbg_gc("LEB %d", lnum);
/*
* Scan the index LEB. We use the generic scan for this even though
@@ -355,7 +355,7 @@ static int get_leb_cnt(struct ubifs_info *c, int cnt)
*/
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
- int err, leb_needed_cnt, written, *p;
+ int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;
dbg_gc("%d znodes to write", cnt);
@@ -364,9 +364,9 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
if (!c->gap_lebs)
return -ENOMEM;
- p = c->gap_lebs;
+ old_idx_lebs = c->lst.idx_lebs;
do {
- ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
+ ubifs_assert(c, p < c->lst.idx_lebs);
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;
@@ -392,9 +392,29 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
leb_needed_cnt = get_leb_cnt(c, cnt);
dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
leb_needed_cnt, c->ileb_cnt);
+ /*
+	 * Dynamically grow @c->gap_lebs to prevent an out-of-bounds
+	 * access, because @c->lst.idx_lebs can be increased by
+	 * get_idx_gc_leb() (called via layout_leb_in_gaps ->
+	 * ubifs_find_dirty_idx_leb) during the loop. Only enlarge
+	 * @c->gap_lebs when needed.
+ */
+ if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
+ old_idx_lebs < c->lst.idx_lebs) {
+ old_idx_lebs = c->lst.idx_lebs;
+ gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
+ (old_idx_lebs + 1), GFP_NOFS);
+ if (!gap_lebs) {
+ kfree(c->gap_lebs);
+ c->gap_lebs = NULL;
+ return -ENOMEM;
+ }
+ c->gap_lebs = gap_lebs;
+ }
} while (leb_needed_cnt > c->ileb_cnt);
- *p = -1;
+ c->gap_lebs[p] = -1;
return 0;
}
diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c
index 6f293f662d98..49cb34c3f324 100644
--- a/fs/ubifs/tnc_misc.c
+++ b/fs/ubifs/tnc_misc.c
@@ -284,6 +284,7 @@ static int read_znode(struct ubifs_info *c, struct ubifs_zbranch *zzbr,
err = ubifs_node_check_hash(c, idx, zzbr->hash);
if (err) {
ubifs_bad_hash(c, idx, zzbr->hash, lnum, offs);
+ kfree(idx);
return err;
}
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index ec85aeaed54a..02f03fadb75b 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -325,6 +325,17 @@ got_block:
newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
(sizeof(struct spaceBitmapDesc) << 3);
+ if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
+ /*
+ * Ran off the end of the bitmap, and bits following are
+ * non-compliant (not all zero)
+ */
+ udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
+ " as free, partition length is %u)\n", partition,
+ newblock, sbi->s_partmaps[partition].s_partition_len);
+ goto error_return;
+ }
+
if (!udf_clear_bit(bit, bh->b_data)) {
udf_debug("bit already cleared for block %d\n", bit);
goto repeat;
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index 9f24bd1a9f44..fb7f2c7bec9c 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -88,6 +88,20 @@ struct regid {
#define ENTITYID_FLAGS_DIRTY 0x00
#define ENTITYID_FLAGS_PROTECTED 0x01
+/* OSTA UDF 2.1.5.2 */
+#define UDF_ID_COMPLIANT "*OSTA UDF Compliant"
+
+/* OSTA UDF 2.1.5.3 */
+struct domainEntityIDSuffix {
+ uint16_t revision;
+ uint8_t flags;
+ uint8_t reserved[5];
+};
+
+/* OSTA UDF 2.1.5.3 */
+#define ENTITYIDSUFFIX_FLAGS_HARDWRITEPROTECT 0
+#define ENTITYIDSUFFIX_FLAGS_SOFTWRITEPROTECT 1
+
/* Volume Structure Descriptor (ECMA 167r3 2/9.1) */
#define VSD_STD_ID_LEN 5
struct volStructDesc {
diff --git a/fs/udf/file.c b/fs/udf/file.c
index cd31e4f6d6da..628941a6b79a 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -280,6 +280,9 @@ static int udf_setattr(struct dentry *dentry, struct iattr *attr)
return error;
}
+ if (attr->ia_valid & ATTR_MODE)
+ udf_update_extra_perms(inode, attr->ia_mode);
+
setattr_copy(inode, attr);
mark_inode_dirty(inode);
return 0;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index f8e5872f7cc2..0adb40718a5d 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -118,6 +118,9 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
iinfo->i_lenAlloc = 0;
iinfo->i_use = 0;
iinfo->i_checkpoint = 1;
+ iinfo->i_extraPerms = FE_PERM_U_CHATTR;
+ udf_update_extra_perms(inode, mode);
+
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 9bb18311a22f..ea80036d7897 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -45,6 +45,13 @@
#define EXTENT_MERGE_SIZE 5
+#define FE_MAPPED_PERMS (FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \
+ FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \
+ FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC)
+
+#define FE_DELETE_PERMS (FE_PERM_U_DELETE | FE_PERM_G_DELETE | \
+ FE_PERM_O_DELETE)
+
static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static int udf_sync_inode(struct inode *inode);
@@ -1458,6 +1465,8 @@ reread:
else
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~sbi->s_umask;
+ iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;
+
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
@@ -1485,6 +1494,8 @@ reread:
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
+ iinfo->i_streamdir = 0;
+ iinfo->i_lenStreams = 0;
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
@@ -1498,6 +1509,16 @@ reread:
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
+
+ /* Named streams */
+ iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
+ iinfo->i_locStreamdir =
+ lelb_to_cpu(efe->streamDirectoryICB.extLocation);
+ iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
+ if (iinfo->i_lenStreams >= inode->i_size)
+ iinfo->i_lenStreams -= inode->i_size;
+ else
+ iinfo->i_lenStreams = 0;
}
inode->i_generation = iinfo->i_unique;
@@ -1619,6 +1640,23 @@ static umode_t udf_convert_permissions(struct fileEntry *fe)
return mode;
}
+void udf_update_extra_perms(struct inode *inode, umode_t mode)
+{
+ struct udf_inode_info *iinfo = UDF_I(inode);
+
+ /*
+ * UDF 2.01 sec. 3.3.3.3 Note 2:
+ * In Unix, delete permission tracks write
+ */
+ iinfo->i_extraPerms &= ~FE_DELETE_PERMS;
+ if (mode & 0200)
+ iinfo->i_extraPerms |= FE_PERM_U_DELETE;
+ if (mode & 0020)
+ iinfo->i_extraPerms |= FE_PERM_G_DELETE;
+ if (mode & 0002)
+ iinfo->i_extraPerms |= FE_PERM_O_DELETE;
+}
+
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
@@ -1691,10 +1729,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
((inode->i_mode & 0070) << 2) |
((inode->i_mode & 0700) << 4);
- udfperms |= (le32_to_cpu(fe->permissions) &
- (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
- FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
- FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
+ udfperms |= iinfo->i_extraPerms;
fe->permissions = cpu_to_le32(udfperms);
if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
@@ -1760,9 +1795,19 @@ static int udf_update_inode(struct inode *inode, int do_sync)
iinfo->i_ext.i_data,
inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry));
- efe->objectSize = cpu_to_le64(inode->i_size);
+ efe->objectSize =
+ cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
+ if (iinfo->i_streamdir) {
+ struct long_ad *icb_lad = &efe->streamDirectoryICB;
+
+ icb_lad->extLocation =
+ cpu_to_lelb(iinfo->i_locStreamdir);
+ icb_lad->extLength =
+ cpu_to_le32(inode->i_sb->s_blocksize);
+ }
+
udf_adjust_time(iinfo, inode->i_atime);
udf_adjust_time(iinfo, inode->i_mtime);
udf_adjust_time(iinfo, inode->i_ctime);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index a14346137361..8c28e93e9b73 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -92,10 +92,6 @@ static void udf_put_super(struct super_block *);
static int udf_sync_fs(struct super_block *, int);
static int udf_remount_fs(struct super_block *, int *, char *);
static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
-static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *,
- struct kernel_lb_addr *);
-static void udf_load_fileset(struct super_block *, struct buffer_head *,
- struct kernel_lb_addr *);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
@@ -151,9 +147,11 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
ei->i_unique = 0;
ei->i_lenExtents = 0;
+ ei->i_lenStreams = 0;
ei->i_next_alloc_block = 0;
ei->i_next_alloc_goal = 0;
ei->i_strat4096 = 0;
+ ei->i_streamdir = 0;
init_rwsem(&ei->i_data_sem);
ei->cached_extent.lstart = -1;
spin_lock_init(&ei->i_extent_cache_lock);
@@ -271,8 +269,7 @@ static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
int nr_groups = bitmap->s_nr_groups;
for (i = 0; i < nr_groups; i++)
- if (bitmap->s_block_bitmap[i])
- brelse(bitmap->s_block_bitmap[i]);
+ brelse(bitmap->s_block_bitmap[i]);
kvfree(bitmap);
}
@@ -646,16 +643,67 @@ out_unlock:
return error;
}
-/* Check Volume Structure Descriptors (ECMA 167 2/9.1) */
-/* We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
-static loff_t udf_check_vsd(struct super_block *sb)
+/*
+ * Check a VSD descriptor. Returns -1 if we are at the end of the volume
+ * recognition area, 0 if the descriptor is valid but uninteresting, and 1
+ * if we found one of the NSR descriptors we are looking for.
+ */
+static int identify_vsd(const struct volStructDesc *vsd)
+{
+ int ret = 0;
+
+ if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
+ switch (vsd->structType) {
+ case 0:
+ udf_debug("ISO9660 Boot Record found\n");
+ break;
+ case 1:
+ udf_debug("ISO9660 Primary Volume Descriptor found\n");
+ break;
+ case 2:
+ udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
+ break;
+ case 3:
+ udf_debug("ISO9660 Volume Partition Descriptor found\n");
+ break;
+ case 255:
+ udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
+ break;
+ default:
+ udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
+ break;
+ }
+ } else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
+ ; /* ret = 0 */
+ else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
+ ret = 1;
+ else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
+ ret = 1;
+ else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN))
+ ; /* ret = 0 */
+ else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN))
+ ; /* ret = 0 */
+ else {
+ /* TEA01 or invalid id : end of volume recognition area */
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/*
+ * Check Volume Structure Descriptors (ECMA 167 2/9.1)
+ * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1)
+ * @return 1 if NSR02 or NSR03 found,
+ * -1 if first sector read error, 0 otherwise
+ */
+static int udf_check_vsd(struct super_block *sb)
{
struct volStructDesc *vsd = NULL;
loff_t sector = VSD_FIRST_SECTOR_OFFSET;
int sectorsize;
struct buffer_head *bh = NULL;
- int nsr02 = 0;
- int nsr03 = 0;
+ int nsr = 0;
struct udf_sb_info *sbi;
sbi = UDF_SB(sb);
@@ -679,71 +727,36 @@ static loff_t udf_check_vsd(struct super_block *sb)
* activity. This actually happened with uninitialised SSD partitions
* (all 0xFF) before the check for the limit and all valid IDs were
* added */
- for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET;
- sector += sectorsize) {
+ for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) {
/* Read a block */
bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
if (!bh)
break;
- /* Look for ISO descriptors */
vsd = (struct volStructDesc *)(bh->b_data +
(sector & (sb->s_blocksize - 1)));
-
- if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
- VSD_STD_ID_LEN)) {
- switch (vsd->structType) {
- case 0:
- udf_debug("ISO9660 Boot Record found\n");
- break;
- case 1:
- udf_debug("ISO9660 Primary Volume Descriptor found\n");
- break;
- case 2:
- udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
- break;
- case 3:
- udf_debug("ISO9660 Volume Partition Descriptor found\n");
- break;
- case 255:
- udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
- break;
- default:
- udf_debug("ISO9660 VRS (%u) found\n",
- vsd->structType);
- break;
- }
- } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01,
- VSD_STD_ID_LEN))
- ; /* nothing */
- else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01,
- VSD_STD_ID_LEN)) {
- brelse(bh);
- break;
- } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02,
- VSD_STD_ID_LEN))
- nsr02 = sector;
- else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
- VSD_STD_ID_LEN))
- nsr03 = sector;
- else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BOOT2,
- VSD_STD_ID_LEN))
- ; /* nothing */
- else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CDW02,
- VSD_STD_ID_LEN))
- ; /* nothing */
- else {
- /* invalid id : end of volume recognition area */
+ nsr = identify_vsd(vsd);
+ /* Found NSR or end? */
+ if (nsr) {
brelse(bh);
break;
}
+ /*
+ * Special handling for improperly formatted VRS (e.g., Win10)
+ * where components are separated by 2048 bytes even though
+ * sectors are 4K
+ */
+ if (sb->s_blocksize == 4096) {
+ nsr = identify_vsd(vsd + 1);
+ /* Ignore unknown IDs... */
+ if (nsr < 0)
+ nsr = 0;
+ }
brelse(bh);
}
- if (nsr03)
- return nsr03;
- else if (nsr02)
- return nsr02;
+ if (nsr > 0)
+ return 1;
else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
VSD_FIRST_SECTOR_OFFSET)
return -1;
@@ -751,34 +764,82 @@ static loff_t udf_check_vsd(struct super_block *sb)
return 0;
}
+static int udf_verify_domain_identifier(struct super_block *sb,
+ struct regid *ident, char *dname)
+{
+ struct domainEntityIDSuffix *suffix;
+
+ if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
+ udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
+ goto force_ro;
+ }
+ if (ident->flags & (1 << ENTITYID_FLAGS_DIRTY)) {
+ udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
+ dname);
+ goto force_ro;
+ }
+ suffix = (struct domainEntityIDSuffix *)ident->identSuffix;
+ if (suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_HARDWRITEPROTECT) ||
+ suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_SOFTWRITEPROTECT)) {
+ if (!sb_rdonly(sb)) {
+ udf_warn(sb, "Descriptor for %s marked write protected."
+ " Forcing read only mount.\n", dname);
+ }
+ goto force_ro;
+ }
+ return 0;
+
+force_ro:
+ if (!sb_rdonly(sb))
+ return -EACCES;
+ UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
+ return 0;
+}
+
+static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset,
+ struct kernel_lb_addr *root)
+{
+ int ret;
+
+ ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set");
+ if (ret < 0)
+ return ret;
+
+ *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
+ UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
+
+ udf_debug("Rootdir at block=%u, partition=%u\n",
+ root->logicalBlockNum, root->partitionReferenceNum);
+ return 0;
+}
+
static int udf_find_fileset(struct super_block *sb,
struct kernel_lb_addr *fileset,
struct kernel_lb_addr *root)
{
struct buffer_head *bh = NULL;
uint16_t ident;
+ int ret;
- if (fileset->logicalBlockNum != 0xFFFFFFFF ||
- fileset->partitionReferenceNum != 0xFFFF) {
- bh = udf_read_ptagged(sb, fileset, 0, &ident);
-
- if (!bh) {
- return 1;
- } else if (ident != TAG_IDENT_FSD) {
- brelse(bh);
- return 1;
- }
-
- udf_debug("Fileset at block=%u, partition=%u\n",
- fileset->logicalBlockNum,
- fileset->partitionReferenceNum);
+ if (fileset->logicalBlockNum == 0xFFFFFFFF &&
+ fileset->partitionReferenceNum == 0xFFFF)
+ return -EINVAL;
- UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
- udf_load_fileset(sb, bh, root);
+ bh = udf_read_ptagged(sb, fileset, 0, &ident);
+ if (!bh)
+ return -EIO;
+ if (ident != TAG_IDENT_FSD) {
brelse(bh);
- return 0;
+ return -EINVAL;
}
- return 1;
+
+ udf_debug("Fileset at block=%u, partition=%u\n",
+ fileset->logicalBlockNum, fileset->partitionReferenceNum);
+
+ UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
+ ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root);
+ brelse(bh);
+ return ret;
}
/*
@@ -794,9 +855,7 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
struct buffer_head *bh;
uint16_t ident;
int ret = -ENOMEM;
-#ifdef UDFFS_DEBUG
struct timestamp *ts;
-#endif
outstr = kmalloc(128, GFP_NOFS);
if (!outstr)
@@ -817,13 +876,10 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
pvoldesc->recordingDateAndTime);
-#ifdef UDFFS_DEBUG
ts = &pvoldesc->recordingDateAndTime;
udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
ts->minute, le16_to_cpu(ts->typeAndTimezone));
-#endif
-
ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
if (ret < 0) {
@@ -939,21 +995,6 @@ static int udf_load_metadata_files(struct super_block *sb, int partition,
return 0;
}
-static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
- struct kernel_lb_addr *root)
-{
- struct fileSetDesc *fset;
-
- fset = (struct fileSetDesc *)bh->b_data;
-
- *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
-
- UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
-
- udf_debug("Rootdir at block=%u, partition=%u\n",
- root->logicalBlockNum, root->partitionReferenceNum);
-}
-
int udf_compute_nr_groups(struct super_block *sb, u32 partition)
{
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
@@ -1238,9 +1279,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
* PHYSICAL partitions are already set up
*/
type1_idx = i;
-#ifdef UDFFS_DEBUG
map = NULL; /* supress 'maybe used uninitialized' warning */
-#endif
for (i = 0; i < sbi->s_partitions; i++) {
map = &sbi->s_partmaps[i];
@@ -1364,6 +1403,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
goto out_bh;
}
+ ret = udf_verify_domain_identifier(sb, &lvd->domainIdent,
+ "logical volume");
+ if (ret)
+ goto out_bh;
ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
if (ret)
goto out_bh;
@@ -1915,7 +1958,7 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
int silent, struct kernel_lb_addr *fileset)
{
struct udf_sb_info *sbi = UDF_SB(sb);
- loff_t nsr_off;
+ int nsr = 0;
int ret;
if (!sb_set_blocksize(sb, uopt->blocksize)) {
@@ -1926,13 +1969,13 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
sbi->s_last_block = uopt->lastblock;
if (!uopt->novrs) {
/* Check that it is NSR02 compliant */
- nsr_off = udf_check_vsd(sb);
- if (!nsr_off) {
+ nsr = udf_check_vsd(sb);
+ if (!nsr) {
if (!silent)
udf_warn(sb, "No VRS found\n");
return -EINVAL;
}
- if (nsr_off == -1)
+ if (nsr == -1)
udf_debug("Failed to read sector at offset %d. "
"Assuming open disc. Skipping validity "
"check\n", VSD_FIRST_SECTOR_OFFSET);
@@ -2216,9 +2259,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
}
- if (udf_find_fileset(sb, &fileset, &rootdir)) {
+ ret = udf_find_fileset(sb, &fileset, &rootdir);
+ if (ret < 0) {
udf_warn(sb, "No fileset found\n");
- ret = -EINVAL;
goto error_out;
}
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index 2ef0e212f08a..4245d1f63258 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -38,16 +38,20 @@ struct udf_inode_info {
__u32 i_next_alloc_block;
__u32 i_next_alloc_goal;
__u32 i_checkpoint;
+ __u32 i_extraPerms;
unsigned i_alloc_type : 3;
unsigned i_efe : 1; /* extendedFileEntry */
unsigned i_use : 1; /* unallocSpaceEntry */
unsigned i_strat4096 : 1;
- unsigned reserved : 26;
+ unsigned i_streamdir : 1;
+ unsigned reserved : 25;
union {
struct short_ad *i_sad;
struct long_ad *i_lad;
__u8 *i_data;
} i_ext;
+ struct kernel_lb_addr i_locStreamdir;
+ __u64 i_lenStreams;
struct rw_semaphore i_data_sem;
struct udf_ext_cache cached_extent;
/* Spinlock for protecting extent cache */
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index d89ef71887fc..9dd0814f1077 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -31,16 +31,8 @@ extern __printf(3, 4) void _udf_warn(struct super_block *sb,
#define udf_info(fmt, ...) \
pr_info("INFO " fmt, ##__VA_ARGS__)
-#undef UDFFS_DEBUG
-
-#ifdef UDFFS_DEBUG
-#define udf_debug(fmt, ...) \
- printk(KERN_DEBUG pr_fmt("%s:%d:%s: " fmt), \
- __FILE__, __LINE__, __func__, ##__VA_ARGS__)
-#else
#define udf_debug(fmt, ...) \
- no_printk(fmt, ##__VA_ARGS__)
-#endif
+ pr_debug("%s:%d:%s: " fmt, __FILE__, __LINE__, __func__, ##__VA_ARGS__)
#define udf_fixed_to_variable(x) ( ( ( (x) >> 5 ) * 39 ) + ( (x) & 0x0000001F ) )
#define udf_variable_to_fixed(x) ( ( ( (x) / 39 ) << 5 ) + ( (x) % 39 ) )
@@ -178,6 +170,7 @@ extern int8_t udf_next_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t *, int);
extern int8_t udf_current_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t *, int);
+extern void udf_update_extra_perms(struct inode *inode, umode_t mode);
/* misc.c */
extern struct buffer_head *udf_tgetblk(struct super_block *sb,
diff --git a/fs/ufs/Kconfig b/fs/ufs/Kconfig
index fcb41516ea59..6d30adb6b890 100644
--- a/fs/ufs/Kconfig
+++ b/fs/ufs/Kconfig
@@ -9,7 +9,7 @@ config UFS_FS
this file system as well. Saying Y here will allow you to read from
these partitions; if you also want to write to them, say Y to the
experimental "UFS file system write support", below. Please read the
- file <file:Documentation/filesystems/ufs.txt> for more information.
+ file <file:Documentation/admin-guide/ufs.rst> for more information.
The recently released UFS2 variant (used in FreeBSD 5.x) is
READ-ONLY supported.
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 4ed0dca52ec8..1da0be667409 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -843,6 +843,10 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_time_gran = NSEC_PER_SEC;
+ sb->s_time_min = S32_MIN;
+ sb->s_time_max = S32_MAX;
+
switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
case UFS_MOUNT_UFSTYPE_44BSD:
UFSD("ufstype=44bsd\n");
@@ -861,6 +865,9 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
uspi->s_fshift = 9;
uspi->s_sbsize = super_block_size = 1536;
uspi->s_sbbase = 0;
+ sb->s_time_gran = 1;
+ sb->s_time_min = S64_MIN;
+ sb->s_time_max = S64_MAX;
flags |= UFS_TYPE_UFS2 | UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
break;
diff --git a/fs/unicode/utf8-core.c b/fs/unicode/utf8-core.c
index 71ca4d047d65..2a878b739115 100644
--- a/fs/unicode/utf8-core.c
+++ b/fs/unicode/utf8-core.c
@@ -154,7 +154,7 @@ static int utf8_parse_version(const char *version, unsigned int *maj,
{
substring_t args[3];
char version_string[12];
- const struct match_token token[] = {
+ static const struct match_token token[] = {
{1, "%d.%d.%d"},
{0, NULL}
};
diff --git a/fs/unicode/utf8-selftest.c b/fs/unicode/utf8-selftest.c
index 6c1a36bbf6ad..6fe8af7edccb 100644
--- a/fs/unicode/utf8-selftest.c
+++ b/fs/unicode/utf8-selftest.c
@@ -35,7 +35,7 @@ unsigned int total_tests;
#define test_f(cond, fmt, ...) _test(cond, __func__, __LINE__, fmt, ##__VA_ARGS__)
#define test(cond) _test(cond, __func__, __LINE__, "")
-const static struct {
+static const struct {
/* UTF-8 strings in this vector _must_ be NULL-terminated. */
unsigned char str[10];
unsigned char dec[10];
@@ -89,7 +89,7 @@ const static struct {
};
-const static struct {
+static const struct {
/* UTF-8 strings in this vector _must_ be NULL-terminated. */
unsigned char str[30];
unsigned char ncf[30];
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ccbdbd62f0d8..37df7c9eedb1 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
+ bool still_valid;
WRITE_ONCE(ctx->released, true);
@@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* taking the mmap_sem for writing.
*/
down_write(&mm->mmap_sem);
- if (!mmget_still_valid(mm))
- goto skip_mm;
+ still_valid = mmget_still_valid(mm);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
cond_resched();
@@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
continue;
}
new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
- new_flags, vma->anon_vma,
- vma->vm_file, vma->vm_pgoff,
- vma_policy(vma),
- NULL_VM_UFFD_CTX);
- if (prev)
- vma = prev;
- else
- prev = vma;
+ if (still_valid) {
+ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+ new_flags, vma->anon_vma,
+ vma->vm_file, vma->vm_pgoff,
+ vma_policy(vma),
+ NULL_VM_UFFD_CTX);
+ if (prev)
+ vma = prev;
+ else
+ prev = vma;
+ }
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
-skip_mm:
up_write(&mm->mmap_sem);
mmput(mm);
wakeup:
@@ -1271,21 +1272,23 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
}
static __always_inline int validate_range(struct mm_struct *mm,
- __u64 start, __u64 len)
+ __u64 *start, __u64 len)
{
__u64 task_size = mm->task_size;
- if (start & ~PAGE_MASK)
+ *start = untagged_addr(*start);
+
+ if (*start & ~PAGE_MASK)
return -EINVAL;
if (len & ~PAGE_MASK)
return -EINVAL;
if (!len)
return -EINVAL;
- if (start < mmap_min_addr)
+ if (*start < mmap_min_addr)
return -EINVAL;
- if (start >= task_size)
+ if (*start >= task_size)
return -EINVAL;
- if (len > task_size - start)
+ if (len > task_size - *start)
return -EINVAL;
return 0;
}
@@ -1335,7 +1338,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
goto out;
}
- ret = validate_range(mm, uffdio_register.range.start,
+ ret = validate_range(mm, &uffdio_register.range.start,
uffdio_register.range.len);
if (ret)
goto out;
@@ -1457,7 +1460,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
start = vma->vm_start;
vma_end = min(end, vma->vm_end);
- new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
+ new_flags = (vma->vm_flags &
+ ~(VM_UFFD_MISSING|VM_UFFD_WP)) | vm_flags;
prev = vma_merge(mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
@@ -1524,7 +1528,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
goto out;
- ret = validate_range(mm, uffdio_unregister.start,
+ ret = validate_range(mm, &uffdio_unregister.start,
uffdio_unregister.len);
if (ret)
goto out;
@@ -1675,7 +1679,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
goto out;
- ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
+ ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
if (ret)
goto out;
@@ -1715,7 +1719,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
sizeof(uffdio_copy)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
+ ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
if (ret)
goto out;
/*
@@ -1771,7 +1775,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
sizeof(uffdio_zeropage)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
+ ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
uffdio_zeropage.range.len);
if (ret)
goto out;
@@ -1831,13 +1835,12 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
goto out;
features = uffdio_api.features;
- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
- memset(&uffdio_api, 0, sizeof(uffdio_api));
- if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
- goto out;
- ret = -EINVAL;
- goto out;
- }
+ ret = -EINVAL;
+ if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
+ goto err_out;
+ ret = -EPERM;
+ if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+ goto err_out;
/* report all available features and ioctls to userland */
uffdio_api.features = UFFD_API_FEATURES;
uffdio_api.ioctls = UFFD_API_IOCTLS;
@@ -1850,6 +1853,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
ret = 0;
out:
return ret;
+err_out:
+ memset(&uffdio_api, 0, sizeof(uffdio_api));
+ if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+ ret = -EFAULT;
+ goto out;
}
static long userfaultfd_ioctl(struct file *file, unsigned cmd,
@@ -1920,7 +1928,7 @@ static const struct file_operations userfaultfd_fops = {
.poll = userfaultfd_poll,
.read = userfaultfd_read,
.unlocked_ioctl = userfaultfd_ioctl,
- .compat_ioctl = userfaultfd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
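
The validate_range() change above makes the helper take a pointer and strip any architecture address tag first, so every caller (register, unregister, wake, copy, zeropage) goes on to use the untagged start address. A rough, self-contained sketch of the resulting check follows; the types are simplified and the tag mask is only an arm64-style illustration, not the kernel's untagged_addr():

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE       4096ULL
#define PAGE_MASK       (~(PAGE_SIZE - 1))

static uint64_t untag(uint64_t addr)
{
        return addr & ((1ULL << 56) - 1);       /* drop a hypothetical top-byte tag */
}

static bool range_ok(uint64_t *start, uint64_t len,
                     uint64_t min_addr, uint64_t task_size)
{
        *start = untag(*start);                 /* callers reuse the untagged value */
        if ((*start & ~PAGE_MASK) || (len & ~PAGE_MASK) || len == 0)
                return false;
        if (*start < min_addr || *start >= task_size)
                return false;
        return len <= task_size - *start;
}
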
diff --git a/fs/utimes.c b/fs/utimes.c
index 350c9c16ace1..c952b6b3d8a0 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -36,16 +36,14 @@ static int utimes_common(const struct path *path, struct timespec64 *times)
if (times[0].tv_nsec == UTIME_OMIT)
newattrs.ia_valid &= ~ATTR_ATIME;
else if (times[0].tv_nsec != UTIME_NOW) {
- newattrs.ia_atime.tv_sec = times[0].tv_sec;
- newattrs.ia_atime.tv_nsec = times[0].tv_nsec;
+ newattrs.ia_atime = timestamp_truncate(times[0], inode);
newattrs.ia_valid |= ATTR_ATIME_SET;
}
if (times[1].tv_nsec == UTIME_OMIT)
newattrs.ia_valid &= ~ATTR_MTIME;
else if (times[1].tv_nsec != UTIME_NOW) {
- newattrs.ia_mtime.tv_sec = times[1].tv_sec;
- newattrs.ia_mtime.tv_nsec = times[1].tv_nsec;
+ newattrs.ia_mtime = timestamp_truncate(times[1], inode);
newattrs.ia_valid |= ATTR_MTIME_SET;
}
/*
@@ -163,9 +161,9 @@ SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
* utimensat() instead.
*/
static long do_futimesat(int dfd, const char __user *filename,
- struct timeval __user *utimes)
+ struct __kernel_old_timeval __user *utimes)
{
- struct timeval times[2];
+ struct __kernel_old_timeval times[2];
struct timespec64 tstimes[2];
if (utimes) {
@@ -192,13 +190,13 @@ static long do_futimesat(int dfd, const char __user *filename,
SYSCALL_DEFINE3(futimesat, int, dfd, const char __user *, filename,
- struct timeval __user *, utimes)
+ struct __kernel_old_timeval __user *, utimes)
{
return do_futimesat(dfd, filename, utimes);
}
SYSCALL_DEFINE2(utimes, char __user *, filename,
- struct timeval __user *, utimes)
+ struct __kernel_old_timeval __user *, utimes)
{
return do_futimesat(AT_FDCWD, filename, utimes);
}
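
utimes_common() now funnels explicit timestamps through timestamp_truncate() instead of copying them verbatim, so the stored value respects the filesystem's granularity and range, i.e. the same s_time_gran / s_time_min / s_time_max limits the UFS hunk above fills in. A hedged sketch of that clamping logic, with invented struct and function names, for illustration only:

#include <stdint.h>
#include <time.h>

struct fs_time_limits {
        int64_t  min_sec;       /* e.g. S32_MIN for UFS1, S64_MIN for UFS2 */
        int64_t  max_sec;       /* e.g. S32_MAX for UFS1 */
        uint32_t gran_ns;       /* 1 for ns resolution, 1000000000 for 1 s; must be nonzero */
};

static struct timespec clamp_timestamp(struct timespec t,
                                       const struct fs_time_limits *lim)
{
        if (t.tv_sec < lim->min_sec) {
                t.tv_sec = lim->min_sec;
                t.tv_nsec = 0;
        } else if (t.tv_sec > lim->max_sec) {
                t.tv_sec = lim->max_sec;
                t.tv_nsec = 999999999;
        } else {
                t.tv_nsec -= t.tv_nsec % lim->gran_ns;  /* round down to granularity */
        }
        return t;
}
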
diff --git a/fs/verity/Kconfig b/fs/verity/Kconfig
new file mode 100644
index 000000000000..88fb25119899
--- /dev/null
+++ b/fs/verity/Kconfig
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config FS_VERITY
+ bool "FS Verity (read-only file-based authenticity protection)"
+ select CRYPTO
+ # SHA-256 is selected as it's intended to be the default hash algorithm.
+ # To avoid bloat, other wanted algorithms must be selected explicitly.
+ select CRYPTO_SHA256
+ help
+ This option enables fs-verity. fs-verity is the dm-verity
+ mechanism implemented at the file level. On supported
+ filesystems (currently EXT4 and F2FS), userspace can use an
+ ioctl to enable verity for a file, which causes the filesystem
+ to build a Merkle tree for the file. The filesystem will then
+ transparently verify any data read from the file against the
+ Merkle tree. The file is also made read-only.
+
+ This serves as an integrity check, but the availability of the
+ Merkle tree root hash also allows efficiently supporting
+ various use cases where normally the whole file would need to
+ be hashed at once, such as: (a) auditing (logging the file's
+ hash), or (b) authenticity verification (comparing the hash
+ against a known good value, e.g. from a digital signature).
+
+ fs-verity is especially useful on large files where not all
+ the contents may actually be needed. Also, fs-verity verifies
+ data each time it is paged back in, which provides better
+ protection against malicious disks vs. an ahead-of-time hash.
+
+ If unsure, say N.
+
+config FS_VERITY_DEBUG
+ bool "FS Verity debugging"
+ depends on FS_VERITY
+ help
+ Enable debugging messages related to fs-verity by default.
+
+ Say N unless you are an fs-verity developer.
+
+config FS_VERITY_BUILTIN_SIGNATURES
+ bool "FS Verity builtin signature support"
+ depends on FS_VERITY
+ select SYSTEM_DATA_VERIFICATION
+ help
+ Support verifying signatures of verity files against the X.509
+ certificates that have been loaded into the ".fs-verity"
+ kernel keyring.
+
+ This is meant as a relatively simple mechanism that can be
+ used to provide an authenticity guarantee for verity files, as
+ an alternative to IMA appraisal. Userspace programs still
+ need to check that the verity bit is set in order to get an
+ authenticity guarantee.
+
+ If unsure, say N.
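
The help text above describes the enable flow from the kernel's side; from userspace it is a single ioctl on a read-only fd. A minimal sketch, assuming the UAPI header <linux/fsverity.h> that defines FS_IOC_ENABLE_VERITY, struct fsverity_enable_arg and FS_VERITY_HASH_ALG_SHA256:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>

int main(int argc, char **argv)
{
        struct fsverity_enable_arg arg;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s FILE\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);   /* the fd must be read-only */
        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&arg, 0, sizeof(arg));
        arg.version = 1;
        arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
        arg.block_size = 4096;          /* must equal the page size here */
        if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) != 0) {
                perror("FS_IOC_ENABLE_VERITY");
                return 1;
        }
        return 0;
}
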
diff --git a/fs/verity/Makefile b/fs/verity/Makefile
new file mode 100644
index 000000000000..570e9136334d
--- /dev/null
+++ b/fs/verity/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_FS_VERITY) += enable.o \
+ hash_algs.o \
+ init.o \
+ measure.o \
+ open.o \
+ verify.o
+
+obj-$(CONFIG_FS_VERITY_BUILTIN_SIGNATURES) += signature.o
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
new file mode 100644
index 000000000000..eabc6ac19906
--- /dev/null
+++ b/fs/verity/enable.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/verity/enable.c: ioctl to enable verity on a file
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <crypto/hash.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+static int build_merkle_tree_level(struct inode *inode, unsigned int level,
+ u64 num_blocks_to_hash,
+ const struct merkle_tree_params *params,
+ u8 *pending_hashes,
+ struct ahash_request *req)
+{
+ const struct fsverity_operations *vops = inode->i_sb->s_vop;
+ unsigned int pending_size = 0;
+ u64 dst_block_num;
+ u64 i;
+ int err;
+
+ if (WARN_ON(params->block_size != PAGE_SIZE)) /* checked earlier too */
+ return -EINVAL;
+
+ if (level < params->num_levels) {
+ dst_block_num = params->level_start[level];
+ } else {
+ if (WARN_ON(num_blocks_to_hash != 1))
+ return -EINVAL;
+ dst_block_num = 0; /* unused */
+ }
+
+ for (i = 0; i < num_blocks_to_hash; i++) {
+ struct page *src_page;
+
+ if ((pgoff_t)i % 10000 == 0 || i + 1 == num_blocks_to_hash)
+ pr_debug("Hashing block %llu of %llu for level %u\n",
+ i + 1, num_blocks_to_hash, level);
+
+ if (level == 0) {
+ /* Leaf: hashing a data block */
+ src_page = read_mapping_page(inode->i_mapping, i, NULL);
+ if (IS_ERR(src_page)) {
+ err = PTR_ERR(src_page);
+ fsverity_err(inode,
+ "Error %d reading data page %llu",
+ err, i);
+ return err;
+ }
+ } else {
+ /* Non-leaf: hashing hash block from level below */
+ src_page = vops->read_merkle_tree_page(inode,
+ params->level_start[level - 1] + i);
+ if (IS_ERR(src_page)) {
+ err = PTR_ERR(src_page);
+ fsverity_err(inode,
+ "Error %d reading Merkle tree page %llu",
+ err, params->level_start[level - 1] + i);
+ return err;
+ }
+ }
+
+ err = fsverity_hash_page(params, inode, req, src_page,
+ &pending_hashes[pending_size]);
+ put_page(src_page);
+ if (err)
+ return err;
+ pending_size += params->digest_size;
+
+ if (level == params->num_levels) /* Root hash? */
+ return 0;
+
+ if (pending_size + params->digest_size > params->block_size ||
+ i + 1 == num_blocks_to_hash) {
+ /* Flush the pending hash block */
+ memset(&pending_hashes[pending_size], 0,
+ params->block_size - pending_size);
+ err = vops->write_merkle_tree_block(inode,
+ pending_hashes,
+ dst_block_num,
+ params->log_blocksize);
+ if (err) {
+ fsverity_err(inode,
+ "Error %d writing Merkle tree block %llu",
+ err, dst_block_num);
+ return err;
+ }
+ dst_block_num++;
+ pending_size = 0;
+ }
+
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ cond_resched();
+ }
+ return 0;
+}
+
+/*
+ * Build the Merkle tree for the given inode using the given parameters, and
+ * return the root hash in @root_hash.
+ *
+ * The tree is written to a filesystem-specific location as determined by the
+ * ->write_merkle_tree_block() method. However, the blocks that comprise the
+ * tree are the same for all filesystems.
+ */
+static int build_merkle_tree(struct inode *inode,
+ const struct merkle_tree_params *params,
+ u8 *root_hash)
+{
+ u8 *pending_hashes;
+ struct ahash_request *req;
+ u64 blocks;
+ unsigned int level;
+ int err = -ENOMEM;
+
+ if (inode->i_size == 0) {
+ /* Empty file is a special case; root hash is all 0's */
+ memset(root_hash, 0, params->digest_size);
+ return 0;
+ }
+
+ pending_hashes = kmalloc(params->block_size, GFP_KERNEL);
+ req = ahash_request_alloc(params->hash_alg->tfm, GFP_KERNEL);
+ if (!pending_hashes || !req)
+ goto out;
+
+ /*
+ * Build each level of the Merkle tree, starting at the leaf level
+ * (level 0) and ascending to the root node (level 'num_levels - 1').
+ * Then at the end (level 'num_levels'), calculate the root hash.
+ */
+ blocks = (inode->i_size + params->block_size - 1) >>
+ params->log_blocksize;
+ for (level = 0; level <= params->num_levels; level++) {
+ err = build_merkle_tree_level(inode, level, blocks, params,
+ pending_hashes, req);
+ if (err)
+ goto out;
+ blocks = (blocks + params->hashes_per_block - 1) >>
+ params->log_arity;
+ }
+ memcpy(root_hash, pending_hashes, params->digest_size);
+ err = 0;
+out:
+ kfree(pending_hashes);
+ ahash_request_free(req);
+ return err;
+}
+
+static int enable_verity(struct file *filp,
+ const struct fsverity_enable_arg *arg)
+{
+ struct inode *inode = file_inode(filp);
+ const struct fsverity_operations *vops = inode->i_sb->s_vop;
+ struct merkle_tree_params params = { };
+ struct fsverity_descriptor *desc;
+ size_t desc_size = sizeof(*desc) + arg->sig_size;
+ struct fsverity_info *vi;
+ int err;
+
+ /* Start initializing the fsverity_descriptor */
+ desc = kzalloc(desc_size, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ desc->version = 1;
+ desc->hash_algorithm = arg->hash_algorithm;
+ desc->log_blocksize = ilog2(arg->block_size);
+
+ /* Get the salt if the user provided one */
+ if (arg->salt_size &&
+ copy_from_user(desc->salt,
+ (const u8 __user *)(uintptr_t)arg->salt_ptr,
+ arg->salt_size)) {
+ err = -EFAULT;
+ goto out;
+ }
+ desc->salt_size = arg->salt_size;
+
+ /* Get the signature if the user provided one */
+ if (arg->sig_size &&
+ copy_from_user(desc->signature,
+ (const u8 __user *)(uintptr_t)arg->sig_ptr,
+ arg->sig_size)) {
+ err = -EFAULT;
+ goto out;
+ }
+ desc->sig_size = cpu_to_le32(arg->sig_size);
+
+ desc->data_size = cpu_to_le64(inode->i_size);
+
+ /* Prepare the Merkle tree parameters */
+ err = fsverity_init_merkle_tree_params(&params, inode,
+ arg->hash_algorithm,
+ desc->log_blocksize,
+ desc->salt, desc->salt_size);
+ if (err)
+ goto out;
+
+ /*
+ * Start enabling verity on this file, serialized by the inode lock.
+ * Fail if verity is already enabled or is already being enabled.
+ */
+ inode_lock(inode);
+ if (IS_VERITY(inode))
+ err = -EEXIST;
+ else
+ err = vops->begin_enable_verity(filp);
+ inode_unlock(inode);
+ if (err)
+ goto out;
+
+ /*
+ * Build the Merkle tree. Don't hold the inode lock during this, since
+ * on huge files this may take a very long time and we don't want to
+ * force unrelated syscalls like chown() to block forever. We don't
+ * need the inode lock here because deny_write_access() already prevents
+ * the file from being written to or truncated, and we still serialize
+ * ->begin_enable_verity() and ->end_enable_verity() using the inode
+ * lock and only allow one process to be here at a time on a given file.
+ */
+ pr_debug("Building Merkle tree...\n");
+ BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE);
+ err = build_merkle_tree(inode, &params, desc->root_hash);
+ if (err) {
+ fsverity_err(inode, "Error %d building Merkle tree", err);
+ goto rollback;
+ }
+ pr_debug("Done building Merkle tree. Root hash is %s:%*phN\n",
+ params.hash_alg->name, params.digest_size, desc->root_hash);
+
+ /*
+ * Create the fsverity_info. Don't bother trying to save work by
+ * reusing the merkle_tree_params from above. Instead, just create the
+ * fsverity_info from the fsverity_descriptor as if it were just loaded
+ * from disk. This is simpler, and it serves as an extra check that the
+ * metadata we're writing is valid before actually enabling verity.
+ */
+ vi = fsverity_create_info(inode, desc, desc_size);
+ if (IS_ERR(vi)) {
+ err = PTR_ERR(vi);
+ goto rollback;
+ }
+
+ if (arg->sig_size)
+ pr_debug("Storing a %u-byte PKCS#7 signature alongside the file\n",
+ arg->sig_size);
+
+ /*
+ * Tell the filesystem to finish enabling verity on the file.
+ * Serialized with ->begin_enable_verity() by the inode lock.
+ */
+ inode_lock(inode);
+ err = vops->end_enable_verity(filp, desc, desc_size, params.tree_size);
+ inode_unlock(inode);
+ if (err) {
+ fsverity_err(inode, "%ps() failed with err %d",
+ vops->end_enable_verity, err);
+ fsverity_free_info(vi);
+ } else if (WARN_ON(!IS_VERITY(inode))) {
+ err = -EINVAL;
+ fsverity_free_info(vi);
+ } else {
+ /* Successfully enabled verity */
+
+ /*
+ * Readers can start using ->i_verity_info immediately, so it
+ * can't be rolled back once set. So don't set it until just
+ * after the filesystem has successfully enabled verity.
+ */
+ fsverity_set_info(inode, vi);
+ }
+out:
+ kfree(params.hashstate);
+ kfree(desc);
+ return err;
+
+rollback:
+ inode_lock(inode);
+ (void)vops->end_enable_verity(filp, NULL, 0, params.tree_size);
+ inode_unlock(inode);
+ goto out;
+}
+
+/**
+ * fsverity_ioctl_enable() - enable verity on a file
+ *
+ * Enable fs-verity on a file. See the "FS_IOC_ENABLE_VERITY" section of
+ * Documentation/filesystems/fsverity.rst for the documentation.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
+{
+ struct inode *inode = file_inode(filp);
+ struct fsverity_enable_arg arg;
+ int err;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.version != 1)
+ return -EINVAL;
+
+ if (arg.__reserved1 ||
+ memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2)))
+ return -EINVAL;
+
+ if (arg.block_size != PAGE_SIZE)
+ return -EINVAL;
+
+ if (arg.salt_size > FIELD_SIZEOF(struct fsverity_descriptor, salt))
+ return -EMSGSIZE;
+
+ if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
+ return -EMSGSIZE;
+
+ /*
+ * Require a regular file with write access. But the actual fd must
+ * still be readonly so that we can lock out all writers. This is
+ * needed to guarantee that no writable fds exist to the file once it
+ * has verity enabled, and to stabilize the data being hashed.
+ */
+
+ err = inode_permission(inode, MAY_WRITE);
+ if (err)
+ return err;
+
+ if (IS_APPEND(inode))
+ return -EPERM;
+
+ if (S_ISDIR(inode->i_mode))
+ return -EISDIR;
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ err = mnt_want_write_file(filp);
+ if (err) /* -EROFS */
+ return err;
+
+ err = deny_write_access(filp);
+ if (err) /* -ETXTBSY */
+ goto out_drop_write;
+
+ err = enable_verity(filp, &arg);
+ if (err)
+ goto out_allow_write_access;
+
+ /*
+ * Some pages of the file may have been evicted from pagecache after
+ * being used in the Merkle tree construction, then read into pagecache
+ * again by another process reading from the file concurrently. Since
+ * these pages didn't undergo verification against the file measurement
+ * which fs-verity now claims to be enforcing, we have to wipe the
+ * pagecache to ensure that all future reads are verified.
+ */
+ filemap_write_and_wait(inode->i_mapping);
+ invalidate_inode_pages2(inode->i_mapping);
+
+ /*
+ * allow_write_access() is needed to pair with deny_write_access().
+ * Regardless, the filesystem won't allow writing to verity files.
+ */
+out_allow_write_access:
+ allow_write_access(filp);
+out_drop_write:
+ mnt_drop_write_file(filp);
+ return err;
+}
+EXPORT_SYMBOL_GPL(fsverity_ioctl_enable);
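
build_merkle_tree() above works bottom-up: level 0 packs the hashes of the data blocks, each higher level packs the hashes of the level below, and the final pass yields the root hash. A standalone sketch of just the arithmetic (SHA-256, 4 KiB blocks, a 1 GiB file) shows how quickly the levels shrink; it is not kernel code, only the same math:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t file_size = 1ULL << 30;          /* 1 GiB example */
        const uint64_t block_size = 4096;
        const uint64_t arity = block_size / 32;         /* 128 hashes per tree block */
        uint64_t blocks = (file_size + block_size - 1) / block_size;
        uint64_t tree_blocks = 0;
        int level = 0;

        while (blocks > 1) {
                blocks = (blocks + arity - 1) / arity;  /* blocks in the next level up */
                printf("level %d: %llu blocks\n", level++,
                       (unsigned long long)blocks);
                tree_blocks += blocks;
        }
        printf("tree size: %llu bytes\n",
               (unsigned long long)(tree_blocks * block_size));
        return 0;
}

For this example the levels hold 2048, 16 and 1 blocks, about 8.1 MiB of tree for 1 GiB of data.
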
diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h
new file mode 100644
index 000000000000..e74c79b64d88
--- /dev/null
+++ b/fs/verity/fsverity_private.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fs-verity: read-only file-based authenticity protection
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef _FSVERITY_PRIVATE_H
+#define _FSVERITY_PRIVATE_H
+
+#ifdef CONFIG_FS_VERITY_DEBUG
+#define DEBUG
+#endif
+
+#define pr_fmt(fmt) "fs-verity: " fmt
+
+#include <crypto/sha.h>
+#include <linux/fsverity.h>
+
+struct ahash_request;
+
+/*
+ * Implementation limit: maximum depth of the Merkle tree. For now 8 is plenty;
+ * it's enough for over U64_MAX bytes of data using SHA-256 and 4K blocks.
+ */
+#define FS_VERITY_MAX_LEVELS 8
+
+/*
+ * Largest digest size among all hash algorithms supported by fs-verity.
+ * Currently assumed to be <= size of fsverity_descriptor::root_hash.
+ */
+#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* A hash algorithm supported by fs-verity */
+struct fsverity_hash_alg {
+ struct crypto_ahash *tfm; /* hash tfm, allocated on demand */
+ const char *name; /* crypto API name, e.g. sha256 */
+ unsigned int digest_size; /* digest size in bytes, e.g. 32 for SHA-256 */
+ unsigned int block_size; /* block size in bytes, e.g. 64 for SHA-256 */
+};
+
+/* Merkle tree parameters: hash algorithm, initial hash state, and topology */
+struct merkle_tree_params {
+ const struct fsverity_hash_alg *hash_alg; /* the hash algorithm */
+ const u8 *hashstate; /* initial hash state or NULL */
+ unsigned int digest_size; /* same as hash_alg->digest_size */
+ unsigned int block_size; /* size of data and tree blocks */
+ unsigned int hashes_per_block; /* number of hashes per tree block */
+ unsigned int log_blocksize; /* log2(block_size) */
+ unsigned int log_arity; /* log2(hashes_per_block) */
+ unsigned int num_levels; /* number of levels in Merkle tree */
+ u64 tree_size; /* Merkle tree size in bytes */
+
+ /*
+ * Starting block index for each tree level, ordered from leaf level (0)
+ * to root level ('num_levels - 1')
+ */
+ u64 level_start[FS_VERITY_MAX_LEVELS];
+};
+
+/**
+ * fsverity_info - cached verity metadata for an inode
+ *
+ * When a verity file is first opened, an instance of this struct is allocated
+ * and stored in ->i_verity_info; it remains until the inode is evicted. It
+ * caches information about the Merkle tree that's needed to efficiently verify
+ * data read from the file. It also caches the file measurement. The Merkle
+ * tree pages themselves are not cached here, but the filesystem may cache them.
+ */
+struct fsverity_info {
+ struct merkle_tree_params tree_params;
+ u8 root_hash[FS_VERITY_MAX_DIGEST_SIZE];
+ u8 measurement[FS_VERITY_MAX_DIGEST_SIZE];
+ const struct inode *inode;
+};
+
+/*
+ * Merkle tree properties. The file measurement is the hash of this structure
+ * excluding the signature and with the sig_size field set to 0.
+ */
+struct fsverity_descriptor {
+ __u8 version; /* must be 1 */
+ __u8 hash_algorithm; /* Merkle tree hash algorithm */
+ __u8 log_blocksize; /* log2 of size of data and tree blocks */
+ __u8 salt_size; /* size of salt in bytes; 0 if none */
+ __le32 sig_size; /* size of signature in bytes; 0 if none */
+ __le64 data_size; /* size of file the Merkle tree is built over */
+ __u8 root_hash[64]; /* Merkle tree root hash */
+ __u8 salt[32]; /* salt prepended to each hashed block */
+ __u8 __reserved[144]; /* must be 0's */
+ __u8 signature[]; /* optional PKCS#7 signature */
+};
+
+/* Arbitrary limit to bound the kmalloc() size. Can be changed. */
+#define FS_VERITY_MAX_DESCRIPTOR_SIZE 16384
+
+#define FS_VERITY_MAX_SIGNATURE_SIZE (FS_VERITY_MAX_DESCRIPTOR_SIZE - \
+ sizeof(struct fsverity_descriptor))
+
+/*
+ * Format in which verity file measurements are signed. This is the same as
+ * 'struct fsverity_digest', except here some magic bytes are prepended to
+ * provide some context about what is being signed in case the same key is used
+ * for non-fsverity purposes, and here the fields have fixed endianness.
+ */
+struct fsverity_signed_digest {
+ char magic[8]; /* must be "FSVerity" */
+ __le16 digest_algorithm;
+ __le16 digest_size;
+ __u8 digest[];
+};
+
+/* hash_algs.c */
+
+extern struct fsverity_hash_alg fsverity_hash_algs[];
+
+const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
+ unsigned int num);
+const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
+ const u8 *salt, size_t salt_size);
+int fsverity_hash_page(const struct merkle_tree_params *params,
+ const struct inode *inode,
+ struct ahash_request *req, struct page *page, u8 *out);
+int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
+ const void *data, size_t size, u8 *out);
+void __init fsverity_check_hash_algs(void);
+
+/* init.c */
+
+extern void __printf(3, 4) __cold
+fsverity_msg(const struct inode *inode, const char *level,
+ const char *fmt, ...);
+
+#define fsverity_warn(inode, fmt, ...) \
+ fsverity_msg((inode), KERN_WARNING, fmt, ##__VA_ARGS__)
+#define fsverity_err(inode, fmt, ...) \
+ fsverity_msg((inode), KERN_ERR, fmt, ##__VA_ARGS__)
+
+/* open.c */
+
+int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
+ const struct inode *inode,
+ unsigned int hash_algorithm,
+ unsigned int log_blocksize,
+ const u8 *salt, size_t salt_size);
+
+struct fsverity_info *fsverity_create_info(const struct inode *inode,
+ void *desc, size_t desc_size);
+
+void fsverity_set_info(struct inode *inode, struct fsverity_info *vi);
+
+void fsverity_free_info(struct fsverity_info *vi);
+
+int __init fsverity_init_info_cache(void);
+void __init fsverity_exit_info_cache(void);
+
+/* signature.c */
+
+#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
+int fsverity_verify_signature(const struct fsverity_info *vi,
+ const struct fsverity_descriptor *desc,
+ size_t desc_size);
+
+int __init fsverity_init_signature(void);
+#else /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */
+static inline int
+fsverity_verify_signature(const struct fsverity_info *vi,
+ const struct fsverity_descriptor *desc,
+ size_t desc_size)
+{
+ return 0;
+}
+
+static inline int fsverity_init_signature(void)
+{
+ return 0;
+}
+#endif /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */
+
+/* verify.c */
+
+int __init fsverity_init_workqueue(void);
+void __init fsverity_exit_workqueue(void);
+
+#endif /* _FSVERITY_PRIVATE_H */
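
A quick sanity check of the FS_VERITY_MAX_LEVELS comment above: with SHA-256 and 4 KiB blocks each tree block holds 4096 / 32 = 128 = 2^7 hashes, so 8 levels can address 2^(7*8) = 2^56 leaf blocks, i.e. 2^56 * 2^12 = 2^68 bytes of data, comfortably beyond the 2^64 bytes a u64 file size can express.
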
diff --git a/fs/verity/hash_algs.c b/fs/verity/hash_algs.c
new file mode 100644
index 000000000000..31e6d7d2389a
--- /dev/null
+++ b/fs/verity/hash_algs.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/verity/hash_algs.c: fs-verity hash algorithms
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <crypto/hash.h>
+#include <linux/scatterlist.h>
+
+/* The hash algorithms supported by fs-verity */
+struct fsverity_hash_alg fsverity_hash_algs[] = {
+ [FS_VERITY_HASH_ALG_SHA256] = {
+ .name = "sha256",
+ .digest_size = SHA256_DIGEST_SIZE,
+ .block_size = SHA256_BLOCK_SIZE,
+ },
+ [FS_VERITY_HASH_ALG_SHA512] = {
+ .name = "sha512",
+ .digest_size = SHA512_DIGEST_SIZE,
+ .block_size = SHA512_BLOCK_SIZE,
+ },
+};
+
+/**
+ * fsverity_get_hash_alg() - validate and prepare a hash algorithm
+ * @inode: optional inode for logging purposes
+ * @num: the hash algorithm number
+ *
+ * Get the struct fsverity_hash_alg for the given hash algorithm number, and
+ * ensure it has a hash transform ready to go. The hash transforms are
+ * allocated on-demand so that we don't waste resources unnecessarily, and
+ * because the crypto modules may be initialized later than fs/verity/.
+ *
+ * Return: pointer to the hash alg on success, else an ERR_PTR()
+ */
+const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
+ unsigned int num)
+{
+ struct fsverity_hash_alg *alg;
+ struct crypto_ahash *tfm;
+ int err;
+
+ if (num >= ARRAY_SIZE(fsverity_hash_algs) ||
+ !fsverity_hash_algs[num].name) {
+ fsverity_warn(inode, "Unknown hash algorithm number: %u", num);
+ return ERR_PTR(-EINVAL);
+ }
+ alg = &fsverity_hash_algs[num];
+
+ /* pairs with cmpxchg() below */
+ tfm = READ_ONCE(alg->tfm);
+ if (likely(tfm != NULL))
+ return alg;
+ /*
+ * Using the shash API would make things a bit simpler, but the ahash
+ * API is preferable as it allows the use of crypto accelerators.
+ */
+ tfm = crypto_alloc_ahash(alg->name, 0, 0);
+ if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT) {
+ fsverity_warn(inode,
+ "Missing crypto API support for hash algorithm \"%s\"",
+ alg->name);
+ return ERR_PTR(-ENOPKG);
+ }
+ fsverity_err(inode,
+ "Error allocating hash algorithm \"%s\": %ld",
+ alg->name, PTR_ERR(tfm));
+ return ERR_CAST(tfm);
+ }
+
+ err = -EINVAL;
+ if (WARN_ON(alg->digest_size != crypto_ahash_digestsize(tfm)))
+ goto err_free_tfm;
+ if (WARN_ON(alg->block_size != crypto_ahash_blocksize(tfm)))
+ goto err_free_tfm;
+
+ pr_info("%s using implementation \"%s\"\n",
+ alg->name, crypto_ahash_driver_name(tfm));
+
+ /* pairs with READ_ONCE() above */
+ if (cmpxchg(&alg->tfm, NULL, tfm) != NULL)
+ crypto_free_ahash(tfm);
+
+ return alg;
+
+err_free_tfm:
+ crypto_free_ahash(tfm);
+ return ERR_PTR(err);
+}
+
+/**
+ * fsverity_prepare_hash_state() - precompute the initial hash state
+ * @alg: hash algorithm
+ * @salt: a salt which is to be prepended to all data to be hashed
+ * @salt_size: salt size in bytes, possibly 0
+ *
+ * Return: NULL if the salt is empty, otherwise the kmalloc()'ed precomputed
+ * initial hash state on success or an ERR_PTR() on failure.
+ */
+const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
+ const u8 *salt, size_t salt_size)
+{
+ u8 *hashstate = NULL;
+ struct ahash_request *req = NULL;
+ u8 *padded_salt = NULL;
+ size_t padded_salt_size;
+ struct scatterlist sg;
+ DECLARE_CRYPTO_WAIT(wait);
+ int err;
+
+ if (salt_size == 0)
+ return NULL;
+
+ hashstate = kmalloc(crypto_ahash_statesize(alg->tfm), GFP_KERNEL);
+ if (!hashstate)
+ return ERR_PTR(-ENOMEM);
+
+ req = ahash_request_alloc(alg->tfm, GFP_KERNEL);
+ if (!req) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ /*
+ * Zero-pad the salt to the next multiple of the input size of the hash
+ * algorithm's compression function, e.g. 64 bytes for SHA-256 or 128
+ * bytes for SHA-512. This ensures that the hash algorithm won't have
+ * any bytes buffered internally after processing the salt, thus making
+ * salted hashing just as fast as unsalted hashing.
+ */
+ padded_salt_size = round_up(salt_size, alg->block_size);
+ padded_salt = kzalloc(padded_salt_size, GFP_KERNEL);
+ if (!padded_salt) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ memcpy(padded_salt, salt, salt_size);
+
+ sg_init_one(&sg, padded_salt, padded_salt_size);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ ahash_request_set_crypt(req, &sg, NULL, padded_salt_size);
+
+ err = crypto_wait_req(crypto_ahash_init(req), &wait);
+ if (err)
+ goto err_free;
+
+ err = crypto_wait_req(crypto_ahash_update(req), &wait);
+ if (err)
+ goto err_free;
+
+ err = crypto_ahash_export(req, hashstate);
+ if (err)
+ goto err_free;
+out:
+ ahash_request_free(req);
+ kfree(padded_salt);
+ return hashstate;
+
+err_free:
+ kfree(hashstate);
+ hashstate = ERR_PTR(err);
+ goto out;
+}
+
+/**
+ * fsverity_hash_page() - hash a single data or hash page
+ * @params: the Merkle tree's parameters
+ * @inode: inode for which the hashing is being done
+ * @req: preallocated hash request
+ * @page: the page to hash
+ * @out: output digest, size 'params->digest_size' bytes
+ *
+ * Hash a single data or hash block, assuming block_size == PAGE_SIZE.
+ * The hash is salted if a salt is specified in the Merkle tree parameters.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_hash_page(const struct merkle_tree_params *params,
+ const struct inode *inode,
+ struct ahash_request *req, struct page *page, u8 *out)
+{
+ struct scatterlist sg;
+ DECLARE_CRYPTO_WAIT(wait);
+ int err;
+
+ if (WARN_ON(params->block_size != PAGE_SIZE))
+ return -EINVAL;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, PAGE_SIZE, 0);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ ahash_request_set_crypt(req, &sg, out, PAGE_SIZE);
+
+ if (params->hashstate) {
+ err = crypto_ahash_import(req, params->hashstate);
+ if (err) {
+ fsverity_err(inode,
+ "Error %d importing hash state", err);
+ return err;
+ }
+ err = crypto_ahash_finup(req);
+ } else {
+ err = crypto_ahash_digest(req);
+ }
+
+ err = crypto_wait_req(err, &wait);
+ if (err)
+ fsverity_err(inode, "Error %d computing page hash", err);
+ return err;
+}
+
+/**
+ * fsverity_hash_buffer() - hash some data
+ * @alg: the hash algorithm to use
+ * @data: the data to hash
+ * @size: size of data to hash, in bytes
+ * @out: output digest, size 'alg->digest_size' bytes
+ *
+ * Hash some data which is located in physically contiguous memory (i.e. memory
+ * allocated by kmalloc(), not by vmalloc()). No salt is used.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
+ const void *data, size_t size, u8 *out)
+{
+ struct ahash_request *req;
+ struct scatterlist sg;
+ DECLARE_CRYPTO_WAIT(wait);
+ int err;
+
+ req = ahash_request_alloc(alg->tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ sg_init_one(&sg, data, size);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ ahash_request_set_crypt(req, &sg, out, size);
+
+ err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
+ ahash_request_free(req);
+ return err;
+}
+
+void __init fsverity_check_hash_algs(void)
+{
+ size_t i;
+
+ /*
+ * Sanity check the hash algorithms (could be a build-time check, but
+ * they're in an array)
+ */
+ for (i = 0; i < ARRAY_SIZE(fsverity_hash_algs); i++) {
+ const struct fsverity_hash_alg *alg = &fsverity_hash_algs[i];
+
+ if (!alg->name)
+ continue;
+
+ BUG_ON(alg->digest_size > FS_VERITY_MAX_DIGEST_SIZE);
+
+ /*
+ * For efficiency, the implementation currently assumes the
+ * digest and block sizes are powers of 2. This limitation can
+ * be lifted if the code is updated to handle other values.
+ */
+ BUG_ON(!is_power_of_2(alg->digest_size));
+ BUG_ON(!is_power_of_2(alg->block_size));
+ }
+}
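
To make the padding comment in fsverity_prepare_hash_state() concrete: with SHA-256, whose compression function consumes 64-byte blocks, a 4-byte salt is zero-padded to 64 bytes before crypto_ahash_update(), so the exported state contains no partially filled block. Each later fsverity_hash_page() call can then import that state and finish with a single pass over exactly PAGE_SIZE bytes, which is what keeps salted hashing as fast as unsalted hashing.
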
diff --git a/fs/verity/init.c b/fs/verity/init.c
new file mode 100644
index 000000000000..94c104e00861
--- /dev/null
+++ b/fs/verity/init.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/verity/init.c: fs-verity module initialization and logging
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/ratelimit.h>
+
+void fsverity_msg(const struct inode *inode, const char *level,
+ const char *fmt, ...)
+{
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ struct va_format vaf;
+ va_list args;
+
+ if (!__ratelimit(&rs))
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ if (inode)
+ printk("%sfs-verity (%s, inode %lu): %pV\n",
+ level, inode->i_sb->s_id, inode->i_ino, &vaf);
+ else
+ printk("%sfs-verity: %pV\n", level, &vaf);
+ va_end(args);
+}
+
+static int __init fsverity_init(void)
+{
+ int err;
+
+ fsverity_check_hash_algs();
+
+ err = fsverity_init_info_cache();
+ if (err)
+ return err;
+
+ err = fsverity_init_workqueue();
+ if (err)
+ goto err_exit_info_cache;
+
+ err = fsverity_init_signature();
+ if (err)
+ goto err_exit_workqueue;
+
+ pr_debug("Initialized fs-verity\n");
+ return 0;
+
+err_exit_workqueue:
+ fsverity_exit_workqueue();
+err_exit_info_cache:
+ fsverity_exit_info_cache();
+ return err;
+}
+late_initcall(fsverity_init)
diff --git a/fs/verity/measure.c b/fs/verity/measure.c
new file mode 100644
index 000000000000..05049b68c745
--- /dev/null
+++ b/fs/verity/measure.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/verity/measure.c: ioctl to get a verity file's measurement
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/uaccess.h>
+
+/**
+ * fsverity_ioctl_measure() - get a verity file's measurement
+ *
+ * Retrieve the file measurement that the kernel is enforcing for reads from a
+ * verity file. See the "FS_IOC_MEASURE_VERITY" section of
+ * Documentation/filesystems/fsverity.rst for the documentation.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_ioctl_measure(struct file *filp, void __user *_uarg)
+{
+ const struct inode *inode = file_inode(filp);
+ struct fsverity_digest __user *uarg = _uarg;
+ const struct fsverity_info *vi;
+ const struct fsverity_hash_alg *hash_alg;
+ struct fsverity_digest arg;
+
+ vi = fsverity_get_info(inode);
+ if (!vi)
+ return -ENODATA; /* not a verity file */
+ hash_alg = vi->tree_params.hash_alg;
+
+ /*
+ * The user specifies the digest_size their buffer has space for; we can
+ * return the digest if it fits in the available space. We write back
+ * the actual size, which may be shorter than the user-specified size.
+ */
+
+ if (get_user(arg.digest_size, &uarg->digest_size))
+ return -EFAULT;
+ if (arg.digest_size < hash_alg->digest_size)
+ return -EOVERFLOW;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.digest_algorithm = hash_alg - fsverity_hash_algs;
+ arg.digest_size = hash_alg->digest_size;
+
+ if (copy_to_user(uarg, &arg, sizeof(arg)))
+ return -EFAULT;
+
+ if (copy_to_user(uarg->digest, vi->measurement, hash_alg->digest_size))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsverity_ioctl_measure);
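
From userspace the measurement is retrieved with the matching ioctl. A minimal sketch, again assuming the UAPI definitions in <linux/fsverity.h> (struct fsverity_digest and FS_IOC_MEASURE_VERITY); the 64-byte trailing buffer is large enough for any digest size supported here:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>

int main(int argc, char **argv)
{
        struct fsverity_digest *d;
        int fd, i;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        d = calloc(1, sizeof(*d) + 64); /* header plus room for the digest */
        if (!d)
                return 1;
        d->digest_size = 64;            /* space available after the header */
        if (ioctl(fd, FS_IOC_MEASURE_VERITY, d) != 0) {
                perror("FS_IOC_MEASURE_VERITY");
                return 1;
        }
        printf("algorithm %u, digest ", (unsigned int)d->digest_algorithm);
        for (i = 0; i < d->digest_size; i++)
                printf("%02x", d->digest[i]);
        printf("\n");
        return 0;
}
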
diff --git a/fs/verity/open.c b/fs/verity/open.c
new file mode 100644
index 000000000000..63d1004b688c
--- /dev/null
+++ b/fs/verity/open.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/verity/open.c: opening fs-verity files
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/slab.h>
+
+static struct kmem_cache *fsverity_info_cachep;
+
+/**
+ * fsverity_init_merkle_tree_params() - initialize Merkle tree parameters
+ * @params: the parameters struct to initialize
+ * @inode: the inode for which the Merkle tree is being built
+ * @hash_algorithm: number of hash algorithm to use
+ * @log_blocksize: log base 2 of block size to use
+ * @salt: pointer to salt (optional)
+ * @salt_size: size of salt, possibly 0
+ *
+ * Validate the hash algorithm and block size, then compute the tree topology
+ * (num levels, num blocks in each level, etc.) and initialize @params.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
+ const struct inode *inode,
+ unsigned int hash_algorithm,
+ unsigned int log_blocksize,
+ const u8 *salt, size_t salt_size)
+{
+ const struct fsverity_hash_alg *hash_alg;
+ int err;
+ u64 blocks;
+ u64 offset;
+ int level;
+
+ memset(params, 0, sizeof(*params));
+
+ hash_alg = fsverity_get_hash_alg(inode, hash_algorithm);
+ if (IS_ERR(hash_alg))
+ return PTR_ERR(hash_alg);
+ params->hash_alg = hash_alg;
+ params->digest_size = hash_alg->digest_size;
+
+ params->hashstate = fsverity_prepare_hash_state(hash_alg, salt,
+ salt_size);
+ if (IS_ERR(params->hashstate)) {
+ err = PTR_ERR(params->hashstate);
+ params->hashstate = NULL;
+ fsverity_err(inode, "Error %d preparing hash state", err);
+ goto out_err;
+ }
+
+ if (log_blocksize != PAGE_SHIFT) {
+ fsverity_warn(inode, "Unsupported log_blocksize: %u",
+ log_blocksize);
+ err = -EINVAL;
+ goto out_err;
+ }
+ params->log_blocksize = log_blocksize;
+ params->block_size = 1 << log_blocksize;
+
+ if (WARN_ON(!is_power_of_2(params->digest_size))) {
+ err = -EINVAL;
+ goto out_err;
+ }
+ if (params->block_size < 2 * params->digest_size) {
+ fsverity_warn(inode,
+ "Merkle tree block size (%u) too small for hash algorithm \"%s\"",
+ params->block_size, hash_alg->name);
+ err = -EINVAL;
+ goto out_err;
+ }
+ params->log_arity = params->log_blocksize - ilog2(params->digest_size);
+ params->hashes_per_block = 1 << params->log_arity;
+
+ pr_debug("Merkle tree uses %s with %u-byte blocks (%u hashes/block), salt=%*phN\n",
+ hash_alg->name, params->block_size, params->hashes_per_block,
+ (int)salt_size, salt);
+
+ /*
+ * Compute the number of levels in the Merkle tree and create a map from
+ * level to the starting block of that level. Level 'num_levels - 1' is
+ * the root and is stored first. Level 0 is the level directly "above"
+ * the data blocks and is stored last.
+ */
+
+ /* Compute number of levels and the number of blocks in each level */
+ blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;
+ pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);
+ while (blocks > 1) {
+ if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
+ fsverity_err(inode, "Too many levels in Merkle tree");
+ err = -EINVAL;
+ goto out_err;
+ }
+ blocks = (blocks + params->hashes_per_block - 1) >>
+ params->log_arity;
+ /* temporarily using level_start[] to store blocks in level */
+ params->level_start[params->num_levels++] = blocks;
+ }
+
+ /* Compute the starting block of each level */
+ offset = 0;
+ for (level = (int)params->num_levels - 1; level >= 0; level--) {
+ blocks = params->level_start[level];
+ params->level_start[level] = offset;
+ pr_debug("Level %d is %llu blocks starting at index %llu\n",
+ level, blocks, offset);
+ offset += blocks;
+ }
+
+ params->tree_size = offset << log_blocksize;
+ return 0;
+
+out_err:
+ kfree(params->hashstate);
+ memset(params, 0, sizeof(*params));
+ return err;
+}
+
+/*
+ * Compute the file measurement by hashing the fsverity_descriptor excluding the
+ * signature and with the sig_size field set to 0.
+ */
+static int compute_file_measurement(const struct fsverity_hash_alg *hash_alg,
+ struct fsverity_descriptor *desc,
+ u8 *measurement)
+{
+ __le32 sig_size = desc->sig_size;
+ int err;
+
+ desc->sig_size = 0;
+ err = fsverity_hash_buffer(hash_alg, desc, sizeof(*desc), measurement);
+ desc->sig_size = sig_size;
+
+ return err;
+}
+
+/*
+ * Validate the given fsverity_descriptor and create a new fsverity_info from
+ * it. The signature (if present) is also checked.
+ */
+struct fsverity_info *fsverity_create_info(const struct inode *inode,
+ void *_desc, size_t desc_size)
+{
+ struct fsverity_descriptor *desc = _desc;
+ struct fsverity_info *vi;
+ int err;
+
+ if (desc_size < sizeof(*desc)) {
+ fsverity_err(inode, "Unrecognized descriptor size: %zu bytes",
+ desc_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (desc->version != 1) {
+ fsverity_err(inode, "Unrecognized descriptor version: %u",
+ desc->version);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (memchr_inv(desc->__reserved, 0, sizeof(desc->__reserved))) {
+ fsverity_err(inode, "Reserved bits set in descriptor");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (desc->salt_size > sizeof(desc->salt)) {
+ fsverity_err(inode, "Invalid salt_size: %u", desc->salt_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (le64_to_cpu(desc->data_size) != inode->i_size) {
+ fsverity_err(inode,
+ "Wrong data_size: %llu (desc) != %lld (inode)",
+ le64_to_cpu(desc->data_size), inode->i_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vi = kmem_cache_zalloc(fsverity_info_cachep, GFP_KERNEL);
+ if (!vi)
+ return ERR_PTR(-ENOMEM);
+ vi->inode = inode;
+
+ err = fsverity_init_merkle_tree_params(&vi->tree_params, inode,
+ desc->hash_algorithm,
+ desc->log_blocksize,
+ desc->salt, desc->salt_size);
+ if (err) {
+ fsverity_err(inode,
+ "Error %d initializing Merkle tree parameters",
+ err);
+ goto out;
+ }
+
+ memcpy(vi->root_hash, desc->root_hash, vi->tree_params.digest_size);
+
+ err = compute_file_measurement(vi->tree_params.hash_alg, desc,
+ vi->measurement);
+ if (err) {
+ fsverity_err(inode, "Error %d computing file measurement", err);
+ goto out;
+ }
+ pr_debug("Computed file measurement: %s:%*phN\n",
+ vi->tree_params.hash_alg->name,
+ vi->tree_params.digest_size, vi->measurement);
+
+ err = fsverity_verify_signature(vi, desc, desc_size);
+out:
+ if (err) {
+ fsverity_free_info(vi);
+ vi = ERR_PTR(err);
+ }
+ return vi;
+}
+
+void fsverity_set_info(struct inode *inode, struct fsverity_info *vi)
+{
+ /*
+ * Multiple processes may race to set ->i_verity_info, so use cmpxchg.
+ * This pairs with the READ_ONCE() in fsverity_get_info().
+ */
+ if (cmpxchg(&inode->i_verity_info, NULL, vi) != NULL)
+ fsverity_free_info(vi);
+}
+
+void fsverity_free_info(struct fsverity_info *vi)
+{
+ if (!vi)
+ return;
+ kfree(vi->tree_params.hashstate);
+ kmem_cache_free(fsverity_info_cachep, vi);
+}
+
+/* Ensure the inode has an ->i_verity_info */
+static int ensure_verity_info(struct inode *inode)
+{
+ struct fsverity_info *vi = fsverity_get_info(inode);
+ struct fsverity_descriptor *desc;
+ int res;
+
+ if (vi)
+ return 0;
+
+ res = inode->i_sb->s_vop->get_verity_descriptor(inode, NULL, 0);
+ if (res < 0) {
+ fsverity_err(inode,
+ "Error %d getting verity descriptor size", res);
+ return res;
+ }
+ if (res > FS_VERITY_MAX_DESCRIPTOR_SIZE) {
+ fsverity_err(inode, "Verity descriptor is too large (%d bytes)",
+ res);
+ return -EMSGSIZE;
+ }
+ desc = kmalloc(res, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ res = inode->i_sb->s_vop->get_verity_descriptor(inode, desc, res);
+ if (res < 0) {
+ fsverity_err(inode, "Error %d reading verity descriptor", res);
+ goto out_free_desc;
+ }
+
+ vi = fsverity_create_info(inode, desc, res);
+ if (IS_ERR(vi)) {
+ res = PTR_ERR(vi);
+ goto out_free_desc;
+ }
+
+ fsverity_set_info(inode, vi);
+ res = 0;
+out_free_desc:
+ kfree(desc);
+ return res;
+}
+
+/**
+ * fsverity_file_open() - prepare to open a verity file
+ * @inode: the inode being opened
+ * @filp: the struct file being set up
+ *
+ * When opening a verity file, deny the open if it is for writing. Otherwise,
+ * set up the inode's ->i_verity_info if not already done.
+ *
+ * When combined with fscrypt, this must be called after fscrypt_file_open().
+ * Otherwise, we won't have the key set up to decrypt the verity metadata.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_file_open(struct inode *inode, struct file *filp)
+{
+ if (!IS_VERITY(inode))
+ return 0;
+
+ if (filp->f_mode & FMODE_WRITE) {
+ pr_debug("Denying opening verity file (ino %lu) for write\n",
+ inode->i_ino);
+ return -EPERM;
+ }
+
+ return ensure_verity_info(inode);
+}
+EXPORT_SYMBOL_GPL(fsverity_file_open);
+
+/**
+ * fsverity_prepare_setattr() - prepare to change a verity inode's attributes
+ * @dentry: dentry through which the inode is being changed
+ * @attr: attributes to change
+ *
+ * Verity files are immutable, so deny truncates. This isn't covered by the
+ * open-time check because sys_truncate() takes a path, not a file descriptor.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ if (IS_VERITY(d_inode(dentry)) && (attr->ia_valid & ATTR_SIZE)) {
+ pr_debug("Denying truncate of verity file (ino %lu)\n",
+ d_inode(dentry)->i_ino);
+ return -EPERM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsverity_prepare_setattr);
+
+/**
+ * fsverity_cleanup_inode() - free the inode's verity info, if present
+ *
+ * Filesystems must call this on inode eviction to free ->i_verity_info.
+ */
+void fsverity_cleanup_inode(struct inode *inode)
+{
+ fsverity_free_info(inode->i_verity_info);
+ inode->i_verity_info = NULL;
+}
+EXPORT_SYMBOL_GPL(fsverity_cleanup_inode);
+
+int __init fsverity_init_info_cache(void)
+{
+ fsverity_info_cachep = KMEM_CACHE_USERCOPY(fsverity_info,
+ SLAB_RECLAIM_ACCOUNT,
+ measurement);
+ if (!fsverity_info_cachep)
+ return -ENOMEM;
+ return 0;
+}
+
+void __init fsverity_exit_info_cache(void)
+{
+ kmem_cache_destroy(fsverity_info_cachep);
+ fsverity_info_cachep = NULL;
+}
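
As a worked example of the level_start[] layout computed above: a 1 GiB file with SHA-256 and 4 KiB blocks has 262144 data blocks and an arity of 128, giving per-level block counts of 2048, 16 and 1. Stored root-first, that yields level_start[2] = 0, level_start[1] = 1 and level_start[0] = 17, for a 2065-block tree (about 8.1 MiB, roughly 0.8% overhead).
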
diff --git a/fs/verity/signature.c b/fs/verity/signature.c
new file mode 100644
index 000000000000..c8b255232de5
--- /dev/null
+++ b/fs/verity/signature.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/verity/signature.c: verification of builtin signatures
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/cred.h>
+#include <linux/key.h>
+#include <linux/slab.h>
+#include <linux/verification.h>
+
+/*
+ * /proc/sys/fs/verity/require_signatures
+ * If 1, all verity files must have a valid builtin signature.
+ */
+static int fsverity_require_signatures;
+
+/*
+ * Keyring that contains the trusted X.509 certificates.
+ *
+ * Only root (kuid=0) can modify this. Also, root may use
+ * keyctl_restrict_keyring() to prevent any more additions.
+ */
+static struct key *fsverity_keyring;
+
+/**
+ * fsverity_verify_signature() - check a verity file's signature
+ *
+ * If the file's fs-verity descriptor includes a signature of the file
+ * measurement, verify it against the certificates in the fs-verity keyring.
+ *
+ * Return: 0 on success (signature valid or not required); -errno on failure
+ */
+int fsverity_verify_signature(const struct fsverity_info *vi,
+ const struct fsverity_descriptor *desc,
+ size_t desc_size)
+{
+ const struct inode *inode = vi->inode;
+ const struct fsverity_hash_alg *hash_alg = vi->tree_params.hash_alg;
+ const u32 sig_size = le32_to_cpu(desc->sig_size);
+ struct fsverity_signed_digest *d;
+ int err;
+
+ if (sig_size == 0) {
+ if (fsverity_require_signatures) {
+ fsverity_err(inode,
+ "require_signatures=1, rejecting unsigned file!");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ if (sig_size > desc_size - sizeof(*desc)) {
+ fsverity_err(inode, "Signature overflows verity descriptor");
+ return -EBADMSG;
+ }
+
+ d = kzalloc(sizeof(*d) + hash_alg->digest_size, GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ memcpy(d->magic, "FSVerity", 8);
+ d->digest_algorithm = cpu_to_le16(hash_alg - fsverity_hash_algs);
+ d->digest_size = cpu_to_le16(hash_alg->digest_size);
+ memcpy(d->digest, vi->measurement, hash_alg->digest_size);
+
+ err = verify_pkcs7_signature(d, sizeof(*d) + hash_alg->digest_size,
+ desc->signature, sig_size,
+ fsverity_keyring,
+ VERIFYING_UNSPECIFIED_SIGNATURE,
+ NULL, NULL);
+ kfree(d);
+
+ if (err) {
+ if (err == -ENOKEY)
+ fsverity_err(inode,
+ "File's signing cert isn't in the fs-verity keyring");
+ else if (err == -EKEYREJECTED)
+ fsverity_err(inode, "Incorrect file signature");
+ else if (err == -EBADMSG)
+ fsverity_err(inode, "Malformed file signature");
+ else
+ fsverity_err(inode, "Error %d verifying file signature",
+ err);
+ return err;
+ }
+
+ pr_debug("Valid signature for file measurement %s:%*phN\n",
+ hash_alg->name, hash_alg->digest_size, vi->measurement);
+ return 0;
+}
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *fsverity_sysctl_header;
+
+static const struct ctl_path fsverity_sysctl_path[] = {
+ { .procname = "fs", },
+ { .procname = "verity", },
+ { }
+};
+
+static struct ctl_table fsverity_sysctl_table[] = {
+ {
+ .procname = "require_signatures",
+ .data = &fsverity_require_signatures,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ { }
+};
+
+static int __init fsverity_sysctl_init(void)
+{
+ fsverity_sysctl_header = register_sysctl_paths(fsverity_sysctl_path,
+ fsverity_sysctl_table);
+ if (!fsverity_sysctl_header) {
+ pr_err("sysctl registration failed!\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+#else /* !CONFIG_SYSCTL */
+static inline int __init fsverity_sysctl_init(void)
+{
+ return 0;
+}
+#endif /* !CONFIG_SYSCTL */
+
+int __init fsverity_init_signature(void)
+{
+ struct key *ring;
+ int err;
+
+ ring = keyring_alloc(".fs-verity", KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(), KEY_POS_SEARCH |
+ KEY_USR_VIEW | KEY_USR_READ | KEY_USR_WRITE |
+ KEY_USR_SEARCH | KEY_USR_SETATTR,
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+
+ err = fsverity_sysctl_init();
+ if (err)
+ goto err_put_ring;
+
+ fsverity_keyring = ring;
+ return 0;
+
+err_put_ring:
+ key_put(ring);
+ return err;
+}
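
What actually gets signed is the fixed-endian struct fsverity_signed_digest assembled in fsverity_verify_signature() above: the "FSVerity" magic, the algorithm number and digest size as little-endian 16-bit fields, then the file measurement itself. A hedged userspace-side sketch of serializing that blob, which a signing tool would then wrap in PKCS#7; the helper name is made up for illustration, and the byte-order helpers come from glibc's <endian.h>:

#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Caller provides an 'out' buffer of at least 12 + digest_size bytes. */
static size_t build_signed_digest(uint8_t *out, uint16_t digest_algorithm,
                                  const uint8_t *digest, uint16_t digest_size)
{
        uint16_t alg_le = htole16(digest_algorithm);
        uint16_t size_le = htole16(digest_size);

        memcpy(out, "FSVerity", 8);             /* magic */
        memcpy(out + 8, &alg_le, 2);            /* __le16 digest_algorithm */
        memcpy(out + 10, &size_le, 2);          /* __le16 digest_size */
        memcpy(out + 12, digest, digest_size);  /* the file measurement */
        return 12 + digest_size;
}

With /proc/sys/fs/verity/require_signatures set to 1, any verity file whose descriptor does not carry a valid signature over this blob is rejected when it is opened.
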
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
new file mode 100644
index 000000000000..3e8f2de44667
--- /dev/null
+++ b/fs/verity/verify.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/verity/verify.c: data verification functions, i.e. hooks for ->readpages()
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <crypto/hash.h>
+#include <linux/bio.h>
+#include <linux/ratelimit.h>
+
+static struct workqueue_struct *fsverity_read_workqueue;
+
+/**
+ * hash_at_level() - compute the location of the block's hash at the given level
+ *
+ * @params: (in) the Merkle tree parameters
+ * @dindex: (in) the index of the data block being verified
+ * @level: (in) the level of hash we want (0 is leaf level)
+ * @hindex: (out) the index of the hash block containing the wanted hash
+ * @hoffset: (out) the byte offset to the wanted hash within the hash block
+ */
+static void hash_at_level(const struct merkle_tree_params *params,
+ pgoff_t dindex, unsigned int level, pgoff_t *hindex,
+ unsigned int *hoffset)
+{
+ pgoff_t position;
+
+ /* Offset of the hash within the level's region, in hashes */
+ position = dindex >> (level * params->log_arity);
+
+ /* Index of the hash block in the tree overall */
+ *hindex = params->level_start[level] + (position >> params->log_arity);
+
+ /* Offset of the wanted hash (in bytes) within the hash block */
+ *hoffset = (position & ((1 << params->log_arity) - 1)) <<
+ (params->log_blocksize - params->log_arity);
+}
+
+/* Extract a hash from a hash page */
+static void extract_hash(struct page *hpage, unsigned int hoffset,
+ unsigned int hsize, u8 *out)
+{
+ void *virt = kmap_atomic(hpage);
+
+ memcpy(out, virt + hoffset, hsize);
+ kunmap_atomic(virt);
+}
+
+static inline int cmp_hashes(const struct fsverity_info *vi,
+ const u8 *want_hash, const u8 *real_hash,
+ pgoff_t index, int level)
+{
+ const unsigned int hsize = vi->tree_params.digest_size;
+
+ if (memcmp(want_hash, real_hash, hsize) == 0)
+ return 0;
+
+ fsverity_err(vi->inode,
+ "FILE CORRUPTED! index=%lu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN",
+ index, level,
+ vi->tree_params.hash_alg->name, hsize, want_hash,
+ vi->tree_params.hash_alg->name, hsize, real_hash);
+ return -EBADMSG;
+}
+
+/*
+ * Verify a single data page against the file's Merkle tree.
+ *
+ * In principle, we need to verify the entire path to the root node. However,
+ * for efficiency the filesystem may cache the hash pages. Therefore we need
+ * only ascend the tree until an already-verified page is seen, as indicated by
+ * the PageChecked bit being set; then verify the path to that page.
+ *
+ * This code currently only supports the case where the verity block size is
+ * equal to PAGE_SIZE. Doing otherwise would be possible but tricky, since we
+ * wouldn't be able to use the PageChecked bit.
+ *
+ * Note that multiple processes may race to verify a hash page and mark it
+ * Checked, but it doesn't matter; the result will be the same either way.
+ *
+ * Return: true if the page is valid, else false.
+ */
+static bool verify_page(struct inode *inode, const struct fsverity_info *vi,
+ struct ahash_request *req, struct page *data_page)
+{
+ const struct merkle_tree_params *params = &vi->tree_params;
+ const unsigned int hsize = params->digest_size;
+ const pgoff_t index = data_page->index;
+ int level;
+ u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE];
+ const u8 *want_hash;
+ u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE];
+ struct page *hpages[FS_VERITY_MAX_LEVELS];
+ unsigned int hoffsets[FS_VERITY_MAX_LEVELS];
+ int err;
+
+ if (WARN_ON_ONCE(!PageLocked(data_page) || PageUptodate(data_page)))
+ return false;
+
+ pr_debug_ratelimited("Verifying data page %lu...\n", index);
+
+ /*
+ * Starting at the leaf level, ascend the tree saving hash pages along
+ * the way until we find a verified hash page, indicated by PageChecked;
+ * or until we reach the root.
+ */
+ for (level = 0; level < params->num_levels; level++) {
+ pgoff_t hindex;
+ unsigned int hoffset;
+ struct page *hpage;
+
+ hash_at_level(params, index, level, &hindex, &hoffset);
+
+ pr_debug_ratelimited("Level %d: hindex=%lu, hoffset=%u\n",
+ level, hindex, hoffset);
+
+ hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
+ hindex);
+ if (IS_ERR(hpage)) {
+ err = PTR_ERR(hpage);
+ fsverity_err(inode,
+ "Error %d reading Merkle tree page %lu",
+ err, hindex);
+ goto out;
+ }
+
+ if (PageChecked(hpage)) {
+ extract_hash(hpage, hoffset, hsize, _want_hash);
+ want_hash = _want_hash;
+ put_page(hpage);
+ pr_debug_ratelimited("Hash page already checked, want %s:%*phN\n",
+ params->hash_alg->name,
+ hsize, want_hash);
+ goto descend;
+ }
+ pr_debug_ratelimited("Hash page not yet checked\n");
+ hpages[level] = hpage;
+ hoffsets[level] = hoffset;
+ }
+
+ want_hash = vi->root_hash;
+ pr_debug("Want root hash: %s:%*phN\n",
+ params->hash_alg->name, hsize, want_hash);
+descend:
+ /* Descend the tree verifying hash pages */
+ for (; level > 0; level--) {
+ struct page *hpage = hpages[level - 1];
+ unsigned int hoffset = hoffsets[level - 1];
+
+ err = fsverity_hash_page(params, inode, req, hpage, real_hash);
+ if (err)
+ goto out;
+ err = cmp_hashes(vi, want_hash, real_hash, index, level - 1);
+ if (err)
+ goto out;
+ SetPageChecked(hpage);
+ extract_hash(hpage, hoffset, hsize, _want_hash);
+ want_hash = _want_hash;
+ put_page(hpage);
+ pr_debug("Verified hash page at level %d, now want %s:%*phN\n",
+ level - 1, params->hash_alg->name, hsize, want_hash);
+ }
+
+ /* Finally, verify the data page */
+ err = fsverity_hash_page(params, inode, req, data_page, real_hash);
+ if (err)
+ goto out;
+ err = cmp_hashes(vi, want_hash, real_hash, index, -1);
+out:
+ for (; level > 0; level--)
+ put_page(hpages[level - 1]);
+
+ return err == 0;
+}
+
+/**
+ * fsverity_verify_page() - verify a data page
+ *
+ * Verify a page that has just been read from a verity file. The page must be a
+ * pagecache page that is still locked and not yet uptodate.
+ *
+ * Return: true if the page is valid, else false.
+ */
+bool fsverity_verify_page(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ const struct fsverity_info *vi = inode->i_verity_info;
+ struct ahash_request *req;
+ bool valid;
+
+ req = ahash_request_alloc(vi->tree_params.hash_alg->tfm, GFP_NOFS);
+ if (unlikely(!req))
+ return false;
+
+ valid = verify_page(inode, vi, req, page);
+
+ ahash_request_free(req);
+
+ return valid;
+}
+EXPORT_SYMBOL_GPL(fsverity_verify_page);
+
+#ifdef CONFIG_BLOCK
+/**
+ * fsverity_verify_bio() - verify a 'read' bio that has just completed
+ *
+ * Verify a set of pages that have just been read from a verity file. The pages
+ * must be pagecache pages that are still locked and not yet uptodate. Pages
+ * that fail verification are set to the Error state. Verification is skipped
+ * for pages already in the Error state, e.g. due to fscrypt decryption failure.
+ *
+ * This is a helper function for use by the ->readpages() method of filesystems
+ * that issue bios to read data directly into the page cache. Filesystems that
+ * populate the page cache without issuing bios (e.g. non block-based
+ * filesystems) must instead call fsverity_verify_page() directly on each page.
+ * All filesystems must also call fsverity_verify_page() on holes.
+ */
+void fsverity_verify_bio(struct bio *bio)
+{
+ struct inode *inode = bio_first_page_all(bio)->mapping->host;
+ const struct fsverity_info *vi = inode->i_verity_info;
+ struct ahash_request *req;
+ struct bio_vec *bv;
+ struct bvec_iter_all iter_all;
+
+ req = ahash_request_alloc(vi->tree_params.hash_alg->tfm, GFP_NOFS);
+ if (unlikely(!req)) {
+ bio_for_each_segment_all(bv, bio, iter_all)
+ SetPageError(bv->bv_page);
+ return;
+ }
+
+ bio_for_each_segment_all(bv, bio, iter_all) {
+ struct page *page = bv->bv_page;
+
+ if (!PageError(page) && !verify_page(inode, vi, req, page))
+ SetPageError(page);
+ }
+
+ ahash_request_free(req);
+}
+EXPORT_SYMBOL_GPL(fsverity_verify_bio);
+#endif /* CONFIG_BLOCK */
+
+/**
+ * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
+ *
+ * Enqueue verification work for asynchronous processing.
+ */
+void fsverity_enqueue_verify_work(struct work_struct *work)
+{
+ queue_work(fsverity_read_workqueue, work);
+}
+EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
+
+int __init fsverity_init_workqueue(void)
+{
+ /*
+ * Use an unbound workqueue to allow bios to be verified in parallel
+ * even when they happen to complete on the same CPU. This sacrifices
+ * locality, but it's worthwhile since hashing is CPU-intensive.
+ *
+ * Also use a high-priority workqueue to prioritize verification work,
+ * which blocks reads from completing, over regular application tasks.
+ */
+ fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
+ WQ_UNBOUND | WQ_HIGHPRI,
+ num_online_cpus());
+ if (!fsverity_read_workqueue)
+ return -ENOMEM;
+ return 0;
+}
+
+void __init fsverity_exit_workqueue(void)
+{
+ destroy_workqueue(fsverity_read_workqueue);
+ fsverity_read_workqueue = NULL;
+}
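
Illustrative sketch, not part of the patch: the comments above describe how a block-based filesystem hands completed read bios to fs-verity via the read workqueue. A minimal read-completion path might look like the following, assuming a hypothetical per-bio context and a hypothetical example_finish_read() helper; fsverity_active(), fsverity_enqueue_verify_work() and fsverity_verify_bio() are the only real interfaces used.

#include <linux/bio.h>
#include <linux/fsverity.h>
#include <linux/workqueue.h>

/* Hypothetical per-bio context; real filesystems keep something similar. */
struct example_read_ctx {
	struct work_struct	work;
	struct bio		*bio;
	struct inode		*inode;
};

/* hypothetical: SetPageUptodate()/unlock_page() loop over the bio's pages */
static void example_finish_read(struct bio *bio);

static void example_verity_work(struct work_struct *work)
{
	struct example_read_ctx *ctx =
		container_of(work, struct example_read_ctx, work);

	/* Marks any page that fails verification with PG_error. */
	fsverity_verify_bio(ctx->bio);
	example_finish_read(ctx->bio);
}

static void example_read_end_io(struct bio *bio)
{
	struct example_read_ctx *ctx = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK && fsverity_active(ctx->inode)) {
		/* Defer to the unbound, high-priority fs-verity workqueue. */
		INIT_WORK(&ctx->work, example_verity_work);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		example_finish_read(bio);
	}
}
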
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 06b68b6115bc..aceca2f9a3db 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -27,7 +27,6 @@ xfs-y += $(addprefix libxfs/, \
xfs_bmap_btree.o \
xfs_btree.o \
xfs_da_btree.o \
- xfs_da_format.o \
xfs_defer.o \
xfs_dir2.o \
xfs_dir2_block.o \
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 16bb9a328678..1da94237a8cf 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -3,10 +3,10 @@
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*/
-#include <linux/sched/mm.h>
+#include "xfs.h"
#include <linux/backing-dev.h>
-#include "kmem.h"
#include "xfs_message.h"
+#include "xfs_trace.h"
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
@@ -15,9 +15,11 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
+ trace_kmem_alloc(size, flags, _RET_IP_);
+
do {
ptr = kmalloc(size, lflags);
- if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+ if (ptr || (flags & KM_MAYFAIL))
return ptr;
if (!(++retries % 100))
xfs_err(NULL,
@@ -28,28 +30,24 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
} while (1);
}
-void *
-kmem_alloc_large(size_t size, xfs_km_flags_t flags)
+
+/*
+ * __vmalloc() will allocate data pages and auxiliary structures (e.g.
+ * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
+ * we need to tell memory reclaim that we are in such a context via
+ * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
+ * and potentially deadlocking.
+ */
+static void *
+__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
{
unsigned nofs_flag = 0;
void *ptr;
- gfp_t lflags;
-
- ptr = kmem_alloc(size, flags | KM_MAYFAIL);
- if (ptr)
- return ptr;
+ gfp_t lflags = kmem_flags_convert(flags);
- /*
- * __vmalloc() will allocate data pages and auxillary structures (e.g.
- * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context
- * here. Hence we need to tell memory reclaim that we are in such a
- * context via PF_MEMALLOC_NOFS to prevent memory reclaim re-entering
- * the filesystem here and potentially deadlocking.
- */
if (flags & KM_NOFS)
nofs_flag = memalloc_nofs_save();
- lflags = kmem_flags_convert(flags);
ptr = __vmalloc(size, lflags, PAGE_KERNEL);
if (flags & KM_NOFS)
@@ -58,6 +56,44 @@ kmem_alloc_large(size_t size, xfs_km_flags_t flags)
return ptr;
}
+/*
+ * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
+ * to the @align_mask. We only guarantee alignment up to page size; we clamp the
+ * alignment at page size if it is larger. vmalloc always returns a PAGE_SIZE
+ * aligned region.
+ */
+void *
+kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
+{
+ void *ptr;
+
+ trace_kmem_alloc_io(size, flags, _RET_IP_);
+
+ if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
+ align_mask = PAGE_SIZE - 1;
+
+ ptr = kmem_alloc(size, flags | KM_MAYFAIL);
+ if (ptr) {
+ if (!((uintptr_t)ptr & align_mask))
+ return ptr;
+ kfree(ptr);
+ }
+ return __kmem_vmalloc(size, flags);
+}
+
+void *
+kmem_alloc_large(size_t size, xfs_km_flags_t flags)
+{
+ void *ptr;
+
+ trace_kmem_alloc_large(size, flags, _RET_IP_);
+
+ ptr = kmem_alloc(size, flags | KM_MAYFAIL);
+ if (ptr)
+ return ptr;
+ return __kmem_vmalloc(size, flags);
+}
+
void *
kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
{
@@ -65,9 +101,11 @@ kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
+ trace_kmem_realloc(newsize, flags, _RET_IP_);
+
do {
ptr = krealloc(old, newsize, lflags);
- if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+ if (ptr || (flags & KM_MAYFAIL))
return ptr;
if (!(++retries % 100))
xfs_err(NULL,
@@ -85,9 +123,10 @@ kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
+ trace_kmem_zone_alloc(kmem_cache_size(zone), flags, _RET_IP_);
do {
ptr = kmem_cache_alloc(zone, lflags);
- if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+ if (ptr || (flags & KM_MAYFAIL))
return ptr;
if (!(++retries % 100))
xfs_err(NULL,
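
Illustrative sketch, not part of the patch: the comment above __kmem_vmalloc()/kmem_alloc_io() explains that the heap allocation falls back to vmalloc when it cannot satisfy the requested alignment. A minimal caller might look like this; the 4 KiB size and 512-byte alignment are hypothetical values chosen for illustration.

static int example_do_aligned_io(void)
{
	void *buf;

	/* low 9 bits of the returned address are guaranteed to be zero */
	buf = kmem_alloc_io(4096, 512 - 1, KM_NOFS | KM_ZERO);
	if (!buf)
		return -ENOMEM;

	/* ... build and submit the I/O from buf ... */

	kmem_free(buf);		/* kvfree() based, so the vmalloc fallback is handled too */
	return 0;
}
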
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 267655acd426..6143117770e9 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -16,8 +16,6 @@
*/
typedef unsigned __bitwise xfs_km_flags_t;
-#define KM_SLEEP ((__force xfs_km_flags_t)0x0001u)
-#define KM_NOSLEEP ((__force xfs_km_flags_t)0x0002u)
#define KM_NOFS ((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO ((__force xfs_km_flags_t)0x0010u)
@@ -32,15 +30,11 @@ kmem_flags_convert(xfs_km_flags_t flags)
{
gfp_t lflags;
- BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));
+ BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO));
- if (flags & KM_NOSLEEP) {
- lflags = GFP_ATOMIC | __GFP_NOWARN;
- } else {
- lflags = GFP_KERNEL | __GFP_NOWARN;
- if (flags & KM_NOFS)
- lflags &= ~__GFP_FS;
- }
+ lflags = GFP_KERNEL | __GFP_NOWARN;
+ if (flags & KM_NOFS)
+ lflags &= ~__GFP_FS;
/*
* Default page/slab allocator behavior is to retry for ever
@@ -59,6 +53,7 @@ kmem_flags_convert(xfs_km_flags_t flags)
}
extern void *kmem_alloc(size_t, xfs_km_flags_t);
+extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
@@ -83,39 +78,9 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
* Zone interfaces
*/
-#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN
-#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT
-#define KM_ZONE_SPREAD SLAB_MEM_SPREAD
-#define KM_ZONE_ACCOUNT SLAB_ACCOUNT
-
#define kmem_zone kmem_cache
#define kmem_zone_t struct kmem_cache
-static inline kmem_zone_t *
-kmem_zone_init(int size, char *zone_name)
-{
- return kmem_cache_create(zone_name, size, 0, 0, NULL);
-}
-
-static inline kmem_zone_t *
-kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
- void (*construct)(void *))
-{
- return kmem_cache_create(zone_name, size, 0, flags, construct);
-}
-
-static inline void
-kmem_zone_free(kmem_zone_t *zone, void *ptr)
-{
- kmem_cache_free(zone, ptr);
-}
-
-static inline void
-kmem_zone_destroy(kmem_zone_t *zone)
-{
- kmem_cache_destroy(zone);
-}
-
extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
static inline void *
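
Not part of the patch: with KM_SLEEP and KM_NOSLEEP gone, blocking allocation is simply the default, so callers pass 0 where they used to pass KM_SLEEP. The before/after below shows the conversion pattern; the exact hunk appears later in this patch in xfs_defer_agfl_block().

/* before: the blocking default still had to be spelled out */
new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);

/* after: 0 selects the same retry-until-success behaviour */
new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
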
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 5de296b34ab1..14fbdf22b7e7 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -28,12 +28,11 @@ xfs_get_aghdr_buf(
struct xfs_mount *mp,
xfs_daddr_t blkno,
size_t numblks,
- int flags,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
- bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
+ bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
if (!bp)
return NULL;
@@ -345,7 +344,7 @@ xfs_ag_init_hdr(
{
struct xfs_buf *bp;
- bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, 0, ops);
+ bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, ops);
if (!bp)
return -ENOMEM;
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index 87a9747f1d36..fdfe6dc0d307 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -19,6 +19,8 @@
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_ialloc_btree.h"
+#include "xfs_sb.h"
+#include "xfs_ag_resv.h"
/*
* Per-AG Block Reservations
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 372ad55631fc..c284e10af491 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -146,9 +146,13 @@ xfs_alloc_lookup_eq(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
+ int error;
+
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
- return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+ error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+ cur->bc_private.a.priv.abt.active = (*stat == 1);
+ return error;
}
/*
@@ -162,9 +166,13 @@ xfs_alloc_lookup_ge(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
+ int error;
+
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
- return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
+ error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
+ cur->bc_private.a.priv.abt.active = (*stat == 1);
+ return error;
}
/*
@@ -178,9 +186,19 @@ xfs_alloc_lookup_le(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
+ int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
- return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
+ error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
+ cur->bc_private.a.priv.abt.active = (*stat == 1);
+ return error;
+}
+
+static inline bool
+xfs_alloc_cur_active(
+ struct xfs_btree_cur *cur)
+{
+ return cur && cur->bc_private.a.priv.abt.active;
}
/*
@@ -313,7 +331,7 @@ xfs_alloc_compute_diff(
xfs_extlen_t newlen1=0; /* length with newbno1 */
xfs_extlen_t newlen2=0; /* length with newbno2 */
xfs_agblock_t wantend; /* end of target extent */
- bool userdata = xfs_alloc_is_userdata(datatype);
+ bool userdata = datatype & XFS_ALLOC_USERDATA;
ASSERT(freelen >= wantlen);
freeend = freebno + freelen;
@@ -433,13 +451,17 @@ xfs_alloc_fixup_trees(
#ifdef DEBUG
if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp,
- i == 1 && nfbno1 == fbno && nflen1 == flen);
+ if (XFS_IS_CORRUPT(mp,
+ i != 1 ||
+ nfbno1 != fbno ||
+ nflen1 != flen))
+ return -EFSCORRUPTED;
#endif
} else {
if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
/*
* Look up the record in the by-block tree if necessary.
@@ -448,13 +470,17 @@ xfs_alloc_fixup_trees(
#ifdef DEBUG
if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp,
- i == 1 && nfbno1 == fbno && nflen1 == flen);
+ if (XFS_IS_CORRUPT(mp,
+ i != 1 ||
+ nfbno1 != fbno ||
+ nflen1 != flen))
+ return -EFSCORRUPTED;
#endif
} else {
if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
#ifdef DEBUG
@@ -465,8 +491,10 @@ xfs_alloc_fixup_trees(
bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
- XFS_WANT_CORRUPTED_RETURN(mp,
- bnoblock->bb_numrecs == cntblock->bb_numrecs);
+ if (XFS_IS_CORRUPT(mp,
+ bnoblock->bb_numrecs !=
+ cntblock->bb_numrecs))
+ return -EFSCORRUPTED;
}
#endif
@@ -496,25 +524,30 @@ xfs_alloc_fixup_trees(
*/
if ((error = xfs_btree_delete(cnt_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
/*
* Add new by-size btree entry(s).
*/
if (nfbno1 != NULLAGBLOCK) {
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
+ if (XFS_IS_CORRUPT(mp, i != 0))
+ return -EFSCORRUPTED;
if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
if (nfbno2 != NULLAGBLOCK) {
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
+ if (XFS_IS_CORRUPT(mp, i != 0))
+ return -EFSCORRUPTED;
if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
/*
* Fix up the by-block btree entry(s).
@@ -525,7 +558,8 @@ xfs_alloc_fixup_trees(
*/
if ((error = xfs_btree_delete(bno_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
} else {
/*
* Update the by-block entry to start later|be shorter.
@@ -539,10 +573,12 @@ xfs_alloc_fixup_trees(
*/
if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
+ if (XFS_IS_CORRUPT(mp, i != 0))
+ return -EFSCORRUPTED;
if ((error = xfs_btree_insert(bno_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
return 0;
}
@@ -684,16 +720,298 @@ xfs_alloc_update_counters(
xfs_trans_agblocks_delta(tp, len);
if (unlikely(be32_to_cpu(agf->agf_freeblks) >
- be32_to_cpu(agf->agf_length)))
+ be32_to_cpu(agf->agf_length))) {
+ xfs_buf_corruption_error(agbp);
return -EFSCORRUPTED;
+ }
xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
return 0;
}
/*
- * Allocation group level functions.
+ * Block allocation algorithm and data structures.
+ */
+struct xfs_alloc_cur {
+ struct xfs_btree_cur *cnt; /* btree cursors */
+ struct xfs_btree_cur *bnolt;
+ struct xfs_btree_cur *bnogt;
+ xfs_extlen_t cur_len; /* current search length */
+ xfs_agblock_t rec_bno; /* extent startblock */
+ xfs_extlen_t rec_len; /* extent length */
+ xfs_agblock_t bno; /* alloc bno */
+ xfs_extlen_t len; /* alloc len */
+ xfs_extlen_t diff; /* diff from search bno */
+ unsigned int busy_gen; /* busy state */
+ bool busy;
+};
+
+/*
+ * Set up cursors, etc. in the extent allocation cursor. This function can be
+ * called multiple times to reset an initialized structure without having to
+ * reallocate cursors.
+ */
+static int
+xfs_alloc_cur_setup(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur)
+{
+ int error;
+ int i;
+
+ ASSERT(args->alignment == 1 || args->type != XFS_ALLOCTYPE_THIS_BNO);
+
+ acur->cur_len = args->maxlen;
+ acur->rec_bno = 0;
+ acur->rec_len = 0;
+ acur->bno = 0;
+ acur->len = 0;
+ acur->diff = -1;
+ acur->busy = false;
+ acur->busy_gen = 0;
+
+ /*
+ * Perform an initial cntbt lookup to check for availability of maxlen
+ * extents. If this fails, we'll return -ENOSPC to signal the caller to
+ * attempt a small allocation.
+ */
+ if (!acur->cnt)
+ acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
+ args->agbp, args->agno, XFS_BTNUM_CNT);
+ error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
+ if (error)
+ return error;
+
+ /*
+ * Allocate the bnobt left and right search cursors.
+ */
+ if (!acur->bnolt)
+ acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
+ args->agbp, args->agno, XFS_BTNUM_BNO);
+ if (!acur->bnogt)
+ acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
+ args->agbp, args->agno, XFS_BTNUM_BNO);
+ return i == 1 ? 0 : -ENOSPC;
+}
+
+static void
+xfs_alloc_cur_close(
+ struct xfs_alloc_cur *acur,
+ bool error)
+{
+ int cur_error = XFS_BTREE_NOERROR;
+
+ if (error)
+ cur_error = XFS_BTREE_ERROR;
+
+ if (acur->cnt)
+ xfs_btree_del_cursor(acur->cnt, cur_error);
+ if (acur->bnolt)
+ xfs_btree_del_cursor(acur->bnolt, cur_error);
+ if (acur->bnogt)
+ xfs_btree_del_cursor(acur->bnogt, cur_error);
+ acur->cnt = acur->bnolt = acur->bnogt = NULL;
+}
+
+/*
+ * Check an extent for allocation and track the best available candidate in the
+ * allocation structure. The cursor is deactivated if it has entered an out of
+ * range state based on allocation arguments. Optionally return the extent
+ * geometry and allocation status if requested by the caller.
+ */
+static int
+xfs_alloc_cur_check(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur,
+ struct xfs_btree_cur *cur,
+ int *new)
+{
+ int error, i;
+ xfs_agblock_t bno, bnoa, bnew;
+ xfs_extlen_t len, lena, diff = -1;
+ bool busy;
+ unsigned busy_gen = 0;
+ bool deactivate = false;
+ bool isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;
+
+ *new = 0;
+
+ error = xfs_alloc_get_rec(cur, &bno, &len, &i);
+ if (error)
+ return error;
+ if (XFS_IS_CORRUPT(args->mp, i != 1))
+ return -EFSCORRUPTED;
+
+ /*
+ * Check minlen and deactivate a cntbt cursor if out of acceptable size
+ * range (i.e., walking backwards looking for a minlen extent).
+ */
+ if (len < args->minlen) {
+ deactivate = !isbnobt;
+ goto out;
+ }
+
+ busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
+ &busy_gen);
+ acur->busy |= busy;
+ if (busy)
+ acur->busy_gen = busy_gen;
+ /* deactivate a bnobt cursor outside of locality range */
+ if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
+ deactivate = isbnobt;
+ goto out;
+ }
+ if (lena < args->minlen)
+ goto out;
+
+ args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
+ xfs_alloc_fix_len(args);
+ ASSERT(args->len >= args->minlen);
+ if (args->len < acur->len)
+ goto out;
+
+ /*
+ * We have an aligned record that satisfies minlen and beats or matches
+ * the candidate extent size. Compare locality for near allocation mode.
+ */
+ ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
+ diff = xfs_alloc_compute_diff(args->agbno, args->len,
+ args->alignment, args->datatype,
+ bnoa, lena, &bnew);
+ if (bnew == NULLAGBLOCK)
+ goto out;
+
+ /*
+ * Deactivate a bnobt cursor with worse locality than the current best.
+ */
+ if (diff > acur->diff) {
+ deactivate = isbnobt;
+ goto out;
+ }
+
+ ASSERT(args->len > acur->len ||
+ (args->len == acur->len && diff <= acur->diff));
+ acur->rec_bno = bno;
+ acur->rec_len = len;
+ acur->bno = bnew;
+ acur->len = args->len;
+ acur->diff = diff;
+ *new = 1;
+
+ /*
+ * We're done if we found a perfect allocation. This only deactivates
+ * the current cursor, but this is just an optimization to terminate a
+ * cntbt search that otherwise runs to the edge of the tree.
+ */
+ if (acur->diff == 0 && acur->len == args->maxlen)
+ deactivate = true;
+out:
+ if (deactivate)
+ cur->bc_private.a.priv.abt.active = false;
+ trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
+ *new);
+ return 0;
+}
+
+/*
+ * Complete an allocation of a candidate extent. Remove the extent from both
+ * trees and update the args structure.
*/
+STATIC int
+xfs_alloc_cur_finish(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur)
+{
+ int error;
+
+ ASSERT(acur->cnt && acur->bnolt);
+ ASSERT(acur->bno >= acur->rec_bno);
+ ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
+ ASSERT(acur->rec_bno + acur->rec_len <=
+ be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+
+ error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
+ acur->rec_len, acur->bno, acur->len, 0);
+ if (error)
+ return error;
+
+ args->agbno = acur->bno;
+ args->len = acur->len;
+ args->wasfromfl = 0;
+
+ trace_xfs_alloc_cur(args);
+ return 0;
+}
+
+/*
+ * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
+ * bno optimized lookup to search for extents with ideal size and locality.
+ */
+STATIC int
+xfs_alloc_cntbt_iter(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur)
+{
+ struct xfs_btree_cur *cur = acur->cnt;
+ xfs_agblock_t bno;
+ xfs_extlen_t len, cur_len;
+ int error;
+ int i;
+
+ if (!xfs_alloc_cur_active(cur))
+ return 0;
+
+ /* locality optimized lookup */
+ cur_len = acur->cur_len;
+ error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
+ if (error)
+ return error;
+ if (i == 0)
+ return 0;
+ error = xfs_alloc_get_rec(cur, &bno, &len, &i);
+ if (error)
+ return error;
+
+ /* check the current record and update search length from it */
+ error = xfs_alloc_cur_check(args, acur, cur, &i);
+ if (error)
+ return error;
+ ASSERT(len >= acur->cur_len);
+ acur->cur_len = len;
+
+ /*
+ * We looked up the first record >= [agbno, len] above. The agbno is a
+ * secondary key and so the current record may lie just before or after
+ * agbno. If it is past agbno, check the previous record too so long as
+ * the length matches as it may be closer. Don't check a smaller record
+ * because that could deactivate our cursor.
+ */
+ if (bno > args->agbno) {
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (!error && i) {
+ error = xfs_alloc_get_rec(cur, &bno, &len, &i);
+ if (!error && i && len == acur->cur_len)
+ error = xfs_alloc_cur_check(args, acur, cur,
+ &i);
+ }
+ if (error)
+ return error;
+ }
+
+ /*
+ * Increment the search key if we haven't yet found an allocation
+ * candidate or if the extent we found was larger. Otherwise, double the
+ * search key to optimize the search. Efficiency is more important here
+ * than absolute best locality.
+ */
+ cur_len <<= 1;
+ if (!acur->len || acur->cur_len >= cur_len)
+ acur->cur_len++;
+ else
+ acur->cur_len = cur_len;
+
+ return error;
+}
/*
* Deal with the case where only small freespaces remain. Either return the
@@ -727,7 +1045,10 @@ xfs_alloc_ag_vextent_small(
error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
if (error)
goto error;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
goto out;
}
@@ -744,13 +1065,13 @@ xfs_alloc_ag_vextent_small(
goto out;
xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
- xfs_alloc_allow_busy_reuse(args->datatype));
+ (args->datatype & XFS_ALLOC_NOBUSY));
- if (xfs_alloc_is_userdata(args->datatype)) {
+ if (args->datatype & XFS_ALLOC_USERDATA) {
struct xfs_buf *bp;
bp = xfs_btree_get_bufs(args->mp, args->tp, args->agno, fbno);
- if (!bp) {
+ if (XFS_IS_CORRUPT(args->mp, !bp)) {
error = -EFSCORRUPTED;
goto error;
}
@@ -758,9 +1079,12 @@ xfs_alloc_ag_vextent_small(
}
*fbnop = args->agbno = fbno;
*flenp = args->len = 1;
- XFS_WANT_CORRUPTED_GOTO(args->mp,
- fbno < be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
- error);
+ if (XFS_IS_CORRUPT(args->mp,
+ fbno >= be32_to_cpu(
+ XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
args->wasfromfl = 1;
trace_xfs_alloc_small_freelist(args);
@@ -915,7 +1239,10 @@ xfs_alloc_ag_vextent_exact(
error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
ASSERT(fbno <= args->agbno);
/*
@@ -984,98 +1311,243 @@ error0:
}
/*
- * Search the btree in a given direction via the search cursor and compare
- * the records found against the good extent we've already found.
+ * Search a given number of btree records in a given direction. Check each
+ * record against the good extent we've already found.
*/
STATIC int
-xfs_alloc_find_best_extent(
- struct xfs_alloc_arg *args, /* allocation argument structure */
- struct xfs_btree_cur **gcur, /* good cursor */
- struct xfs_btree_cur **scur, /* searching cursor */
- xfs_agblock_t gdiff, /* difference for search comparison */
- xfs_agblock_t *sbno, /* extent found by search */
- xfs_extlen_t *slen, /* extent length */
- xfs_agblock_t *sbnoa, /* aligned extent found by search */
- xfs_extlen_t *slena, /* aligned extent length */
- int dir) /* 0 = search right, 1 = search left */
+xfs_alloc_walk_iter(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur,
+ struct xfs_btree_cur *cur,
+ bool increment,
+ bool find_one, /* quit on first candidate */
+ int count, /* rec count (-1 for infinite) */
+ int *stat)
{
- xfs_agblock_t new;
- xfs_agblock_t sdiff;
int error;
int i;
- unsigned busy_gen;
- /* The good extent is perfect, no need to search. */
- if (!gdiff)
- goto out_use_good;
+ *stat = 0;
/*
- * Look until we find a better one, run out of space or run off the end.
+ * Search so long as the cursor is active or we find a better extent.
+ * The cursor is deactivated if it extends beyond the range of the
+ * current allocation candidate.
*/
- do {
- error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
+ while (xfs_alloc_cur_active(cur) && count) {
+ error = xfs_alloc_cur_check(args, acur, cur, &i);
if (error)
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- xfs_alloc_compute_aligned(args, *sbno, *slen,
- sbnoa, slena, &busy_gen);
+ return error;
+ if (i == 1) {
+ *stat = 1;
+ if (find_one)
+ break;
+ }
+ if (!xfs_alloc_cur_active(cur))
+ break;
+
+ if (increment)
+ error = xfs_btree_increment(cur, 0, &i);
+ else
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ return error;
+ if (i == 0)
+ cur->bc_private.a.priv.abt.active = false;
+
+ if (count > 0)
+ count--;
+ }
+
+ return 0;
+}
+
+/*
+ * Search the by-bno and by-size btrees in parallel in search of an extent with
+ * ideal locality based on the NEAR mode ->agbno locality hint.
+ */
+STATIC int
+xfs_alloc_ag_vextent_locality(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur,
+ int *stat)
+{
+ struct xfs_btree_cur *fbcur = NULL;
+ int error;
+ int i;
+ bool fbinc;
+
+ ASSERT(acur->len == 0);
+ ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
+
+ *stat = 0;
+
+ error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
+ if (error)
+ return error;
+ error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
+ if (error)
+ return error;
+ error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
+ if (error)
+ return error;
+
+ /*
+ * Search the bnobt and cntbt in parallel. Search the bnobt left and
+ * right and lookup the closest extent to the locality hint for each
+ * extent size key in the cntbt. The entire search terminates
+ * immediately on a bnobt hit because that means we've found best case
+ * locality. Otherwise the search continues until the cntbt cursor runs
+ * off the end of the tree. If no allocation candidate is found at this
+ * point, give up on locality, walk backwards from the end of the cntbt
+ * and take the first available extent.
+ *
+ * The parallel tree searches balance each other out to provide fairly
+ * consistent performance for various situations. The bnobt search can
+ * have pathological behavior in the worst case scenario of larger
+ * allocation requests and fragmented free space. On the other hand, the
+ * bnobt is able to satisfy most smaller allocation requests much more
+ * quickly than the cntbt. The cntbt search can sift through fragmented
+ * free space and sets of free extents for larger allocation requests
+ * more quickly than the bnobt. Since the locality hint is just a hint
+ * and we don't want to scan the entire bnobt for perfect locality, the
+ * cntbt search essentially bounds the bnobt search such that we can
+ * find good enough locality at reasonable performance in most cases.
+ */
+ while (xfs_alloc_cur_active(acur->bnolt) ||
+ xfs_alloc_cur_active(acur->bnogt) ||
+ xfs_alloc_cur_active(acur->cnt)) {
+
+ trace_xfs_alloc_cur_lookup(args);
/*
- * The good extent is closer than this one.
+ * Search the bnobt left and right. In the case of a hit, finish
+ * the search in the opposite direction and we're done.
*/
- if (!dir) {
- if (*sbnoa > args->max_agbno)
- goto out_use_good;
- if (*sbnoa >= args->agbno + gdiff)
- goto out_use_good;
- } else {
- if (*sbnoa < args->min_agbno)
- goto out_use_good;
- if (*sbnoa <= args->agbno - gdiff)
- goto out_use_good;
+ error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
+ true, 1, &i);
+ if (error)
+ return error;
+ if (i == 1) {
+ trace_xfs_alloc_cur_left(args);
+ fbcur = acur->bnogt;
+ fbinc = true;
+ break;
+ }
+ error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
+ 1, &i);
+ if (error)
+ return error;
+ if (i == 1) {
+ trace_xfs_alloc_cur_right(args);
+ fbcur = acur->bnolt;
+ fbinc = false;
+ break;
}
/*
- * Same distance, compare length and pick the best.
+ * Check the extent with best locality based on the current
+ * extent size search key and keep track of the best candidate.
*/
- if (*slena >= args->minlen) {
- args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
- xfs_alloc_fix_len(args);
-
- sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment,
- args->datatype, *sbnoa,
- *slena, &new);
+ error = xfs_alloc_cntbt_iter(args, acur);
+ if (error)
+ return error;
+ if (!xfs_alloc_cur_active(acur->cnt)) {
+ trace_xfs_alloc_cur_lookup_done(args);
+ break;
+ }
+ }
- /*
- * Choose closer size and invalidate other cursor.
- */
- if (sdiff < gdiff)
- goto out_use_search;
- goto out_use_good;
+ /*
+ * If we failed to find anything due to busy extents, return empty
+ * handed so the caller can flush and retry. If no busy extents were
+ * found, walk backwards from the end of the cntbt as a last resort.
+ */
+ if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
+ error = xfs_btree_decrement(acur->cnt, 0, &i);
+ if (error)
+ return error;
+ if (i) {
+ acur->cnt->bc_private.a.priv.abt.active = true;
+ fbcur = acur->cnt;
+ fbinc = false;
}
+ }
- if (!dir)
- error = xfs_btree_increment(*scur, 0, &i);
- else
- error = xfs_btree_decrement(*scur, 0, &i);
+ /*
+ * Search in the opposite direction for a better entry in the case of
+ * a bnobt hit or walk backwards from the end of the cntbt.
+ */
+ if (fbcur) {
+ error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
+ &i);
if (error)
- goto error0;
- } while (i);
+ return error;
+ }
-out_use_good:
- xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
- *scur = NULL;
- return 0;
+ if (acur->len)
+ *stat = 1;
-out_use_search:
- xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
- *gcur = NULL;
return 0;
+}
-error0:
- /* caller invalidates cursors */
- return error;
+/* Check the last block of the cnt btree for allocations. */
+static int
+xfs_alloc_ag_vextent_lastblock(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur,
+ xfs_agblock_t *bno,
+ xfs_extlen_t *len,
+ bool *allocated)
+{
+ int error;
+ int i;
+
+#ifdef DEBUG
+ /* Randomly don't execute the first algorithm. */
+ if (prandom_u32() & 1)
+ return 0;
+#endif
+
+ /*
+ * Start from the entry that lookup found, sequence through all larger
+ * free blocks. If we're actually pointing at a record smaller than
+ * maxlen, go to the start of this block, and skip all those smaller
+ * than minlen.
+ */
+ if (len || args->alignment > 1) {
+ acur->cnt->bc_ptrs[0] = 1;
+ do {
+ error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
+ if (error)
+ return error;
+ if (XFS_IS_CORRUPT(args->mp, i != 1))
+ return -EFSCORRUPTED;
+ if (*len >= args->minlen)
+ break;
+ error = xfs_btree_increment(acur->cnt, 0, &i);
+ if (error)
+ return error;
+ } while (i);
+ ASSERT(*len >= args->minlen);
+ if (!i)
+ return 0;
+ }
+
+ error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
+ if (error)
+ return error;
+
+ /*
+ * It didn't work. We COULD be in a case where there's a good record
+ * somewhere, so try again.
+ */
+ if (acur->len == 0)
+ return 0;
+
+ trace_xfs_alloc_near_first(args);
+ *allocated = true;
+ return 0;
}
/*
@@ -1084,41 +1556,17 @@ error0:
* and of the form k * prod + mod unless there's nothing that large.
* Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
*/
-STATIC int /* error */
+STATIC int
xfs_alloc_ag_vextent_near(
- xfs_alloc_arg_t *args) /* allocation argument structure */
+ struct xfs_alloc_arg *args)
{
- xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
- xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
- xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
- xfs_agblock_t gtbno; /* start bno of right side entry */
- xfs_agblock_t gtbnoa; /* aligned ... */
- xfs_extlen_t gtdiff; /* difference to right side entry */
- xfs_extlen_t gtlen; /* length of right side entry */
- xfs_extlen_t gtlena; /* aligned ... */
- xfs_agblock_t gtnew; /* useful start bno of right side */
- int error; /* error code */
- int i; /* result code, temporary */
- int j; /* result code, temporary */
- xfs_agblock_t ltbno; /* start bno of left side entry */
- xfs_agblock_t ltbnoa; /* aligned ... */
- xfs_extlen_t ltdiff; /* difference to left side entry */
- xfs_extlen_t ltlen; /* length of left side entry */
- xfs_extlen_t ltlena; /* aligned ... */
- xfs_agblock_t ltnew; /* useful start bno of left side */
- xfs_extlen_t rlen; /* length of returned extent */
- bool busy;
- unsigned busy_gen;
-#ifdef DEBUG
- /*
- * Randomly don't execute the first algorithm.
- */
- int dofirst; /* set to do first algorithm */
-
- dofirst = prandom_u32() & 1;
-#endif
+ struct xfs_alloc_cur acur = {};
+ int error; /* error code */
+ int i; /* result code, temporary */
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
- /* handle unitialized agbno range so caller doesn't have to */
+ /* handle uninitialized agbno range so caller doesn't have to */
if (!args->min_agbno && !args->max_agbno)
args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
ASSERT(args->min_agbno <= args->max_agbno);
@@ -1130,40 +1578,27 @@ xfs_alloc_ag_vextent_near(
args->agbno = args->max_agbno;
restart:
- bno_cur_lt = NULL;
- bno_cur_gt = NULL;
- ltlen = 0;
- gtlena = 0;
- ltlena = 0;
- busy = false;
-
- /*
- * Get a cursor for the by-size btree.
- */
- cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_CNT);
+ len = 0;
/*
- * See if there are any free extents as big as maxlen.
+ * Set up cursors and see if there are any free extents as big as
+ * maxlen. If not, pick the last entry in the tree unless the tree is
+ * empty.
*/
- if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
- goto error0;
- /*
- * If none, then pick up the last entry in the tree unless the
- * tree is empty.
- */
- if (!i) {
- if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
- &ltlen, &i)))
- goto error0;
- if (i == 0 || ltlen == 0) {
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+ error = xfs_alloc_cur_setup(args, &acur);
+ if (error == -ENOSPC) {
+ error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
+ &len, &i);
+ if (error)
+ goto out;
+ if (i == 0 || len == 0) {
trace_xfs_alloc_near_noentry(args);
- return 0;
+ goto out;
}
ASSERT(i == 1);
+ } else if (error) {
+ goto out;
}
- args->wasfromfl = 0;
/*
* First algorithm.
@@ -1172,311 +1607,47 @@ restart:
* near the right edge of the tree. If it's in the last btree leaf
* block, then we just examine all the entries in that block
* that are big enough, and pick the best one.
- * This is written as a while loop so we can break out of it,
- * but we never loop back to the top.
*/
- while (xfs_btree_islastblock(cnt_cur, 0)) {
- xfs_extlen_t bdiff;
- int besti=0;
- xfs_extlen_t blen=0;
- xfs_agblock_t bnew=0;
-
-#ifdef DEBUG
- if (dofirst)
- break;
-#endif
- /*
- * Start from the entry that lookup found, sequence through
- * all larger free blocks. If we're actually pointing at a
- * record smaller than maxlen, go to the start of this block,
- * and skip all those smaller than minlen.
- */
- if (ltlen || args->alignment > 1) {
- cnt_cur->bc_ptrs[0] = 1;
- do {
- if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
- &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- if (ltlen >= args->minlen)
- break;
- if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
- goto error0;
- } while (i);
- ASSERT(ltlen >= args->minlen);
- if (!i)
- break;
- }
- i = cnt_cur->bc_ptrs[0];
- for (j = 1, blen = 0, bdiff = 0;
- !error && j && (blen < args->maxlen || bdiff > 0);
- error = xfs_btree_increment(cnt_cur, 0, &j)) {
- /*
- * For each entry, decide if it's better than
- * the previous best entry.
- */
- if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
- &ltbnoa, &ltlena, &busy_gen);
- if (ltlena < args->minlen)
- continue;
- if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
- continue;
- args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
- xfs_alloc_fix_len(args);
- ASSERT(args->len >= args->minlen);
- if (args->len < blen)
- continue;
- ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, args->datatype, ltbnoa,
- ltlena, &ltnew);
- if (ltnew != NULLAGBLOCK &&
- (args->len > blen || ltdiff < bdiff)) {
- bdiff = ltdiff;
- bnew = ltnew;
- blen = args->len;
- besti = cnt_cur->bc_ptrs[0];
- }
- }
- /*
- * It didn't work. We COULD be in a case where
- * there's a good record somewhere, so try again.
- */
- if (blen == 0)
- break;
- /*
- * Point at the best entry, and retrieve it again.
- */
- cnt_cur->bc_ptrs[0] = besti;
- if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
- args->len = blen;
-
- /*
- * We are allocating starting at bnew for blen blocks.
- */
- args->agbno = bnew;
- ASSERT(bnew >= ltbno);
- ASSERT(bnew + blen <= ltbno + ltlen);
- /*
- * Set up a cursor for the by-bno tree.
- */
- bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
- args->agbp, args->agno, XFS_BTNUM_BNO);
- /*
- * Fix up the btree entries.
- */
- if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
- ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
- goto error0;
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
+ if (xfs_btree_islastblock(acur.cnt, 0)) {
+ bool allocated = false;
- trace_xfs_alloc_near_first(args);
- return 0;
- }
- /*
- * Second algorithm.
- * Search in the by-bno tree to the left and to the right
- * simultaneously, until in each case we find a space big enough,
- * or run into the edge of the tree. When we run into the edge,
- * we deallocate that cursor.
- * If both searches succeed, we compare the two spaces and pick
- * the better one.
- * With alignment, it's possible for both to fail; the upper
- * level algorithm that picks allocation groups for allocations
- * is not supposed to do this.
- */
- /*
- * Allocate and initialize the cursor for the leftward search.
- */
- bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO);
- /*
- * Lookup <= bno to find the leftward search's starting point.
- */
- if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
- goto error0;
- if (!i) {
- /*
- * Didn't find anything; use this cursor for the rightward
- * search.
- */
- bno_cur_gt = bno_cur_lt;
- bno_cur_lt = NULL;
- }
- /*
- * Found something. Duplicate the cursor for the rightward search.
- */
- else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
- goto error0;
- /*
- * Increment the cursor, so we will point at the entry just right
- * of the leftward entry if any, or to the leftmost entry.
- */
- if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
- goto error0;
- if (!i) {
- /*
- * It failed, there are no rightward entries.
- */
- xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
+ error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
+ &allocated);
+ if (error)
+ goto out;
+ if (allocated)
+ goto alloc_finish;
}
- /*
- * Loop going left with the leftward cursor, right with the
- * rightward cursor, until either both directions give up or
- * we find an entry at least as big as minlen.
- */
- do {
- if (bno_cur_lt) {
- if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
- &ltbnoa, &ltlena, &busy_gen);
- if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
- break;
- if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
- goto error0;
- if (!i || ltbnoa < args->min_agbno) {
- xfs_btree_del_cursor(bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- }
- }
- if (bno_cur_gt) {
- if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
- &gtbnoa, &gtlena, &busy_gen);
- if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
- break;
- if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
- goto error0;
- if (!i || gtbnoa > args->max_agbno) {
- xfs_btree_del_cursor(bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- }
- }
- } while (bno_cur_lt || bno_cur_gt);
/*
- * Got both cursors still active, need to find better entry.
+ * Second algorithm. Combined cntbt and bnobt search to find ideal
+ * locality.
*/
- if (bno_cur_lt && bno_cur_gt) {
- if (ltlena >= args->minlen) {
- /*
- * Left side is good, look for a right side entry.
- */
- args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
- xfs_alloc_fix_len(args);
- ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, args->datatype, ltbnoa,
- ltlena, &ltnew);
-
- error = xfs_alloc_find_best_extent(args,
- &bno_cur_lt, &bno_cur_gt,
- ltdiff, &gtbno, &gtlen,
- &gtbnoa, &gtlena,
- 0 /* search right */);
- } else {
- ASSERT(gtlena >= args->minlen);
-
- /*
- * Right side is good, look for a left side entry.
- */
- args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
- xfs_alloc_fix_len(args);
- gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, args->datatype, gtbnoa,
- gtlena, &gtnew);
-
- error = xfs_alloc_find_best_extent(args,
- &bno_cur_gt, &bno_cur_lt,
- gtdiff, &ltbno, &ltlen,
- &ltbnoa, &ltlena,
- 1 /* search left */);
- }
-
- if (error)
- goto error0;
- }
+ error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
+ if (error)
+ goto out;
/*
* If we couldn't get anything, give up.
*/
- if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
-
- if (busy) {
+ if (!acur.len) {
+ if (acur.busy) {
trace_xfs_alloc_near_busy(args);
- xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
+ xfs_extent_busy_flush(args->mp, args->pag,
+ acur.busy_gen);
goto restart;
}
trace_xfs_alloc_size_neither(args);
args->agbno = NULLAGBLOCK;
- return 0;
+ goto out;
}
- /*
- * At this point we have selected a freespace entry, either to the
- * left or to the right. If it's on the right, copy all the
- * useful variables to the "left" set so we only have one
- * copy of this code.
- */
- if (bno_cur_gt) {
- bno_cur_lt = bno_cur_gt;
- bno_cur_gt = NULL;
- ltbno = gtbno;
- ltbnoa = gtbnoa;
- ltlen = gtlen;
- ltlena = gtlena;
- j = 1;
- } else
- j = 0;
-
- /*
- * Fix up the length and compute the useful address.
- */
- args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
- xfs_alloc_fix_len(args);
- rlen = args->len;
- (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
- args->datatype, ltbnoa, ltlena, &ltnew);
- ASSERT(ltnew >= ltbno);
- ASSERT(ltnew + rlen <= ltbnoa + ltlena);
- ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
- ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
- args->agbno = ltnew;
-
- if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
- ltnew, rlen, XFSA_FIXUP_BNO_OK)))
- goto error0;
-
- if (j)
- trace_xfs_alloc_near_greater(args);
- else
- trace_xfs_alloc_near_lesser(args);
-
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
- return 0;
+alloc_finish:
+ /* fix up btrees on a successful allocation */
+ error = xfs_alloc_cur_finish(args, &acur);
- error0:
- trace_xfs_alloc_near_error(args);
- if (cnt_cur != NULL)
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
- if (bno_cur_lt != NULL)
- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
- if (bno_cur_gt != NULL)
- xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
+out:
+ xfs_alloc_cur_close(&acur, error);
return error;
}
@@ -1545,7 +1716,10 @@ restart:
error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
busy = xfs_alloc_compute_aligned(args, fbno, flen,
&rbno, &rlen, &busy_gen);
@@ -1579,8 +1753,13 @@ restart:
* This can't happen in the second case above.
*/
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
- XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
- (rlen <= flen && rbno + rlen <= fbno + flen), error0);
+ if (XFS_IS_CORRUPT(args->mp,
+ rlen != 0 &&
+ (rlen > flen ||
+ rbno + rlen > fbno + flen))) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (rlen < args->maxlen) {
xfs_agblock_t bestfbno;
xfs_extlen_t bestflen;
@@ -1599,15 +1778,22 @@ restart:
if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
&i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (flen < bestrlen)
break;
busy = xfs_alloc_compute_aligned(args, fbno, flen,
&rbno, &rlen, &busy_gen);
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
- XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
- (rlen <= flen && rbno + rlen <= fbno + flen),
- error0);
+ if (XFS_IS_CORRUPT(args->mp,
+ rlen != 0 &&
+ (rlen > flen ||
+ rbno + rlen > fbno + flen))) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (rlen > bestrlen) {
bestrlen = rlen;
bestrbno = rbno;
@@ -1620,7 +1806,10 @@ restart:
if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
&i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
rlen = bestrlen;
rbno = bestrbno;
flen = bestflen;
@@ -1643,7 +1832,10 @@ restart:
xfs_alloc_fix_len(args);
rlen = args->len;
- XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
+ if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Allocate and initialize a cursor for the by-block tree.
*/
@@ -1657,10 +1849,13 @@ restart:
cnt_cur = bno_cur = NULL;
args->len = rlen;
args->agbno = rbno;
- XFS_WANT_CORRUPTED_GOTO(args->mp,
- args->agbno + args->len <=
- be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
- error0);
+ if (XFS_IS_CORRUPT(args->mp,
+ args->agbno + args->len >
+ be32_to_cpu(
+ XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
trace_xfs_alloc_size_done(args);
return 0;
@@ -1732,7 +1927,10 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* It's not contiguous, though.
*/
@@ -1744,8 +1942,10 @@ xfs_free_ag_extent(
* space was invalid, it's (partly) already free.
* Very bad.
*/
- XFS_WANT_CORRUPTED_GOTO(mp,
- ltbno + ltlen <= bno, error0);
+ if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
}
/*
@@ -1760,7 +1960,10 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* It's not contiguous, though.
*/
@@ -1772,7 +1975,10 @@ xfs_free_ag_extent(
* space was invalid, it's (partly) already free.
* Very bad.
*/
- XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
+ if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
}
/*
@@ -1789,31 +1995,49 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Delete the old by-size entry on the right.
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Delete the old by-block entry for the right block.
*/
if ((error = xfs_btree_delete(bno_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Move the by-block cursor back to the left neighbor.
*/
if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
#ifdef DEBUG
/*
* Check that this is the right record: delete didn't
@@ -1826,9 +2050,13 @@ xfs_free_ag_extent(
if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
&i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp,
- i == 1 && xxbno == ltbno && xxlen == ltlen,
- error0);
+ if (XFS_IS_CORRUPT(mp,
+ i != 1 ||
+ xxbno != ltbno ||
+ xxlen != ltlen)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
#endif
/*
@@ -1849,17 +2077,26 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Back up the by-block cursor to the left neighbor, and
* update its length.
*/
if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
nbno = ltbno;
nlen = len + ltlen;
if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
@@ -1875,10 +2112,16 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Update the starting block and length of the right
* neighbor in the by-block tree.
@@ -1897,7 +2140,10 @@ xfs_free_ag_extent(
nlen = len;
if ((error = xfs_btree_insert(bno_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
bno_cur = NULL;
@@ -1906,10 +2152,16 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_insert(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
cnt_cur = NULL;
@@ -1989,7 +2241,8 @@ xfs_alloc_longest_free_extent(
* reservations and AGFL rules in place, we can return this extent.
*/
if (pag->pagf_longest > delta)
- return pag->pagf_longest - delta;
+ return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
+ pag->pagf_longest - delta);
/* Otherwise, let the caller try for 1 block if there's space. */
return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
@@ -2087,7 +2340,7 @@ xfs_free_agfl_block(
return error;
bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno);
- if (!bp)
+ if (XFS_IS_CORRUPT(tp->t_mountp, !bp))
return -EFSCORRUPTED;
xfs_trans_binval(tp, bp);
@@ -2205,7 +2458,7 @@ xfs_defer_agfl_block(
ASSERT(xfs_bmap_free_item_zone != NULL);
ASSERT(oinfo != NULL);
- new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+ new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
new->xefi_blockcount = 1;
new->xefi_oinfo = *oinfo;
@@ -2253,7 +2506,7 @@ xfs_alloc_fix_freelist(
* somewhere else if we are not being asked to try harder at this
* point
*/
- if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
+ if (pag->pagf_metadata && (args->datatype & XFS_ALLOC_USERDATA) &&
(flags & XFS_ALLOC_FLAG_TRYLOCK)) {
ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
goto out_agbp_relse;
@@ -2956,13 +3209,6 @@ xfs_alloc_vextent(
args->len);
#endif
- /* Zero the extent if we were asked to do so */
- if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
- error = xfs_zero_extent(args->ip, args->fsbno, args->len);
- if (error)
- goto error0;
- }
-
}
xfs_perag_put(args->pag);
return 0;
@@ -3038,12 +3284,18 @@ __xfs_free_extent(
if (error)
return error;
- XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
+ if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
+ error = -EFSCORRUPTED;
+ goto err;
+ }
/* validate the extent size is legal now we have the agf locked */
- XFS_WANT_CORRUPTED_GOTO(mp,
- agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
- err);
+ if (XFS_IS_CORRUPT(mp,
+ agbno + len >
+ be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length))) {
+ error = -EFSCORRUPTED;
+ goto err;
+ }
error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
if (error)
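
Not part of the patch: much of the churn above is a mechanical conversion from the XFS_WANT_CORRUPTED_GOTO/RETURN macros to explicit XFS_IS_CORRUPT() checks. Note that the sense of the condition is inverted: the old macros asserted the expected state, while XFS_IS_CORRUPT() names the corrupt state and evaluates true when it is hit. The recurring pattern, taken from the hunks above, is:

/* old: jump to the error label unless exactly one record was found */
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

/* new: report corruption explicitly and bail out when it was not */
if (XFS_IS_CORRUPT(mp, i != 1)) {
	error = -EFSCORRUPTED;
	goto error0;
}
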
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index d6ed5d2c07c2..7380fbe4a3ff 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -54,7 +54,6 @@ typedef struct xfs_alloc_arg {
struct xfs_mount *mp; /* file system mount point */
struct xfs_buf *agbp; /* buffer for a.g. freelist header */
struct xfs_perag *pag; /* per-ag struct for this agno */
- struct xfs_inode *ip; /* for userdata zeroing method */
xfs_fsblock_t fsbno; /* file system block number */
xfs_agnumber_t agno; /* allocation group number */
xfs_agblock_t agbno; /* allocation group-relative block # */
@@ -83,20 +82,7 @@ typedef struct xfs_alloc_arg {
*/
#define XFS_ALLOC_USERDATA (1 << 0)/* allocation is for user data*/
#define XFS_ALLOC_INITIAL_USER_DATA (1 << 1)/* special case start of file */
-#define XFS_ALLOC_USERDATA_ZERO (1 << 2)/* zero extent on allocation */
-#define XFS_ALLOC_NOBUSY (1 << 3)/* Busy extents not allowed */
-
-static inline bool
-xfs_alloc_is_userdata(int datatype)
-{
- return (datatype & ~XFS_ALLOC_NOBUSY) != 0;
-}
-
-static inline bool
-xfs_alloc_allow_busy_reuse(int datatype)
-{
- return (datatype & XFS_ALLOC_NOBUSY) == 0;
-}
+#define XFS_ALLOC_NOBUSY (1 << 2)/* Busy extents not allowed */
/* freespace limit calculations */
#define XFS_ALLOC_AGFL_RESERVE 4
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 2a94543857a1..279694d73e4e 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -507,6 +507,7 @@ xfs_allocbt_init_cursor(
cur->bc_private.a.agbp = agbp;
cur->bc_private.a.agno = agno;
+ cur->bc_private.a.priv.abt.active = false;
if (xfs_sb_version_hascrc(&mp->m_sb))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index d48fcf11cc35..0d7fcc983b3d 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -97,7 +97,10 @@ xfs_inode_hasattr(
* Overall external interface routines.
*========================================================================*/
-/* Retrieve an extended attribute and its value. Must have ilock. */
+/*
+ * Retrieve an extended attribute and its value. Must have ilock.
+ * Returns 0 on successful retrieval, otherwise an error.
+ */
int
xfs_attr_get_ilocked(
struct xfs_inode *ip,
@@ -115,12 +118,28 @@ xfs_attr_get_ilocked(
return xfs_attr_node_get(args);
}
-/* Retrieve an extended attribute by name, and its value. */
+/*
+ * Retrieve an extended attribute by name, and its value if requested.
+ *
+ * If ATTR_KERNOVAL is set in @flags, then the caller does not want the value,
+ * just an indication whether the attribute exists and the size of the value if
+ * it exists. The size is returned in @valuelenp.
+ *
+ * If the attribute is found, but exceeds the size limit set by the caller in
+ * @valuelenp, return -ERANGE with the size of the attribute that was found in
+ * @valuelenp.
+ *
+ * If ATTR_ALLOC is set in @flags, allocate the buffer for the value after
+ * existence of the attribute has been determined. On success, return that
+ * buffer to the caller, who must free it. On failure, free any
+ * allocated buffer and ensure the buffer pointer returned to the caller is
+ * null.
+ */
int
xfs_attr_get(
struct xfs_inode *ip,
const unsigned char *name,
- unsigned char *value,
+ unsigned char **value,
int *valuelenp,
int flags)
{
@@ -128,6 +147,8 @@ xfs_attr_get(
uint lock_mode;
int error;
+ ASSERT((flags & (ATTR_ALLOC | ATTR_KERNOVAL)) || *value);
+
XFS_STATS_INC(ip->i_mount, xs_attr_get);
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -137,17 +158,29 @@ xfs_attr_get(
if (error)
return error;
- args.value = value;
- args.valuelen = *valuelenp;
/* Entirely possible to look up a name which doesn't exist */
args.op_flags = XFS_DA_OP_OKNOENT;
+ if (flags & ATTR_ALLOC)
+ args.op_flags |= XFS_DA_OP_ALLOCVAL;
+ else
+ args.value = *value;
+ args.valuelen = *valuelenp;
lock_mode = xfs_ilock_attr_map_shared(ip);
error = xfs_attr_get_ilocked(ip, &args);
xfs_iunlock(ip, lock_mode);
-
*valuelenp = args.valuelen;
- return error == -EEXIST ? 0 : error;
+
+ /* on error, we have to clean up allocated value buffers */
+ if (error) {
+ if (flags & ATTR_ALLOC) {
+ kmem_free(args.value);
+ *value = NULL;
+ }
+ return error;
+ }
+ *value = args.value;
+ return 0;
}
/*
@@ -556,7 +589,7 @@ xfs_attr_leaf_addname(
*/
dp = args->dp;
args->blkno = 0;
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -682,7 +715,7 @@ xfs_attr_leaf_addname(
* remove the "old" attr from that block (neat, huh!)
*/
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
- -1, &bp);
+ &bp);
if (error)
return error;
@@ -736,7 +769,7 @@ xfs_attr_leaf_removename(
*/
dp = args->dp;
args->blkno = 0;
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -768,6 +801,8 @@ xfs_attr_leaf_removename(
*
* This leaf block cannot have a "remote" value, we only call this routine
* if bmap_one_block() says there is only one block (ie: no remote blks).
+ *
+ * Returns 0 on successful retrieval, otherwise an error.
*/
STATIC int
xfs_attr_leaf_get(xfs_da_args_t *args)
@@ -778,7 +813,7 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
trace_xfs_attr_leaf_get(args);
args->blkno = 0;
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -789,9 +824,6 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
}
error = xfs_attr3_leaf_getvalue(bp, args);
xfs_trans_brelse(args->trans, bp);
- if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) {
- error = xfs_attr_rmtval_get(args);
- }
return error;
}
@@ -1141,7 +1173,7 @@ xfs_attr_node_removename(
ASSERT(state->path.blk[0].bp);
state->path.blk[0].bp = NULL;
- error = xfs_attr3_leaf_read(args->trans, args->dp, 0, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp);
if (error)
goto out;
@@ -1234,10 +1266,9 @@ xfs_attr_refillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
- error = xfs_da3_node_read(state->args->trans,
- state->args->dp,
- blk->blkno, blk->disk_blkno,
- &blk->bp, XFS_ATTR_FORK);
+ error = xfs_da3_node_read_mapped(state->args->trans,
+ state->args->dp, blk->disk_blkno,
+ &blk->bp, XFS_ATTR_FORK);
if (error)
return error;
} else {
@@ -1253,10 +1284,9 @@ xfs_attr_refillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
- error = xfs_da3_node_read(state->args->trans,
- state->args->dp,
- blk->blkno, blk->disk_blkno,
- &blk->bp, XFS_ATTR_FORK);
+ error = xfs_da3_node_read_mapped(state->args->trans,
+ state->args->dp, blk->disk_blkno,
+ &blk->bp, XFS_ATTR_FORK);
if (error)
return error;
} else {
@@ -1268,11 +1298,13 @@ xfs_attr_refillstate(xfs_da_state_t *state)
}
/*
- * Look up a filename in a node attribute list.
+ * Retrieve the attribute data from a node attribute list.
*
* This routine gets called for any attribute fork that has more than one
* block, ie: both true Btree attr lists and for single-leaf-blocks with
* "remote" values taking up more blocks.
+ *
+ * Returns 0 on successful retrieval, otherwise an error.
*/
STATIC int
xfs_attr_node_get(xfs_da_args_t *args)
@@ -1294,24 +1326,21 @@ xfs_attr_node_get(xfs_da_args_t *args)
error = xfs_da3_node_lookup_int(state, &retval);
if (error) {
retval = error;
- } else if (retval == -EEXIST) {
- blk = &state->path.blk[ state->path.active-1 ];
- ASSERT(blk->bp != NULL);
- ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
-
- /*
- * Get the value, local or "remote"
- */
- retval = xfs_attr3_leaf_getvalue(blk->bp, args);
- if (!retval && (args->rmtblkno > 0)
- && !(args->flags & ATTR_KERNOVAL)) {
- retval = xfs_attr_rmtval_get(args);
- }
+ goto out_release;
}
+ if (retval != -EEXIST)
+ goto out_release;
+
+ /*
+ * Get the value, local or "remote"
+ */
+ blk = &state->path.blk[state->path.active - 1];
+ retval = xfs_attr3_leaf_getvalue(blk->bp, args);
/*
* If not in a transaction, we have to release all the buffers.
*/
+out_release:
for (i = 0; i < state->path.active; i++) {
xfs_trans_brelse(args->trans, state->path.blk[i].bp);
state->path.blk[i].bp = NULL;
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index ff28ebf3b635..94badfa1743e 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -37,6 +37,7 @@ struct xfs_attr_list_context;
#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
+#define ATTR_ALLOC 0x8000 /* allocate xattr buffer on demand */
#define XFS_ATTR_FLAGS \
{ ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
@@ -47,7 +48,8 @@ struct xfs_attr_list_context;
{ ATTR_REPLACE, "REPLACE" }, \
{ ATTR_KERNOTIME, "KERNOTIME" }, \
{ ATTR_KERNOVAL, "KERNOVAL" }, \
- { ATTR_INCOMPLETE, "INCOMPLETE" }
+ { ATTR_INCOMPLETE, "INCOMPLETE" }, \
+ { ATTR_ALLOC, "ALLOC" }
/*
* The maximum size (into the kernel or returned from the kernel) of an
@@ -143,7 +145,7 @@ int xfs_attr_list_int(struct xfs_attr_list_context *);
int xfs_inode_hasattr(struct xfs_inode *ip);
int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
- unsigned char *value, int *valuelenp, int flags);
+ unsigned char **value, int *valuelenp, int flags);
int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
unsigned char *value, int valuelen, int flags);
int xfs_attr_set_args(struct xfs_da_args *args);
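
A hypothetical caller sketch of the new xfs_attr_get() prototype above (not part of this patch): with ATTR_ALLOC the value buffer is allocated once the attribute size is known, comes back through the double pointer, and is guaranteed to be NULL on error, so the only cleanup the caller owes is a kmem_free() on success. XATTR_SIZE_MAX is used here only as an assumed upper bound on the value size the caller will accept.

static int
example_read_xattr(
	struct xfs_inode	*ip,
	const unsigned char	*name,
	unsigned char		**valuep,
	int			*lenp)
{
	unsigned char		*value = NULL;
	int			valuelen = XATTR_SIZE_MAX;	/* assumed size limit */
	int			error;

	error = xfs_attr_get(ip, name, &value, &valuelen, ATTR_ALLOC);
	if (error)
		return error;		/* value is NULL on any failure */

	*valuep = value;		/* caller frees with kmem_free() */
	*lenp = valuelen;		/* actual size of the value */
	return 0;
}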
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 70eb941d02e4..08d4b10ae2d5 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -233,6 +233,61 @@ xfs_attr3_leaf_hdr_to_disk(
}
static xfs_failaddr_t
+xfs_attr3_leaf_verify_entry(
+ struct xfs_mount *mp,
+ char *buf_end,
+ struct xfs_attr_leafblock *leaf,
+ struct xfs_attr3_icleaf_hdr *leafhdr,
+ struct xfs_attr_leaf_entry *ent,
+ int idx,
+ __u32 *last_hashval)
+{
+ struct xfs_attr_leaf_name_local *lentry;
+ struct xfs_attr_leaf_name_remote *rentry;
+ char *name_end;
+ unsigned int nameidx;
+ unsigned int namesize;
+ __u32 hashval;
+
+ /* hash order check */
+ hashval = be32_to_cpu(ent->hashval);
+ if (hashval < *last_hashval)
+ return __this_address;
+ *last_hashval = hashval;
+
+ nameidx = be16_to_cpu(ent->nameidx);
+ if (nameidx < leafhdr->firstused || nameidx >= mp->m_attr_geo->blksize)
+ return __this_address;
+
+ /*
+ * Check the name information. The namelen fields are u8 so we can't
+ * possibly exceed the maximum name length of 255 bytes.
+ */
+ if (ent->flags & XFS_ATTR_LOCAL) {
+ lentry = xfs_attr3_leaf_name_local(leaf, idx);
+ namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
+ be16_to_cpu(lentry->valuelen));
+ name_end = (char *)lentry + namesize;
+ if (lentry->namelen == 0)
+ return __this_address;
+ } else {
+ rentry = xfs_attr3_leaf_name_remote(leaf, idx);
+ namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
+ name_end = (char *)rentry + namesize;
+ if (rentry->namelen == 0)
+ return __this_address;
+ if (!(ent->flags & XFS_ATTR_INCOMPLETE) &&
+ rentry->valueblk == 0)
+ return __this_address;
+ }
+
+ if (name_end > buf_end)
+ return __this_address;
+
+ return NULL;
+}
+
+static xfs_failaddr_t
xfs_attr3_leaf_verify(
struct xfs_buf *bp)
{
@@ -240,7 +295,10 @@ xfs_attr3_leaf_verify(
struct xfs_mount *mp = bp->b_mount;
struct xfs_attr_leafblock *leaf = bp->b_addr;
struct xfs_attr_leaf_entry *entries;
+ struct xfs_attr_leaf_entry *ent;
+ char *buf_end;
uint32_t end; /* must be 32bit - see below */
+ __u32 last_hashval = 0;
int i;
xfs_failaddr_t fa;
@@ -273,8 +331,13 @@ xfs_attr3_leaf_verify(
(char *)bp->b_addr + ichdr.firstused)
return __this_address;
- /* XXX: need to range check rest of attr header values */
- /* XXX: hash order check? */
+ buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
+ for (i = 0, ent = entries; i < ichdr.count; ent++, i++) {
+ fa = xfs_attr3_leaf_verify_entry(mp, buf_end, leaf, &ichdr,
+ ent, i, &last_hashval);
+ if (fa)
+ return fa;
+ }
/*
* Quickly check the freemap information. Attribute data has to be
@@ -367,13 +430,12 @@ xfs_attr3_leaf_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
struct xfs_buf **bpp)
{
int err;
- err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
- XFS_ATTR_FORK, &xfs_attr3_leaf_buf_ops);
+ err = xfs_da_read_buf(tp, dp, bno, 0, bpp, XFS_ATTR_FORK,
+ &xfs_attr3_leaf_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
return err;
@@ -393,6 +455,50 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
}
+static int
+xfs_attr_copy_value(
+ struct xfs_da_args *args,
+ unsigned char *value,
+ int valuelen)
+{
+ /*
+ * No copy if all we have to do is get the length
+ */
+ if (args->flags & ATTR_KERNOVAL) {
+ args->valuelen = valuelen;
+ return 0;
+ }
+
+ /*
+ * No copy if the length of the existing buffer is too small
+ */
+ if (args->valuelen < valuelen) {
+ args->valuelen = valuelen;
+ return -ERANGE;
+ }
+
+ if (args->op_flags & XFS_DA_OP_ALLOCVAL) {
+ args->value = kmem_alloc_large(valuelen, 0);
+ if (!args->value)
+ return -ENOMEM;
+ }
+ args->valuelen = valuelen;
+
+ /* remote block xattr requires IO for copy-in */
+ if (args->rmtblkno)
+ return xfs_attr_rmtval_get(args);
+
+ /*
+ * This is to prevent a GCC warning because the remote xattr case
+ * doesn't have a value to pass in. In that case, we never reach here,
+ * but GCC can't work that out and so throws a "passing NULL to
+ * memcpy" warning.
+ */
+ if (!value)
+ return -EINVAL;
+ memcpy(args->value, value, valuelen);
+ return 0;
+}
/*========================================================================
* External routines when attribute fork size < XFS_LITINO(mp).
@@ -409,13 +515,15 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
* special case for dev/uuid inodes, they have fixed size data forks.
*/
int
-xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
+xfs_attr_shortform_bytesfit(
+ struct xfs_inode *dp,
+ int bytes)
{
- int offset;
- int minforkoff; /* lower limit on valid forkoff locations */
- int maxforkoff; /* upper limit on valid forkoff locations */
- int dsize;
- xfs_mount_t *mp = dp->i_mount;
+ struct xfs_mount *mp = dp->i_mount;
+ int64_t dsize;
+ int minforkoff;
+ int maxforkoff;
+ int offset;
/* rounded down */
offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;
@@ -481,7 +589,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
* A data fork btree root must have space for at least
* MINDBTPTRS key/ptr pairs if the data fork is small or empty.
*/
- minforkoff = max(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
+ minforkoff = max_t(int64_t, dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
minforkoff = roundup(minforkoff, 8) >> 3;
/* attr fork btree root can have at least this many key/ptr pairs */
@@ -720,15 +828,19 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
}
/*
- * Look up a name in a shortform attribute list structure.
+ * Retrieve the attribute value and length.
+ *
+ * If ATTR_KERNOVAL is specified, only the length needs to be returned.
+ * Unlike a lookup, we only return an error if the attribute does not
+ * exist or we can't retrieve the value.
*/
-/*ARGSUSED*/
int
-xfs_attr_shortform_getvalue(xfs_da_args_t *args)
+xfs_attr_shortform_getvalue(
+ struct xfs_da_args *args)
{
- xfs_attr_shortform_t *sf;
- xfs_attr_sf_entry_t *sfe;
- int i;
+ struct xfs_attr_shortform *sf;
+ struct xfs_attr_sf_entry *sfe;
+ int i;
ASSERT(args->dp->i_afp->if_flags == XFS_IFINLINE);
sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
@@ -741,18 +853,8 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
continue;
if (!xfs_attr_namesp_match(args->flags, sfe->flags))
continue;
- if (args->flags & ATTR_KERNOVAL) {
- args->valuelen = sfe->valuelen;
- return -EEXIST;
- }
- if (args->valuelen < sfe->valuelen) {
- args->valuelen = sfe->valuelen;
- return -ERANGE;
- }
- args->valuelen = sfe->valuelen;
- memcpy(args->value, &sfe->nameval[args->namelen],
- args->valuelen);
- return -EEXIST;
+ return xfs_attr_copy_value(args, &sfe->nameval[args->namelen],
+ sfe->valuelen);
}
return -ENOATTR;
}
@@ -782,38 +884,23 @@ xfs_attr_shortform_to_leaf(
ifp = dp->i_afp;
sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
size = be16_to_cpu(sf->hdr.totsize);
- tmpbuffer = kmem_alloc(size, KM_SLEEP);
+ tmpbuffer = kmem_alloc(size, 0);
ASSERT(tmpbuffer != NULL);
memcpy(tmpbuffer, ifp->if_u1.if_data, size);
sf = (xfs_attr_shortform_t *)tmpbuffer;
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
- xfs_bmap_local_to_extents_empty(dp, XFS_ATTR_FORK);
+ xfs_bmap_local_to_extents_empty(args->trans, dp, XFS_ATTR_FORK);
bp = NULL;
error = xfs_da_grow_inode(args, &blkno);
- if (error) {
- /*
- * If we hit an IO error middle of the transaction inside
- * grow_inode(), we may have inconsistent data. Bail out.
- */
- if (error == -EIO)
- goto out;
- xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */
- memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */
+ if (error)
goto out;
- }
ASSERT(blkno == 0);
error = xfs_attr3_leaf_create(args, blkno, &bp);
- if (error) {
- /* xfs_attr3_leaf_create may not have instantiated a block */
- if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
- goto out;
- xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */
- memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */
+ if (error)
goto out;
- }
memset((char *)&nargs, 0, sizeof(nargs));
nargs.dp = dp;
@@ -901,7 +988,7 @@ xfs_attr_shortform_verify(
char *endp;
struct xfs_ifork *ifp;
int i;
- int size;
+ int64_t size;
ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
@@ -985,7 +1072,7 @@ xfs_attr3_leaf_to_shortform(
trace_xfs_attr_leaf_to_sf(args);
- tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
+ tmpbuffer = kmem_alloc(args->geo->blksize, 0);
if (!tmpbuffer)
return -ENOMEM;
@@ -1057,7 +1144,6 @@ xfs_attr3_leaf_to_node(
struct xfs_attr_leafblock *leaf;
struct xfs_attr3_icleaf_hdr icleafhdr;
struct xfs_attr_leaf_entry *entries;
- struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr icnodehdr;
struct xfs_da_intnode *node;
struct xfs_inode *dp = args->dp;
@@ -1072,11 +1158,11 @@ xfs_attr3_leaf_to_node(
error = xfs_da_grow_inode(args, &blkno);
if (error)
goto out;
- error = xfs_attr3_leaf_read(args->trans, dp, 0, -1, &bp1);
+ error = xfs_attr3_leaf_read(args->trans, dp, 0, &bp1);
if (error)
goto out;
- error = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp2, XFS_ATTR_FORK);
+ error = xfs_da_get_buf(args->trans, dp, blkno, &bp2, XFS_ATTR_FORK);
if (error)
goto out;
@@ -1097,18 +1183,17 @@ xfs_attr3_leaf_to_node(
if (error)
goto out;
node = bp1->b_addr;
- dp->d_ops->node_hdr_from_disk(&icnodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(mp, &icnodehdr, node);
leaf = bp2->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &icleafhdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
/* both on-disk, don't endian-flip twice */
- btree[0].hashval = entries[icleafhdr.count - 1].hashval;
- btree[0].before = cpu_to_be32(blkno);
+ icnodehdr.btree[0].hashval = entries[icleafhdr.count - 1].hashval;
+ icnodehdr.btree[0].before = cpu_to_be32(blkno);
icnodehdr.count = 1;
- dp->d_ops->node_hdr_to_disk(node, &icnodehdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &icnodehdr);
xfs_trans_log_buf(args->trans, bp1, 0, args->geo->blksize - 1);
error = 0;
out:
@@ -1138,7 +1223,7 @@ xfs_attr3_leaf_create(
trace_xfs_attr_leaf_create(args);
- error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
+ error = xfs_da_get_buf(args->trans, args->dp, blkno, &bp,
XFS_ATTR_FORK);
if (error)
return error;
@@ -1424,7 +1509,9 @@ xfs_attr3_leaf_add_work(
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
if (ichdr->freemap[i].base == tmp) {
ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
- ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
+ ichdr->freemap[i].size -=
+ min_t(uint16_t, ichdr->freemap[i].size,
+ sizeof(xfs_attr_leaf_entry_t));
}
}
ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
@@ -1448,7 +1535,7 @@ xfs_attr3_leaf_compact(
trace_xfs_attr_leaf_compact(args);
- tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
+ tmpbuffer = kmem_alloc(args->geo->blksize, 0);
memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
memset(bp->b_addr, 0, args->geo->blksize);
leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -1908,7 +1995,7 @@ xfs_attr3_leaf_toosmall(
if (blkno == 0)
continue;
error = xfs_attr3_leaf_read(state->args->trans, state->args->dp,
- blkno, -1, &bp);
+ blkno, &bp);
if (error)
return error;
@@ -2167,7 +2254,7 @@ xfs_attr3_leaf_unbalance(
struct xfs_attr_leafblock *tmp_leaf;
struct xfs_attr3_icleaf_hdr tmphdr;
- tmp_leaf = kmem_zalloc(state->args->geo->blksize, KM_SLEEP);
+ tmp_leaf = kmem_zalloc(state->args->geo->blksize, 0);
/*
* Copy the header into the temp leaf so that all the stuff
@@ -2258,8 +2345,10 @@ xfs_attr3_leaf_lookup_int(
leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
- if (ichdr.count >= args->geo->blksize / 8)
+ if (ichdr.count >= args->geo->blksize / 8) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* Binary search. (note: small blocks will skip this loop)
@@ -2275,10 +2364,14 @@ xfs_attr3_leaf_lookup_int(
else
break;
}
- if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count)))
+ if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count))) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
- if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval))
+ }
+ if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval)) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* Since we may have duplicate hashval's, find the first matching
@@ -2350,6 +2443,10 @@ xfs_attr3_leaf_lookup_int(
/*
* Get the value associated with an attribute name from a leaf attribute
* list structure.
+ *
+ * If ATTR_KERNOVAL is specified, only the length needs to be returned.
+ * Unlike a lookup, we only return an error if the attribute does not
+ * exist or we can't retrieve the value.
*/
int
xfs_attr3_leaf_getvalue(
@@ -2361,7 +2458,6 @@ xfs_attr3_leaf_getvalue(
struct xfs_attr_leaf_entry *entry;
struct xfs_attr_leaf_name_local *name_loc;
struct xfs_attr_leaf_name_remote *name_rmt;
- int valuelen;
leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
@@ -2373,36 +2469,19 @@ xfs_attr3_leaf_getvalue(
name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
ASSERT(name_loc->namelen == args->namelen);
ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
- valuelen = be16_to_cpu(name_loc->valuelen);
- if (args->flags & ATTR_KERNOVAL) {
- args->valuelen = valuelen;
- return 0;
- }
- if (args->valuelen < valuelen) {
- args->valuelen = valuelen;
- return -ERANGE;
- }
- args->valuelen = valuelen;
- memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
- } else {
- name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
- ASSERT(name_rmt->namelen == args->namelen);
- ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
- args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
- args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
- args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
- args->rmtvaluelen);
- if (args->flags & ATTR_KERNOVAL) {
- args->valuelen = args->rmtvaluelen;
- return 0;
- }
- if (args->valuelen < args->rmtvaluelen) {
- args->valuelen = args->rmtvaluelen;
- return -ERANGE;
- }
- args->valuelen = args->rmtvaluelen;
- }
- return 0;
+ return xfs_attr_copy_value(args,
+ &name_loc->nameval[args->namelen],
+ be16_to_cpu(name_loc->valuelen));
+ }
+
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
+ ASSERT(name_rmt->namelen == args->namelen);
+ ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
+ args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
+ args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
+ args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
+ args->rmtvaluelen);
+ return xfs_attr_copy_value(args, NULL, args->rmtvaluelen);
}
/*========================================================================
@@ -2652,7 +2731,7 @@ xfs_attr3_leaf_clearflag(
/*
* Set up the operation.
*/
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -2719,7 +2798,7 @@ xfs_attr3_leaf_setflag(
/*
* Set up the operation.
*/
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -2781,7 +2860,7 @@ xfs_attr3_leaf_flipflags(
/*
* Read the block containing the "old" attr
*/
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp1);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp1);
if (error)
return error;
@@ -2790,7 +2869,7 @@ xfs_attr3_leaf_flipflags(
*/
if (args->blkno2 != args->blkno) {
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno2,
- -1, &bp2);
+ &bp2);
if (error)
return error;
} else {
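
As a hedged illustration of the copy-out rules consolidated into xfs_attr_copy_value() above (not part of the patch): a caller that manages its own buffer can probe with ATTR_KERNOVAL, which reports only the length, and size the real call from that; an undersized buffer would instead come back as -ERANGE with the required length in valuelen.

static int
example_probe_then_copy(
	struct xfs_inode	*ip,
	const unsigned char	*name,
	unsigned char		*buf,
	int			bufsize,
	int			*lenp)
{
	unsigned char		*value = buf;
	int			valuelen = 0;
	int			error;

	/* Length-only probe: no bytes are copied on this call. */
	error = xfs_attr_get(ip, name, &value, &valuelen, ATTR_KERNOVAL);
	if (error)
		return error;
	if (valuelen > bufsize)
		return -ERANGE;

	/* Real copy into the caller's buffer. */
	value = buf;
	valuelen = bufsize;
	error = xfs_attr_get(ip, name, &value, &valuelen, 0);
	if (!error)
		*lenp = valuelen;
	return error;
}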
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 7b74e18becff..f4a188e28b7b 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -17,6 +17,29 @@ struct xfs_inode;
struct xfs_trans;
/*
+ * Incore version of the attribute leaf header.
+ */
+struct xfs_attr3_icleaf_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t usedbytes;
+ /*
+ * Firstused is 32-bit here instead of 16-bit like the on-disk variant
+ * to support maximum fsb size of 64k without overflow issues throughout
+ * the attr code. Instead, the overflow condition is handled on
+ * conversion to/from disk.
+ */
+ uint32_t firstused;
+ __u8 holes;
+ struct {
+ uint16_t base;
+ uint16_t size;
+ } freemap[XFS_ATTR_LEAF_MAPSIZE];
+};
+
+/*
* Used to keep a list of "remote value" extents when unlinking an inode.
*/
typedef struct xfs_attr_inactive_list {
@@ -67,8 +90,8 @@ int xfs_attr3_leaf_add(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
int xfs_attr3_leaf_remove(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
-void xfs_attr3_leaf_list_int(struct xfs_buf *bp,
- struct xfs_attr_list_context *context);
+int xfs_attr3_leaf_list_int(struct xfs_buf *bp,
+ struct xfs_attr_list_context *context);
/*
* Routines used for shrinking the Btree.
@@ -85,8 +108,7 @@ int xfs_attr_leaf_order(struct xfs_buf *leaf1_bp,
struct xfs_buf *leaf2_bp);
int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int *local);
int xfs_attr3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mappedbno,
- struct xfs_buf **bpp);
+ xfs_dablk_t bno, struct xfs_buf **bpp);
void xfs_attr3_leaf_hdr_from_disk(struct xfs_da_geometry *geo,
struct xfs_attr3_icleaf_hdr *to,
struct xfs_attr_leafblock *from);
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 4eb30d357045..a6ef5df42669 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -19,6 +19,7 @@
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
+#include "xfs_attr_remote.h"
#include "xfs_trace.h"
#include "xfs_error.h"
@@ -358,6 +359,8 @@ xfs_attr_rmtval_copyin(
/*
* Read the value associated with an attribute from the out-of-line buffer
* that we stored it in.
+ *
+ * Returns 0 on successful retrieval, otherwise an error.
*/
int
xfs_attr_rmtval_get(
diff --git a/fs/xfs/libxfs/xfs_bit.c b/fs/xfs/libxfs/xfs_bit.c
index 7071ff98fdbc..40ce5f3094d1 100644
--- a/fs/xfs/libxfs/xfs_bit.c
+++ b/fs/xfs/libxfs/xfs_bit.c
@@ -5,6 +5,7 @@
*/
#include "xfs.h"
#include "xfs_log_format.h"
+#include "xfs_bit.h"
/*
* XFS bit manipulation routines, used in non-realtime code.
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index baf0b72c0a37..4acc6e37c31d 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -34,6 +34,7 @@
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
+#include "xfs_iomap.h"
kmem_zone_t *xfs_bmap_free_item_zone;
@@ -383,8 +384,10 @@ xfs_bmap_check_leaf_extents(
xfs_check_block(block, mp, 0, 0);
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
- XFS_WANT_CORRUPTED_GOTO(mp,
- xfs_verify_fsbno(mp, bno), error0);
+ if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (bp_release) {
bp_release = 0;
xfs_trans_brelse(NULL, bp);
@@ -553,7 +556,7 @@ __xfs_bmap_add_free(
#endif
ASSERT(xfs_bmap_free_item_zone != NULL);
- new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+ new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
new->xefi_startblock = bno;
new->xefi_blockcount = (xfs_extlen_t)len;
if (oinfo)
@@ -611,8 +614,8 @@ xfs_bmap_btree_to_extents(
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
cbno = be64_to_cpu(*pp);
#ifdef DEBUG
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
- xfs_btree_check_lptr(cur, cbno, 1));
+ if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
+ return -EFSCORRUPTED;
#endif
error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
&xfs_bmbt_buf_ops);
@@ -728,7 +731,7 @@ xfs_bmap_extents_to_btree(
ip->i_d.di_nblocks++;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno);
- if (!abp) {
+ if (XFS_IS_CORRUPT(mp, !abp)) {
error = -EFSCORRUPTED;
goto out_unreserve_dquot;
}
@@ -792,6 +795,7 @@ out_root_realloc:
*/
void
xfs_bmap_local_to_extents_empty(
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork)
{
@@ -808,6 +812,7 @@ xfs_bmap_local_to_extents_empty(
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
@@ -840,7 +845,7 @@ xfs_bmap_local_to_extents(
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
if (!ifp->if_bytes) {
- xfs_bmap_local_to_extents_empty(ip, whichfork);
+ xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
flags = XFS_ILOG_CORE;
goto done;
}
@@ -887,7 +892,7 @@ xfs_bmap_local_to_extents(
/* account for the change in fork size */
xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
- xfs_bmap_local_to_extents_empty(ip, whichfork);
+ xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
flags |= XFS_ILOG_CORE;
ifp->if_u1.if_root = NULL;
@@ -934,7 +939,10 @@ xfs_bmap_add_attrfork_btree(
if (error)
goto error0;
/* must be at least one entry */
- XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
+ if (XFS_IS_CORRUPT(mp, stat != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
goto error0;
if (stat == 0) {
@@ -1081,7 +1089,7 @@ xfs_bmap_add_attrfork(
goto trans_cancel;
if (XFS_IFORK_Q(ip))
goto trans_cancel;
- if (ip->i_d.di_anextents != 0) {
+ if (XFS_IS_CORRUPT(mp, ip->i_d.di_anextents != 0)) {
error = -EFSCORRUPTED;
goto trans_cancel;
}
@@ -1099,7 +1107,7 @@ xfs_bmap_add_attrfork(
if (error)
goto trans_cancel;
ASSERT(ip->i_afp == NULL);
- ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
+ ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0);
ip->i_afp->if_flags = XFS_IFEXTENTS;
logflags = 0;
switch (ip->i_d.di_format) {
@@ -1152,6 +1160,65 @@ trans_cancel:
* Internal and external extent tree search functions.
*/
+struct xfs_iread_state {
+ struct xfs_iext_cursor icur;
+ xfs_extnum_t loaded;
+};
+
+/* Stuff every bmbt record from this block into the incore extent map. */
+static int
+xfs_iread_bmbt_block(
+ struct xfs_btree_cur *cur,
+ int level,
+ void *priv)
+{
+ struct xfs_iread_state *ir = priv;
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_inode *ip = cur->bc_private.b.ip;
+ struct xfs_btree_block *block;
+ struct xfs_buf *bp;
+ struct xfs_bmbt_rec *frp;
+ xfs_extnum_t num_recs;
+ xfs_extnum_t j;
+ int whichfork = cur->bc_private.b.whichfork;
+
+ block = xfs_btree_get_block(cur, level, &bp);
+
+ /* Abort if we find more records than nextents. */
+ num_recs = xfs_btree_get_numrecs(block);
+ if (unlikely(ir->loaded + num_recs >
+ XFS_IFORK_NEXTENTS(ip, whichfork))) {
+ xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
+ (unsigned long long)ip->i_ino);
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
+ sizeof(*block), __this_address);
+ return -EFSCORRUPTED;
+ }
+
+ /* Copy records into the incore cache. */
+ frp = XFS_BMBT_REC_ADDR(mp, block, 1);
+ for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
+ struct xfs_bmbt_irec new;
+ xfs_failaddr_t fa;
+
+ xfs_bmbt_disk_get_all(frp, &new);
+ fa = xfs_bmap_validate_extent(ip, whichfork, &new);
+ if (fa) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED,
+ "xfs_iread_extents(2)", frp,
+ sizeof(*frp), fa);
+ return -EFSCORRUPTED;
+ }
+ xfs_iext_insert(ip, &ir->icur, &new,
+ xfs_bmap_fork_to_state(whichfork));
+ trace_xfs_read_extent(ip, &ir->icur,
+ xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
+ xfs_iext_next(XFS_IFORK_PTR(ip, whichfork), &ir->icur);
+ }
+
+ return 0;
+}
+
/*
* Read in extents from a btree-format inode.
*/
@@ -1161,134 +1228,39 @@ xfs_iread_extents(
struct xfs_inode *ip,
int whichfork)
{
- struct xfs_mount *mp = ip->i_mount;
- int state = xfs_bmap_fork_to_state(whichfork);
+ struct xfs_iread_state ir;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_extnum_t nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
- struct xfs_btree_block *block = ifp->if_broot;
- struct xfs_iext_cursor icur;
- struct xfs_bmbt_irec new;
- xfs_fsblock_t bno;
- struct xfs_buf *bp;
- xfs_extnum_t i, j;
- int level;
- __be64 *pp;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_btree_cur *cur;
int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
-
- /*
- * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
- */
- level = be16_to_cpu(block->bb_level);
- if (unlikely(level == 0)) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
- pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
- bno = be64_to_cpu(*pp);
-
- /*
- * Go down the tree until leaf level is reached, following the first
- * pointer (leftmost) at each level.
- */
- while (level-- > 0) {
- error = xfs_btree_read_bufl(mp, tp, bno, &bp,
- XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
- if (error)
- goto out;
- block = XFS_BUF_TO_BLOCK(bp);
- if (level == 0)
- break;
- pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
- bno = be64_to_cpu(*pp);
- XFS_WANT_CORRUPTED_GOTO(mp,
- xfs_verify_fsbno(mp, bno), out_brelse);
- xfs_trans_brelse(tp, bp);
+ if (XFS_IS_CORRUPT(mp,
+ XFS_IFORK_FORMAT(ip, whichfork) !=
+ XFS_DINODE_FMT_BTREE)) {
+ error = -EFSCORRUPTED;
+ goto out;
}
- /*
- * Here with bp and block set to the leftmost leaf node in the tree.
- */
- i = 0;
- xfs_iext_first(ifp, &icur);
-
- /*
- * Loop over all leaf nodes. Copy information to the extent records.
- */
- for (;;) {
- xfs_bmbt_rec_t *frp;
- xfs_fsblock_t nextbno;
- xfs_extnum_t num_recs;
-
- num_recs = xfs_btree_get_numrecs(block);
- if (unlikely(i + num_recs > nextents)) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %Lu, (btree extents).",
- (unsigned long long) ip->i_ino);
- xfs_inode_verifier_error(ip, -EFSCORRUPTED,
- __func__, block, sizeof(*block),
- __this_address);
- error = -EFSCORRUPTED;
- goto out_brelse;
- }
- /*
- * Read-ahead the next leaf block, if any.
- */
- nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
- if (nextbno != NULLFSBLOCK)
- xfs_btree_reada_bufl(mp, nextbno, 1,
- &xfs_bmbt_buf_ops);
- /*
- * Copy records into the extent records.
- */
- frp = XFS_BMBT_REC_ADDR(mp, block, 1);
- for (j = 0; j < num_recs; j++, frp++, i++) {
- xfs_failaddr_t fa;
-
- xfs_bmbt_disk_get_all(frp, &new);
- fa = xfs_bmap_validate_extent(ip, whichfork, &new);
- if (fa) {
- error = -EFSCORRUPTED;
- xfs_inode_verifier_error(ip, error,
- "xfs_iread_extents(2)",
- frp, sizeof(*frp), fa);
- goto out_brelse;
- }
- xfs_iext_insert(ip, &icur, &new, state);
- trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
- xfs_iext_next(ifp, &icur);
- }
- xfs_trans_brelse(tp, bp);
- bno = nextbno;
- /*
- * If we've reached the end, stop.
- */
- if (bno == NULLFSBLOCK)
- break;
- error = xfs_btree_read_bufl(mp, tp, bno, &bp,
- XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
- if (error)
- goto out;
- block = XFS_BUF_TO_BLOCK(bp);
- }
+ ir.loaded = 0;
+ xfs_iext_first(ifp, &ir.icur);
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
+ XFS_BTREE_VISIT_RECORDS, &ir);
+ xfs_btree_del_cursor(cur, error);
+ if (error)
+ goto out;
- if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
+ if (XFS_IS_CORRUPT(mp,
+ ir.loaded != XFS_IFORK_NEXTENTS(ip, whichfork))) {
error = -EFSCORRUPTED;
goto out;
}
- ASSERT(i == xfs_iext_count(ifp));
+ ASSERT(ir.loaded == xfs_iext_count(ifp));
ifp->if_flags |= XFS_IFEXTENTS;
return 0;
-
-out_brelse:
- xfs_trans_brelse(tp, bp);
out:
xfs_iext_destroy(ifp);
return error;
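
The rewrite above replaces the hand-rolled descent to the leftmost leaf with the generic block walker. A minimal sketch of a different per-block callback driven the same way (illustrative only, assuming nothing beyond the helpers already used in the hunk): XFS_BTREE_VISIT_RECORDS asks the walker to call back on the record-bearing leaf blocks, and the callback pulls each block out of the cursor just as xfs_iread_bmbt_block() does.

struct example_count {
	xfs_extnum_t		nr;	/* records seen so far */
};

/* Tally the records in every visited leaf block. */
static int
example_count_bmbt_records(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct example_count	*ec = priv;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, level, &bp);
	ec->nr += xfs_btree_get_numrecs(block);
	return 0;
}

It would be driven exactly like the hunk above: initialise the bmbt cursor with xfs_bmbt_init_cursor(), call xfs_btree_visit_blocks(cur, example_count_bmbt_records, XFS_BTREE_VISIT_RECORDS, &ec), then tear down with xfs_btree_del_cursor(cur, error).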
@@ -1315,8 +1287,7 @@ xfs_bmap_first_unused(
xfs_fileoff_t lowest, max;
int error;
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
+ ASSERT(xfs_ifork_has_extents(ip, whichfork) ||
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
@@ -1372,7 +1343,8 @@ xfs_bmap_last_before(
case XFS_DINODE_FMT_EXTENTS:
break;
default:
- return -EIO;
+ ASSERT(0);
+ return -EFSCORRUPTED;
}
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
@@ -1471,9 +1443,8 @@ xfs_bmap_last_offset(
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
return 0;
- if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
- return -EIO;
+ if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ip, whichfork)))
+ return -EFSCORRUPTED;
error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
if (error || is_empty)
@@ -1650,15 +1621,24 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_delete(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(bma->cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &LEFT);
if (error)
goto done;
@@ -1684,7 +1664,10 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &LEFT);
if (error)
goto done;
@@ -1714,7 +1697,10 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &PREV);
if (error)
goto done;
@@ -1739,11 +1725,17 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
@@ -1774,7 +1766,10 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &LEFT);
if (error)
goto done;
@@ -1795,11 +1790,17 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
@@ -1840,7 +1841,10 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &RIGHT);
if (error)
goto done;
@@ -1872,11 +1876,17 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
@@ -1952,11 +1962,17 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
@@ -1985,11 +2001,8 @@ xfs_bmap_add_extent_delay_real(
}
/* add reverse mapping unless caller opted out */
- if (!(bma->flags & XFS_BMAPI_NORMAP)) {
- error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
- if (error)
- goto done;
- }
+ if (!(bma->flags & XFS_BMAPI_NORMAP))
+ xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
/* convert to a btree if necessary */
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
@@ -2153,19 +2166,34 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &LEFT);
if (error)
goto done;
@@ -2191,13 +2219,22 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &LEFT);
if (error)
goto done;
@@ -2226,13 +2263,22 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
@@ -2255,7 +2301,10 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
@@ -2285,7 +2334,10 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
@@ -2319,14 +2371,20 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
cur->bc_rec.b = *new;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
@@ -2353,7 +2411,10 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
@@ -2387,17 +2448,26 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
@@ -2431,7 +2501,10 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/* new right extent - oldext */
error = xfs_bmbt_update(cur, &r[1]);
if (error)
@@ -2440,7 +2513,10 @@ xfs_bmap_add_extent_unwritten_real(
cur->bc_rec.b = PREV;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/*
* Reset the cursor to the position of the new extent
* we are about to insert as we can't trust it after
@@ -2449,11 +2525,17 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/* new middle extent - newext */
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
@@ -2471,9 +2553,7 @@ xfs_bmap_add_extent_unwritten_real(
}
/* update reverse mappings */
- error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
- if (error)
- goto done;
+ xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
/* convert to a btree if necessary */
if (xfs_bmap_needs_btree(ip, whichfork)) {
@@ -2738,15 +2818,24 @@ xfs_bmap_add_extent_hole_real(
error = xfs_bmbt_lookup_eq(cur, &right, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &left);
if (error)
goto done;
@@ -2772,7 +2861,10 @@ xfs_bmap_add_extent_hole_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &left);
if (error)
goto done;
@@ -2799,7 +2891,10 @@ xfs_bmap_add_extent_hole_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &right);
if (error)
goto done;
@@ -2822,21 +2917,24 @@ xfs_bmap_add_extent_hole_real(
error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
}
/* add reverse mapping unless caller opted out */
- if (!(flags & XFS_BMAPI_NORMAP)) {
- error = xfs_rmap_map_extent(tp, ip, whichfork, new);
- if (error)
- goto done;
- }
+ if (!(flags & XFS_BMAPI_NORMAP))
+ xfs_rmap_map_extent(tp, ip, whichfork, new);
/* convert to a btree if necessary */
if (xfs_bmap_needs_btree(ip, whichfork)) {
@@ -3064,7 +3162,7 @@ xfs_bmap_adjacent(
mp = ap->ip->i_mount;
nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
rt = XFS_IS_REALTIME_INODE(ap->ip) &&
- xfs_alloc_is_userdata(ap->datatype);
+ (ap->datatype & XFS_ALLOC_USERDATA);
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
ap->tp->t_firstblock);
/*
@@ -3417,7 +3515,7 @@ xfs_bmap_btalloc(
if (ap->flags & XFS_BMAPI_COWFORK)
align = xfs_get_cowextsz_hint(ap->ip);
- else if (xfs_alloc_is_userdata(ap->datatype))
+ else if (ap->datatype & XFS_ALLOC_USERDATA)
align = xfs_get_extsz_hint(ap->ip);
if (align) {
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3432,7 +3530,7 @@ xfs_bmap_btalloc(
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
ap->tp->t_firstblock);
if (nullfb) {
- if (xfs_alloc_is_userdata(ap->datatype) &&
+ if ((ap->datatype & XFS_ALLOC_USERDATA) &&
xfs_inode_is_filestream(ap->ip)) {
ag = xfs_filestream_lookup_ag(ap->ip);
ag = (ag != NULLAGNUMBER) ? ag : 0;
@@ -3472,7 +3570,7 @@ xfs_bmap_btalloc(
* enough for the request. If one isn't found, then adjust
* the minimum allocation size to the largest space found.
*/
- if (xfs_alloc_is_userdata(ap->datatype) &&
+ if ((ap->datatype & XFS_ALLOC_USERDATA) &&
xfs_inode_is_filestream(ap->ip))
error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
else
@@ -3506,13 +3604,11 @@ xfs_bmap_btalloc(
args.mod = args.prod - args.mod;
}
/*
- * If we are not low on available data blocks, and the
- * underlying logical volume manager is a stripe, and
- * the file offset is zero then try to allocate data
- * blocks on stripe unit boundary.
- * NOTE: ap->aeof is only set if the allocation length
- * is >= the stripe unit and the allocation offset is
- * at the end of file.
+ * If we are not low on available data blocks, and the underlying
+ * logical volume manager is a stripe, and the file offset is zero then
+ * try to allocate data blocks on stripe unit boundary. NOTE: ap->aeof
+ * is only set if the allocation length is >= the stripe unit and the
+ * allocation offset is at the end of file.
*/
if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
if (!ap->offset) {
@@ -3520,9 +3616,11 @@ xfs_bmap_btalloc(
atype = args.type;
isaligned = 1;
/*
- * Adjust for alignment
+ * Adjust minlen to try to preserve alignment if we
+ * can't guarantee an aligned maxlen extent.
*/
- if (blen > args.alignment && blen <= args.maxlen)
+ if (blen > args.alignment &&
+ blen <= args.maxlen + args.alignment)
args.minlen = blen - args.alignment;
args.minalignslop = 0;
} else {
@@ -3560,8 +3658,6 @@ xfs_bmap_btalloc(
args.wasdel = ap->wasdel;
args.resv = XFS_AG_RESV_NONE;
args.datatype = ap->datatype;
- if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
- args.ip = ap->ip;
error = xfs_alloc_vextent(&args);
if (error)
@@ -3646,20 +3742,6 @@ xfs_bmap_btalloc(
return 0;
}
-/*
- * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
- * It figures out where to ask the underlying allocator to put the new extent.
- */
-STATIC int
-xfs_bmap_alloc(
- struct xfs_bmalloca *ap) /* bmap alloc argument struct */
-{
- if (XFS_IS_REALTIME_INODE(ap->ip) &&
- xfs_alloc_is_userdata(ap->datatype))
- return xfs_bmap_rtalloc(ap);
- return xfs_bmap_btalloc(ap);
-}
-
/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
@@ -3821,11 +3903,8 @@ xfs_bmapi_read(
XFS_BMAPI_COWFORK)));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -3835,15 +3914,28 @@ xfs_bmapi_read(
XFS_STATS_INC(mp, xs_blk_mapr);
ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (!ifp) {
+ /* No CoW fork? Return a hole. */
+ if (whichfork == XFS_COW_FORK) {
+ mval->br_startoff = bno;
+ mval->br_startblock = HOLESTARTBLOCK;
+ mval->br_blockcount = len;
+ mval->br_state = XFS_EXT_NORM;
+ *nmap = 1;
+ return 0;
+ }
- /* No CoW fork? Return a hole. */
- if (whichfork == XFS_COW_FORK && !ifp) {
- mval->br_startoff = bno;
- mval->br_startblock = HOLESTARTBLOCK;
- mval->br_blockcount = len;
- mval->br_state = XFS_EXT_NORM;
- *nmap = 1;
- return 0;
+ /*
+ * A missing attr ifork implies that the inode says we're in
+ * extents or btree format but failed to pass the inode fork
+ * verifier while trying to load it. Treat that as a file
+ * corruption too.
+ */
+#ifdef DEBUG
+ xfs_alert(mp, "%s: inode %llu missing fork %d",
+ __func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+ return -EFSCORRUPTED;
}
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
@@ -4003,6 +4095,39 @@ out_unreserve_quota:
}
static int
+xfs_bmap_alloc_userdata(
+ struct xfs_bmalloca *bma)
+{
+ struct xfs_mount *mp = bma->ip->i_mount;
+ int whichfork = xfs_bmapi_whichfork(bma->flags);
+ int error;
+
+ /*
+ * Set the data type being allocated. For the data fork, the first data
+ * in the file is treated differently to all other allocations. For the
+ * attribute fork, we only need to ensure the allocated range is not on
+ * the busy list.
+ */
+ bma->datatype = XFS_ALLOC_NOBUSY;
+ if (whichfork == XFS_DATA_FORK) {
+ bma->datatype |= XFS_ALLOC_USERDATA;
+ if (bma->offset == 0)
+ bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
+
+ if (mp->m_dalign && bma->length >= mp->m_dalign) {
+ error = xfs_bmap_isaeof(bma, whichfork);
+ if (error)
+ return error;
+ }
+
+ if (XFS_IS_REALTIME_INODE(bma->ip))
+ return xfs_bmap_rtalloc(bma);
+ }
+
+ return xfs_bmap_btalloc(bma);
+}
+
+static int
xfs_bmapi_allocate(
struct xfs_bmalloca *bma)
{
@@ -4021,7 +4146,8 @@ xfs_bmapi_allocate(
if (bma->wasdel) {
bma->length = (xfs_extlen_t)bma->got.br_blockcount;
bma->offset = bma->got.br_startoff;
- xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
+ if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
+ bma->prev.br_startoff = NULLFILEOFF;
} else {
bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
if (!bma->eof)
@@ -4029,43 +4155,24 @@ xfs_bmapi_allocate(
bma->got.br_startoff - bma->offset);
}
- /*
- * Set the data type being allocated. For the data fork, the first data
- * in the file is treated differently to all other allocations. For the
- * attribute fork, we only need to ensure the allocated range is not on
- * the busy list.
- */
- if (!(bma->flags & XFS_BMAPI_METADATA)) {
- bma->datatype = XFS_ALLOC_NOBUSY;
- if (whichfork == XFS_DATA_FORK) {
- if (bma->offset == 0)
- bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
- else
- bma->datatype |= XFS_ALLOC_USERDATA;
- }
- if (bma->flags & XFS_BMAPI_ZERO)
- bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
- }
+ if (bma->flags & XFS_BMAPI_CONTIG)
+ bma->minlen = bma->length;
+ else
+ bma->minlen = 1;
- bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
+ if (bma->flags & XFS_BMAPI_METADATA)
+ error = xfs_bmap_btalloc(bma);
+ else
+ error = xfs_bmap_alloc_userdata(bma);
+ if (error || bma->blkno == NULLFSBLOCK)
+ return error;
- /*
- * Only want to do the alignment at the eof if it is userdata and
- * allocation length is larger than a stripe unit.
- */
- if (mp->m_dalign && bma->length >= mp->m_dalign &&
- !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
- error = xfs_bmap_isaeof(bma, whichfork);
+ if (bma->flags & XFS_BMAPI_ZERO) {
+ error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
if (error)
return error;
}
- error = xfs_bmap_alloc(bma);
- if (error)
- return error;
-
- if (bma->blkno == NULLFSBLOCK)
- return 0;
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
/*
@@ -4305,11 +4412,8 @@ xfs_bmapi_write(
ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -4388,12 +4492,9 @@ xfs_bmapi_write(
* If this is a CoW allocation, record the data in
* the refcount btree for orphan recovery.
*/
- if (whichfork == XFS_COW_FORK) {
- error = xfs_refcount_alloc_cow_extent(tp,
- bma.blkno, bma.length);
- if (error)
- goto error0;
- }
+ if (whichfork == XFS_COW_FORK)
+ xfs_refcount_alloc_cow_extent(tp, bma.blkno,
+ bma.length);
}
/* Deal with the allocated space we found. */
@@ -4452,16 +4553,21 @@ int
xfs_bmapi_convert_delalloc(
struct xfs_inode *ip,
int whichfork,
- xfs_fileoff_t offset_fsb,
- struct xfs_bmbt_irec *imap,
+ xfs_off_t offset,
+ struct iomap *iomap,
unsigned int *seq)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
struct xfs_bmalloca bma = { NULL };
+ u16 flags = 0;
struct xfs_trans *tp;
int error;
+ if (whichfork == XFS_COW_FORK)
+ flags |= IOMAP_F_SHARED;
+
/*
* Space for the extent and indirect blocks was reserved when the
* delalloc extent was created so there's no need to do so here.
@@ -4491,7 +4597,7 @@ xfs_bmapi_convert_delalloc(
* the extent. Just return the real extent at this offset.
*/
if (!isnullstartblock(bma.got.br_startblock)) {
- *imap = bma.got;
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
*seq = READ_ONCE(ifp->if_seq);
goto out_trans_cancel;
}
@@ -4501,7 +4607,6 @@ xfs_bmapi_convert_delalloc(
bma.wasdel = true;
bma.offset = bma.got.br_startoff;
bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
- bma.total = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
if (whichfork == XFS_COW_FORK)
bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
@@ -4517,22 +4622,18 @@ xfs_bmapi_convert_delalloc(
if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
goto out_finish;
error = -EFSCORRUPTED;
- if (WARN_ON_ONCE(!bma.got.br_startblock && !XFS_IS_REALTIME_INODE(ip)))
+ if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
goto out_finish;
XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
XFS_STATS_INC(mp, xs_xstrat_quick);
ASSERT(!isnullstartblock(bma.got.br_startblock));
- *imap = bma.got;
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
*seq = READ_ONCE(ifp->if_seq);
- if (whichfork == XFS_COW_FORK) {
- error = xfs_refcount_alloc_cow_extent(tp, bma.blkno,
- bma.length);
- if (error)
- goto out_finish;
- }
+ if (whichfork == XFS_COW_FORK)
+ xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
whichfork);
@@ -4578,11 +4679,8 @@ xfs_bmapi_remap(
ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -5013,7 +5111,10 @@ xfs_bmap_del_extent_real(
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
if (got.br_startoff == del->br_startoff)
@@ -5037,7 +5138,10 @@ xfs_bmap_del_extent_real(
}
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
break;
case BMAP_LEFT_FILLING:
/*
@@ -5108,7 +5212,10 @@ xfs_bmap_del_extent_real(
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/*
* Update the btree record back
* to the original value.
@@ -5125,7 +5232,10 @@ xfs_bmap_del_extent_real(
error = -ENOSPC;
goto done;
}
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
} else
flags |= xfs_ilog_fext(whichfork);
XFS_IFORK_NEXT_SET(ip, whichfork,
@@ -5136,18 +5246,14 @@ xfs_bmap_del_extent_real(
}
/* remove reverse mapping */
- error = xfs_rmap_unmap_extent(tp, ip, whichfork, del);
- if (error)
- goto done;
+ xfs_rmap_unmap_extent(tp, ip, whichfork, del);
/*
* If we need to, add to list of extents to delete.
*/
if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
- error = xfs_refcount_decrease_extent(tp, del);
- if (error)
- goto done;
+ xfs_refcount_decrease_extent(tp, del);
} else {
__xfs_bmap_add_free(tp, del->br_startblock,
del->br_blockcount, NULL,
@@ -5196,7 +5302,7 @@ __xfs_bunmapi(
int isrt; /* freeing in rt area */
int logflags; /* transaction logging flags */
xfs_extlen_t mod; /* rt extent offset */
- struct xfs_mount *mp; /* mount structure */
+ struct xfs_mount *mp = ip->i_mount;
int tmp_logflags; /* partial logging flags */
int wasdel; /* was a delayed alloc extent */
int whichfork; /* data or attribute fork */
@@ -5213,14 +5319,8 @@ __xfs_bunmapi(
whichfork = xfs_bmapi_whichfork(flags);
ASSERT(whichfork != XFS_COW_FORK);
ifp = XFS_IFORK_PTR(ip, whichfork);
- if (unlikely(
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
- XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
- ip->i_mount);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)))
return -EFSCORRUPTED;
- }
- mp = ip->i_mount;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
@@ -5614,23 +5714,31 @@ xfs_bmse_merge(
error = xfs_bmbt_lookup_eq(cur, got, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_btree_delete(cur, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
/* lookup and update size of the previous extent */
error = xfs_bmbt_lookup_eq(cur, left, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_bmbt_update(cur, &new);
if (error)
return error;
+ /* change to extent format if required after extent removal */
+ error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
+ if (error)
+ return error;
+
done:
xfs_iext_remove(ip, icur, 0);
xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
@@ -5638,12 +5746,11 @@ done:
&new);
/* update reverse mapping. rmap functions merge the rmaps for us */
- error = xfs_rmap_unmap_extent(tp, ip, whichfork, got);
- if (error)
- return error;
+ xfs_rmap_unmap_extent(tp, ip, whichfork, got);
memcpy(&new, got, sizeof(new));
new.br_startoff = left->br_startoff + left->br_blockcount;
- return xfs_rmap_map_extent(tp, ip, whichfork, &new);
+ xfs_rmap_map_extent(tp, ip, whichfork, &new);
+ return 0;
}
static int
@@ -5669,7 +5776,8 @@ xfs_bmap_shift_update_extent(
error = xfs_bmbt_lookup_eq(cur, &prev, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_bmbt_update(cur, got);
if (error)
@@ -5682,10 +5790,9 @@ xfs_bmap_shift_update_extent(
got);
/* update reverse mapping */
- error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
- if (error)
- return error;
- return xfs_rmap_map_extent(tp, ip, whichfork, got);
+ xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
+ xfs_rmap_map_extent(tp, ip, whichfork, got);
+ return 0;
}
int
@@ -5706,11 +5813,8 @@ xfs_bmap_collapse_extents(
int error = 0;
int logflags = 0;
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -5734,8 +5838,10 @@ xfs_bmap_collapse_extents(
*done = true;
goto del_cursor;
}
- XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
- del_cursor);
+ if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
new_startoff = got.br_startoff - offset_shift_fsb;
if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
@@ -5824,11 +5930,8 @@ xfs_bmap_insert_extents(
int error = 0;
int logflags = 0;
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -5861,11 +5964,14 @@ xfs_bmap_insert_extents(
goto del_cursor;
}
}
- XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
- del_cursor);
+ if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
- if (stop_fsb >= got.br_startoff + got.br_blockcount) {
- error = -EIO;
+ if (XFS_IS_CORRUPT(mp,
+ stop_fsb >= got.br_startoff + got.br_blockcount)) {
+ error = -EFSCORRUPTED;
goto del_cursor;
}
@@ -5930,12 +6036,8 @@ xfs_bmap_split_extent_at(
int logflags = 0;
int i = 0;
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -5969,7 +6071,10 @@ xfs_bmap_split_extent_at(
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto del_cursor;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
}
got.br_blockcount = gotblkcnt;
@@ -5994,11 +6099,17 @@ xfs_bmap_split_extent_at(
error = xfs_bmbt_lookup_eq(cur, &new, &i);
if (error)
goto del_cursor;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
error = xfs_btree_insert(cur, &i);
if (error)
goto del_cursor;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
}
/*
@@ -6081,7 +6192,7 @@ __xfs_bmap_add(
bmap->br_blockcount,
bmap->br_state);
- bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
+ bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
INIT_LIST_HEAD(&bi->bi_list);
bi->bi_type = type;
bi->bi_owner = ip;
@@ -6093,29 +6204,29 @@ __xfs_bmap_add(
}
/* Map an extent into a file. */
-int
+void
xfs_bmap_map_extent(
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_bmap_is_update_needed(PREV))
- return 0;
+ return;
- return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
+ __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}
/* Unmap an extent out of a file. */
-int
+void
xfs_bmap_unmap_extent(
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_bmap_is_update_needed(PREV))
- return 0;
+ return;
- return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
+ __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}
/*
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 8f597f9abdbe..14d25e0b7d9c 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -171,11 +171,19 @@ static inline bool xfs_bmap_is_real_extent(struct xfs_bmbt_irec *irec)
!isnullstartblock(irec->br_startblock);
}
+/*
+ * Check the mapping for obviously garbage allocations that could trash the
+ * filesystem immediately.
+ */
+#define xfs_valid_startblock(ip, startblock) \
+ ((startblock) != 0 || XFS_IS_REALTIME_INODE(ip))
+
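
A hypothetical caller applies the helper as in the fragment below (the same shape as the WARN_ON_ONCE check added to xfs_bmapi_convert_delalloc earlier in this patch); on the data device a startblock of zero would overlap the superblock and so can only be garbage, while realtime extents may legitimately start at block zero:

	/* illustrative caller, not part of this patch */
	if (WARN_ON_ONCE(!xfs_valid_startblock(ip, imap->br_startblock)))
		return -EFSCORRUPTED;
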
void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
-void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
+void xfs_bmap_local_to_extents_empty(struct xfs_trans *tp,
+ struct xfs_inode *ip, int whichfork);
void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
xfs_filblks_t len, const struct xfs_owner_info *oinfo,
bool skip_discard);
@@ -220,8 +228,7 @@ int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
int eof);
int xfs_bmapi_convert_delalloc(struct xfs_inode *ip, int whichfork,
- xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap,
- unsigned int *seq);
+ xfs_off_t offset, struct iomap *iomap, unsigned int *seq);
int xfs_bmap_add_extent_unwritten_real(struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork,
struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp,
@@ -254,9 +261,9 @@ int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_inode *ip,
enum xfs_bmap_intent_type type, int whichfork,
xfs_fileoff_t startoff, xfs_fsblock_t startblock,
xfs_filblks_t *blockcount, xfs_exntst_t state);
-int xfs_bmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+void xfs_bmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
struct xfs_bmbt_irec *imap);
-int xfs_bmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+void xfs_bmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
struct xfs_bmbt_irec *imap);
static inline int xfs_bmap_fork_to_state(int whichfork)
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index fbb18ba5d905..ffe608d2a2d9 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -400,8 +400,20 @@ xfs_bmbt_diff_two_keys(
union xfs_btree_key *k1,
union xfs_btree_key *k2)
{
- return (int64_t)be64_to_cpu(k1->bmbt.br_startoff) -
- be64_to_cpu(k2->bmbt.br_startoff);
+ uint64_t a = be64_to_cpu(k1->bmbt.br_startoff);
+ uint64_t b = be64_to_cpu(k2->bmbt.br_startoff);
+
+ /*
+	 * Note: This routine previously cast a and b to int64 and subtracted
+	 * them to generate a result.  This led to problems if b was the
+	 * "maximum" key value (all ones), as the result was signed incorrectly,
+	 * hence this somewhat less efficient version.
+ */
+ if (a > b)
+ return 1;
+ if (b > a)
+ return -1;
+ return 0;
}
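
A standalone demonstration of the problem the comment describes: built as an ordinary userspace program (names are illustrative), the subtraction-based compare reports 0 as greater than the all-ones key, while the explicit three-way compare orders them correctly.

#include <stdint.h>
#include <stdio.h>

static int64_t cmp_by_subtraction(uint64_t a, uint64_t b)
{
	return (int64_t)a - b;		/* wraps when b is very large */
}

static int cmp_three_way(uint64_t a, uint64_t b)
{
	if (a > b)
		return 1;
	if (b > a)
		return -1;
	return 0;
}

int main(void)
{
	uint64_t a = 0, b = UINT64_MAX;	/* b is the "maximum" (all ones) key */

	printf("subtraction: %lld, three-way: %d\n",
	       (long long)cmp_by_subtraction(a, b), cmp_three_way(a, b));
	return 0;
}
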
static xfs_failaddr_t
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index f1048efa4268..e2cc98931552 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -105,11 +105,10 @@ xfs_btree_check_lblock(
xfs_failaddr_t fa;
fa = __xfs_btree_check_lblock(cur, block, level, bp);
- if (unlikely(XFS_TEST_ERROR(fa != NULL, mp,
- XFS_ERRTAG_BTREE_CHECK_LBLOCK))) {
+ if (XFS_IS_CORRUPT(mp, fa != NULL) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK)) {
if (bp)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -169,11 +168,10 @@ xfs_btree_check_sblock(
xfs_failaddr_t fa;
fa = __xfs_btree_check_sblock(cur, block, level, bp);
- if (unlikely(XFS_TEST_ERROR(fa != NULL, mp,
- XFS_ERRTAG_BTREE_CHECK_SBLOCK))) {
+ if (XFS_IS_CORRUPT(mp, fa != NULL) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BTREE_CHECK_SBLOCK)) {
if (bp)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -384,7 +382,7 @@ xfs_btree_del_cursor(
/*
* Free the cursor.
*/
- kmem_zone_free(xfs_btree_cur_zone, cur);
+ kmem_cache_free(xfs_btree_cur_zone, cur);
}
/*
@@ -717,25 +715,6 @@ xfs_btree_get_bufs(
}
/*
- * Check for the cursor referring to the last block at the given level.
- */
-int /* 1=is last block, 0=not last block */
-xfs_btree_islastblock(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level) /* level to check */
-{
- struct xfs_btree_block *block; /* generic btree block pointer */
- xfs_buf_t *bp; /* buffer containing block */
-
- block = xfs_btree_get_block(cur, level, &bp);
- xfs_btree_check_block(cur, block, level, bp);
- if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
- else
- return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
-}
-
-/*
* Change the cursor to point to the first record at the given level.
* Other levels are unaffected.
*/
@@ -1820,6 +1799,7 @@ xfs_btree_lookup_get_block(
out_bad:
*blkp = NULL;
+ xfs_buf_corruption_error(bp);
xfs_trans_brelse(cur->bc_tp, bp);
return -EFSCORRUPTED;
}
@@ -1867,7 +1847,7 @@ xfs_btree_lookup(
XFS_BTREE_STATS_INC(cur, lookup);
/* No such thing as a zero-level tree. */
- if (cur->bc_nlevels == 0)
+ if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0))
return -EFSCORRUPTED;
block = NULL;
@@ -1987,7 +1967,8 @@ xfs_btree_lookup(
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
*stat = 1;
return 0;
}
@@ -2442,7 +2423,10 @@ xfs_btree_lshift(
if (error)
goto error0;
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_decrement(tcur, level, &i);
if (error)
@@ -2609,7 +2593,10 @@ xfs_btree_rshift(
if (error)
goto error0;
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_increment(tcur, level, &i);
if (error)
@@ -3463,7 +3450,10 @@ xfs_btree_insert(
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
level++;
/*
@@ -3867,15 +3857,24 @@ xfs_btree_delrec(
* Actually any entry but the first would suffice.
*/
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_increment(tcur, level, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/* Grab a pointer to the block. */
right = xfs_btree_get_block(tcur, level, &rbp);
@@ -3919,12 +3918,18 @@ xfs_btree_delrec(
rrecs = xfs_btree_get_numrecs(right);
if (!xfs_btree_ptr_is_null(cur, &lptr)) {
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
}
@@ -3938,13 +3943,19 @@ xfs_btree_delrec(
* previous block.
*/
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/* Grab a pointer to the block. */
left = xfs_btree_get_block(tcur, level, &lbp);
@@ -4286,6 +4297,7 @@ int
xfs_btree_visit_blocks(
struct xfs_btree_cur *cur,
xfs_btree_visit_blocks_fn fn,
+ unsigned int flags,
void *data)
{
union xfs_btree_ptr lptr;
@@ -4311,6 +4323,11 @@ xfs_btree_visit_blocks(
/* save for the next iteration of the loop */
xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
+
+ if (!(flags & XFS_BTREE_VISIT_LEAVES))
+ continue;
+ } else if (!(flags & XFS_BTREE_VISIT_RECORDS)) {
+ continue;
}
/* for each buffer in the level */
@@ -4413,7 +4430,7 @@ xfs_btree_change_owner(
bbcoi.buffer_list = buffer_list;
return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
- &bbcoi);
+ XFS_BTREE_VISIT_ALL, &bbcoi);
}
/* Verify the v5 fields of a long-format btree block. */
@@ -4466,8 +4483,6 @@ xfs_btree_lblock_verify(
* btree block
*
* @bp: buffer containing the btree block
- * @max_recs: pointer to the m_*_mxr max records field in the xfs mount
- * @pag_max_level: pointer to the per-ag max level field
*/
xfs_failaddr_t
xfs_btree_sblock_v5hdr_verify(
@@ -4600,7 +4615,7 @@ xfs_btree_simple_query_range(
/* Callback */
error = fn(cur, recp, priv);
- if (error < 0 || error == XFS_BTREE_QUERY_RANGE_ABORT)
+ if (error)
break;
advloop:
@@ -4702,8 +4717,7 @@ pop_up:
*/
if (ldiff >= 0 && hdiff >= 0) {
error = fn(cur, recp, priv);
- if (error < 0 ||
- error == XFS_BTREE_QUERY_RANGE_ABORT)
+ if (error)
break;
} else if (hdiff < 0) {
/* Record is larger than high key; pop. */
@@ -4774,8 +4788,7 @@ out:
* Query a btree for all records overlapping a given interval of keys. The
* supplied function will be called with each record found; return one of the
* XFS_BTREE_QUERY_RANGE_{CONTINUE,ABORT} values or the usual negative error
- * code. This function returns XFS_BTREE_QUERY_RANGE_ABORT, zero, or a
- * negative error code.
+ * code. This function returns -ECANCELED, zero, or a negative error code.
*/
int
xfs_btree_query_range(
@@ -4869,7 +4882,7 @@ xfs_btree_count_blocks(
{
*blocks = 0;
return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
- blocks);
+ XFS_BTREE_VISIT_ALL, blocks);
}
/* Compare two btree pointers. */
@@ -4891,7 +4904,7 @@ xfs_btree_has_record_helper(
union xfs_btree_rec *rec,
void *priv)
{
- return XFS_BTREE_QUERY_RANGE_ABORT;
+ return -ECANCELED;
}
/* Is there a record covering a given range of keys? */
@@ -4906,7 +4919,7 @@ xfs_btree_has_record(
error = xfs_btree_query_range(cur, low, high,
&xfs_btree_has_record_helper, NULL);
- if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
+ if (error == -ECANCELED) {
*exists = true;
return 0;
}
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index fa3cd8ab9aba..fb9b2121c628 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -183,6 +183,9 @@ union xfs_btree_cur_private {
unsigned long nr_ops; /* # record updates */
int shape_changes; /* # of extent splits */
} refc;
+ struct {
+ bool active; /* allocation cursor state */
+ } abt;
};
/*
@@ -315,14 +318,6 @@ xfs_btree_get_bufs(
xfs_agblock_t agbno); /* allocation group block number */
/*
- * Check for the cursor referring to the last block at the given level.
- */
-int /* 1=is last block, 0=not last block */
-xfs_btree_islastblock(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level); /* level to check */
-
-/*
* Compute first and last byte offsets for the fields given.
* Interprets the offsets table, which contains struct field offsets.
*/
@@ -464,9 +459,13 @@ xfs_failaddr_t xfs_btree_lblock_verify(struct xfs_buf *bp,
uint xfs_btree_compute_maxlevels(uint *limits, unsigned long len);
unsigned long long xfs_btree_calc_size(uint *limits, unsigned long long len);
-/* return codes */
-#define XFS_BTREE_QUERY_RANGE_CONTINUE (XFS_ITER_CONTINUE) /* keep iterating */
-#define XFS_BTREE_QUERY_RANGE_ABORT (XFS_ITER_ABORT) /* stop iterating */
+/*
+ * Return codes for the query range iterator function are 0 to continue
+ * iterating, and non-zero to stop iterating. Any non-zero value will be
+ * passed up to the _query_range caller. The special value -ECANCELED can be
+ * used to stop iteration, because _query_range never generates that error
+ * code on its own.
+ */
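
The xfs_btree_has_record() hunk further down shows the intended use of this convention: the callback returns -ECANCELED as soon as a record is seen, and the caller translates that into "the record exists". A self-contained sketch of the same pattern in plain C, with illustrative names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef int (*record_fn)(int rec, void *priv);

static int visit_records(const int *recs, int nrecs, record_fn fn, void *priv)
{
	int i, error;

	for (i = 0; i < nrecs; i++) {
		error = fn(recs[i], priv);
		if (error)
			return error;	/* any non-zero value stops the walk */
	}
	return 0;
}

static int stop_on_match(int rec, void *priv)
{
	if (rec == *(int *)priv)
		return -ECANCELED;	/* found it; abort the iteration */
	return 0;
}

int main(void)
{
	int recs[] = { 3, 7, 42, 9 };
	int wanted = 42;
	int error = visit_records(recs, 4, stop_on_match, &wanted);
	bool exists = false;

	if (error == -ECANCELED) {	/* early abort means it was found */
		exists = true;
		error = 0;
	}
	printf("exists=%d error=%d\n", exists, error);
	return 0;
}
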
typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
union xfs_btree_rec *rec, void *priv);
@@ -478,8 +477,15 @@ int xfs_btree_query_all(struct xfs_btree_cur *cur, xfs_btree_query_range_fn fn,
typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
void *data);
+/* Visit record blocks. */
+#define XFS_BTREE_VISIT_RECORDS (1 << 0)
+/* Visit leaf blocks. */
+#define XFS_BTREE_VISIT_LEAVES (1 << 1)
+/* Visit all blocks. */
+#define XFS_BTREE_VISIT_ALL (XFS_BTREE_VISIT_RECORDS | \
+ XFS_BTREE_VISIT_LEAVES)
int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
- xfs_btree_visit_blocks_fn fn, void *data);
+ xfs_btree_visit_blocks_fn fn, unsigned int flags, void *data);
int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_extlen_t *blocks);
@@ -510,4 +516,21 @@ int xfs_btree_has_record(struct xfs_btree_cur *cur, union xfs_btree_irec *low,
union xfs_btree_irec *high, bool *exists);
bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
+/* Does this cursor point to the last block in the given level? */
+static inline bool
+xfs_btree_islastblock(
+ xfs_btree_cur_t *cur,
+ int level)
+{
+ struct xfs_btree_block *block;
+ struct xfs_buf *bp;
+
+ block = xfs_btree_get_block(cur, level, &bp);
+ ASSERT(block && xfs_btree_check_block(cur, block, level, bp) == 0);
+
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
+ return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
+}
+
#endif /* __XFS_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index d1c77fd0815d..8c3eafe280ed 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -12,9 +12,9 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
-#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
@@ -107,7 +107,66 @@ xfs_da_state_free(xfs_da_state_t *state)
#ifdef DEBUG
memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
- kmem_zone_free(xfs_da_state_zone, state);
+ kmem_cache_free(xfs_da_state_zone, state);
+}
+
+static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
+{
+ if (whichfork == XFS_DATA_FORK)
+ return mp->m_dir_geo->fsbcount;
+ return mp->m_attr_geo->fsbcount;
+}
+
+void
+xfs_da3_node_hdr_from_disk(
+ struct xfs_mount *mp,
+ struct xfs_da3_icnode_hdr *to,
+ struct xfs_da_intnode *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from;
+
+ to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
+ to->back = be32_to_cpu(from3->hdr.info.hdr.back);
+ to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
+ to->count = be16_to_cpu(from3->hdr.__count);
+ to->level = be16_to_cpu(from3->hdr.__level);
+ to->btree = from3->__btree;
+ ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
+ } else {
+ to->forw = be32_to_cpu(from->hdr.info.forw);
+ to->back = be32_to_cpu(from->hdr.info.back);
+ to->magic = be16_to_cpu(from->hdr.info.magic);
+ to->count = be16_to_cpu(from->hdr.__count);
+ to->level = be16_to_cpu(from->hdr.__level);
+ to->btree = from->__btree;
+ ASSERT(to->magic == XFS_DA_NODE_MAGIC);
+ }
+}
+
+void
+xfs_da3_node_hdr_to_disk(
+ struct xfs_mount *mp,
+ struct xfs_da_intnode *to,
+ struct xfs_da3_icnode_hdr *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to;
+
+ ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
+ to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
+ to3->hdr.info.hdr.back = cpu_to_be32(from->back);
+ to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
+ to3->hdr.__count = cpu_to_be16(from->count);
+ to3->hdr.__level = cpu_to_be16(from->level);
+ } else {
+ ASSERT(from->magic == XFS_DA_NODE_MAGIC);
+ to->hdr.info.forw = cpu_to_be32(from->forw);
+ to->hdr.info.back = cpu_to_be32(from->back);
+ to->hdr.info.magic = cpu_to_be16(from->magic);
+ to->hdr.__count = cpu_to_be16(from->count);
+ to->hdr.__level = cpu_to_be16(from->level);
+ }
}
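
These two helpers follow the usual on-disk/in-core split: the disk format keeps its fields big-endian, the incore header is host-endian, and the conversion happens in one place at the boundary. A standalone round-trip sketch of the same idea, using glibc's <endian.h> and made-up field names:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct disk_hdr {			/* as read from / written to disk */
	uint32_t forw_be;
	uint32_t back_be;
	uint16_t magic_be;
	uint16_t count_be;
};

struct icore_hdr {			/* what the rest of the code uses */
	uint32_t forw;
	uint32_t back;
	uint16_t magic;
	uint16_t count;
};

static void hdr_from_disk(struct icore_hdr *to, const struct disk_hdr *from)
{
	to->forw = be32toh(from->forw_be);
	to->back = be32toh(from->back_be);
	to->magic = be16toh(from->magic_be);
	to->count = be16toh(from->count_be);
}

static void hdr_to_disk(struct disk_hdr *to, const struct icore_hdr *from)
{
	to->forw_be = htobe32(from->forw);
	to->back_be = htobe32(from->back);
	to->magic_be = htobe16(from->magic);
	to->count_be = htobe16(from->count);
}

int main(void)
{
	struct icore_hdr in = { .forw = 7, .back = 3, .magic = 0xfebe, .count = 2 };
	struct disk_hdr ondisk;
	struct icore_hdr out;

	hdr_to_disk(&ondisk, &in);
	hdr_from_disk(&out, &ondisk);
	printf("round trip ok: %d\n",
	       in.forw == out.forw && in.back == out.back &&
	       in.magic == out.magic && in.count == out.count);
	return 0;
}
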
/*
@@ -145,12 +204,9 @@ xfs_da3_node_verify(
struct xfs_mount *mp = bp->b_mount;
struct xfs_da_intnode *hdr = bp->b_addr;
struct xfs_da3_icnode_hdr ichdr;
- const struct xfs_dir_ops *ops;
xfs_failaddr_t fa;
- ops = xfs_dir_get_ops(mp, NULL);
-
- ops->node_hdr_from_disk(&ichdr, hdr);
+ xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);
fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
if (fa)
@@ -275,46 +331,76 @@ const struct xfs_buf_ops xfs_da3_node_buf_ops = {
.verify_struct = xfs_da3_node_verify_struct,
};
+static int
+xfs_da3_node_set_type(
+ struct xfs_trans *tp,
+ struct xfs_buf *bp)
+{
+ struct xfs_da_blkinfo *info = bp->b_addr;
+
+ switch (be16_to_cpu(info->magic)) {
+ case XFS_DA_NODE_MAGIC:
+ case XFS_DA3_NODE_MAGIC:
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
+ return 0;
+ case XFS_ATTR_LEAF_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_ATTR_LEAF_BUF);
+ return 0;
+ case XFS_DIR2_LEAFN_MAGIC:
+ case XFS_DIR3_LEAFN_MAGIC:
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
+ return 0;
+ default:
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp,
+ info, sizeof(*info));
+ xfs_trans_brelse(tp, bp);
+ return -EFSCORRUPTED;
+ }
+}
+
int
xfs_da3_node_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
+ struct xfs_buf **bpp,
+ int whichfork)
+{
+ int error;
+
+ error = xfs_da_read_buf(tp, dp, bno, 0, bpp, whichfork,
+ &xfs_da3_node_buf_ops);
+ if (error || !*bpp || !tp)
+ return error;
+ return xfs_da3_node_set_type(tp, *bpp);
+}
+
+int
+xfs_da3_node_read_mapped(
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
- int which_fork)
+ int whichfork)
{
- int err;
+ struct xfs_mount *mp = dp->i_mount;
+ int error;
- err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
- which_fork, &xfs_da3_node_buf_ops);
- if (!err && tp && *bpp) {
- struct xfs_da_blkinfo *info = (*bpp)->b_addr;
- int type;
+ error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno,
+ XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0,
+ bpp, &xfs_da3_node_buf_ops);
+ if (error || !*bpp)
+ return error;
- switch (be16_to_cpu(info->magic)) {
- case XFS_DA_NODE_MAGIC:
- case XFS_DA3_NODE_MAGIC:
- type = XFS_BLFT_DA_NODE_BUF;
- break;
- case XFS_ATTR_LEAF_MAGIC:
- case XFS_ATTR3_LEAF_MAGIC:
- type = XFS_BLFT_ATTR_LEAF_BUF;
- break;
- case XFS_DIR2_LEAFN_MAGIC:
- case XFS_DIR3_LEAFN_MAGIC:
- type = XFS_BLFT_DIR_LEAFN_BUF;
- break;
- default:
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
- tp->t_mountp, info, sizeof(*info));
- xfs_trans_brelse(tp, *bpp);
- *bpp = NULL;
- return -EFSCORRUPTED;
- }
- xfs_trans_buf_set_type(tp, *bpp, type);
- }
- return err;
+ if (whichfork == XFS_ATTR_FORK)
+ xfs_buf_set_ref(*bpp, XFS_ATTR_BTREE_REF);
+ else
+ xfs_buf_set_ref(*bpp, XFS_DIR_BTREE_REF);
+
+ if (!tp)
+ return 0;
+ return xfs_da3_node_set_type(tp, *bpp);
}
/*========================================================================
@@ -343,7 +429,7 @@ xfs_da3_node_create(
trace_xfs_da_node_create(args);
ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
- error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
+ error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork);
if (error)
return error;
bp->b_ops = &xfs_da3_node_buf_ops;
@@ -363,9 +449,9 @@ xfs_da3_node_create(
}
ichdr.level = level;
- dp->d_ops->node_hdr_to_disk(node, &ichdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
xfs_trans_log_buf(tp, bp,
- XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
+ XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));
*bpp = bp;
return 0;
@@ -487,10 +573,8 @@ xfs_da3_split(
ASSERT(state->path.active == 0);
oldblk = &state->path.blk[0];
error = xfs_da3_root_split(state, oldblk, addblk);
- if (error) {
- addblk->bp = NULL;
- return error; /* GROT: dir is inconsistent */
- }
+ if (error)
+ goto out;
/*
* Update pointers to the node which used to be block 0 and just got
@@ -505,7 +589,11 @@ xfs_da3_split(
*/
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
- ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+ if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+ xfs_buf_corruption_error(oldblk->bp);
+ error = -EFSCORRUPTED;
+ goto out;
+ }
node = addblk->bp->b_addr;
node->hdr.info.back = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, addblk->bp,
@@ -514,15 +602,20 @@ xfs_da3_split(
}
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
- ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+ if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+ xfs_buf_corruption_error(oldblk->bp);
+ error = -EFSCORRUPTED;
+ goto out;
+ }
node = addblk->bp->b_addr;
node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, addblk->bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
+out:
addblk->bp = NULL;
- return 0;
+ return error;
}
/*
@@ -563,7 +656,7 @@ xfs_da3_root_split(
dp = args->dp;
tp = args->trans;
- error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
+ error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork);
if (error)
return error;
node = bp->b_addr;
@@ -572,8 +665,8 @@ xfs_da3_root_split(
oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
struct xfs_da3_icnode_hdr icnodehdr;
- dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
- btree = dp->d_ops->node_tree_p(oldroot);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
+ btree = icnodehdr.btree;
size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
level = icnodehdr.level;
@@ -584,15 +677,14 @@ xfs_da3_root_split(
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
} else {
struct xfs_dir3_icleaf_hdr leafhdr;
- struct xfs_dir2_leaf_entry *ents;
leaf = (xfs_dir2_leaf_t *)oldroot;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
- size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
+ size = (int)((char *)&leafhdr.ents[leafhdr.count] -
+ (char *)leaf);
level = 0;
/*
@@ -632,14 +724,14 @@ xfs_da3_root_split(
return error;
node = bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
btree[0].hashval = cpu_to_be32(blk1->hashval);
btree[0].before = cpu_to_be32(blk1->blkno);
btree[1].hashval = cpu_to_be32(blk2->hashval);
btree[1].before = cpu_to_be32(blk2->blkno);
nodehdr.count = 2;
- dp->d_ops->node_hdr_to_disk(node, &nodehdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
#ifdef DEBUG
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
@@ -681,7 +773,7 @@ xfs_da3_node_split(
trace_xfs_da_node_split(state->args);
node = oldblk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
/*
* With V2 dirs the extra block is data or freespace.
@@ -728,7 +820,7 @@ xfs_da3_node_split(
* If we had double-split op below us, then add the extra block too.
*/
node = oldblk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
if (oldblk->index <= nodehdr.count) {
oldblk->index++;
xfs_da3_node_add(state, oldblk, addblk);
@@ -783,10 +875,10 @@ xfs_da3_node_rebalance(
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
- dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
+ btree1 = nodehdr1.btree;
+ btree2 = nodehdr2.btree;
/*
* Figure out how many entries need to move, and in which direction.
@@ -799,10 +891,10 @@ xfs_da3_node_rebalance(
tmpnode = node1;
node1 = node2;
node2 = tmpnode;
- dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
- dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
+ btree1 = nodehdr1.btree;
+ btree2 = nodehdr2.btree;
swap = 1;
}
@@ -864,14 +956,15 @@ xfs_da3_node_rebalance(
/*
* Log header of node 1 and all current bits of node 2.
*/
- dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1);
xfs_trans_log_buf(tp, blk1->bp,
- XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
+ XFS_DA_LOGRANGE(node1, &node1->hdr,
+ state->args->geo->node_hdr_size));
- dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2);
xfs_trans_log_buf(tp, blk2->bp,
XFS_DA_LOGRANGE(node2, &node2->hdr,
- dp->d_ops->node_hdr_size +
+ state->args->geo->node_hdr_size +
(sizeof(btree2[0]) * nodehdr2.count)));
/*
@@ -881,10 +974,10 @@ xfs_da3_node_rebalance(
if (swap) {
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
- dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
+ btree1 = nodehdr1.btree;
+ btree2 = nodehdr2.btree;
}
blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
@@ -916,8 +1009,8 @@ xfs_da3_node_add(
trace_xfs_da_node_add(state->args);
node = oldblk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
ASSERT(newblk->blkno != 0);
@@ -940,9 +1033,10 @@ xfs_da3_node_add(
tmp + sizeof(*btree)));
nodehdr.count += 1;
- dp->d_ops->node_hdr_to_disk(node, &nodehdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
xfs_trans_log_buf(state->args->trans, oldblk->bp,
- XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
+ XFS_DA_LOGRANGE(node, &node->hdr,
+ state->args->geo->node_hdr_size));
/*
* Copy the last hash value from the oldblk to propagate upwards.
@@ -1077,7 +1171,6 @@ xfs_da3_root_join(
xfs_dablk_t child;
struct xfs_buf *bp;
struct xfs_da3_icnode_hdr oldroothdr;
- struct xfs_da_node_entry *btree;
int error;
struct xfs_inode *dp = state->args->dp;
@@ -1087,7 +1180,7 @@ xfs_da3_root_join(
args = state->args;
oldroot = root_blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot);
ASSERT(oldroothdr.forw == 0);
ASSERT(oldroothdr.back == 0);
@@ -1101,11 +1194,9 @@ xfs_da3_root_join(
* Read in the (only) child block, then copy those bytes into
* the root block's buffer and free the original child block.
*/
- btree = dp->d_ops->node_tree_p(oldroot);
- child = be32_to_cpu(btree[0].before);
+ child = be32_to_cpu(oldroothdr.btree[0].before);
ASSERT(child != 0);
- error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
- args->whichfork);
+ error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork);
if (error)
return error;
xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
@@ -1167,7 +1258,7 @@ xfs_da3_node_toosmall(
blk = &state->path.blk[ state->path.active-1 ];
info = blk->bp->b_addr;
node = (xfs_da_intnode_t *)info;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
*action = 0; /* blk over 50%, don't try to join */
return 0; /* blk over 50%, don't try to join */
@@ -1219,13 +1310,13 @@ xfs_da3_node_toosmall(
blkno = nodehdr.back;
if (blkno == 0)
continue;
- error = xfs_da3_node_read(state->args->trans, dp,
- blkno, -1, &bp, state->args->whichfork);
+ error = xfs_da3_node_read(state->args->trans, dp, blkno, &bp,
+ state->args->whichfork);
if (error)
return error;
node = bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&thdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
xfs_trans_brelse(state->args->trans, bp);
if (count - thdr.count >= 0)
@@ -1267,18 +1358,14 @@ xfs_da3_node_lasthash(
struct xfs_buf *bp,
int *count)
{
- struct xfs_da_intnode *node;
- struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
- node = bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
if (count)
*count = nodehdr.count;
if (!nodehdr.count)
return 0;
- btree = dp->d_ops->node_tree_p(node);
- return be32_to_cpu(btree[nodehdr.count - 1].hashval);
+ return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
}
/*
@@ -1323,8 +1410,8 @@ xfs_da3_fixhashpath(
struct xfs_da3_icnode_hdr nodehdr;
node = blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
break;
blk->hashval = lasthash;
@@ -1355,7 +1442,7 @@ xfs_da3_node_remove(
trace_xfs_da_node_remove(state->args);
node = drop_blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
ASSERT(drop_blk->index < nodehdr.count);
ASSERT(drop_blk->index >= 0);
@@ -1363,7 +1450,7 @@ xfs_da3_node_remove(
* Copy over the offending entry, or just zero it out.
*/
index = drop_blk->index;
- btree = dp->d_ops->node_tree_p(node);
+ btree = nodehdr.btree;
if (index < nodehdr.count - 1) {
tmp = nodehdr.count - index - 1;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
@@ -1376,9 +1463,9 @@ xfs_da3_node_remove(
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
nodehdr.count -= 1;
- dp->d_ops->node_hdr_to_disk(node, &nodehdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
- XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
+ XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));
/*
* Copy the last hash value from the block to propagate upwards.
@@ -1411,10 +1498,10 @@ xfs_da3_node_unbalance(
drop_node = drop_blk->bp->b_addr;
save_node = save_blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
- dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
- drop_btree = dp->d_ops->node_tree_p(drop_node);
- save_btree = dp->d_ops->node_tree_p(save_node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
+ drop_btree = drop_hdr.btree;
+ save_btree = save_hdr.btree;
tp = state->args->trans;
/*
@@ -1448,10 +1535,10 @@ xfs_da3_node_unbalance(
memcpy(&save_btree[sindex], &drop_btree[0], tmp);
save_hdr.count += drop_hdr.count;
- dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr);
xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_node->hdr,
- dp->d_ops->node_hdr_size));
+ state->args->geo->node_hdr_size));
/*
* Save the last hashval in the remaining block for upward propagation.
@@ -1512,7 +1599,7 @@ xfs_da3_node_lookup_int(
*/
blk->blkno = blkno;
error = xfs_da3_node_read(args->trans, args->dp, blkno,
- -1, &blk->bp, args->whichfork);
+ &blk->bp, args->whichfork);
if (error) {
blk->blkno = 0;
state->path.active--;
@@ -1536,8 +1623,10 @@ xfs_da3_node_lookup_int(
break;
}
- if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
+ if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
+ xfs_buf_corruption_error(blk->bp);
return -EFSCORRUPTED;
+ }
blk->magic = XFS_DA_NODE_MAGIC;
@@ -1545,19 +1634,22 @@ xfs_da3_node_lookup_int(
* Search an intermediate node for a match.
*/
node = blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
/* Tree taller than we can handle; bail out! */
- if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
+ if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
+ xfs_buf_corruption_error(blk->bp);
return -EFSCORRUPTED;
+ }
/* Check the level from the root. */
if (blkno == args->geo->leafblk)
expected_level = nodehdr.level - 1;
- else if (expected_level != nodehdr.level)
+ else if (expected_level != nodehdr.level) {
+ xfs_buf_corruption_error(blk->bp);
return -EFSCORRUPTED;
- else
+ } else
expected_level--;
max = nodehdr.count;
@@ -1607,11 +1699,11 @@ xfs_da3_node_lookup_int(
}
/* We can't point back to the root. */
- if (blkno == args->geo->leafblk)
+ if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk))
return -EFSCORRUPTED;
}
- if (expected_level != 0)
+ if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
return -EFSCORRUPTED;
/*
@@ -1673,10 +1765,10 @@ xfs_da3_node_order(
node1 = node1_bp->b_addr;
node2 = node2_bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
- dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2);
+ btree1 = node1hdr.btree;
+ btree2 = node2hdr.btree;
if (node1hdr.count > 0 && node2hdr.count > 0 &&
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
@@ -1741,7 +1833,7 @@ xfs_da3_blk_link(
if (old_info->back) {
error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->back),
- -1, &bp, args->whichfork);
+ &bp, args->whichfork);
if (error)
return error;
ASSERT(bp != NULL);
@@ -1762,7 +1854,7 @@ xfs_da3_blk_link(
if (old_info->forw) {
error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->forw),
- -1, &bp, args->whichfork);
+ &bp, args->whichfork);
if (error)
return error;
ASSERT(bp != NULL);
@@ -1821,7 +1913,7 @@ xfs_da3_blk_unlink(
if (drop_info->back) {
error = xfs_da3_node_read(args->trans, args->dp,
be32_to_cpu(drop_info->back),
- -1, &bp, args->whichfork);
+ &bp, args->whichfork);
if (error)
return error;
ASSERT(bp != NULL);
@@ -1838,7 +1930,7 @@ xfs_da3_blk_unlink(
if (drop_info->forw) {
error = xfs_da3_node_read(args->trans, args->dp,
be32_to_cpu(drop_info->forw),
- -1, &bp, args->whichfork);
+ &bp, args->whichfork);
if (error)
return error;
ASSERT(bp != NULL);
@@ -1873,7 +1965,6 @@ xfs_da3_path_shift(
{
struct xfs_da_state_blk *blk;
struct xfs_da_blkinfo *info;
- struct xfs_da_intnode *node;
struct xfs_da_args *args;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
@@ -1896,17 +1987,16 @@ xfs_da3_path_shift(
ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
level = (path->active-1) - 1; /* skip bottom layer in path */
for (blk = &path->blk[level]; level >= 0; blk--, level--) {
- node = blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
+ blk->bp->b_addr);
if (forward && (blk->index < nodehdr.count - 1)) {
blk->index++;
- blkno = be32_to_cpu(btree[blk->index].before);
+ blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
break;
} else if (!forward && (blk->index > 0)) {
blk->index--;
- blkno = be32_to_cpu(btree[blk->index].before);
+ blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
break;
}
}
@@ -1924,7 +2014,7 @@ xfs_da3_path_shift(
/*
* Read the next child block into a local buffer.
*/
- error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
+ error = xfs_da3_node_read(args->trans, dp, blkno, &bp,
args->whichfork);
if (error)
return error;
@@ -1957,9 +2047,9 @@ xfs_da3_path_shift(
case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC:
blk->magic = XFS_DA_NODE_MAGIC;
- node = (xfs_da_intnode_t *)info;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
+ bp->b_addr);
+ btree = nodehdr.btree;
blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
if (forward)
blk->index = 0;
@@ -2039,18 +2129,6 @@ xfs_da_compname(
XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
-static xfs_dahash_t
-xfs_default_hashname(
- struct xfs_name *name)
-{
- return xfs_da_hashname(name->name, name->len);
-}
-
-const struct xfs_nameops xfs_default_nameops = {
- .hashname = xfs_default_hashname,
- .compname = xfs_da_compname
-};
-
int
xfs_da_grow_inode_int(
struct xfs_da_args *args,
@@ -2093,7 +2171,7 @@ xfs_da_grow_inode_int(
* If we didn't get it and the block might work if fragmented,
* try without the CONTIG flag. Loop until we get it all.
*/
- mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
+ mapp = kmem_alloc(sizeof(*mapp) * count, 0);
for (b = *bno, mapi = 0; b < *bno + count; ) {
nmap = min(XFS_BMAP_MAX_NMAP, count);
c = (int)(*bno + count - b);
@@ -2208,16 +2286,13 @@ xfs_da3_swap_lastblock(
error = xfs_bmap_last_before(tp, dp, &lastoff, w);
if (error)
return error;
- if (unlikely(lastoff == 0)) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
- mp);
+ if (XFS_IS_CORRUPT(mp, lastoff == 0))
return -EFSCORRUPTED;
- }
/*
* Read the last block in the btree space.
*/
last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
- error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
+ error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w);
if (error)
return error;
/*
@@ -2235,16 +2310,17 @@ xfs_da3_swap_lastblock(
struct xfs_dir2_leaf_entry *ents;
dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
- ents = dp->d_ops->leaf_ents_p(dead_leaf2);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr,
+ dead_leaf2);
+ ents = leafhdr.ents;
dead_level = 0;
dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
} else {
struct xfs_da3_icnode_hdr deadhdr;
dead_node = (xfs_da_intnode_t *)dead_info;
- dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
- btree = dp->d_ops->node_tree_p(dead_node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
+ btree = deadhdr.btree;
dead_level = deadhdr.level;
dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
}
@@ -2253,15 +2329,13 @@ xfs_da3_swap_lastblock(
* If the moved block has a left sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->back))) {
- error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
+ error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
- if (unlikely(
- be32_to_cpu(sib_info->forw) != last_blkno ||
- sib_info->magic != dead_info->magic)) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp,
+ be32_to_cpu(sib_info->forw) != last_blkno ||
+ sib_info->magic != dead_info->magic)) {
error = -EFSCORRUPTED;
goto done;
}
@@ -2275,15 +2349,13 @@ xfs_da3_swap_lastblock(
* If the moved block has a right sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
- error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
+ error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
- if (unlikely(
- be32_to_cpu(sib_info->back) != last_blkno ||
- sib_info->magic != dead_info->magic)) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp,
+ be32_to_cpu(sib_info->back) != last_blkno ||
+ sib_info->magic != dead_info->magic)) {
error = -EFSCORRUPTED;
goto done;
}
@@ -2299,27 +2371,24 @@ xfs_da3_swap_lastblock(
* Walk down the tree looking for the parent of the moved block.
*/
for (;;) {
- error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
+ error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
- dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
- if (level >= 0 && level != par_hdr.level + 1) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
- XFS_ERRLEVEL_LOW, mp);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
+ if (XFS_IS_CORRUPT(mp,
+ level >= 0 && level != par_hdr.level + 1)) {
error = -EFSCORRUPTED;
goto done;
}
level = par_hdr.level;
- btree = dp->d_ops->node_tree_p(par_node);
+ btree = par_hdr.btree;
for (entno = 0;
entno < par_hdr.count &&
be32_to_cpu(btree[entno].hashval) < dead_hash;
entno++)
continue;
- if (entno == par_hdr.count) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) {
error = -EFSCORRUPTED;
goto done;
}
@@ -2344,24 +2413,20 @@ xfs_da3_swap_lastblock(
par_blkno = par_hdr.forw;
xfs_trans_brelse(tp, par_buf);
par_buf = NULL;
- if (unlikely(par_blkno == 0)) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, par_blkno == 0)) {
error = -EFSCORRUPTED;
goto done;
}
- error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
+ error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
- dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
- if (par_hdr.level != level) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
- XFS_ERRLEVEL_LOW, mp);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
+ if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
error = -EFSCORRUPTED;
goto done;
}
- btree = dp->d_ops->node_tree_p(par_node);
+ btree = par_hdr.btree;
entno = 0;
}
/*
@@ -2424,159 +2489,84 @@ xfs_da_shrink_inode(
return error;
}
-/*
- * See if the mapping(s) for this btree block are valid, i.e.
- * don't contain holes, are logically contiguous, and cover the whole range.
- */
-STATIC int
-xfs_da_map_covers_blocks(
- int nmap,
- xfs_bmbt_irec_t *mapp,
- xfs_dablk_t bno,
- int count)
-{
- int i;
- xfs_fileoff_t off;
-
- for (i = 0, off = bno; i < nmap; i++) {
- if (mapp[i].br_startblock == HOLESTARTBLOCK ||
- mapp[i].br_startblock == DELAYSTARTBLOCK) {
- return 0;
- }
- if (off != mapp[i].br_startoff) {
- return 0;
- }
- off += mapp[i].br_blockcount;
- }
- return off == bno + count;
-}
-
-/*
- * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
- *
- * For the single map case, it is assumed that the caller has provided a pointer
- * to a valid xfs_buf_map. For the multiple map case, this function will
- * allocate the xfs_buf_map to hold all the maps and replace the caller's single
- * map pointer with the allocated map.
- */
static int
-xfs_buf_map_from_irec(
- struct xfs_mount *mp,
+xfs_dabuf_map(
+ struct xfs_inode *dp,
+ xfs_dablk_t bno,
+ unsigned int flags,
+ int whichfork,
struct xfs_buf_map **mapp,
- int *nmaps,
- struct xfs_bmbt_irec *irecs,
- int nirecs)
+ int *nmaps)
{
- struct xfs_buf_map *map;
- int i;
-
- ASSERT(*nmaps == 1);
- ASSERT(nirecs >= 1);
+ struct xfs_mount *mp = dp->i_mount;
+ int nfsb = xfs_dabuf_nfsb(mp, whichfork);
+ struct xfs_bmbt_irec irec, *irecs = &irec;
+ struct xfs_buf_map *map = *mapp;
+ xfs_fileoff_t off = bno;
+ int error = 0, nirecs, i;
+
+ if (nfsb > 1)
+ irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);
+
+ nirecs = nfsb;
+ error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
+ xfs_bmapi_aflag(whichfork));
+ if (error)
+ goto out_free_irecs;
+ /*
+ * Use the caller-provided map for the single map case, else allocate a
+ * larger one that needs to be freed by the caller.
+ */
if (nirecs > 1) {
- map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
- KM_SLEEP | KM_NOFS);
+ map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
if (!map)
- return -ENOMEM;
+ goto out_free_irecs;
*mapp = map;
}
- *nmaps = nirecs;
- map = *mapp;
- for (i = 0; i < *nmaps; i++) {
- ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
- irecs[i].br_startblock != HOLESTARTBLOCK);
+ for (i = 0; i < nirecs; i++) {
+ if (irecs[i].br_startblock == HOLESTARTBLOCK ||
+ irecs[i].br_startblock == DELAYSTARTBLOCK)
+ goto invalid_mapping;
+ if (off != irecs[i].br_startoff)
+ goto invalid_mapping;
+
map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
+ off += irecs[i].br_blockcount;
}
- return 0;
-}
-
-/*
- * Map the block we are given ready for reading. There are three possible return
- * values:
- * -1 - will be returned if we land in a hole and mappedbno == -2 so the
- * caller knows not to execute a subsequent read.
- * 0 - if we mapped the block successfully
- * >0 - positive error number if there was an error.
- */
-static int
-xfs_dabuf_map(
- struct xfs_inode *dp,
- xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
- int whichfork,
- struct xfs_buf_map **map,
- int *nmaps)
-{
- struct xfs_mount *mp = dp->i_mount;
- int nfsb;
- int error = 0;
- struct xfs_bmbt_irec irec;
- struct xfs_bmbt_irec *irecs = &irec;
- int nirecs;
-
- ASSERT(map && *map);
- ASSERT(*nmaps == 1);
- if (whichfork == XFS_DATA_FORK)
- nfsb = mp->m_dir_geo->fsbcount;
- else
- nfsb = mp->m_attr_geo->fsbcount;
+ if (off != bno + nfsb)
+ goto invalid_mapping;
- /*
- * Caller doesn't have a mapping. -2 means don't complain
- * if we land in a hole.
- */
- if (mappedbno == -1 || mappedbno == -2) {
- /*
- * Optimize the one-block case.
- */
- if (nfsb != 1)
- irecs = kmem_zalloc(sizeof(irec) * nfsb,
- KM_SLEEP | KM_NOFS);
+ *nmaps = nirecs;
+out_free_irecs:
+ if (irecs != &irec)
+ kmem_free(irecs);
+ return error;
- nirecs = nfsb;
- error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
- &nirecs, xfs_bmapi_aflag(whichfork));
- if (error)
- goto out;
- } else {
- irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
- irecs->br_startoff = (xfs_fileoff_t)bno;
- irecs->br_blockcount = nfsb;
- irecs->br_state = 0;
- nirecs = 1;
- }
+invalid_mapping:
+ /* Caller ok with no mapping. */
+ if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) {
+ error = -EFSCORRUPTED;
+ if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
+ xfs_alert(mp, "%s: bno %u inode %llu",
+ __func__, bno, dp->i_ino);
- if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
- error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
- if (unlikely(error == -EFSCORRUPTED)) {
- if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
- int i;
- xfs_alert(mp, "%s: bno %lld dir: inode %lld",
- __func__, (long long)bno,
- (long long)dp->i_ino);
- for (i = 0; i < *nmaps; i++) {
- xfs_alert(mp,
+ for (i = 0; i < nirecs; i++) {
+ xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
- i,
- (long long)irecs[i].br_startoff,
- (long long)irecs[i].br_startblock,
- (long long)irecs[i].br_blockcount,
- irecs[i].br_state);
- }
+ i, irecs[i].br_startoff,
+ irecs[i].br_startblock,
+ irecs[i].br_blockcount,
+ irecs[i].br_state);
}
- XFS_ERROR_REPORT("xfs_da_do_buf(1)",
- XFS_ERRLEVEL_LOW, mp);
}
- goto out;
+ } else {
+ *nmaps = 0;
}
- error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
-out:
- if (irecs != &irec)
- kmem_free(irecs);
- return error;
+ goto out_free_irecs;
}
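
For readers following the rewritten xfs_dabuf_map() above, here is a minimal userspace sketch of its allocation pattern, assuming nothing beyond standard C: the common single-fsb case works entirely on a stack record, and only multi-block da blocks allocate a temporary extent array that is released on the shared exit path. Names below are illustrative, not kernel APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct irec { long startoff, startblock, blockcount; };

static int map_dablock(int nfsb)
{
	struct irec single, *irecs = &single;
	int error = 0;
	int i;

	if (nfsb > 1) {			/* multi-block da blocks only */
		irecs = calloc(nfsb, sizeof(*irecs));
		if (!irecs)
			return -ENOMEM;
	}

	/* ... read the extent map and translate it to buffer maps ... */
	for (i = 0; i < nfsb; i++)
		irecs[i].blockcount = 1;

	if (irecs != &single)		/* the single case never allocated */
		free(irecs);
	return error;
}

int main(void)
{
	printf("%d %d\n", map_dablock(1), map_dablock(8));
	return 0;
}
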
/*
@@ -2584,37 +2574,28 @@ out:
*/
int
xfs_da_get_buf(
- struct xfs_trans *trans,
+ struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
int whichfork)
{
+ struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *bp;
- struct xfs_buf_map map;
- struct xfs_buf_map *mapp;
- int nmap;
+ struct xfs_buf_map map, *mapp = &map;
+ int nmap = 1;
int error;
*bpp = NULL;
- mapp = &map;
- nmap = 1;
- error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
- &mapp, &nmap);
- if (error) {
- /* mapping a hole is not an error, but we don't continue */
- if (error == -1)
- error = 0;
+ error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, &nmap);
+ if (error || nmap == 0)
goto out_free;
- }
- bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
- mapp, nmap, 0);
+ bp = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0);
error = bp ? bp->b_error : -EIO;
if (error) {
if (bp)
- xfs_trans_brelse(trans, bp);
+ xfs_trans_brelse(tp, bp);
goto out_free;
}
@@ -2632,35 +2613,27 @@ out_free:
*/
int
xfs_da_read_buf(
- struct xfs_trans *trans,
+ struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
+ unsigned int flags,
struct xfs_buf **bpp,
int whichfork,
const struct xfs_buf_ops *ops)
{
+ struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *bp;
- struct xfs_buf_map map;
- struct xfs_buf_map *mapp;
- int nmap;
+ struct xfs_buf_map map, *mapp = &map;
+ int nmap = 1;
int error;
*bpp = NULL;
- mapp = &map;
- nmap = 1;
- error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
- &mapp, &nmap);
- if (error) {
- /* mapping a hole is not an error, but we don't continue */
- if (error == -1)
- error = 0;
+ error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
+ if (error || !nmap)
goto out_free;
- }
- error = xfs_trans_read_buf_map(dp->i_mount, trans,
- dp->i_mount->m_ddev_targp,
- mapp, nmap, 0, &bp, ops);
+ error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0,
+ &bp, ops);
if (error)
goto out_free;
@@ -2683,7 +2656,7 @@ int
xfs_da_reada_buf(
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
+ unsigned int flags,
int whichfork,
const struct xfs_buf_ops *ops)
{
@@ -2694,16 +2667,10 @@ xfs_da_reada_buf(
mapp = &map;
nmap = 1;
- error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
- &mapp, &nmap);
- if (error) {
- /* mapping a hole is not an error, but we don't continue */
- if (error == -1)
- error = 0;
+ error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
+ if (error || !nmap)
goto out_free;
- }
- mappedbno = mapp[0].bm_bn;
xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
out_free:
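
The three callers above now pass a flags word instead of the old mappedbno values of -1/-2. As a rough sketch of the new contract — inferred from the hunks in this diff, not taken verbatim from the kernel — a hole is reported as nmap == 0 when the caller passes the hole-OK flag and as -EFSCORRUPTED otherwise, so callers simply check "if (error || !nmap)":

#include <stdio.h>

#define DABUF_MAP_HOLE_OK	(1 << 0)	/* mirrors XFS_DABUF_MAP_HOLE_OK */
#define EFSCORRUPTED		117		/* placeholder errno for the sketch */

static int map_block(int block_is_hole, unsigned int flags, int *nmap)
{
	*nmap = 1;
	if (block_is_hole) {
		if (!(flags & DABUF_MAP_HOLE_OK))
			return -EFSCORRUPTED;
		*nmap = 0;		/* caller sees "nothing to read" */
	}
	return 0;
}

int main(void)
{
	int nmap, error;

	error = map_block(1, DABUF_MAP_HOLE_OK, &nmap);
	printf("hole, flag set: error=%d nmap=%d\n", error, nmap);
	error = map_block(1, 0, &nmap);
	printf("hole, no flag:  error=%d nmap=%d\n", error, nmap);
	return 0;
}
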
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index 84dd865b6c3d..e16610d1c14f 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -10,7 +10,6 @@
struct xfs_inode;
struct xfs_trans;
struct zone;
-struct xfs_dir_ops;
/*
* Directory/attribute geometry information. There will be one of these for each
@@ -18,15 +17,23 @@ struct xfs_dir_ops;
* structures will be attached to the xfs_mount.
*/
struct xfs_da_geometry {
- int blksize; /* da block size in bytes */
- int fsbcount; /* da block size in filesystem blocks */
+ unsigned int blksize; /* da block size in bytes */
+ unsigned int fsbcount; /* da block size in filesystem blocks */
uint8_t fsblog; /* log2 of _filesystem_ block size */
uint8_t blklog; /* log2 of da block size */
- uint node_ents; /* # of entries in a danode */
- int magicpct; /* 37% of block size in bytes */
+ unsigned int node_hdr_size; /* danode header size in bytes */
+ unsigned int node_ents; /* # of entries in a danode */
+ unsigned int magicpct; /* 37% of block size in bytes */
xfs_dablk_t datablk; /* blockno of dir data v2 */
+ unsigned int leaf_hdr_size; /* dir2 leaf header size */
+ unsigned int leaf_max_ents; /* # of entries in dir2 leaf */
xfs_dablk_t leafblk; /* blockno of leaf data v2 */
+ unsigned int free_hdr_size; /* dir2 free header size */
+ unsigned int free_max_bests; /* # of bests entries in dir2 free */
xfs_dablk_t freeblk; /* blockno of free data v2 */
+
+ xfs_dir2_data_aoff_t data_first_offset;
+ size_t data_entry_offset;
};
/*========================================================================
@@ -81,13 +88,15 @@ typedef struct xfs_da_args {
#define XFS_DA_OP_ADDNAME 0x0004 /* this is an add operation */
#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */
#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */
+#define XFS_DA_OP_ALLOCVAL 0x0020 /* lookup to alloc buffer if found */
#define XFS_DA_OP_FLAGS \
{ XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \
{ XFS_DA_OP_RENAME, "RENAME" }, \
{ XFS_DA_OP_ADDNAME, "ADDNAME" }, \
{ XFS_DA_OP_OKNOENT, "OKNOENT" }, \
- { XFS_DA_OP_CILOOKUP, "CILOOKUP" }
+ { XFS_DA_OP_CILOOKUP, "CILOOKUP" }, \
+ { XFS_DA_OP_ALLOCVAL, "ALLOCVAL" }
/*
* Storage for holding state during Btree searches and split/join ops.
@@ -123,6 +132,25 @@ typedef struct xfs_da_state {
} xfs_da_state_t;
/*
+ * In-core version of the node header to abstract the differences in the v2 and
+ * v3 disk format of the headers. Callers need to convert to/from disk format as
+ * appropriate.
+ */
+struct xfs_da3_icnode_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t level;
+
+ /*
+ * Pointer to the on-disk format entries, which are behind the
+ * variable size (v4 vs v5) header in the on-disk block.
+ */
+ struct xfs_da_node_entry *btree;
+};
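
As a small, self-contained illustration of the in-core/on-disk split this structure formalizes — assuming an invented on-disk layout and using ntohl/ntohs as stand-ins for be32_to_cpu/be16_to_cpu — a from_disk helper decodes everything into native-endian fields once, and the rest of the code only touches the in-core copy:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohl/ntohs stand in for be32_to_cpu/be16_to_cpu */

struct disk_node_hdr { uint32_t forw, back; uint16_t magic, count, level; };
struct icnode_hdr    { uint32_t forw, back; uint16_t magic, count, level; };

static void node_hdr_from_disk(struct icnode_hdr *to,
			       const struct disk_node_hdr *from)
{
	to->forw  = ntohl(from->forw);
	to->back  = ntohl(from->back);
	to->magic = ntohs(from->magic);
	to->count = ntohs(from->count);
	to->level = ntohs(from->level);
}

int main(void)
{
	struct disk_node_hdr d = {
		htonl(7), htonl(3), htons(0x1234), htons(12), htons(1),
	};
	struct icnode_hdr ic;

	node_hdr_from_disk(&ic, &d);
	printf("forw=%u back=%u count=%u level=%u\n",
	       ic.forw, ic.back, ic.count, ic.level);
	return 0;
}
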
+
+/*
* Utility macros to aid in logging changed structure fields.
*/
#define XFS_DA_LOGOFF(BASE, ADDR) ((char *)(ADDR) - (char *)(BASE))
@@ -130,16 +158,6 @@ typedef struct xfs_da_state {
(uint)(XFS_DA_LOGOFF(BASE, ADDR)), \
(uint)(XFS_DA_LOGOFF(BASE, ADDR)+(SIZE)-1)
-/*
- * Name ops for directory and/or attr name operations
- */
-struct xfs_nameops {
- xfs_dahash_t (*hashname)(struct xfs_name *);
- enum xfs_dacmp (*compname)(struct xfs_da_args *,
- const unsigned char *, int);
-};
-
-
/*========================================================================
* Function prototypes.
*========================================================================*/
@@ -170,25 +188,28 @@ int xfs_da3_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
int xfs_da3_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
xfs_da_state_blk_t *new_blk);
int xfs_da3_node_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mappedbno,
- struct xfs_buf **bpp, int which_fork);
+ xfs_dablk_t bno, struct xfs_buf **bpp, int whichfork);
+int xfs_da3_node_read_mapped(struct xfs_trans *tp, struct xfs_inode *dp,
+ xfs_daddr_t mappedbno, struct xfs_buf **bpp,
+ int whichfork);
/*
* Utility routines.
*/
+
+#define XFS_DABUF_MAP_HOLE_OK (1 << 0)
+
int xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno);
int xfs_da_grow_inode_int(struct xfs_da_args *args, xfs_fileoff_t *bno,
int count);
int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mappedbno,
- struct xfs_buf **bp, int whichfork);
+ xfs_dablk_t bno, struct xfs_buf **bp, int whichfork);
int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mappedbno,
- struct xfs_buf **bpp, int whichfork,
- const struct xfs_buf_ops *ops);
+ xfs_dablk_t bno, unsigned int flags, struct xfs_buf **bpp,
+ int whichfork, const struct xfs_buf_ops *ops);
int xfs_da_reada_buf(struct xfs_inode *dp, xfs_dablk_t bno,
- xfs_daddr_t mapped_bno, int whichfork,
- const struct xfs_buf_ops *ops);
+ unsigned int flags, int whichfork,
+ const struct xfs_buf_ops *ops);
int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
struct xfs_buf *dead_buf);
@@ -200,7 +221,11 @@ enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
xfs_da_state_t *xfs_da_state_alloc(void);
void xfs_da_state_free(xfs_da_state_t *state);
+void xfs_da3_node_hdr_from_disk(struct xfs_mount *mp,
+ struct xfs_da3_icnode_hdr *to, struct xfs_da_intnode *from);
+void xfs_da3_node_hdr_to_disk(struct xfs_mount *mp,
+ struct xfs_da_intnode *to, struct xfs_da3_icnode_hdr *from);
+
extern struct kmem_zone *xfs_da_state_zone;
-extern const struct xfs_nameops xfs_default_nameops;
#endif /* __XFS_DA_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_da_format.c b/fs/xfs/libxfs/xfs_da_format.c
deleted file mode 100644
index b1ae572496b6..000000000000
--- a/fs/xfs/libxfs/xfs_da_format.c
+++ /dev/null
@@ -1,888 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
- * Copyright (c) 2013 Red Hat, Inc.
- * All Rights Reserved.
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_shared.h"
-#include "xfs_format.h"
-#include "xfs_log_format.h"
-#include "xfs_trans_resv.h"
-#include "xfs_mount.h"
-#include "xfs_inode.h"
-#include "xfs_dir2.h"
-
-/*
- * Shortform directory ops
- */
-static int
-xfs_dir2_sf_entsize(
- struct xfs_dir2_sf_hdr *hdr,
- int len)
-{
- int count = sizeof(struct xfs_dir2_sf_entry); /* namelen + offset */
-
- count += len; /* name */
- count += hdr->i8count ? XFS_INO64_SIZE : XFS_INO32_SIZE; /* ino # */
- return count;
-}
-
-static int
-xfs_dir3_sf_entsize(
- struct xfs_dir2_sf_hdr *hdr,
- int len)
-{
- return xfs_dir2_sf_entsize(hdr, len) + sizeof(uint8_t);
-}
-
-static struct xfs_dir2_sf_entry *
-xfs_dir2_sf_nextentry(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return (struct xfs_dir2_sf_entry *)
- ((char *)sfep + xfs_dir2_sf_entsize(hdr, sfep->namelen));
-}
-
-static struct xfs_dir2_sf_entry *
-xfs_dir3_sf_nextentry(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return (struct xfs_dir2_sf_entry *)
- ((char *)sfep + xfs_dir3_sf_entsize(hdr, sfep->namelen));
-}
-
-
-/*
- * For filetype enabled shortform directories, the file type field is stored at
- * the end of the name. Because it's only a single byte, endian conversion is
- * not necessary. For non-filetype enable directories, the type is always
- * unknown and we never store the value.
- */
-static uint8_t
-xfs_dir2_sfe_get_ftype(
- struct xfs_dir2_sf_entry *sfep)
-{
- return XFS_DIR3_FT_UNKNOWN;
-}
-
-static void
-xfs_dir2_sfe_put_ftype(
- struct xfs_dir2_sf_entry *sfep,
- uint8_t ftype)
-{
- ASSERT(ftype < XFS_DIR3_FT_MAX);
-}
-
-static uint8_t
-xfs_dir3_sfe_get_ftype(
- struct xfs_dir2_sf_entry *sfep)
-{
- uint8_t ftype;
-
- ftype = sfep->name[sfep->namelen];
- if (ftype >= XFS_DIR3_FT_MAX)
- return XFS_DIR3_FT_UNKNOWN;
- return ftype;
-}
-
-static void
-xfs_dir3_sfe_put_ftype(
- struct xfs_dir2_sf_entry *sfep,
- uint8_t ftype)
-{
- ASSERT(ftype < XFS_DIR3_FT_MAX);
-
- sfep->name[sfep->namelen] = ftype;
-}
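
The comment above describes where the file type byte lives in a filetype-enabled short-form entry. The following standalone sketch (with a made-up entry struct and FT_MAX value) shows the same "single byte right after the name" convention; endianness never matters because it is one byte:

#include <stdio.h>
#include <string.h>

#define FT_MAX 9	/* placeholder for XFS_DIR3_FT_MAX */

struct sf_entry { unsigned char namelen; unsigned char name[32]; };

static unsigned char get_ftype(const struct sf_entry *e)
{
	unsigned char ft = e->name[e->namelen];

	return ft >= FT_MAX ? 0 : ft;	/* treat out-of-range as unknown */
}

static void put_ftype(struct sf_entry *e, unsigned char ft)
{
	e->name[e->namelen] = ft;	/* byte immediately after the name */
}

int main(void)
{
	struct sf_entry e = { .namelen = 3 };

	memcpy(e.name, "foo", 3);
	put_ftype(&e, 2);
	printf("ftype=%u\n", get_ftype(&e));
	return 0;
}
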
-
-/*
- * Inode numbers in short-form directories can come in two versions,
- * either 4 bytes or 8 bytes wide. These helpers deal with the
- * two forms transparently by looking at the headers i8count field.
- *
- * For 64-bit inode number the most significant byte must be zero.
- */
-static xfs_ino_t
-xfs_dir2_sf_get_ino(
- struct xfs_dir2_sf_hdr *hdr,
- uint8_t *from)
-{
- if (hdr->i8count)
- return get_unaligned_be64(from) & 0x00ffffffffffffffULL;
- else
- return get_unaligned_be32(from);
-}
-
-static void
-xfs_dir2_sf_put_ino(
- struct xfs_dir2_sf_hdr *hdr,
- uint8_t *to,
- xfs_ino_t ino)
-{
- ASSERT((ino & 0xff00000000000000ULL) == 0);
-
- if (hdr->i8count)
- put_unaligned_be64(ino, to);
- else
- put_unaligned_be32(ino, to);
-}
-
-static xfs_ino_t
-xfs_dir2_sf_get_parent_ino(
- struct xfs_dir2_sf_hdr *hdr)
-{
- return xfs_dir2_sf_get_ino(hdr, hdr->parent);
-}
-
-static void
-xfs_dir2_sf_put_parent_ino(
- struct xfs_dir2_sf_hdr *hdr,
- xfs_ino_t ino)
-{
- xfs_dir2_sf_put_ino(hdr, hdr->parent, ino);
-}
-
-/*
- * In short-form directory entries the inode numbers are stored at variable
- * offset behind the entry name. If the entry stores a filetype value, then it
- * sits between the name and the inode number. Hence the inode numbers may only
- * be accessed through the helpers below.
- */
-static xfs_ino_t
-xfs_dir2_sfe_get_ino(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return xfs_dir2_sf_get_ino(hdr, &sfep->name[sfep->namelen]);
-}
-
-static void
-xfs_dir2_sfe_put_ino(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep,
- xfs_ino_t ino)
-{
- xfs_dir2_sf_put_ino(hdr, &sfep->name[sfep->namelen], ino);
-}
-
-static xfs_ino_t
-xfs_dir3_sfe_get_ino(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return xfs_dir2_sf_get_ino(hdr, &sfep->name[sfep->namelen + 1]);
-}
-
-static void
-xfs_dir3_sfe_put_ino(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep,
- xfs_ino_t ino)
-{
- xfs_dir2_sf_put_ino(hdr, &sfep->name[sfep->namelen + 1], ino);
-}
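
The helpers removed above implement the 4-byte/8-byte short-form inode packing the earlier comment describes. A hedged userspace sketch of the same idea — byte-by-byte big-endian access instead of the kernel's get_unaligned_be* helpers — makes the 56-bit mask that enforces a zero top byte in the 8-byte form explicit:

#include <stdint.h>
#include <stdio.h>

static uint64_t get_ino(const uint8_t *p, int i8count)
{
	uint64_t v = 0;
	int n = i8count ? 8 : 4;
	int i;

	for (i = 0; i < n; i++)		/* big-endian, unaligned */
		v = (v << 8) | p[i];
	return i8count ? (v & 0x00ffffffffffffffULL) : v;
}

static void put_ino(uint8_t *p, int i8count, uint64_t ino)
{
	int n = i8count ? 8 : 4;
	int i;

	for (i = n - 1; i >= 0; i--) {
		p[i] = ino & 0xff;
		ino >>= 8;
	}
}

int main(void)
{
	uint8_t buf[8];

	put_ino(buf, 1, 0x0012345678abcdefULL);
	printf("0x%llx\n", (unsigned long long)get_ino(buf, 1));
	put_ino(buf, 0, 1234567);
	printf("%llu\n", (unsigned long long)get_ino(buf, 0));
	return 0;
}
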
-
-
-/*
- * Directory data block operations
- */
-
-/*
- * For special situations, the dirent size ends up fixed because we always know
- * what the size of the entry is. That's true for the "." and "..", and
- * therefore we know that they are a fixed size and hence their offsets are
- * constant, as is the first entry.
- *
- * Hence, this calculation is written as a macro to be able to be calculated at
- * compile time and so certain offsets can be calculated directly in the
- * structure initaliser via the macro. There are two macros - one for dirents
- * with ftype and without so there are no unresolvable conditionals in the
- * calculations. We also use round_up() as XFS_DIR2_DATA_ALIGN is always a power
- * of 2 and the compiler doesn't reject it (unlike roundup()).
- */
-#define XFS_DIR2_DATA_ENTSIZE(n) \
- round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) + \
- sizeof(xfs_dir2_data_off_t)), XFS_DIR2_DATA_ALIGN)
-
-#define XFS_DIR3_DATA_ENTSIZE(n) \
- round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) + \
- sizeof(xfs_dir2_data_off_t) + sizeof(uint8_t)), \
- XFS_DIR2_DATA_ALIGN)
-
-static int
-xfs_dir2_data_entsize(
- int n)
-{
- return XFS_DIR2_DATA_ENTSIZE(n);
-}
-
-static int
-xfs_dir3_data_entsize(
- int n)
-{
- return XFS_DIR3_DATA_ENTSIZE(n);
-}
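
The entry-size macros being removed here do nothing more than the rounded-up sum the comment above describes. A worked standalone example follows; the 9-byte prefix (8-byte inumber plus 1-byte namelen) is assumed here for illustration rather than taken from the on-disk header:

#include <stdio.h>

#define DATA_ALIGN	8
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static int entsize(int namelen, int has_ftype)
{
	int fixed = 8 + 1;	/* inumber + namelen before the name bytes */

	return ROUND_UP(fixed + namelen + 2 /* tag */ + (has_ftype ? 1 : 0),
			DATA_ALIGN);
}

int main(void)
{
	/* "." and ".." have fixed sizes, which is why their offsets can be
	 * computed at compile time in the original macros. */
	printf("dot:    %d\n", entsize(1, 1));
	printf("dotdot: %d\n", entsize(2, 1));
	printf("name=9: %d\n", entsize(9, 0));
	return 0;
}
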
-
-static uint8_t
-xfs_dir2_data_get_ftype(
- struct xfs_dir2_data_entry *dep)
-{
- return XFS_DIR3_FT_UNKNOWN;
-}
-
-static void
-xfs_dir2_data_put_ftype(
- struct xfs_dir2_data_entry *dep,
- uint8_t ftype)
-{
- ASSERT(ftype < XFS_DIR3_FT_MAX);
-}
-
-static uint8_t
-xfs_dir3_data_get_ftype(
- struct xfs_dir2_data_entry *dep)
-{
- uint8_t ftype = dep->name[dep->namelen];
-
- if (ftype >= XFS_DIR3_FT_MAX)
- return XFS_DIR3_FT_UNKNOWN;
- return ftype;
-}
-
-static void
-xfs_dir3_data_put_ftype(
- struct xfs_dir2_data_entry *dep,
- uint8_t type)
-{
- ASSERT(type < XFS_DIR3_FT_MAX);
- ASSERT(dep->namelen != 0);
-
- dep->name[dep->namelen] = type;
-}
-
-/*
- * Pointer to an entry's tag word.
- */
-static __be16 *
-xfs_dir2_data_entry_tag_p(
- struct xfs_dir2_data_entry *dep)
-{
- return (__be16 *)((char *)dep +
- xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16));
-}
-
-static __be16 *
-xfs_dir3_data_entry_tag_p(
- struct xfs_dir2_data_entry *dep)
-{
- return (__be16 *)((char *)dep +
- xfs_dir3_data_entsize(dep->namelen) - sizeof(__be16));
-}
-
-/*
- * location of . and .. in data space (always block 0)
- */
-static struct xfs_dir2_data_entry *
-xfs_dir2_data_dot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_data_dotdot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR2_DATA_ENTSIZE(1));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_data_first_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR2_DATA_ENTSIZE(1) +
- XFS_DIR2_DATA_ENTSIZE(2));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_ftype_data_dotdot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_ftype_data_first_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1) +
- XFS_DIR3_DATA_ENTSIZE(2));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir3_data_dot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir3_data_dotdot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir3_data_first_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1) +
- XFS_DIR3_DATA_ENTSIZE(2));
-}
-
-static struct xfs_dir2_data_free *
-xfs_dir2_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
-{
- return hdr->bestfree;
-}
-
-static struct xfs_dir2_data_free *
-xfs_dir3_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
-{
- return ((struct xfs_dir3_data_hdr *)hdr)->best_free;
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_data_entry_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
-}
-
-static struct xfs_dir2_data_unused *
-xfs_dir2_data_unused_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_unused *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir3_data_entry_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
-}
-
-static struct xfs_dir2_data_unused *
-xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_unused *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
-}
-
-
-/*
- * Directory Leaf block operations
- */
-static int
-xfs_dir2_max_leaf_ents(struct xfs_da_geometry *geo)
-{
- return (geo->blksize - sizeof(struct xfs_dir2_leaf_hdr)) /
- (uint)sizeof(struct xfs_dir2_leaf_entry);
-}
-
-static struct xfs_dir2_leaf_entry *
-xfs_dir2_leaf_ents_p(struct xfs_dir2_leaf *lp)
-{
- return lp->__ents;
-}
-
-static int
-xfs_dir3_max_leaf_ents(struct xfs_da_geometry *geo)
-{
- return (geo->blksize - sizeof(struct xfs_dir3_leaf_hdr)) /
- (uint)sizeof(struct xfs_dir2_leaf_entry);
-}
-
-static struct xfs_dir2_leaf_entry *
-xfs_dir3_leaf_ents_p(struct xfs_dir2_leaf *lp)
-{
- return ((struct xfs_dir3_leaf *)lp)->__ents;
-}
-
-static void
-xfs_dir2_leaf_hdr_from_disk(
- struct xfs_dir3_icleaf_hdr *to,
- struct xfs_dir2_leaf *from)
-{
- to->forw = be32_to_cpu(from->hdr.info.forw);
- to->back = be32_to_cpu(from->hdr.info.back);
- to->magic = be16_to_cpu(from->hdr.info.magic);
- to->count = be16_to_cpu(from->hdr.count);
- to->stale = be16_to_cpu(from->hdr.stale);
-
- ASSERT(to->magic == XFS_DIR2_LEAF1_MAGIC ||
- to->magic == XFS_DIR2_LEAFN_MAGIC);
-}
-
-static void
-xfs_dir2_leaf_hdr_to_disk(
- struct xfs_dir2_leaf *to,
- struct xfs_dir3_icleaf_hdr *from)
-{
- ASSERT(from->magic == XFS_DIR2_LEAF1_MAGIC ||
- from->magic == XFS_DIR2_LEAFN_MAGIC);
-
- to->hdr.info.forw = cpu_to_be32(from->forw);
- to->hdr.info.back = cpu_to_be32(from->back);
- to->hdr.info.magic = cpu_to_be16(from->magic);
- to->hdr.count = cpu_to_be16(from->count);
- to->hdr.stale = cpu_to_be16(from->stale);
-}
-
-static void
-xfs_dir3_leaf_hdr_from_disk(
- struct xfs_dir3_icleaf_hdr *to,
- struct xfs_dir2_leaf *from)
-{
- struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)from;
-
- to->forw = be32_to_cpu(hdr3->info.hdr.forw);
- to->back = be32_to_cpu(hdr3->info.hdr.back);
- to->magic = be16_to_cpu(hdr3->info.hdr.magic);
- to->count = be16_to_cpu(hdr3->count);
- to->stale = be16_to_cpu(hdr3->stale);
-
- ASSERT(to->magic == XFS_DIR3_LEAF1_MAGIC ||
- to->magic == XFS_DIR3_LEAFN_MAGIC);
-}
-
-static void
-xfs_dir3_leaf_hdr_to_disk(
- struct xfs_dir2_leaf *to,
- struct xfs_dir3_icleaf_hdr *from)
-{
- struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)to;
-
- ASSERT(from->magic == XFS_DIR3_LEAF1_MAGIC ||
- from->magic == XFS_DIR3_LEAFN_MAGIC);
-
- hdr3->info.hdr.forw = cpu_to_be32(from->forw);
- hdr3->info.hdr.back = cpu_to_be32(from->back);
- hdr3->info.hdr.magic = cpu_to_be16(from->magic);
- hdr3->count = cpu_to_be16(from->count);
- hdr3->stale = cpu_to_be16(from->stale);
-}
-
-
-/*
- * Directory/Attribute Node block operations
- */
-static struct xfs_da_node_entry *
-xfs_da2_node_tree_p(struct xfs_da_intnode *dap)
-{
- return dap->__btree;
-}
-
-static struct xfs_da_node_entry *
-xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
-{
- return ((struct xfs_da3_intnode *)dap)->__btree;
-}
-
-static void
-xfs_da2_node_hdr_from_disk(
- struct xfs_da3_icnode_hdr *to,
- struct xfs_da_intnode *from)
-{
- ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
- to->forw = be32_to_cpu(from->hdr.info.forw);
- to->back = be32_to_cpu(from->hdr.info.back);
- to->magic = be16_to_cpu(from->hdr.info.magic);
- to->count = be16_to_cpu(from->hdr.__count);
- to->level = be16_to_cpu(from->hdr.__level);
-}
-
-static void
-xfs_da2_node_hdr_to_disk(
- struct xfs_da_intnode *to,
- struct xfs_da3_icnode_hdr *from)
-{
- ASSERT(from->magic == XFS_DA_NODE_MAGIC);
- to->hdr.info.forw = cpu_to_be32(from->forw);
- to->hdr.info.back = cpu_to_be32(from->back);
- to->hdr.info.magic = cpu_to_be16(from->magic);
- to->hdr.__count = cpu_to_be16(from->count);
- to->hdr.__level = cpu_to_be16(from->level);
-}
-
-static void
-xfs_da3_node_hdr_from_disk(
- struct xfs_da3_icnode_hdr *to,
- struct xfs_da_intnode *from)
-{
- struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from;
-
- ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
- to->forw = be32_to_cpu(hdr3->info.hdr.forw);
- to->back = be32_to_cpu(hdr3->info.hdr.back);
- to->magic = be16_to_cpu(hdr3->info.hdr.magic);
- to->count = be16_to_cpu(hdr3->__count);
- to->level = be16_to_cpu(hdr3->__level);
-}
-
-static void
-xfs_da3_node_hdr_to_disk(
- struct xfs_da_intnode *to,
- struct xfs_da3_icnode_hdr *from)
-{
- struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;
-
- ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
- hdr3->info.hdr.forw = cpu_to_be32(from->forw);
- hdr3->info.hdr.back = cpu_to_be32(from->back);
- hdr3->info.hdr.magic = cpu_to_be16(from->magic);
- hdr3->__count = cpu_to_be16(from->count);
- hdr3->__level = cpu_to_be16(from->level);
-}
-
-
-/*
- * Directory free space block operations
- */
-static int
-xfs_dir2_free_max_bests(struct xfs_da_geometry *geo)
-{
- return (geo->blksize - sizeof(struct xfs_dir2_free_hdr)) /
- sizeof(xfs_dir2_data_off_t);
-}
-
-static __be16 *
-xfs_dir2_free_bests_p(struct xfs_dir2_free *free)
-{
- return (__be16 *)((char *)free + sizeof(struct xfs_dir2_free_hdr));
-}
-
-/*
- * Convert data space db to the corresponding free db.
- */
-static xfs_dir2_db_t
-xfs_dir2_db_to_fdb(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
- return xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET) +
- (db / xfs_dir2_free_max_bests(geo));
-}
-
-/*
- * Convert data space db to the corresponding index in a free db.
- */
-static int
-xfs_dir2_db_to_fdindex(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
- return db % xfs_dir2_free_max_bests(geo);
-}
-
-static int
-xfs_dir3_free_max_bests(struct xfs_da_geometry *geo)
-{
- return (geo->blksize - sizeof(struct xfs_dir3_free_hdr)) /
- sizeof(xfs_dir2_data_off_t);
-}
-
-static __be16 *
-xfs_dir3_free_bests_p(struct xfs_dir2_free *free)
-{
- return (__be16 *)((char *)free + sizeof(struct xfs_dir3_free_hdr));
-}
-
-/*
- * Convert data space db to the corresponding free db.
- */
-static xfs_dir2_db_t
-xfs_dir3_db_to_fdb(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
- return xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET) +
- (db / xfs_dir3_free_max_bests(geo));
-}
-
-/*
- * Convert data space db to the corresponding index in a free db.
- */
-static int
-xfs_dir3_db_to_fdindex(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
- return db % xfs_dir3_free_max_bests(geo);
-}
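
The two conversions removed above are plain div/mod arithmetic over the per-block bests count. A tiny numeric illustration, with placeholder values for the free-region base and the bests-per-block count, follows:

#include <stdio.h>

int main(void)
{
	unsigned int max_bests = 2040;	/* e.g. (blksize - hdr) / 2, assumed */
	unsigned int free_base = 1000;	/* placeholder for FREE_OFFSET's db */
	unsigned int db;

	for (db = 0; db < 5000; db += 2048)
		printf("db %4u -> fdb %u, index %u\n",
		       db, free_base + db / max_bests, db % max_bests);
	return 0;
}
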
-
-static void
-xfs_dir2_free_hdr_from_disk(
- struct xfs_dir3_icfree_hdr *to,
- struct xfs_dir2_free *from)
-{
- to->magic = be32_to_cpu(from->hdr.magic);
- to->firstdb = be32_to_cpu(from->hdr.firstdb);
- to->nvalid = be32_to_cpu(from->hdr.nvalid);
- to->nused = be32_to_cpu(from->hdr.nused);
- ASSERT(to->magic == XFS_DIR2_FREE_MAGIC);
-}
-
-static void
-xfs_dir2_free_hdr_to_disk(
- struct xfs_dir2_free *to,
- struct xfs_dir3_icfree_hdr *from)
-{
- ASSERT(from->magic == XFS_DIR2_FREE_MAGIC);
-
- to->hdr.magic = cpu_to_be32(from->magic);
- to->hdr.firstdb = cpu_to_be32(from->firstdb);
- to->hdr.nvalid = cpu_to_be32(from->nvalid);
- to->hdr.nused = cpu_to_be32(from->nused);
-}
-
-static void
-xfs_dir3_free_hdr_from_disk(
- struct xfs_dir3_icfree_hdr *to,
- struct xfs_dir2_free *from)
-{
- struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)from;
-
- to->magic = be32_to_cpu(hdr3->hdr.magic);
- to->firstdb = be32_to_cpu(hdr3->firstdb);
- to->nvalid = be32_to_cpu(hdr3->nvalid);
- to->nused = be32_to_cpu(hdr3->nused);
-
- ASSERT(to->magic == XFS_DIR3_FREE_MAGIC);
-}
-
-static void
-xfs_dir3_free_hdr_to_disk(
- struct xfs_dir2_free *to,
- struct xfs_dir3_icfree_hdr *from)
-{
- struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)to;
-
- ASSERT(from->magic == XFS_DIR3_FREE_MAGIC);
-
- hdr3->hdr.magic = cpu_to_be32(from->magic);
- hdr3->firstdb = cpu_to_be32(from->firstdb);
- hdr3->nvalid = cpu_to_be32(from->nvalid);
- hdr3->nused = cpu_to_be32(from->nused);
-}
-
-static const struct xfs_dir_ops xfs_dir2_ops = {
- .sf_entsize = xfs_dir2_sf_entsize,
- .sf_nextentry = xfs_dir2_sf_nextentry,
- .sf_get_ftype = xfs_dir2_sfe_get_ftype,
- .sf_put_ftype = xfs_dir2_sfe_put_ftype,
- .sf_get_ino = xfs_dir2_sfe_get_ino,
- .sf_put_ino = xfs_dir2_sfe_put_ino,
- .sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
- .sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
-
- .data_entsize = xfs_dir2_data_entsize,
- .data_get_ftype = xfs_dir2_data_get_ftype,
- .data_put_ftype = xfs_dir2_data_put_ftype,
- .data_entry_tag_p = xfs_dir2_data_entry_tag_p,
- .data_bestfree_p = xfs_dir2_data_bestfree_p,
-
- .data_dot_offset = sizeof(struct xfs_dir2_data_hdr),
- .data_dotdot_offset = sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR2_DATA_ENTSIZE(1),
- .data_first_offset = sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR2_DATA_ENTSIZE(1) +
- XFS_DIR2_DATA_ENTSIZE(2),
- .data_entry_offset = sizeof(struct xfs_dir2_data_hdr),
-
- .data_dot_entry_p = xfs_dir2_data_dot_entry_p,
- .data_dotdot_entry_p = xfs_dir2_data_dotdot_entry_p,
- .data_first_entry_p = xfs_dir2_data_first_entry_p,
- .data_entry_p = xfs_dir2_data_entry_p,
- .data_unused_p = xfs_dir2_data_unused_p,
-
- .leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr),
- .leaf_hdr_to_disk = xfs_dir2_leaf_hdr_to_disk,
- .leaf_hdr_from_disk = xfs_dir2_leaf_hdr_from_disk,
- .leaf_max_ents = xfs_dir2_max_leaf_ents,
- .leaf_ents_p = xfs_dir2_leaf_ents_p,
-
- .node_hdr_size = sizeof(struct xfs_da_node_hdr),
- .node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
- .node_tree_p = xfs_da2_node_tree_p,
-
- .free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
- .free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
- .free_hdr_from_disk = xfs_dir2_free_hdr_from_disk,
- .free_max_bests = xfs_dir2_free_max_bests,
- .free_bests_p = xfs_dir2_free_bests_p,
- .db_to_fdb = xfs_dir2_db_to_fdb,
- .db_to_fdindex = xfs_dir2_db_to_fdindex,
-};
-
-static const struct xfs_dir_ops xfs_dir2_ftype_ops = {
- .sf_entsize = xfs_dir3_sf_entsize,
- .sf_nextentry = xfs_dir3_sf_nextentry,
- .sf_get_ftype = xfs_dir3_sfe_get_ftype,
- .sf_put_ftype = xfs_dir3_sfe_put_ftype,
- .sf_get_ino = xfs_dir3_sfe_get_ino,
- .sf_put_ino = xfs_dir3_sfe_put_ino,
- .sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
- .sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
-
- .data_entsize = xfs_dir3_data_entsize,
- .data_get_ftype = xfs_dir3_data_get_ftype,
- .data_put_ftype = xfs_dir3_data_put_ftype,
- .data_entry_tag_p = xfs_dir3_data_entry_tag_p,
- .data_bestfree_p = xfs_dir2_data_bestfree_p,
-
- .data_dot_offset = sizeof(struct xfs_dir2_data_hdr),
- .data_dotdot_offset = sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1),
- .data_first_offset = sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1) +
- XFS_DIR3_DATA_ENTSIZE(2),
- .data_entry_offset = sizeof(struct xfs_dir2_data_hdr),
-
- .data_dot_entry_p = xfs_dir2_data_dot_entry_p,
- .data_dotdot_entry_p = xfs_dir2_ftype_data_dotdot_entry_p,
- .data_first_entry_p = xfs_dir2_ftype_data_first_entry_p,
- .data_entry_p = xfs_dir2_data_entry_p,
- .data_unused_p = xfs_dir2_data_unused_p,
-
- .leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr),
- .leaf_hdr_to_disk = xfs_dir2_leaf_hdr_to_disk,
- .leaf_hdr_from_disk = xfs_dir2_leaf_hdr_from_disk,
- .leaf_max_ents = xfs_dir2_max_leaf_ents,
- .leaf_ents_p = xfs_dir2_leaf_ents_p,
-
- .node_hdr_size = sizeof(struct xfs_da_node_hdr),
- .node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
- .node_tree_p = xfs_da2_node_tree_p,
-
- .free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
- .free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
- .free_hdr_from_disk = xfs_dir2_free_hdr_from_disk,
- .free_max_bests = xfs_dir2_free_max_bests,
- .free_bests_p = xfs_dir2_free_bests_p,
- .db_to_fdb = xfs_dir2_db_to_fdb,
- .db_to_fdindex = xfs_dir2_db_to_fdindex,
-};
-
-static const struct xfs_dir_ops xfs_dir3_ops = {
- .sf_entsize = xfs_dir3_sf_entsize,
- .sf_nextentry = xfs_dir3_sf_nextentry,
- .sf_get_ftype = xfs_dir3_sfe_get_ftype,
- .sf_put_ftype = xfs_dir3_sfe_put_ftype,
- .sf_get_ino = xfs_dir3_sfe_get_ino,
- .sf_put_ino = xfs_dir3_sfe_put_ino,
- .sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
- .sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
-
- .data_entsize = xfs_dir3_data_entsize,
- .data_get_ftype = xfs_dir3_data_get_ftype,
- .data_put_ftype = xfs_dir3_data_put_ftype,
- .data_entry_tag_p = xfs_dir3_data_entry_tag_p,
- .data_bestfree_p = xfs_dir3_data_bestfree_p,
-
- .data_dot_offset = sizeof(struct xfs_dir3_data_hdr),
- .data_dotdot_offset = sizeof(struct xfs_dir3_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1),
- .data_first_offset = sizeof(struct xfs_dir3_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1) +
- XFS_DIR3_DATA_ENTSIZE(2),
- .data_entry_offset = sizeof(struct xfs_dir3_data_hdr),
-
- .data_dot_entry_p = xfs_dir3_data_dot_entry_p,
- .data_dotdot_entry_p = xfs_dir3_data_dotdot_entry_p,
- .data_first_entry_p = xfs_dir3_data_first_entry_p,
- .data_entry_p = xfs_dir3_data_entry_p,
- .data_unused_p = xfs_dir3_data_unused_p,
-
- .leaf_hdr_size = sizeof(struct xfs_dir3_leaf_hdr),
- .leaf_hdr_to_disk = xfs_dir3_leaf_hdr_to_disk,
- .leaf_hdr_from_disk = xfs_dir3_leaf_hdr_from_disk,
- .leaf_max_ents = xfs_dir3_max_leaf_ents,
- .leaf_ents_p = xfs_dir3_leaf_ents_p,
-
- .node_hdr_size = sizeof(struct xfs_da3_node_hdr),
- .node_hdr_to_disk = xfs_da3_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da3_node_hdr_from_disk,
- .node_tree_p = xfs_da3_node_tree_p,
-
- .free_hdr_size = sizeof(struct xfs_dir3_free_hdr),
- .free_hdr_to_disk = xfs_dir3_free_hdr_to_disk,
- .free_hdr_from_disk = xfs_dir3_free_hdr_from_disk,
- .free_max_bests = xfs_dir3_free_max_bests,
- .free_bests_p = xfs_dir3_free_bests_p,
- .db_to_fdb = xfs_dir3_db_to_fdb,
- .db_to_fdindex = xfs_dir3_db_to_fdindex,
-};
-
-static const struct xfs_dir_ops xfs_dir2_nondir_ops = {
- .node_hdr_size = sizeof(struct xfs_da_node_hdr),
- .node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
- .node_tree_p = xfs_da2_node_tree_p,
-};
-
-static const struct xfs_dir_ops xfs_dir3_nondir_ops = {
- .node_hdr_size = sizeof(struct xfs_da3_node_hdr),
- .node_hdr_to_disk = xfs_da3_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da3_node_hdr_from_disk,
- .node_tree_p = xfs_da3_node_tree_p,
-};
-
-/*
- * Return the ops structure according to the current config. If we are passed
- * an inode, then that overrides the default config we use which is based on
- * feature bits.
- */
-const struct xfs_dir_ops *
-xfs_dir_get_ops(
- struct xfs_mount *mp,
- struct xfs_inode *dp)
-{
- if (dp)
- return dp->d_ops;
- if (mp->m_dir_inode_ops)
- return mp->m_dir_inode_ops;
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return &xfs_dir3_ops;
- if (xfs_sb_version_hasftype(&mp->m_sb))
- return &xfs_dir2_ftype_ops;
- return &xfs_dir2_ops;
-}
-
-const struct xfs_dir_ops *
-xfs_nondir_get_ops(
- struct xfs_mount *mp,
- struct xfs_inode *dp)
-{
- if (dp)
- return dp->d_ops;
- if (mp->m_nondir_inode_ops)
- return mp->m_nondir_inode_ops;
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return &xfs_dir3_nondir_ops;
- return &xfs_dir2_nondir_ops;
-}
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index ae654e06b2fb..3dee33043e09 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -94,19 +94,6 @@ struct xfs_da3_intnode {
};
/*
- * In-core version of the node header to abstract the differences in the v2 and
- * v3 disk format of the headers. Callers need to convert to/from disk format as
- * appropriate.
- */
-struct xfs_da3_icnode_hdr {
- uint32_t forw;
- uint32_t back;
- uint16_t magic;
- uint16_t count;
- uint16_t level;
-};
-
-/*
* Directory version 2.
*
* There are 4 possible formats:
@@ -434,14 +421,6 @@ struct xfs_dir3_leaf_hdr {
__be32 pad; /* 64 bit alignment */
};
-struct xfs_dir3_icleaf_hdr {
- uint32_t forw;
- uint32_t back;
- uint16_t magic;
- uint16_t count;
- uint16_t stale;
-};
-
/*
* Leaf block entry.
*/
@@ -482,7 +461,7 @@ xfs_dir2_leaf_bests_p(struct xfs_dir2_leaf_tail *ltp)
}
/*
- * Free space block defintions for the node format.
+ * Free space block definitions for the node format.
*/
/*
@@ -521,19 +500,6 @@ struct xfs_dir3_free {
#define XFS_DIR3_FREE_CRC_OFF offsetof(struct xfs_dir3_free, hdr.hdr.crc)
/*
- * In core version of the free block header, abstracted away from on-disk format
- * differences. Use this in the code, and convert to/from the disk version using
- * xfs_dir3_free_hdr_from_disk/xfs_dir3_free_hdr_to_disk.
- */
-struct xfs_dir3_icfree_hdr {
- uint32_t magic;
- uint32_t firstdb;
- uint32_t nvalid;
- uint32_t nused;
-
-};
-
-/*
* Single block format.
*
* The single block format looks like the following drawing on disk:
@@ -710,29 +676,6 @@ struct xfs_attr3_leafblock {
};
/*
- * incore, neutral version of the attribute leaf header
- */
-struct xfs_attr3_icleaf_hdr {
- uint32_t forw;
- uint32_t back;
- uint16_t magic;
- uint16_t count;
- uint16_t usedbytes;
- /*
- * firstused is 32-bit here instead of 16-bit like the on-disk variant
- * to support maximum fsb size of 64k without overflow issues throughout
- * the attr code. Instead, the overflow condition is handled on
- * conversion to/from disk.
- */
- uint32_t firstused;
- __u8 holes;
- struct {
- uint16_t base;
- uint16_t size;
- } freemap[XFS_ATTR_LEAF_MAPSIZE];
-};
-
-/*
* Special value to represent fs block size in the leaf header firstused field.
* Only used when block size overflows the 2-bytes available on disk.
*/
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index eb2be2a6a25a..22557527cfdb 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -517,7 +517,7 @@ xfs_defer_add(
}
if (!dfp) {
dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
dfp->dfp_type = type;
dfp->dfp_intent = NULL;
dfp->dfp_done = NULL;
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 67840723edbb..0aa87cbde49e 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -52,7 +52,7 @@ xfs_mode_to_ftype(
* ASCII case-insensitive (ie. A-Z) support for directories that was
* used in IRIX.
*/
-STATIC xfs_dahash_t
+xfs_dahash_t
xfs_ascii_ci_hashname(
struct xfs_name *name)
{
@@ -65,14 +65,14 @@ xfs_ascii_ci_hashname(
return hash;
}
-STATIC enum xfs_dacmp
+enum xfs_dacmp
xfs_ascii_ci_compname(
- struct xfs_da_args *args,
- const unsigned char *name,
- int len)
+ struct xfs_da_args *args,
+ const unsigned char *name,
+ int len)
{
- enum xfs_dacmp result;
- int i;
+ enum xfs_dacmp result;
+ int i;
if (args->namelen != len)
return XFS_CMP_DIFFERENT;
@@ -89,30 +89,20 @@ xfs_ascii_ci_compname(
return result;
}
-static const struct xfs_nameops xfs_ascii_ci_nameops = {
- .hashname = xfs_ascii_ci_hashname,
- .compname = xfs_ascii_ci_compname,
-};
-
int
xfs_da_mount(
struct xfs_mount *mp)
{
struct xfs_da_geometry *dageo;
- int nodehdr_size;
ASSERT(mp->m_sb.sb_versionnum & XFS_SB_VERSION_DIRV2BIT);
ASSERT(xfs_dir2_dirblock_bytes(&mp->m_sb) <= XFS_MAX_BLOCKSIZE);
- mp->m_dir_inode_ops = xfs_dir_get_ops(mp, NULL);
- mp->m_nondir_inode_ops = xfs_nondir_get_ops(mp, NULL);
-
- nodehdr_size = mp->m_dir_inode_ops->node_hdr_size;
mp->m_dir_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
mp->m_attr_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
if (!mp->m_dir_geo || !mp->m_attr_geo) {
kmem_free(mp->m_dir_geo);
kmem_free(mp->m_attr_geo);
@@ -125,6 +115,27 @@ xfs_da_mount(
dageo->fsblog = mp->m_sb.sb_blocklog;
dageo->blksize = xfs_dir2_dirblock_bytes(&mp->m_sb);
dageo->fsbcount = 1 << mp->m_sb.sb_dirblklog;
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ dageo->node_hdr_size = sizeof(struct xfs_da3_node_hdr);
+ dageo->leaf_hdr_size = sizeof(struct xfs_dir3_leaf_hdr);
+ dageo->free_hdr_size = sizeof(struct xfs_dir3_free_hdr);
+ dageo->data_entry_offset =
+ sizeof(struct xfs_dir3_data_hdr);
+ } else {
+ dageo->node_hdr_size = sizeof(struct xfs_da_node_hdr);
+ dageo->leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr);
+ dageo->free_hdr_size = sizeof(struct xfs_dir2_free_hdr);
+ dageo->data_entry_offset =
+ sizeof(struct xfs_dir2_data_hdr);
+ }
+ dageo->leaf_max_ents = (dageo->blksize - dageo->leaf_hdr_size) /
+ sizeof(struct xfs_dir2_leaf_entry);
+ dageo->free_max_bests = (dageo->blksize - dageo->free_hdr_size) /
+ sizeof(xfs_dir2_data_off_t);
+
+ dageo->data_first_offset = dageo->data_entry_offset +
+ xfs_dir2_data_entsize(mp, 1) +
+ xfs_dir2_data_entsize(mp, 2);
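
The limits computed in this hunk all follow the same shape: subtract the per-format header from the directory block size, then divide by a fixed entry size. A standalone sketch with placeholder header sizes (the real ones depend on the v4/v5 on-disk structs) shows the arithmetic:

#include <stdio.h>

static unsigned int max_ents(unsigned int blksize, unsigned int hdr_size,
			     unsigned int ent_size)
{
	return (blksize - hdr_size) / ent_size;
}

int main(void)
{
	unsigned int blksize = 4096;	/* one-fsb directory block, assumed */
	unsigned int hdr = 64;		/* placeholder header size */

	/* leaf and node entries are 8 bytes, bests entries are 2 bytes */
	printf("leaf_max_ents  = %u\n", max_ents(blksize, hdr, 8));
	printf("free_max_bests = %u\n", max_ents(blksize, hdr, 2));
	printf("node_ents      = %u\n", max_ents(blksize, hdr, 8));
	return 0;
}
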
/*
* Now we've set up the block conversion variables, we can calculate the
@@ -133,7 +144,7 @@ xfs_da_mount(
dageo->datablk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_DATA_OFFSET);
dageo->leafblk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_LEAF_OFFSET);
dageo->freeblk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_FREE_OFFSET);
- dageo->node_ents = (dageo->blksize - nodehdr_size) /
+ dageo->node_ents = (dageo->blksize - dageo->node_hdr_size) /
(uint)sizeof(xfs_da_node_entry_t);
dageo->magicpct = (dageo->blksize * 37) / 100;
@@ -143,15 +154,10 @@ xfs_da_mount(
dageo->fsblog = mp->m_sb.sb_blocklog;
dageo->blksize = 1 << dageo->blklog;
dageo->fsbcount = 1;
- dageo->node_ents = (dageo->blksize - nodehdr_size) /
+ dageo->node_hdr_size = mp->m_dir_geo->node_hdr_size;
+ dageo->node_ents = (dageo->blksize - dageo->node_hdr_size) /
(uint)sizeof(xfs_da_node_entry_t);
dageo->magicpct = (dageo->blksize * 37) / 100;
-
- if (xfs_sb_version_hasasciici(&mp->m_sb))
- mp->m_dirnameops = &xfs_ascii_ci_nameops;
- else
- mp->m_dirnameops = &xfs_default_nameops;
-
return 0;
}
@@ -191,10 +197,10 @@ xfs_dir_ino_validate(
{
bool ino_ok = xfs_verify_dir_ino(mp, ino);
- if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE))) {
+ if (XFS_IS_CORRUPT(mp, !ino_ok) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DIR_INO_VALIDATE)) {
xfs_warn(mp, "Invalid inode number 0x%Lx",
(unsigned long long) ino);
- XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -217,7 +223,7 @@ xfs_dir_init(
if (error)
return error;
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
if (!args)
return -ENOMEM;
@@ -254,7 +260,7 @@ xfs_dir_createname(
XFS_STATS_INC(dp->i_mount, xs_dir_create);
}
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
if (!args)
return -ENOMEM;
@@ -262,7 +268,7 @@ xfs_dir_createname(
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
- args->hashval = dp->i_mount->m_dirnameops->hashname(name);
+ args->hashval = xfs_dir2_hashname(dp->i_mount, name);
args->inumber = inum;
args->dp = dp;
args->total = total;
@@ -353,12 +359,12 @@ xfs_dir_lookup(
* lockdep Doing this avoids having to add a bunch of lockdep class
* annotations into the reclaim path for the ilock.
*/
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
args->geo = dp->i_mount->m_dir_geo;
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
- args->hashval = dp->i_mount->m_dirnameops->hashname(name);
+ args->hashval = xfs_dir2_hashname(dp->i_mount, name);
args->dp = dp;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -422,7 +428,7 @@ xfs_dir_removename(
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
XFS_STATS_INC(dp->i_mount, xs_dir_remove);
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
if (!args)
return -ENOMEM;
@@ -430,7 +436,7 @@ xfs_dir_removename(
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
- args->hashval = dp->i_mount->m_dirnameops->hashname(name);
+ args->hashval = xfs_dir2_hashname(dp->i_mount, name);
args->inumber = ino;
args->dp = dp;
args->total = total;
@@ -483,7 +489,7 @@ xfs_dir_replace(
if (rval)
return rval;
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
if (!args)
return -ENOMEM;
@@ -491,7 +497,7 @@ xfs_dir_replace(
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
- args->hashval = dp->i_mount->m_dirnameops->hashname(name);
+ args->hashval = xfs_dir2_hashname(dp->i_mount, name);
args->inumber = inum;
args->dp = dp;
args->total = total;
@@ -600,7 +606,9 @@ xfs_dir2_isblock(
if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
return rval;
rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
- if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
+ if (XFS_IS_CORRUPT(args->dp->i_mount,
+ rval != 0 &&
+ args->dp->i_d.di_size != args->geo->blksize))
return -EFSCORRUPTED;
*vp = rval;
return 0;
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index f54244779492..033777e282f2 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,6 +18,8 @@ struct xfs_dir2_sf_entry;
struct xfs_dir2_data_hdr;
struct xfs_dir2_data_entry;
struct xfs_dir2_data_unused;
+struct xfs_dir3_icfree_hdr;
+struct xfs_dir3_icleaf_hdr;
extern struct xfs_name xfs_name_dotdot;
@@ -27,85 +29,6 @@ extern struct xfs_name xfs_name_dotdot;
extern unsigned char xfs_mode_to_ftype(int mode);
/*
- * directory operations vector for encode/decode routines
- */
-struct xfs_dir_ops {
- int (*sf_entsize)(struct xfs_dir2_sf_hdr *hdr, int len);
- struct xfs_dir2_sf_entry *
- (*sf_nextentry)(struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep);
- uint8_t (*sf_get_ftype)(struct xfs_dir2_sf_entry *sfep);
- void (*sf_put_ftype)(struct xfs_dir2_sf_entry *sfep,
- uint8_t ftype);
- xfs_ino_t (*sf_get_ino)(struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep);
- void (*sf_put_ino)(struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep,
- xfs_ino_t ino);
- xfs_ino_t (*sf_get_parent_ino)(struct xfs_dir2_sf_hdr *hdr);
- void (*sf_put_parent_ino)(struct xfs_dir2_sf_hdr *hdr,
- xfs_ino_t ino);
-
- int (*data_entsize)(int len);
- uint8_t (*data_get_ftype)(struct xfs_dir2_data_entry *dep);
- void (*data_put_ftype)(struct xfs_dir2_data_entry *dep,
- uint8_t ftype);
- __be16 * (*data_entry_tag_p)(struct xfs_dir2_data_entry *dep);
- struct xfs_dir2_data_free *
- (*data_bestfree_p)(struct xfs_dir2_data_hdr *hdr);
-
- xfs_dir2_data_aoff_t data_dot_offset;
- xfs_dir2_data_aoff_t data_dotdot_offset;
- xfs_dir2_data_aoff_t data_first_offset;
- size_t data_entry_offset;
-
- struct xfs_dir2_data_entry *
- (*data_dot_entry_p)(struct xfs_dir2_data_hdr *hdr);
- struct xfs_dir2_data_entry *
- (*data_dotdot_entry_p)(struct xfs_dir2_data_hdr *hdr);
- struct xfs_dir2_data_entry *
- (*data_first_entry_p)(struct xfs_dir2_data_hdr *hdr);
- struct xfs_dir2_data_entry *
- (*data_entry_p)(struct xfs_dir2_data_hdr *hdr);
- struct xfs_dir2_data_unused *
- (*data_unused_p)(struct xfs_dir2_data_hdr *hdr);
-
- int leaf_hdr_size;
- void (*leaf_hdr_to_disk)(struct xfs_dir2_leaf *to,
- struct xfs_dir3_icleaf_hdr *from);
- void (*leaf_hdr_from_disk)(struct xfs_dir3_icleaf_hdr *to,
- struct xfs_dir2_leaf *from);
- int (*leaf_max_ents)(struct xfs_da_geometry *geo);
- struct xfs_dir2_leaf_entry *
- (*leaf_ents_p)(struct xfs_dir2_leaf *lp);
-
- int node_hdr_size;
- void (*node_hdr_to_disk)(struct xfs_da_intnode *to,
- struct xfs_da3_icnode_hdr *from);
- void (*node_hdr_from_disk)(struct xfs_da3_icnode_hdr *to,
- struct xfs_da_intnode *from);
- struct xfs_da_node_entry *
- (*node_tree_p)(struct xfs_da_intnode *dap);
-
- int free_hdr_size;
- void (*free_hdr_to_disk)(struct xfs_dir2_free *to,
- struct xfs_dir3_icfree_hdr *from);
- void (*free_hdr_from_disk)(struct xfs_dir3_icfree_hdr *to,
- struct xfs_dir2_free *from);
- int (*free_max_bests)(struct xfs_da_geometry *geo);
- __be16 * (*free_bests_p)(struct xfs_dir2_free *free);
- xfs_dir2_db_t (*db_to_fdb)(struct xfs_da_geometry *geo,
- xfs_dir2_db_t db);
- int (*db_to_fdindex)(struct xfs_da_geometry *geo,
- xfs_dir2_db_t db);
-};
-
-extern const struct xfs_dir_ops *
- xfs_dir_get_ops(struct xfs_mount *mp, struct xfs_inode *dp);
-extern const struct xfs_dir_ops *
- xfs_nondir_get_ops(struct xfs_mount *mp, struct xfs_inode *dp);
-
-/*
* Generic directory interface routines
*/
extern void xfs_dir_startup(void);
@@ -124,6 +47,8 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t ino,
xfs_extlen_t tot);
+extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp,
+ xfs_ino_t inum);
extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
xfs_extlen_t tot);
@@ -143,10 +68,7 @@ extern int xfs_dir2_isleaf(struct xfs_da_args *args, int *r);
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_buf *bp);
-extern void xfs_dir2_data_freescan_int(struct xfs_da_geometry *geo,
- const struct xfs_dir_ops *ops,
- struct xfs_dir2_data_hdr *hdr, int *loghead);
-extern void xfs_dir2_data_freescan(struct xfs_inode *dp,
+extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
struct xfs_dir2_data_hdr *hdr, int *loghead);
extern void xfs_dir2_data_log_entry(struct xfs_da_args *args,
struct xfs_buf *bp, struct xfs_dir2_data_entry *dep);
@@ -324,7 +246,7 @@ xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
#define XFS_READDIR_BUFSIZE (32768)
unsigned char xfs_dir3_get_dtype(struct xfs_mount *mp, uint8_t filetype);
-void *xfs_dir3_data_endp(struct xfs_da_geometry *geo,
+unsigned int xfs_dir3_data_end_offset(struct xfs_da_geometry *geo,
struct xfs_dir2_data_hdr *hdr);
bool xfs_dir2_namecheck(const void *name, size_t length);
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index a6fb0cc2085e..d6ced59b9567 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -123,7 +123,7 @@ xfs_dir3_block_read(
struct xfs_mount *mp = dp->i_mount;
int err;
- err = xfs_da_read_buf(tp, dp, mp->m_dir_geo->datablk, -1, bpp,
+ err = xfs_da_read_buf(tp, dp, mp->m_dir_geo->datablk, 0, bpp,
XFS_DATA_FORK, &xfs_dir3_block_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
@@ -172,7 +172,7 @@ xfs_dir2_block_need_space(
struct xfs_dir2_data_unused *enddup = NULL;
*compact = 0;
- bf = dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
/*
* If there are stale entries we'll use one for the leaf.
@@ -311,7 +311,7 @@ xfs_dir2_block_compact(
* This needs to happen before the next call to use_free.
*/
if (needscan)
- xfs_dir2_data_freescan(args->dp, hdr, needlog);
+ xfs_dir2_data_freescan(args->dp->i_mount, hdr, needlog);
}
/*
@@ -355,7 +355,7 @@ xfs_dir2_block_addname(
if (error)
return error;
- len = dp->d_ops->data_entsize(args->namelen);
+ len = xfs_dir2_data_entsize(dp->i_mount, args->namelen);
/*
* Set up pointers to parts of the block.
@@ -458,7 +458,7 @@ xfs_dir2_block_addname(
* This needs to happen before the next call to use_free.
*/
if (needscan) {
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
needscan = 0;
}
/*
@@ -541,14 +541,14 @@ xfs_dir2_block_addname(
dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, args->namelen);
- dp->d_ops->data_put_ftype(dep, args->filetype);
- tagp = dp->d_ops->data_entry_tag_p(dep);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
+ tagp = xfs_dir2_data_entry_tag_p(dp->i_mount, dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
/*
* Clean up the bestfree array and log the header, tail, and entry.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, bp);
xfs_dir2_block_log_tail(tp, bp);
@@ -633,7 +633,7 @@ xfs_dir2_block_lookup(
* Fill in inode number, CI name if appropriate, release the block.
*/
args->inumber = be64_to_cpu(dep->inumber);
- args->filetype = dp->d_ops->data_get_ftype(dep);
+ args->filetype = xfs_dir2_data_get_ftype(dp->i_mount, dep);
error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
xfs_trans_brelse(args->trans, bp);
return error;
@@ -660,13 +660,11 @@ xfs_dir2_block_lookup_int(
int high; /* binary search high index */
int low; /* binary search low index */
int mid; /* binary search current idx */
- xfs_mount_t *mp; /* filesystem mount point */
xfs_trans_t *tp; /* transaction pointer */
enum xfs_dacmp cmp; /* comparison result */
dp = args->dp;
tp = args->trans;
- mp = dp->i_mount;
error = xfs_dir3_block_read(tp, dp, &bp);
if (error)
@@ -718,7 +716,7 @@ xfs_dir2_block_lookup_int(
* and buffer. If it's the first case-insensitive match, store
* the index and buffer and continue looking for an exact match.
*/
- cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
+ cmp = xfs_dir2_compname(args, dep->name, dep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
args->cmpresult = cmp;
*bpp = bp;
@@ -791,7 +789,8 @@ xfs_dir2_block_removename(
needlog = needscan = 0;
xfs_dir2_data_make_free(args, bp,
(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
- dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
+ xfs_dir2_data_entsize(dp->i_mount, dep->namelen), &needlog,
+ &needscan);
/*
* Fix up the block tail.
*/
@@ -806,7 +805,7 @@ xfs_dir2_block_removename(
* Fix up bestfree, log the header if necessary.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, bp);
xfs_dir3_data_check(dp, bp);
@@ -864,7 +863,7 @@ xfs_dir2_block_replace(
* Change the inode number to the new value.
*/
dep->inumber = cpu_to_be64(args->inumber);
- dp->d_ops->data_put_ftype(dep, args->filetype);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
xfs_dir2_data_log_entry(args, bp, dep);
xfs_dir3_data_check(dp, bp);
return 0;
@@ -914,7 +913,6 @@ xfs_dir2_leaf_to_block(
__be16 *tagp; /* end of entry (tag) */
int to; /* block/leaf to index */
xfs_trans_t *tp; /* transaction pointer */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_leaf_to_block(args);
@@ -923,8 +921,7 @@ xfs_dir2_leaf_to_block(
tp = args->trans;
mp = dp->i_mount;
leaf = lbp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, leaf);
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAF1_MAGIC ||
@@ -938,7 +935,7 @@ xfs_dir2_leaf_to_block(
while (dp->i_d.di_size > args->geo->blksize) {
int hdrsz;
- hdrsz = dp->d_ops->data_entry_offset;
+ hdrsz = args->geo->data_entry_offset;
bestsp = xfs_dir2_leaf_bests_p(ltp);
if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
args->geo->blksize - hdrsz) {
@@ -953,7 +950,7 @@ xfs_dir2_leaf_to_block(
* Read the data block if we don't already have it, give up if it fails.
*/
if (!dbp) {
- error = xfs_dir3_data_read(tp, dp, args->geo->datablk, -1, &dbp);
+ error = xfs_dir3_data_read(tp, dp, args->geo->datablk, 0, &dbp);
if (error)
return error;
}
@@ -1004,9 +1001,10 @@ xfs_dir2_leaf_to_block(
*/
lep = xfs_dir2_block_leaf_p(btp);
for (from = to = 0; from < leafhdr.count; from++) {
- if (ents[from].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ if (leafhdr.ents[from].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
continue;
- lep[to++] = ents[from];
+ lep[to++] = leafhdr.ents[from];
}
ASSERT(to == be32_to_cpu(btp->count));
xfs_dir2_block_log_leaf(tp, dbp, 0, be32_to_cpu(btp->count) - 1);
@@ -1014,7 +1012,7 @@ xfs_dir2_leaf_to_block(
* Scan the bestfree if we need it and log the data block header.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, dbp);
/*
@@ -1039,47 +1037,38 @@ xfs_dir2_leaf_to_block(
*/
int /* error */
xfs_dir2_sf_to_block(
- xfs_da_args_t *args) /* operation arguments */
+ struct xfs_da_args *args)
{
+ struct xfs_trans *tp = args->trans;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(dp, XFS_DATA_FORK);
+ struct xfs_da_geometry *geo = args->geo;
xfs_dir2_db_t blkno; /* dir-relative block # (0) */
xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
struct xfs_buf *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail pointer */
xfs_dir2_data_entry_t *dep; /* data entry pointer */
- xfs_inode_t *dp; /* incore directory inode */
int dummy; /* trash */
xfs_dir2_data_unused_t *dup; /* unused entry pointer */
int endoffset; /* end of data objects */
int error; /* error return value */
int i; /* index */
- xfs_mount_t *mp; /* filesystem mount point */
int needlog; /* need to log block header */
int needscan; /* need to scan block freespc */
int newoffset; /* offset from current entry */
- int offset; /* target block offset */
+ unsigned int offset = geo->data_entry_offset;
xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */
xfs_dir2_sf_hdr_t *oldsfp; /* old shortform header */
xfs_dir2_sf_hdr_t *sfp; /* shortform header */
__be16 *tagp; /* end of data entry */
- xfs_trans_t *tp; /* transaction pointer */
struct xfs_name name;
- struct xfs_ifork *ifp;
trace_xfs_dir2_sf_to_block(args);
- dp = args->dp;
- tp = args->trans;
- mp = dp->i_mount;
- ifp = XFS_IFORK_PTR(dp, XFS_DATA_FORK);
ASSERT(ifp->if_flags & XFS_IFINLINE);
- /*
- * Bomb out if the shortform directory is way too short.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
- return -EIO;
- }
+ ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
oldsfp = (xfs_dir2_sf_hdr_t *)ifp->if_u1.if_data;
@@ -1092,11 +1081,11 @@ xfs_dir2_sf_to_block(
* Copy the directory into a temporary buffer.
* Then pitch the incore inode data so we can make extents.
*/
- sfp = kmem_alloc(ifp->if_bytes, KM_SLEEP);
+ sfp = kmem_alloc(ifp->if_bytes, 0);
memcpy(sfp, oldsfp, ifp->if_bytes);
xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
- xfs_bmap_local_to_extents_empty(dp, XFS_DATA_FORK);
+ xfs_bmap_local_to_extents_empty(tp, dp, XFS_DATA_FORK);
dp->i_d.di_size = 0;
/*
@@ -1123,7 +1112,7 @@ xfs_dir2_sf_to_block(
* The whole thing is initialized to free by the init routine.
* Say we're using the leaf and tail area.
*/
- dup = dp->d_ops->data_unused_p(hdr);
+ dup = bp->b_addr + offset;
needlog = needscan = 0;
error = xfs_dir2_data_use_free(args, bp, dup, args->geo->blksize - i,
i, &needlog, &needscan);
@@ -1146,35 +1135,37 @@ xfs_dir2_sf_to_block(
be16_to_cpu(dup->length), &needlog, &needscan);
if (error)
goto out_free;
+
/*
* Create entry for .
*/
- dep = dp->d_ops->data_dot_entry_p(hdr);
+ dep = bp->b_addr + offset;
dep->inumber = cpu_to_be64(dp->i_ino);
dep->namelen = 1;
dep->name[0] = '.';
- dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
- tagp = dp->d_ops->data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)hdr);
+ xfs_dir2_data_put_ftype(mp, dep, XFS_DIR3_FT_DIR);
+ tagp = xfs_dir2_data_entry_tag_p(mp, dep);
+ *tagp = cpu_to_be16(offset);
xfs_dir2_data_log_entry(args, bp, dep);
blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
- blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
- (char *)dep - (char *)hdr));
+ blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(offset));
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
+
/*
* Create entry for ..
*/
- dep = dp->d_ops->data_dotdot_entry_p(hdr);
- dep->inumber = cpu_to_be64(dp->d_ops->sf_get_parent_ino(sfp));
+ dep = bp->b_addr + offset;
+ dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
dep->namelen = 2;
dep->name[0] = dep->name[1] = '.';
- dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
- tagp = dp->d_ops->data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)hdr);
+ xfs_dir2_data_put_ftype(mp, dep, XFS_DIR3_FT_DIR);
+ tagp = xfs_dir2_data_entry_tag_p(mp, dep);
+ *tagp = cpu_to_be16(offset);
xfs_dir2_data_log_entry(args, bp, dep);
blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
- blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
- (char *)dep - (char *)hdr));
- offset = dp->d_ops->data_first_offset;
+ blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(offset));
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
+
/*
* Loop over existing entries, stuff them in.
*/
@@ -1183,6 +1174,7 @@ xfs_dir2_sf_to_block(
sfep = NULL;
else
sfep = xfs_dir2_sf_firstentry(sfp);
+
/*
* Need to preserve the existing offset values in the sf directory.
* Insert holes (unused entries) where necessary.
@@ -1199,40 +1191,39 @@ xfs_dir2_sf_to_block(
* There should be a hole here, make one.
*/
if (offset < newoffset) {
- dup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
+ dup = bp->b_addr + offset;
dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
dup->length = cpu_to_be16(newoffset - offset);
- *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
- ((char *)dup - (char *)hdr));
+ *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(offset);
xfs_dir2_data_log_unused(args, bp, dup);
xfs_dir2_data_freeinsert(hdr,
- dp->d_ops->data_bestfree_p(hdr),
- dup, &dummy);
+ xfs_dir2_data_bestfree_p(mp, hdr),
+ dup, &dummy);
offset += be16_to_cpu(dup->length);
continue;
}
/*
* Copy a real entry.
*/
- dep = (xfs_dir2_data_entry_t *)((char *)hdr + newoffset);
- dep->inumber = cpu_to_be64(dp->d_ops->sf_get_ino(sfp, sfep));
+ dep = bp->b_addr + newoffset;
+ dep->inumber = cpu_to_be64(xfs_dir2_sf_get_ino(mp, sfp, sfep));
dep->namelen = sfep->namelen;
- dp->d_ops->data_put_ftype(dep, dp->d_ops->sf_get_ftype(sfep));
+ xfs_dir2_data_put_ftype(mp, dep,
+ xfs_dir2_sf_get_ftype(mp, sfep));
memcpy(dep->name, sfep->name, dep->namelen);
- tagp = dp->d_ops->data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)hdr);
+ tagp = xfs_dir2_data_entry_tag_p(mp, dep);
+ *tagp = cpu_to_be16(newoffset);
xfs_dir2_data_log_entry(args, bp, dep);
name.name = sfep->name;
name.len = sfep->namelen;
- blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
- hashname(&name));
- blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
- (char *)dep - (char *)hdr));
+ blp[2 + i].hashval = cpu_to_be32(xfs_dir2_hashname(mp, &name));
+ blp[2 + i].address =
+ cpu_to_be32(xfs_dir2_byte_to_dataptr(newoffset));
offset = (int)((char *)(tagp + 1) - (char *)hdr);
if (++i == sfp->count)
sfep = NULL;
else
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
}
/* Done with the temporary buffer */
kmem_free(sfp);
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 2c79be4c3153..b9eba8213180 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -13,6 +13,7 @@
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
+#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
@@ -23,6 +24,71 @@ static xfs_failaddr_t xfs_dir2_data_freefind_verify(
struct xfs_dir2_data_unused *dup,
struct xfs_dir2_data_free **bf_ent);
+struct xfs_dir2_data_free *
+xfs_dir2_data_bestfree_p(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_hdr *hdr)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ return ((struct xfs_dir3_data_hdr *)hdr)->best_free;
+ return hdr->bestfree;
+}
+
+/*
+ * Pointer to an entry's tag word.
+ */
+__be16 *
+xfs_dir2_data_entry_tag_p(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep)
+{
+ return (__be16 *)((char *)dep +
+ xfs_dir2_data_entsize(mp, dep->namelen) - sizeof(__be16));
+}
+
+uint8_t
+xfs_dir2_data_get_ftype(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep)
+{
+ if (xfs_sb_version_hasftype(&mp->m_sb)) {
+ uint8_t ftype = dep->name[dep->namelen];
+
+ if (likely(ftype < XFS_DIR3_FT_MAX))
+ return ftype;
+ }
+
+ return XFS_DIR3_FT_UNKNOWN;
+}
+
+void
+xfs_dir2_data_put_ftype(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep,
+ uint8_t ftype)
+{
+ ASSERT(ftype < XFS_DIR3_FT_MAX);
+ ASSERT(dep->namelen != 0);
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ dep->name[dep->namelen] = ftype;
+}
+
+/*
+ * The number of leaf entries is limited by the size of the block and the amount
+ * of space used by the data entries. We don't know how much space is used by
+ * the data entries yet, so just ensure that the count falls somewhere inside
+ * the block right now.
+ */
+static inline unsigned int
+xfs_dir2_data_max_leaf_entries(
+ struct xfs_da_geometry *geo)
+{
+ return (geo->blksize - sizeof(struct xfs_dir2_block_tail) -
+ geo->data_entry_offset) /
+ sizeof(struct xfs_dir2_leaf_entry);
+}
+
/*
* Check the consistency of the data block.
* The input can also be a block-format directory.
@@ -38,40 +104,27 @@ __xfs_dir3_data_check(
xfs_dir2_block_tail_t *btp=NULL; /* block tail */
int count; /* count of entries found */
xfs_dir2_data_hdr_t *hdr; /* data block header */
- xfs_dir2_data_entry_t *dep; /* data entry */
xfs_dir2_data_free_t *dfp; /* bestfree entry */
- xfs_dir2_data_unused_t *dup; /* unused entry */
- char *endp; /* end of useful data */
int freeseen; /* mask of bestfrees seen */
xfs_dahash_t hash; /* hash of current name */
int i; /* leaf index */
int lastfree; /* last entry was unused */
xfs_dir2_leaf_entry_t *lep=NULL; /* block leaf entries */
struct xfs_mount *mp = bp->b_mount;
- char *p; /* current data position */
int stale; /* count of stale leaves */
struct xfs_name name;
- const struct xfs_dir_ops *ops;
- struct xfs_da_geometry *geo;
-
- geo = mp->m_dir_geo;
+ unsigned int offset;
+ unsigned int end;
+ struct xfs_da_geometry *geo = mp->m_dir_geo;
/*
- * We can be passed a null dp here from a verifier, so we need to go the
- * hard way to get them.
+ * If this isn't a directory, something is seriously wrong. Bail out.
*/
- ops = xfs_dir_get_ops(mp, dp);
-
- /*
- * If this isn't a directory, or we don't get handed the dir ops,
- * something is seriously wrong. Bail out.
- */
- if ((dp && !S_ISDIR(VFS_I(dp)->i_mode)) ||
- ops != xfs_dir_get_ops(mp, NULL))
+ if (dp && !S_ISDIR(VFS_I(dp)->i_mode))
return __this_address;
hdr = bp->b_addr;
- p = (char *)ops->data_entry_p(hdr);
+ offset = geo->data_entry_offset;
switch (hdr->magic) {
case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC):
@@ -79,15 +132,8 @@ __xfs_dir3_data_check(
btp = xfs_dir2_block_tail_p(geo, hdr);
lep = xfs_dir2_block_leaf_p(btp);
- /*
- * The number of leaf entries is limited by the size of the
- * block and the amount of space used by the data entries.
- * We don't know how much space is used by the data entries yet,
- * so just ensure that the count falls somewhere inside the
- * block right now.
- */
if (be32_to_cpu(btp->count) >=
- ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry))
+ xfs_dir2_data_max_leaf_entries(geo))
return __this_address;
break;
case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
@@ -96,14 +142,14 @@ __xfs_dir3_data_check(
default:
return __this_address;
}
- endp = xfs_dir3_data_endp(geo, hdr);
- if (!endp)
+ end = xfs_dir3_data_end_offset(geo, hdr);
+ if (!end)
return __this_address;
/*
* Account for zero bestfree entries.
*/
- bf = ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(mp, hdr);
count = lastfree = freeseen = 0;
if (!bf[0].length) {
if (bf[0].offset)
@@ -128,8 +174,10 @@ __xfs_dir3_data_check(
/*
* Loop over the data/unused entries.
*/
- while (p < endp) {
- dup = (xfs_dir2_data_unused_t *)p;
+ while (offset < end) {
+ struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
+ struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
+
/*
* If it's unused, look for the space in the bestfree table.
* If we find it, account for that, else make sure it
@@ -140,10 +188,10 @@ __xfs_dir3_data_check(
if (lastfree != 0)
return __this_address;
- if (endp < p + be16_to_cpu(dup->length))
+ if (offset + be16_to_cpu(dup->length) > end)
return __this_address;
if (be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) !=
- (char *)dup - (char *)hdr)
+ offset)
return __this_address;
fa = xfs_dir2_data_freefind_verify(hdr, bf, dup, &dfp);
if (fa)
@@ -158,7 +206,7 @@ __xfs_dir3_data_check(
be16_to_cpu(bf[2].length))
return __this_address;
}
- p += be16_to_cpu(dup->length);
+ offset += be16_to_cpu(dup->length);
lastfree = 1;
continue;
}
@@ -168,17 +216,15 @@ __xfs_dir3_data_check(
* in the leaf section of the block.
* The linear search is crude but this is DEBUG code.
*/
- dep = (xfs_dir2_data_entry_t *)p;
if (dep->namelen == 0)
return __this_address;
if (xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)))
return __this_address;
- if (endp < p + ops->data_entsize(dep->namelen))
+ if (offset + xfs_dir2_data_entsize(mp, dep->namelen) > end)
return __this_address;
- if (be16_to_cpu(*ops->data_entry_tag_p(dep)) !=
- (char *)dep - (char *)hdr)
+ if (be16_to_cpu(*xfs_dir2_data_entry_tag_p(mp, dep)) != offset)
return __this_address;
- if (ops->data_get_ftype(dep) >= XFS_DIR3_FT_MAX)
+ if (xfs_dir2_data_get_ftype(mp, dep) >= XFS_DIR3_FT_MAX)
return __this_address;
count++;
lastfree = 0;
@@ -189,7 +235,7 @@ __xfs_dir3_data_check(
((char *)dep - (char *)hdr));
name.name = dep->name;
name.len = dep->namelen;
- hash = mp->m_dirnameops->hashname(&name);
+ hash = xfs_dir2_hashname(mp, &name);
for (i = 0; i < be32_to_cpu(btp->count); i++) {
if (be32_to_cpu(lep[i].address) == addr &&
be32_to_cpu(lep[i].hashval) == hash)
@@ -198,7 +244,7 @@ __xfs_dir3_data_check(
if (i >= be32_to_cpu(btp->count))
return __this_address;
}
- p += ops->data_entsize(dep->namelen);
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
}
/*
* Need to have seen all the entries and all the bestfree slots.
@@ -354,13 +400,13 @@ xfs_dir3_data_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mapped_bno,
+ unsigned int flags,
struct xfs_buf **bpp)
{
int err;
- err = xfs_da_read_buf(tp, dp, bno, mapped_bno, bpp,
- XFS_DATA_FORK, &xfs_dir3_data_buf_ops);
+ err = xfs_da_read_buf(tp, dp, bno, flags, bpp, XFS_DATA_FORK,
+ &xfs_dir3_data_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF);
return err;
@@ -370,10 +416,10 @@ int
xfs_dir3_data_readahead(
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mapped_bno)
+ unsigned int flags)
{
- return xfs_da_reada_buf(dp, bno, mapped_bno,
- XFS_DATA_FORK, &xfs_dir3_data_reada_buf_ops);
+ return xfs_da_reada_buf(dp, bno, flags, XFS_DATA_FORK,
+ &xfs_dir3_data_reada_buf_ops);
}
/*
@@ -561,17 +607,16 @@ xfs_dir2_data_freeremove(
* Given a data block, reconstruct its bestfree map.
*/
void
-xfs_dir2_data_freescan_int(
- struct xfs_da_geometry *geo,
- const struct xfs_dir_ops *ops,
- struct xfs_dir2_data_hdr *hdr,
- int *loghead)
+xfs_dir2_data_freescan(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_hdr *hdr,
+ int *loghead)
{
- xfs_dir2_data_entry_t *dep; /* active data entry */
- xfs_dir2_data_unused_t *dup; /* unused data entry */
- struct xfs_dir2_data_free *bf;
- char *endp; /* end of block's data */
- char *p; /* current entry pointer */
+ struct xfs_da_geometry *geo = mp->m_dir_geo;
+ struct xfs_dir2_data_free *bf = xfs_dir2_data_bestfree_p(mp, hdr);
+ void *addr = hdr;
+ unsigned int offset = geo->data_entry_offset;
+ unsigned int end;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
@@ -581,79 +626,60 @@ xfs_dir2_data_freescan_int(
/*
* Start by clearing the table.
*/
- bf = ops->data_bestfree_p(hdr);
memset(bf, 0, sizeof(*bf) * XFS_DIR2_DATA_FD_COUNT);
*loghead = 1;
- /*
- * Set up pointers.
- */
- p = (char *)ops->data_entry_p(hdr);
- endp = xfs_dir3_data_endp(geo, hdr);
- /*
- * Loop over the block's entries.
- */
- while (p < endp) {
- dup = (xfs_dir2_data_unused_t *)p;
+
+ end = xfs_dir3_data_end_offset(geo, addr);
+ while (offset < end) {
+ struct xfs_dir2_data_unused *dup = addr + offset;
+ struct xfs_dir2_data_entry *dep = addr + offset;
+
/*
* If it's a free entry, insert it.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- ASSERT((char *)dup - (char *)hdr ==
+ ASSERT(offset ==
be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
xfs_dir2_data_freeinsert(hdr, bf, dup, loghead);
- p += be16_to_cpu(dup->length);
+ offset += be16_to_cpu(dup->length);
+ continue;
}
+
/*
* For active entries, check their tags and skip them.
*/
- else {
- dep = (xfs_dir2_data_entry_t *)p;
- ASSERT((char *)dep - (char *)hdr ==
- be16_to_cpu(*ops->data_entry_tag_p(dep)));
- p += ops->data_entsize(dep->namelen);
- }
+ ASSERT(offset ==
+ be16_to_cpu(*xfs_dir2_data_entry_tag_p(mp, dep)));
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
}
}
-void
-xfs_dir2_data_freescan(
- struct xfs_inode *dp,
- struct xfs_dir2_data_hdr *hdr,
- int *loghead)
-{
- return xfs_dir2_data_freescan_int(dp->i_mount->m_dir_geo, dp->d_ops,
- hdr, loghead);
-}
-
/*
* Initialize a data block at the given block number in the directory.
* Give back the buffer for the created block.
*/
int /* error */
xfs_dir3_data_init(
- xfs_da_args_t *args, /* directory operation args */
- xfs_dir2_db_t blkno, /* logical dir block number */
- struct xfs_buf **bpp) /* output block buffer */
+ struct xfs_da_args *args, /* directory operation args */
+ xfs_dir2_db_t blkno, /* logical dir block number */
+ struct xfs_buf **bpp) /* output block buffer */
{
- struct xfs_buf *bp; /* block buffer */
- xfs_dir2_data_hdr_t *hdr; /* data block header */
- xfs_inode_t *dp; /* incore directory inode */
- xfs_dir2_data_unused_t *dup; /* unused entry pointer */
- struct xfs_dir2_data_free *bf;
- int error; /* error return value */
- int i; /* bestfree index */
- xfs_mount_t *mp; /* filesystem mount point */
- xfs_trans_t *tp; /* transaction pointer */
- int t; /* temp */
-
- dp = args->dp;
- mp = dp->i_mount;
- tp = args->trans;
+ struct xfs_trans *tp = args->trans;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_da_geometry *geo = args->geo;
+ struct xfs_buf *bp;
+ struct xfs_dir2_data_hdr *hdr;
+ struct xfs_dir2_data_unused *dup;
+ struct xfs_dir2_data_free *bf;
+ int error;
+ int i;
+
/*
* Get the buffer set up for the block.
*/
error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(args->geo, blkno),
- -1, &bp, XFS_DATA_FORK);
+ &bp, XFS_DATA_FORK);
if (error)
return error;
bp->b_ops = &xfs_dir3_data_buf_ops;
@@ -675,8 +701,9 @@ xfs_dir3_data_init(
} else
hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
- bf = dp->d_ops->data_bestfree_p(hdr);
- bf[0].offset = cpu_to_be16(dp->d_ops->data_entry_offset);
+ bf = xfs_dir2_data_bestfree_p(mp, hdr);
+ bf[0].offset = cpu_to_be16(geo->data_entry_offset);
+ bf[0].length = cpu_to_be16(geo->blksize - geo->data_entry_offset);
for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
bf[i].length = 0;
bf[i].offset = 0;
@@ -685,13 +712,11 @@ xfs_dir3_data_init(
/*
* Set up an unused entry for the block's body.
*/
- dup = dp->d_ops->data_unused_p(hdr);
+ dup = bp->b_addr + geo->data_entry_offset;
dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
-
- t = args->geo->blksize - (uint)dp->d_ops->data_entry_offset;
- bf[0].length = cpu_to_be16(t);
- dup->length = cpu_to_be16(t);
+ dup->length = bf[0].length;
*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)hdr);
+
/*
* Log it and return it.
*/
@@ -710,6 +735,7 @@ xfs_dir2_data_log_entry(
struct xfs_buf *bp,
xfs_dir2_data_entry_t *dep) /* data entry pointer */
{
+ struct xfs_mount *mp = bp->b_mount;
struct xfs_dir2_data_hdr *hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
@@ -718,7 +744,7 @@ xfs_dir2_data_log_entry(
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
xfs_trans_log_buf(args->trans, bp, (uint)((char *)dep - (char *)hdr),
- (uint)((char *)(args->dp->d_ops->data_entry_tag_p(dep) + 1) -
+ (uint)((char *)(xfs_dir2_data_entry_tag_p(mp, dep) + 1) -
(char *)hdr - 1));
}
@@ -739,8 +765,7 @@ xfs_dir2_data_log_header(
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
#endif
- xfs_trans_log_buf(args->trans, bp, 0,
- args->dp->d_ops->data_entry_offset - 1);
+ xfs_trans_log_buf(args->trans, bp, 0, args->geo->data_entry_offset - 1);
}
/*
@@ -789,11 +814,11 @@ xfs_dir2_data_make_free(
{
xfs_dir2_data_hdr_t *hdr; /* data block pointer */
xfs_dir2_data_free_t *dfp; /* bestfree pointer */
- char *endptr; /* end of data area */
int needscan; /* need to regen bestfree */
xfs_dir2_data_unused_t *newdup; /* new unused entry */
xfs_dir2_data_unused_t *postdup; /* unused entry after us */
xfs_dir2_data_unused_t *prevdup; /* unused entry before us */
+ unsigned int end;
struct xfs_dir2_data_free *bf;
hdr = bp->b_addr;
@@ -801,14 +826,14 @@ xfs_dir2_data_make_free(
/*
* Figure out where the end of the data area is.
*/
- endptr = xfs_dir3_data_endp(args->geo, hdr);
- ASSERT(endptr != NULL);
+ end = xfs_dir3_data_end_offset(args->geo, hdr);
+ ASSERT(end != 0);
/*
* If this isn't the start of the block, then back up to
* the previous entry and see if it's free.
*/
- if (offset > args->dp->d_ops->data_entry_offset) {
+ if (offset > args->geo->data_entry_offset) {
__be16 *tagp; /* tag just before us */
tagp = (__be16 *)((char *)hdr + offset) - 1;
@@ -821,7 +846,7 @@ xfs_dir2_data_make_free(
* If this isn't the end of the block, see if the entry after
* us is free.
*/
- if ((char *)hdr + offset + len < endptr) {
+ if (offset + len < end) {
postdup =
(xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
@@ -834,7 +859,7 @@ xfs_dir2_data_make_free(
* Previous and following entries are both free,
* merge everything into a single free entry.
*/
- bf = args->dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(args->dp->i_mount, hdr);
if (prevdup && postdup) {
xfs_dir2_data_free_t *dfp2; /* another bestfree pointer */
@@ -1025,7 +1050,7 @@ xfs_dir2_data_use_free(
* Look up the entry in the bestfree table.
*/
oldlen = be16_to_cpu(dup->length);
- bf = args->dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(args->dp->i_mount, hdr);
dfp = xfs_dir2_data_freefind(hdr, bf, dup);
ASSERT(dfp || oldlen <= be16_to_cpu(bf[2].length));
/*
@@ -1149,19 +1174,22 @@ corrupt:
}
/* Find the end of the entry data in a data/block format dir block. */
-void *
-xfs_dir3_data_endp(
+unsigned int
+xfs_dir3_data_end_offset(
struct xfs_da_geometry *geo,
struct xfs_dir2_data_hdr *hdr)
{
+ void *p;
+
switch (hdr->magic) {
case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC):
case cpu_to_be32(XFS_DIR2_BLOCK_MAGIC):
- return xfs_dir2_block_leaf_p(xfs_dir2_block_tail_p(geo, hdr));
+ p = xfs_dir2_block_leaf_p(xfs_dir2_block_tail_p(geo, hdr));
+ return p - (void *)hdr;
case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
- return (char *)hdr + geo->blksize;
+ return geo->blksize;
default:
- return NULL;
+ return 0;
}
}
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index a53e4585a2f3..a131b520aac7 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -24,12 +24,73 @@
* Local function declarations.
*/
static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, struct xfs_buf **lbpp,
- int *indexp, struct xfs_buf **dbpp);
+ int *indexp, struct xfs_buf **dbpp,
+ struct xfs_dir3_icleaf_hdr *leafhdr);
static void xfs_dir3_leaf_log_bests(struct xfs_da_args *args,
struct xfs_buf *bp, int first, int last);
static void xfs_dir3_leaf_log_tail(struct xfs_da_args *args,
struct xfs_buf *bp);
+void
+xfs_dir2_leaf_hdr_from_disk(
+ struct xfs_mount *mp,
+ struct xfs_dir3_icleaf_hdr *to,
+ struct xfs_dir2_leaf *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_leaf *from3 = (struct xfs_dir3_leaf *)from;
+
+ to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
+ to->back = be32_to_cpu(from3->hdr.info.hdr.back);
+ to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
+ to->count = be16_to_cpu(from3->hdr.count);
+ to->stale = be16_to_cpu(from3->hdr.stale);
+ to->ents = from3->__ents;
+
+ ASSERT(to->magic == XFS_DIR3_LEAF1_MAGIC ||
+ to->magic == XFS_DIR3_LEAFN_MAGIC);
+ } else {
+ to->forw = be32_to_cpu(from->hdr.info.forw);
+ to->back = be32_to_cpu(from->hdr.info.back);
+ to->magic = be16_to_cpu(from->hdr.info.magic);
+ to->count = be16_to_cpu(from->hdr.count);
+ to->stale = be16_to_cpu(from->hdr.stale);
+ to->ents = from->__ents;
+
+ ASSERT(to->magic == XFS_DIR2_LEAF1_MAGIC ||
+ to->magic == XFS_DIR2_LEAFN_MAGIC);
+ }
+}
+
+void
+xfs_dir2_leaf_hdr_to_disk(
+ struct xfs_mount *mp,
+ struct xfs_dir2_leaf *to,
+ struct xfs_dir3_icleaf_hdr *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_leaf *to3 = (struct xfs_dir3_leaf *)to;
+
+ ASSERT(from->magic == XFS_DIR3_LEAF1_MAGIC ||
+ from->magic == XFS_DIR3_LEAFN_MAGIC);
+
+ to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
+ to3->hdr.info.hdr.back = cpu_to_be32(from->back);
+ to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
+ to3->hdr.count = cpu_to_be16(from->count);
+ to3->hdr.stale = cpu_to_be16(from->stale);
+ } else {
+ ASSERT(from->magic == XFS_DIR2_LEAF1_MAGIC ||
+ from->magic == XFS_DIR2_LEAFN_MAGIC);
+
+ to->hdr.info.forw = cpu_to_be32(from->forw);
+ to->hdr.info.back = cpu_to_be32(from->back);
+ to->hdr.info.magic = cpu_to_be16(from->magic);
+ to->hdr.count = cpu_to_be16(from->count);
+ to->hdr.stale = cpu_to_be16(from->stale);
+ }
+}
+
/*
* Check the internal consistency of a leaf1 block.
* Pop an assert if something is wrong.
@@ -43,7 +104,7 @@ xfs_dir3_leaf1_check(
struct xfs_dir2_leaf *leaf = bp->b_addr;
struct xfs_dir3_icleaf_hdr leafhdr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
if (leafhdr.magic == XFS_DIR3_LEAF1_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
@@ -52,7 +113,7 @@ xfs_dir3_leaf1_check(
} else if (leafhdr.magic != XFS_DIR2_LEAF1_MAGIC)
return __this_address;
- return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
+ return xfs_dir3_leaf_check_int(dp->i_mount, &leafhdr, leaf);
}
static inline void
@@ -76,31 +137,15 @@ xfs_dir3_leaf_check(
xfs_failaddr_t
xfs_dir3_leaf_check_int(
- struct xfs_mount *mp,
- struct xfs_inode *dp,
- struct xfs_dir3_icleaf_hdr *hdr,
- struct xfs_dir2_leaf *leaf)
+ struct xfs_mount *mp,
+ struct xfs_dir3_icleaf_hdr *hdr,
+ struct xfs_dir2_leaf *leaf)
{
- struct xfs_dir2_leaf_entry *ents;
- xfs_dir2_leaf_tail_t *ltp;
- int stale;
- int i;
- const struct xfs_dir_ops *ops;
- struct xfs_dir3_icleaf_hdr leafhdr;
- struct xfs_da_geometry *geo = mp->m_dir_geo;
-
- /*
- * we can be passed a null dp here from a verifier, so we need to go the
- * hard way to get them.
- */
- ops = xfs_dir_get_ops(mp, dp);
+ struct xfs_da_geometry *geo = mp->m_dir_geo;
+ xfs_dir2_leaf_tail_t *ltp;
+ int stale;
+ int i;
- if (!hdr) {
- ops->leaf_hdr_from_disk(&leafhdr, leaf);
- hdr = &leafhdr;
- }
-
- ents = ops->leaf_ents_p(leaf);
ltp = xfs_dir2_leaf_tail_p(geo, leaf);
/*
@@ -108,23 +153,23 @@ xfs_dir3_leaf_check_int(
* Should factor in the size of the bests table as well.
* We can deduce a value for that from di_size.
*/
- if (hdr->count > ops->leaf_max_ents(geo))
+ if (hdr->count > geo->leaf_max_ents)
return __this_address;
/* Leaves and bests don't overlap in leaf format. */
if ((hdr->magic == XFS_DIR2_LEAF1_MAGIC ||
hdr->magic == XFS_DIR3_LEAF1_MAGIC) &&
- (char *)&ents[hdr->count] > (char *)xfs_dir2_leaf_bests_p(ltp))
+ (char *)&hdr->ents[hdr->count] > (char *)xfs_dir2_leaf_bests_p(ltp))
return __this_address;
/* Check hash value order, count stale entries. */
for (i = stale = 0; i < hdr->count; i++) {
if (i + 1 < hdr->count) {
- if (be32_to_cpu(ents[i].hashval) >
- be32_to_cpu(ents[i + 1].hashval))
+ if (be32_to_cpu(hdr->ents[i].hashval) >
+ be32_to_cpu(hdr->ents[i + 1].hashval))
return __this_address;
}
- if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ if (hdr->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
if (hdr->stale != stale)
@@ -139,17 +184,18 @@ xfs_dir3_leaf_check_int(
*/
static xfs_failaddr_t
xfs_dir3_leaf_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_mount;
- struct xfs_dir2_leaf *leaf = bp->b_addr;
- xfs_failaddr_t fa;
+ struct xfs_mount *mp = bp->b_mount;
+ struct xfs_dir3_icleaf_hdr leafhdr;
+ xfs_failaddr_t fa;
fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
if (fa)
return fa;
- return xfs_dir3_leaf_check_int(mp, NULL, NULL, leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, bp->b_addr);
+ return xfs_dir3_leaf_check_int(mp, &leafhdr, bp->b_addr);
}
static void
@@ -216,13 +262,12 @@ xfs_dir3_leaf_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t fbno,
- xfs_daddr_t mappedbno,
struct xfs_buf **bpp)
{
int err;
- err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
- XFS_DATA_FORK, &xfs_dir3_leaf1_buf_ops);
+ err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
+ &xfs_dir3_leaf1_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAF1_BUF);
return err;
@@ -233,13 +278,12 @@ xfs_dir3_leafn_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t fbno,
- xfs_daddr_t mappedbno,
struct xfs_buf **bpp)
{
int err;
- err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
- XFS_DATA_FORK, &xfs_dir3_leafn_buf_ops);
+ err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
+ &xfs_dir3_leafn_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAFN_BUF);
return err;
@@ -311,7 +355,7 @@ xfs_dir3_leaf_get_buf(
bno < xfs_dir2_byte_to_db(args->geo, XFS_DIR2_FREE_OFFSET));
error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(args->geo, bno),
- -1, &bp, XFS_DATA_FORK);
+ &bp, XFS_DATA_FORK);
if (error)
return error;
@@ -346,7 +390,6 @@ xfs_dir2_block_to_leaf(
int needscan; /* need to rescan bestfree */
xfs_trans_t *tp; /* transaction pointer */
struct xfs_dir2_data_free *bf;
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_block_to_leaf(args);
@@ -375,24 +418,24 @@ xfs_dir2_block_to_leaf(
xfs_dir3_data_check(dp, dbp);
btp = xfs_dir2_block_tail_p(args->geo, hdr);
blp = xfs_dir2_block_leaf_p(btp);
- bf = dp->d_ops->data_bestfree_p(hdr);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
/*
* Set the counts in the leaf header.
*/
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
leafhdr.count = be32_to_cpu(btp->count);
leafhdr.stale = be32_to_cpu(btp->stale);
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, lbp);
/*
* Could compact these but I think we always do the conversion
* after squeezing out stale entries.
*/
- memcpy(ents, blp, be32_to_cpu(btp->count) * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(args, lbp, 0, leafhdr.count - 1);
+ memcpy(leafhdr.ents, blp,
+ be32_to_cpu(btp->count) * sizeof(struct xfs_dir2_leaf_entry));
+ xfs_dir3_leaf_log_ents(args, &leafhdr, lbp, 0, leafhdr.count - 1);
needscan = 0;
needlog = 1;
/*
@@ -415,7 +458,7 @@ xfs_dir2_block_to_leaf(
hdr->magic = cpu_to_be32(XFS_DIR3_DATA_MAGIC);
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
/*
* Set up leaf tail and bests table.
*/
@@ -594,7 +637,7 @@ xfs_dir2_leaf_addname(
trace_xfs_dir2_leaf_addname(args);
- error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, -1, &lbp);
+ error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, &lbp);
if (error)
return error;
@@ -607,10 +650,10 @@ xfs_dir2_leaf_addname(
index = xfs_dir2_leaf_search_hash(args, lbp);
leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
+ ents = leafhdr.ents;
bestsp = xfs_dir2_leaf_bests_p(ltp);
- length = dp->d_ops->data_entsize(args->namelen);
+ length = xfs_dir2_data_entsize(dp->i_mount, args->namelen);
/*
* See if there are any entries with the same hash value
@@ -773,7 +816,7 @@ xfs_dir2_leaf_addname(
else
xfs_dir3_leaf_log_bests(args, lbp, use_block, use_block);
hdr = dbp->b_addr;
- bf = dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
bestsp[use_block] = bf[0].length;
grown = 1;
} else {
@@ -783,13 +826,13 @@ xfs_dir2_leaf_addname(
*/
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo, use_block),
- -1, &dbp);
+ 0, &dbp);
if (error) {
xfs_trans_brelse(tp, lbp);
return error;
}
hdr = dbp->b_addr;
- bf = dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
grown = 0;
}
/*
@@ -815,14 +858,14 @@ xfs_dir2_leaf_addname(
dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
- dp->d_ops->data_put_ftype(dep, args->filetype);
- tagp = dp->d_ops->data_entry_tag_p(dep);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
+ tagp = xfs_dir2_data_entry_tag_p(dp->i_mount, dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
/*
* Need to scan fix up the bestfree table.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
/*
* Need to log the data block's header.
*/
@@ -852,9 +895,9 @@ xfs_dir2_leaf_addname(
/*
* Log the leaf fields and give up the buffers.
*/
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, lbp);
- xfs_dir3_leaf_log_ents(args, lbp, lfloglow, lfloghigh);
+ xfs_dir3_leaf_log_ents(args, &leafhdr, lbp, lfloglow, lfloghigh);
xfs_dir3_leaf_check(dp, lbp);
xfs_dir3_data_check(dp, dbp);
return 0;
@@ -874,7 +917,6 @@ xfs_dir3_leaf_compact(
xfs_dir2_leaf_t *leaf; /* leaf structure */
int loglow; /* first leaf entry to log */
int to; /* target leaf index */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_inode *dp = args->dp;
leaf = bp->b_addr;
@@ -884,9 +926,9 @@ xfs_dir3_leaf_compact(
/*
* Compress out the stale entries in place.
*/
- ents = dp->d_ops->leaf_ents_p(leaf);
for (from = to = 0, loglow = -1; from < leafhdr->count; from++) {
- if (ents[from].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ if (leafhdr->ents[from].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
continue;
/*
* Only actually copy the entries that are different.
@@ -894,7 +936,7 @@ xfs_dir3_leaf_compact(
if (from > to) {
if (loglow == -1)
loglow = to;
- ents[to] = ents[from];
+ leafhdr->ents[to] = leafhdr->ents[from];
}
to++;
}
@@ -905,10 +947,10 @@ xfs_dir3_leaf_compact(
leafhdr->count -= leafhdr->stale;
leafhdr->stale = 0;
- dp->d_ops->leaf_hdr_to_disk(leaf, leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, leafhdr);
xfs_dir3_leaf_log_header(args, bp);
if (loglow != -1)
- xfs_dir3_leaf_log_ents(args, bp, loglow, to - 1);
+ xfs_dir3_leaf_log_ents(args, leafhdr, bp, loglow, to - 1);
}
/*
@@ -1037,6 +1079,7 @@ xfs_dir3_leaf_log_bests(
void
xfs_dir3_leaf_log_ents(
struct xfs_da_args *args,
+ struct xfs_dir3_icleaf_hdr *hdr,
struct xfs_buf *bp,
int first,
int last)
@@ -1044,16 +1087,14 @@ xfs_dir3_leaf_log_ents(
xfs_dir2_leaf_entry_t *firstlep; /* pointer to first entry */
xfs_dir2_leaf_entry_t *lastlep; /* pointer to last entry */
struct xfs_dir2_leaf *leaf = bp->b_addr;
- struct xfs_dir2_leaf_entry *ents;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) ||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC));
- ents = args->dp->d_ops->leaf_ents_p(leaf);
- firstlep = &ents[first];
- lastlep = &ents[last];
+ firstlep = &hdr->ents[first];
+ lastlep = &hdr->ents[last];
xfs_trans_log_buf(args->trans, bp,
(uint)((char *)firstlep - (char *)leaf),
(uint)((char *)lastlep - (char *)leaf + sizeof(*lastlep) - 1));
@@ -1076,7 +1117,7 @@ xfs_dir3_leaf_log_header(
xfs_trans_log_buf(args->trans, bp,
(uint)((char *)&leaf->hdr - (char *)leaf),
- args->dp->d_ops->leaf_hdr_size - 1);
+ args->geo->leaf_hdr_size - 1);
}
/*
@@ -1115,28 +1156,27 @@ xfs_dir2_leaf_lookup(
int error; /* error return code */
int index; /* found entry index */
struct xfs_buf *lbp; /* leaf buffer */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
- struct xfs_dir2_leaf_entry *ents;
+ struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_leaf_lookup(args);
/*
* Look up name in the leaf block, returning both buffers and index.
*/
- if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) {
+ error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp, &leafhdr);
+ if (error)
return error;
- }
+
tp = args->trans;
dp = args->dp;
xfs_dir3_leaf_check(dp, lbp);
- leaf = lbp->b_addr;
- ents = dp->d_ops->leaf_ents_p(leaf);
+
/*
* Get to the leaf entry and contained data entry address.
*/
- lep = &ents[index];
+ lep = &leafhdr.ents[index];
/*
* Point to the data entry.
@@ -1148,7 +1188,7 @@ xfs_dir2_leaf_lookup(
* Return the found inode number & CI name if appropriate
*/
args->inumber = be64_to_cpu(dep->inumber);
- args->filetype = dp->d_ops->data_get_ftype(dep);
+ args->filetype = xfs_dir2_data_get_ftype(dp->i_mount, dep);
error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
xfs_trans_brelse(tp, dbp);
xfs_trans_brelse(tp, lbp);
@@ -1166,7 +1206,8 @@ xfs_dir2_leaf_lookup_int(
xfs_da_args_t *args, /* operation arguments */
struct xfs_buf **lbpp, /* out: leaf buffer */
int *indexp, /* out: index in leaf block */
- struct xfs_buf **dbpp) /* out: data buffer */
+ struct xfs_buf **dbpp, /* out: data buffer */
+ struct xfs_dir3_icleaf_hdr *leafhdr)
{
xfs_dir2_db_t curdb = -1; /* current data block number */
struct xfs_buf *dbp = NULL; /* data buffer */
@@ -1182,22 +1223,19 @@ xfs_dir2_leaf_lookup_int(
xfs_trans_t *tp; /* transaction pointer */
xfs_dir2_db_t cidb = -1; /* case match data block no. */
enum xfs_dacmp cmp; /* name compare result */
- struct xfs_dir2_leaf_entry *ents;
- struct xfs_dir3_icleaf_hdr leafhdr;
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
- error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, -1, &lbp);
+ error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, &lbp);
if (error)
return error;
*lbpp = lbp;
leaf = lbp->b_addr;
xfs_dir3_leaf_check(dp, lbp);
- ents = dp->d_ops->leaf_ents_p(leaf);
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, leafhdr, leaf);
/*
* Look for the first leaf entry with our hash value.
@@ -1207,8 +1245,9 @@ xfs_dir2_leaf_lookup_int(
* Loop over all the entries with the right hash value
* looking to match the name.
*/
- for (lep = &ents[index];
- index < leafhdr.count && be32_to_cpu(lep->hashval) == args->hashval;
+ for (lep = &leafhdr->ents[index];
+ index < leafhdr->count &&
+ be32_to_cpu(lep->hashval) == args->hashval;
lep++, index++) {
/*
* Skip over stale leaf entries.
@@ -1229,7 +1268,7 @@ xfs_dir2_leaf_lookup_int(
xfs_trans_brelse(tp, dbp);
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo, newdb),
- -1, &dbp);
+ 0, &dbp);
if (error) {
xfs_trans_brelse(tp, lbp);
return error;
@@ -1247,7 +1286,7 @@ xfs_dir2_leaf_lookup_int(
* and buffer. If it's the first case-insensitive match, store
* the index and buffer and continue looking for an exact match.
*/
- cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
+ cmp = xfs_dir2_compname(args, dep->name, dep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
args->cmpresult = cmp;
*indexp = index;
@@ -1271,7 +1310,7 @@ xfs_dir2_leaf_lookup_int(
xfs_trans_brelse(tp, dbp);
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo, cidb),
- -1, &dbp);
+ 0, &dbp);
if (error) {
xfs_trans_brelse(tp, lbp);
return error;
@@ -1297,6 +1336,7 @@ int /* error */
xfs_dir2_leaf_removename(
xfs_da_args_t *args) /* operation arguments */
{
+ struct xfs_da_geometry *geo = args->geo;
__be16 *bestsp; /* leaf block best freespace */
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t db; /* data block number */
@@ -1314,7 +1354,6 @@ xfs_dir2_leaf_removename(
int needscan; /* need to rescan data frees */
xfs_dir2_data_off_t oldbest; /* old value of best free */
struct xfs_dir2_data_free *bf; /* bestfree table */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_leaf_removename(args);
@@ -1322,51 +1361,54 @@ xfs_dir2_leaf_removename(
/*
* Lookup the leaf entry, get the leaf and data blocks read in.
*/
- if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) {
+ error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp, &leafhdr);
+ if (error)
return error;
- }
+
dp = args->dp;
leaf = lbp->b_addr;
hdr = dbp->b_addr;
xfs_dir3_data_check(dp, dbp);
- bf = dp->d_ops->data_bestfree_p(hdr);
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
+
/*
* Point to the leaf entry, use that to point to the data entry.
*/
- lep = &ents[index];
- db = xfs_dir2_dataptr_to_db(args->geo, be32_to_cpu(lep->address));
+ lep = &leafhdr.ents[index];
+ db = xfs_dir2_dataptr_to_db(geo, be32_to_cpu(lep->address));
dep = (xfs_dir2_data_entry_t *)((char *)hdr +
- xfs_dir2_dataptr_to_off(args->geo, be32_to_cpu(lep->address)));
+ xfs_dir2_dataptr_to_off(geo, be32_to_cpu(lep->address)));
needscan = needlog = 0;
oldbest = be16_to_cpu(bf[0].length);
- ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
+ ltp = xfs_dir2_leaf_tail_p(geo, leaf);
bestsp = xfs_dir2_leaf_bests_p(ltp);
- if (be16_to_cpu(bestsp[db]) != oldbest)
+ if (be16_to_cpu(bestsp[db]) != oldbest) {
+ xfs_buf_corruption_error(lbp);
return -EFSCORRUPTED;
+ }
/*
* Mark the former data entry unused.
*/
xfs_dir2_data_make_free(args, dbp,
(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
- dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
+ xfs_dir2_data_entsize(dp->i_mount, dep->namelen), &needlog,
+ &needscan);
/*
* We just mark the leaf entry stale by putting a null in it.
*/
leafhdr.stale++;
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, lbp);
lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
- xfs_dir3_leaf_log_ents(args, lbp, index, index);
+ xfs_dir3_leaf_log_ents(args, &leafhdr, lbp, index, index);
/*
* Scan the freespace in the data block again if necessary,
* log the data block header if necessary.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, dbp);
/*
@@ -1382,8 +1424,8 @@ xfs_dir2_leaf_removename(
* If the data block is now empty then get rid of the data block.
*/
if (be16_to_cpu(bf[0].length) ==
- args->geo->blksize - dp->d_ops->data_entry_offset) {
- ASSERT(db != args->geo->datablk);
+ geo->blksize - geo->data_entry_offset) {
+ ASSERT(db != geo->datablk);
if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
/*
* Nope, can't get rid of it because it caused
@@ -1425,7 +1467,7 @@ xfs_dir2_leaf_removename(
/*
* If the data block was not the first one, drop it.
*/
- else if (db != args->geo->datablk)
+ else if (db != geo->datablk)
dbp = NULL;
xfs_dir3_leaf_check(dp, lbp);
@@ -1448,26 +1490,24 @@ xfs_dir2_leaf_replace(
int error; /* error return code */
int index; /* index of leaf entry */
struct xfs_buf *lbp; /* leaf buffer */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
- struct xfs_dir2_leaf_entry *ents;
+ struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_leaf_replace(args);
/*
* Look up the entry.
*/
- if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) {
+ error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp, &leafhdr);
+ if (error)
return error;
- }
+
dp = args->dp;
- leaf = lbp->b_addr;
- ents = dp->d_ops->leaf_ents_p(leaf);
/*
* Point to the leaf entry, get data address from it.
*/
- lep = &ents[index];
+ lep = &leafhdr.ents[index];
/*
* Point to the data entry.
*/
@@ -1479,7 +1519,7 @@ xfs_dir2_leaf_replace(
* Put the new inode number in, log it.
*/
dep->inumber = cpu_to_be64(args->inumber);
- dp->d_ops->data_put_ftype(dep, args->filetype);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
tp = args->trans;
xfs_dir2_data_log_entry(args, dbp, dep);
xfs_dir3_leaf_check(dp, lbp);
@@ -1501,21 +1541,17 @@ xfs_dir2_leaf_search_hash(
xfs_dahash_t hashwant; /* hash value looking for */
int high; /* high leaf index */
int low; /* low leaf index */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
int mid=0; /* current leaf index */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
- leaf = lbp->b_addr;
- ents = args->dp->d_ops->leaf_ents_p(leaf);
- args->dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(args->dp->i_mount, &leafhdr, lbp->b_addr);
/*
* Note, the table cannot be empty, so we have to go through the loop.
* Binary search the leaf entries looking for our hash value.
*/
- for (lep = ents, low = 0, high = leafhdr.count - 1,
+ for (lep = leafhdr.ents, low = 0, high = leafhdr.count - 1,
hashwant = args->hashval;
low <= high; ) {
mid = (low + high) >> 1;
@@ -1552,6 +1588,7 @@ xfs_dir2_leaf_trim_data(
struct xfs_buf *lbp, /* leaf buffer */
xfs_dir2_db_t db) /* data block number */
{
+ struct xfs_da_geometry *geo = args->geo;
__be16 *bestsp; /* leaf bests table */
struct xfs_buf *dbp; /* data block buffer */
xfs_inode_t *dp; /* incore directory inode */
@@ -1565,23 +1602,23 @@ xfs_dir2_leaf_trim_data(
/*
* Read the offending data block. We need its buffer.
*/
- error = xfs_dir3_data_read(tp, dp, xfs_dir2_db_to_da(args->geo, db),
- -1, &dbp);
+ error = xfs_dir3_data_read(tp, dp, xfs_dir2_db_to_da(geo, db), 0, &dbp);
if (error)
return error;
leaf = lbp->b_addr;
- ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
+ ltp = xfs_dir2_leaf_tail_p(geo, leaf);
#ifdef DEBUG
{
struct xfs_dir2_data_hdr *hdr = dbp->b_addr;
- struct xfs_dir2_data_free *bf = dp->d_ops->data_bestfree_p(hdr);
+ struct xfs_dir2_data_free *bf =
+ xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC));
ASSERT(be16_to_cpu(bf[0].length) ==
- args->geo->blksize - dp->d_ops->data_entry_offset);
+ geo->blksize - geo->data_entry_offset);
ASSERT(db == be32_to_cpu(ltp->bestcount) - 1);
}
#endif
@@ -1639,7 +1676,6 @@ xfs_dir2_node_to_leaf(
int error; /* error return code */
struct xfs_buf *fbp; /* buffer for freespace block */
xfs_fileoff_t fo; /* freespace file offset */
- xfs_dir2_free_t *free; /* freespace structure */
struct xfs_buf *lbp; /* buffer for leaf block */
xfs_dir2_leaf_tail_t *ltp; /* tail of leaf structure */
xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -1697,7 +1733,7 @@ xfs_dir2_node_to_leaf(
return 0;
lbp = state->path.blk[0].bp;
leaf = lbp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
@@ -1708,8 +1744,7 @@ xfs_dir2_node_to_leaf(
error = xfs_dir2_free_read(tp, dp, args->geo->freeblk, &fbp);
if (error)
return error;
- free = fbp->b_addr;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ xfs_dir2_free_hdr_from_disk(mp, &freehdr, fbp->b_addr);
ASSERT(!freehdr.firstdb);
@@ -1743,10 +1778,10 @@ xfs_dir2_node_to_leaf(
/*
* Set up the leaf bests table.
*/
- memcpy(xfs_dir2_leaf_bests_p(ltp), dp->d_ops->free_bests_p(free),
+ memcpy(xfs_dir2_leaf_bests_p(ltp), freehdr.bests,
freehdr.nvalid * sizeof(xfs_dir2_data_off_t));
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(mp, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, lbp);
xfs_dir3_leaf_log_bests(args, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
xfs_dir3_leaf_log_tail(args, lbp);
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index afcc6642690a..a0cc5e240306 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -32,8 +32,25 @@ static void xfs_dir2_leafn_rebalance(xfs_da_state_t *state,
static int xfs_dir2_leafn_remove(xfs_da_args_t *args, struct xfs_buf *bp,
int index, xfs_da_state_blk_t *dblk,
int *rval);
-static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
- xfs_da_state_blk_t *fblk);
+
+/*
+ * Convert data space db to the corresponding free db.
+ */
+static xfs_dir2_db_t
+xfs_dir2_db_to_fdb(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
+{
+ return xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET) +
+ (db / geo->free_max_bests);
+}
+
+/*
+ * Convert data space db to the corresponding index in a free db.
+ */
+static int
+xfs_dir2_db_to_fdindex(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
+{
+ return db % geo->free_max_bests;
+}
/*
* Check internal consistency of a leafn block.
@@ -47,7 +64,7 @@ xfs_dir3_leafn_check(
struct xfs_dir2_leaf *leaf = bp->b_addr;
struct xfs_dir3_icleaf_hdr leafhdr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
if (leafhdr.magic == XFS_DIR3_LEAFN_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
@@ -56,7 +73,7 @@ xfs_dir3_leafn_check(
} else if (leafhdr.magic != XFS_DIR2_LEAFN_MAGIC)
return __this_address;
- return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
+ return xfs_dir3_leaf_check_int(dp->i_mount, &leafhdr, leaf);
}
static inline void
@@ -162,10 +179,9 @@ xfs_dir3_free_header_check(
struct xfs_buf *bp)
{
struct xfs_mount *mp = dp->i_mount;
+ int maxbests = mp->m_dir_geo->free_max_bests;
unsigned int firstdb;
- int maxbests;
- maxbests = dp->d_ops->free_max_bests(mp->m_dir_geo);
firstdb = (xfs_dir2_da_to_db(mp->m_dir_geo, fbno) -
xfs_dir2_byte_to_db(mp->m_dir_geo, XFS_DIR2_FREE_OFFSET)) *
maxbests;
@@ -196,14 +212,14 @@ __xfs_dir3_free_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t fbno,
- xfs_daddr_t mappedbno,
+ unsigned int flags,
struct xfs_buf **bpp)
{
xfs_failaddr_t fa;
int err;
- err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
- XFS_DATA_FORK, &xfs_dir3_free_buf_ops);
+ err = xfs_da_read_buf(tp, dp, fbno, flags, bpp, XFS_DATA_FORK,
+ &xfs_dir3_free_buf_ops);
if (err || !*bpp)
return err;
@@ -222,6 +238,58 @@ __xfs_dir3_free_read(
return 0;
}
+void
+xfs_dir2_free_hdr_from_disk(
+ struct xfs_mount *mp,
+ struct xfs_dir3_icfree_hdr *to,
+ struct xfs_dir2_free *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_free *from3 = (struct xfs_dir3_free *)from;
+
+ to->magic = be32_to_cpu(from3->hdr.hdr.magic);
+ to->firstdb = be32_to_cpu(from3->hdr.firstdb);
+ to->nvalid = be32_to_cpu(from3->hdr.nvalid);
+ to->nused = be32_to_cpu(from3->hdr.nused);
+ to->bests = from3->bests;
+
+ ASSERT(to->magic == XFS_DIR3_FREE_MAGIC);
+ } else {
+ to->magic = be32_to_cpu(from->hdr.magic);
+ to->firstdb = be32_to_cpu(from->hdr.firstdb);
+ to->nvalid = be32_to_cpu(from->hdr.nvalid);
+ to->nused = be32_to_cpu(from->hdr.nused);
+ to->bests = from->bests;
+
+ ASSERT(to->magic == XFS_DIR2_FREE_MAGIC);
+ }
+}
+
+static void
+xfs_dir2_free_hdr_to_disk(
+ struct xfs_mount *mp,
+ struct xfs_dir2_free *to,
+ struct xfs_dir3_icfree_hdr *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_free *to3 = (struct xfs_dir3_free *)to;
+
+ ASSERT(from->magic == XFS_DIR3_FREE_MAGIC);
+
+ to3->hdr.hdr.magic = cpu_to_be32(from->magic);
+ to3->hdr.firstdb = cpu_to_be32(from->firstdb);
+ to3->hdr.nvalid = cpu_to_be32(from->nvalid);
+ to3->hdr.nused = cpu_to_be32(from->nused);
+ } else {
+ ASSERT(from->magic == XFS_DIR2_FREE_MAGIC);
+
+ to->hdr.magic = cpu_to_be32(from->magic);
+ to->hdr.firstdb = cpu_to_be32(from->firstdb);
+ to->hdr.nvalid = cpu_to_be32(from->nvalid);
+ to->hdr.nused = cpu_to_be32(from->nused);
+ }
+}
+
int
xfs_dir2_free_read(
struct xfs_trans *tp,
@@ -229,7 +297,7 @@ xfs_dir2_free_read(
xfs_dablk_t fbno,
struct xfs_buf **bpp)
{
- return __xfs_dir3_free_read(tp, dp, fbno, -1, bpp);
+ return __xfs_dir3_free_read(tp, dp, fbno, 0, bpp);
}
static int
@@ -239,7 +307,7 @@ xfs_dir2_free_try_read(
xfs_dablk_t fbno,
struct xfs_buf **bpp)
{
- return __xfs_dir3_free_read(tp, dp, fbno, -2, bpp);
+ return __xfs_dir3_free_read(tp, dp, fbno, XFS_DABUF_MAP_HOLE_OK, bpp);
}
static int
@@ -256,7 +324,7 @@ xfs_dir3_free_get_buf(
struct xfs_dir3_icfree_hdr hdr;
error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(args->geo, fbno),
- -1, &bp, XFS_DATA_FORK);
+ &bp, XFS_DATA_FORK);
if (error)
return error;
@@ -280,7 +348,7 @@ xfs_dir3_free_get_buf(
uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_meta_uuid);
} else
hdr.magic = XFS_DIR2_FREE_MAGIC;
- dp->d_ops->free_hdr_to_disk(bp->b_addr, &hdr);
+ xfs_dir2_free_hdr_to_disk(mp, bp->b_addr, &hdr);
*bpp = bp;
return 0;
}
@@ -291,21 +359,19 @@ xfs_dir3_free_get_buf(
STATIC void
xfs_dir2_free_log_bests(
struct xfs_da_args *args,
+ struct xfs_dir3_icfree_hdr *hdr,
struct xfs_buf *bp,
int first, /* first entry to log */
int last) /* last entry to log */
{
- xfs_dir2_free_t *free; /* freespace structure */
- __be16 *bests;
+ struct xfs_dir2_free *free = bp->b_addr;
- free = bp->b_addr;
- bests = args->dp->d_ops->free_bests_p(free);
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
xfs_trans_log_buf(args->trans, bp,
- (uint)((char *)&bests[first] - (char *)free),
- (uint)((char *)&bests[last] - (char *)free +
- sizeof(bests[0]) - 1));
+ (char *)&hdr->bests[first] - (char *)free,
+ (char *)&hdr->bests[last] - (char *)free +
+ sizeof(hdr->bests[0]) - 1);
}
/*
@@ -324,7 +390,7 @@ xfs_dir2_free_log_header(
free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
#endif
xfs_trans_log_buf(args->trans, bp, 0,
- args->dp->d_ops->free_hdr_size - 1);
+ args->geo->free_hdr_size - 1);
}
/*
@@ -341,14 +407,12 @@ xfs_dir2_leaf_to_node(
int error; /* error return value */
struct xfs_buf *fbp; /* freespace buffer */
xfs_dir2_db_t fdb; /* freespace block number */
- xfs_dir2_free_t *free; /* freespace structure */
__be16 *from; /* pointer to freespace entry */
int i; /* leaf freespace index */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
int n; /* count of live freespc ents */
xfs_dir2_data_off_t off; /* freespace entry value */
- __be16 *to; /* pointer to freespace entry */
xfs_trans_t *tp; /* transaction pointer */
struct xfs_dir3_icfree_hdr freehdr;
@@ -370,24 +434,25 @@ xfs_dir2_leaf_to_node(
if (error)
return error;
- free = fbp->b_addr;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &freehdr, fbp->b_addr);
leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
if (be32_to_cpu(ltp->bestcount) >
- (uint)dp->i_d.di_size / args->geo->blksize)
+ (uint)dp->i_d.di_size / args->geo->blksize) {
+ xfs_buf_corruption_error(lbp);
return -EFSCORRUPTED;
+ }
/*
* Copy freespace entries from the leaf block to the new block.
* Count active entries.
*/
from = xfs_dir2_leaf_bests_p(ltp);
- to = dp->d_ops->free_bests_p(free);
- for (i = n = 0; i < be32_to_cpu(ltp->bestcount); i++, from++, to++) {
- if ((off = be16_to_cpu(*from)) != NULLDATAOFF)
+ for (i = n = 0; i < be32_to_cpu(ltp->bestcount); i++, from++) {
+ off = be16_to_cpu(*from);
+ if (off != NULLDATAOFF)
n++;
- *to = cpu_to_be16(off);
+ freehdr.bests[i] = cpu_to_be16(off);
}
/*
@@ -396,8 +461,8 @@ xfs_dir2_leaf_to_node(
freehdr.nused = n;
freehdr.nvalid = be32_to_cpu(ltp->bestcount);
- dp->d_ops->free_hdr_to_disk(fbp->b_addr, &freehdr);
- xfs_dir2_free_log_bests(args, fbp, 0, freehdr.nvalid - 1);
+ xfs_dir2_free_hdr_to_disk(dp->i_mount, fbp->b_addr, &freehdr);
+ xfs_dir2_free_log_bests(args, &freehdr, fbp, 0, freehdr.nvalid - 1);
xfs_dir2_free_log_header(args, fbp);
/*
@@ -440,15 +505,17 @@ xfs_dir2_leafn_add(
trace_xfs_dir2_leafn_add(args, index);
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
+ ents = leafhdr.ents;
/*
* Quick check just to make sure we are not going to index
* into other peoples memory
*/
- if (index < 0)
+ if (index < 0) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* If there are already the maximum number of leaf entries in
@@ -457,7 +524,7 @@ xfs_dir2_leafn_add(
* a compact.
*/
- if (leafhdr.count == dp->d_ops->leaf_max_ents(args->geo)) {
+ if (leafhdr.count == args->geo->leaf_max_ents) {
if (!leafhdr.stale)
return -ENOSPC;
compact = leafhdr.stale > 1;
@@ -495,9 +562,9 @@ xfs_dir2_leafn_add(
lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(args->geo,
args->blkno, args->index));
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, bp);
- xfs_dir3_leaf_log_ents(args, bp, lfloglow, lfloghigh);
+ xfs_dir3_leaf_log_ents(args, &leafhdr, bp, lfloglow, lfloghigh);
xfs_dir3_leaf_check(dp, bp);
return 0;
}
@@ -511,10 +578,9 @@ xfs_dir2_free_hdr_check(
{
struct xfs_dir3_icfree_hdr hdr;
- dp->d_ops->free_hdr_from_disk(&hdr, bp->b_addr);
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &hdr, bp->b_addr);
- ASSERT((hdr.firstdb %
- dp->d_ops->free_max_bests(dp->i_mount->m_dir_geo)) == 0);
+ ASSERT((hdr.firstdb % dp->i_mount->m_dir_geo->free_max_bests) == 0);
ASSERT(hdr.firstdb <= db);
ASSERT(db < hdr.firstdb + hdr.nvalid);
}
@@ -532,11 +598,9 @@ xfs_dir2_leaf_lasthash(
struct xfs_buf *bp, /* leaf buffer */
int *count) /* count of entries in leaf */
{
- struct xfs_dir2_leaf *leaf = bp->b_addr;
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, bp->b_addr);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC ||
@@ -547,9 +611,7 @@ xfs_dir2_leaf_lasthash(
*count = leafhdr.count;
if (!leafhdr.count)
return 0;
-
- ents = dp->d_ops->leaf_ents_p(leaf);
- return be32_to_cpu(ents[leafhdr.count - 1].hashval);
+ return be32_to_cpu(leafhdr.ents[leafhdr.count - 1].hashval);
}
/*
@@ -578,15 +640,13 @@ xfs_dir2_leafn_lookup_for_addname(
xfs_dir2_db_t newdb; /* new data block number */
xfs_dir2_db_t newfdb; /* new free block number */
xfs_trans_t *tp; /* transaction pointer */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
leaf = bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, leaf);
xfs_dir3_leaf_check(dp, bp);
ASSERT(leafhdr.count > 0);
@@ -606,11 +666,11 @@ xfs_dir2_leafn_lookup_for_addname(
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
}
- length = dp->d_ops->data_entsize(args->namelen);
+ length = xfs_dir2_data_entsize(mp, args->namelen);
/*
* Loop over leaf entries with the right hash value.
*/
- for (lep = &ents[index];
+ for (lep = &leafhdr.ents[index];
index < leafhdr.count && be32_to_cpu(lep->hashval) == args->hashval;
lep++, index++) {
/*
@@ -632,14 +692,14 @@ xfs_dir2_leafn_lookup_for_addname(
* in hand, take a look at it.
*/
if (newdb != curdb) {
- __be16 *bests;
+ struct xfs_dir3_icfree_hdr freehdr;
curdb = newdb;
/*
* Convert the data block to the free block
* holding its freespace information.
*/
- newfdb = dp->d_ops->db_to_fdb(args->geo, newdb);
+ newfdb = xfs_dir2_db_to_fdb(args->geo, newdb);
/*
* If it's not the one we have in hand, read it in.
*/
@@ -663,20 +723,20 @@ xfs_dir2_leafn_lookup_for_addname(
/*
* Get the index for our entry.
*/
- fi = dp->d_ops->db_to_fdindex(args->geo, curdb);
+ fi = xfs_dir2_db_to_fdindex(args->geo, curdb);
/*
* If it has room, return it.
*/
- bests = dp->d_ops->free_bests_p(free);
- if (unlikely(bests[fi] == cpu_to_be16(NULLDATAOFF))) {
- XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
- XFS_ERRLEVEL_LOW, mp);
+ xfs_dir2_free_hdr_from_disk(mp, &freehdr, free);
+ if (XFS_IS_CORRUPT(mp,
+ freehdr.bests[fi] ==
+ cpu_to_be16(NULLDATAOFF))) {
if (curfdb != newfdb)
xfs_trans_brelse(tp, curbp);
return -EFSCORRUPTED;
}
curfdb = newfdb;
- if (be16_to_cpu(bests[fi]) >= length)
+ if (be16_to_cpu(freehdr.bests[fi]) >= length)
goto out;
}
}
@@ -730,18 +790,19 @@ xfs_dir2_leafn_lookup_for_entry(
xfs_dir2_db_t newdb; /* new data block number */
xfs_trans_t *tp; /* transaction pointer */
enum xfs_dacmp cmp; /* comparison result */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
leaf = bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, leaf);
xfs_dir3_leaf_check(dp, bp);
- ASSERT(leafhdr.count > 0);
+ if (leafhdr.count <= 0) {
+ xfs_buf_corruption_error(bp);
+ return -EFSCORRUPTED;
+ }
/*
* Look up the hash value in the leaf entries.
@@ -757,7 +818,7 @@ xfs_dir2_leafn_lookup_for_entry(
/*
* Loop over leaf entries with the right hash value.
*/
- for (lep = &ents[index];
+ for (lep = &leafhdr.ents[index];
index < leafhdr.count && be32_to_cpu(lep->hashval) == args->hashval;
lep++, index++) {
/*
@@ -796,7 +857,7 @@ xfs_dir2_leafn_lookup_for_entry(
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo,
newdb),
- -1, &curbp);
+ 0, &curbp);
if (error)
return error;
}
@@ -814,7 +875,7 @@ xfs_dir2_leafn_lookup_for_entry(
* EEXIST immediately. If it's the first case-insensitive
* match, store the block & inode number and continue looking.
*/
- cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
+ cmp = xfs_dir2_compname(args, dep->name, dep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
/* If there is a CI match block, drop it */
if (args->cmpresult != XFS_CMP_DIFFERENT &&
@@ -822,7 +883,7 @@ xfs_dir2_leafn_lookup_for_entry(
xfs_trans_brelse(tp, state->extrablk.bp);
args->cmpresult = cmp;
args->inumber = be64_to_cpu(dep->inumber);
- args->filetype = dp->d_ops->data_get_ftype(dep);
+ args->filetype = xfs_dir2_data_get_ftype(mp, dep);
*indexp = index;
state->extravalid = 1;
state->extrablk.bp = curbp;
@@ -912,7 +973,7 @@ xfs_dir3_leafn_moveents(
if (start_d < dhdr->count) {
memmove(&dents[start_d + count], &dents[start_d],
(dhdr->count - start_d) * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(args, bp_d, start_d + count,
+ xfs_dir3_leaf_log_ents(args, dhdr, bp_d, start_d + count,
count + dhdr->count - 1);
}
/*
@@ -934,7 +995,7 @@ xfs_dir3_leafn_moveents(
*/
memcpy(&dents[start_d], &sents[start_s],
count * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(args, bp_d, start_d, start_d + count - 1);
+ xfs_dir3_leaf_log_ents(args, dhdr, bp_d, start_d, start_d + count - 1);
/*
* If there are source entries after the ones we copied,
@@ -943,7 +1004,8 @@ xfs_dir3_leafn_moveents(
if (start_s + count < shdr->count) {
memmove(&sents[start_s], &sents[start_s + count],
count * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(args, bp_s, start_s, start_s + count - 1);
+ xfs_dir3_leaf_log_ents(args, shdr, bp_s, start_s,
+ start_s + count - 1);
}
/*
@@ -972,10 +1034,10 @@ xfs_dir2_leafn_order(
struct xfs_dir3_icleaf_hdr hdr1;
struct xfs_dir3_icleaf_hdr hdr2;
- dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
- dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf2);
- ents1 = dp->d_ops->leaf_ents_p(leaf1);
- ents2 = dp->d_ops->leaf_ents_p(leaf2);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr1, leaf1);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr2, leaf2);
+ ents1 = hdr1.ents;
+ ents2 = hdr2.ents;
if (hdr1.count > 0 && hdr2.count > 0 &&
(be32_to_cpu(ents2[0].hashval) < be32_to_cpu(ents1[0].hashval) ||
@@ -1025,10 +1087,10 @@ xfs_dir2_leafn_rebalance(
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
- dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf2);
- ents1 = dp->d_ops->leaf_ents_p(leaf1);
- ents2 = dp->d_ops->leaf_ents_p(leaf2);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr1, leaf1);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr2, leaf2);
+ ents1 = hdr1.ents;
+ ents2 = hdr2.ents;
oldsum = hdr1.count + hdr2.count;
#if defined(DEBUG) || defined(XFS_WARN)
@@ -1074,8 +1136,8 @@ xfs_dir2_leafn_rebalance(
ASSERT(hdr1.stale + hdr2.stale == oldstale);
/* log the changes made when moving the entries */
- dp->d_ops->leaf_hdr_to_disk(leaf1, &hdr1);
- dp->d_ops->leaf_hdr_to_disk(leaf2, &hdr2);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf1, &hdr1);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf2, &hdr2);
xfs_dir3_leaf_log_header(args, blk1->bp);
xfs_dir3_leaf_log_header(args, blk2->bp);
@@ -1121,19 +1183,17 @@ xfs_dir3_data_block_free(
int longest)
{
int logfree = 0;
- __be16 *bests;
struct xfs_dir3_icfree_hdr freehdr;
struct xfs_inode *dp = args->dp;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
- bests = dp->d_ops->free_bests_p(free);
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &freehdr, free);
if (hdr) {
/*
* Data block is not empty, just set the free entry to the new
* value.
*/
- bests[findex] = cpu_to_be16(longest);
- xfs_dir2_free_log_bests(args, fbp, findex, findex);
+ freehdr.bests[findex] = cpu_to_be16(longest);
+ xfs_dir2_free_log_bests(args, &freehdr, fbp, findex, findex);
return 0;
}
@@ -1149,18 +1209,18 @@ xfs_dir3_data_block_free(
int i; /* free entry index */
for (i = findex - 1; i >= 0; i--) {
- if (bests[i] != cpu_to_be16(NULLDATAOFF))
+ if (freehdr.bests[i] != cpu_to_be16(NULLDATAOFF))
break;
}
freehdr.nvalid = i + 1;
logfree = 0;
} else {
/* Not the last entry, just punch it out. */
- bests[findex] = cpu_to_be16(NULLDATAOFF);
+ freehdr.bests[findex] = cpu_to_be16(NULLDATAOFF);
logfree = 1;
}
- dp->d_ops->free_hdr_to_disk(free, &freehdr);
+ xfs_dir2_free_hdr_to_disk(dp->i_mount, free, &freehdr);
xfs_dir2_free_log_header(args, fbp);
/*
@@ -1185,7 +1245,7 @@ xfs_dir3_data_block_free(
/* Log the free entry that changed, unless we got rid of it. */
if (logfree)
- xfs_dir2_free_log_bests(args, fbp, findex, findex);
+ xfs_dir2_free_log_bests(args, &freehdr, fbp, findex, findex);
return 0;
}
@@ -1202,6 +1262,7 @@ xfs_dir2_leafn_remove(
xfs_da_state_blk_t *dblk, /* data block */
int *rval) /* resulting block needs join */
{
+ struct xfs_da_geometry *geo = args->geo;
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t db; /* data block number */
struct xfs_buf *dbp; /* data block buffer */
@@ -1216,27 +1277,25 @@ xfs_dir2_leafn_remove(
xfs_trans_t *tp; /* transaction pointer */
struct xfs_dir2_data_free *bf; /* bestfree table */
struct xfs_dir3_icleaf_hdr leafhdr;
- struct xfs_dir2_leaf_entry *ents;
trace_xfs_dir2_leafn_remove(args, index);
dp = args->dp;
tp = args->trans;
leaf = bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
/*
* Point to the entry we're removing.
*/
- lep = &ents[index];
+ lep = &leafhdr.ents[index];
/*
* Extract the data block and offset from the entry.
*/
- db = xfs_dir2_dataptr_to_db(args->geo, be32_to_cpu(lep->address));
+ db = xfs_dir2_dataptr_to_db(geo, be32_to_cpu(lep->address));
ASSERT(dblk->blkno == db);
- off = xfs_dir2_dataptr_to_off(args->geo, be32_to_cpu(lep->address));
+ off = xfs_dir2_dataptr_to_off(geo, be32_to_cpu(lep->address));
ASSERT(dblk->index == off);
/*
@@ -1244,11 +1303,11 @@ xfs_dir2_leafn_remove(
* Log the leaf block changes.
*/
leafhdr.stale++;
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, bp);
lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
- xfs_dir3_leaf_log_ents(args, bp, index, index);
+ xfs_dir3_leaf_log_ents(args, &leafhdr, bp, index, index);
/*
* Make the data entry free. Keep track of the longest freespace
@@ -1257,17 +1316,18 @@ xfs_dir2_leafn_remove(
dbp = dblk->bp;
hdr = dbp->b_addr;
dep = (xfs_dir2_data_entry_t *)((char *)hdr + off);
- bf = dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
longest = be16_to_cpu(bf[0].length);
needlog = needscan = 0;
xfs_dir2_data_make_free(args, dbp, off,
- dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
+ xfs_dir2_data_entsize(dp->i_mount, dep->namelen), &needlog,
+ &needscan);
/*
* Rescan the data block freespaces for bestfree.
* Log the data block header if needed.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, dbp);
xfs_dir3_data_check(dp, dbp);
@@ -1286,9 +1346,8 @@ xfs_dir2_leafn_remove(
* Convert the data block number to a free block,
* read in the free block.
*/
- fdb = dp->d_ops->db_to_fdb(args->geo, db);
- error = xfs_dir2_free_read(tp, dp,
- xfs_dir2_db_to_da(args->geo, fdb),
+ fdb = xfs_dir2_db_to_fdb(geo, db);
+ error = xfs_dir2_free_read(tp, dp, xfs_dir2_db_to_da(geo, fdb),
&fbp);
if (error)
return error;
@@ -1296,23 +1355,22 @@ xfs_dir2_leafn_remove(
#ifdef DEBUG
{
struct xfs_dir3_icfree_hdr freehdr;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
- ASSERT(freehdr.firstdb == dp->d_ops->free_max_bests(args->geo) *
- (fdb - xfs_dir2_byte_to_db(args->geo,
- XFS_DIR2_FREE_OFFSET)));
+
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &freehdr, free);
+ ASSERT(freehdr.firstdb == geo->free_max_bests *
+ (fdb - xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET)));
}
#endif
/*
* Calculate which entry we need to fix.
*/
- findex = dp->d_ops->db_to_fdindex(args->geo, db);
+ findex = xfs_dir2_db_to_fdindex(geo, db);
longest = be16_to_cpu(bf[0].length);
/*
* If the data block is now empty we can get rid of it
* (usually).
*/
- if (longest == args->geo->blksize -
- dp->d_ops->data_entry_offset) {
+ if (longest == geo->blksize - geo->data_entry_offset) {
/*
* Try to punch out the data block.
*/
@@ -1344,9 +1402,9 @@ xfs_dir2_leafn_remove(
* Return indication of whether this leaf block is empty enough
* to justify trying to join it with a neighbor.
*/
- *rval = (dp->d_ops->leaf_hdr_size +
- (uint)sizeof(ents[0]) * (leafhdr.count - leafhdr.stale)) <
- args->geo->magicpct;
+ *rval = (geo->leaf_hdr_size +
+ (uint)sizeof(leafhdr.ents[0]) * (leafhdr.count - leafhdr.stale)) <
+ geo->magicpct;
return 0;
}
@@ -1445,12 +1503,12 @@ xfs_dir2_leafn_toosmall(
*/
blk = &state->path.blk[state->path.active - 1];
leaf = blk->bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
+ ents = leafhdr.ents;
xfs_dir3_leaf_check(dp, blk->bp);
count = leafhdr.count - leafhdr.stale;
- bytes = dp->d_ops->leaf_hdr_size + count * sizeof(ents[0]);
+ bytes = state->args->geo->leaf_hdr_size + count * sizeof(ents[0]);
if (bytes > (state->args->geo->blksize >> 1)) {
/*
* Blk over 50%, don't try to join.
@@ -1495,8 +1553,7 @@ xfs_dir2_leafn_toosmall(
/*
* Read the sibling leaf block.
*/
- error = xfs_dir3_leafn_read(state->args->trans, dp,
- blkno, -1, &bp);
+ error = xfs_dir3_leafn_read(state->args->trans, dp, blkno, &bp);
if (error)
return error;
@@ -1508,8 +1565,8 @@ xfs_dir2_leafn_toosmall(
(state->args->geo->blksize >> 2);
leaf = bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr2, leaf);
+ ents = hdr2.ents;
count += hdr2.count - hdr2.stale;
bytes -= count * sizeof(ents[0]);
@@ -1571,10 +1628,10 @@ xfs_dir2_leafn_unbalance(
drop_leaf = drop_blk->bp->b_addr;
save_leaf = save_blk->bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&savehdr, save_leaf);
- dp->d_ops->leaf_hdr_from_disk(&drophdr, drop_leaf);
- sents = dp->d_ops->leaf_ents_p(save_leaf);
- dents = dp->d_ops->leaf_ents_p(drop_leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &savehdr, save_leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &drophdr, drop_leaf);
+ sents = savehdr.ents;
+ dents = drophdr.ents;
/*
* If there are any stale leaf entries, take this opportunity
@@ -1600,8 +1657,8 @@ xfs_dir2_leafn_unbalance(
save_blk->hashval = be32_to_cpu(sents[savehdr.count - 1].hashval);
/* log the changes made when moving the entries */
- dp->d_ops->leaf_hdr_to_disk(save_leaf, &savehdr);
- dp->d_ops->leaf_hdr_to_disk(drop_leaf, &drophdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, save_leaf, &savehdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, drop_leaf, &drophdr);
xfs_dir3_leaf_log_header(args, save_blk->bp);
xfs_dir3_leaf_log_header(args, drop_blk->bp);
@@ -1610,113 +1667,143 @@ xfs_dir2_leafn_unbalance(
}
/*
- * Top-level node form directory addname routine.
+ * Add a new data block to the directory at the free space index that the caller
+ * has specified.
*/
-int /* error */
-xfs_dir2_node_addname(
- xfs_da_args_t *args) /* operation arguments */
+static int
+xfs_dir2_node_add_datablk(
+ struct xfs_da_args *args,
+ struct xfs_da_state_blk *fblk,
+ xfs_dir2_db_t *dbno,
+ struct xfs_buf **dbpp,
+ struct xfs_buf **fbpp,
+ struct xfs_dir3_icfree_hdr *hdr,
+ int *findex)
{
- xfs_da_state_blk_t *blk; /* leaf block for insert */
- int error; /* error return value */
- int rval; /* sub-return value */
- xfs_da_state_t *state; /* btree cursor */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_trans *tp = args->trans;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_dir2_data_free *bf;
+ xfs_dir2_db_t fbno;
+ struct xfs_buf *fbp;
+ struct xfs_buf *dbp;
+ int error;
- trace_xfs_dir2_node_addname(args);
+ /* Not allowed to allocate, return failure. */
+ if (args->total == 0)
+ return -ENOSPC;
+
+ /* Allocate and initialize the new data block. */
+ error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, dbno);
+ if (error)
+ return error;
+ error = xfs_dir3_data_init(args, *dbno, &dbp);
+ if (error)
+ return error;
/*
- * Allocate and initialize the state (btree cursor).
- */
- state = xfs_da_state_alloc();
- state->args = args;
- state->mp = args->dp->i_mount;
- /*
- * Look up the name. We're not supposed to find it, but
- * this gives us the insertion point.
+ * Get the freespace block corresponding to the data block
+ * that was just allocated.
*/
- error = xfs_da3_node_lookup_int(state, &rval);
+ fbno = xfs_dir2_db_to_fdb(args->geo, *dbno);
+ error = xfs_dir2_free_try_read(tp, dp,
+ xfs_dir2_db_to_da(args->geo, fbno), &fbp);
if (error)
- rval = error;
- if (rval != -ENOENT) {
- goto done;
- }
+ return error;
+
/*
- * Add the data entry to a data block.
- * Extravalid is set to a freeblock found by lookup.
+ * If there wasn't a freespace block, the read will
+ * return a NULL fbp. Allocate and initialize a new one.
*/
- rval = xfs_dir2_node_addname_int(args,
- state->extravalid ? &state->extrablk : NULL);
- if (rval) {
- goto done;
+ if (!fbp) {
+ error = xfs_dir2_grow_inode(args, XFS_DIR2_FREE_SPACE, &fbno);
+ if (error)
+ return error;
+
+ if (XFS_IS_CORRUPT(mp,
+ xfs_dir2_db_to_fdb(args->geo, *dbno) !=
+ fbno)) {
+ xfs_alert(mp,
+"%s: dir ino %llu needed freesp block %lld for data block %lld, got %lld",
+ __func__, (unsigned long long)dp->i_ino,
+ (long long)xfs_dir2_db_to_fdb(args->geo, *dbno),
+ (long long)*dbno, (long long)fbno);
+ if (fblk) {
+ xfs_alert(mp,
+ " fblk "PTR_FMT" blkno %llu index %d magic 0x%x",
+ fblk, (unsigned long long)fblk->blkno,
+ fblk->index, fblk->magic);
+ } else {
+ xfs_alert(mp, " ... fblk is NULL");
+ }
+ return -EFSCORRUPTED;
+ }
+
+ /* Get a buffer for the new block. */
+ error = xfs_dir3_free_get_buf(args, fbno, &fbp);
+ if (error)
+ return error;
+ xfs_dir2_free_hdr_from_disk(mp, hdr, fbp->b_addr);
+
+ /* Remember the first slot as our empty slot. */
+ hdr->firstdb = (fbno - xfs_dir2_byte_to_db(args->geo,
+ XFS_DIR2_FREE_OFFSET)) *
+ args->geo->free_max_bests;
+ } else {
+ xfs_dir2_free_hdr_from_disk(mp, hdr, fbp->b_addr);
}
- blk = &state->path.blk[state->path.active - 1];
- ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
+
+ /* Set the freespace block index from the data block number. */
+ *findex = xfs_dir2_db_to_fdindex(args->geo, *dbno);
+
+ /* Extend the freespace table if the new data block is off the end. */
+ if (*findex >= hdr->nvalid) {
+ ASSERT(*findex < args->geo->free_max_bests);
+ hdr->nvalid = *findex + 1;
+ hdr->bests[*findex] = cpu_to_be16(NULLDATAOFF);
+ }
+
/*
- * Add the new leaf entry.
+ * If this entry was for an empty data block (this should always be
+ * true) then update the header.
*/
- rval = xfs_dir2_leafn_add(blk->bp, args, blk->index);
- if (rval == 0) {
- /*
- * It worked, fix the hash values up the btree.
- */
- if (!(args->op_flags & XFS_DA_OP_JUSTCHECK))
- xfs_da3_fixhashpath(state, &state->path);
- } else {
- /*
- * It didn't work, we need to split the leaf block.
- */
- if (args->total == 0) {
- ASSERT(rval == -ENOSPC);
- goto done;
- }
- /*
- * Split the leaf block and insert the new entry.
- */
- rval = xfs_da3_split(state);
+ if (hdr->bests[*findex] == cpu_to_be16(NULLDATAOFF)) {
+ hdr->nused++;
+ xfs_dir2_free_hdr_to_disk(mp, fbp->b_addr, hdr);
+ xfs_dir2_free_log_header(args, fbp);
}
-done:
- xfs_da_state_free(state);
- return rval;
+
+ /* Update the freespace value for the new block in the table. */
+ bf = xfs_dir2_data_bestfree_p(mp, dbp->b_addr);
+ hdr->bests[*findex] = bf[0].length;
+
+ *dbpp = dbp;
+ *fbpp = fbp;
+ return 0;
}
-/*
- * Add the data entry for a node-format directory name addition.
- * The leaf entry is added in xfs_dir2_leafn_add.
- * We may enter with a freespace block that the lookup found.
- */
-static int /* error */
-xfs_dir2_node_addname_int(
- xfs_da_args_t *args, /* operation arguments */
- xfs_da_state_blk_t *fblk) /* optional freespace block */
+static int
+xfs_dir2_node_find_freeblk(
+ struct xfs_da_args *args,
+ struct xfs_da_state_blk *fblk,
+ xfs_dir2_db_t *dbnop,
+ struct xfs_buf **fbpp,
+ struct xfs_dir3_icfree_hdr *hdr,
+ int *findexp,
+ int length)
{
- xfs_dir2_data_hdr_t *hdr; /* data block header */
- xfs_dir2_db_t dbno; /* data block number */
- struct xfs_buf *dbp; /* data block buffer */
- xfs_dir2_data_entry_t *dep; /* data entry pointer */
- xfs_inode_t *dp; /* incore directory inode */
- xfs_dir2_data_unused_t *dup; /* data unused entry pointer */
- int error; /* error return value */
- xfs_dir2_db_t fbno; /* freespace block number */
- struct xfs_buf *fbp; /* freespace buffer */
- int findex; /* freespace entry index */
- xfs_dir2_free_t *free=NULL; /* freespace block structure */
- xfs_dir2_db_t ifbno; /* initial freespace block no */
- xfs_dir2_db_t lastfbno=0; /* highest freespace block no */
- int length; /* length of the new entry */
- int logfree; /* need to log free entry */
- xfs_mount_t *mp; /* filesystem mount point */
- int needlog; /* need to log data header */
- int needscan; /* need to rescan data frees */
- __be16 *tagp; /* data entry tag pointer */
- xfs_trans_t *tp; /* transaction pointer */
- __be16 *bests;
- struct xfs_dir3_icfree_hdr freehdr;
- struct xfs_dir2_data_free *bf;
- xfs_dir2_data_aoff_t aoff;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_trans *tp = args->trans;
+ struct xfs_buf *fbp = NULL;
+ xfs_dir2_db_t firstfbno;
+ xfs_dir2_db_t lastfbno;
+ xfs_dir2_db_t ifbno = -1;
+ xfs_dir2_db_t dbno = -1;
+ xfs_dir2_db_t fbno;
+ xfs_fileoff_t fo;
+ int findex = 0;
+ int error;
- dp = args->dp;
- mp = dp->i_mount;
- tp = args->trans;
- length = dp->d_ops->data_entsize(args->namelen);
/*
* If we came in with a freespace block that means that lookup
* found an entry with our hash value. This is the freespace
@@ -1724,288 +1811,150 @@ xfs_dir2_node_addname_int(
*/
if (fblk) {
fbp = fblk->bp;
- /*
- * Remember initial freespace block number.
- */
- ifbno = fblk->blkno;
- free = fbp->b_addr;
findex = fblk->index;
- bests = dp->d_ops->free_bests_p(free);
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
-
- /*
- * This means the free entry showed that the data block had
- * space for our entry, so we remembered it.
- * Use that data block.
- */
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, hdr, fbp->b_addr);
if (findex >= 0) {
- ASSERT(findex < freehdr.nvalid);
- ASSERT(be16_to_cpu(bests[findex]) != NULLDATAOFF);
- ASSERT(be16_to_cpu(bests[findex]) >= length);
- dbno = freehdr.firstdb + findex;
- } else {
- /*
- * The data block looked at didn't have enough room.
- * We'll start at the beginning of the freespace entries.
- */
- dbno = -1;
- findex = 0;
+ /* caller already found the freespace for us. */
+ ASSERT(findex < hdr->nvalid);
+ ASSERT(be16_to_cpu(hdr->bests[findex]) != NULLDATAOFF);
+ ASSERT(be16_to_cpu(hdr->bests[findex]) >= length);
+ dbno = hdr->firstdb + findex;
+ goto found_block;
}
- } else {
+
/*
- * Didn't come in with a freespace block, so no data block.
+ * The data block looked at didn't have enough room.
+ * We'll start at the beginning of the freespace entries.
*/
- ifbno = dbno = -1;
+ ifbno = fblk->blkno;
+ xfs_trans_brelse(tp, fbp);
fbp = NULL;
- findex = 0;
+ fblk->bp = NULL;
}
/*
- * If we don't have a data block yet, we're going to scan the
- * freespace blocks looking for one. Figure out what the
- * highest freespace block number is.
- */
- if (dbno == -1) {
- xfs_fileoff_t fo; /* freespace block number */
-
- if ((error = xfs_bmap_last_offset(dp, &fo, XFS_DATA_FORK)))
- return error;
- lastfbno = xfs_dir2_da_to_db(args->geo, (xfs_dablk_t)fo);
- fbno = ifbno;
- }
- /*
- * While we haven't identified a data block, search the freeblock
- * data for a good data block. If we find a null freeblock entry,
- * indicating a hole in the data blocks, remember that.
- */
- while (dbno == -1) {
- /*
- * If we don't have a freeblock in hand, get the next one.
- */
- if (fbp == NULL) {
- /*
- * Happens the first time through unless lookup gave
- * us a freespace block to start with.
- */
- if (++fbno == 0)
- fbno = xfs_dir2_byte_to_db(args->geo,
- XFS_DIR2_FREE_OFFSET);
- /*
- * If it's ifbno we already looked at it.
- */
- if (fbno == ifbno)
- fbno++;
- /*
- * If it's off the end we're done.
- */
- if (fbno >= lastfbno)
- break;
- /*
- * Read the block. There can be holes in the
- * freespace blocks, so this might not succeed.
- * This should be really rare, so there's no reason
- * to avoid it.
- */
- error = xfs_dir2_free_try_read(tp, dp,
- xfs_dir2_db_to_da(args->geo, fbno),
- &fbp);
- if (error)
- return error;
- if (!fbp)
- continue;
- free = fbp->b_addr;
- findex = 0;
- }
- /*
- * Look at the current free entry. Is it good enough?
- *
- * The bests initialisation should be where the bufer is read in
- * the above branch. But gcc is too stupid to realise that bests
- * and the freehdr are actually initialised if they are placed
- * there, so we have to do it here to avoid warnings. Blech.
- */
- bests = dp->d_ops->free_bests_p(free);
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
- if (be16_to_cpu(bests[findex]) != NULLDATAOFF &&
- be16_to_cpu(bests[findex]) >= length)
- dbno = freehdr.firstdb + findex;
- else {
- /*
- * Are we done with the freeblock?
- */
- if (++findex == freehdr.nvalid) {
- /*
- * Drop the block.
- */
- xfs_trans_brelse(tp, fbp);
- fbp = NULL;
- if (fblk && fblk->bp)
- fblk->bp = NULL;
- }
- }
- }
- /*
- * If we don't have a data block, we need to allocate one and make
- * the freespace entries refer to it.
+ * If we don't have a data block yet, we're going to scan the freespace
+ * data for a data block with enough free space in it.
*/
- if (unlikely(dbno == -1)) {
- /*
- * Not allowed to allocate, return failure.
- */
- if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
- return -ENOSPC;
-
- /*
- * Allocate and initialize the new data block.
- */
- if (unlikely((error = xfs_dir2_grow_inode(args,
- XFS_DIR2_DATA_SPACE,
- &dbno)) ||
- (error = xfs_dir3_data_init(args, dbno, &dbp))))
- return error;
+ error = xfs_bmap_last_offset(dp, &fo, XFS_DATA_FORK);
+ if (error)
+ return error;
+ lastfbno = xfs_dir2_da_to_db(args->geo, (xfs_dablk_t)fo);
+ firstfbno = xfs_dir2_byte_to_db(args->geo, XFS_DIR2_FREE_OFFSET);
- /*
- * If (somehow) we have a freespace block, get rid of it.
- */
- if (fbp)
- xfs_trans_brelse(tp, fbp);
- if (fblk && fblk->bp)
- fblk->bp = NULL;
+ for (fbno = lastfbno - 1; fbno >= firstfbno; fbno--) {
+ /* If it's ifbno we already looked at it. */
+ if (fbno == ifbno)
+ continue;
/*
- * Get the freespace block corresponding to the data block
- * that was just allocated.
+ * Read the block. There can be holes in the freespace blocks,
+ * so this might not succeed. This should be really rare, so
+ * there's no reason to avoid it.
*/
- fbno = dp->d_ops->db_to_fdb(args->geo, dbno);
error = xfs_dir2_free_try_read(tp, dp,
- xfs_dir2_db_to_da(args->geo, fbno),
- &fbp);
+ xfs_dir2_db_to_da(args->geo, fbno),
+ &fbp);
if (error)
return error;
+ if (!fbp)
+ continue;
- /*
- * If there wasn't a freespace block, the read will
- * return a NULL fbp. Allocate and initialize a new one.
- */
- if (!fbp) {
- error = xfs_dir2_grow_inode(args, XFS_DIR2_FREE_SPACE,
- &fbno);
- if (error)
- return error;
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, hdr, fbp->b_addr);
- if (dp->d_ops->db_to_fdb(args->geo, dbno) != fbno) {
- xfs_alert(mp,
-"%s: dir ino %llu needed freesp block %lld for data block %lld, got %lld ifbno %llu lastfbno %d",
- __func__, (unsigned long long)dp->i_ino,
- (long long)dp->d_ops->db_to_fdb(
- args->geo, dbno),
- (long long)dbno, (long long)fbno,
- (unsigned long long)ifbno, lastfbno);
- if (fblk) {
- xfs_alert(mp,
- " fblk "PTR_FMT" blkno %llu index %d magic 0x%x",
- fblk,
- (unsigned long long)fblk->blkno,
- fblk->index,
- fblk->magic);
- } else {
- xfs_alert(mp, " ... fblk is NULL");
- }
- XFS_ERROR_REPORT("xfs_dir2_node_addname_int",
- XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
+ /* Scan the free entry array for a large enough free space. */
+ for (findex = hdr->nvalid - 1; findex >= 0; findex--) {
+ if (be16_to_cpu(hdr->bests[findex]) != NULLDATAOFF &&
+ be16_to_cpu(hdr->bests[findex]) >= length) {
+ dbno = hdr->firstdb + findex;
+ goto found_block;
}
-
- /*
- * Get a buffer for the new block.
- */
- error = xfs_dir3_free_get_buf(args, fbno, &fbp);
- if (error)
- return error;
- free = fbp->b_addr;
- bests = dp->d_ops->free_bests_p(free);
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
-
- /*
- * Remember the first slot as our empty slot.
- */
- freehdr.firstdb =
- (fbno - xfs_dir2_byte_to_db(args->geo,
- XFS_DIR2_FREE_OFFSET)) *
- dp->d_ops->free_max_bests(args->geo);
- } else {
- free = fbp->b_addr;
- bests = dp->d_ops->free_bests_p(free);
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
}
- /*
- * Set the freespace block index from the data block number.
- */
- findex = dp->d_ops->db_to_fdindex(args->geo, dbno);
- /*
- * If it's after the end of the current entries in the
- * freespace block, extend that table.
- */
- if (findex >= freehdr.nvalid) {
- ASSERT(findex < dp->d_ops->free_max_bests(args->geo));
- freehdr.nvalid = findex + 1;
- /*
- * Tag new entry so nused will go up.
- */
- bests[findex] = cpu_to_be16(NULLDATAOFF);
- }
- /*
- * If this entry was for an empty data block
- * (this should always be true) then update the header.
- */
- if (bests[findex] == cpu_to_be16(NULLDATAOFF)) {
- freehdr.nused++;
- dp->d_ops->free_hdr_to_disk(fbp->b_addr, &freehdr);
- xfs_dir2_free_log_header(args, fbp);
- }
- /*
- * Update the real value in the table.
- * We haven't allocated the data entry yet so this will
- * change again.
- */
- hdr = dbp->b_addr;
- bf = dp->d_ops->data_bestfree_p(hdr);
- bests[findex] = bf[0].length;
- logfree = 1;
+ /* Didn't find free space, go on to next free block */
+ xfs_trans_brelse(tp, fbp);
}
+
+found_block:
+ *dbnop = dbno;
+ *fbpp = fbp;
+ *findexp = findex;
+ return 0;
+}
+
+/*
+ * Add the data entry for a node-format directory name addition.
+ * The leaf entry is added in xfs_dir2_leafn_add.
+ * We may enter with a freespace block that the lookup found.
+ */
+static int
+xfs_dir2_node_addname_int(
+ struct xfs_da_args *args, /* operation arguments */
+ struct xfs_da_state_blk *fblk) /* optional freespace block */
+{
+ struct xfs_dir2_data_unused *dup; /* data unused entry pointer */
+ struct xfs_dir2_data_entry *dep; /* data entry pointer */
+ struct xfs_dir2_data_hdr *hdr; /* data block header */
+ struct xfs_dir2_data_free *bf;
+ struct xfs_trans *tp = args->trans;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_dir3_icfree_hdr freehdr;
+ struct xfs_buf *dbp; /* data block buffer */
+ struct xfs_buf *fbp; /* freespace buffer */
+ xfs_dir2_data_aoff_t aoff;
+ xfs_dir2_db_t dbno; /* data block number */
+ int error; /* error return value */
+ int findex; /* freespace entry index */
+ int length; /* length of the new entry */
+ int logfree = 0; /* need to log free entry */
+ int needlog = 0; /* need to log data header */
+ int needscan = 0; /* need to rescan data frees */
+ __be16 *tagp; /* data entry tag pointer */
+
+ length = xfs_dir2_data_entsize(dp->i_mount, args->namelen);
+ error = xfs_dir2_node_find_freeblk(args, fblk, &dbno, &fbp, &freehdr,
+ &findex, length);
+ if (error)
+ return error;
+
/*
- * We had a data block so we don't have to make a new one.
+ * Now we know whether we must allocate blocks, so if we are only checking
+ * whether we can insert without allocating, we can return now.
*/
- else {
- /*
- * If just checking, we succeeded.
- */
- if (args->op_flags & XFS_DA_OP_JUSTCHECK)
- return 0;
+ if (args->op_flags & XFS_DA_OP_JUSTCHECK) {
+ if (dbno == -1)
+ return -ENOSPC;
+ return 0;
+ }
- /*
- * Read the data block in.
- */
+ /*
+ * If we don't have a data block, we need to allocate one and make
+ * the freespace entries refer to it.
+ */
+ if (dbno == -1) {
+ /* we're going to have to log the free block index later */
+ logfree = 1;
+ error = xfs_dir2_node_add_datablk(args, fblk, &dbno, &dbp, &fbp,
+ &freehdr, &findex);
+ } else {
+ /* Read the data block in. */
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo, dbno),
- -1, &dbp);
- if (error)
- return error;
- hdr = dbp->b_addr;
- bf = dp->d_ops->data_bestfree_p(hdr);
- logfree = 0;
+ 0, &dbp);
}
+ if (error)
+ return error;
+
+ /* Set up the data block now. */
+ hdr = dbp->b_addr;
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
ASSERT(be16_to_cpu(bf[0].length) >= length);
- /*
- * Point to the existing unused space.
- */
+
+ /* Point to the existing unused space. */
dup = (xfs_dir2_data_unused_t *)
((char *)hdr + be16_to_cpu(bf[0].offset));
- needscan = needlog = 0;
- /*
- * Mark the first part of the unused space, inuse for us.
- */
+
+ /* Mark the first part of the unused space, inuse for us. */
aoff = (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr);
error = xfs_dir2_data_use_free(args, dbp, dup, aoff, length,
&needlog, &needscan);
@@ -2013,46 +1962,106 @@ xfs_dir2_node_addname_int(
xfs_trans_brelse(tp, dbp);
return error;
}
- /*
- * Fill in the new entry and log it.
- */
+
+ /* Fill in the new entry and log it. */
dep = (xfs_dir2_data_entry_t *)dup;
dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
- dp->d_ops->data_put_ftype(dep, args->filetype);
- tagp = dp->d_ops->data_entry_tag_p(dep);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
+ tagp = xfs_dir2_data_entry_tag_p(dp->i_mount, dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
xfs_dir2_data_log_entry(args, dbp, dep);
- /*
- * Rescan the block for bestfree if needed.
- */
+
+ /* Rescan the freespace and log the data block if needed. */
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
- /*
- * Log the data block header if needed.
- */
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, dbp);
+
+ /* If the freespace block entry is now wrong, update it. */
+ if (freehdr.bests[findex] != bf[0].length) {
+ freehdr.bests[findex] = bf[0].length;
+ logfree = 1;
+ }
+
+ /* Log the freespace entry if needed. */
+ if (logfree)
+ xfs_dir2_free_log_bests(args, &freehdr, fbp, findex, findex);
+
+ /* Return the data block and offset in args. */
+ args->blkno = (xfs_dablk_t)dbno;
+ args->index = be16_to_cpu(*tagp);
+ return 0;
+}
+
+/*
+ * Top-level node form directory addname routine.
+ */
+int /* error */
+xfs_dir2_node_addname(
+ xfs_da_args_t *args) /* operation arguments */
+{
+ xfs_da_state_blk_t *blk; /* leaf block for insert */
+ int error; /* error return value */
+ int rval; /* sub-return value */
+ xfs_da_state_t *state; /* btree cursor */
+
+ trace_xfs_dir2_node_addname(args);
+
/*
- * If the freespace entry is now wrong, update it.
+ * Allocate and initialize the state (btree cursor).
*/
- bests = dp->d_ops->free_bests_p(free); /* gcc is so stupid */
- if (be16_to_cpu(bests[findex]) != be16_to_cpu(bf[0].length)) {
- bests[findex] = bf[0].length;
- logfree = 1;
+ state = xfs_da_state_alloc();
+ state->args = args;
+ state->mp = args->dp->i_mount;
+ /*
+ * Look up the name. We're not supposed to find it, but
+ * this gives us the insertion point.
+ */
+ error = xfs_da3_node_lookup_int(state, &rval);
+ if (error)
+ rval = error;
+ if (rval != -ENOENT) {
+ goto done;
}
/*
- * Log the freespace entry if needed.
+ * Add the data entry to a data block.
+ * Extravalid is set to a freeblock found by lookup.
*/
- if (logfree)
- xfs_dir2_free_log_bests(args, fbp, findex, findex);
+ rval = xfs_dir2_node_addname_int(args,
+ state->extravalid ? &state->extrablk : NULL);
+ if (rval) {
+ goto done;
+ }
+ blk = &state->path.blk[state->path.active - 1];
+ ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
/*
- * Return the data block and offset in args, then drop the data block.
+ * Add the new leaf entry.
*/
- args->blkno = (xfs_dablk_t)dbno;
- args->index = be16_to_cpu(*tagp);
- return 0;
+ rval = xfs_dir2_leafn_add(blk->bp, args, blk->index);
+ if (rval == 0) {
+ /*
+ * It worked, fix the hash values up the btree.
+ */
+ if (!(args->op_flags & XFS_DA_OP_JUSTCHECK))
+ xfs_da3_fixhashpath(state, &state->path);
+ } else {
+ /*
+ * It didn't work, we need to split the leaf block.
+ */
+ if (args->total == 0) {
+ ASSERT(rval == -ENOSPC);
+ goto done;
+ }
+ /*
+ * Split the leaf block and insert the new entry.
+ */
+ rval = xfs_da3_split(state);
+ }
+done:
+ xfs_da_state_free(state);
+ return rval;
}
/*
@@ -2186,8 +2195,6 @@ xfs_dir2_node_replace(
int i; /* btree level */
xfs_ino_t inum; /* new inode number */
int ftype; /* new file type */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
- xfs_dir2_leaf_entry_t *lep; /* leaf entry being changed */
int rval; /* internal return value */
xfs_da_state_t *state; /* btree cursor */
@@ -2219,16 +2226,17 @@ xfs_dir2_node_replace(
* and locked it. But paranoia is good.
*/
if (rval == -EEXIST) {
- struct xfs_dir2_leaf_entry *ents;
+ struct xfs_dir3_icleaf_hdr leafhdr;
+
/*
* Find the leaf entry.
*/
blk = &state->path.blk[state->path.active - 1];
ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
- leaf = blk->bp->b_addr;
- ents = args->dp->d_ops->leaf_ents_p(leaf);
- lep = &ents[blk->index];
ASSERT(state->extravalid);
+
+ xfs_dir2_leaf_hdr_from_disk(state->mp, &leafhdr,
+ blk->bp->b_addr);
/*
* Point to the data entry.
*/
@@ -2238,13 +2246,13 @@ xfs_dir2_node_replace(
dep = (xfs_dir2_data_entry_t *)
((char *)hdr +
xfs_dir2_dataptr_to_off(args->geo,
- be32_to_cpu(lep->address)));
+ be32_to_cpu(leafhdr.ents[blk->index].address)));
ASSERT(inum != be64_to_cpu(dep->inumber));
/*
* Fill in the new inode number and log the entry.
*/
dep->inumber = cpu_to_be64(inum);
- args->dp->d_ops->data_put_ftype(dep, ftype);
+ xfs_dir2_data_put_ftype(state->mp, dep, ftype);
xfs_dir2_data_log_entry(args, state->extrablk.bp, dep);
rval = 0;
}
@@ -2301,7 +2309,7 @@ xfs_dir2_node_trim_free(
if (!bp)
return 0;
free = bp->b_addr;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &freehdr, free);
/*
* If there are used entries, there's nothing to do.
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index 59f9fb2241a5..c031c53d0f0d 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -8,7 +8,41 @@
struct dir_context;
+/*
+ * In-core version of the leaf and free block headers to abstract the
+ * differences in the v2 and v3 disk format of the headers.
+ */
+struct xfs_dir3_icleaf_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t stale;
+
+ /*
+ * Pointer to the on-disk format entries, which are behind the
+ * variable size (v4 vs v5) header in the on-disk block.
+ */
+ struct xfs_dir2_leaf_entry *ents;
+};
+
+struct xfs_dir3_icfree_hdr {
+ uint32_t magic;
+ uint32_t firstdb;
+ uint32_t nvalid;
+ uint32_t nused;
+
+ /*
+ * Pointer to the on-disk format entries, which are behind the
+ * variable size (v4 vs v5) header in the on-disk block.
+ */
+ __be16 *bests;
+};
+
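The two in-core headers above are the pivot of this conversion: callers decode whichever on-disk header flavour the buffer holds (the v4 and v5 formats differ in size) into the in-core form once, and from then on use the ents/bests pointers without caring how large the disk header was. A minimal sketch of that calling pattern, modelled on xfs_dir2_leaf_lasthash() elsewhere in this patch (the function name below is hypothetical; the helpers are the ones declared in this header):

	static xfs_dahash_t
	example_last_hashval(struct xfs_inode *dp, struct xfs_buf *bp)
	{
		struct xfs_dir3_icleaf_hdr leafhdr;

		/* Decode the v4 or v5 on-disk header into the in-core form. */
		xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, bp->b_addr);

		/* leafhdr.ents already points past the variable-size header. */
		if (!leafhdr.count)
			return 0;
		return be32_to_cpu(leafhdr.ents[leafhdr.count - 1].hashval);
	}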
/* xfs_dir2.c */
+xfs_dahash_t xfs_ascii_ci_hashname(struct xfs_name *name);
+enum xfs_dacmp xfs_ascii_ci_compname(struct xfs_da_args *args,
+ const unsigned char *name, int len);
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
xfs_dir2_db_t *dbp);
extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
@@ -26,6 +60,15 @@ extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
struct xfs_buf *lbp, struct xfs_buf *dbp);
/* xfs_dir2_data.c */
+struct xfs_dir2_data_free *xfs_dir2_data_bestfree_p(struct xfs_mount *mp,
+ struct xfs_dir2_data_hdr *hdr);
+__be16 *xfs_dir2_data_entry_tag_p(struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep);
+uint8_t xfs_dir2_data_get_ftype(struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep);
+void xfs_dir2_data_put_ftype(struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep, uint8_t ftype);
+
#ifdef DEBUG
extern void xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
#else
@@ -34,10 +77,10 @@ extern void xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
extern xfs_failaddr_t __xfs_dir3_data_check(struct xfs_inode *dp,
struct xfs_buf *bp);
-extern int xfs_dir3_data_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mapped_bno, struct xfs_buf **bpp);
-extern int xfs_dir3_data_readahead(struct xfs_inode *dp, xfs_dablk_t bno,
- xfs_daddr_t mapped_bno);
+int xfs_dir3_data_read(struct xfs_trans *tp, struct xfs_inode *dp,
+ xfs_dablk_t bno, unsigned int flags, struct xfs_buf **bpp);
+int xfs_dir3_data_readahead(struct xfs_inode *dp, xfs_dablk_t bno,
+ unsigned int flags);
extern struct xfs_dir2_data_free *
xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
@@ -47,10 +90,14 @@ extern int xfs_dir3_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
struct xfs_buf **bpp);
/* xfs_dir2_leaf.c */
-extern int xfs_dir3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp);
-extern int xfs_dir3_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp);
+void xfs_dir2_leaf_hdr_from_disk(struct xfs_mount *mp,
+ struct xfs_dir3_icleaf_hdr *to, struct xfs_dir2_leaf *from);
+void xfs_dir2_leaf_hdr_to_disk(struct xfs_mount *mp, struct xfs_dir2_leaf *to,
+ struct xfs_dir3_icleaf_hdr *from);
+int xfs_dir3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
+ xfs_dablk_t fbno, struct xfs_buf **bpp);
+int xfs_dir3_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp,
+ xfs_dablk_t fbno, struct xfs_buf **bpp);
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
struct xfs_buf *dbp);
extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
@@ -62,7 +109,8 @@ extern void xfs_dir3_leaf_compact_x1(struct xfs_dir3_icleaf_hdr *leafhdr,
extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno,
struct xfs_buf **bpp, uint16_t magic);
extern void xfs_dir3_leaf_log_ents(struct xfs_da_args *args,
- struct xfs_buf *bp, int first, int last);
+ struct xfs_dir3_icleaf_hdr *hdr, struct xfs_buf *bp, int first,
+ int last);
extern void xfs_dir3_leaf_log_header(struct xfs_da_args *args,
struct xfs_buf *bp);
extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
@@ -79,10 +127,11 @@ xfs_dir3_leaf_find_entry(struct xfs_dir3_icleaf_hdr *leafhdr,
extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
extern xfs_failaddr_t xfs_dir3_leaf_check_int(struct xfs_mount *mp,
- struct xfs_inode *dp, struct xfs_dir3_icleaf_hdr *hdr,
- struct xfs_dir2_leaf *leaf);
+ struct xfs_dir3_icleaf_hdr *hdr, struct xfs_dir2_leaf *leaf);
/* xfs_dir2_node.c */
+void xfs_dir2_free_hdr_from_disk(struct xfs_mount *mp,
+ struct xfs_dir3_icfree_hdr *to, struct xfs_dir2_free *from);
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
struct xfs_buf *lbp);
extern xfs_dahash_t xfs_dir2_leaf_lasthash(struct xfs_inode *dp,
@@ -108,6 +157,14 @@ extern int xfs_dir2_free_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t fbno, struct xfs_buf **bpp);
/* xfs_dir2_sf.c */
+xfs_ino_t xfs_dir2_sf_get_ino(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep);
+xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *hdr);
+void xfs_dir2_sf_put_parent_ino(struct xfs_dir2_sf_hdr *hdr, xfs_ino_t ino);
+uint8_t xfs_dir2_sf_get_ftype(struct xfs_mount *mp,
+ struct xfs_dir2_sf_entry *sfep);
+struct xfs_dir2_sf_entry *xfs_dir2_sf_nextentry(struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr, struct xfs_dir2_sf_entry *sfep);
extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp);
extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
@@ -123,4 +180,39 @@ extern xfs_failaddr_t xfs_dir2_sf_verify(struct xfs_inode *ip);
extern int xfs_readdir(struct xfs_trans *tp, struct xfs_inode *dp,
struct dir_context *ctx, size_t bufsize);
+static inline unsigned int
+xfs_dir2_data_entsize(
+ struct xfs_mount *mp,
+ unsigned int namelen)
+{
+ unsigned int len;
+
+ len = offsetof(struct xfs_dir2_data_entry, name[0]) + namelen +
+ sizeof(xfs_dir2_data_off_t) /* tag */;
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ len += sizeof(uint8_t);
+ return round_up(len, XFS_DIR2_DATA_ALIGN);
+}
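As a concrete check of the arithmetic (a sketch assuming the usual on-disk sizes: a 9-byte fixed entry prefix, a 2-byte tag, and an XFS_DIR2_DATA_ALIGN of 8 bytes): a 4-character name on a filetype-enabled filesystem needs 9 + 4 + 2 + 1 = 16 bytes, which is already aligned; without the ftype feature it needs 9 + 4 + 2 = 15 bytes, which rounds up to 16.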
+
+static inline xfs_dahash_t
+xfs_dir2_hashname(
+ struct xfs_mount *mp,
+ struct xfs_name *name)
+{
+ if (unlikely(xfs_sb_version_hasasciici(&mp->m_sb)))
+ return xfs_ascii_ci_hashname(name);
+ return xfs_da_hashname(name->name, name->len);
+}
+
+static inline enum xfs_dacmp
+xfs_dir2_compname(
+ struct xfs_da_args *args,
+ const unsigned char *name,
+ int len)
+{
+ if (unlikely(xfs_sb_version_hasasciici(&args->dp->i_mount->m_sb)))
+ return xfs_ascii_ci_compname(args, name, len);
+ return xfs_da_compname(args, name, len);
+}
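A minimal usage sketch of the two dispatch helpers, assuming the caller already has the mount, an xfs_name and a populated da_args in scope as the lookup paths do (illustrative, not copied from xfs_dir2.c):

	enum xfs_dacmp cmp;

	/* Hash the name once up front; leaf lookups match on args->hashval. */
	args->hashval = xfs_dir2_hashname(mp, &xname);

	/*
	 * When an entry with that hash value is found, compare the names.
	 * On an ASCII-CI filesystem this also reports case-insensitive hits.
	 */
	cmp = xfs_dir2_compname(args, dep->name, dep->namelen);
	if (cmp == XFS_CMP_EXACT || cmp == XFS_CMP_CASE)
		args->cmpresult = cmp;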
+
#endif /* __XFS_DIR2_PRIV_H__ */
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 033589257f54..8b94d33d232f 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -37,6 +37,126 @@ static void xfs_dir2_sf_check(xfs_da_args_t *args);
static void xfs_dir2_sf_toino4(xfs_da_args_t *args);
static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
+static int
+xfs_dir2_sf_entsize(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr,
+ int len)
+{
+ int count = len;
+
+ count += sizeof(struct xfs_dir2_sf_entry); /* namelen + offset */
+ count += hdr->i8count ? XFS_INO64_SIZE : XFS_INO32_SIZE; /* ino # */
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ count += sizeof(uint8_t);
+ return count;
+}
+
+struct xfs_dir2_sf_entry *
+xfs_dir2_sf_nextentry(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return (void *)sfep + xfs_dir2_sf_entsize(mp, hdr, sfep->namelen);
+}
+
+/*
+ * In short-form directory entries the inode numbers are stored at variable
+ * offset behind the entry name. If the entry stores a filetype value, then it
+ * sits between the name and the inode number. The actual inode numbers can
+ * come in two formats as well, either 4 bytes or 8 bytes wide.
+ */
+xfs_ino_t
+xfs_dir2_sf_get_ino(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ uint8_t *from = sfep->name + sfep->namelen;
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ from++;
+
+ if (!hdr->i8count)
+ return get_unaligned_be32(from);
+ return get_unaligned_be64(from) & XFS_MAXINUMBER;
+}
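To make the variable layout that the get/put helpers walk concrete, one shortform entry sits in the inode literal area roughly like this (a sketch: the ftype byte is only present with the ftype feature, and the inode number is 4 or 8 bytes depending on hdr->i8count):

	namelen   offset    name[namelen]    [ftype]   inumber (unaligned big-endian)
	1 byte    2 bytes   namelen bytes    1 byte    4 or 8 bytes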
+
+static void
+xfs_dir2_sf_put_ino(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep,
+ xfs_ino_t ino)
+{
+ uint8_t *to = sfep->name + sfep->namelen;
+
+ ASSERT(ino <= XFS_MAXINUMBER);
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ to++;
+
+ if (hdr->i8count)
+ put_unaligned_be64(ino, to);
+ else
+ put_unaligned_be32(ino, to);
+}
+
+xfs_ino_t
+xfs_dir2_sf_get_parent_ino(
+ struct xfs_dir2_sf_hdr *hdr)
+{
+ if (!hdr->i8count)
+ return get_unaligned_be32(hdr->parent);
+ return get_unaligned_be64(hdr->parent) & XFS_MAXINUMBER;
+}
+
+void
+xfs_dir2_sf_put_parent_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ xfs_ino_t ino)
+{
+ ASSERT(ino <= XFS_MAXINUMBER);
+
+ if (hdr->i8count)
+ put_unaligned_be64(ino, hdr->parent);
+ else
+ put_unaligned_be32(ino, hdr->parent);
+}
+
+/*
+ * The file type field is stored at the end of the name for filetype enabled
+ * shortform directories, or not at all otherwise.
+ */
+uint8_t
+xfs_dir2_sf_get_ftype(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ if (xfs_sb_version_hasftype(&mp->m_sb)) {
+ uint8_t ftype = sfep->name[sfep->namelen];
+
+ if (ftype < XFS_DIR3_FT_MAX)
+ return ftype;
+ }
+
+ return XFS_DIR3_FT_UNKNOWN;
+}
+
+static void
+xfs_dir2_sf_put_ftype(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_entry *sfep,
+ uint8_t ftype)
+{
+ ASSERT(ftype < XFS_DIR3_FT_MAX);
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ sfep->name[sfep->namelen] = ftype;
+}
+
/*
* Given a block directory (dp/block), calculate its size as a shortform (sf)
* directory and a header for the sf directory, if it will fit in the
@@ -125,7 +245,7 @@ xfs_dir2_block_sfsize(
*/
sfhp->count = count;
sfhp->i8count = i8count;
- dp->d_ops->sf_put_parent_ino(sfhp, parent);
+ xfs_dir2_sf_put_parent_ino(sfhp, parent);
return size;
}
@@ -135,64 +255,48 @@ xfs_dir2_block_sfsize(
*/
int /* error */
xfs_dir2_block_to_sf(
- xfs_da_args_t *args, /* operation arguments */
+ struct xfs_da_args *args, /* operation arguments */
struct xfs_buf *bp,
int size, /* shortform directory size */
- xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
+ struct xfs_dir2_sf_hdr *sfhp) /* shortform directory hdr */
{
- xfs_dir2_data_hdr_t *hdr; /* block header */
- xfs_dir2_data_entry_t *dep; /* data entry pointer */
- xfs_inode_t *dp; /* incore directory inode */
- xfs_dir2_data_unused_t *dup; /* unused data pointer */
- char *endptr; /* end of data entries */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int error; /* error return value */
int logflags; /* inode logging flags */
- xfs_mount_t *mp; /* filesystem mount point */
- char *ptr; /* current data pointer */
- xfs_dir2_sf_entry_t *sfep; /* shortform entry */
- xfs_dir2_sf_hdr_t *sfp; /* shortform directory header */
- xfs_dir2_sf_hdr_t *dst; /* temporary data buffer */
+ struct xfs_dir2_sf_entry *sfep; /* shortform entry */
+ struct xfs_dir2_sf_hdr *sfp; /* shortform directory header */
+ unsigned int offset = args->geo->data_entry_offset;
+ unsigned int end;
trace_xfs_dir2_block_to_sf(args);
- dp = args->dp;
- mp = dp->i_mount;
-
- /*
- * allocate a temporary destination buffer the size of the inode
- * to format the data into. Once we have formatted the data, we
- * can free the block and copy the formatted data into the inode literal
- * area.
- */
- dst = kmem_alloc(mp->m_sb.sb_inodesize, KM_SLEEP);
- hdr = bp->b_addr;
-
/*
- * Copy the header into the newly allocate local space.
+ * Allocate a temporary destination buffer the size of the inode to
+ * format the data into. Once we have formatted the data, we can free
+ * the block and copy the formatted data into the inode literal area.
*/
- sfp = (xfs_dir2_sf_hdr_t *)dst;
+ sfp = kmem_alloc(mp->m_sb.sb_inodesize, 0);
memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count));
/*
- * Set up to loop over the block's entries.
+ * Loop over the active and unused entries. Stop when we reach the
+ * leaf/tail portion of the block.
*/
- ptr = (char *)dp->d_ops->data_entry_p(hdr);
- endptr = xfs_dir3_data_endp(args->geo, hdr);
+ end = xfs_dir3_data_end_offset(args->geo, bp->b_addr);
sfep = xfs_dir2_sf_firstentry(sfp);
- /*
- * Loop over the active and unused entries.
- * Stop when we reach the leaf/tail portion of the block.
- */
- while (ptr < endptr) {
+ while (offset < end) {
+ struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
+ struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
+
/*
* If it's unused, just skip over it.
*/
- dup = (xfs_dir2_data_unused_t *)ptr;
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- ptr += be16_to_cpu(dup->length);
+ offset += be16_to_cpu(dup->length);
continue;
}
- dep = (xfs_dir2_data_entry_t *)ptr;
+
/*
* Skip .
*/
@@ -204,24 +308,22 @@ xfs_dir2_block_to_sf(
else if (dep->namelen == 2 &&
dep->name[0] == '.' && dep->name[1] == '.')
ASSERT(be64_to_cpu(dep->inumber) ==
- dp->d_ops->sf_get_parent_ino(sfp));
+ xfs_dir2_sf_get_parent_ino(sfp));
/*
* Normal entry, copy it into shortform.
*/
else {
sfep->namelen = dep->namelen;
- xfs_dir2_sf_put_offset(sfep,
- (xfs_dir2_data_aoff_t)
- ((char *)dep - (char *)hdr));
+ xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, dep->name, dep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep,
+ xfs_dir2_sf_put_ino(mp, sfp, sfep,
be64_to_cpu(dep->inumber));
- dp->d_ops->sf_put_ftype(sfep,
- dp->d_ops->data_get_ftype(dep));
+ xfs_dir2_sf_put_ftype(mp, sfep,
+ xfs_dir2_data_get_ftype(mp, dep));
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
}
- ptr += dp->d_ops->data_entsize(dep->namelen);
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
}
ASSERT((char *)sfep - (char *)sfp == size);
@@ -240,7 +342,7 @@ xfs_dir2_block_to_sf(
* Convert the inode to local format and copy the data in.
*/
ASSERT(dp->i_df.if_bytes == 0);
- xfs_init_local_fork(dp, XFS_DATA_FORK, dst, size);
+ xfs_init_local_fork(dp, XFS_DATA_FORK, sfp, size);
dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
dp->i_d.di_size = size;
@@ -248,7 +350,7 @@ xfs_dir2_block_to_sf(
xfs_dir2_sf_check(args);
out:
xfs_trans_log_inode(args->trans, dp, logflags);
- kmem_free(dst);
+ kmem_free(sfp);
return error;
}
@@ -277,13 +379,7 @@ xfs_dir2_sf_addname(
ASSERT(xfs_dir2_sf_lookup(args) == -ENOENT);
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Make sure the shortform value has some of its header.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
+ ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
@@ -291,7 +387,7 @@ xfs_dir2_sf_addname(
/*
* Compute entry (and change in) size.
*/
- incr_isize = dp->d_ops->sf_entsize(sfp, args->namelen);
+ incr_isize = xfs_dir2_sf_entsize(dp->i_mount, sfp, args->namelen);
objchange = 0;
/*
@@ -364,18 +460,17 @@ xfs_dir2_sf_addname_easy(
xfs_dir2_data_aoff_t offset, /* offset to use for new ent */
int new_isize) /* new directory size */
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int byteoff; /* byte offset in sf dir */
- xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
- dp = args->dp;
-
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
byteoff = (int)((char *)sfep - (char *)sfp);
/*
* Grow the in-inode space.
*/
- xfs_idata_realloc(dp, dp->d_ops->sf_entsize(sfp, args->namelen),
+ xfs_idata_realloc(dp, xfs_dir2_sf_entsize(mp, sfp, args->namelen),
XFS_DATA_FORK);
/*
* Need to set up again due to realloc of the inode data.
@@ -388,8 +483,8 @@ xfs_dir2_sf_addname_easy(
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
- dp->d_ops->sf_put_ftype(sfep, args->filetype);
+ xfs_dir2_sf_put_ino(mp, sfp, sfep, args->inumber);
+ xfs_dir2_sf_put_ftype(mp, sfep, args->filetype);
/*
* Update the header and inode.
@@ -416,9 +511,10 @@ xfs_dir2_sf_addname_hard(
int objchange, /* changing inode number size */
int new_isize) /* new directory size */
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int add_datasize; /* data size need for new ent */
char *buf; /* buffer for old */
- xfs_inode_t *dp; /* incore directory inode */
int eof; /* reached end of old dir */
int nbytes; /* temp for byte copies */
xfs_dir2_data_aoff_t new_offset; /* next offset value */
@@ -432,11 +528,9 @@ xfs_dir2_sf_addname_hard(
/*
* Copy the old directory to the stack buffer.
*/
- dp = args->dp;
-
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
old_isize = (int)dp->i_d.di_size;
- buf = kmem_alloc(old_isize, KM_SLEEP);
+ buf = kmem_alloc(old_isize, 0);
oldsfp = (xfs_dir2_sf_hdr_t *)buf;
memcpy(oldsfp, sfp, old_isize);
/*
@@ -444,13 +538,13 @@ xfs_dir2_sf_addname_hard(
* to insert the new entry.
* If it's going to end up at the end then oldsfep will point there.
*/
- for (offset = dp->d_ops->data_first_offset,
+ for (offset = args->geo->data_first_offset,
oldsfep = xfs_dir2_sf_firstentry(oldsfp),
- add_datasize = dp->d_ops->data_entsize(args->namelen),
+ add_datasize = xfs_dir2_data_entsize(mp, args->namelen),
eof = (char *)oldsfep == &buf[old_isize];
!eof;
- offset = new_offset + dp->d_ops->data_entsize(oldsfep->namelen),
- oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep),
+ offset = new_offset + xfs_dir2_data_entsize(mp, oldsfep->namelen),
+ oldsfep = xfs_dir2_sf_nextentry(mp, oldsfp, oldsfep),
eof = (char *)oldsfep == &buf[old_isize]) {
new_offset = xfs_dir2_sf_get_offset(oldsfep);
if (offset + add_datasize <= new_offset)
@@ -479,8 +573,8 @@ xfs_dir2_sf_addname_hard(
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
- dp->d_ops->sf_put_ftype(sfep, args->filetype);
+ xfs_dir2_sf_put_ino(mp, sfp, sfep, args->inumber);
+ xfs_dir2_sf_put_ftype(mp, sfep, args->filetype);
sfp->count++;
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
sfp->i8count++;
@@ -488,7 +582,7 @@ xfs_dir2_sf_addname_hard(
* If there's more left to copy, do that.
*/
if (!eof) {
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
memcpy(sfep, oldsfep, old_isize - nbytes);
}
kmem_free(buf);
@@ -510,7 +604,8 @@ xfs_dir2_sf_addname_pick(
xfs_dir2_sf_entry_t **sfepp, /* out(1): new entry ptr */
xfs_dir2_data_aoff_t *offsetp) /* out(1): new offset */
{
- xfs_inode_t *dp; /* incore directory inode */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int holefit; /* found hole it will fit in */
int i; /* entry number */
xfs_dir2_data_aoff_t offset; /* data block offset */
@@ -519,11 +614,9 @@ xfs_dir2_sf_addname_pick(
int size; /* entry's data size */
int used; /* data bytes used */
- dp = args->dp;
-
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- size = dp->d_ops->data_entsize(args->namelen);
- offset = dp->d_ops->data_first_offset;
+ size = xfs_dir2_data_entsize(mp, args->namelen);
+ offset = args->geo->data_first_offset;
sfep = xfs_dir2_sf_firstentry(sfp);
holefit = 0;
/*
@@ -535,8 +628,8 @@ xfs_dir2_sf_addname_pick(
if (!holefit)
holefit = offset + size <= xfs_dir2_sf_get_offset(sfep);
offset = xfs_dir2_sf_get_offset(sfep) +
- dp->d_ops->data_entsize(sfep->namelen);
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ xfs_dir2_data_entsize(mp, sfep->namelen);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
}
/*
* Calculate data bytes used excluding the new entry, if this
@@ -578,7 +671,8 @@ static void
xfs_dir2_sf_check(
xfs_da_args_t *args) /* operation arguments */
{
- xfs_inode_t *dp; /* incore directory inode */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int i; /* entry number */
int i8count; /* number of big inode#s */
xfs_ino_t ino; /* entry inode number */
@@ -586,23 +680,21 @@ xfs_dir2_sf_check(
xfs_dir2_sf_entry_t *sfep; /* shortform dir entry */
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
- dp = args->dp;
-
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- offset = dp->d_ops->data_first_offset;
- ino = dp->d_ops->sf_get_parent_ino(sfp);
+ offset = args->geo->data_first_offset;
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep)) {
ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset);
- ino = dp->d_ops->sf_get_ino(sfp, sfep);
+ ino = xfs_dir2_sf_get_ino(mp, sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
offset =
xfs_dir2_sf_get_offset(sfep) +
- dp->d_ops->data_entsize(sfep->namelen);
- ASSERT(dp->d_ops->sf_get_ftype(sfep) < XFS_DIR3_FT_MAX);
+ xfs_dir2_data_entsize(mp, sfep->namelen);
+ ASSERT(xfs_dir2_sf_get_ftype(mp, sfep) < XFS_DIR3_FT_MAX);
}
ASSERT(i8count == sfp->i8count);
ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size);
@@ -622,22 +714,16 @@ xfs_dir2_sf_verify(
struct xfs_dir2_sf_entry *sfep;
struct xfs_dir2_sf_entry *next_sfep;
char *endp;
- const struct xfs_dir_ops *dops;
struct xfs_ifork *ifp;
xfs_ino_t ino;
int i;
int i8count;
int offset;
- int size;
+ int64_t size;
int error;
uint8_t filetype;
ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
- /*
- * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
- * so we can only trust the mountpoint to have the right pointer.
- */
- dops = xfs_dir_get_ops(mp, NULL);
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
@@ -653,12 +739,12 @@ xfs_dir2_sf_verify(
endp = (char *)sfp + size;
/* Check .. entry */
- ino = dops->sf_get_parent_ino(sfp);
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
error = xfs_dir_ino_validate(mp, ino);
if (error)
return __this_address;
- offset = dops->data_first_offset;
+ offset = mp->m_dir_geo->data_first_offset;
/* Check all reported entries */
sfep = xfs_dir2_sf_firstentry(sfp);
@@ -680,7 +766,7 @@ xfs_dir2_sf_verify(
* within the data buffer. The next entry starts after the
* name component, so nextentry is an acceptable test.
*/
- next_sfep = dops->sf_nextentry(sfp, sfep);
+ next_sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
if (endp < (char *)next_sfep)
return __this_address;
@@ -689,19 +775,19 @@ xfs_dir2_sf_verify(
return __this_address;
/* Check the inode number. */
- ino = dops->sf_get_ino(sfp, sfep);
+ ino = xfs_dir2_sf_get_ino(mp, sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
error = xfs_dir_ino_validate(mp, ino);
if (error)
return __this_address;
/* Check the file type. */
- filetype = dops->sf_get_ftype(sfep);
+ filetype = xfs_dir2_sf_get_ftype(mp, sfep);
if (filetype >= XFS_DIR3_FT_MAX)
return __this_address;
offset = xfs_dir2_sf_get_offset(sfep) +
- dops->data_entsize(sfep->namelen);
+ xfs_dir2_data_entsize(mp, sfep->namelen);
sfep = next_sfep;
}
@@ -763,7 +849,7 @@ xfs_dir2_sf_create(
/*
* Now can put in the inode number, since i8count is set.
*/
- dp->d_ops->sf_put_parent_ino(sfp, pino);
+ xfs_dir2_sf_put_parent_ino(sfp, pino);
sfp->count = 0;
dp->i_d.di_size = size;
xfs_dir2_sf_check(args);
@@ -779,7 +865,8 @@ int /* error */
xfs_dir2_sf_lookup(
xfs_da_args_t *args) /* operation arguments */
{
- xfs_inode_t *dp; /* incore directory inode */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int i; /* entry index */
int error;
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
@@ -790,16 +877,9 @@ xfs_dir2_sf_lookup(
trace_xfs_dir2_sf_lookup(args);
xfs_dir2_sf_check(args);
- dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Bail out if the directory is way too short.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
+ ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
@@ -818,7 +898,7 @@ xfs_dir2_sf_lookup(
*/
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
- args->inumber = dp->d_ops->sf_get_parent_ino(sfp);
+ args->inumber = xfs_dir2_sf_get_parent_ino(sfp);
args->cmpresult = XFS_CMP_EXACT;
args->filetype = XFS_DIR3_FT_DIR;
return -EEXIST;
@@ -828,18 +908,17 @@ xfs_dir2_sf_lookup(
*/
ci_sfep = NULL;
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep)) {
/*
* Compare name and if it's an exact match, return the inode
* number. If it's the first case-insensitive match, store the
* inode number and continue looking for an exact match.
*/
- cmp = dp->i_mount->m_dirnameops->compname(args, sfep->name,
- sfep->namelen);
+ cmp = xfs_dir2_compname(args, sfep->name, sfep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
args->cmpresult = cmp;
- args->inumber = dp->d_ops->sf_get_ino(sfp, sfep);
- args->filetype = dp->d_ops->sf_get_ftype(sfep);
+ args->inumber = xfs_dir2_sf_get_ino(mp, sfp, sfep);
+ args->filetype = xfs_dir2_sf_get_ftype(mp, sfep);
if (cmp == XFS_CMP_EXACT)
return -EEXIST;
ci_sfep = sfep;
@@ -864,8 +943,9 @@ int /* error */
xfs_dir2_sf_removename(
xfs_da_args_t *args)
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int byteoff; /* offset of removed entry */
- xfs_inode_t *dp; /* incore directory inode */
int entsize; /* this entry's size */
int i; /* shortform entry index */
int newsize; /* new inode size */
@@ -875,17 +955,9 @@ xfs_dir2_sf_removename(
trace_xfs_dir2_sf_removename(args);
- dp = args->dp;
-
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
oldsize = (int)dp->i_d.di_size;
- /*
- * Bail out if the directory is way too short.
- */
- if (oldsize < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
+ ASSERT(oldsize >= offsetof(struct xfs_dir2_sf_hdr, parent));
ASSERT(dp->i_df.if_bytes == oldsize);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
@@ -895,10 +967,10 @@ xfs_dir2_sf_removename(
* Find the one we're deleting.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
- ASSERT(dp->d_ops->sf_get_ino(sfp, sfep) ==
+ ASSERT(xfs_dir2_sf_get_ino(mp, sfp, sfep) ==
args->inumber);
break;
}
@@ -912,7 +984,7 @@ xfs_dir2_sf_removename(
* Calculate sizes.
*/
byteoff = (int)((char *)sfep - (char *)sfp);
- entsize = dp->d_ops->sf_entsize(sfp, args->namelen);
+ entsize = xfs_dir2_sf_entsize(mp, sfp, args->namelen);
newsize = oldsize - entsize;
/*
* Copy the part if any after the removed entry, sliding it down.
@@ -945,13 +1017,35 @@ xfs_dir2_sf_removename(
}
/*
+ * Check whether the sf dir replace operation needs more blocks.
+ */
+bool
+xfs_dir2_sf_replace_needblock(
+ struct xfs_inode *dp,
+ xfs_ino_t inum)
+{
+ int newsize;
+ struct xfs_dir2_sf_hdr *sfp;
+
+ if (dp->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+ return false;
+
+ sfp = (struct xfs_dir2_sf_hdr *)dp->i_df.if_u1.if_data;
+ newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
+
+ return inum > XFS_DIR2_MAX_SHORT_INUM &&
+ sfp->i8count == 0 && newsize > XFS_IFORK_DSIZE(dp);
+}
+
+/*
* Replace the inode number of an entry in a shortform directory.
*/
int /* error */
xfs_dir2_sf_replace(
xfs_da_args_t *args) /* operation arguments */
{
- xfs_inode_t *dp; /* incore directory inode */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int i; /* entry index */
xfs_ino_t ino=0; /* entry old inode number */
int i8elevated; /* sf_toino8 set i8count=1 */
@@ -960,16 +1054,8 @@ xfs_dir2_sf_replace(
trace_xfs_dir2_sf_replace(args);
- dp = args->dp;
-
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Bail out if the shortform directory is way too small.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
+ ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
@@ -980,17 +1066,14 @@ xfs_dir2_sf_replace(
*/
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) {
int error; /* error return value */
- int newsize; /* new inode size */
- newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
/*
* Won't fit as shortform, convert to block then do replace.
*/
- if (newsize > XFS_IFORK_DSIZE(dp)) {
+ if (xfs_dir2_sf_replace_needblock(dp, args->inumber)) {
error = xfs_dir2_sf_to_block(args);
- if (error) {
+ if (error)
return error;
- }
return xfs_dir2_block_replace(args);
}
/*
@@ -1008,22 +1091,23 @@ xfs_dir2_sf_replace(
*/
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
- ino = dp->d_ops->sf_get_parent_ino(sfp);
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
ASSERT(args->inumber != ino);
- dp->d_ops->sf_put_parent_ino(sfp, args->inumber);
+ xfs_dir2_sf_put_parent_ino(sfp, args->inumber);
}
/*
* Normal entry, look for the name.
*/
else {
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
- ino = dp->d_ops->sf_get_ino(sfp, sfep);
+ ino = xfs_dir2_sf_get_ino(mp, sfp, sfep);
ASSERT(args->inumber != ino);
- dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
- dp->d_ops->sf_put_ftype(sfep, args->filetype);
+ xfs_dir2_sf_put_ino(mp, sfp, sfep,
+ args->inumber);
+ xfs_dir2_sf_put_ftype(mp, sfep, args->filetype);
break;
}
}
@@ -1076,8 +1160,9 @@ static void
xfs_dir2_sf_toino4(
xfs_da_args_t *args) /* operation arguments */
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
char *buf; /* old dir's buffer */
- xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
int newsize; /* new inode size */
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
@@ -1088,15 +1173,13 @@ xfs_dir2_sf_toino4(
trace_xfs_dir2_sf_toino4(args);
- dp = args->dp;
-
/*
* Copy the old directory to the buffer.
* Then nuke it from the inode, and add the new buffer to the inode.
* Don't want xfs_idata_realloc copying the data here.
*/
oldsize = dp->i_df.if_bytes;
- buf = kmem_alloc(oldsize, KM_SLEEP);
+ buf = kmem_alloc(oldsize, 0);
oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
ASSERT(oldsfp->i8count == 1);
memcpy(buf, oldsfp, oldsize);
@@ -1116,21 +1199,22 @@ xfs_dir2_sf_toino4(
*/
sfp->count = oldsfp->count;
sfp->i8count = 0;
- dp->d_ops->sf_put_parent_ino(sfp, dp->d_ops->sf_get_parent_ino(oldsfp));
+ xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
- oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep),
+ oldsfep = xfs_dir2_sf_nextentry(mp, oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
memcpy(sfep->offset, oldsfep->offset, sizeof(sfep->offset));
memcpy(sfep->name, oldsfep->name, sfep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep,
- dp->d_ops->sf_get_ino(oldsfp, oldsfep));
- dp->d_ops->sf_put_ftype(sfep, dp->d_ops->sf_get_ftype(oldsfep));
+ xfs_dir2_sf_put_ino(mp, sfp, sfep,
+ xfs_dir2_sf_get_ino(mp, oldsfp, oldsfep));
+ xfs_dir2_sf_put_ftype(mp, sfep,
+ xfs_dir2_sf_get_ftype(mp, oldsfep));
}
/*
* Clean up the inode.
@@ -1149,8 +1233,9 @@ static void
xfs_dir2_sf_toino8(
xfs_da_args_t *args) /* operation arguments */
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
char *buf; /* old dir's buffer */
- xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
int newsize; /* new inode size */
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
@@ -1161,15 +1246,13 @@ xfs_dir2_sf_toino8(
trace_xfs_dir2_sf_toino8(args);
- dp = args->dp;
-
/*
* Copy the old directory to the buffer.
* Then nuke it from the inode, and add the new buffer to the inode.
* Don't want xfs_idata_realloc copying the data here.
*/
oldsize = dp->i_df.if_bytes;
- buf = kmem_alloc(oldsize, KM_SLEEP);
+ buf = kmem_alloc(oldsize, 0);
oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
ASSERT(oldsfp->i8count == 0);
memcpy(buf, oldsfp, oldsize);
@@ -1189,21 +1272,22 @@ xfs_dir2_sf_toino8(
*/
sfp->count = oldsfp->count;
sfp->i8count = 1;
- dp->d_ops->sf_put_parent_ino(sfp, dp->d_ops->sf_get_parent_ino(oldsfp));
+ xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
- oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep),
+ oldsfep = xfs_dir2_sf_nextentry(mp, oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
memcpy(sfep->offset, oldsfep->offset, sizeof(sfep->offset));
memcpy(sfep->name, oldsfep->name, sfep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep,
- dp->d_ops->sf_get_ino(oldsfp, oldsfep));
- dp->d_ops->sf_put_ftype(sfep, dp->d_ops->sf_get_ftype(oldsfep));
+ xfs_dir2_sf_put_ino(mp, sfp, sfep,
+ xfs_dir2_sf_get_ino(mp, oldsfp, oldsfep));
+ xfs_dir2_sf_put_ftype(mp, sfep,
+ xfs_dir2_sf_get_ftype(mp, oldsfep));
}
/*
* Clean up the inode.
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index e8bd688a4073..bedc1e752b60 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -35,10 +35,10 @@ xfs_calc_dquots_per_chunk(
xfs_failaddr_t
xfs_dquot_verify(
- struct xfs_mount *mp,
- xfs_disk_dquot_t *ddq,
- xfs_dqid_t id,
- uint type) /* used only during quotacheck */
+ struct xfs_mount *mp,
+ struct xfs_disk_dquot *ddq,
+ xfs_dqid_t id,
+ uint type) /* used only during quotacheck */
{
/*
* We can encounter an uninitialized dquot buffer for 2 reasons:
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index c968b60cee15..1b7dcbae051c 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -920,13 +920,13 @@ static inline uint xfs_dinode_size(int version)
* This enum is used in string mapping in xfs_trace.h; please keep the
* TRACE_DEFINE_ENUMs for it up to date.
*/
-typedef enum xfs_dinode_fmt {
+enum xfs_dinode_fmt {
XFS_DINODE_FMT_DEV, /* xfs_dev_t */
XFS_DINODE_FMT_LOCAL, /* bulk data */
XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */
XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */
XFS_DINODE_FMT_UUID /* added long ago, but never used */
-} xfs_dinode_fmt_t;
+};
#define XFS_INODE_FORMAT_STR \
{ XFS_DINODE_FMT_DEV, "dev" }, \
@@ -1144,11 +1144,11 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
/*
* This is the main portion of the on-disk representation of quota
- * information for a user. This is the q_core of the xfs_dquot_t that
+ * information for a user. This is the q_core of the struct xfs_dquot that
* is kept in kernel memory. We pad this with some more expansion room
* to construct the on disk structure.
*/
-typedef struct xfs_disk_dquot {
+struct xfs_disk_dquot {
__be16 d_magic; /* dquot magic = XFS_DQUOT_MAGIC */
__u8 d_version; /* dquot version */
__u8 d_flags; /* XFS_DQ_USER/PROJ/GROUP */
@@ -1171,15 +1171,15 @@ typedef struct xfs_disk_dquot {
__be32 d_rtbtimer; /* similar to above; for RT disk blocks */
__be16 d_rtbwarns; /* warnings issued wrt RT disk blocks */
__be16 d_pad;
-} xfs_disk_dquot_t;
+};
/*
* This is what goes on disk. This is separated from the xfs_disk_dquot because
* carrying the unnecessary padding would be a waste of memory.
*/
typedef struct xfs_dqblk {
- xfs_disk_dquot_t dd_diskdq; /* portion that lives incore as well */
- char dd_fill[4]; /* filling for posterity */
+ struct xfs_disk_dquot dd_diskdq; /* portion living incore as well */
+ char dd_fill[4];/* filling for posterity */
/*
* These two are only present on filesystems with the CRC bits set.
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 52d03a3a02a4..ef95ca07d084 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -287,7 +287,7 @@ struct xfs_ag_geometry {
uint32_t ag_ifree; /* o: inodes free */
uint32_t ag_sick; /* o: sick things in ag */
uint32_t ag_checked; /* o: checked metadata in ag */
- uint32_t ag_reserved32; /* o: zero */
+ uint32_t ag_flags; /* i/o: flags for this ag */
uint64_t ag_reserved[12];/* o: zero */
};
#define XFS_AG_GEOM_SICK_SB (1 << 0) /* superblock */
@@ -324,7 +324,7 @@ typedef struct xfs_growfs_rt {
* Structures returned from ioctl XFS_IOC_FSBULKSTAT & XFS_IOC_FSBULKSTAT_SINGLE
*/
typedef struct xfs_bstime {
- time_t tv_sec; /* seconds */
+ __kernel_long_t tv_sec; /* seconds */
__s32 tv_nsec; /* and nanoseconds */
} xfs_bstime_t;
@@ -366,11 +366,11 @@ struct xfs_bulkstat {
uint64_t bs_blocks; /* number of blocks */
uint64_t bs_xflags; /* extended flags */
- uint64_t bs_atime; /* access time, seconds */
- uint64_t bs_mtime; /* modify time, seconds */
+ int64_t bs_atime; /* access time, seconds */
+ int64_t bs_mtime; /* modify time, seconds */
- uint64_t bs_ctime; /* inode change time, seconds */
- uint64_t bs_btime; /* creation time, seconds */
+ int64_t bs_ctime; /* inode change time, seconds */
+ int64_t bs_btime; /* creation time, seconds */
uint32_t bs_gen; /* generation count */
uint32_t bs_uid; /* user id */
@@ -416,7 +416,7 @@ struct xfs_bulkstat {
/*
* Project quota id helpers (previously projid was 16bit only
- * and using two 16bit values to hold new 32bit projid was choosen
+ * and using two 16bit values to hold new 32bit projid was chosen
* to retain compatibility with "old" filesystems).
*/
static inline uint32_t
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 04377ab75863..988cde7744e6 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -544,7 +544,10 @@ xfs_inobt_insert_sprec(
nrec->ir_free, &i);
if (error)
goto error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
goto out;
}
@@ -557,17 +560,23 @@ xfs_inobt_insert_sprec(
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
goto error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
- XFS_WANT_CORRUPTED_GOTO(mp,
- rec.ir_startino == nrec->ir_startino,
- error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
+ if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
/*
* This should never fail. If we have coexisting records that
* cannot merge, something is seriously wrong.
*/
- XFS_WANT_CORRUPTED_GOTO(mp, __xfs_inobt_can_merge(nrec, &rec),
- error);
+ if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
rec.ir_holemask, nrec->ir_startino,
@@ -1057,7 +1066,8 @@ xfs_ialloc_next_rec(
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
}
return 0;
@@ -1081,7 +1091,8 @@ xfs_ialloc_get_rec(
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
}
return 0;
@@ -1161,12 +1172,18 @@ xfs_dialloc_ag_inobt(
error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_inobt_get_rec(cur, &rec, &j);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, j == 1, error0);
+ if (XFS_IS_CORRUPT(mp, j != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (rec.ir_freecount > 0) {
/*
@@ -1321,19 +1338,28 @@ xfs_dialloc_ag_inobt(
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
for (;;) {
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (rec.ir_freecount > 0)
break;
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
alloc_inode:
@@ -1393,7 +1419,8 @@ xfs_dialloc_ag_finobt_near(
error = xfs_inobt_get_rec(lcur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(lcur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
/*
* See if we've landed in the parent inode record. The finobt
@@ -1416,10 +1443,16 @@ xfs_dialloc_ag_finobt_near(
error = xfs_inobt_get_rec(rcur, &rrec, &j);
if (error)
goto error_rcur;
- XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, j == 1, error_rcur);
+ if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
+ error = -EFSCORRUPTED;
+ goto error_rcur;
+ }
}
- XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, i == 1 || j == 1, error_rcur);
+ if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
+ error = -EFSCORRUPTED;
+ goto error_rcur;
+ }
if (i == 1 && j == 1) {
/*
* Both the left and right records are valid. Choose the closer
@@ -1472,7 +1505,8 @@ xfs_dialloc_ag_finobt_newino(
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
return 0;
}
}
@@ -1483,12 +1517,14 @@ xfs_dialloc_ag_finobt_newino(
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
return 0;
}
@@ -1510,20 +1546,24 @@ xfs_dialloc_ag_update_inobt(
error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
XFS_INODES_PER_CHUNK) == 0);
rec.ir_free &= ~XFS_INOBT_MASK(offset);
rec.ir_freecount--;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, (rec.ir_free == frec->ir_free) &&
- (rec.ir_freecount == frec->ir_freecount));
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ rec.ir_free != frec->ir_free ||
+ rec.ir_freecount != frec->ir_freecount))
+ return -EFSCORRUPTED;
return xfs_inobt_update(cur, &rec);
}
@@ -1933,14 +1973,20 @@ xfs_difree_inobt(
__func__, error);
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error) {
xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
__func__, error);
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Get the offset in the inode chunk.
*/
@@ -2052,7 +2098,10 @@ xfs_difree_finobt(
* freed an inode in a previously fully allocated chunk. If not,
* something is out of sync.
*/
- XFS_WANT_CORRUPTED_GOTO(mp, ibtrec->ir_freecount == 1, error);
+ if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
ibtrec->ir_count,
@@ -2075,14 +2124,20 @@ xfs_difree_finobt(
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
goto error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
rec.ir_free |= XFS_INOBT_MASK(offset);
rec.ir_freecount++;
- XFS_WANT_CORRUPTED_GOTO(mp, (rec.ir_free == ibtrec->ir_free) &&
- (rec.ir_freecount == ibtrec->ir_freecount),
- error);
+ if (XFS_IS_CORRUPT(mp,
+ rec.ir_free != ibtrec->ir_free ||
+ rec.ir_freecount != ibtrec->ir_freecount)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
/*
* The content of inobt records should always match between the inobt
@@ -2787,8 +2842,13 @@ xfs_ialloc_setup_geometry(
igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
inodes);
- /* Set the maximum inode count for this filesystem. */
- if (sbp->sb_imax_pct) {
+ /*
+ * Set the maximum inode count for this filesystem, being careful not
+ * to use obviously garbage sb_inopblog/sb_inopblock values. Regular
+ * users should never get here due to failing sb verification, but
+ * certain tools (xfs_db) need to remain usable even with corrupt metadata.
+ */
+ if (sbp->sb_imax_pct && igeo->ialloc_blks) {
/*
* Make sure the maximum inode count is a multiple
* of the units we allocate inodes in.
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
index 27aa3f2bc4bc..52451809c478 100644
--- a/fs/xfs/libxfs/xfs_iext_tree.c
+++ b/fs/xfs/libxfs/xfs_iext_tree.c
@@ -596,7 +596,7 @@ xfs_iext_realloc_root(
struct xfs_ifork *ifp,
struct xfs_iext_cursor *cur)
{
- size_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
+ int64_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
void *new;
/* account for the prev/next pointers */
@@ -616,7 +616,7 @@ xfs_iext_realloc_root(
* sequence counter is seen before the modifications to the extent tree itself
* take effect.
*/
-static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp, int state)
+static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp)
{
WRITE_ONCE(ifp->if_seq, READ_ONCE(ifp->if_seq) + 1);
}
@@ -633,7 +633,7 @@ xfs_iext_insert(
struct xfs_iext_leaf *new = NULL;
int nr_entries, i;
- xfs_iext_inc_seq(ifp, state);
+ xfs_iext_inc_seq(ifp);
if (ifp->if_height == 0)
xfs_iext_alloc_root(ifp, cur);
@@ -875,7 +875,7 @@ xfs_iext_remove(
ASSERT(ifp->if_u1.if_root != NULL);
ASSERT(xfs_iext_valid(ifp, cur));
- xfs_iext_inc_seq(ifp, state);
+ xfs_iext_inc_seq(ifp);
nr_entries = xfs_iext_leaf_nr_entries(ifp, leaf, cur->pos) - 1;
for (i = cur->pos; i < nr_entries; i++)
@@ -983,7 +983,7 @@ xfs_iext_update_extent(
{
struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
- xfs_iext_inc_seq(ifp, state);
+ xfs_iext_inc_seq(ifp);
if (cur->pos == 0) {
struct xfs_bmbt_irec old;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 28ab3c5255e1..8afacfe4be0a 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -213,13 +213,12 @@ xfs_inode_from_disk(
to->di_version = from->di_version;
if (to->di_version == 1) {
set_nlink(inode, be16_to_cpu(from->di_onlink));
- to->di_projid_lo = 0;
- to->di_projid_hi = 0;
+ to->di_projid = 0;
to->di_version = 2;
} else {
set_nlink(inode, be32_to_cpu(from->di_nlink));
- to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
- to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
+ to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
+ be16_to_cpu(from->di_projid_lo);
}
to->di_format = from->di_format;
@@ -256,8 +255,8 @@ xfs_inode_from_disk(
if (to->di_version == 3) {
inode_set_iversion_queried(inode,
be64_to_cpu(from->di_changecount));
- to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
- to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
+ to->di_crtime.tv_sec = be32_to_cpu(from->di_crtime.t_sec);
+ to->di_crtime.tv_nsec = be32_to_cpu(from->di_crtime.t_nsec);
to->di_flags2 = be64_to_cpu(from->di_flags2);
to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
}
@@ -279,8 +278,8 @@ xfs_inode_to_disk(
to->di_format = from->di_format;
to->di_uid = cpu_to_be32(from->di_uid);
to->di_gid = cpu_to_be32(from->di_gid);
- to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
- to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
+ to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
+ to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);
memset(to->di_pad, 0, sizeof(to->di_pad));
to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
@@ -306,8 +305,8 @@ xfs_inode_to_disk(
if (from->di_version == 3) {
to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
- to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
- to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
+ to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.tv_sec);
+ to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.tv_nsec);
to->di_flags2 = cpu_to_be64(from->di_flags2);
to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
to->di_ino = cpu_to_be64(ip->i_ino);
@@ -632,8 +631,6 @@ xfs_iread(
if ((iget_flags & XFS_IGET_CREATE) &&
xfs_sb_version_hascrc(&mp->m_sb) &&
!(mp->m_flags & XFS_MOUNT_IKEEP)) {
- /* initialise the on-disk inode core */
- memset(&ip->i_d, 0, sizeof(ip->i_d));
VFS_I(ip)->i_generation = prandom_u32();
ip->i_d.di_version = 3;
return 0;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index ab0f84165317..fd94b1078722 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -21,8 +21,7 @@ struct xfs_icdinode {
uint16_t di_flushiter; /* incremented on flush */
uint32_t di_uid; /* owner's user id */
uint32_t di_gid; /* owner's group id */
- uint16_t di_projid_lo; /* lower part of owner's project id */
- uint16_t di_projid_hi; /* higher part of owner's project id */
+ uint32_t di_projid; /* owner's project id */
xfs_fsize_t di_size; /* number of bytes in file */
xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */
xfs_extlen_t di_extsize; /* basic/minimum extent size for file */
@@ -37,7 +36,7 @@ struct xfs_icdinode {
uint64_t di_flags2; /* more random flags */
uint32_t di_cowextsize; /* basic cow extent size for file */
- xfs_ictimestamp_t di_crtime; /* time created */
+ struct timespec64 di_crtime; /* time created */
};
/*
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index bf3e04018246..ad2b9c313fd2 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -75,11 +75,15 @@ xfs_iformat_fork(
error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
break;
default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
+ dip, sizeof(*dip), __this_address);
return -EFSCORRUPTED;
}
break;
default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), __this_address);
return -EFSCORRUPTED;
}
if (error)
@@ -94,7 +98,7 @@ xfs_iformat_fork(
return 0;
ASSERT(ip->i_afp == NULL);
- ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
+ ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS);
switch (dip->di_aformat) {
case XFS_DINODE_FMT_LOCAL:
@@ -110,14 +114,16 @@ xfs_iformat_fork(
error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
break;
default:
+ xfs_inode_verifier_error(ip, error, __func__, dip,
+ sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
break;
}
if (error) {
- kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_afp);
ip->i_afp = NULL;
if (ip->i_cowfp)
- kmem_zone_free(xfs_ifork_zone, ip->i_cowfp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
ip->i_cowfp = NULL;
xfs_idestroy_fork(ip, XFS_DATA_FORK);
}
@@ -129,7 +135,7 @@ xfs_init_local_fork(
struct xfs_inode *ip,
int whichfork,
const void *data,
- int size)
+ int64_t size)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
int mem_size = size, real_size = 0;
@@ -147,7 +153,7 @@ xfs_init_local_fork(
if (size) {
real_size = roundup(mem_size, 4);
- ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
+ ifp->if_u1.if_data = kmem_alloc(real_size, KM_NOFS);
memcpy(ifp->if_u1.if_data, data, size);
if (zero_terminate)
ifp->if_u1.if_data[size] = '\0';
@@ -302,7 +308,7 @@ xfs_iformat_btree(
}
ifp->if_broot_bytes = size;
- ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
+ ifp->if_broot = kmem_alloc(size, KM_NOFS);
ASSERT(ifp->if_broot != NULL);
/*
* Copy and convert from the on-disk structure
@@ -367,7 +373,7 @@ xfs_iroot_realloc(
*/
if (ifp->if_broot_bytes == 0) {
new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
- ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
+ ifp->if_broot = kmem_alloc(new_size, KM_NOFS);
ifp->if_broot_bytes = (int)new_size;
return;
}
@@ -382,7 +388,7 @@ xfs_iroot_realloc(
new_max = cur_max + rec_diff;
new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
ifp->if_broot_bytes);
np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
@@ -408,7 +414,7 @@ xfs_iroot_realloc(
else
new_size = 0;
if (new_size > 0) {
- new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
+ new_broot = kmem_alloc(new_size, KM_NOFS);
/*
* First copy over the btree block header.
*/
@@ -467,11 +473,11 @@ xfs_iroot_realloc(
void
xfs_idata_realloc(
struct xfs_inode *ip,
- int byte_diff,
+ int64_t byte_diff,
int whichfork)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
- int new_size = (int)ifp->if_bytes + byte_diff;
+ int64_t new_size = ifp->if_bytes + byte_diff;
ASSERT(new_size >= 0);
ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork));
@@ -492,7 +498,7 @@ xfs_idata_realloc(
* We enforce that here.
*/
ifp->if_u1.if_data = kmem_realloc(ifp->if_u1.if_data,
- roundup(new_size, 4), KM_SLEEP | KM_NOFS);
+ roundup(new_size, 4), KM_NOFS);
ifp->if_bytes = new_size;
}
@@ -525,10 +531,10 @@ xfs_idestroy_fork(
}
if (whichfork == XFS_ATTR_FORK) {
- kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_afp);
ip->i_afp = NULL;
} else if (whichfork == XFS_COW_FORK) {
- kmem_zone_free(xfs_ifork_zone, ip->i_cowfp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
ip->i_cowfp = NULL;
}
}
@@ -552,7 +558,7 @@ xfs_iextents_copy(
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec rec;
- int copied = 0;
+ int64_t copied = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
ASSERT(ifp->if_bytes > 0);
@@ -683,7 +689,7 @@ xfs_ifork_init_cow(
return;
ip->i_cowfp = kmem_zone_zalloc(xfs_ifork_zone,
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
ip->i_cowfp->if_flags = XFS_IFEXTENTS;
ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
ip->i_cnextents = 0;
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 00c62ce170d0..500333d0101e 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -13,16 +13,16 @@ struct xfs_dinode;
* File incore extent information, present for each of data & attr forks.
*/
struct xfs_ifork {
- int if_bytes; /* bytes in if_u1 */
- unsigned int if_seq; /* fork mod counter */
+ int64_t if_bytes; /* bytes in if_u1 */
struct xfs_btree_block *if_broot; /* file's incore btree root */
- short if_broot_bytes; /* bytes allocated for root */
- unsigned char if_flags; /* per-fork flags */
+ unsigned int if_seq; /* fork mod counter */
int if_height; /* height of the extent tree */
union {
void *if_root; /* extent tree root */
char *if_data; /* inline file data */
} if_u1;
+ short if_broot_bytes; /* bytes allocated for root */
+ unsigned char if_flags; /* per-fork flags */
};
/*
@@ -87,18 +87,24 @@ struct xfs_ifork {
#define XFS_IFORK_MAXEXT(ip, w) \
(XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
+#define xfs_ifork_has_extents(ip, w) \
+ (XFS_IFORK_FORMAT((ip), (w)) == XFS_DINODE_FMT_EXTENTS || \
+ XFS_IFORK_FORMAT((ip), (w)) == XFS_DINODE_FMT_BTREE)
+
struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
struct xfs_inode_log_item *, int);
void xfs_idestroy_fork(struct xfs_inode *, int);
-void xfs_idata_realloc(struct xfs_inode *, int, int);
+void xfs_idata_realloc(struct xfs_inode *ip, int64_t byte_diff,
+ int whichfork);
void xfs_iroot_realloc(struct xfs_inode *, int, int);
int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *,
int);
-void xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
+void xfs_init_local_fork(struct xfs_inode *ip, int whichfork,
+ const void *data, int64_t size);
xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp);
void xfs_iext_insert(struct xfs_inode *, struct xfs_iext_cursor *cur,
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index e5f97c69b320..8ef31d71a9c7 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -432,9 +432,9 @@ static inline uint xfs_log_dinode_size(int version)
}
/*
- * Buffer Log Format defintions
+ * Buffer Log Format definitions
*
- * These are the physical dirty bitmap defintions for the log format structure.
+ * These are the physical dirty bitmap definitions for the log format structure.
*/
#define XFS_BLF_CHUNK 128
#define XFS_BLF_SHIFT 7
diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index f3d18eaecebb..3bf671637a91 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -30,14 +30,14 @@ typedef struct xlog_recover_item {
xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */
} xlog_recover_item_t;
-typedef struct xlog_recover {
+struct xlog_recover {
struct hlist_node r_list;
xlog_tid_t r_log_tid; /* log's transaction id */
xfs_trans_header_t r_theader; /* trans header for partial */
int r_state; /* not needed */
xfs_lsn_t r_lsn; /* xact lsn */
struct list_head r_itemq; /* q for items */
-} xlog_recover_t;
+};
#define ITEM_TYPE(i) (*(unsigned short *)(i)->ri_buf[0].i_addr)
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 51bb9bdb0e84..d7d702ee4d1a 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -200,7 +200,10 @@ xfs_refcount_insert(
error = xfs_btree_insert(cur, i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
out_error:
if (error)
@@ -227,10 +230,16 @@ xfs_refcount_delete(
error = xfs_refcount_get_rec(cur, &irec, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_refcount_delete(cur->bc_mp, cur->bc_private.a.agno, &irec);
error = xfs_btree_delete(cur, i);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (error)
goto out_error;
error = xfs_refcount_lookup_ge(cur, irec.rc_startblock, &found_rec);
@@ -349,7 +358,10 @@ xfs_refcount_split_extent(
error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno)
return 0;
@@ -371,7 +383,10 @@ xfs_refcount_split_extent(
error = xfs_refcount_insert(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
return error;
out_error:
@@ -410,19 +425,27 @@ xfs_refcount_merge_center_extents(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (center->rc_refcount > 1) {
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* Enlarge the left extent. */
@@ -430,7 +453,10 @@ xfs_refcount_merge_center_extents(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
left->rc_blockcount = extlen;
error = xfs_refcount_update(cur, left);
@@ -469,14 +495,18 @@ xfs_refcount_merge_left_extent(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* Enlarge the left extent. */
@@ -484,7 +514,10 @@ xfs_refcount_merge_left_extent(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
left->rc_blockcount += cleft->rc_blockcount;
error = xfs_refcount_update(cur, left);
@@ -526,14 +559,18 @@ xfs_refcount_merge_right_extent(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* Enlarge the right extent. */
@@ -541,7 +578,10 @@ xfs_refcount_merge_right_extent(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
right->rc_startblock -= cright->rc_blockcount;
right->rc_blockcount += cright->rc_blockcount;
@@ -587,7 +627,10 @@ xfs_refcount_find_left_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (xfs_refc_next(&tmp) != agbno)
return 0;
@@ -605,8 +648,10 @@ xfs_refcount_find_left_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* if tmp starts at the end of our range, just use that */
if (tmp.rc_startblock == agbno)
@@ -671,7 +716,10 @@ xfs_refcount_find_right_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (tmp.rc_startblock != agbno + aglen)
return 0;
@@ -689,8 +737,10 @@ xfs_refcount_find_right_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* if tmp ends at the end of our range, just use that */
if (xfs_refc_next(&tmp) == agbno + aglen)
@@ -913,8 +963,11 @@ xfs_refcount_adjust_extents(
&found_tmp);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_tmp == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ found_tmp != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
cur->bc_private.a.priv.refc.nr_ops++;
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
@@ -955,8 +1008,10 @@ xfs_refcount_adjust_extents(
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
cur->bc_private.a.priv.refc.nr_ops++;
goto advloop;
} else {
@@ -1122,7 +1177,7 @@ xfs_refcount_finish_one(
XFS_ALLOC_FLAG_FREEING, &agbp);
if (error)
return error;
- if (!agbp)
+ if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
return -EFSCORRUPTED;
rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
@@ -1174,7 +1229,7 @@ out_cur:
/*
* Record a refcount intent for later processing.
*/
-static int
+static void
__xfs_refcount_add(
struct xfs_trans *tp,
enum xfs_refcount_intent_type type,
@@ -1189,44 +1244,43 @@ __xfs_refcount_add(
blockcount);
ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type;
ri->ri_startblock = startblock;
ri->ri_blockcount = blockcount;
xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
- return 0;
}
/*
* Increase the reference count of the blocks backing a file's extent.
*/
-int
+void
xfs_refcount_increase_extent(
struct xfs_trans *tp,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb))
- return 0;
+ return;
- return __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE,
- PREV->br_startblock, PREV->br_blockcount);
+ __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, PREV->br_startblock,
+ PREV->br_blockcount);
}
/*
* Decrease the reference count of the blocks backing a file's extent.
*/
-int
+void
xfs_refcount_decrease_extent(
struct xfs_trans *tp,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb))
- return 0;
+ return;
- return __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE,
- PREV->br_startblock, PREV->br_blockcount);
+ __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, PREV->br_startblock,
+ PREV->br_blockcount);
}
/*
@@ -1273,7 +1327,10 @@ xfs_refcount_find_shared(
error = xfs_refcount_get_rec(cur, &tmp, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* If the extent ends before the start, look at the next one */
if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
@@ -1285,7 +1342,10 @@ xfs_refcount_find_shared(
error = xfs_refcount_get_rec(cur, &tmp, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* If the extent starts after the range we want, bail out */
@@ -1313,7 +1373,10 @@ xfs_refcount_find_shared(
error = xfs_refcount_get_rec(cur, &tmp, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (tmp.rc_startblock >= agbno + aglen ||
tmp.rc_startblock != *fbno + *flen)
break;
@@ -1414,8 +1477,11 @@ xfs_refcount_adjust_cow_extents(
switch (adj) {
case XFS_REFCOUNT_ADJUST_COW_ALLOC:
/* Adding a CoW reservation, there should be nothing here. */
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_startblock >= agbno + aglen, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ agbno + aglen > ext.rc_startblock)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
tmp.rc_startblock = agbno;
tmp.rc_blockcount = aglen;
@@ -1427,17 +1493,25 @@ xfs_refcount_adjust_cow_extents(
&found_tmp);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_tmp == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_tmp != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
break;
case XFS_REFCOUNT_ADJUST_COW_FREE:
/* Removing a CoW reservation, there should be one extent. */
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_startblock == agbno, out_error);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_blockcount == aglen, out_error);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_refcount == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_startblock != agbno)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount != aglen)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_refcount != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
ext.rc_refcount = 0;
trace_xfs_refcount_modify_extent(cur->bc_mp,
@@ -1445,8 +1519,10 @@ xfs_refcount_adjust_cow_extents(
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
break;
default:
ASSERT(0);
@@ -1541,47 +1617,40 @@ __xfs_refcount_cow_free(
}
/* Record a CoW staging extent in the refcount btree. */
-int
+void
xfs_refcount_alloc_cow_extent(
struct xfs_trans *tp,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
struct xfs_mount *mp = tp->t_mountp;
- int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return 0;
+ return;
- error = __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len);
- if (error)
- return error;
+ __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len);
/* Add rmap entry */
- return xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
+ xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
}
/* Forget a CoW staging event in the refcount btree. */
-int
+void
xfs_refcount_free_cow_extent(
struct xfs_trans *tp,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
struct xfs_mount *mp = tp->t_mountp;
- int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return 0;
+ return;
/* Remove rmap entry */
- error = xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
+ xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
- if (error)
- return error;
-
- return __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len);
+ __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len);
}
struct xfs_refcount_recovery {
@@ -1592,17 +1661,18 @@ struct xfs_refcount_recovery {
/* Stuff an extent on the recovery list. */
STATIC int
xfs_refcount_recover_extent(
- struct xfs_btree_cur *cur,
+ struct xfs_btree_cur *cur,
union xfs_btree_rec *rec,
void *priv)
{
struct list_head *debris = priv;
struct xfs_refcount_recovery *rr;
- if (be32_to_cpu(rec->refc.rc_refcount) != 1)
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ be32_to_cpu(rec->refc.rc_refcount) != 1))
return -EFSCORRUPTED;
- rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), KM_SLEEP);
+ rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0);
xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
list_add_tail(&rr->rr_list, debris);
@@ -1679,10 +1749,8 @@ xfs_refcount_recover_cow_leftovers(
/* Free the orphan record */
agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
- error = xfs_refcount_free_cow_extent(tp, fsb,
+ xfs_refcount_free_cow_extent(tp, fsb,
rr->rr_rrec.rc_blockcount);
- if (error)
- goto out_trans;
/* Free the block. */
xfs_bmap_add_free(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index 1d9c518575e7..209795539c8d 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -29,9 +29,9 @@ struct xfs_refcount_intent {
xfs_extlen_t ri_blockcount;
};
-extern int xfs_refcount_increase_extent(struct xfs_trans *tp,
+void xfs_refcount_increase_extent(struct xfs_trans *tp,
struct xfs_bmbt_irec *irec);
-extern int xfs_refcount_decrease_extent(struct xfs_trans *tp,
+void xfs_refcount_decrease_extent(struct xfs_trans *tp,
struct xfs_bmbt_irec *irec);
extern void xfs_refcount_finish_one_cleanup(struct xfs_trans *tp,
@@ -45,10 +45,10 @@ extern int xfs_refcount_find_shared(struct xfs_btree_cur *cur,
xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno,
xfs_extlen_t *flen, bool find_end_of_shared);
-extern int xfs_refcount_alloc_cow_extent(struct xfs_trans *tp,
- xfs_fsblock_t fsb, xfs_extlen_t len);
-extern int xfs_refcount_free_cow_extent(struct xfs_trans *tp,
- xfs_fsblock_t fsb, xfs_extlen_t len);
+void xfs_refcount_alloc_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb,
+ xfs_extlen_t len);
+void xfs_refcount_free_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb,
+ xfs_extlen_t len);
extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
xfs_agnumber_t agno);
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index e6aeb390b2fb..ff9412f113c4 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -113,7 +113,10 @@ xfs_rmap_insert(
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 0, done);
+ if (XFS_IS_CORRUPT(rcur->bc_mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
rcur->bc_rec.r.rm_startblock = agbno;
rcur->bc_rec.r.rm_blockcount = len;
@@ -123,7 +126,10 @@ xfs_rmap_insert(
error = xfs_btree_insert(rcur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+ if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
done:
if (error)
trace_xfs_rmap_insert_error(rcur->bc_mp,
@@ -149,12 +155,18 @@ xfs_rmap_delete(
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+ if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_delete(rcur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+ if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
done:
if (error)
trace_xfs_rmap_delete_error(rcur->bc_mp,
@@ -168,7 +180,6 @@ xfs_rmap_btrec_to_irec(
union xfs_btree_rec *rec,
struct xfs_rmap_irec *irec)
{
- irec->rm_flags = 0;
irec->rm_startblock = be32_to_cpu(rec->rmap.rm_startblock);
irec->rm_blockcount = be32_to_cpu(rec->rmap.rm_blockcount);
irec->rm_owner = be64_to_cpu(rec->rmap.rm_owner);
@@ -254,15 +265,15 @@ xfs_rmap_find_left_neighbor_helper(
rec->rm_flags);
if (rec->rm_owner != info->high.rm_owner)
- return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ return 0;
if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
!(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
rec->rm_offset + rec->rm_blockcount - 1 != info->high.rm_offset)
- return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ return 0;
*info->irec = *rec;
*info->stat = 1;
- return XFS_BTREE_QUERY_RANGE_ABORT;
+ return -ECANCELED;
}
/*
@@ -305,7 +316,7 @@ xfs_rmap_find_left_neighbor(
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_find_left_neighbor_helper, &info);
- if (error == XFS_BTREE_QUERY_RANGE_ABORT)
+ if (error == -ECANCELED)
error = 0;
if (*stat)
trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
@@ -330,16 +341,16 @@ xfs_rmap_lookup_le_range_helper(
rec->rm_flags);
if (rec->rm_owner != info->high.rm_owner)
- return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ return 0;
if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
!(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
(rec->rm_offset > info->high.rm_offset ||
rec->rm_offset + rec->rm_blockcount <= info->high.rm_offset))
- return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ return 0;
*info->irec = *rec;
*info->stat = 1;
- return XFS_BTREE_QUERY_RANGE_ABORT;
+ return -ECANCELED;
}
/*
@@ -377,7 +388,7 @@ xfs_rmap_lookup_le_range(
cur->bc_private.a.agno, bno, 0, owner, offset, flags);
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_lookup_le_range_helper, &info);
- if (error == XFS_BTREE_QUERY_RANGE_ABORT)
+ if (error == -ECANCELED)
error = 0;
if (*stat)
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
@@ -407,24 +418,39 @@ xfs_rmap_free_check_owner(
return 0;
/* Make sure the unwritten flag matches. */
- XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
- (rec->rm_flags & XFS_RMAP_UNWRITTEN), out);
+ if (XFS_IS_CORRUPT(mp,
+ (flags & XFS_RMAP_UNWRITTEN) !=
+ (rec->rm_flags & XFS_RMAP_UNWRITTEN))) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
/* Make sure the owner matches what we expect to find in the tree. */
- XFS_WANT_CORRUPTED_GOTO(mp, owner == rec->rm_owner, out);
+ if (XFS_IS_CORRUPT(mp, owner != rec->rm_owner)) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
/* Check the offset, if necessary. */
if (XFS_RMAP_NON_INODE_OWNER(owner))
goto out;
if (flags & XFS_RMAP_BMBT_BLOCK) {
- XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_flags & XFS_RMAP_BMBT_BLOCK,
- out);
+ if (XFS_IS_CORRUPT(mp,
+ !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK))) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
} else {
- XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_offset <= offset, out);
- XFS_WANT_CORRUPTED_GOTO(mp,
- ltoff + rec->rm_blockcount >= offset + len,
- out);
+ if (XFS_IS_CORRUPT(mp, rec->rm_offset > offset)) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+ if (XFS_IS_CORRUPT(mp,
+ offset + len > ltoff + rec->rm_blockcount)) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
}
out:
@@ -483,12 +509,18 @@ xfs_rmap_unmap(
error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_rmap_get_rec(cur, &ltrec, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
cur->bc_private.a.agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
@@ -503,8 +535,12 @@ xfs_rmap_unmap(
* be the case that the "left" extent goes all the way to EOFS.
*/
if (owner == XFS_RMAP_OWN_NULL) {
- XFS_WANT_CORRUPTED_GOTO(mp, bno >= ltrec.rm_startblock +
- ltrec.rm_blockcount, out_error);
+ if (XFS_IS_CORRUPT(mp,
+ bno <
+ ltrec.rm_startblock + ltrec.rm_blockcount)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
goto out_done;
}
@@ -527,15 +563,22 @@ xfs_rmap_unmap(
error = xfs_rmap_get_rec(cur, &rtrec, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (rtrec.rm_startblock >= bno + len)
goto out_done;
}
/* Make sure the extent we found covers the entire freeing range. */
- XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
- ltrec.rm_startblock + ltrec.rm_blockcount >=
- bno + len, out_error);
+ if (XFS_IS_CORRUPT(mp,
+ ltrec.rm_startblock > bno ||
+ ltrec.rm_startblock + ltrec.rm_blockcount <
+ bno + len)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* Check owner information. */
error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, len, owner,
@@ -552,7 +595,10 @@ xfs_rmap_unmap(
error = xfs_btree_delete(cur, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
} else if (ltrec.rm_startblock == bno) {
/*
* overlap left hand side of extent: move the start, trim the
@@ -744,7 +790,10 @@ xfs_rmap_map(
error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, have_lt != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
cur->bc_private.a.agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
@@ -754,9 +803,12 @@ xfs_rmap_map(
have_lt = 0;
}
- XFS_WANT_CORRUPTED_GOTO(mp,
- have_lt == 0 ||
- ltrec.rm_startblock + ltrec.rm_blockcount <= bno, out_error);
+ if (XFS_IS_CORRUPT(mp,
+ have_lt != 0 &&
+ ltrec.rm_startblock + ltrec.rm_blockcount > bno)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/*
* Increment the cursor to see if we have a right-adjacent record to our
@@ -770,9 +822,14 @@ xfs_rmap_map(
error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_gt == 1, out_error);
- XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= gtrec.rm_startblock,
- out_error);
+ if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(mp, bno + len > gtrec.rm_startblock)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
@@ -822,7 +879,10 @@ xfs_rmap_map(
error = xfs_btree_delete(cur, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* point the cursor back to the left record and update */
@@ -866,7 +926,10 @@ xfs_rmap_map(
error = xfs_btree_insert(cur, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
@@ -958,12 +1021,18 @@ xfs_rmap_convert(
error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_rmap_get_rec(cur, &PREV, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
cur->bc_private.a.agno, PREV.rm_startblock,
PREV.rm_blockcount, PREV.rm_owner,
@@ -996,10 +1065,16 @@ xfs_rmap_convert(
error = xfs_rmap_get_rec(cur, &LEFT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- XFS_WANT_CORRUPTED_GOTO(mp,
- LEFT.rm_startblock + LEFT.rm_blockcount <= bno,
- done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
+ if (XFS_IS_CORRUPT(mp,
+ LEFT.rm_startblock + LEFT.rm_blockcount >
+ bno)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, LEFT.rm_startblock,
LEFT.rm_blockcount, LEFT.rm_owner,
@@ -1018,7 +1093,10 @@ xfs_rmap_convert(
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
@@ -1027,9 +1105,14 @@ xfs_rmap_convert(
error = xfs_rmap_get_rec(cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= RIGHT.rm_startblock,
- done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
+ if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
@@ -1056,7 +1139,10 @@ xfs_rmap_convert(
error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/*
* Switch out based on the FILLING and CONTIG state bits.
@@ -1072,7 +1158,10 @@ xfs_rmap_convert(
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
@@ -1080,11 +1169,17 @@ xfs_rmap_convert(
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
PREV.rm_startblock, PREV.rm_blockcount,
PREV.rm_owner, PREV.rm_offset,
@@ -1092,11 +1187,17 @@ xfs_rmap_convert(
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW = LEFT;
NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
error = xfs_rmap_update(cur, &NEW);
@@ -1116,11 +1217,17 @@ xfs_rmap_convert(
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW = LEFT;
NEW.rm_blockcount += PREV.rm_blockcount;
error = xfs_rmap_update(cur, &NEW);
@@ -1136,7 +1243,10 @@ xfs_rmap_convert(
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
@@ -1144,11 +1254,17 @@ xfs_rmap_convert(
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW = PREV;
NEW.rm_blockcount = len + RIGHT.rm_blockcount;
NEW.rm_flags = newext;
@@ -1215,7 +1331,10 @@ xfs_rmap_convert(
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
break;
case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
@@ -1254,7 +1373,10 @@ xfs_rmap_convert(
oldext, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_startblock = bno;
NEW.rm_owner = owner;
NEW.rm_offset = offset;
@@ -1266,7 +1388,10 @@ xfs_rmap_convert(
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
break;
case 0:
@@ -1296,7 +1421,10 @@ xfs_rmap_convert(
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/*
* Reset the cursor to the position of the new extent
* we are about to insert as we can't trust it after
@@ -1306,7 +1434,10 @@ xfs_rmap_convert(
oldext, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/* new middle extent - newext */
cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
cur->bc_rec.r.rm_flags |= newext;
@@ -1315,7 +1446,10 @@ xfs_rmap_convert(
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
break;
case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
@@ -1384,7 +1518,10 @@ xfs_rmap_convert_shared(
&PREV, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
ASSERT(PREV.rm_offset <= offset);
ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
@@ -1407,9 +1544,12 @@ xfs_rmap_convert_shared(
goto done;
if (i) {
state |= RMAP_LEFT_VALID;
- XFS_WANT_CORRUPTED_GOTO(mp,
- LEFT.rm_startblock + LEFT.rm_blockcount <= bno,
- done);
+ if (XFS_IS_CORRUPT(mp,
+ LEFT.rm_startblock + LEFT.rm_blockcount >
+ bno)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if (xfs_rmap_is_mergeable(&LEFT, owner, newext))
state |= RMAP_LEFT_CONTIG;
}
@@ -1424,9 +1564,14 @@ xfs_rmap_convert_shared(
error = xfs_rmap_get_rec(cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= RIGHT.rm_startblock,
- done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
+ if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
@@ -1473,7 +1618,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1496,7 +1644,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount += PREV.rm_blockcount;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1519,7 +1670,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount += RIGHT.rm_blockcount;
NEW.rm_flags = RIGHT.rm_flags;
error = xfs_rmap_update(cur, &NEW);
@@ -1539,7 +1693,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_flags = newext;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1571,7 +1728,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount += len;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1613,7 +1773,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount = offset - NEW.rm_offset;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1645,7 +1808,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount -= len;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1680,7 +1846,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount = offset - NEW.rm_offset;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1766,25 +1935,44 @@ xfs_rmap_unmap_shared(
&ltrec, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
ltoff = ltrec.rm_offset;
/* Make sure the extent we found covers the entire freeing range. */
- XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
- ltrec.rm_startblock + ltrec.rm_blockcount >=
- bno + len, out_error);
+ if (XFS_IS_CORRUPT(mp,
+ ltrec.rm_startblock > bno ||
+ ltrec.rm_startblock + ltrec.rm_blockcount <
+ bno + len)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* Make sure the owner matches what we expect to find in the tree. */
- XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner, out_error);
+ if (XFS_IS_CORRUPT(mp, owner != ltrec.rm_owner)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* Make sure the unwritten flag matches. */
- XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
- (ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);
+ if (XFS_IS_CORRUPT(mp,
+ (flags & XFS_RMAP_UNWRITTEN) !=
+ (ltrec.rm_flags & XFS_RMAP_UNWRITTEN))) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* Check the offset. */
- XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_offset <= offset, out_error);
- XFS_WANT_CORRUPTED_GOTO(mp, offset <= ltoff + ltrec.rm_blockcount,
- out_error);
+ if (XFS_IS_CORRUPT(mp, ltrec.rm_offset > offset)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(mp, offset > ltoff + ltrec.rm_blockcount)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
/* Exact match, simply remove the record from rmap tree. */
@@ -1837,7 +2025,10 @@ xfs_rmap_unmap_shared(
ltrec.rm_offset, ltrec.rm_flags, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
ltrec.rm_blockcount -= len;
error = xfs_rmap_update(cur, &ltrec);
if (error)
@@ -1863,7 +2054,10 @@ xfs_rmap_unmap_shared(
ltrec.rm_offset, ltrec.rm_flags, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
ltrec.rm_blockcount = bno - ltrec.rm_startblock;
error = xfs_rmap_update(cur, &ltrec);
if (error)
@@ -1939,7 +2133,10 @@ xfs_rmap_map_shared(
error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_gt == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
@@ -1988,7 +2185,10 @@ xfs_rmap_map_shared(
ltrec.rm_offset, ltrec.rm_flags, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_rmap_update(cur, &ltrec);
if (error)
@@ -2200,7 +2400,7 @@ xfs_rmap_finish_one(
error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
if (error)
return error;
- if (!agbp)
+ if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
return -EFSCORRUPTED;
rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
@@ -2268,7 +2468,7 @@ xfs_rmap_update_is_needed(
* Record a rmap intent; the list is kept sorted first by AG and then by
* increasing age.
*/
-static int
+static void
__xfs_rmap_add(
struct xfs_trans *tp,
enum xfs_rmap_intent_type type,
@@ -2287,7 +2487,7 @@ __xfs_rmap_add(
bmap->br_blockcount,
bmap->br_state);
- ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_SLEEP | KM_NOFS);
+ ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_NOFS);
INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type;
ri->ri_owner = owner;
@@ -2295,11 +2495,10 @@ __xfs_rmap_add(
ri->ri_bmap = *bmap;
xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
- return 0;
}
/* Map an extent into a file. */
-int
+void
xfs_rmap_map_extent(
struct xfs_trans *tp,
struct xfs_inode *ip,
@@ -2307,15 +2506,15 @@ xfs_rmap_map_extent(
struct xfs_bmbt_irec *PREV)
{
if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
- return 0;
+ return;
- return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
+ __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_MAP_SHARED : XFS_RMAP_MAP, ip->i_ino,
whichfork, PREV);
}
/* Unmap an extent out of a file. */
-int
+void
xfs_rmap_unmap_extent(
struct xfs_trans *tp,
struct xfs_inode *ip,
@@ -2323,9 +2522,9 @@ xfs_rmap_unmap_extent(
struct xfs_bmbt_irec *PREV)
{
if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
- return 0;
+ return;
- return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
+ __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_UNMAP_SHARED : XFS_RMAP_UNMAP, ip->i_ino,
whichfork, PREV);
}
@@ -2336,7 +2535,7 @@ xfs_rmap_unmap_extent(
* Note that tp can be NULL here as no transaction is used for COW fork
* unwritten conversion.
*/
-int
+void
xfs_rmap_convert_extent(
struct xfs_mount *mp,
struct xfs_trans *tp,
@@ -2345,15 +2544,15 @@ xfs_rmap_convert_extent(
struct xfs_bmbt_irec *PREV)
{
if (!xfs_rmap_update_is_needed(mp, whichfork))
- return 0;
+ return;
- return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
+ __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_CONVERT_SHARED : XFS_RMAP_CONVERT, ip->i_ino,
whichfork, PREV);
}
/* Schedule the creation of an rmap for non-file data. */
-int
+void
xfs_rmap_alloc_extent(
struct xfs_trans *tp,
xfs_agnumber_t agno,
@@ -2364,18 +2563,18 @@ xfs_rmap_alloc_extent(
struct xfs_bmbt_irec bmap;
if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
- return 0;
+ return;
bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- return __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
+ __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
}
/* Schedule the deletion of an rmap for non-file data. */
-int
+void
xfs_rmap_free_extent(
struct xfs_trans *tp,
xfs_agnumber_t agno,
@@ -2386,14 +2585,14 @@ xfs_rmap_free_extent(
struct xfs_bmbt_irec bmap;
if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
- return 0;
+ return;
bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- return __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
+ __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
}
/* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
@@ -2511,7 +2710,7 @@ xfs_rmap_has_other_keys_helper(
((rks->flags & rec->rm_flags) & XFS_RMAP_KEY_FLAGS) == rks->flags)
return 0;
rks->has_rmap = true;
- return XFS_BTREE_QUERY_RANGE_ABORT;
+ return -ECANCELED;
}
/*
@@ -2540,8 +2739,11 @@ xfs_rmap_has_other_keys(
error = xfs_rmap_query_range(cur, &low, &high,
xfs_rmap_has_other_keys_helper, &rks);
+ if (error < 0)
+ return error;
+
*has_rmap = rks.has_rmap;
- return error;
+ return 0;
}
const struct xfs_owner_info XFS_RMAP_OINFO_SKIP_UPDATE = {
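Several helpers above also drop the private XFS_BTREE_QUERY_RANGE_ABORT value in favor of -ECANCELED to stop a range query early, with the caller translating that back into success (as xfs_rmap_has_other_keys() now does). A rough self-contained sketch of the convention, with a generic query function standing in for the kernel's btree query API:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct rec {
	unsigned long long owner;
};

/* A query helper returns -ECANCELED to stop the walk early. */
static int has_owner_helper(const struct rec *r, void *priv)
{
	unsigned long long *owner = priv;

	if (r->owner != *owner)
		return 0;		/* keep iterating */
	return -ECANCELED;		/* found a match; abort the query */
}

/* Stand-in for a range query: calls fn on each record, stops on non-zero. */
static int query_range(const struct rec *recs, size_t n,
		       int (*fn)(const struct rec *, void *), void *priv)
{
	size_t i;

	for (i = 0; i < n; i++) {
		int error = fn(&recs[i], priv);

		if (error)
			return error;	/* abort code or a real error */
	}
	return 0;
}

/* The caller translates the private abort value back into "no error". */
static int has_owner(const struct rec *recs, size_t n,
		     unsigned long long owner, bool *found)
{
	int error = query_range(recs, n, has_owner_helper, &owner);

	*found = (error == -ECANCELED);
	if (error == -ECANCELED)
		error = 0;
	return error;
}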
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index e21ed0294e5c..abe633403fd1 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -68,6 +68,7 @@ xfs_rmap_irec_offset_unpack(
if (offset & ~(XFS_RMAP_OFF_MASK | XFS_RMAP_OFF_FLAGS))
return -EFSCORRUPTED;
irec->rm_offset = XFS_RMAP_OFF(offset);
+ irec->rm_flags = 0;
if (offset & XFS_RMAP_OFF_ATTR_FORK)
irec->rm_flags |= XFS_RMAP_ATTR_FORK;
if (offset & XFS_RMAP_OFF_BMBT_BLOCK)
@@ -161,16 +162,16 @@ struct xfs_rmap_intent {
};
/* functions for updating the rmapbt based on bmbt map/unmap operations */
-int xfs_rmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+void xfs_rmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork, struct xfs_bmbt_irec *imap);
-int xfs_rmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+void xfs_rmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork, struct xfs_bmbt_irec *imap);
-int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_trans *tp,
+void xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *imap);
-int xfs_rmap_alloc_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
+void xfs_rmap_alloc_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
-int xfs_rmap_free_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
+void xfs_rmap_free_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
void xfs_rmap_finish_one_cleanup(struct xfs_trans *tp,
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 8ea1efc97b41..f42c74cb8be5 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -15,7 +15,7 @@
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_rtalloc.h"
-
+#include "xfs_error.h"
/*
* Realtime allocator bitmap functions shared with userspace.
@@ -70,7 +70,7 @@ xfs_rtbuf_get(
if (error)
return error;
- if (nmap == 0 || !xfs_bmap_is_real_extent(&map))
+ if (XFS_IS_CORRUPT(mp, nmap == 0 || !xfs_bmap_is_real_extent(&map)))
return -EFSCORRUPTED;
ASSERT(map.br_startblock != NULLFSBLOCK);
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index a08dd8f40346..0ac69751fe85 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -10,6 +10,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
+#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
@@ -928,7 +929,7 @@ xfs_log_sb(
xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
- xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
+ xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
}
/*
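The xfs_log_sb() hunk reflects that xfs_trans_log_buf() takes inclusive first/last byte offsets within the buffer, so logging the whole on-disk superblock means bytes 0 through sizeof(struct xfs_dsb) - 1; passing sizeof() itself would mark one byte past the structure. A small sketch of that inclusive-range convention, using a hypothetical log_range() helper and a stand-in structure:

#include <stdint.h>
#include <string.h>

struct xfs_dsb_stub { char payload[512]; };	/* stand-in for struct xfs_dsb */

/* Hypothetical helper following the xfs_trans_log_buf() convention:
 * 'first' and 'last' are inclusive byte offsets into the buffer. */
static void log_range(uint8_t *dirty_map, unsigned int first, unsigned int last)
{
	memset(dirty_map + first, 1, last - first + 1);
}

static void log_whole_sb(uint8_t *dirty_map)
{
	/* Marks exactly sizeof(struct xfs_dsb_stub) bytes dirty; without the
	 * "- 1" the inclusive range would spill one byte past the struct. */
	log_range(dirty_map, 0, sizeof(struct xfs_dsb_stub) - 1);
}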
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index e0641b7337b3..c45acbd3add9 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -177,10 +177,4 @@ struct xfs_ino_geometry {
unsigned int agino_log; /* #bits for agino in inum */
};
-/* Keep iterating the data structure. */
-#define XFS_ITER_CONTINUE (0)
-
-/* Stop iterating the data structure. */
-#define XFS_ITER_ABORT (1)
-
#endif /* __XFS_SHARED_H__ */
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index a9ad90926b87..2b8ccb5b975d 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -55,7 +55,7 @@ xfs_trans_ichgtime(
int flags)
{
struct inode *inode = VFS_I(ip);
- struct timespec64 tv;
+ struct timespec64 tv;
ASSERT(tp);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -66,10 +66,8 @@ xfs_trans_ichgtime(
inode->i_mtime = tv;
if (flags & XFS_ICHGTIME_CHG)
inode->i_ctime = tv;
- if (flags & XFS_ICHGTIME_CREATE) {
- ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
- ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
- }
+ if (flags & XFS_ICHGTIME_CREATE)
+ ip->i_d.di_crtime = tv;
}
/*
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index d12bbd526e7c..c55cd9a3dec9 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -718,7 +718,7 @@ xfs_calc_clear_agi_bucket_reservation(
/*
* Adjusting quota limits.
- * the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
+ * the disk quota buffer: sizeof(struct xfs_disk_dquot)
*/
STATIC uint
xfs_calc_qm_setqlim_reservation(void)
@@ -742,7 +742,7 @@ xfs_calc_qm_dqalloc_reservation(
/*
* Turning off quotas.
- * the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
+ * the quota off logitems: sizeof(struct xfs_qoff_logitem) * 2
* the superblock for the quota flags: sector size
*/
STATIC uint
@@ -755,7 +755,7 @@ xfs_calc_qm_quotaoff_reservation(
/*
* End of turning off quotas.
- * the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
+ * the quota off logitems: sizeof(struct xfs_qoff_logitem) * 2
*/
STATIC uint
xfs_calc_qm_quotaoff_end_reservation(void)
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 802b34cd10fe..397d94775440 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -21,7 +21,6 @@ typedef int32_t xfs_suminfo_t; /* type of bitmap summary info */
typedef uint32_t xfs_rtword_t; /* word type for bitmap manipulations */
typedef int64_t xfs_lsn_t; /* log sequence number */
-typedef int32_t xfs_tid_t; /* transaction identifier */
typedef uint32_t xfs_dablk_t; /* dir/attr block number (in file) */
typedef uint32_t xfs_dahash_t; /* dir/attr hash value */
@@ -33,7 +32,6 @@ typedef uint64_t xfs_fileoff_t; /* block number in a file */
typedef uint64_t xfs_filblks_t; /* number of blocks in a file */
typedef int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */
-typedef int64_t xfs_sfiloff_t; /* signed block number in a file */
/*
* New verifiers will return the instruction address of the failing check.
@@ -169,6 +167,14 @@ typedef struct xfs_bmbt_irec
xfs_exntst_t br_state; /* extent state */
} xfs_bmbt_irec_t;
+/* per-AG block reservation types */
+enum xfs_ag_resv_type {
+ XFS_AG_RESV_NONE = 0,
+ XFS_AG_RESV_AGFL,
+ XFS_AG_RESV_METADATA,
+ XFS_AG_RESV_RMAPBT,
+};
+
/*
* Type verifier functions
*/
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 16b09b941441..ba0f747c82e8 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -639,7 +639,7 @@ xchk_agfl_block(
xchk_agfl_block_xref(sc, agbno);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- return XFS_ITER_ABORT;
+ return -ECANCELED;
return 0;
}
@@ -730,7 +730,7 @@ xchk_agfl(
/* Check the blocks in the AGFL. */
error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
sc->sa.agfl_bp, xchk_agfl_block, &sai);
- if (error == XFS_ITER_ABORT) {
+ if (error == -ECANCELED) {
error = 0;
goto out_free;
}
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index a43d1813c4ff..5533e48e605d 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -97,7 +97,6 @@ xchk_allocbt_rec(
xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
xfs_agblock_t bno;
xfs_extlen_t len;
- int error = 0;
bno = be32_to_cpu(rec->alloc.ar_startblock);
len = be32_to_cpu(rec->alloc.ar_blockcount);
@@ -109,7 +108,7 @@ xchk_allocbt_rec(
xchk_allocbt_xref(bs->sc, bno, len);
- return error;
+ return 0;
}
/* Scrub the freespace btrees for some AG. */
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
index 1afc58bf71dd..d9f0dd444b80 100644
--- a/fs/xfs/scrub/attr.c
+++ b/fs/xfs/scrub/attr.c
@@ -80,7 +80,7 @@ xchk_setup_xattr(
* without the inode lock held, which means we can sleep.
*/
if (sc->flags & XCHK_TRY_HARDER) {
- error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, KM_SLEEP);
+ error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
if (error)
return error;
}
@@ -163,8 +163,6 @@ xchk_xattr_listent(
args.valuelen = valuelen;
error = xfs_attr_get_ilocked(context->dp, &args);
- if (error == -EEXIST)
- error = 0;
if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
&error))
goto fail_xref;
@@ -173,7 +171,7 @@ xchk_xattr_listent(
args.blkno);
fail_xref:
if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- context->seen_enough = XFS_ITER_ABORT;
+ context->seen_enough = 1;
return;
}
@@ -400,15 +398,14 @@ out:
STATIC int
xchk_xattr_rec(
struct xchk_da_btree *ds,
- int level,
- void *rec)
+ int level)
{
struct xfs_mount *mp = ds->state->mp;
- struct xfs_attr_leaf_entry *ent = rec;
- struct xfs_da_state_blk *blk;
+ struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
struct xfs_attr_leaf_name_local *lentry;
struct xfs_attr_leaf_name_remote *rentry;
struct xfs_buf *bp;
+ struct xfs_attr_leaf_entry *ent;
xfs_dahash_t calc_hash;
xfs_dahash_t hash;
int nameidx;
@@ -416,7 +413,9 @@ xchk_xattr_rec(
unsigned int badflags;
int error;
- blk = &ds->state->path.blk[level];
+ ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+
+ ent = xfs_attr3_leaf_entryp(blk->bp->b_addr) + blk->index;
/* Check the whole block, if necessary. */
error = xchk_xattr_block(ds, level);
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
index 3d47d111be5a..18a684e18a69 100644
--- a/fs/xfs/scrub/bitmap.c
+++ b/fs/xfs/scrub/bitmap.c
@@ -294,5 +294,6 @@ xfs_bitmap_set_btblocks(
struct xfs_bitmap *bitmap,
struct xfs_btree_cur *cur)
{
- return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
+ return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock,
+ XFS_BTREE_VISIT_ALL, bitmap);
}
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 1bd29fdc2ab5..fa6ea6407992 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -75,6 +75,7 @@ struct xchk_bmap_info {
xfs_fileoff_t lastoff;
bool is_rt;
bool is_shared;
+ bool was_loaded;
int whichfork;
};
@@ -213,25 +214,20 @@ xchk_bmap_xref_rmap(
/* Cross-reference a single rtdev extent record. */
STATIC void
-xchk_bmap_rt_extent_xref(
- struct xchk_bmap_info *info,
+xchk_bmap_rt_iextent_xref(
struct xfs_inode *ip,
- struct xfs_btree_cur *cur,
+ struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec)
{
- if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- return;
-
xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
irec->br_blockcount);
}
/* Cross-reference a single datadev extent record. */
STATIC void
-xchk_bmap_extent_xref(
- struct xchk_bmap_info *info,
+xchk_bmap_iextent_xref(
struct xfs_inode *ip,
- struct xfs_btree_cur *cur,
+ struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec)
{
struct xfs_mount *mp = info->sc->mp;
@@ -240,9 +236,6 @@ xchk_bmap_extent_xref(
xfs_extlen_t len;
int error;
- if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- return;
-
agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
len = irec->br_blockcount;
@@ -300,20 +293,15 @@ xchk_bmap_dirattr_extent(
/* Scrub a single extent record. */
STATIC int
-xchk_bmap_extent(
+xchk_bmap_iextent(
struct xfs_inode *ip,
- struct xfs_btree_cur *cur,
struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec)
{
struct xfs_mount *mp = info->sc->mp;
- struct xfs_buf *bp = NULL;
xfs_filblks_t end;
int error = 0;
- if (cur)
- xfs_btree_get_block(cur, 0, &bp);
-
/*
* Check for out-of-order extents. This record could have come
* from the incore list, for which there is no ordering check.
@@ -364,10 +352,13 @@ xchk_bmap_extent(
xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
+ if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return 0;
+
if (info->is_rt)
- xchk_bmap_rt_extent_xref(info, ip, cur, irec);
+ xchk_bmap_rt_iextent_xref(ip, info, irec);
else
- xchk_bmap_extent_xref(info, ip, cur, irec);
+ xchk_bmap_iextent_xref(ip, info, irec);
info->lastoff = irec->br_startoff + irec->br_blockcount;
return error;
@@ -380,10 +371,13 @@ xchk_bmapbt_rec(
union xfs_btree_rec *rec)
{
struct xfs_bmbt_irec irec;
+ struct xfs_bmbt_irec iext_irec;
+ struct xfs_iext_cursor icur;
struct xchk_bmap_info *info = bs->private;
struct xfs_inode *ip = bs->cur->bc_private.b.ip;
struct xfs_buf *bp = NULL;
struct xfs_btree_block *block;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, info->whichfork);
uint64_t owner;
int i;
@@ -402,9 +396,26 @@ xchk_bmapbt_rec(
}
}
- /* Set up the in-core record and scrub it. */
+ /*
+ * Check that the incore extent tree contains an extent that matches
+ * this one exactly. We validate those cached bmaps later, so we don't
+ * need to check them here. If the incore extent tree was just loaded
+ * from disk by the scrubber, we assume that its contents match what's
+ * on disk (we still hold the ILOCK) and skip the equivalence check.
+ */
+ if (!info->was_loaded)
+ return 0;
+
xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
- return xchk_bmap_extent(ip, bs->cur, info, &irec);
+ if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur,
+ &iext_irec) ||
+ irec.br_startoff != iext_irec.br_startoff ||
+ irec.br_startblock != iext_irec.br_startblock ||
+ irec.br_blockcount != iext_irec.br_blockcount ||
+ irec.br_state != iext_irec.br_state)
+ xchk_fblock_set_corrupt(bs->sc, info->whichfork,
+ irec.br_startoff);
+ return 0;
}
/* Scan the btree records. */
@@ -415,15 +426,26 @@ xchk_bmap_btree(
struct xchk_bmap_info *info)
{
struct xfs_owner_info oinfo;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
struct xfs_mount *mp = sc->mp;
struct xfs_inode *ip = sc->ip;
struct xfs_btree_cur *cur;
int error;
+ /* Load the incore bmap cache if it's not loaded. */
+ info->was_loaded = ifp->if_flags & XFS_IFEXTENTS;
+ if (!info->was_loaded) {
+ error = xfs_iread_extents(sc->tp, ip, whichfork);
+ if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
+ goto out;
+ }
+
+ /* Check the btree structure. */
cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
xfs_btree_del_cursor(cur, error);
+out:
return error;
}
@@ -500,7 +522,7 @@ xchk_bmap_check_rmap(
out:
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- return XFS_BTREE_QUERY_RANGE_ABORT;
+ return -ECANCELED;
return 0;
}
@@ -529,7 +551,7 @@ xchk_bmap_check_ag_rmaps(
sbcri.sc = sc;
sbcri.whichfork = whichfork;
error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
- if (error == XFS_BTREE_QUERY_RANGE_ABORT)
+ if (error == -ECANCELED)
error = 0;
xfs_btree_del_cursor(cur, error);
@@ -671,13 +693,6 @@ xchk_bmap(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
- /* Now try to scrub the in-memory extent list. */
- if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- error = xfs_iread_extents(sc->tp, ip, whichfork);
- if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
- goto out;
- }
-
/* Find the offset of the last extent in the mapping. */
error = xfs_bmap_last_offset(ip, &endoff, whichfork);
if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
@@ -689,7 +704,7 @@ xchk_bmap(
for_each_xfs_iext(ifp, &icur, &irec) {
if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
- break;
+ goto out;
if (isnullstartblock(irec.br_startblock))
continue;
if (irec.br_startoff >= endoff) {
@@ -697,7 +712,7 @@ xchk_bmap(
irec.br_startoff);
goto out;
}
- error = xchk_bmap_extent(ip, NULL, &info, &irec);
+ error = xchk_bmap_iextent(ip, &info, &irec);
if (error)
goto out;
}
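The reworked xchk_bmapbt_rec() above no longer re-scrubs each on-disk bmbt record; when the incore extent cache was already loaded (info->was_loaded), it instead checks that the cache holds an exactly matching mapping. A minimal sketch of that four-field equality test, with a stand-in record type whose field names follow the kernel's:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for struct xfs_bmbt_irec; field names follow the kernel's. */
struct bmbt_irec {
	uint64_t	br_startoff;	/* file offset of the mapping */
	uint64_t	br_startblock;	/* starting disk block */
	uint64_t	br_blockcount;	/* length in blocks */
	int		br_state;	/* written vs. unwritten */
};

/* A btree record and its cached incore counterpart must match exactly;
 * any mismatch marks the fork corrupt in the hunk above. */
static bool bmbt_irec_equal(const struct bmbt_irec *disk,
			    const struct bmbt_irec *cached)
{
	return disk->br_startoff == cached->br_startoff &&
	       disk->br_startblock == cached->br_startblock &&
	       disk->br_blockcount == cached->br_blockcount &&
	       disk->br_state == cached->br_state;
}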
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 003a772cd26c..2e50d146105d 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -14,8 +14,15 @@
static inline bool
xchk_should_terminate(
struct xfs_scrub *sc,
- int *error)
+ int *error)
{
+ /*
+ * If preemption is disabled, we need to yield to the scheduler every
+ * few seconds so that we don't run afoul of the soft lockup watchdog
+ * or RCU stall detector.
+ */
+ cond_resched();
+
if (fatal_signal_pending(current)) {
if (*error == 0)
*error = -EAGAIN;
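With cond_resched() folded into xchk_should_terminate(), a long scrub loop both yields the CPU and notices fatal signals simply by polling that helper once per iteration, which is why several scrubbers in the hunks that follow switch to it. A hedged userspace sketch of the call pattern; should_terminate() here only mimics the shape of the kernel helper:

#include <errno.h>
#include <signal.h>
#include <stdbool.h>

static volatile sig_atomic_t fatal_signal;	/* set by a signal handler */

/* Mimics the shape of xchk_should_terminate(): returns true and fills in
 * *error when the caller should stop; the kernel version also calls
 * cond_resched() here so that polling it keeps the loop preemptible. */
static bool should_terminate(int *error)
{
	if (fatal_signal) {
		if (*error == 0)
			*error = -EAGAIN;
		return true;
	}
	return false;
}

static int scan_groups(unsigned int ngroups)
{
	unsigned int i;
	int error = 0;

	for (i = 0; i < ngroups; i++) {
		if (should_terminate(&error))
			break;
		/* ... examine group i ... */
	}
	return error;
}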
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index 94c4f1de1922..97a15b6f2865 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -77,40 +77,18 @@ xchk_da_set_corrupt(
__return_address);
}
-/* Find an entry at a certain level in a da btree. */
-STATIC void *
-xchk_da_btree_entry(
- struct xchk_da_btree *ds,
- int level,
- int rec)
+static struct xfs_da_node_entry *
+xchk_da_btree_node_entry(
+ struct xchk_da_btree *ds,
+ int level)
{
- char *ents;
- struct xfs_da_state_blk *blk;
- void *baddr;
+ struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
+ struct xfs_da3_icnode_hdr hdr;
- /* Dispatch the entry finding function. */
- blk = &ds->state->path.blk[level];
- baddr = blk->bp->b_addr;
- switch (blk->magic) {
- case XFS_ATTR_LEAF_MAGIC:
- case XFS_ATTR3_LEAF_MAGIC:
- ents = (char *)xfs_attr3_leaf_entryp(baddr);
- return ents + (rec * sizeof(struct xfs_attr_leaf_entry));
- case XFS_DIR2_LEAFN_MAGIC:
- case XFS_DIR3_LEAFN_MAGIC:
- ents = (char *)ds->dargs.dp->d_ops->leaf_ents_p(baddr);
- return ents + (rec * sizeof(struct xfs_dir2_leaf_entry));
- case XFS_DIR2_LEAF1_MAGIC:
- case XFS_DIR3_LEAF1_MAGIC:
- ents = (char *)ds->dargs.dp->d_ops->leaf_ents_p(baddr);
- return ents + (rec * sizeof(struct xfs_dir2_leaf_entry));
- case XFS_DA_NODE_MAGIC:
- case XFS_DA3_NODE_MAGIC:
- ents = (char *)ds->dargs.dp->d_ops->node_tree_p(baddr);
- return ents + (rec * sizeof(struct xfs_da_node_entry));
- }
+ ASSERT(blk->magic == XFS_DA_NODE_MAGIC);
- return NULL;
+ xfs_da3_node_hdr_from_disk(ds->sc->mp, &hdr, blk->bp->b_addr);
+ return hdr.btree + blk->index;
}
/* Scrub a da btree hash (key). */
@@ -120,7 +98,6 @@ xchk_da_btree_hash(
int level,
__be32 *hashp)
{
- struct xfs_da_state_blk *blks;
struct xfs_da_node_entry *entry;
xfs_dahash_t hash;
xfs_dahash_t parent_hash;
@@ -135,8 +112,7 @@ xchk_da_btree_hash(
return 0;
/* Is this hash no larger than the parent hash? */
- blks = ds->state->path.blk;
- entry = xchk_da_btree_entry(ds, level - 1, blks[level - 1].index);
+ entry = xchk_da_btree_node_entry(ds, level - 1);
parent_hash = be32_to_cpu(entry->hashval);
if (parent_hash < hash)
xchk_da_set_corrupt(ds, level);
@@ -278,7 +254,11 @@ xchk_da_btree_block_check_sibling(
/* Compare upper level pointer to sibling pointer. */
if (ds->state->altpath.blk[level].blkno != sibling)
xchk_da_set_corrupt(ds, level);
- xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp);
+ if (ds->state->altpath.blk[level].bp) {
+ xfs_trans_brelse(ds->dargs.trans,
+ ds->state->altpath.blk[level].bp);
+ ds->state->altpath.blk[level].bp = NULL;
+ }
out:
return error;
}
@@ -351,8 +331,8 @@ xchk_da_btree_block(
goto out_nobuf;
/* Read the buffer. */
- error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, -2,
- &blk->bp, dargs->whichfork,
+ error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno,
+ XFS_DABUF_MAP_HOLE_OK, &blk->bp, dargs->whichfork,
&xchk_da_btree_buf_ops);
if (!xchk_da_process_error(ds, level, &error))
goto out_nobuf;
@@ -429,8 +409,8 @@ xchk_da_btree_block(
XFS_BLFT_DA_NODE_BUF);
blk->magic = XFS_DA_NODE_MAGIC;
node = blk->bp->b_addr;
- ip->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = ip->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(ip->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
*pmaxrecs = nodehdr.count;
blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval);
if (level == 0) {
@@ -475,14 +455,12 @@ xchk_da_btree(
struct xfs_mount *mp = sc->mp;
struct xfs_da_state_blk *blks;
struct xfs_da_node_entry *key;
- void *rec;
xfs_dablk_t blkno;
int level;
int error;
/* Skip short format data structures; no btree to scan. */
- if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE)
+ if (!xfs_ifork_has_extents(sc->ip, whichfork))
return 0;
/* Set up initial da state. */
@@ -534,9 +512,7 @@ xchk_da_btree(
}
/* Dispatch record scrubbing. */
- rec = xchk_da_btree_entry(&ds, level,
- blks[level].index);
- error = scrub_fn(&ds, level, rec);
+ error = scrub_fn(&ds, level);
if (error)
break;
if (xchk_should_terminate(sc, &error) ||
@@ -558,7 +534,7 @@ xchk_da_btree(
}
/* Hashes in order for scrub? */
- key = xchk_da_btree_entry(&ds, level, blks[level].index);
+ key = xchk_da_btree_node_entry(&ds, level);
error = xchk_da_btree_hash(&ds, level, &key->hashval);
if (error)
goto out;
diff --git a/fs/xfs/scrub/dabtree.h b/fs/xfs/scrub/dabtree.h
index cb3f0003245b..1f3515c6d5a8 100644
--- a/fs/xfs/scrub/dabtree.h
+++ b/fs/xfs/scrub/dabtree.h
@@ -28,8 +28,7 @@ struct xchk_da_btree {
int tree_level;
};
-typedef int (*xchk_da_btree_rec_fn)(struct xchk_da_btree *ds,
- int level, void *rec);
+typedef int (*xchk_da_btree_rec_fn)(struct xchk_da_btree *ds, int level);
/* Check for da btree operation errors. */
bool xchk_da_process_error(struct xchk_da_btree *ds, int level, int *error);
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index 1e2e11721eb9..266da4e4bde6 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -113,6 +113,9 @@ xchk_dir_actor(
offset = xfs_dir2_db_to_da(mp->m_dir_geo,
xfs_dir2_dataptr_to_db(mp->m_dir_geo, pos));
+ if (xchk_should_terminate(sdc->sc, &error))
+ return error;
+
/* Does this inode number make sense? */
if (!xfs_verify_dir_ino(mp, ino)) {
xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
@@ -179,15 +182,17 @@ out:
STATIC int
xchk_dir_rec(
struct xchk_da_btree *ds,
- int level,
- void *rec)
+ int level)
{
+ struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
struct xfs_mount *mp = ds->state->mp;
- struct xfs_dir2_leaf_entry *ent = rec;
struct xfs_inode *dp = ds->dargs.dp;
+ struct xfs_da_geometry *geo = mp->m_dir_geo;
struct xfs_dir2_data_entry *dent;
struct xfs_buf *bp;
- char *p, *endp;
+ struct xfs_dir2_leaf_entry *ent;
+ unsigned int end;
+ unsigned int iter_off;
xfs_ino_t ino;
xfs_dablk_t rec_bno;
xfs_dir2_db_t db;
@@ -195,9 +200,16 @@ xchk_dir_rec(
xfs_dir2_dataptr_t ptr;
xfs_dahash_t calc_hash;
xfs_dahash_t hash;
+ struct xfs_dir3_icleaf_hdr hdr;
unsigned int tag;
int error;
+ ASSERT(blk->magic == XFS_DIR2_LEAF1_MAGIC ||
+ blk->magic == XFS_DIR2_LEAFN_MAGIC);
+
+ xfs_dir2_leaf_hdr_from_disk(mp, &hdr, blk->bp->b_addr);
+ ent = hdr.ents + blk->index;
+
/* Check the hash of the entry. */
error = xchk_da_btree_hash(ds, level, &ent->hashval);
if (error)
@@ -209,15 +221,16 @@ xchk_dir_rec(
return 0;
/* Find the directory entry's location. */
- db = xfs_dir2_dataptr_to_db(mp->m_dir_geo, ptr);
- off = xfs_dir2_dataptr_to_off(mp->m_dir_geo, ptr);
- rec_bno = xfs_dir2_db_to_da(mp->m_dir_geo, db);
+ db = xfs_dir2_dataptr_to_db(geo, ptr);
+ off = xfs_dir2_dataptr_to_off(geo, ptr);
+ rec_bno = xfs_dir2_db_to_da(geo, db);
- if (rec_bno >= mp->m_dir_geo->leafblk) {
+ if (rec_bno >= geo->leafblk) {
xchk_da_set_corrupt(ds, level);
goto out;
}
- error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno, -2, &bp);
+ error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno,
+ XFS_DABUF_MAP_HOLE_OK, &bp);
if (!xchk_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
&error))
goto out;
@@ -230,38 +243,37 @@ xchk_dir_rec(
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out_relse;
- dent = (struct xfs_dir2_data_entry *)(((char *)bp->b_addr) + off);
+ dent = bp->b_addr + off;
/* Make sure we got a real directory entry. */
- p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr);
- endp = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
- if (!endp) {
+ iter_off = geo->data_entry_offset;
+ end = xfs_dir3_data_end_offset(geo, bp->b_addr);
+ if (!end) {
xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse;
}
- while (p < endp) {
- struct xfs_dir2_data_entry *dep;
- struct xfs_dir2_data_unused *dup;
+ for (;;) {
+ struct xfs_dir2_data_entry *dep = bp->b_addr + iter_off;
+ struct xfs_dir2_data_unused *dup = bp->b_addr + iter_off;
+
+ if (iter_off >= end) {
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ goto out_relse;
+ }
- dup = (struct xfs_dir2_data_unused *)p;
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- p += be16_to_cpu(dup->length);
+ iter_off += be16_to_cpu(dup->length);
continue;
}
- dep = (struct xfs_dir2_data_entry *)p;
if (dep == dent)
break;
- p += mp->m_dir_inode_ops->data_entsize(dep->namelen);
- }
- if (p >= endp) {
- xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
- goto out_relse;
+ iter_off += xfs_dir2_data_entsize(mp, dep->namelen);
}
/* Retrieve the entry, sanity check it, and compare hashes. */
ino = be64_to_cpu(dent->inumber);
hash = be32_to_cpu(ent->hashval);
- tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent));
+ tag = be16_to_cpup(xfs_dir2_data_entry_tag_p(mp, dent));
if (!xfs_verify_dir_ino(mp, ino) || tag != off)
xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
if (dent->namelen == 0) {
@@ -319,19 +331,15 @@ xchk_directory_data_bestfree(
struct xfs_buf *bp;
struct xfs_dir2_data_free *bf;
struct xfs_mount *mp = sc->mp;
- const struct xfs_dir_ops *d_ops;
- char *ptr;
- char *endptr;
u16 tag;
unsigned int nr_bestfrees = 0;
unsigned int nr_frees = 0;
unsigned int smallest_bestfree;
int newlen;
- int offset;
+ unsigned int offset;
+ unsigned int end;
int error;
- d_ops = sc->ip->d_ops;
-
if (is_block) {
/* dir block format */
if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET))
@@ -339,7 +347,7 @@ xchk_directory_data_bestfree(
error = xfs_dir3_block_read(sc->tp, sc->ip, &bp);
} else {
/* dir data format */
- error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, -1, &bp);
+ error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, 0, &bp);
}
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
@@ -351,7 +359,7 @@ xchk_directory_data_bestfree(
goto out_buf;
/* Do the bestfrees correspond to actual free space? */
- bf = d_ops->data_bestfree_p(bp->b_addr);
+ bf = xfs_dir2_data_bestfree_p(mp, bp->b_addr);
smallest_bestfree = UINT_MAX;
for (dfp = &bf[0]; dfp < &bf[XFS_DIR2_DATA_FD_COUNT]; dfp++) {
offset = be16_to_cpu(dfp->offset);
@@ -361,13 +369,13 @@ xchk_directory_data_bestfree(
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
- dup = (struct xfs_dir2_data_unused *)(bp->b_addr + offset);
+ dup = bp->b_addr + offset;
tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
/* bestfree doesn't match the entry it points at? */
if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) ||
be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) ||
- tag != ((char *)dup - (char *)bp->b_addr)) {
+ tag != offset) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
@@ -383,30 +391,30 @@ xchk_directory_data_bestfree(
}
/* Make sure the bestfrees are actually the best free spaces. */
- ptr = (char *)d_ops->data_entry_p(bp->b_addr);
- endptr = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
+ offset = mp->m_dir_geo->data_entry_offset;
+ end = xfs_dir3_data_end_offset(mp->m_dir_geo, bp->b_addr);
/* Iterate the entries, stopping when we hit or go past the end. */
- while (ptr < endptr) {
- dup = (struct xfs_dir2_data_unused *)ptr;
+ while (offset < end) {
+ dup = bp->b_addr + offset;
+
/* Skip real entries */
if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG)) {
- struct xfs_dir2_data_entry *dep;
+ struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
- dep = (struct xfs_dir2_data_entry *)ptr;
- newlen = d_ops->data_entsize(dep->namelen);
+ newlen = xfs_dir2_data_entsize(mp, dep->namelen);
if (newlen <= 0) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
lblk);
goto out_buf;
}
- ptr += newlen;
+ offset += newlen;
continue;
}
/* Spot check this free entry */
tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
- if (tag != ((char *)dup - (char *)bp->b_addr)) {
+ if (tag != offset) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
@@ -425,13 +433,13 @@ xchk_directory_data_bestfree(
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
- ptr += newlen;
- if (ptr <= endptr)
+ offset += newlen;
+ if (offset <= end)
nr_frees++;
}
/* We're required to fill all the space. */
- if (ptr != endptr)
+ if (offset != end)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
/* Did we see at least as many free slots as there are bestfrees? */
@@ -458,7 +466,7 @@ xchk_directory_check_freesp(
{
struct xfs_dir2_data_free *dfp;
- dfp = sc->ip->d_ops->data_bestfree_p(dbp->b_addr);
+ dfp = xfs_dir2_data_bestfree_p(sc->mp, dbp->b_addr);
if (len != be16_to_cpu(dfp->length))
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
@@ -475,12 +483,10 @@ xchk_directory_leaf1_bestfree(
xfs_dablk_t lblk)
{
struct xfs_dir3_icleaf_hdr leafhdr;
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir2_leaf_tail *ltp;
struct xfs_dir2_leaf *leaf;
struct xfs_buf *dbp;
struct xfs_buf *bp;
- const struct xfs_dir_ops *d_ops = sc->ip->d_ops;
struct xfs_da_geometry *geo = sc->mp->m_dir_geo;
__be16 *bestp;
__u16 best;
@@ -492,14 +498,13 @@ xchk_directory_leaf1_bestfree(
int error;
/* Read the free space block. */
- error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp);
+ error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, &bp);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
xchk_buffer_recheck(sc, bp);
leaf = bp->b_addr;
- d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(sc->ip->i_mount, &leafhdr, leaf);
ltp = xfs_dir2_leaf_tail_p(geo, leaf);
bestcount = be32_to_cpu(ltp->bestcount);
bestp = xfs_dir2_leaf_bests_p(ltp);
@@ -521,24 +526,25 @@ xchk_directory_leaf1_bestfree(
}
/* Is the leaf count even remotely sane? */
- if (leafhdr.count > d_ops->leaf_max_ents(geo)) {
+ if (leafhdr.count > geo->leaf_max_ents) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
/* Leaves and bests don't overlap in leaf format. */
- if ((char *)&ents[leafhdr.count] > (char *)bestp) {
+ if ((char *)&leafhdr.ents[leafhdr.count] > (char *)bestp) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
/* Check hash value order, count stale entries. */
for (i = 0; i < leafhdr.count; i++) {
- hash = be32_to_cpu(ents[i].hashval);
+ hash = be32_to_cpu(leafhdr.ents[i].hashval);
if (i > 0 && lasthash > hash)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
lasthash = hash;
- if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ if (leafhdr.ents[i].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
if (leafhdr.stale != stale)
@@ -552,7 +558,7 @@ xchk_directory_leaf1_bestfree(
if (best == NULLDATAOFF)
continue;
error = xfs_dir3_data_read(sc->tp, sc->ip,
- i * args->geo->fsbcount, -1, &dbp);
+ i * args->geo->fsbcount, 0, &dbp);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error))
break;
@@ -575,7 +581,6 @@ xchk_directory_free_bestfree(
struct xfs_dir3_icfree_hdr freehdr;
struct xfs_buf *dbp;
struct xfs_buf *bp;
- __be16 *bestp;
__u16 best;
unsigned int stale = 0;
int i;
@@ -595,17 +600,16 @@ xchk_directory_free_bestfree(
}
/* Check all the entries. */
- sc->ip->d_ops->free_hdr_from_disk(&freehdr, bp->b_addr);
- bestp = sc->ip->d_ops->free_bests_p(bp->b_addr);
- for (i = 0; i < freehdr.nvalid; i++, bestp++) {
- best = be16_to_cpu(*bestp);
+ xfs_dir2_free_hdr_from_disk(sc->ip->i_mount, &freehdr, bp->b_addr);
+ for (i = 0; i < freehdr.nvalid; i++) {
+ best = be16_to_cpu(freehdr.bests[i]);
if (best == NULLDATAOFF) {
stale++;
continue;
}
error = xfs_dir3_data_read(sc->tp, sc->ip,
(freehdr.firstdb + i) * args->geo->fsbcount,
- -1, &dbp);
+ 0, &dbp);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error))
break;
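The directory scrub changes above walk data blocks by byte offset, from geo->data_entry_offset to the end offset returned by xfs_dir3_data_end_offset(), skipping unused regions by their recorded length and live entries by their computed size, instead of chasing char pointers through the per-version dir_ops. A generic sketch of an offset-based walk over variable-length entries; the entry layout here is illustrative, not the XFS on-disk directory format:

#include <stddef.h>
#include <stdint.h>

#define FREE_TAG 0xffff		/* illustrative marker for an unused region */

struct entry_hdr {
	uint16_t tag;		/* FREE_TAG for a hole, anything else for an entry */
	uint16_t length;	/* total size of this entry or hole, in bytes */
};

/* Walk [start, end) by offset, counting live entries; returns -1 on overrun. */
static int count_live_entries(const uint8_t *buf, unsigned int start,
			      unsigned int end)
{
	unsigned int offset = start;
	int live = 0;

	while (offset + sizeof(struct entry_hdr) <= end) {
		const struct entry_hdr *hdr = (const void *)(buf + offset);

		if (hdr->length == 0)
			return -1;		/* corrupt: would loop forever */
		if (hdr->tag != FREE_TAG)
			live++;			/* live entry */
		offset += hdr->length;		/* holes and entries both carry a length */
	}
	return offset == end ? live : -1;	/* must land exactly on the end */
}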
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index fc3f510c9034..7251c66a82c9 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -104,7 +104,7 @@ next_loop_perag:
pag = NULL;
error = 0;
- if (fatal_signal_pending(current))
+ if (xchk_should_terminate(sc, &error))
break;
}
@@ -125,7 +125,7 @@ xchk_setup_fscounters(
struct xchk_fscounters *fsc;
int error;
- sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), KM_SLEEP);
+ sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
if (!sc->buf)
return -ENOMEM;
fsc = sc->buf;
@@ -163,6 +163,7 @@ xchk_fscount_aggregate_agcounts(
uint64_t delayed;
xfs_agnumber_t agno;
int tries = 8;
+ int error = 0;
retry:
fsc->icount = 0;
@@ -196,10 +197,13 @@ retry:
xfs_perag_put(pag);
- if (fatal_signal_pending(current))
+ if (xchk_should_terminate(sc, &error))
break;
}
+ if (error)
+ return error;
+
/*
* The global incore space reservation is taken from the incore
* counters, so leave that out of the computation.
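
The fscounters changes replace bare fatal_signal_pending() checks with xchk_should_terminate(sc, &error), which both reports that the scrub should stop and records the error code the loop should return. A standalone sketch of that helper shape is below; the names and the errno value are illustrative, not the scrub code itself.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for fatal_signal_pending(current); flip to true to simulate ^C. */
static bool fatal_signal_pending_stub(void)
{
	return false;
}

/*
 * Report whether a long-running check should stop, and if so record the
 * error code the caller should return.  Keeping the assignment inside the
 * helper means every loop exits with a consistent value.
 */
static bool should_terminate(int *error)
{
	if (fatal_signal_pending_stub()) {
		if (*error == 0)
			*error = -4;	/* -EINTR, illustrative */
		return true;
	}
	return false;
}

int main(void)
{
	int error = 0;
	int agno;

	for (agno = 0; agno < 16; agno++) {
		/* ... aggregate this AG's counters here ... */
		if (should_terminate(&error))
			break;
	}
	printf("done, error=%d\n", error);
	return error ? 1 : 0;
}
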
diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c
index b2f602811e9d..83d27cdf579b 100644
--- a/fs/xfs/scrub/health.c
+++ b/fs/xfs/scrub/health.c
@@ -11,6 +11,7 @@
#include "xfs_sb.h"
#include "xfs_health.h"
#include "scrub/scrub.h"
+#include "scrub/health.h"
/*
* Scrub and In-Core Filesystem Health Assessments
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index c962bd534690..5705adc43a75 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -32,8 +32,10 @@ xchk_setup_parent(
struct xchk_parent_ctx {
struct dir_context dc;
+ struct xfs_scrub *sc;
xfs_ino_t ino;
xfs_nlink_t nlink;
+ bool cancelled;
};
/* Look for a single entry in a directory pointing to an inode. */
@@ -47,11 +49,21 @@ xchk_parent_actor(
unsigned type)
{
struct xchk_parent_ctx *spc;
+ int error = 0;
spc = container_of(dc, struct xchk_parent_ctx, dc);
if (spc->ino == ino)
spc->nlink++;
- return 0;
+
+ /*
+ * If we're facing a fatal signal, bail out. Store the cancellation
+ * status separately because the VFS readdir code squashes error codes
+ * into short directory reads.
+ */
+ if (xchk_should_terminate(spc->sc, &error))
+ spc->cancelled = true;
+
+ return error;
}
/* Count the number of dentries in the parent dir that point to this inode. */
@@ -62,10 +74,9 @@ xchk_parent_count_parent_dentries(
xfs_nlink_t *nlink)
{
struct xchk_parent_ctx spc = {
- .dc.actor = xchk_parent_actor,
- .dc.pos = 0,
- .ino = sc->ip->i_ino,
- .nlink = 0,
+ .dc.actor = xchk_parent_actor,
+ .ino = sc->ip->i_ino,
+ .sc = sc,
};
size_t bufsize;
loff_t oldpos;
@@ -80,7 +91,7 @@ xchk_parent_count_parent_dentries(
*/
lock_mode = xfs_ilock_data_map_shared(parent);
if (parent->i_d.di_nextents > 0)
- error = xfs_dir3_data_readahead(parent, 0, -1);
+ error = xfs_dir3_data_readahead(parent, 0, 0);
xfs_iunlock(parent, lock_mode);
if (error)
return error;
@@ -97,6 +108,10 @@ xchk_parent_count_parent_dentries(
error = xfs_readdir(sc->tp, parent, &spc.dc, bufsize);
if (error)
goto out;
+ if (spc.cancelled) {
+ error = -EAGAIN;
+ goto out;
+ }
if (oldpos == spc.dc.pos)
break;
oldpos = spc.dc.pos;
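
The parent-pointer check above threads the scrub context into the dir_context and records interruption in a separate "cancelled" flag, because the VFS readdir path folds any actor error into a short read; the caller then inspects spc.cancelled after xfs_readdir and turns it back into an error. A standalone sketch of signalling cancellation out-of-band from an iterator callback follows; walk_entries/count_actor are hypothetical stand-ins.

#include <stdbool.h>
#include <stdio.h>

struct walk_ctx {
	unsigned long ino;	/* inode we are counting links to */
	unsigned nlink;		/* matches found so far */
	bool cancelled;		/* set when the walk was interrupted */
};

/* Emulates an iterator that stops early but does not forward the reason. */
static void walk_entries(const unsigned long *entries, unsigned n,
			 int (*actor)(struct walk_ctx *, unsigned long),
			 struct walk_ctx *ctx)
{
	unsigned i;

	for (i = 0; i < n; i++)
		if (actor(ctx, entries[i]))
			break;	/* the error code is swallowed here */
}

static int count_actor(struct walk_ctx *ctx, unsigned long ino)
{
	if (ctx->ino == ino)
		ctx->nlink++;
	if (ino == 0) {		/* pretend 0 means "user hit ^C" */
		ctx->cancelled = true;
		return -1;
	}
	return 0;
}

int main(void)
{
	const unsigned long ents[] = { 42, 7, 42, 0, 42 };
	struct walk_ctx ctx = { .ino = 42 };

	walk_entries(ents, 5, count_actor, &ctx);
	/* The caller recovers the real outcome from the side channel. */
	printf("nlink=%u cancelled=%d\n", ctx.nlink, (int)ctx.cancelled);
	return 0;
}
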
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 0a33b4421c32..905a34558361 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -93,6 +93,10 @@ xchk_quota_item(
unsigned long long rcount;
xfs_ino_t fs_icount;
xfs_dqid_t id = be32_to_cpu(d->d_id);
+ int error = 0;
+
+ if (xchk_should_terminate(sc, &error))
+ return error;
/*
* Except for the root dquot, the actual dquot we got must either have
@@ -178,6 +182,9 @@ xchk_quota_item(
if (id != 0 && rhard != 0 && rcount > rhard)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return -EFSCORRUPTED;
+
return 0;
}
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 93b3793bc5b3..0cab11a5d390 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -341,7 +341,6 @@ xchk_refcountbt_rec(
xfs_extlen_t len;
xfs_nlink_t refcount;
bool has_cowflag;
- int error = 0;
bno = be32_to_cpu(rec->refc.rc_startblock);
len = be32_to_cpu(rec->refc.rc_blockcount);
@@ -366,7 +365,7 @@ xchk_refcountbt_rec(
xchk_refcountbt_xref(bs->sc, bno, len, refcount);
- return error;
+ return 0;
}
/* Make sure we have as many refc blocks as the rmap says. */
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 4cfeec57fb05..b70a88bc975e 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -351,7 +351,7 @@ xrep_init_btblock(
xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.agno);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
- xfs_trans_log_buf(tp, bp, 0, bp->b_length);
+ xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
bp->b_ops = ops;
*bpp = bp;
@@ -664,7 +664,7 @@ xrep_findroot_agfl_walk(
{
xfs_agblock_t *agbno = priv;
- return (*agbno == bno) ? XFS_ITER_ABORT : 0;
+ return (*agbno == bno) ? -ECANCELED : 0;
}
/* Does this block match the btree information passed in? */
@@ -694,7 +694,7 @@ xrep_findroot_block(
if (owner == XFS_RMAP_OWN_AG) {
error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
xrep_findroot_agfl_walk, &agbno);
- if (error == XFS_ITER_ABORT)
+ if (error == -ECANCELED)
return 0;
if (error)
return error;
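
xrep_findroot_agfl_walk now stops the AGFL walk with -ECANCELED instead of the positive XFS_ITER_ABORT sentinel, and the caller translates -ECANCELED back into "found it, not a failure". A standalone sketch of that convention, with hypothetical names and the errno value spelled out so the example stays self-contained:

#include <stdio.h>

#define MY_ECANCELED 125	/* ECANCELED on Linux; illustrative only */

/* Callback: returning a negative error stops the walk immediately. */
static int match_block(unsigned bno, void *priv)
{
	unsigned *wanted = priv;

	return (*wanted == bno) ? -MY_ECANCELED : 0;
}

static int walk_list(const unsigned *list, unsigned n,
		     int (*fn)(unsigned, void *), void *priv)
{
	unsigned i;
	int error;

	for (i = 0; i < n; i++) {
		error = fn(list[i], priv);
		if (error)
			return error;
	}
	return 0;
}

int main(void)
{
	const unsigned agfl[] = { 10, 11, 12, 13 };
	unsigned wanted = 12;
	int error = walk_list(agfl, 4, match_block, &wanted);

	/* -ECANCELED means "stopped because we found it", not a failure. */
	if (error == -MY_ECANCELED)
		printf("block %u is on the list\n", wanted);
	else if (error)
		printf("walk failed: %d\n", error);
	else
		printf("block %u not found\n", wanted);
	return 0;
}

Using a negative errno as the stop signal keeps the callback's return space uniform: zero continues, any negative value aborts, and only the caller decides which aborts are benign.
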
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 15c8c5f3f688..f1775bb19313 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -16,6 +16,7 @@
#include "xfs_qm.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
+#include "xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c
index 99c0b1234c3c..5641ae512c9e 100644
--- a/fs/xfs/scrub/symlink.c
+++ b/fs/xfs/scrub/symlink.c
@@ -22,7 +22,7 @@ xchk_setup_symlink(
struct xfs_inode *ip)
{
/* Allocate the buffer without the inode lock held. */
- sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, KM_SLEEP);
+ sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, 0);
if (!sc->buf)
return -ENOMEM;
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index cbda40d40326..91693fce34a8 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -12,8 +12,10 @@
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_trace.h"
-#include <linux/posix_acl_xattr.h>
+#include "xfs_error.h"
+#include "xfs_acl.h"
+#include <linux/posix_acl_xattr.h>
/*
* Locking scheme:
@@ -23,6 +25,7 @@
STATIC struct posix_acl *
xfs_acl_from_disk(
+ struct xfs_mount *mp,
const struct xfs_acl *aclp,
int len,
int max_entries)
@@ -32,11 +35,18 @@ xfs_acl_from_disk(
const struct xfs_acl_entry *ace;
unsigned int count, i;
- if (len < sizeof(*aclp))
+ if (len < sizeof(*aclp)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
+ len);
return ERR_PTR(-EFSCORRUPTED);
+ }
+
count = be32_to_cpu(aclp->acl_cnt);
- if (count > max_entries || XFS_ACL_SIZE(count) != len)
+ if (count > max_entries || XFS_ACL_SIZE(count) != len) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
+ len);
return ERR_PTR(-EFSCORRUPTED);
+ }
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
@@ -112,7 +122,7 @@ xfs_get_acl(struct inode *inode, int type)
{
struct xfs_inode *ip = XFS_I(inode);
struct posix_acl *acl = NULL;
- struct xfs_acl *xfs_acl;
+ struct xfs_acl *xfs_acl = NULL;
unsigned char *ea_name;
int error;
int len;
@@ -135,12 +145,8 @@ xfs_get_acl(struct inode *inode, int type)
* go out to the disk.
*/
len = XFS_ACL_MAX_SIZE(ip->i_mount);
- xfs_acl = kmem_zalloc_large(len, KM_SLEEP);
- if (!xfs_acl)
- return ERR_PTR(-ENOMEM);
-
- error = xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl,
- &len, ATTR_ROOT);
+ error = xfs_attr_get(ip, ea_name, (unsigned char **)&xfs_acl, &len,
+ ATTR_ALLOC | ATTR_ROOT);
if (error) {
/*
* If the attribute doesn't exist make sure we have a negative
@@ -149,10 +155,10 @@ xfs_get_acl(struct inode *inode, int type)
if (error != -ENOATTR)
acl = ERR_PTR(error);
} else {
- acl = xfs_acl_from_disk(xfs_acl, len,
+ acl = xfs_acl_from_disk(ip->i_mount, xfs_acl, len,
XFS_ACL_MAX_ENTRIES(ip->i_mount));
+ kmem_free(xfs_acl);
}
- kmem_free(xfs_acl);
return acl;
}
@@ -180,7 +186,7 @@ __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
struct xfs_acl *xfs_acl;
int len = XFS_ACL_MAX_SIZE(ip->i_mount);
- xfs_acl = kmem_zalloc_large(len, KM_SLEEP);
+ xfs_acl = kmem_zalloc_large(len, 0);
if (!xfs_acl)
return -ENOMEM;
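
In the ACL hunk above, xfs_get_acl switches to ATTR_ALLOC so xfs_attr_get allocates a right-sized buffer only when the attribute actually exists, and the caller frees it only on the success path instead of always pre-allocating the worst-case size. The standalone sketch below shows the callee-allocates pattern; get_attr and its errno values are hypothetical, not the xfs_attr_get interface.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Callee-allocated lookup: on success *bufp points to a freshly allocated
 * copy of the value and *lenp holds its size; on failure nothing is
 * allocated, so the caller has no buffer to clean up.
 */
static int get_attr(const char *name, unsigned char **bufp, int *lenp)
{
	static const char value[] = "0x01020304";

	if (strcmp(name, "system.posix_acl_access") != 0)
		return -61;	/* -ENODATA: attribute not present */

	*lenp = (int)sizeof(value);
	*bufp = malloc((size_t)*lenp);
	if (!*bufp)
		return -12;	/* -ENOMEM */
	memcpy(*bufp, value, (size_t)*lenp);
	return 0;
}

int main(void)
{
	unsigned char *buf = NULL;
	int len = 0;
	int error = get_attr("system.posix_acl_access", &buf, &len);

	if (error) {
		printf("lookup failed: %d\n", error);
		return 1;
	}
	printf("got %d bytes: %s\n", len, buf);
	free(buf);	/* freed only on the path that allocated it */
	return 0;
}
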
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f16d5f196c6b..3a688eb5c5ae 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -18,108 +18,22 @@
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"
-/*
- * structure owned by writepages passed to individual writepage calls
- */
struct xfs_writepage_ctx {
- struct xfs_bmbt_irec imap;
- int fork;
+ struct iomap_writepage_ctx ctx;
unsigned int data_seq;
unsigned int cow_seq;
- struct xfs_ioend *ioend;
};
-struct block_device *
-xfs_find_bdev_for_inode(
- struct inode *inode)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
-
- if (XFS_IS_REALTIME_INODE(ip))
- return mp->m_rtdev_targp->bt_bdev;
- else
- return mp->m_ddev_targp->bt_bdev;
-}
-
-struct dax_device *
-xfs_find_daxdev_for_inode(
- struct inode *inode)
+static inline struct xfs_writepage_ctx *
+XFS_WPC(struct iomap_writepage_ctx *ctx)
{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
-
- if (XFS_IS_REALTIME_INODE(ip))
- return mp->m_rtdev_targp->bt_daxdev;
- else
- return mp->m_ddev_targp->bt_daxdev;
-}
-
-static void
-xfs_finish_page_writeback(
- struct inode *inode,
- struct bio_vec *bvec,
- int error)
-{
- struct iomap_page *iop = to_iomap_page(bvec->bv_page);
-
- if (error) {
- SetPageError(bvec->bv_page);
- mapping_set_error(inode->i_mapping, -EIO);
- }
-
- ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
- ASSERT(!iop || atomic_read(&iop->write_count) > 0);
-
- if (!iop || atomic_dec_and_test(&iop->write_count))
- end_page_writeback(bvec->bv_page);
-}
-
-/*
- * We're now finished for good with this ioend structure. Update the page
- * state, release holds on bios, and finally free up memory. Do not use the
- * ioend after this.
- */
-STATIC void
-xfs_destroy_ioend(
- struct xfs_ioend *ioend,
- int error)
-{
- struct inode *inode = ioend->io_inode;
- struct bio *bio = &ioend->io_inline_bio;
- struct bio *last = ioend->io_bio, *next;
- u64 start = bio->bi_iter.bi_sector;
- bool quiet = bio_flagged(bio, BIO_QUIET);
-
- for (bio = &ioend->io_inline_bio; bio; bio = next) {
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- /*
- * For the last bio, bi_private points to the ioend, so we
- * need to explicitly end the iteration here.
- */
- if (bio == last)
- next = NULL;
- else
- next = bio->bi_private;
-
- /* walk each page on bio, ending page IO on them */
- bio_for_each_segment_all(bvec, bio, iter_all)
- xfs_finish_page_writeback(inode, bvec, error);
- bio_put(bio);
- }
-
- if (unlikely(error && !quiet)) {
- xfs_err_ratelimited(XFS_I(inode)->i_mount,
- "writeback error on sector %llu", start);
- }
+ return container_of(ctx, struct xfs_writepage_ctx, ctx);
}
/*
* Fast and loose check if this write could update the on-disk inode size.
*/
-static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
+static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
return ioend->io_offset + ioend->io_size >
XFS_I(ioend->io_inode)->i_d.di_size;
@@ -127,7 +41,7 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
STATIC int
xfs_setfilesize_trans_alloc(
- struct xfs_ioend *ioend)
+ struct iomap_ioend *ioend)
{
struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
struct xfs_trans *tp;
@@ -137,7 +51,7 @@ xfs_setfilesize_trans_alloc(
if (error)
return error;
- ioend->io_append_trans = tp;
+ ioend->io_private = tp;
/*
* We may pass freeze protection with a transaction. So tell lockdep
@@ -200,11 +114,11 @@ xfs_setfilesize(
STATIC int
xfs_setfilesize_ioend(
- struct xfs_ioend *ioend,
+ struct iomap_ioend *ioend,
int error)
{
struct xfs_inode *ip = XFS_I(ioend->io_inode);
- struct xfs_trans *tp = ioend->io_append_trans;
+ struct xfs_trans *tp = ioend->io_private;
/*
* The transaction may have been allocated in the I/O submission thread,
@@ -228,9 +142,8 @@ xfs_setfilesize_ioend(
*/
STATIC void
xfs_end_ioend(
- struct xfs_ioend *ioend)
+ struct iomap_ioend *ioend)
{
- struct list_head ioend_list;
struct xfs_inode *ip = XFS_I(ioend->io_inode);
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
@@ -257,7 +170,7 @@ xfs_end_ioend(
*/
error = blk_status_to_errno(ioend->io_bio->bi_status);
if (unlikely(error)) {
- if (ioend->io_fork == XFS_COW_FORK)
+ if (ioend->io_flags & IOMAP_F_SHARED)
xfs_reflink_cancel_cow_range(ip, offset, size, true);
goto done;
}
@@ -265,154 +178,86 @@ xfs_end_ioend(
/*
* Success: commit the COW or unwritten blocks if needed.
*/
- if (ioend->io_fork == XFS_COW_FORK)
+ if (ioend->io_flags & IOMAP_F_SHARED)
error = xfs_reflink_end_cow(ip, offset, size);
- else if (ioend->io_state == XFS_EXT_UNWRITTEN)
+ else if (ioend->io_type == IOMAP_UNWRITTEN)
error = xfs_iomap_write_unwritten(ip, offset, size, false);
else
- ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+ ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);
done:
- if (ioend->io_append_trans)
+ if (ioend->io_private)
error = xfs_setfilesize_ioend(ioend, error);
- list_replace_init(&ioend->io_list, &ioend_list);
- xfs_destroy_ioend(ioend, error);
-
- while (!list_empty(&ioend_list)) {
- ioend = list_first_entry(&ioend_list, struct xfs_ioend,
- io_list);
- list_del_init(&ioend->io_list);
- xfs_destroy_ioend(ioend, error);
- }
-
+ iomap_finish_ioends(ioend, error);
memalloc_nofs_restore(nofs_flag);
}
/*
- * We can merge two adjacent ioends if they have the same set of work to do.
- */
-static bool
-xfs_ioend_can_merge(
- struct xfs_ioend *ioend,
- struct xfs_ioend *next)
-{
- if (ioend->io_bio->bi_status != next->io_bio->bi_status)
- return false;
- if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
- return false;
- if ((ioend->io_state == XFS_EXT_UNWRITTEN) ^
- (next->io_state == XFS_EXT_UNWRITTEN))
- return false;
- if (ioend->io_offset + ioend->io_size != next->io_offset)
- return false;
- return true;
-}
-
-/*
 * If the ioend being merged has a preallocated transaction for file
 * size updates, we need to ensure the ioend it is merged into also
* has one. If it already has one we can simply cancel the transaction
* as it is guaranteed to be clean.
*/
static void
-xfs_ioend_merge_append_transactions(
- struct xfs_ioend *ioend,
- struct xfs_ioend *next)
+xfs_ioend_merge_private(
+ struct iomap_ioend *ioend,
+ struct iomap_ioend *next)
{
- if (!ioend->io_append_trans) {
- ioend->io_append_trans = next->io_append_trans;
- next->io_append_trans = NULL;
+ if (!ioend->io_private) {
+ ioend->io_private = next->io_private;
+ next->io_private = NULL;
} else {
xfs_setfilesize_ioend(next, -ECANCELED);
}
}
-/* Try to merge adjacent completions. */
-STATIC void
-xfs_ioend_try_merge(
- struct xfs_ioend *ioend,
- struct list_head *more_ioends)
-{
- struct xfs_ioend *next_ioend;
-
- while (!list_empty(more_ioends)) {
- next_ioend = list_first_entry(more_ioends, struct xfs_ioend,
- io_list);
- if (!xfs_ioend_can_merge(ioend, next_ioend))
- break;
- list_move_tail(&next_ioend->io_list, &ioend->io_list);
- ioend->io_size += next_ioend->io_size;
- if (next_ioend->io_append_trans)
- xfs_ioend_merge_append_transactions(ioend, next_ioend);
- }
-}
-
-/* list_sort compare function for ioends */
-static int
-xfs_ioend_compare(
- void *priv,
- struct list_head *a,
- struct list_head *b)
-{
- struct xfs_ioend *ia;
- struct xfs_ioend *ib;
-
- ia = container_of(a, struct xfs_ioend, io_list);
- ib = container_of(b, struct xfs_ioend, io_list);
- if (ia->io_offset < ib->io_offset)
- return -1;
- else if (ia->io_offset > ib->io_offset)
- return 1;
- return 0;
-}
-
/* Finish all pending io completions. */
void
xfs_end_io(
struct work_struct *work)
{
- struct xfs_inode *ip;
- struct xfs_ioend *ioend;
- struct list_head completion_list;
+ struct xfs_inode *ip =
+ container_of(work, struct xfs_inode, i_ioend_work);
+ struct iomap_ioend *ioend;
+ struct list_head tmp;
unsigned long flags;
- ip = container_of(work, struct xfs_inode, i_ioend_work);
-
spin_lock_irqsave(&ip->i_ioend_lock, flags);
- list_replace_init(&ip->i_ioend_list, &completion_list);
+ list_replace_init(&ip->i_ioend_list, &tmp);
spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
- list_sort(NULL, &completion_list, xfs_ioend_compare);
-
- while (!list_empty(&completion_list)) {
- ioend = list_first_entry(&completion_list, struct xfs_ioend,
- io_list);
+ iomap_sort_ioends(&tmp);
+ while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
+ io_list))) {
list_del_init(&ioend->io_list);
- xfs_ioend_try_merge(ioend, &completion_list);
+ iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
xfs_end_ioend(ioend);
}
}
+static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
+{
+ return ioend->io_private ||
+ ioend->io_type == IOMAP_UNWRITTEN ||
+ (ioend->io_flags & IOMAP_F_SHARED);
+}
+
STATIC void
xfs_end_bio(
struct bio *bio)
{
- struct xfs_ioend *ioend = bio->bi_private;
+ struct iomap_ioend *ioend = bio->bi_private;
struct xfs_inode *ip = XFS_I(ioend->io_inode);
- struct xfs_mount *mp = ip->i_mount;
unsigned long flags;
- if (ioend->io_fork == XFS_COW_FORK ||
- ioend->io_state == XFS_EXT_UNWRITTEN ||
- ioend->io_append_trans != NULL) {
- spin_lock_irqsave(&ip->i_ioend_lock, flags);
- if (list_empty(&ip->i_ioend_list))
- WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
- &ip->i_ioend_work));
- list_add_tail(&ioend->io_list, &ip->i_ioend_list);
- spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
- } else
- xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
+ ASSERT(xfs_ioend_needs_workqueue(ioend));
+
+ spin_lock_irqsave(&ip->i_ioend_lock, flags);
+ if (list_empty(&ip->i_ioend_list))
+ WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
+ &ip->i_ioend_work));
+ list_add_tail(&ioend->io_list, &ip->i_ioend_list);
+ spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}
/*
@@ -421,19 +266,19 @@ xfs_end_bio(
*/
static bool
xfs_imap_valid(
- struct xfs_writepage_ctx *wpc,
+ struct iomap_writepage_ctx *wpc,
struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb)
+ loff_t offset)
{
- if (offset_fsb < wpc->imap.br_startoff ||
- offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
+ if (offset < wpc->iomap.offset ||
+ offset >= wpc->iomap.offset + wpc->iomap.length)
return false;
/*
* If this is a COW mapping, it is sufficient to check that the mapping
* covers the offset. Be careful to check this first because the caller
* can revalidate a COW mapping without updating the data seqno.
*/
- if (wpc->fork == XFS_COW_FORK)
+ if (wpc->iomap.flags & IOMAP_F_SHARED)
return true;
/*
@@ -443,17 +288,17 @@ xfs_imap_valid(
* checked (and found nothing at this offset) could have added
* overlapping blocks.
*/
- if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
+ if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
return false;
if (xfs_inode_has_cow_data(ip) &&
- wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
+ XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
return false;
return true;
}
/*
 * Pass in a delalloc extent and convert it to real extents, return the real
- * extent that maps offset_fsb in wpc->imap.
+ * extent that maps offset_fsb in wpc->iomap.
*
* The current page is held locked so nothing could have removed the block
* backing offset_fsb, although it could have moved from the COW to the data
@@ -461,32 +306,38 @@ xfs_imap_valid(
*/
static int
xfs_convert_blocks(
- struct xfs_writepage_ctx *wpc,
+ struct iomap_writepage_ctx *wpc,
struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb)
+ int whichfork,
+ loff_t offset)
{
int error;
+ unsigned *seq;
+
+ if (whichfork == XFS_COW_FORK)
+ seq = &XFS_WPC(wpc)->cow_seq;
+ else
+ seq = &XFS_WPC(wpc)->data_seq;
/*
- * Attempt to allocate whatever delalloc extent currently backs
- * offset_fsb and put the result into wpc->imap. Allocate in a loop
- * because it may take several attempts to allocate real blocks for a
- * contiguous delalloc extent if free space is sufficiently fragmented.
+ * Attempt to allocate whatever delalloc extent currently backs offset
+ * and put the result into wpc->iomap. Allocate in a loop because it
+ * may take several attempts to allocate real blocks for a contiguous
+ * delalloc extent if free space is sufficiently fragmented.
*/
do {
- error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
- &wpc->imap, wpc->fork == XFS_COW_FORK ?
- &wpc->cow_seq : &wpc->data_seq);
+ error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
+ &wpc->iomap, seq);
if (error)
return error;
- } while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);
+ } while (wpc->iomap.offset + wpc->iomap.length <= offset);
return 0;
}
-STATIC int
+static int
xfs_map_blocks(
- struct xfs_writepage_ctx *wpc,
+ struct iomap_writepage_ctx *wpc,
struct inode *inode,
loff_t offset)
{
@@ -496,6 +347,7 @@ xfs_map_blocks(
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
xfs_fileoff_t cow_fsb = NULLFILEOFF;
+ int whichfork = XFS_DATA_FORK;
struct xfs_bmbt_irec imap;
struct xfs_iext_cursor icur;
int retries = 0;
@@ -519,7 +371,7 @@ xfs_map_blocks(
* against concurrent updates and provides a memory barrier on the way
* out that ensures that we always see the current value.
*/
- if (xfs_imap_valid(wpc, ip, offset_fsb))
+ if (xfs_imap_valid(wpc, ip, offset))
return 0;
/*
@@ -541,10 +393,10 @@ retry:
xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
cow_fsb = imap.br_startoff;
if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
- wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
+ XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- wpc->fork = XFS_COW_FORK;
+ whichfork = XFS_COW_FORK;
goto allocate_blocks;
}
@@ -552,7 +404,7 @@ retry:
* No COW extent overlap. Revalidate now that we may have updated
* ->cow_seq. If the data mapping is still valid, we're done.
*/
- if (xfs_imap_valid(wpc, ip, offset_fsb)) {
+ if (xfs_imap_valid(wpc, ip, offset)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return 0;
}
@@ -564,11 +416,9 @@ retry:
*/
if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
imap.br_startoff = end_fsb; /* fake a hole past EOF */
- wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
+ XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- wpc->fork = XFS_DATA_FORK;
-
/* landed in a hole or beyond EOF? */
if (imap.br_startoff > offset_fsb) {
imap.br_blockcount = imap.br_startoff - offset_fsb;
@@ -592,11 +442,11 @@ retry:
isnullstartblock(imap.br_startblock))
goto allocate_blocks;
- wpc->imap = imap;
- trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
+ xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
+ trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
return 0;
allocate_blocks:
- error = xfs_convert_blocks(wpc, ip, offset_fsb);
+ error = xfs_convert_blocks(wpc, ip, whichfork, offset);
if (error) {
/*
* If we failed to find the extent in the COW fork we might have
@@ -605,7 +455,7 @@ allocate_blocks:
* the former case, but prevent additional retries to avoid
* looping forever for the latter case.
*/
- if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
+ if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
goto retry;
ASSERT(error != -EAGAIN);
return error;
@@ -616,34 +466,22 @@ allocate_blocks:
* original delalloc one. Trim the return extent to the next COW
* boundary again to force a re-lookup.
*/
- if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
- cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
- wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;
+ if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
+ loff_t cow_offset = XFS_FSB_TO_B(mp, cow_fsb);
+
+ if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
+ wpc->iomap.length = cow_offset - wpc->iomap.offset;
+ }
- ASSERT(wpc->imap.br_startoff <= offset_fsb);
- ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
- trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
+ ASSERT(wpc->iomap.offset <= offset);
+ ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
+ trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
return 0;
}
-/*
- * Submit the bio for an ioend. We are passed an ioend with a bio attached to
- * it, and we submit that bio. The ioend may be used for multiple bio
- * submissions, so we only want to allocate an append transaction for the ioend
- * once. In the case of multiple bio submission, each bio will take an IO
- * reference to the ioend to ensure that the ioend completion is only done once
- * all bios have been submitted and the ioend is really done.
- *
- * If @status is non-zero, it means that we have a situation where some part of
- * the submission process has failed after we have marked paged for writeback
- * and unlocked them. In this situation, we need to fail the bio and ioend
- * rather than submit it to IO. This typically only happens on a filesystem
- * shutdown.
- */
-STATIC int
-xfs_submit_ioend(
- struct writeback_control *wbc,
- struct xfs_ioend *ioend,
+static int
+xfs_prepare_ioend(
+ struct iomap_ioend *ioend,
int status)
{
unsigned int nofs_flag;
@@ -656,157 +494,24 @@ xfs_submit_ioend(
nofs_flag = memalloc_nofs_save();
/* Convert CoW extents to regular */
- if (!status && ioend->io_fork == XFS_COW_FORK) {
+ if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
ioend->io_offset, ioend->io_size);
}
/* Reserve log space if we might write beyond the on-disk inode size. */
if (!status &&
- (ioend->io_fork == XFS_COW_FORK ||
- ioend->io_state != XFS_EXT_UNWRITTEN) &&
+ ((ioend->io_flags & IOMAP_F_SHARED) ||
+ ioend->io_type != IOMAP_UNWRITTEN) &&
xfs_ioend_is_append(ioend) &&
- !ioend->io_append_trans)
+ !ioend->io_private)
status = xfs_setfilesize_trans_alloc(ioend);
memalloc_nofs_restore(nofs_flag);
- ioend->io_bio->bi_private = ioend;
- ioend->io_bio->bi_end_io = xfs_end_bio;
-
- /*
- * If we are failing the IO now, just mark the ioend with an
- * error and finish it. This will run IO completion immediately
- * as there is only one reference to the ioend at this point in
- * time.
- */
- if (status) {
- ioend->io_bio->bi_status = errno_to_blk_status(status);
- bio_endio(ioend->io_bio);
- return status;
- }
-
- submit_bio(ioend->io_bio);
- return 0;
-}
-
-static struct xfs_ioend *
-xfs_alloc_ioend(
- struct inode *inode,
- int fork,
- xfs_exntst_t state,
- xfs_off_t offset,
- struct block_device *bdev,
- sector_t sector,
- struct writeback_control *wbc)
-{
- struct xfs_ioend *ioend;
- struct bio *bio;
-
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = sector;
- bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
- bio->bi_write_hint = inode->i_write_hint;
- wbc_init_bio(wbc, bio);
-
- ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
- INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_fork = fork;
- ioend->io_state = state;
- ioend->io_inode = inode;
- ioend->io_size = 0;
- ioend->io_offset = offset;
- ioend->io_append_trans = NULL;
- ioend->io_bio = bio;
- return ioend;
-}
-
-/*
- * Allocate a new bio, and chain the old bio to the new one.
- *
- * Note that we have to do perform the chaining in this unintuitive order
- * so that the bi_private linkage is set up in the right direction for the
- * traversal in xfs_destroy_ioend().
- */
-static struct bio *
-xfs_chain_bio(
- struct bio *prev)
-{
- struct bio *new;
-
- new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
- bio_copy_dev(new, prev);/* also copies over blkcg information */
- new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_opf = prev->bi_opf;
- new->bi_write_hint = prev->bi_write_hint;
-
- bio_chain(prev, new);
- bio_get(prev); /* for xfs_destroy_ioend */
- submit_bio(prev);
- return new;
-}
-
-/*
- * Test to see if we have an existing ioend structure that we could append to
- * first, otherwise finish off the current ioend and start another.
- */
-STATIC void
-xfs_add_to_ioend(
- struct inode *inode,
- xfs_off_t offset,
- struct page *page,
- struct iomap_page *iop,
- struct xfs_writepage_ctx *wpc,
- struct writeback_control *wbc,
- struct list_head *iolist)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- struct block_device *bdev = xfs_find_bdev_for_inode(inode);
- unsigned len = i_blocksize(inode);
- unsigned poff = offset & (PAGE_SIZE - 1);
- bool merged, same_page = false;
- sector_t sector;
-
- sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
- ((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
-
- if (!wpc->ioend ||
- wpc->fork != wpc->ioend->io_fork ||
- wpc->imap.br_state != wpc->ioend->io_state ||
- sector != bio_end_sector(wpc->ioend->io_bio) ||
- offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
- if (wpc->ioend)
- list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
- wpc->imap.br_state, offset, bdev, sector, wbc);
- }
-
- merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
- &same_page);
-
- if (iop && !same_page)
- atomic_inc(&iop->write_count);
-
- if (!merged) {
- if (bio_full(wpc->ioend->io_bio, len))
- wpc->ioend->io_bio = xfs_chain_bio(wpc->ioend->io_bio);
- bio_add_page(wpc->ioend->io_bio, page, len, poff);
- }
-
- wpc->ioend->io_size += len;
- wbc_account_cgroup_owner(wbc, page, len);
-}
-
-STATIC void
-xfs_vm_invalidatepage(
- struct page *page,
- unsigned int offset,
- unsigned int length)
-{
- trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
- iomap_invalidatepage(page, offset, length);
+ if (xfs_ioend_needs_workqueue(ioend))
+ ioend->io_bio->bi_end_io = xfs_end_bio;
+ return status;
}
/*
@@ -820,8 +525,8 @@ xfs_vm_invalidatepage(
* transaction as there is no space left for block reservation (typically why we
 * see an ENOSPC in writeback).
*/
-STATIC void
-xfs_aops_discard_page(
+static void
+xfs_discard_page(
struct page *page)
{
struct inode *inode = page->mapping->host;
@@ -843,246 +548,14 @@ xfs_aops_discard_page(
if (error && !XFS_FORCED_SHUTDOWN(mp))
xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
- xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
-}
-
-/*
- * We implement an immediate ioend submission policy here to avoid needing to
- * chain multiple ioends and hence nest mempool allocations which can violate
- * forward progress guarantees we need to provide. The current ioend we are
- * adding blocks to is cached on the writepage context, and if the new block
- * does not append to the cached ioend it will create a new ioend and cache that
- * instead.
- *
- * If a new ioend is created and cached, the old ioend is returned and queued
- * locally for submission once the entire page is processed or an error has been
- * detected. While ioends are submitted immediately after they are completed,
- * batching optimisations are provided by higher level block plugging.
- *
- * At the end of a writeback pass, there will be a cached ioend remaining on the
- * writepage context that the caller will need to submit.
- */
-static int
-xfs_writepage_map(
- struct xfs_writepage_ctx *wpc,
- struct writeback_control *wbc,
- struct inode *inode,
- struct page *page,
- uint64_t end_offset)
-{
- LIST_HEAD(submit_list);
- struct iomap_page *iop = to_iomap_page(page);
- unsigned len = i_blocksize(inode);
- struct xfs_ioend *ioend, *next;
- uint64_t file_offset; /* file offset of page */
- int error = 0, count = 0, i;
-
- ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
- ASSERT(!iop || atomic_read(&iop->write_count) == 0);
-
- /*
- * Walk through the page to find areas to write back. If we run off the
- * end of the current map or find the current map invalid, grab a new
- * one.
- */
- for (i = 0, file_offset = page_offset(page);
- i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
- i++, file_offset += len) {
- if (iop && !test_bit(i, iop->uptodate))
- continue;
-
- error = xfs_map_blocks(wpc, inode, file_offset);
- if (error)
- break;
- if (wpc->imap.br_startblock == HOLESTARTBLOCK)
- continue;
- xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
- &submit_list);
- count++;
- }
-
- ASSERT(wpc->ioend || list_empty(&submit_list));
- ASSERT(PageLocked(page));
- ASSERT(!PageWriteback(page));
-
- /*
- * On error, we have to fail the ioend here because we may have set
- * pages under writeback, we have to make sure we run IO completion to
- * mark the error state of the IO appropriately, so we can't cancel the
- * ioend directly here. That means we have to mark this page as under
- * writeback if we included any blocks from it in the ioend chain so
- * that completion treats it correctly.
- *
- * If we didn't include the page in the ioend, the on error we can
- * simply discard and unlock it as there are no other users of the page
- * now. The caller will still need to trigger submission of outstanding
- * ioends on the writepage context so they are treated correctly on
- * error.
- */
- if (unlikely(error)) {
- if (!count) {
- xfs_aops_discard_page(page);
- ClearPageUptodate(page);
- unlock_page(page);
- goto done;
- }
-
- /*
- * If the page was not fully cleaned, we need to ensure that the
- * higher layers come back to it correctly. That means we need
- * to keep the page dirty, and for WB_SYNC_ALL writeback we need
- * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
- * so another attempt to write this page in this writeback sweep
- * will be made.
- */
- set_page_writeback_keepwrite(page);
- } else {
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
- }
-
- unlock_page(page);
-
- /*
- * Preserve the original error if there was one, otherwise catch
- * submission errors here and propagate into subsequent ioend
- * submissions.
- */
- list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
- int error2;
-
- list_del_init(&ioend->io_list);
- error2 = xfs_submit_ioend(wbc, ioend, error);
- if (error2 && !error)
- error = error2;
- }
-
- /*
- * We can end up here with no error and nothing to write only if we race
- * with a partial page truncate on a sub-page block sized filesystem.
- */
- if (!count)
- end_page_writeback(page);
-done:
- mapping_set_error(page->mapping, error);
- return error;
+ iomap_invalidatepage(page, 0, PAGE_SIZE);
}
-/*
- * Write out a dirty page.
- *
- * For delalloc space on the page we need to allocate space and flush it.
- * For unwritten space on the page we need to start the conversion to
- * regular allocated space.
- */
-STATIC int
-xfs_do_writepage(
- struct page *page,
- struct writeback_control *wbc,
- void *data)
-{
- struct xfs_writepage_ctx *wpc = data;
- struct inode *inode = page->mapping->host;
- loff_t offset;
- uint64_t end_offset;
- pgoff_t end_index;
-
- trace_xfs_writepage(inode, page, 0, 0);
-
- /*
- * Refuse to write the page out if we are called from reclaim context.
- *
- * This avoids stack overflows when called from deeply used stacks in
- * random callers for direct reclaim or memcg reclaim. We explicitly
- * allow reclaim from kswapd as the stack usage there is relatively low.
- *
- * This should never happen except in the case of a VM regression so
- * warn about it.
- */
- if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
- PF_MEMALLOC))
- goto redirty;
-
- /*
- * Given that we do not allow direct reclaim to call us, we should
- * never be called while in a filesystem transaction.
- */
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
- goto redirty;
-
- /*
- * Is this page beyond the end of the file?
- *
- * The page index is less than the end_index, adjust the end_offset
- * to the highest offset that this page should represent.
- * -----------------------------------------------------
- * | file mapping | <EOF> |
- * -----------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | |
- * ^--------------------------------^----------|--------
- * | desired writeback range | see else |
- * ---------------------------------^------------------|
- */
- offset = i_size_read(inode);
- end_index = offset >> PAGE_SHIFT;
- if (page->index < end_index)
- end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
- else {
- /*
- * Check whether the page to write out is beyond or straddles
- * i_size or not.
- * -------------------------------------------------------
- * | file mapping | <EOF> |
- * -------------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
- * ^--------------------------------^-----------|---------
- * | | Straddles |
- * ---------------------------------^-----------|--------|
- */
- unsigned offset_into_page = offset & (PAGE_SIZE - 1);
-
- /*
- * Skip the page if it is fully outside i_size, e.g. due to a
- * truncate operation that is in progress. We must redirty the
- * page so that reclaim stops reclaiming it. Otherwise
- * xfs_vm_releasepage() is called on it and gets confused.
- *
- * Note that the end_index is unsigned long, it would overflow
- * if the given offset is greater than 16TB on 32-bit system
- * and if we do check the page is fully outside i_size or not
- * via "if (page->index >= end_index + 1)" as "end_index + 1"
- * will be evaluated to 0. Hence this page will be redirtied
- * and be written out repeatedly which would result in an
- * infinite loop, the user program that perform this operation
- * will hang. Instead, we can verify this situation by checking
- * if the page to write is totally beyond the i_size or if it's
- * offset is just equal to the EOF.
- */
- if (page->index > end_index ||
- (page->index == end_index && offset_into_page == 0))
- goto redirty;
-
- /*
- * The page straddles i_size. It must be zeroed out on each
- * and every writepage invocation because it may be mmapped.
- * "A file is mapped in multiples of the page size. For a file
- * that is not a multiple of the page size, the remaining
- * memory is zeroed when mapped, and writes to that region are
- * not written out to the file."
- */
- zero_user_segment(page, offset_into_page, PAGE_SIZE);
-
- /* Adjust the end_offset to the end of file */
- end_offset = offset;
- }
-
- return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
-
-redirty:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
-}
+static const struct iomap_writeback_ops xfs_writeback_ops = {
+ .map_blocks = xfs_map_blocks,
+ .prepare_ioend = xfs_prepare_ioend,
+ .discard_page = xfs_discard_page,
+};
STATIC int
xfs_vm_writepage(
@@ -1090,12 +563,8 @@ xfs_vm_writepage(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = { };
- int ret;
- ret = xfs_do_writepage(page, wbc, &wpc);
- if (wpc.ioend)
- ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
- return ret;
+ return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}
STATIC int
@@ -1104,13 +573,9 @@ xfs_vm_writepages(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = { };
- int ret;
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
- ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
- if (wpc.ioend)
- ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
- return ret;
+ return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}
STATIC int
@@ -1118,18 +583,11 @@ xfs_dax_writepages(
struct address_space *mapping,
struct writeback_control *wbc)
{
- xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
- return dax_writeback_mapping_range(mapping,
- xfs_find_bdev_for_inode(mapping->host), wbc);
-}
+ struct xfs_inode *ip = XFS_I(mapping->host);
-STATIC int
-xfs_vm_releasepage(
- struct page *page,
- gfp_t gfp_mask)
-{
- trace_xfs_releasepage(page->mapping->host, page, 0, 0);
- return iomap_releasepage(page, gfp_mask);
+ xfs_iflags_clear(ip, XFS_ITRUNCATED);
+ return dax_writeback_mapping_range(mapping,
+ xfs_inode_buftarg(ip)->bt_bdev, wbc);
}
STATIC sector_t
@@ -1152,7 +610,7 @@ xfs_vm_bmap(
*/
if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
return 0;
- return iomap_bmap(mapping, block, &xfs_iomap_ops);
+ return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}
STATIC int
@@ -1160,8 +618,7 @@ xfs_vm_readpage(
struct file *unused,
struct page *page)
{
- trace_xfs_vm_readpage(page->mapping->host, 1);
- return iomap_readpage(page, &xfs_iomap_ops);
+ return iomap_readpage(page, &xfs_read_iomap_ops);
}
STATIC int
@@ -1171,8 +628,7 @@ xfs_vm_readpages(
struct list_head *pages,
unsigned nr_pages)
{
- trace_xfs_vm_readpages(mapping->host, nr_pages);
- return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
+ return iomap_readpages(mapping, pages, nr_pages, &xfs_read_iomap_ops);
}
static int
@@ -1181,8 +637,9 @@ xfs_iomap_swapfile_activate(
struct file *swap_file,
sector_t *span)
{
- sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
- return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
+ sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
+ return iomap_swapfile_activate(sis, swap_file, span,
+ &xfs_read_iomap_ops);
}
const struct address_space_operations xfs_address_space_operations = {
@@ -1191,8 +648,8 @@ const struct address_space_operations xfs_address_space_operations = {
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
.set_page_dirty = iomap_set_page_dirty,
- .releasepage = xfs_vm_releasepage,
- .invalidatepage = xfs_vm_invalidatepage,
+ .releasepage = iomap_releasepage,
+ .invalidatepage = iomap_invalidatepage,
.bmap = xfs_vm_bmap,
.direct_IO = noop_direct_IO,
.migratepage = iomap_migrate_page,
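
The xfs_aops.c rewrite above embeds a generic iomap_writepage_ctx at the start of xfs_writepage_ctx and recovers the outer structure with container_of() in XFS_WPC(), so the shared iomap writeback code only ever sees the inner context while XFS keeps its data/cow sequence counters alongside it. A standalone sketch of that embedding pattern follows; generic_ctx, fs_ctx and map_blocks are hypothetical names, and container_of is redefined for userspace.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic context the shared layer knows about. */
struct generic_ctx {
	long offset;
	long length;
};

/* Filesystem-private wrapper: generic part first, private state after. */
struct fs_ctx {
	struct generic_ctx ctx;
	unsigned data_seq;
	unsigned cow_seq;
};

static inline struct fs_ctx *FS_CTX(struct generic_ctx *ctx)
{
	return container_of(ctx, struct fs_ctx, ctx);
}

/* A callback invoked by the shared layer with only the generic pointer. */
static int map_blocks(struct generic_ctx *ctx, long offset)
{
	struct fs_ctx *wpc = FS_CTX(ctx);

	wpc->data_seq++;		/* private state is still reachable */
	ctx->offset = offset;
	ctx->length = 4096;
	return 0;
}

int main(void)
{
	struct fs_ctx wpc = { { 0, 0 }, 0, 0 };

	map_blocks(&wpc.ctx, 8192);
	printf("offset=%ld len=%ld data_seq=%u\n",
	       wpc.ctx.offset, wpc.ctx.length, wpc.data_seq);
	return 0;
}

This is why the diff can delete the whole writepage/ioend machinery from xfs_aops.c: the generic layer owns the loop, and the filesystem supplies only the three callbacks in xfs_writeback_ops plus whatever private state it tucks behind the embedded context.
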
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 45a1ea240cbb..e0bd68419764 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -6,29 +6,9 @@
#ifndef __XFS_AOPS_H__
#define __XFS_AOPS_H__
-extern struct bio_set xfs_ioend_bioset;
-
-/*
- * Structure for buffered I/O completions.
- */
-struct xfs_ioend {
- struct list_head io_list; /* next ioend in chain */
- int io_fork; /* inode fork written back */
- xfs_exntst_t io_state; /* extent state */
- struct inode *io_inode; /* file being written to */
- size_t io_size; /* size of the extent */
- xfs_off_t io_offset; /* offset in the file */
- struct xfs_trans *io_append_trans;/* xact. for size update */
- struct bio *io_bio; /* bio being built */
- struct bio io_inline_bio; /* MUST BE LAST! */
-};
-
extern const struct address_space_operations xfs_address_space_operations;
extern const struct address_space_operations xfs_dax_aops;
int xfs_setfilesize(struct xfs_inode *ip, xfs_off_t offset, size_t size);
-extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
-extern struct dax_device *xfs_find_daxdev_for_inode(struct inode *);
-
#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index dc93c51c17de..5ff49523d8ea 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -22,6 +22,7 @@
#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_dir2.h"
+#include "xfs_error.h"
/*
* Look at all the extents for this logical region,
@@ -147,7 +148,7 @@ xfs_attr3_leaf_inactive(
* Allocate storage for a list of all the "remote" value extents.
*/
size = count * sizeof(xfs_attr_inactive_list_t);
- list = kmem_alloc(size, KM_SLEEP);
+ list = kmem_alloc(size, 0);
/*
* Identify each of the "remote" value extents.
@@ -190,37 +191,35 @@ xfs_attr3_leaf_inactive(
*/
STATIC int
xfs_attr3_node_inactive(
- struct xfs_trans **trans,
- struct xfs_inode *dp,
- struct xfs_buf *bp,
- int level)
+ struct xfs_trans **trans,
+ struct xfs_inode *dp,
+ struct xfs_buf *bp,
+ int level)
{
- xfs_da_blkinfo_t *info;
- xfs_da_intnode_t *node;
- xfs_dablk_t child_fsb;
- xfs_daddr_t parent_blkno, child_blkno;
- int error, i;
- struct xfs_buf *child_bp;
- struct xfs_da_node_entry *btree;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_da_blkinfo *info;
+ xfs_dablk_t child_fsb;
+ xfs_daddr_t parent_blkno, child_blkno;
+ struct xfs_buf *child_bp;
struct xfs_da3_icnode_hdr ichdr;
+ int error, i;
/*
* Since this code is recursive (gasp!) we must protect ourselves.
*/
if (level > XFS_DA_NODE_MAXDEPTH) {
xfs_trans_brelse(*trans, bp); /* no locks for later trans */
- return -EIO;
+ xfs_buf_corruption_error(bp);
+ return -EFSCORRUPTED;
}
- node = bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&ichdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &ichdr, bp->b_addr);
parent_blkno = bp->b_bn;
if (!ichdr.count) {
xfs_trans_brelse(*trans, bp);
return 0;
}
- btree = dp->d_ops->node_tree_p(node);
- child_fsb = be32_to_cpu(btree[0].before);
+ child_fsb = be32_to_cpu(ichdr.btree[0].before);
xfs_trans_brelse(*trans, bp); /* no locks for later trans */
/*
@@ -235,7 +234,7 @@ xfs_attr3_node_inactive(
* traversal of the tree so we may deal with many blocks
* before we come back to this one.
*/
- error = xfs_da3_node_read(*trans, dp, child_fsb, -1, &child_bp,
+ error = xfs_da3_node_read(*trans, dp, child_fsb, &child_bp,
XFS_ATTR_FORK);
if (error)
return error;
@@ -258,8 +257,9 @@ xfs_attr3_node_inactive(
error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
break;
default:
- error = -EIO;
+ xfs_buf_corruption_error(child_bp);
xfs_trans_brelse(*trans, child_bp);
+ error = -EFSCORRUPTED;
break;
}
if (error)
@@ -268,10 +268,16 @@ xfs_attr3_node_inactive(
/*
* Remove the subsidiary block from the cache and from the log.
*/
- error = xfs_da_get_buf(*trans, dp, 0, child_blkno, &child_bp,
- XFS_ATTR_FORK);
- if (error)
+ child_bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
+ child_blkno,
+ XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
+ if (!child_bp)
+ return -EIO;
+ error = bp->b_error;
+ if (error) {
+ xfs_trans_brelse(*trans, child_bp);
return error;
+ }
xfs_trans_binval(*trans, child_bp);
/*
@@ -279,13 +285,15 @@ xfs_attr3_node_inactive(
* child block number.
*/
if (i + 1 < ichdr.count) {
- error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
- &bp, XFS_ATTR_FORK);
+ struct xfs_da3_icnode_hdr phdr;
+
+ error = xfs_da3_node_read_mapped(*trans, dp,
+ parent_blkno, &bp, XFS_ATTR_FORK);
if (error)
return error;
- node = bp->b_addr;
- btree = dp->d_ops->node_tree_p(node);
- child_fsb = be32_to_cpu(btree[i + 1].before);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &phdr,
+ bp->b_addr);
+ child_fsb = be32_to_cpu(phdr.btree[i + 1].before);
xfs_trans_brelse(*trans, bp);
}
/*
@@ -310,6 +318,7 @@ xfs_attr3_root_inactive(
struct xfs_trans **trans,
struct xfs_inode *dp)
{
+ struct xfs_mount *mp = dp->i_mount;
struct xfs_da_blkinfo *info;
struct xfs_buf *bp;
xfs_daddr_t blkno;
@@ -321,7 +330,7 @@ xfs_attr3_root_inactive(
* the extents in reverse order the extent containing
* block 0 must still be there.
*/
- error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
+ error = xfs_da3_node_read(*trans, dp, 0, &bp, XFS_ATTR_FORK);
if (error)
return error;
blkno = bp->b_bn;
@@ -341,7 +350,8 @@ xfs_attr3_root_inactive(
error = xfs_attr3_leaf_inactive(trans, dp, bp);
break;
default:
- error = -EIO;
+ error = -EFSCORRUPTED;
+ xfs_buf_corruption_error(bp);
xfs_trans_brelse(*trans, bp);
break;
}
@@ -351,9 +361,15 @@ xfs_attr3_root_inactive(
/*
* Invalidate the incore copy of the root block.
*/
- error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
- if (error)
+ bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
+ XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
+ if (!bp)
+ return -EIO;
+ error = bp->b_error;
+ if (error) {
+ xfs_trans_brelse(*trans, bp);
return error;
+ }
xfs_trans_binval(*trans, bp); /* remove from cache */
/*
* Commit the invalidate and start the next transaction.
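
The attr-inactive hunks above reclassify impossible on-disk state (an over-deep node tree, an unrecognized block magic) as -EFSCORRUPTED with a corruption report, instead of a bare -EIO. Below is a standalone sketch of a bounded recursive walk that reports corruption when the depth limit is exceeded; node, node_inactive and the errno value are hypothetical.

#include <stdio.h>

#define MY_EFSCORRUPTED 117	/* EUCLEAN on Linux; illustrative only */
#define MAXDEPTH 5

struct node {
	int level;			/* 0 == leaf */
	struct node *child;
};

static void corruption_error(const char *what)
{
	fprintf(stderr, "metadata corruption detected: %s\n", what);
}

/* Walk down to the leaf, refusing trees deeper than the format allows. */
static int node_inactive(struct node *np, int level)
{
	if (level > MAXDEPTH) {
		corruption_error("node tree too deep");
		return -MY_EFSCORRUPTED;
	}
	if (np->level == 0)
		return 0;		/* leaf: nothing below to invalidate */
	return node_inactive(np->child, level + 1);
}

int main(void)
{
	struct node leaf = { .level = 0, .child = NULL };
	struct node root = { .level = 1, .child = &leaf };

	printf("walk: %d\n", node_inactive(&root, 1));
	return 0;
}
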
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 58fc820a70c6..d37743bdf274 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -49,14 +49,16 @@ xfs_attr_shortform_compare(const void *a, const void *b)
* we can begin returning them to the user.
*/
static int
-xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+xfs_attr_shortform_list(
+ struct xfs_attr_list_context *context)
{
- attrlist_cursor_kern_t *cursor;
- xfs_attr_sf_sort_t *sbuf, *sbp;
- xfs_attr_shortform_t *sf;
- xfs_attr_sf_entry_t *sfe;
- xfs_inode_t *dp;
- int sbsize, nsbuf, count, i;
+ struct attrlist_cursor_kern *cursor;
+ struct xfs_attr_sf_sort *sbuf, *sbp;
+ struct xfs_attr_shortform *sf;
+ struct xfs_attr_sf_entry *sfe;
+ struct xfs_inode *dp;
+ int sbsize, nsbuf, count, i;
+ int error = 0;
ASSERT(context != NULL);
dp = context->dp;
@@ -84,6 +86,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
(XFS_ISRESET_CURSOR(cursor) &&
(dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
+ if (XFS_IS_CORRUPT(context->dp->i_mount,
+ !xfs_attr_namecheck(sfe->nameval,
+ sfe->namelen)))
+ return -EFSCORRUPTED;
context->put_listent(context,
sfe->flags,
sfe->nameval,
@@ -109,7 +115,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
* It didn't all fit, so we have to sort everything on hashval.
*/
sbsize = sf->hdr.count * sizeof(*sbuf);
- sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
+ sbp = sbuf = kmem_alloc(sbsize, KM_NOFS);
/*
* Scan the attribute list for the rest of the entries, storing
@@ -161,10 +167,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
break;
}
}
- if (i == nsbuf) {
- kmem_free(sbuf);
- return 0;
- }
+ if (i == nsbuf)
+ goto out;
/*
* Loop putting entries into the user buffer.
@@ -174,6 +178,12 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
cursor->hashval = sbp->hash;
cursor->offset = 0;
}
+ if (XFS_IS_CORRUPT(context->dp->i_mount,
+ !xfs_attr_namecheck(sbp->name,
+ sbp->namelen))) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
context->put_listent(context,
sbp->flags,
sbp->name,
@@ -183,9 +193,9 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
break;
cursor->offset++;
}
-
+out:
kmem_free(sbuf);
- return 0;
+ return error;
}
/*
@@ -213,7 +223,7 @@ xfs_attr_node_list_lookup(
ASSERT(*pbp == NULL);
cursor->blkno = 0;
for (;;) {
- error = xfs_da3_node_read(tp, dp, cursor->blkno, -1, &bp,
+ error = xfs_da3_node_read(tp, dp, cursor->blkno, &bp,
XFS_ATTR_FORK);
if (error)
return error;
@@ -229,7 +239,7 @@ xfs_attr_node_list_lookup(
goto out_corruptbuf;
}
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(mp, &nodehdr, node);
/* Tree taller than we can handle; bail out! */
if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
@@ -243,7 +253,7 @@ xfs_attr_node_list_lookup(
else
expected_level--;
- btree = dp->d_ops->node_tree_p(node);
+ btree = nodehdr.btree;
for (i = 0; i < nodehdr.count; btree++, i++) {
if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
cursor->blkno = be32_to_cpu(btree->before);
@@ -258,7 +268,7 @@ xfs_attr_node_list_lookup(
return 0;
/* We can't point back to the root. */
- if (cursor->blkno == 0)
+ if (XFS_IS_CORRUPT(mp, cursor->blkno == 0))
return -EFSCORRUPTED;
}
@@ -269,6 +279,7 @@ xfs_attr_node_list_lookup(
return 0;
out_corruptbuf:
+ xfs_buf_corruption_error(bp);
xfs_trans_brelse(tp, bp);
return -EFSCORRUPTED;
}
@@ -284,7 +295,7 @@ xfs_attr_node_list(
struct xfs_buf *bp;
struct xfs_inode *dp = context->dp;
struct xfs_mount *mp = dp->i_mount;
- int error;
+ int error = 0;
trace_xfs_attr_node_list(context);
@@ -298,8 +309,8 @@ xfs_attr_node_list(
*/
bp = NULL;
if (cursor->blkno > 0) {
- error = xfs_da3_node_read(context->tp, dp, cursor->blkno, -1,
- &bp, XFS_ATTR_FORK);
+ error = xfs_da3_node_read(context->tp, dp, cursor->blkno, &bp,
+ XFS_ATTR_FORK);
if ((error != 0) && (error != -EFSCORRUPTED))
return error;
if (bp) {
@@ -358,24 +369,27 @@ xfs_attr_node_list(
*/
for (;;) {
leaf = bp->b_addr;
- xfs_attr3_leaf_list_int(bp, context);
+ error = xfs_attr3_leaf_list_int(bp, context);
+ if (error)
+ break;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
if (context->seen_enough || leafhdr.forw == 0)
break;
cursor->blkno = leafhdr.forw;
xfs_trans_brelse(context->tp, bp);
- error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno,
+ &bp);
if (error)
return error;
}
xfs_trans_brelse(context->tp, bp);
- return 0;
+ return error;
}
/*
* Copy out attribute list entries for attr_list(), for leaf attribute lists.
*/
-void
+int
xfs_attr3_leaf_list_int(
struct xfs_buf *bp,
struct xfs_attr_list_context *context)
@@ -417,7 +431,7 @@ xfs_attr3_leaf_list_int(
}
if (i == ichdr.count) {
trace_xfs_attr_list_notfound(context);
- return;
+ return 0;
}
} else {
entry = &entries[0];
@@ -457,6 +471,9 @@ xfs_attr3_leaf_list_int(
valuelen = be32_to_cpu(name_rmt->valuelen);
}
+ if (XFS_IS_CORRUPT(context->dp->i_mount,
+ !xfs_attr_namecheck(name, namelen)))
+ return -EFSCORRUPTED;
context->put_listent(context, entry->flags,
name, namelen, valuelen);
if (context->seen_enough)
@@ -464,7 +481,7 @@ xfs_attr3_leaf_list_int(
cursor->offset++;
}
trace_xfs_attr_list_leaf_end(context);
- return;
+ return 0;
}
/*
@@ -479,13 +496,13 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
trace_xfs_attr_leaf_list(context);
context->cursor->blkno = 0;
- error = xfs_attr3_leaf_read(context->tp, context->dp, 0, -1, &bp);
+ error = xfs_attr3_leaf_read(context->tp, context->dp, 0, &bp);
if (error)
return error;
- xfs_attr3_leaf_list_int(bp, context);
+ error = xfs_attr3_leaf_list_int(bp, context);
xfs_trans_brelse(context->tp, bp);
- return 0;
+ return error;
}
int
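
The attr listing changes validate every name with xfs_attr_namecheck() and fail the whole listing with -EFSCORRUPTED before copying a bad entry out via put_listent. A standalone sketch of validating untrusted on-disk names before emitting them is below; the namecheck rules and names here are hypothetical simplifications.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MY_EFSCORRUPTED 117	/* EUCLEAN on Linux; illustrative only */
#define MAXNAMELEN 255

/* Reject lengths and embedded NULs that a valid on-disk name cannot have. */
static bool namecheck(const char *name, size_t len)
{
	if (len == 0 || len > MAXNAMELEN)
		return false;
	return memchr(name, '\0', len) == NULL;
}

static int list_one(const char *name, size_t len)
{
	if (!namecheck(name, len))
		return -MY_EFSCORRUPTED;	/* stop before leaking garbage */
	printf("attr: %.*s\n", (int)len, name);
	return 0;
}

int main(void)
{
	int error;

	error = list_one("user.comment", strlen("user.comment"));
	if (!error)
		error = list_one("bad\0name", 8);	/* embedded NUL */
	printf("listing finished: %d\n", error);
	return 0;
}
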
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 9fa4a7ee8cfc..ee6f4229cebc 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -21,7 +21,7 @@
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_bui_zone;
kmem_zone_t *xfs_bud_zone;
@@ -35,7 +35,7 @@ void
xfs_bui_item_free(
struct xfs_bui_log_item *buip)
{
- kmem_zone_free(xfs_bui_zone, buip);
+ kmem_cache_free(xfs_bui_zone, buip);
}
/*
@@ -141,7 +141,7 @@ xfs_bui_init(
{
struct xfs_bui_log_item *buip;
- buip = kmem_zone_zalloc(xfs_bui_zone, KM_SLEEP);
+ buip = kmem_zone_zalloc(xfs_bui_zone, 0);
xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
@@ -201,7 +201,7 @@ xfs_bud_item_release(
struct xfs_bud_log_item *budp = BUD_ITEM(lip);
xfs_bui_release(budp->bud_buip);
- kmem_zone_free(xfs_bud_zone, budp);
+ kmem_cache_free(xfs_bud_zone, budp);
}
static const struct xfs_item_ops xfs_bud_item_ops = {
@@ -218,7 +218,7 @@ xfs_trans_get_bud(
{
struct xfs_bud_log_item *budp;
- budp = kmem_zone_zalloc(xfs_bud_zone, KM_SLEEP);
+ budp = kmem_zone_zalloc(xfs_bud_zone, 0);
xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
&xfs_bud_item_ops);
budp->bud_buip = buip;
@@ -456,7 +456,7 @@ xfs_bui_recover(
if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
xfs_bui_release(buip);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -490,7 +490,7 @@ xfs_bui_recover(
*/
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
xfs_bui_release(buip);
- return -EIO;
+ return -EFSCORRUPTED;
}
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
@@ -525,6 +525,7 @@ xfs_bui_recover(
type = bui_type;
break;
default:
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto err_inode;
}
@@ -542,9 +543,7 @@ xfs_bui_recover(
irec.br_blockcount = count;
irec.br_startoff = bmap->me_startoff;
irec.br_state = state;
- error = xfs_bmap_unmap_extent(tp, ip, &irec);
- if (error)
- goto err_inode;
+ xfs_bmap_unmap_extent(tp, ip, &irec);
}
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 98c6a7a71427..2efd78a9719e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -39,9 +39,9 @@
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
- return (XFS_IS_REALTIME_INODE(ip) ? \
- (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
- XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
+ if (XFS_IS_REALTIME_INODE(ip))
+ return XFS_FSB_TO_BB(ip->i_mount, fsb);
+ return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
/*
@@ -53,15 +53,16 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
*/
int
xfs_zero_extent(
- struct xfs_inode *ip,
- xfs_fsblock_t start_fsb,
- xfs_off_t count_fsb)
+ struct xfs_inode *ip,
+ xfs_fsblock_t start_fsb,
+ xfs_off_t count_fsb)
{
- struct xfs_mount *mp = ip->i_mount;
- xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
- sector_t block = XFS_BB_TO_FSBT(mp, sector);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+ xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
+ sector_t block = XFS_BB_TO_FSBT(mp, sector);
- return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
+ return blkdev_issue_zeroout(target->bt_bdev,
block << (mp->m_super->s_blocksize_bits - 9),
count_fsb << (mp->m_super->s_blocksize_bits - 9),
GFP_NOFS, 0);
@@ -164,13 +165,6 @@ xfs_bmap_rtalloc(
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
-
- /* Zero the extent if we were asked to do so */
- if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
- error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
- if (error)
- return error;
- }
} else {
ap->length = 0;
}
@@ -179,29 +173,6 @@ xfs_bmap_rtalloc(
#endif /* CONFIG_XFS_RT */
/*
- * Check if the endoff is outside the last extent. If so the caller will grow
- * the allocation to a stripe unit boundary. All offsets are considered outside
- * the end of file for an empty fork, so 1 is returned in *eof in that case.
- */
-int
-xfs_bmap_eof(
- struct xfs_inode *ip,
- xfs_fileoff_t endoff,
- int whichfork,
- int *eof)
-{
- struct xfs_bmbt_irec rec;
- int error;
-
- error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
- if (error || *eof)
- return error;
-
- *eof = endoff >= rec.br_startoff + rec.br_blockcount;
- return 0;
-}
-
-/*
* Extent tree block counting routines.
*/
@@ -229,106 +200,6 @@ xfs_bmap_count_leaves(
}
/*
- * Count leaf blocks given a range of extent records originally
- * in btree format.
- */
-STATIC void
-xfs_bmap_disk_count_leaves(
- struct xfs_mount *mp,
- struct xfs_btree_block *block,
- int numrecs,
- xfs_filblks_t *count)
-{
- int b;
- xfs_bmbt_rec_t *frp;
-
- for (b = 1; b <= numrecs; b++) {
- frp = XFS_BMBT_REC_ADDR(mp, block, b);
- *count += xfs_bmbt_disk_get_blockcount(frp);
- }
-}
-
-/*
- * Recursively walks each level of a btree
- * to count total fsblocks in use.
- */
-STATIC int
-xfs_bmap_count_tree(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- struct xfs_ifork *ifp,
- xfs_fsblock_t blockno,
- int levelin,
- xfs_extnum_t *nextents,
- xfs_filblks_t *count)
-{
- int error;
- struct xfs_buf *bp, *nbp;
- int level = levelin;
- __be64 *pp;
- xfs_fsblock_t bno = blockno;
- xfs_fsblock_t nextbno;
- struct xfs_btree_block *block, *nextblock;
- int numrecs;
-
- error = xfs_btree_read_bufl(mp, tp, bno, &bp, XFS_BMAP_BTREE_REF,
- &xfs_bmbt_buf_ops);
- if (error)
- return error;
- *count += 1;
- block = XFS_BUF_TO_BLOCK(bp);
-
- if (--level) {
- /* Not at node above leaves, count this level of nodes */
- nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
- while (nextbno != NULLFSBLOCK) {
- error = xfs_btree_read_bufl(mp, tp, nextbno, &nbp,
- XFS_BMAP_BTREE_REF,
- &xfs_bmbt_buf_ops);
- if (error)
- return error;
- *count += 1;
- nextblock = XFS_BUF_TO_BLOCK(nbp);
- nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
- xfs_trans_brelse(tp, nbp);
- }
-
- /* Dive to the next level */
- pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
- bno = be64_to_cpu(*pp);
- error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
- count);
- if (error) {
- xfs_trans_brelse(tp, bp);
- XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
- XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
- xfs_trans_brelse(tp, bp);
- } else {
- /* count all level 1 nodes and their leaves */
- for (;;) {
- nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
- numrecs = be16_to_cpu(block->bb_numrecs);
- (*nextents) += numrecs;
- xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
- xfs_trans_brelse(tp, bp);
- if (nextbno == NULLFSBLOCK)
- break;
- bno = nextbno;
- error = xfs_btree_read_bufl(mp, tp, bno, &bp,
- XFS_BMAP_BTREE_REF,
- &xfs_bmbt_buf_ops);
- if (error)
- return error;
- *count += 1;
- block = XFS_BUF_TO_BLOCK(bp);
- }
- }
- return 0;
-}
-
-/*
* Count fsblocks of the given fork. Delayed allocation extents are
* not counted towards the totals.
*/
@@ -340,26 +211,19 @@ xfs_bmap_count_blocks(
xfs_extnum_t *nextents,
xfs_filblks_t *count)
{
- struct xfs_mount *mp; /* file system mount structure */
- __be64 *pp; /* pointer to block address */
- struct xfs_btree_block *block; /* current btree block */
- struct xfs_ifork *ifp; /* fork structure */
- xfs_fsblock_t bno; /* block # of "block" */
- int level; /* btree level, for checking */
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_btree_cur *cur;
+ xfs_extlen_t btblocks = 0;
int error;
- bno = NULLFSBLOCK;
- mp = ip->i_mount;
*nextents = 0;
*count = 0;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+
if (!ifp)
return 0;
switch (XFS_IFORK_FORMAT(ip, whichfork)) {
- case XFS_DINODE_FMT_EXTENTS:
- *nextents = xfs_bmap_count_leaves(ifp, count);
- return 0;
case XFS_DINODE_FMT_BTREE:
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(tp, ip, whichfork);
@@ -367,26 +231,23 @@ xfs_bmap_count_blocks(
return error;
}
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ error = xfs_btree_count_blocks(cur, &btblocks);
+ xfs_btree_del_cursor(cur, error);
+ if (error)
+ return error;
+
/*
- * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+ * xfs_btree_count_blocks includes the root block contained in
+ * the inode fork in @btblocks, so subtract one because we're
+ * only interested in allocated disk blocks.
*/
- block = ifp->if_broot;
- level = be16_to_cpu(block->bb_level);
- ASSERT(level > 0);
- pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
- bno = be64_to_cpu(*pp);
- ASSERT(bno != NULLFSBLOCK);
- ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
- ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
-
- error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
- nextents, count);
- if (error) {
- XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
- XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
- return 0;
+ *count += btblocks - 1;
+
+ /* fall through */
+ case XFS_DINODE_FMT_EXTENTS:
+ *nextents = xfs_bmap_count_leaves(ifp, count);
+ break;
}
return 0;
@@ -864,6 +725,7 @@ xfs_alloc_file_space(
xfs_filblks_t allocatesize_fsb;
xfs_extlen_t extsz, temp;
xfs_fileoff_t startoffset_fsb;
+ xfs_fileoff_t endoffset_fsb;
int nimaps;
int quota_flag;
int rt;
@@ -891,7 +753,8 @@ xfs_alloc_file_space(
imapp = &imaps[0];
nimaps = 1;
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
- allocatesize_fsb = XFS_B_TO_FSB(mp, count);
+ endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
+ allocatesize_fsb = endoffset_fsb - startoffset_fsb;
/*
* Allocate file space until done or until there is an error
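The hunk above replaces a plain byte-count conversion with a rounded start/end pair, so the allocated block count covers the whole byte range even when the starting offset is not block aligned. A minimal user-space sketch of the same rounding, assuming an illustrative 4096-byte block size (not read from any filesystem):

/* Sketch of the start/end rounding adopted above; the block size is illustrative. */
#include <stdio.h>
#include <stdint.h>

#define BLKSIZE 4096ULL   /* assumed block size */

static uint64_t b_to_fsbt(uint64_t bytes) { return bytes / BLKSIZE; }                  /* round down */
static uint64_t b_to_fsb(uint64_t bytes)  { return (bytes + BLKSIZE - 1) / BLKSIZE; }  /* round up   */

int main(void)
{
	uint64_t offset = 4095, count = 8194;   /* neither is block aligned */
	uint64_t start_fsb = b_to_fsbt(offset);
	uint64_t end_fsb = b_to_fsb(offset + count);

	/* Rounding only the length undercounts when the range straddles block edges. */
	printf("old: %llu blocks, new: %llu blocks\n",
	       (unsigned long long)b_to_fsb(count),
	       (unsigned long long)(end_fsb - start_fsb));
	return 0;
}

With offset 4095 and length 8194 the old conversion yields 3 blocks while the rounded start/end pair correctly spans 4.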
@@ -962,8 +825,8 @@ xfs_alloc_file_space(
xfs_trans_ijoin(tp, ip, 0);
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
- allocatesize_fsb, alloc_type, resblks,
- imapp, &nimaps);
+ allocatesize_fsb, alloc_type, 0, imapp,
+ &nimaps);
if (error)
goto error0;
@@ -1037,6 +900,7 @@ out_trans_cancel:
goto out_unlock;
}
+/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
struct xfs_inode *ip,
@@ -1048,9 +912,6 @@ xfs_flush_unmap_range(
xfs_off_t rounding, start, end;
int error;
- /* wait for the completion of any pending DIOs */
- inode_dio_wait(inode);
-
rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
start = round_down(offset, rounding);
end = round_up(offset + len, rounding) - 1;
@@ -1082,10 +943,6 @@ xfs_free_file_space(
if (len <= 0) /* if nothing being freed */
return 0;
- error = xfs_flush_unmap_range(ip, offset, len);
- if (error)
- return error;
-
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
@@ -1111,7 +968,8 @@ xfs_free_file_space(
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
- error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+ error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
+ &xfs_buffered_write_iomap_ops);
if (error)
return error;
@@ -1129,43 +987,6 @@ xfs_free_file_space(
return error;
}
-/*
- * Preallocate and zero a range of a file. This mechanism has the allocation
- * semantics of fallocate and in addition converts data in the range to zeroes.
- */
-int
-xfs_zero_file_space(
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_off_t len)
-{
- struct xfs_mount *mp = ip->i_mount;
- uint blksize;
- int error;
-
- trace_xfs_zero_file_space(ip);
-
- blksize = 1 << mp->m_sb.sb_blocklog;
-
- /*
- * Punch a hole and prealloc the range. We use hole punch rather than
- * unwritten extent conversion for two reasons:
- *
- * 1.) Hole punch handles partial block zeroing for us.
- *
- * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
- * by virtue of the hole punch.
- */
- error = xfs_free_file_space(ip, offset, len);
- if (error || xfs_is_always_cow_inode(ip))
- return error;
-
- return xfs_alloc_file_space(ip, round_down(offset, blksize),
- round_up(offset + len, blksize) -
- round_down(offset, blksize),
- XFS_BMAPI_PREALLOC);
-}
-
static int
xfs_prepare_shift(
struct xfs_inode *ip,
@@ -1532,24 +1353,16 @@ xfs_swap_extent_rmap(
trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
/* Remove the mapping from the donor file. */
- error = xfs_bmap_unmap_extent(tp, tip, &uirec);
- if (error)
- goto out;
+ xfs_bmap_unmap_extent(tp, tip, &uirec);
/* Remove the mapping from the source file. */
- error = xfs_bmap_unmap_extent(tp, ip, &irec);
- if (error)
- goto out;
+ xfs_bmap_unmap_extent(tp, ip, &irec);
/* Map the donor file's blocks into the source file. */
- error = xfs_bmap_map_extent(tp, ip, &uirec);
- if (error)
- goto out;
+ xfs_bmap_map_extent(tp, ip, &uirec);
/* Map the source file's blocks into the donor file. */
- error = xfs_bmap_map_extent(tp, tip, &irec);
- if (error)
- goto out;
+ xfs_bmap_map_extent(tp, tip, &irec);
error = xfs_defer_finish(tpp);
tp = *tpp;
@@ -1756,6 +1569,14 @@ xfs_swap_extents(
goto out_unlock;
}
+ error = xfs_qm_dqattach(ip);
+ if (error)
+ goto out_unlock;
+
+ error = xfs_qm_dqattach(tip);
+ if (error)
+ goto out_unlock;
+
error = xfs_swap_extent_flush(ip);
if (error)
goto out_unlock;
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 7a78229cf1a7..9f993168b55b 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -30,8 +30,6 @@ xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
}
#endif /* CONFIG_XFS_RT */
-int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
- int whichfork, int *eof);
int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
xfs_fileoff_t start_fsb, xfs_fileoff_t length);
@@ -59,8 +57,6 @@ int xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t len, int alloc_type);
int xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t len);
-int xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
- xfs_off_t len);
int xfs_collapse_file_space(struct xfs_inode *, xfs_off_t offset,
xfs_off_t len);
int xfs_insert_file_space(struct xfs_inode *, xfs_off_t offset,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index ca0849043f54..a0229c368e78 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -238,7 +238,7 @@ _xfs_buf_alloc(
*/
error = xfs_buf_get_maps(bp, nmaps);
if (error) {
- kmem_zone_free(xfs_buf_zone, bp);
+ kmem_cache_free(xfs_buf_zone, bp);
return NULL;
}
@@ -304,7 +304,7 @@ _xfs_buf_free_pages(
* The buffer must not be on any hash - use xfs_buf_rele instead for
* hashed and refcounted buffers
*/
-void
+static void
xfs_buf_free(
xfs_buf_t *bp)
{
@@ -328,7 +328,7 @@ xfs_buf_free(
kmem_free(bp->b_addr);
_xfs_buf_free_pages(bp);
xfs_buf_free_maps(bp);
- kmem_zone_free(xfs_buf_zone, bp);
+ kmem_cache_free(xfs_buf_zone, bp);
}
/*
@@ -345,6 +345,15 @@ xfs_buf_allocate_memory(
unsigned short page_count, i;
xfs_off_t start, end;
int error;
+ xfs_km_flags_t kmflag_mask = 0;
+
+ /*
+ * Ensure a zeroed buffer for non-read cases.
+ */
+ if (!(flags & XBF_READ)) {
+ kmflag_mask |= KM_ZERO;
+ gfp_mask |= __GFP_ZERO;
+ }
/*
* for buffers that are contained within a single page, just allocate
@@ -353,7 +362,9 @@ xfs_buf_allocate_memory(
*/
size = BBTOB(bp->b_length);
if (size < PAGE_SIZE) {
- bp->b_addr = kmem_alloc(size, KM_NOFS);
+ int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
+ bp->b_addr = kmem_alloc_io(size, align_mask,
+ KM_NOFS | kmflag_mask);
if (!bp->b_addr) {
/* low memory - use alloc_page loop instead */
goto use_alloc_page;
@@ -368,7 +379,7 @@ xfs_buf_allocate_memory(
}
bp->b_offset = offset_in_page(bp->b_addr);
bp->b_pages = bp->b_page_array;
- bp->b_pages[0] = virt_to_page(bp->b_addr);
+ bp->b_pages[0] = kmem_to_page(bp->b_addr);
bp->b_page_count = 1;
bp->b_flags |= _XBF_KMEM;
return 0;
@@ -450,7 +461,7 @@ _xfs_buf_map_pages(
unsigned nofs_flag;
/*
- * vm_map_ram() will allocate auxillary structures (e.g.
+ * vm_map_ram() will allocate auxiliary structures (e.g.
* pagetables) with GFP_KERNEL, yet we are likely to be under
* GFP_NOFS context here. Hence we need to tell memory reclaim
* that we are in such a context via PF_MEMALLOC_NOFS to prevent
@@ -938,7 +949,7 @@ xfs_buf_get_uncached(
_xfs_buf_free_pages(bp);
fail_free_buf:
xfs_buf_free_maps(bp);
- kmem_zone_free(xfs_buf_zone, bp);
+ kmem_cache_free(xfs_buf_zone, bp);
fail:
return NULL;
}
@@ -1250,8 +1261,7 @@ xfs_buf_ioapply_map(
int map,
int *buf_offset,
int *count,
- int op,
- int op_flags)
+ int op)
{
int page_index;
int total_nr_pages = bp->b_page_count;
@@ -1286,7 +1296,7 @@ next_chunk:
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
for (; size && nr_pages; nr_pages--, page_index++) {
int rbytes, nbytes = PAGE_SIZE - offset;
@@ -1331,7 +1341,6 @@ _xfs_buf_ioapply(
{
struct blk_plug plug;
int op;
- int op_flags = 0;
int offset;
int size;
int i;
@@ -1373,15 +1382,14 @@ _xfs_buf_ioapply(
dump_stack();
}
}
- } else if (bp->b_flags & XBF_READ_AHEAD) {
- op = REQ_OP_READ;
- op_flags = REQ_RAHEAD;
} else {
op = REQ_OP_READ;
+ if (bp->b_flags & XBF_READ_AHEAD)
+ op |= REQ_RAHEAD;
}
/* we only use the buffer cache for meta-data */
- op_flags |= REQ_META;
+ op |= REQ_META;
/*
* Walk all the vectors issuing IO on them. Set up the initial offset
@@ -1393,7 +1401,7 @@ _xfs_buf_ioapply(
size = BBTOB(bp->b_length);
blk_start_plug(&plug);
for (i = 0; i < bp->b_map_count; i++) {
- xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
+ xfs_buf_ioapply_map(bp, i, &offset, &size, op);
if (bp->b_error)
break;
if (size <= 0)
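The hunk above drops the separate op_flags variable: the request opcode and its modifier bits (REQ_RAHEAD, REQ_META) are now carried together in bio->bi_opf. A toy sketch of the idea, using made-up bit values rather than the kernel's actual REQ_* encoding:

/* Illustrative packing of an opcode plus modifier flags in one word.
 * Bit positions are invented for this example; only the idea of OR-ing
 * flags into the same field as the opcode mirrors the change above. */
#include <stdio.h>

enum { OP_READ = 0, OP_WRITE = 1, OP_MASK = 0xff };
enum { FLAG_RAHEAD = 1u << 8, FLAG_META = 1u << 9 };

int main(void)
{
	unsigned int opf = OP_READ;     /* start with the bare operation */
	opf |= FLAG_RAHEAD;             /* readahead hint */
	opf |= FLAG_META;               /* metadata I/O */

	printf("op=%u rahead=%d meta=%d\n", opf & OP_MASK,
	       !!(opf & FLAG_RAHEAD), !!(opf & FLAG_META));
	return 0;
}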
@@ -1741,7 +1749,7 @@ xfs_alloc_buftarg(
{
xfs_buftarg_t *btp;
- btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
+ btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
@@ -2052,8 +2060,9 @@ xfs_buf_delwri_pushbuf(
int __init
xfs_buf_init(void)
{
- xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
- KM_ZONE_HWALIGN, NULL);
+ xfs_buf_zone = kmem_cache_create("xfs_buf",
+ sizeof(struct xfs_buf), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
if (!xfs_buf_zone)
goto out;
@@ -2066,7 +2075,7 @@ xfs_buf_init(void)
void
xfs_buf_terminate(void)
{
- kmem_zone_destroy(xfs_buf_zone);
+ kmem_cache_destroy(xfs_buf_zone);
}
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
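This file, like most of the series, converts the XFS kmem_zone wrappers to plain slab-cache calls (kmem_cache_create / kmem_cache_free / kmem_cache_destroy) and drops KM_SLEEP in favour of 0. A hedged kernel-style sketch of the pattern, using a hypothetical my_item structure rather than any real XFS type:

/* Sketch of the kmem_zone_* -> kmem_cache_* conversion; "my_item" is hypothetical. */
#include <linux/errno.h>
#include <linux/slab.h>

struct my_item {
	int id;
};

static struct kmem_cache *my_item_cache;

static int my_item_cache_init(void)
{
	/* was: kmem_zone_init_flags(sizeof(...), "my_item", KM_ZONE_HWALIGN, NULL) */
	my_item_cache = kmem_cache_create("my_item", sizeof(struct my_item),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	return my_item_cache ? 0 : -ENOMEM;
}

static struct my_item *my_item_alloc(void)
{
	/* was: kmem_zone_zalloc(zone, KM_SLEEP); a 0 flag in the wrappers now means "may sleep" */
	return kmem_cache_zalloc(my_item_cache, GFP_KERNEL);
}

static void my_item_free(struct my_item *item)
{
	/* was: kmem_zone_free(zone, item) */
	kmem_cache_free(my_item_cache, item);
}

static void my_item_cache_exit(void)
{
	/* was: kmem_zone_destroy(zone) */
	kmem_cache_destroy(my_item_cache);
}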
@@ -2096,7 +2105,7 @@ xfs_verify_magic(
int idx;
idx = xfs_sb_version_hascrc(&mp->m_sb);
- if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])))
+ if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
return false;
return dmagic == bp->b_ops->magic[idx];
}
@@ -2114,7 +2123,7 @@ xfs_verify_magic16(
int idx;
idx = xfs_sb_version_hascrc(&mp->m_sb);
- if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])))
+ if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
return false;
return dmagic == bp->b_ops->magic16[idx];
}
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index c6e57a3f409e..56e081dd1d96 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -244,7 +244,6 @@ int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
void xfs_buf_hold(struct xfs_buf *bp);
/* Releasing Buffers */
-extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);
/* Locking and Unlocking Buffers */
@@ -350,6 +349,12 @@ extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
+static inline int
+xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
+{
+ return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
+}
+
int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);
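The new xfs_buftarg_dma_alignment() helper reports the block device's DMA alignment so that sub-page buffers obtained from kmem_alloc_io() can honour it. A small user-space sketch of checking an address against such an alignment mask; the 512-byte requirement is only an assumed example:

/* Checking a buffer address against a DMA alignment mask (illustrative). */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	size_t align_mask = 511;        /* assumed: device requires 512-byte alignment */
	void *buf;

	/* posix_memalign guarantees the requested alignment (mask + 1). */
	if (posix_memalign(&buf, align_mask + 1, 4096))
		return 1;

	if ((uintptr_t)buf & align_mask)
		printf("misaligned buffer, would need a bounce copy\n");
	else
		printf("buffer %p is DMA aligned\n", buf);

	free(buf);
	return 0;
}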
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 7dcaec54a20b..3458a1264a3f 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -702,7 +702,7 @@ xfs_buf_item_get_format(
}
bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
- KM_SLEEP);
+ 0);
if (!bip->bli_formats)
return -ENOMEM;
return 0;
@@ -747,7 +747,7 @@ xfs_buf_item_init(
return 0;
}
- bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
+ bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
bip->bli_buf = bp;
@@ -763,7 +763,7 @@ xfs_buf_item_init(
error = xfs_buf_item_get_format(bip, bp->b_map_count);
ASSERT(error == 0);
if (error) { /* to stop gcc throwing set-but-unused warnings */
- kmem_zone_free(xfs_buf_item_zone, bip);
+ kmem_cache_free(xfs_buf_item_zone, bip);
return error;
}
@@ -851,7 +851,7 @@ xfs_buf_item_log_segment(
* first_bit and last_bit.
*/
while ((bits_to_set - bits_set) >= NBWORD) {
- *wordp |= 0xffffffff;
+ *wordp = 0xffffffff;
bits_set += NBWORD;
wordp++;
}
@@ -939,7 +939,7 @@ xfs_buf_item_free(
{
xfs_buf_item_free_format(bip);
kmem_free(bip->bli_item.li_lv_shadow);
- kmem_zone_free(xfs_buf_item_zone, bip);
+ kmem_cache_free(xfs_buf_item_zone, bip);
}
/*
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 283df898dd9f..0d3b640cf1cc 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -17,6 +17,7 @@
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
+#include "xfs_error.h"
/*
* Directory file type support functions
@@ -47,6 +48,7 @@ xfs_dir2_sf_getdents(
{
int i; /* shortform entry number */
struct xfs_inode *dp = args->dp; /* incore directory inode */
+ struct xfs_mount *mp = dp->i_mount;
xfs_dir2_dataptr_t off; /* current entry's offset */
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
@@ -68,15 +70,15 @@ xfs_dir2_sf_getdents(
return 0;
/*
- * Precalculate offsets for . and .. as we will always need them.
- *
- * XXX(hch): the second argument is sometimes 0 and sometimes
- * geo->datablk
+ * Precalculate offsets for "." and ".." as we will always need them.
+ * This relies on the fact that directories always start with the
+ * entries for "." and "..".
*/
dot_offset = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
- dp->d_ops->data_dot_offset);
+ geo->data_entry_offset);
dotdot_offset = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
- dp->d_ops->data_dotdot_offset);
+ geo->data_entry_offset +
+ xfs_dir2_data_entsize(mp, sizeof(".") - 1));
/*
* Put . entry unless we're starting past it.
@@ -91,7 +93,7 @@ xfs_dir2_sf_getdents(
* Put .. entry unless we're starting past it.
*/
if (ctx->pos <= dotdot_offset) {
- ino = dp->d_ops->sf_get_parent_ino(sfp);
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
ctx->pos = dotdot_offset & 0x7fffffff;
if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
return 0;
@@ -108,17 +110,21 @@ xfs_dir2_sf_getdents(
xfs_dir2_sf_get_offset(sfep));
if (ctx->pos > off) {
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
continue;
}
- ino = dp->d_ops->sf_get_ino(sfp, sfep);
- filetype = dp->d_ops->sf_get_ftype(sfep);
+ ino = xfs_dir2_sf_get_ino(mp, sfp, sfep);
+ filetype = xfs_dir2_sf_get_ftype(mp, sfep);
ctx->pos = off & 0x7fffffff;
+ if (XFS_IS_CORRUPT(dp->i_mount,
+ !xfs_dir2_namecheck(sfep->name,
+ sfep->namelen)))
+ return -EFSCORRUPTED;
if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
- xfs_dir3_get_dtype(dp->i_mount, filetype)))
+ xfs_dir3_get_dtype(mp, filetype)))
return 0;
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
}
ctx->pos = xfs_dir2_db_off_to_dataptr(geo, geo->datablk + 1, 0) &
@@ -135,17 +141,14 @@ xfs_dir2_block_getdents(
struct dir_context *ctx)
{
struct xfs_inode *dp = args->dp; /* incore directory inode */
- xfs_dir2_data_hdr_t *hdr; /* block header */
struct xfs_buf *bp; /* buffer for block */
- xfs_dir2_data_entry_t *dep; /* block data entry */
- xfs_dir2_data_unused_t *dup; /* block unused entry */
- char *endptr; /* end of the data entries */
int error; /* error return value */
- char *ptr; /* current data entry */
int wantoff; /* starting block offset */
xfs_off_t cook;
struct xfs_da_geometry *geo = args->geo;
int lock_mode;
+ unsigned int offset;
+ unsigned int end;
/*
* If the block number in the offset is out of range, we're done.
@@ -164,56 +167,55 @@ xfs_dir2_block_getdents(
* We'll skip entries before this.
*/
wantoff = xfs_dir2_dataptr_to_off(geo, ctx->pos);
- hdr = bp->b_addr;
xfs_dir3_data_check(dp, bp);
- /*
- * Set up values for the loop.
- */
- ptr = (char *)dp->d_ops->data_entry_p(hdr);
- endptr = xfs_dir3_data_endp(geo, hdr);
/*
* Loop over the data portion of the block.
* Each object is a real entry (dep) or an unused one (dup).
*/
- while (ptr < endptr) {
+ offset = geo->data_entry_offset;
+ end = xfs_dir3_data_end_offset(geo, bp->b_addr);
+ while (offset < end) {
+ struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
+ struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
uint8_t filetype;
- dup = (xfs_dir2_data_unused_t *)ptr;
/*
* Unused, skip it.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- ptr += be16_to_cpu(dup->length);
+ offset += be16_to_cpu(dup->length);
continue;
}
- dep = (xfs_dir2_data_entry_t *)ptr;
-
/*
* Bump pointer for the next iteration.
*/
- ptr += dp->d_ops->data_entsize(dep->namelen);
+ offset += xfs_dir2_data_entsize(dp->i_mount, dep->namelen);
+
/*
* The entry is before the desired starting point, skip it.
*/
- if ((char *)dep - (char *)hdr < wantoff)
+ if (offset < wantoff)
continue;
- cook = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
- (char *)dep - (char *)hdr);
+ cook = xfs_dir2_db_off_to_dataptr(geo, geo->datablk, offset);
ctx->pos = cook & 0x7fffffff;
- filetype = dp->d_ops->data_get_ftype(dep);
+ filetype = xfs_dir2_data_get_ftype(dp->i_mount, dep);
/*
* If it didn't fit, set the final offset to here & return.
*/
+ if (XFS_IS_CORRUPT(dp->i_mount,
+ !xfs_dir2_namecheck(dep->name,
+ dep->namelen))) {
+ error = -EFSCORRUPTED;
+ goto out_rele;
+ }
if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
be64_to_cpu(dep->inumber),
- xfs_dir3_get_dtype(dp->i_mount, filetype))) {
- xfs_trans_brelse(args->trans, bp);
- return 0;
- }
+ xfs_dir3_get_dtype(dp->i_mount, filetype)))
+ goto out_rele;
}
/*
@@ -222,8 +224,9 @@ xfs_dir2_block_getdents(
*/
ctx->pos = xfs_dir2_db_off_to_dataptr(geo, geo->datablk + 1, 0) &
0x7fffffff;
+out_rele:
xfs_trans_brelse(args->trans, bp);
- return 0;
+ return error;
}
/*
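All of the getdents paths store the directory position as a (block, offset) cookie masked with 0x7fffffff so it stays a positive 31-bit f_pos. A toy encode/decode of such a cookie; the 12-bit offset field below is an assumption for the example, not the real xfs_dir2_db_off_to_dataptr() layout, which depends on the directory geometry:

/* Toy 31-bit directory cookie: high bits = block, low bits = offset in block. */
#include <stdio.h>
#include <stdint.h>

#define OFF_BITS 12u    /* assumed width of the in-block offset field */
#define COOKIE(db, off) ((((uint32_t)(db) << OFF_BITS) | (off)) & 0x7fffffff)

int main(void)
{
	uint32_t cookie = COOKIE(3, 0x150);   /* entry at offset 0x150 in block 3 */

	printf("cookie=0x%x block=%u offset=0x%x\n",
	       cookie, cookie >> OFF_BITS, cookie & ((1u << OFF_BITS) - 1));
	return 0;
}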
@@ -276,7 +279,7 @@ xfs_dir2_leaf_readbuf(
new_off = xfs_dir2_da_to_byte(geo, map.br_startoff);
if (new_off > *cur_off)
*cur_off = new_off;
- error = xfs_dir3_data_read(args->trans, dp, map.br_startoff, -1, &bp);
+ error = xfs_dir3_data_read(args->trans, dp, map.br_startoff, 0, &bp);
if (error)
goto out;
@@ -311,7 +314,8 @@ xfs_dir2_leaf_readbuf(
break;
}
if (next_ra > *ra_blk) {
- xfs_dir3_data_readahead(dp, next_ra, -2);
+ xfs_dir3_data_readahead(dp, next_ra,
+ XFS_DABUF_MAP_HOLE_OK);
*ra_blk = next_ra;
}
ra_want -= geo->fsbcount;
@@ -343,17 +347,17 @@ xfs_dir2_leaf_getdents(
size_t bufsize)
{
struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *bp = NULL; /* data block buffer */
- xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_data_entry_t *dep; /* data entry */
xfs_dir2_data_unused_t *dup; /* unused entry */
- char *ptr = NULL; /* pointer to current data */
struct xfs_da_geometry *geo = args->geo;
xfs_dablk_t rablk = 0; /* current readahead block */
xfs_dir2_off_t curoff; /* current overall offset */
int length; /* temporary length value */
int byteoff; /* offset in current block */
int lock_mode;
+ unsigned int offset = 0;
int error = 0; /* error return value */
/*
@@ -380,7 +384,7 @@ xfs_dir2_leaf_getdents(
* If we have no buffer, or we're off the end of the
* current buffer, need to get another one.
*/
- if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) {
+ if (!bp || offset >= geo->blksize) {
if (bp) {
xfs_trans_brelse(args->trans, bp);
bp = NULL;
@@ -393,36 +397,35 @@ xfs_dir2_leaf_getdents(
if (error || !bp)
break;
- hdr = bp->b_addr;
xfs_dir3_data_check(dp, bp);
/*
* Find our position in the block.
*/
- ptr = (char *)dp->d_ops->data_entry_p(hdr);
+ offset = geo->data_entry_offset;
byteoff = xfs_dir2_byte_to_off(geo, curoff);
/*
* Skip past the header.
*/
if (byteoff == 0)
- curoff += dp->d_ops->data_entry_offset;
+ curoff += geo->data_entry_offset;
/*
* Skip past entries until we reach our offset.
*/
else {
- while ((char *)ptr - (char *)hdr < byteoff) {
- dup = (xfs_dir2_data_unused_t *)ptr;
+ while (offset < byteoff) {
+ dup = bp->b_addr + offset;
if (be16_to_cpu(dup->freetag)
== XFS_DIR2_DATA_FREE_TAG) {
length = be16_to_cpu(dup->length);
- ptr += length;
+ offset += length;
continue;
}
- dep = (xfs_dir2_data_entry_t *)ptr;
- length =
- dp->d_ops->data_entsize(dep->namelen);
- ptr += length;
+ dep = bp->b_addr + offset;
+ length = xfs_dir2_data_entsize(mp,
+ dep->namelen);
+ offset += length;
}
/*
* Now set our real offset.
@@ -430,32 +433,38 @@ xfs_dir2_leaf_getdents(
curoff =
xfs_dir2_db_off_to_byte(geo,
xfs_dir2_byte_to_db(geo, curoff),
- (char *)ptr - (char *)hdr);
- if (ptr >= (char *)hdr + geo->blksize) {
+ offset);
+ if (offset >= geo->blksize)
continue;
- }
}
}
+
/*
- * We have a pointer to an entry.
- * Is it a live one?
+ * We have a pointer to an entry. Is it a live one?
*/
- dup = (xfs_dir2_data_unused_t *)ptr;
+ dup = bp->b_addr + offset;
+
/*
* No, it's unused, skip over it.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
length = be16_to_cpu(dup->length);
- ptr += length;
+ offset += length;
curoff += length;
continue;
}
- dep = (xfs_dir2_data_entry_t *)ptr;
- length = dp->d_ops->data_entsize(dep->namelen);
- filetype = dp->d_ops->data_get_ftype(dep);
+ dep = bp->b_addr + offset;
+ length = xfs_dir2_data_entsize(mp, dep->namelen);
+ filetype = xfs_dir2_data_get_ftype(mp, dep);
ctx->pos = xfs_dir2_byte_to_dataptr(curoff) & 0x7fffffff;
+ if (XFS_IS_CORRUPT(dp->i_mount,
+ !xfs_dir2_namecheck(dep->name,
+ dep->namelen))) {
+ error = -EFSCORRUPTED;
+ break;
+ }
if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
be64_to_cpu(dep->inumber),
xfs_dir3_get_dtype(dp->i_mount, filetype)))
@@ -464,7 +473,7 @@ xfs_dir2_leaf_getdents(
/*
* Advance to next entry in the block.
*/
- ptr += length;
+ offset += length;
curoff += length;
/* bufsize may have just been a guess; don't go negative */
bufsize = bufsize > length ? bufsize - length : 0;
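The shortform, block and leaf getdents paths above all gain an xfs_dir2_namecheck() call, so corrupt on-disk names are reported as -EFSCORRUPTED instead of being handed to dir_emit(). An approximation of that kind of check (non-empty, no '/' and no NUL bytes); the real helper may apply further rules:

/* Approximation of a directory name sanity check. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool namecheck(const unsigned char *name, size_t len)
{
	if (len == 0)
		return false;
	for (size_t i = 0; i < len; i++) {
		if (name[i] == '/' || name[i] == '\0')
			return false;
	}
	return true;
}

int main(void)
{
	const unsigned char good[] = "file.txt";
	const unsigned char bad[]  = { 'a', '/', 'b' };

	printf("good: %d, bad: %d\n",
	       namecheck(good, sizeof(good) - 1), namecheck(bad, sizeof(bad)));
	return 0;
}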
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 8ec7aab89044..cae613620175 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -13,6 +13,7 @@
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
+#include "xfs_discard.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
@@ -70,7 +71,10 @@ xfs_trim_extents(
error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
if (error)
goto out_del_cursor;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_del_cursor);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_del_cursor;
+ }
ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
/*
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index fb1ad4483081..2bff21ca9d78 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -48,7 +48,7 @@ static struct lock_class_key xfs_dquot_project_class;
*/
void
xfs_qm_dqdestroy(
- xfs_dquot_t *dqp)
+ struct xfs_dquot *dqp)
{
ASSERT(list_empty(&dqp->q_lru));
@@ -56,7 +56,7 @@ xfs_qm_dqdestroy(
mutex_destroy(&dqp->q_qlock);
XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
- kmem_zone_free(xfs_qm_dqzone, dqp);
+ kmem_cache_free(xfs_qm_dqzone, dqp);
}
/*
@@ -113,8 +113,8 @@ xfs_qm_adjust_dqlimits(
*/
void
xfs_qm_adjust_dqtimers(
- xfs_mount_t *mp,
- xfs_disk_dquot_t *d)
+ struct xfs_mount *mp,
+ struct xfs_disk_dquot *d)
{
ASSERT(d->d_id);
@@ -305,8 +305,8 @@ xfs_dquot_disk_alloc(
/* Create the block mapping. */
xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
- XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
- XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps);
+ XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
+ &nmaps);
if (error)
return error;
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -440,7 +440,7 @@ xfs_dquot_alloc(
{
struct xfs_dquot *dqp;
- dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
+ dqp = kmem_zone_zalloc(xfs_qm_dqzone, 0);
dqp->dq_flags = type;
dqp->q_core.d_id = cpu_to_be32(id);
@@ -497,7 +497,7 @@ xfs_dquot_from_disk(
struct xfs_disk_dquot *ddqp = bp->b_addr + dqp->q_bufoffset;
/* copy everything from disk dquot to the incore dquot */
- memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
+ memcpy(&dqp->q_core, ddqp, sizeof(struct xfs_disk_dquot));
/*
* Reservation counters are defined as reservation plus current usage
@@ -833,7 +833,7 @@ xfs_qm_id_for_quotatype(
case XFS_DQ_GROUP:
return ip->i_d.di_gid;
case XFS_DQ_PROJ:
- return xfs_get_projid(ip);
+ return ip->i_d.di_projid;
}
ASSERT(0);
return 0;
@@ -989,7 +989,7 @@ xfs_qm_dqput(
*/
void
xfs_qm_dqrele(
- xfs_dquot_t *dqp)
+ struct xfs_dquot *dqp)
{
if (!dqp)
return;
@@ -1018,8 +1018,8 @@ xfs_qm_dqflush_done(
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
- xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip;
- xfs_dquot_t *dqp = qip->qli_dquot;
+ struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip;
+ struct xfs_dquot *dqp = qip->qli_dquot;
struct xfs_ail *ailp = lip->li_ailp;
/*
@@ -1126,11 +1126,11 @@ xfs_qm_dqflush(
xfs_buf_relse(bp);
xfs_dqfunlock(dqp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return -EIO;
+ return -EFSCORRUPTED;
}
/* This is the only portion of data that needs to persist */
- memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
+ memcpy(ddqp, &dqp->q_core, sizeof(struct xfs_disk_dquot));
/*
* Clear the dirty field and remember the flush lsn for later use.
@@ -1188,8 +1188,8 @@ out_unlock:
*/
void
xfs_dqlock2(
- xfs_dquot_t *d1,
- xfs_dquot_t *d2)
+ struct xfs_dquot *d1,
+ struct xfs_dquot *d2)
{
if (d1 && d2) {
ASSERT(d1 != d2);
@@ -1211,20 +1211,22 @@ xfs_dqlock2(
int __init
xfs_qm_init(void)
{
- xfs_qm_dqzone =
- kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
+ xfs_qm_dqzone = kmem_cache_create("xfs_dquot",
+ sizeof(struct xfs_dquot),
+ 0, 0, NULL);
if (!xfs_qm_dqzone)
goto out;
- xfs_qm_dqtrxzone =
- kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
+ xfs_qm_dqtrxzone = kmem_cache_create("xfs_dqtrx",
+ sizeof(struct xfs_dquot_acct),
+ 0, 0, NULL);
if (!xfs_qm_dqtrxzone)
goto out_free_dqzone;
return 0;
out_free_dqzone:
- kmem_zone_destroy(xfs_qm_dqzone);
+ kmem_cache_destroy(xfs_qm_dqzone);
out:
return -ENOMEM;
}
@@ -1232,14 +1234,14 @@ out:
void
xfs_qm_exit(void)
{
- kmem_zone_destroy(xfs_qm_dqtrxzone);
- kmem_zone_destroy(xfs_qm_dqzone);
+ kmem_cache_destroy(xfs_qm_dqtrxzone);
+ kmem_cache_destroy(xfs_qm_dqzone);
}
/*
* Iterate every dquot of a particular type. The caller must ensure that the
* particular quota type is active. iter_fn can return negative error codes,
- * or XFS_ITER_ABORT to indicate that it wants to stop iterating.
+ * or -ECANCELED to indicate that it wants to stop iterating.
*/
int
xfs_qm_dqiterate(
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 4fe85709d55d..fe3e46df604b 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -30,33 +30,36 @@ enum {
/*
* The incore dquot structure
*/
-typedef struct xfs_dquot {
- uint dq_flags; /* various flags (XFS_DQ_*) */
- struct list_head q_lru; /* global free list of dquots */
- struct xfs_mount*q_mount; /* filesystem this relates to */
- uint q_nrefs; /* # active refs from inodes */
- xfs_daddr_t q_blkno; /* blkno of dquot buffer */
- int q_bufoffset; /* off of dq in buffer (# dquots) */
- xfs_fileoff_t q_fileoffset; /* offset in quotas file */
-
- xfs_disk_dquot_t q_core; /* actual usage & quotas */
- xfs_dq_logitem_t q_logitem; /* dquot log item */
- xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
- xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */
- xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
- xfs_qcnt_t q_prealloc_lo_wmark;/* prealloc throttle wmark */
- xfs_qcnt_t q_prealloc_hi_wmark;/* prealloc disabled wmark */
- int64_t q_low_space[XFS_QLOWSP_MAX];
- struct mutex q_qlock; /* quota lock */
- struct completion q_flush; /* flush completion queue */
- atomic_t q_pincount; /* dquot pin count */
- wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
-} xfs_dquot_t;
+struct xfs_dquot {
+ uint dq_flags;
+ struct list_head q_lru;
+ struct xfs_mount *q_mount;
+ uint q_nrefs;
+ xfs_daddr_t q_blkno;
+ int q_bufoffset;
+ xfs_fileoff_t q_fileoffset;
+
+ struct xfs_disk_dquot q_core;
+ struct xfs_dq_logitem q_logitem;
+ /* total regular nblks used+reserved */
+ xfs_qcnt_t q_res_bcount;
+ /* total inos allocd+reserved */
+ xfs_qcnt_t q_res_icount;
+ /* total realtime blks used+reserved */
+ xfs_qcnt_t q_res_rtbcount;
+ xfs_qcnt_t q_prealloc_lo_wmark;
+ xfs_qcnt_t q_prealloc_hi_wmark;
+ int64_t q_low_space[XFS_QLOWSP_MAX];
+ struct mutex q_qlock;
+ struct completion q_flush;
+ atomic_t q_pincount;
+ struct wait_queue_head q_pinwait;
+};
/*
* Lock hierarchy for q_qlock:
* XFS_QLOCK_NORMAL is the implicit default,
- * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
+ * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
*/
enum {
XFS_QLOCK_NORMAL = 0,
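The lock-hierarchy comment above documents how xfs_dqlock2() avoids ABBA deadlocks: the two dquot locks are always taken in ascending id order, with the higher id using the nested lockdep subclass. A user-space sketch of the same ordering rule with pthread mutexes:

/* Sketch of "always lock in ascending id order" to avoid ABBA deadlocks. */
#include <pthread.h>
#include <stdio.h>

struct dquot {
	unsigned int id;
	pthread_mutex_t lock;
};

static void dqlock2(struct dquot *d1, struct dquot *d2)
{
	if (d1->id > d2->id) {          /* normalise so d1 has the lower id */
		struct dquot *tmp = d1;
		d1 = d2;
		d2 = tmp;
	}
	pthread_mutex_lock(&d1->lock);  /* "normal" class */
	pthread_mutex_lock(&d2->lock);  /* "nested" class: higher id second */
}

int main(void)
{
	struct dquot a = { 7, PTHREAD_MUTEX_INITIALIZER };
	struct dquot b = { 3, PTHREAD_MUTEX_INITIALIZER };

	dqlock2(&a, &b);                /* same order regardless of argument order */
	printf("locked id %u then id %u\n", b.id, a.id);
	pthread_mutex_unlock(&a.lock);
	pthread_mutex_unlock(&b.lock);
	return 0;
}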
@@ -64,21 +67,21 @@ enum {
};
/*
- * Manage the q_flush completion queue embedded in the dquot. This completion
+ * Manage the q_flush completion queue embedded in the dquot. This completion
* queue synchronizes processes attempting to flush the in-core dquot back to
* disk.
*/
-static inline void xfs_dqflock(xfs_dquot_t *dqp)
+static inline void xfs_dqflock(struct xfs_dquot *dqp)
{
wait_for_completion(&dqp->q_flush);
}
-static inline bool xfs_dqflock_nowait(xfs_dquot_t *dqp)
+static inline bool xfs_dqflock_nowait(struct xfs_dquot *dqp)
{
return try_wait_for_completion(&dqp->q_flush);
}
-static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
+static inline void xfs_dqfunlock(struct xfs_dquot *dqp)
{
complete(&dqp->q_flush);
}
@@ -112,7 +115,7 @@ static inline int xfs_this_quota_on(struct xfs_mount *mp, int type)
}
}
-static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type)
+static inline struct xfs_dquot *xfs_inode_dquot(struct xfs_inode *ip, int type)
{
switch (type & XFS_DQ_ALLTYPES) {
case XFS_DQ_USER:
@@ -147,31 +150,30 @@ static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp)
#define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ)
#define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP)
-extern void xfs_qm_dqdestroy(xfs_dquot_t *);
-extern int xfs_qm_dqflush(struct xfs_dquot *, struct xfs_buf **);
-extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
-extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
- xfs_disk_dquot_t *);
-extern void xfs_qm_adjust_dqlimits(struct xfs_mount *,
- struct xfs_dquot *);
-extern xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip,
- uint type);
-extern int xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
+void xfs_qm_dqdestroy(struct xfs_dquot *dqp);
+int xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp);
+void xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
+void xfs_qm_adjust_dqtimers(struct xfs_mount *mp,
+ struct xfs_disk_dquot *d);
+void xfs_qm_adjust_dqlimits(struct xfs_mount *mp,
+ struct xfs_dquot *d);
+xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip, uint type);
+int xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
uint type, bool can_alloc,
struct xfs_dquot **dqpp);
-extern int xfs_qm_dqget_inode(struct xfs_inode *ip, uint type,
- bool can_alloc,
- struct xfs_dquot **dqpp);
-extern int xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
+int xfs_qm_dqget_inode(struct xfs_inode *ip, uint type,
+ bool can_alloc,
+ struct xfs_dquot **dqpp);
+int xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
uint type, struct xfs_dquot **dqpp);
-extern int xfs_qm_dqget_uncached(struct xfs_mount *mp,
- xfs_dqid_t id, uint type,
- struct xfs_dquot **dqpp);
-extern void xfs_qm_dqput(xfs_dquot_t *);
+int xfs_qm_dqget_uncached(struct xfs_mount *mp,
+ xfs_dqid_t id, uint type,
+ struct xfs_dquot **dqpp);
+void xfs_qm_dqput(struct xfs_dquot *dqp);
-extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
+void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
-extern void xfs_dquot_set_prealloc_limits(struct xfs_dquot *);
+void xfs_dquot_set_prealloc_limits(struct xfs_dquot *);
static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 282ec5af293e..d60647d7197b 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -347,7 +347,7 @@ xfs_qm_qoff_logitem_init(
{
struct xfs_qoff_logitem *qf;
- qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);
+ qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), 0);
xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
diff --git a/fs/xfs/xfs_dquot_item.h b/fs/xfs/xfs_dquot_item.h
index 1aed34ccdabc..3bb19e556ade 100644
--- a/fs/xfs/xfs_dquot_item.h
+++ b/fs/xfs/xfs_dquot_item.h
@@ -11,25 +11,27 @@ struct xfs_trans;
struct xfs_mount;
struct xfs_qoff_logitem;
-typedef struct xfs_dq_logitem {
- struct xfs_log_item qli_item; /* common portion */
- struct xfs_dquot *qli_dquot; /* dquot ptr */
- xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
-} xfs_dq_logitem_t;
+struct xfs_dq_logitem {
+ struct xfs_log_item qli_item; /* common portion */
+ struct xfs_dquot *qli_dquot; /* dquot ptr */
+ xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
+};
-typedef struct xfs_qoff_logitem {
- struct xfs_log_item qql_item; /* common portion */
- struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
+struct xfs_qoff_logitem {
+ struct xfs_log_item qql_item; /* common portion */
+ struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
unsigned int qql_flags;
-} xfs_qoff_logitem_t;
+};
-extern void xfs_qm_dquot_logitem_init(struct xfs_dquot *);
-extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *,
- struct xfs_qoff_logitem *, uint);
-extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *,
- struct xfs_qoff_logitem *, uint);
-extern void xfs_trans_log_quotaoff_item(struct xfs_trans *,
- struct xfs_qoff_logitem *);
+void xfs_qm_dquot_logitem_init(struct xfs_dquot *dqp);
+struct xfs_qoff_logitem *xfs_qm_qoff_logitem_init(struct xfs_mount *mp,
+ struct xfs_qoff_logitem *start,
+ uint flags);
+struct xfs_qoff_logitem *xfs_trans_get_qoff_item(struct xfs_trans *tp,
+ struct xfs_qoff_logitem *startqoff,
+ uint flags);
+void xfs_trans_log_quotaoff_item(struct xfs_trans *tp,
+ struct xfs_qoff_logitem *qlp);
#endif /* __XFS_DQUOT_ITEM_H__ */
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 544c9482a0ef..331765afc53e 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -213,7 +213,7 @@ xfs_errortag_init(
struct xfs_mount *mp)
{
mp->m_errortag = kmem_zalloc(sizeof(unsigned int) * XFS_ERRTAG_MAX,
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
if (!mp->m_errortag)
return -ENOMEM;
@@ -257,7 +257,7 @@ xfs_errortag_test(
xfs_warn_ratelimited(mp,
"Injecting error (%s) at file %s, line %d, on filesystem \"%s\"",
- expression, file, line, mp->m_fsname);
+ expression, file, line, mp->m_super->s_id);
return true;
}
@@ -329,19 +329,40 @@ xfs_corruption_error(
const char *tag,
int level,
struct xfs_mount *mp,
- void *buf,
+ const void *buf,
size_t bufsize,
const char *filename,
int linenum,
xfs_failaddr_t failaddr)
{
- if (level <= xfs_error_level)
+ if (buf && level <= xfs_error_level)
xfs_hex_dump(buf, bufsize);
xfs_error_report(tag, level, mp, filename, linenum, failaddr);
xfs_alert(mp, "Corruption detected. Unmount and run xfs_repair");
}
/*
+ * Complain about the kinds of metadata corruption that we can't detect from a
+ * verifier, such as incorrect inter-block relationship data. Does not set
+ * bp->b_error.
+ */
+void
+xfs_buf_corruption_error(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_mount;
+
+ xfs_alert_tag(mp, XFS_PTAG_VERIFIER_ERROR,
+ "Metadata corruption detected at %pS, %s block 0x%llx",
+ __return_address, bp->b_ops->name, bp->b_bn);
+
+ xfs_alert(mp, "Unmount and run xfs_repair");
+
+ if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
+ xfs_stack_trace();
+}
+
+/*
* Warnings specifically for verifier errors. Differentiate CRC vs. invalid
* values, and omit the stack trace unless the error level is tuned high.
*/
@@ -350,7 +371,7 @@ xfs_buf_verifier_error(
struct xfs_buf *bp,
int error,
const char *name,
- void *buf,
+ const void *buf,
size_t bufsz,
xfs_failaddr_t failaddr)
{
@@ -402,7 +423,7 @@ xfs_inode_verifier_error(
struct xfs_inode *ip,
int error,
const char *name,
- void *buf,
+ const void *buf,
size_t bufsz,
xfs_failaddr_t failaddr)
{
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 602aa7d62b66..31a5d321ba9a 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -12,16 +12,17 @@ extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp,
const char *filename, int linenum,
xfs_failaddr_t failaddr);
extern void xfs_corruption_error(const char *tag, int level,
- struct xfs_mount *mp, void *buf, size_t bufsize,
+ struct xfs_mount *mp, const void *buf, size_t bufsize,
const char *filename, int linenum,
xfs_failaddr_t failaddr);
+void xfs_buf_corruption_error(struct xfs_buf *bp);
extern void xfs_buf_verifier_error(struct xfs_buf *bp, int error,
- const char *name, void *buf, size_t bufsz,
+ const char *name, const void *buf, size_t bufsz,
xfs_failaddr_t failaddr);
extern void xfs_verifier_error(struct xfs_buf *bp, int error,
xfs_failaddr_t failaddr);
extern void xfs_inode_verifier_error(struct xfs_inode *ip, int error,
- const char *name, void *buf, size_t bufsz,
+ const char *name, const void *buf, size_t bufsz,
xfs_failaddr_t failaddr);
#define XFS_ERROR_REPORT(e, lvl, mp) \
@@ -37,32 +38,6 @@ extern void xfs_inode_verifier_error(struct xfs_inode *ip, int error,
/* Dump 128 bytes of any corrupt buffer */
#define XFS_CORRUPTION_DUMP_LEN (128)
-/*
- * Macros to set EFSCORRUPTED & return/branch.
- */
-#define XFS_WANT_CORRUPTED_GOTO(mp, x, l) \
- { \
- int fs_is_ok = (x); \
- ASSERT(fs_is_ok); \
- if (unlikely(!fs_is_ok)) { \
- XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \
- XFS_ERRLEVEL_LOW, mp); \
- error = -EFSCORRUPTED; \
- goto l; \
- } \
- }
-
-#define XFS_WANT_CORRUPTED_RETURN(mp, x) \
- { \
- int fs_is_ok = (x); \
- ASSERT(fs_is_ok); \
- if (unlikely(!fs_is_ok)) { \
- XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \
- XFS_ERRLEVEL_LOW, mp); \
- return -EFSCORRUPTED; \
- } \
- }
-
#ifdef DEBUG
extern int xfs_errortag_init(struct xfs_mount *mp);
extern void xfs_errortag_del(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 0ed68379e551..3991e59cfd18 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -33,7 +33,7 @@ xfs_extent_busy_insert(
struct rb_node **rbp;
struct rb_node *parent = NULL;
- new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_SLEEP);
+ new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
new->agno = agno;
new->bno = bno;
new->length = len;
@@ -367,7 +367,7 @@ restart:
* If this is a metadata allocation, try to reuse the busy
* extent instead of trimming the allocation.
*/
- if (!xfs_alloc_is_userdata(args->datatype) &&
+ if (!(args->datatype & XFS_ALLOC_USERDATA) &&
!(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
if (!xfs_extent_busy_update_extent(args->mp, args->pag,
busyp, fbno, flen,
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 86f6512d6864..6ea847f6e298 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -21,7 +21,7 @@
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_efi_zone;
kmem_zone_t *xfs_efd_zone;
@@ -39,7 +39,7 @@ xfs_efi_item_free(
if (efip->efi_format.efi_nextents > XFS_EFI_MAX_FAST_EXTENTS)
kmem_free(efip);
else
- kmem_zone_free(xfs_efi_zone, efip);
+ kmem_cache_free(xfs_efi_zone, efip);
}
/*
@@ -163,9 +163,9 @@ xfs_efi_init(
if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
size = (uint)(sizeof(xfs_efi_log_item_t) +
((nextents - 1) * sizeof(xfs_extent_t)));
- efip = kmem_zalloc(size, KM_SLEEP);
+ efip = kmem_zalloc(size, 0);
} else {
- efip = kmem_zone_zalloc(xfs_efi_zone, KM_SLEEP);
+ efip = kmem_zone_zalloc(xfs_efi_zone, 0);
}
xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
@@ -228,6 +228,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
}
return 0;
}
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
}
@@ -243,7 +244,7 @@ xfs_efd_item_free(struct xfs_efd_log_item *efdp)
if (efdp->efd_format.efd_nextents > XFS_EFD_MAX_FAST_EXTENTS)
kmem_free(efdp);
else
- kmem_zone_free(xfs_efd_zone, efdp);
+ kmem_cache_free(xfs_efd_zone, efdp);
}
/*
@@ -333,9 +334,9 @@ xfs_trans_get_efd(
if (nextents > XFS_EFD_MAX_FAST_EXTENTS) {
efdp = kmem_zalloc(sizeof(struct xfs_efd_log_item) +
(nextents - 1) * sizeof(struct xfs_extent),
- KM_SLEEP);
+ 0);
} else {
- efdp = kmem_zone_zalloc(xfs_efd_zone, KM_SLEEP);
+ efdp = kmem_zone_zalloc(xfs_efd_zone, 0);
}
xfs_log_item_init(tp->t_mountp, &efdp->efd_item, XFS_LI_EFD,
@@ -624,7 +625,7 @@ xfs_efi_recover(
*/
set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
xfs_efi_release(efip);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 28101bbc0b78..c93250108952 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -28,6 +28,7 @@
#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
+#include <linux/fadvise.h>
static const struct vm_operations_struct xfs_file_vm_ops;
@@ -187,7 +188,8 @@ xfs_file_dio_aio_read(
file_accessed(iocb->ki_filp);
xfs_ilock(ip, XFS_IOLOCK_SHARED);
- ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
+ ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
+ is_sync_kiocb(iocb));
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return ret;
@@ -214,7 +216,7 @@ xfs_file_dax_read(
xfs_ilock(ip, XFS_IOLOCK_SHARED);
}
- ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
+ ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
file_accessed(iocb->ki_filp);
@@ -350,7 +352,7 @@ restart:
trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
- NULL, &xfs_iomap_ops);
+ NULL, &xfs_buffered_write_iomap_ops);
if (error)
return error;
} else
@@ -369,21 +371,23 @@ static int
xfs_dio_write_end_io(
struct kiocb *iocb,
ssize_t size,
+ int error,
unsigned flags)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct xfs_inode *ip = XFS_I(inode);
loff_t offset = iocb->ki_pos;
unsigned int nofs_flag;
- int error = 0;
trace_xfs_end_io_direct_write(ip, offset, size);
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
- if (size <= 0)
- return size;
+ if (error)
+ return error;
+ if (!size)
+ return 0;
/*
* Capture amount written on completion as we can't reliably account
@@ -440,6 +444,10 @@ out:
return error;
}
+static const struct iomap_dio_ops xfs_dio_write_ops = {
+ .end_io = xfs_dio_write_end_io,
+};
+
/*
* xfs_file_dio_aio_write - handle direct IO writes
*
@@ -479,8 +487,7 @@ xfs_file_dio_aio_write(
int unaligned_io = 0;
int iolock;
size_t count = iov_iter_count(from);
- struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp;
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
/* DIO must be aligned to device logical sector size */
if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
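The check above rejects direct I/O whose file position or length is not aligned to the device's logical sector size. A user-space sketch of issuing an aligned O_DIRECT write; the 4096-byte alignment and the file path are assumptions, and the target filesystem must support O_DIRECT:

/* Minimal aligned O_DIRECT write; alignment and path are illustrative. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const size_t align = 4096;              /* assumed: covers common sector sizes */
	void *buf;
	int fd;

	if (posix_memalign(&buf, align, 4096))  /* buffer, length and file offset  */
		return 1;                       /* must all be suitably aligned    */
	memset(buf, 0xab, 4096);

	fd = open("/mnt/scratch/dio-demo", O_CREAT | O_WRONLY | O_DIRECT, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (pwrite(fd, buf, 4096, 0) < 0)       /* offset 0 is trivially aligned */
		perror("pwrite");

	close(fd);
	free(buf);
	return 0;
}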
@@ -540,15 +547,13 @@ xfs_file_dio_aio_write(
}
trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
- ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
-
/*
- * If unaligned, this is the only IO in-flight. If it has not yet
- * completed, wait on it before we release the iolock to prevent
- * subsequent overlapping IO.
+ * If unaligned, this is the only IO in-flight. Wait on it before we
+ * release the iolock to prevent subsequent overlapping IO.
*/
- if (ret == -EIOCBQUEUED && unaligned_io)
- inode_dio_wait(inode);
+ ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
+ &xfs_dio_write_ops,
+ is_sync_kiocb(iocb) || unaligned_io);
out:
xfs_iunlock(ip, iolock);
@@ -587,7 +592,7 @@ xfs_file_dax_write(
count = iov_iter_count(from);
trace_xfs_file_dax_write(ip, count, pos);
- ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
+ ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
i_size_write(inode, iocb->ki_pos);
error = xfs_setfilesize(ip, pos, ret);
@@ -634,7 +639,8 @@ write_retry:
current->backing_dev_info = inode_to_bdi(inode);
trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
- ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
+ ret = iomap_file_buffered_write(iocb, from,
+ &xfs_buffered_write_iomap_ops);
if (likely(ret >= 0))
iocb->ki_pos += ret;
@@ -811,6 +817,36 @@ xfs_file_fallocate(
if (error)
goto out_unlock;
+ /*
+ * Must wait for all AIO to complete before we continue as AIO can
+ * change the file size on completion without holding any locks we
+ * currently hold. We must do this first because AIO can update both
+ * the on disk and in memory inode sizes, and the operations that follow
+ * require the in-memory size to be fully up-to-date.
+ */
+ inode_dio_wait(inode);
+
+ /*
+ * Now AIO and DIO have drained, we flush and (if necessary) invalidate
+ * the cached range over the first operation we are about to run.
+ *
+ * We care about zero and collapse here because they both run a hole
+ * punch over the range first. Because that can zero data, and the range
+ * of invalidation for the shift operations is much larger, we still do
+ * the required flush for collapse in xfs_prepare_shift().
+ *
+ * Insert has the same range requirements as collapse, and we extend the
+ * file first which can zero data. Hence insert has the same
+ * flush/invalidate requirements as collapse and so they are both
+ * handled at the right time by xfs_prepare_shift().
+ */
+ if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
+ FALLOC_FL_COLLAPSE_RANGE)) {
+ error = xfs_flush_unmap_range(ip, offset, len);
+ if (error)
+ goto out_unlock;
+ }
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
error = xfs_free_file_space(ip, offset, len);
if (error)
@@ -874,16 +910,30 @@ xfs_file_fallocate(
}
if (mode & FALLOC_FL_ZERO_RANGE) {
- error = xfs_zero_file_space(ip, offset, len);
+ /*
+ * Punch a hole and prealloc the range. We use a hole
+ * punch rather than unwritten extent conversion for two
+ * reasons:
+ *
+ * 1.) Hole punch handles partial block zeroing for us.
+ * 2.) If prealloc returns ENOSPC, the file range is
+ * still zero-valued by virtue of the hole punch.
+ */
+ unsigned int blksize = i_blocksize(inode);
+
+ trace_xfs_zero_file_space(ip);
+
+ error = xfs_free_file_space(ip, offset, len);
+ if (error)
+ goto out_unlock;
+
+ len = round_up(offset + len, blksize) -
+ round_down(offset, blksize);
+ offset = round_down(offset, blksize);
} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
error = xfs_reflink_unshare(ip, offset, len);
if (error)
goto out_unlock;
-
- if (!xfs_is_always_cow_inode(ip)) {
- error = xfs_alloc_file_space(ip, offset, len,
- XFS_BMAPI_PREALLOC);
- }
} else {
/*
* If always_cow mode we can't use preallocations and
@@ -893,12 +943,14 @@ xfs_file_fallocate(
error = -EOPNOTSUPP;
goto out_unlock;
}
+ }
+ if (!xfs_is_always_cow_inode(ip)) {
error = xfs_alloc_file_space(ip, offset, len,
XFS_BMAPI_PREALLOC);
+ if (error)
+ goto out_unlock;
}
- if (error)
- goto out_unlock;
}
if (file->f_flags & O_DSYNC)
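With xfs_zero_file_space() removed, FALLOC_FL_ZERO_RANGE is open-coded here as a hole punch followed by the common preallocation path below, with the range rounded out to block boundaries. From userspace it remains a single fallocate() call; a minimal example with an assumed scratch file:

/* Zeroing a byte range with fallocate(FALLOC_FL_ZERO_RANGE); the path is illustrative. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/scratch/zero-demo", O_CREAT | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, 1 << 20))             /* 1 MiB file */
		perror("ftruncate");

	/* Zero 8000 bytes at offset 5000; the filesystem may implement this as
	 * a hole punch plus preallocation, as the XFS code above now does. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 5000, 8000))
		perror("fallocate");

	close(fd);
	return 0;
}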
@@ -933,6 +985,30 @@ out_unlock:
return error;
}
+STATIC int
+xfs_file_fadvise(
+ struct file *file,
+ loff_t start,
+ loff_t end,
+ int advice)
+{
+ struct xfs_inode *ip = XFS_I(file_inode(file));
+ int ret;
+ int lockflags = 0;
+
+ /*
+ * Operations creating pages in page cache need protection from hole
+ * punching and similar ops
+ */
+ if (advice == POSIX_FADV_WILLNEED) {
+ lockflags = XFS_IOLOCK_SHARED;
+ xfs_ilock(ip, lockflags);
+ }
+ ret = generic_fadvise(file, start, end, advice);
+ if (lockflags)
+ xfs_iunlock(ip, lockflags);
+ return ret;
+}
STATIC loff_t
xfs_file_remap_range(
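The new xfs_file_fadvise() takes the shared IOLOCK around POSIX_FADV_WILLNEED so readahead cannot race with a concurrent hole punch. From userspace the hint is an ordinary posix_fadvise() call; a minimal example with an assumed path:

/* Issuing a readahead hint with posix_fadvise(); the path is illustrative. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/scratch/fadvise-demo", O_RDONLY);
	int err;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask the kernel to start reading the first 1 MiB into the page cache. */
	err = posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_WILLNEED);
	if (err)
		fprintf(stderr, "posix_fadvise: error %d\n", err);

	close(fd);
	return 0;
}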
@@ -1028,7 +1104,7 @@ xfs_dir_open(
*/
mode = xfs_ilock_data_map_shared(ip);
if (ip->i_d.di_nextents > 0)
- error = xfs_dir3_data_readahead(ip, 0, -1);
+ error = xfs_dir3_data_readahead(ip, 0, 0);
xfs_iunlock(ip, mode);
return error;
}
@@ -1125,12 +1201,16 @@ __xfs_filemap_fault(
if (IS_DAX(inode)) {
pfn_t pfn;
- ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
+ ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
+ (write_fault && !vmf->cow_page) ?
+ &xfs_direct_write_iomap_ops :
+ &xfs_read_iomap_ops);
if (ret & VM_FAULT_NEEDDSYNC)
ret = dax_finish_sync_fault(vmf, pe_size, pfn);
} else {
if (write_fault)
- ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
+ ret = iomap_page_mkwrite(vmf,
+ &xfs_buffered_write_iomap_ops);
else
ret = filemap_fault(vmf);
}
@@ -1194,22 +1274,22 @@ static const struct vm_operations_struct xfs_file_vm_ops = {
STATIC int
xfs_file_mmap(
- struct file *filp,
- struct vm_area_struct *vma)
+ struct file *file,
+ struct vm_area_struct *vma)
{
- struct dax_device *dax_dev;
+ struct inode *inode = file_inode(file);
+ struct xfs_buftarg *target = xfs_inode_buftarg(XFS_I(inode));
- dax_dev = xfs_find_daxdev_for_inode(file_inode(filp));
/*
* We don't support synchronous mappings for non-DAX files and
* for DAX files if underneath dax_device is not synchronous.
*/
- if (!daxdev_mapping_supported(vma, dax_dev))
+ if (!daxdev_mapping_supported(vma, target->bt_daxdev))
return -EOPNOTSUPP;
- file_accessed(filp);
+ file_accessed(file);
vma->vm_ops = &xfs_file_vm_ops;
- if (IS_DAX(file_inode(filp)))
+ if (IS_DAX(inode))
vma->vm_flags |= VM_HUGEPAGE;
return 0;
}
@@ -1232,6 +1312,7 @@ const struct file_operations xfs_file_operations = {
.fsync = xfs_file_fsync,
.get_unmapped_area = thp_get_unmapped_area,
.fallocate = xfs_file_fallocate,
+ .fadvise = xfs_file_fadvise,
.remap_file_range = xfs_file_remap_range,
};
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 574a7a8b4736..5f12b5d8527a 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -18,6 +18,7 @@
#include "xfs_trace.h"
#include "xfs_ag_resv.h"
#include "xfs_trans.h"
+#include "xfs_filestream.h"
struct xfs_fstrm_item {
struct xfs_mru_cache_elem mru;
@@ -374,7 +375,7 @@ xfs_filestream_new_ag(
startag = (item->ag + 1) % mp->m_sb.sb_agcount;
}
- if (xfs_alloc_is_userdata(ap->datatype))
+ if (ap->datatype & XFS_ALLOC_USERDATA)
flags |= XFS_PICK_USERDATA;
if (ap->tp->t_flags & XFS_TRANS_LOWMODE)
flags |= XFS_PICK_LOWSPACE;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 5a8f9641562a..918456ca29e1 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -146,6 +146,7 @@ xfs_fsmap_owner_from_rmap(
dest->fmr_owner = XFS_FMR_OWN_FREE;
break;
default:
+ ASSERT(0);
return -EFSCORRUPTED;
}
return 0;
@@ -250,7 +251,7 @@ xfs_getfsmap_helper(
rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
if (info->next_daddr < rec_daddr)
info->next_daddr = rec_daddr;
- return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ return 0;
}
/* Are we just counting mappings? */
@@ -259,14 +260,14 @@ xfs_getfsmap_helper(
info->head->fmh_entries++;
if (info->last)
- return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ return 0;
info->head->fmh_entries++;
rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
if (info->next_daddr < rec_daddr)
info->next_daddr = rec_daddr;
- return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ return 0;
}
/*
@@ -276,7 +277,7 @@ xfs_getfsmap_helper(
*/
if (rec_daddr > info->next_daddr) {
if (info->head->fmh_entries >= info->head->fmh_count)
- return XFS_BTREE_QUERY_RANGE_ABORT;
+ return -ECANCELED;
fmr.fmr_device = info->dev;
fmr.fmr_physical = info->next_daddr;
@@ -295,7 +296,7 @@ xfs_getfsmap_helper(
/* Fill out the extent we found */
if (info->head->fmh_entries >= info->head->fmh_count)
- return XFS_BTREE_QUERY_RANGE_ABORT;
+ return -ECANCELED;
trace_xfs_fsmap_mapping(mp, info->dev, info->agno, rec);
@@ -328,7 +329,7 @@ out:
rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
if (info->next_daddr < rec_daddr)
info->next_daddr = rec_daddr;
- return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ return 0;
}
/* Transform a rmapbt irec into a fsmap */
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 0b0fd10a36d4..8dc2e5414276 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -40,11 +40,11 @@ xfs_inode_alloc(
* KM_MAYFAIL and return NULL here on ENOMEM. Set the
* code up to do this anyway.
*/
- ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
+ ip = kmem_zone_alloc(xfs_inode_zone, 0);
if (!ip)
return NULL;
if (inode_init_always(mp->m_super, VFS_I(ip))) {
- kmem_zone_free(xfs_inode_zone, ip);
+ kmem_cache_free(xfs_inode_zone, ip);
return NULL;
}
@@ -104,7 +104,7 @@ xfs_inode_free_callback(
ip->i_itemp = NULL;
}
- kmem_zone_free(xfs_inode_zone, ip);
+ kmem_cache_free(xfs_inode_zone, ip);
}
static void
@@ -1419,7 +1419,7 @@ xfs_inode_match_id(
return 0;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- xfs_get_projid(ip) != eofb->eof_prid)
+ ip->i_d.di_projid != eofb->eof_prid)
return 0;
return 1;
@@ -1443,7 +1443,7 @@ xfs_inode_match_id_union(
return 1;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- xfs_get_projid(ip) == eofb->eof_prid)
+ ip->i_d.di_projid == eofb->eof_prid)
return 1;
return 0;
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index d99a0a3e5f40..490fee22b878 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -55,7 +55,7 @@ STATIC void
xfs_icreate_item_release(
struct xfs_log_item *lip)
{
- kmem_zone_free(xfs_icreate_zone, ICR_ITEM(lip));
+ kmem_cache_free(xfs_icreate_zone, ICR_ITEM(lip));
}
static const struct xfs_item_ops xfs_icreate_item_ops = {
@@ -89,7 +89,7 @@ xfs_icreate_log(
{
struct xfs_icreate_item *icp;
- icp = kmem_zone_zalloc(xfs_icreate_zone, KM_SLEEP);
+ icp = kmem_zone_zalloc(xfs_icreate_zone, 0);
xfs_log_item_init(tp->t_mountp, &icp->ic_item, XFS_LI_ICREATE,
&xfs_icreate_item_ops);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 6467d5e1df2d..401da197f012 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -55,6 +55,12 @@ xfs_extlen_t
xfs_get_extsz_hint(
struct xfs_inode *ip)
{
+ /*
+ * No point in aligning allocations if we need to COW to actually
+ * write to them.
+ */
+ if (xfs_is_always_cow_inode(ip))
+ return 0;
if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
return ip->i_d.di_extsize;
if (XFS_IS_REALTIME_INODE(ip))
@@ -809,7 +815,7 @@ xfs_ialloc(
ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
inode->i_rdev = rdev;
- xfs_set_projid(ip, prid);
+ ip->i_d.di_projid = prid;
if (pip && XFS_INHERIT_GID(pip)) {
ip->i_d.di_gid = pip->i_d.di_gid;
@@ -845,8 +851,7 @@ xfs_ialloc(
inode_set_iversion(inode, 1);
ip->i_d.di_flags2 = 0;
ip->i_d.di_cowextsize = 0;
- ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
- ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
+ ip->i_d.di_crtime = tv;
}
@@ -1418,7 +1423,7 @@ xfs_link(
* the tree quota mechanism could be circumvented.
*/
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
- (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
+ tdp->i_d.di_projid != sip->i_d.di_projid)) {
error = -EXDEV;
goto error_return;
}
@@ -2018,7 +2023,7 @@ xfs_iunlink_add_backref(
if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
return 0;
- iu = kmem_zalloc(sizeof(*iu), KM_SLEEP | KM_NOFS);
+ iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
iu->iu_agino = prev_agino;
iu->iu_next_unlinked = this_agino;
@@ -2130,8 +2135,10 @@ xfs_iunlink_update_bucket(
* passed in because either we're adding or removing ourselves from the
* head of the list.
*/
- if (old_value == new_agino)
+ if (old_value == new_agino) {
+ xfs_buf_corruption_error(agibp);
return -EFSCORRUPTED;
+ }
agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
offset = offsetof(struct xfs_agi, agi_unlinked) +
@@ -2194,6 +2201,8 @@ xfs_iunlink_update_inode(
/* Make sure the old pointer isn't garbage. */
old_value = be32_to_cpu(dip->di_next_unlinked);
if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
goto out;
}
@@ -2205,8 +2214,11 @@ xfs_iunlink_update_inode(
*/
*old_next_agino = old_value;
if (old_value == next_agino) {
- if (next_agino != NULLAGINO)
+ if (next_agino != NULLAGINO) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
+ dip, sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
+ }
goto out;
}
@@ -2257,8 +2269,10 @@ xfs_iunlink(
*/
next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
if (next_agino == agino ||
- !xfs_verify_agino_or_null(mp, agno, next_agino))
+ !xfs_verify_agino_or_null(mp, agno, next_agino)) {
+ xfs_buf_corruption_error(agibp);
return -EFSCORRUPTED;
+ }
if (next_agino != NULLAGINO) {
struct xfs_perag *pag;
@@ -3196,6 +3210,7 @@ xfs_rename(
struct xfs_trans *tp;
struct xfs_inode *wip = NULL; /* whiteout inode */
struct xfs_inode *inodes[__XFS_SORT_INODES];
+ struct xfs_buf *agibp;
int num_inodes = __XFS_SORT_INODES;
bool new_parent = (src_dp != target_dp);
bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
@@ -3270,7 +3285,7 @@ xfs_rename(
* tree quota mechanism would be circumvented.
*/
if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
- (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
+ target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
error = -EXDEV;
goto out_trans_cancel;
}
@@ -3282,7 +3297,8 @@ xfs_rename(
spaceres);
/*
- * Set up the target.
+ * Check for expected errors before we dirty the transaction
+ * so we can return an error without a transaction abort.
*/
if (target_ip == NULL) {
/*
@@ -3294,6 +3310,45 @@ xfs_rename(
if (error)
goto out_trans_cancel;
}
+ } else {
+ /*
+ * If target exists and it's a directory, check whether
+ * it can be destroyed.
+ */
+ if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
+ (!xfs_dir_isempty(target_ip) ||
+ (VFS_I(target_ip)->i_nlink > 2))) {
+ error = -EEXIST;
+ goto out_trans_cancel;
+ }
+ }
+
+ /*
+ * Directory entry creation below may acquire the AGF. Remove
+ * the whiteout from the unlinked list first to preserve correct
+ * AGI/AGF locking order. This dirties the transaction so failures
+ * after this point will abort and log recovery will clean up the
+ * mess.
+ *
+ * For whiteouts, we need to bump the link count on the whiteout
+ * inode. After this point we have a real link, so clear the
+ * tmpfile state flag from the inode so it doesn't accidentally
+ * get misused in future.
+ */
+ if (wip) {
+ ASSERT(VFS_I(wip)->i_nlink == 0);
+ error = xfs_iunlink_remove(tp, wip);
+ if (error)
+ goto out_trans_cancel;
+
+ xfs_bumplink(tp, wip);
+ VFS_I(wip)->i_state &= ~I_LINKABLE;
+ }
+
+ /*
+ * Set up the target.
+ */
+ if (target_ip == NULL) {
/*
* If target does not exist and the rename crosses
* directories, adjust the target directory link count
@@ -3312,22 +3367,6 @@ xfs_rename(
}
} else { /* target_ip != NULL */
/*
- * If target exists and it's a directory, check that both
- * target and source are directories and that target can be
- * destroyed, or that neither is a directory.
- */
- if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
- /*
- * Make sure target dir is empty.
- */
- if (!(xfs_dir_isempty(target_ip)) ||
- (VFS_I(target_ip)->i_nlink > 2)) {
- error = -EEXIST;
- goto out_trans_cancel;
- }
- }
-
- /*
* Link the source inode under the target name.
* If the source inode is a directory and we are moving
* it across directories, its ".." entry will be
@@ -3336,6 +3375,22 @@ xfs_rename(
* In case there is already an entry with the same
* name at the destination directory, remove it first.
*/
+
+ /*
+ * Check whether the replace operation will need to allocate
+ * blocks. This happens when the shortform directory lacks
+ * space and we have to convert it to a block format directory.
+ * When more blocks are necessary, we must lock the AGI first
+ * to preserve locking order (AGI -> AGF).
+ */
+ if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
+ error = xfs_read_agi(mp, tp,
+ XFS_INO_TO_AGNO(mp, target_ip->i_ino),
+ &agibp);
+ if (error)
+ goto out_trans_cancel;
+ }
+
error = xfs_dir_replace(tp, target_dp, target_name,
src_ip->i_ino, spaceres);
if (error)
@@ -3417,30 +3472,6 @@ xfs_rename(
if (error)
goto out_trans_cancel;
- /*
- * For whiteouts, we need to bump the link count on the whiteout inode.
- * This means that failures all the way up to this point leave the inode
- * on the unlinked list and so cleanup is a simple matter of dropping
- * the remaining reference to it. If we fail here after bumping the link
- * count, we're shutting down the filesystem so we'll never see the
- * intermediate state on disk.
- */
- if (wip) {
- ASSERT(VFS_I(wip)->i_nlink == 0);
- xfs_bumplink(tp, wip);
- error = xfs_iunlink_remove(tp, wip);
- if (error)
- goto out_trans_cancel;
- xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
-
- /*
- * Now we have a real link, clear the "I'm a tmpfile" state
- * flag from the inode so it doesn't accidentally get misused in
- * future.
- */
- VFS_I(wip)->i_state &= ~I_LINKABLE;
- }
-
xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
if (new_parent)
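The xfs_rename() rework above moves the whiteout's unlinked-list removal ahead of the directory-entry work so the AGI is always locked before the AGF, as the new comment explains. A generic sketch of why a single, fixed acquisition order prevents deadlock (plain pthreads, not XFS code):

#include <pthread.h>
#include <stdio.h>

/* Every path takes lock_a (think AGI) before lock_b (think AGF), so two
 * concurrent paths can never wait on each other in a cycle. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock_a);	/* first lock in the order ... */
	pthread_mutex_lock(&lock_b);	/* ... then the second, never reversed */
	printf("%s: did work under both locks\n", (const char *)arg);
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, "path 1");
	pthread_create(&t2, NULL, worker, "path 2");
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}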
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 558173f95a03..492e53992fa9 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -37,9 +37,6 @@ typedef struct xfs_inode {
struct xfs_ifork *i_cowfp; /* copy on write extents */
struct xfs_ifork i_df; /* data fork */
- /* operations vectors */
- const struct xfs_dir_ops *d_ops; /* directory ops vector */
-
/* Transaction and locking information. */
struct xfs_inode_log_item *i_itemp; /* logging information */
mrlock_t i_lock; /* inode lock */
@@ -177,30 +174,11 @@ xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags)
return ret;
}
-/*
- * Project quota id helpers (previously projid was 16bit only
- * and using two 16bit values to hold new 32bit projid was chosen
- * to retain compatibility with "old" filesystems).
- */
-static inline prid_t
-xfs_get_projid(struct xfs_inode *ip)
-{
- return (prid_t)ip->i_d.di_projid_hi << 16 | ip->i_d.di_projid_lo;
-}
-
-static inline void
-xfs_set_projid(struct xfs_inode *ip,
- prid_t projid)
-{
- ip->i_d.di_projid_hi = (uint16_t) (projid >> 16);
- ip->i_d.di_projid_lo = (uint16_t) (projid & 0xffff);
-}
-
static inline prid_t
xfs_get_initial_prid(struct xfs_inode *dp)
{
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
- return xfs_get_projid(dp);
+ return dp->i_d.di_projid;
return XFS_PROJID_DEFAULT;
}
@@ -220,6 +198,13 @@ static inline bool xfs_inode_has_cow_data(struct xfs_inode *ip)
}
/*
+ * Return the buftarg used for data allocations on a given inode.
+ */
+#define xfs_inode_buftarg(ip) \
+ (XFS_IS_REALTIME_INODE(ip) ? \
+ (ip)->i_mount->m_rtdev_targp : (ip)->i_mount->m_ddev_targp)
+
+/*
* In-core inode flags.
*/
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
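The new xfs_inode_buftarg() macro above selects the realtime or data device target per inode, and later hunks (the mmap path, DIOINFO, the iomap conversions) switch to it instead of the old xfs_find_bdev_for_inode()/xfs_find_daxdev_for_inode() helpers. A self-contained sketch of the same select-by-flag pattern, with struct and field names invented for the example:

#include <stdbool.h>
#include <stdio.h>

struct buftarg { const char *name; };

struct mount {
	struct buftarg *ddev_targp;	/* data device */
	struct buftarg *rtdev_targp;	/* realtime device, may differ */
};

struct inode_like {
	struct mount *mount;
	bool realtime;
};

/* Pick the buffer target used for data allocations on this inode. */
#define inode_buftarg(ip) \
	((ip)->realtime ? (ip)->mount->rtdev_targp : (ip)->mount->ddev_targp)

int main(void)
{
	struct buftarg ddev = { "data" }, rtdev = { "realtime" };
	struct mount mp = { &ddev, &rtdev };
	struct inode_like data_ip = { &mp, false }, rt_ip = { &mp, true };

	printf("%s\n", inode_buftarg(&data_ip)->name);	/* data */
	printf("%s\n", inode_buftarg(&rt_ip)->name);	/* realtime */
	return 0;
}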
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index c9a502eed204..8bd5d0de6321 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -17,6 +17,7 @@
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
+#include "xfs_error.h"
#include <linux/iversion.h>
@@ -309,8 +310,8 @@ xfs_inode_to_log_dinode(
to->di_format = from->di_format;
to->di_uid = from->di_uid;
to->di_gid = from->di_gid;
- to->di_projid_lo = from->di_projid_lo;
- to->di_projid_hi = from->di_projid_hi;
+ to->di_projid_lo = from->di_projid & 0xffff;
+ to->di_projid_hi = from->di_projid >> 16;
memset(to->di_pad, 0, sizeof(to->di_pad));
memset(to->di_pad3, 0, sizeof(to->di_pad3));
@@ -340,8 +341,8 @@ xfs_inode_to_log_dinode(
if (from->di_version == 3) {
to->di_changecount = inode_peek_iversion(inode);
- to->di_crtime.t_sec = from->di_crtime.t_sec;
- to->di_crtime.t_nsec = from->di_crtime.t_nsec;
+ to->di_crtime.t_sec = from->di_crtime.tv_sec;
+ to->di_crtime.t_nsec = from->di_crtime.tv_nsec;
to->di_flags2 = from->di_flags2;
to->di_cowextsize = from->di_cowextsize;
to->di_ino = ip->i_ino;
@@ -651,7 +652,7 @@ xfs_inode_item_init(
struct xfs_inode_log_item *iip;
ASSERT(ip->i_itemp == NULL);
- iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
+ iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, 0);
iip->ili_inode = ip;
xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
@@ -666,7 +667,7 @@ xfs_inode_item_destroy(
xfs_inode_t *ip)
{
kmem_free(ip->i_itemp->ili_item.li_lv_shadow);
- kmem_zone_free(xfs_ili_zone, ip->i_itemp);
+ kmem_cache_free(xfs_ili_zone, ip->i_itemp);
}
@@ -828,8 +829,10 @@ xfs_inode_item_format_convert(
{
struct xfs_inode_log_format_32 *in_f32 = buf->i_addr;
- if (buf->i_len != sizeof(*in_f32))
+ if (buf->i_len != sizeof(*in_f32)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
+ }
in_f->ilf_type = in_f32->ilf_type;
in_f->ilf_size = in_f32->ilf_size;
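With di_projid now a single 32-bit field in core, the logging code above splits it back into the historical 16-bit halves that the on-disk/log format keeps for compatibility. A standalone round-trip check of that packing (types invented for the example):

#include <assert.h>
#include <stdint.h>

/* The log/disk format stores the project ID as two 16-bit halves; in core
 * it is one 32-bit value. Verify that splitting and rejoining round-trips. */
struct disk_ids {
	uint16_t projid_lo;
	uint16_t projid_hi;
};

static void pack_projid(struct disk_ids *d, uint32_t projid)
{
	d->projid_lo = projid & 0xffff;
	d->projid_hi = projid >> 16;
}

static uint32_t unpack_projid(const struct disk_ids *d)
{
	return (uint32_t)d->projid_hi << 16 | d->projid_lo;
}

int main(void)
{
	struct disk_ids d;
	uint32_t projid = 0x12345678;

	pack_projid(&d, projid);
	assert(unpack_projid(&d) == projid);
	return 0;
}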
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 6f7848cd5527..7b35d62ede9f 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -33,6 +33,8 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_health.h"
+#include "xfs_reflink.h"
+#include "xfs_ioctl.h"
#include <linux/mount.h>
#include <linux/namei.h>
@@ -67,7 +69,7 @@ xfs_find_handle(
return -EBADF;
inode = file_inode(f.file);
} else {
- error = user_lpath((const char __user *)hreq->path, &path);
+ error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
if (error)
return error;
inode = d_inode(path.dentry);
@@ -290,82 +292,6 @@ xfs_readlink_by_handle(
return error;
}
-int
-xfs_set_dmattrs(
- xfs_inode_t *ip,
- uint evmask,
- uint16_t state)
-{
- xfs_mount_t *mp = ip->i_mount;
- xfs_trans_t *tp;
- int error;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
- if (error)
- return error;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- ip->i_d.di_dmevmask = evmask;
- ip->i_d.di_dmstate = state;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- error = xfs_trans_commit(tp);
-
- return error;
-}
-
-STATIC int
-xfs_fssetdm_by_handle(
- struct file *parfilp,
- void __user *arg)
-{
- int error;
- struct fsdmidata fsd;
- xfs_fsop_setdm_handlereq_t dmhreq;
- struct dentry *dentry;
-
- if (!capable(CAP_MKNOD))
- return -EPERM;
- if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
- return -EFAULT;
-
- error = mnt_want_write_file(parfilp);
- if (error)
- return error;
-
- dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
- if (IS_ERR(dentry)) {
- mnt_drop_write_file(parfilp);
- return PTR_ERR(dentry);
- }
-
- if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
- error = -EPERM;
- goto out;
- }
-
- if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
- error = -EFAULT;
- goto out;
- }
-
- error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
- fsd.fsd_dmstate);
-
- out:
- mnt_drop_write_file(parfilp);
- dput(dentry);
- return error;
-}
-
STATIC int
xfs_attrlist_by_handle(
struct file *parfilp,
@@ -396,7 +322,7 @@ xfs_attrlist_by_handle(
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
+ kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
if (!kbuf)
goto out_dput;
@@ -434,11 +360,11 @@ xfs_attrmulti_attr_get(
if (*len > XFS_XATTR_SIZE_MAX)
return -EINVAL;
- kbuf = kmem_zalloc_large(*len, KM_SLEEP);
+ kbuf = kmem_zalloc_large(*len, 0);
if (!kbuf)
return -ENOMEM;
- error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
+ error = xfs_attr_get(XFS_I(inode), name, &kbuf, (int *)len, flags);
if (error)
goto out_kfree;
@@ -588,13 +514,12 @@ xfs_attrmulti_by_handle(
int
xfs_ioc_space(
struct file *filp,
- unsigned int cmd,
xfs_flock64_t *bf)
{
struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
struct iattr iattr;
- enum xfs_prealloc_flags flags = 0;
+ enum xfs_prealloc_flags flags = XFS_PREALLOC_CLEAR;
uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
int error;
@@ -607,6 +532,9 @@ xfs_ioc_space(
if (!S_ISREG(inode->i_mode))
return -EINVAL;
+ if (xfs_is_always_cow_inode(ip))
+ return -EOPNOTSUPP;
+
if (filp->f_flags & O_DSYNC)
flags |= XFS_PREALLOC_SYNC;
if (filp->f_mode & FMODE_NOCMTIME)
@@ -620,6 +548,7 @@ xfs_ioc_space(
error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
if (error)
goto out_unlock;
+ inode_dio_wait(inode);
switch (bf->l_whence) {
case 0: /*SEEK_SET*/
@@ -635,73 +564,21 @@ xfs_ioc_space(
goto out_unlock;
}
- /*
- * length of <= 0 for resv/unresv/zero is invalid. length for
- * alloc/free is ignored completely and we have no idea what userspace
- * might have set it to, so set it to zero to allow range
- * checks to pass.
- */
- switch (cmd) {
- case XFS_IOC_ZERO_RANGE:
- case XFS_IOC_RESVSP:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_UNRESVSP64:
- if (bf->l_len <= 0) {
- error = -EINVAL;
- goto out_unlock;
- }
- break;
- default:
- bf->l_len = 0;
- break;
- }
-
- if (bf->l_start < 0 ||
- bf->l_start > inode->i_sb->s_maxbytes ||
- bf->l_start + bf->l_len < 0 ||
- bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
+ if (bf->l_start < 0 || bf->l_start > inode->i_sb->s_maxbytes) {
error = -EINVAL;
goto out_unlock;
}
- switch (cmd) {
- case XFS_IOC_ZERO_RANGE:
- flags |= XFS_PREALLOC_SET;
- error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
- break;
- case XFS_IOC_RESVSP:
- case XFS_IOC_RESVSP64:
- flags |= XFS_PREALLOC_SET;
- error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
- XFS_BMAPI_PREALLOC);
- break;
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_UNRESVSP64:
- error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
- break;
- case XFS_IOC_ALLOCSP:
- case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP:
- case XFS_IOC_FREESP64:
- flags |= XFS_PREALLOC_CLEAR;
- if (bf->l_start > XFS_ISIZE(ip)) {
- error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
- bf->l_start - XFS_ISIZE(ip), 0);
- if (error)
- goto out_unlock;
- }
-
- iattr.ia_valid = ATTR_SIZE;
- iattr.ia_size = bf->l_start;
-
- error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
- break;
- default:
- ASSERT(0);
- error = -EINVAL;
+ if (bf->l_start > XFS_ISIZE(ip)) {
+ error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
+ bf->l_start - XFS_ISIZE(ip), 0);
+ if (error)
+ goto out_unlock;
}
+ iattr.ia_valid = ATTR_SIZE;
+ iattr.ia_size = bf->l_start;
+ error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
if (error)
goto out_unlock;
@@ -831,7 +708,7 @@ xfs_bulkstat_fmt(
/*
* Check the incoming bulk request @hdr from userspace and initialize the
* internal @breq bulk request appropriately. Returns 0 if the bulk request
- * should proceed; XFS_ITER_ABORT if there's nothing to do; or the usual
+ * should proceed; -ECANCELED if there's nothing to do; or the usual
* negative error code.
*/
static int
@@ -889,13 +766,13 @@ xfs_bulk_ireq_setup(
/* Asking for an inode past the end of the AG? We're done! */
if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
- return XFS_ITER_ABORT;
+ return -ECANCELED;
} else if (hdr->agno)
return -EINVAL;
/* Asking for an inode past the end of the FS? We're done! */
if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
- return XFS_ITER_ABORT;
+ return -ECANCELED;
return 0;
}
@@ -936,7 +813,7 @@ xfs_ioc_bulkstat(
return -EFAULT;
error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
- if (error == XFS_ITER_ABORT)
+ if (error == -ECANCELED)
goto out_teardown;
if (error < 0)
return error;
@@ -986,7 +863,7 @@ xfs_ioc_inumbers(
return -EFAULT;
error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
- if (error == XFS_ITER_ABORT)
+ if (error == -ECANCELED)
goto out_teardown;
if (error < 0)
return error;
@@ -1038,6 +915,10 @@ xfs_ioc_ag_geometry(
if (copy_from_user(&ageo, arg, sizeof(ageo)))
return -EFAULT;
+ if (ageo.ag_flags)
+ return -EINVAL;
+ if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
+ return -EINVAL;
error = xfs_ag_get_geometry(mp, ageo.ag_number, &ageo);
if (error)
@@ -1112,7 +993,7 @@ xfs_fill_fsxattr(
fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
fa->fsx_cowextsize = ip->i_d.di_cowextsize <<
ip->i_mount->m_sb.sb_blocklog;
- fa->fsx_projid = xfs_get_projid(ip);
+ fa->fsx_projid = ip->i_d.di_projid;
if (attr) {
if (ip->i_afp) {
@@ -1307,11 +1188,9 @@ xfs_ioctl_setattr_dax_invalidate(
* have to check the device for dax support or flush pagecache.
*/
if (fa->fsx_xflags & FS_XFLAG_DAX) {
- if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
- return -EINVAL;
- if (S_ISREG(inode->i_mode) &&
- !bdev_dax_supported(xfs_find_bdev_for_inode(VFS_I(ip)),
- sb->s_blocksize))
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+
+ if (!bdev_dax_supported(target->bt_bdev, sb->s_blocksize))
return -EINVAL;
}
@@ -1566,7 +1445,7 @@ xfs_ioctl_setattr(
}
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
- xfs_get_projid(ip) != fa->fsx_projid) {
+ ip->i_d.di_projid != fa->fsx_projid) {
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
if (code) /* out of quota */
@@ -1603,13 +1482,13 @@ xfs_ioctl_setattr(
VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
/* Change the ownerships and register project quota modifications */
- if (xfs_get_projid(ip) != fa->fsx_projid) {
+ if (ip->i_d.di_projid != fa->fsx_projid) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
olddquot = xfs_qm_vop_chown(tp, ip,
&ip->i_pdquot, pdqp);
}
ASSERT(ip->i_d.di_version > 1);
- xfs_set_projid(ip, fa->fsx_projid);
+ ip->i_d.di_projid = fa->fsx_projid;
}
/*
@@ -1881,7 +1760,7 @@ xfs_ioc_getfsmap(
info.mp = ip->i_mount;
info.data = arg;
error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
- if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
+ if (error == -ECANCELED) {
error = 0;
aborted = true;
} else if (error)
@@ -2119,24 +1998,17 @@ xfs_file_ioctl(
return xfs_ioc_setlabel(filp, mp, arg);
case XFS_IOC_ALLOCSP:
case XFS_IOC_FREESP:
- case XFS_IOC_RESVSP:
- case XFS_IOC_UNRESVSP:
case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP64:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP64:
- case XFS_IOC_ZERO_RANGE: {
+ case XFS_IOC_FREESP64: {
xfs_flock64_t bf;
if (copy_from_user(&bf, arg, sizeof(bf)))
return -EFAULT;
- return xfs_ioc_space(filp, cmd, &bf);
+ return xfs_ioc_space(filp, &bf);
}
case XFS_IOC_DIOINFO: {
- struct dioattr da;
- xfs_buftarg_t *target =
- XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp;
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+ struct dioattr da;
da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
@@ -2180,22 +2052,6 @@ xfs_file_ioctl(
case XFS_IOC_SETXFLAGS:
return xfs_ioc_setxflags(ip, filp, arg);
- case XFS_IOC_FSSETDM: {
- struct fsdmidata dmi;
-
- if (copy_from_user(&dmi, arg, sizeof(dmi)))
- return -EFAULT;
-
- error = mnt_want_write_file(filp);
- if (error)
- return error;
-
- error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
- dmi.fsd_dmstate);
- mnt_drop_write_file(filp);
- return error;
- }
-
case XFS_IOC_GETBMAP:
case XFS_IOC_GETBMAPA:
case XFS_IOC_GETBMAPX:
@@ -2223,8 +2079,6 @@ xfs_file_ioctl(
return -EFAULT;
return xfs_open_by_handle(filp, &hreq);
}
- case XFS_IOC_FSSETDM_BY_HANDLE:
- return xfs_fssetdm_by_handle(filp, arg);
case XFS_IOC_READLINK_BY_HANDLE: {
xfs_fsop_handlereq_t hreq;
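Among the xfs_ioctl.c changes above, XFS_IOC_AG_GEOMETRY now rejects requests with nonzero flags or reserved bytes so that padding stays usable for future extensions. A rough userspace sketch of that validation (struct layout invented; all_zero() stands in for the kernel's memchr_inv()):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ag_geom_req {
	uint32_t ag_number;
	uint32_t ag_flags;
	uint64_t ag_reserved[4];
};

static int all_zero(const void *p, size_t len)
{
	const unsigned char *c = p;

	for (size_t i = 0; i < len; i++)
		if (c[i])
			return 0;
	return 1;
}

static int check_req(const struct ag_geom_req *req)
{
	if (req->ag_flags)
		return -EINVAL;		/* no flags defined yet */
	if (!all_zero(req->ag_reserved, sizeof(req->ag_reserved)))
		return -EINVAL;		/* reserved space must stay zero */
	return 0;
}

int main(void)
{
	struct ag_geom_req ok = { .ag_number = 3 };
	struct ag_geom_req bad = { .ag_number = 3, .ag_reserved = { 0, 1, 0, 0 } };

	printf("%d %d\n", check_req(&ok), check_req(&bad));	/* 0 -22 */
	return 0;
}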
diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h
index 654c0bb1bcf8..420bd95dc326 100644
--- a/fs/xfs/xfs_ioctl.h
+++ b/fs/xfs/xfs_ioctl.h
@@ -9,7 +9,6 @@
extern int
xfs_ioc_space(
struct file *filp,
- unsigned int cmd,
xfs_flock64_t *bf);
int
@@ -71,12 +70,6 @@ xfs_file_compat_ioctl(
unsigned int cmd,
unsigned long arg);
-extern int
-xfs_set_dmattrs(
- struct xfs_inode *ip,
- uint evmask,
- uint16_t state);
-
struct xfs_ibulk;
struct xfs_bstat;
struct xfs_inogrp;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7fcf7569743f..c4c4f09113d3 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -381,7 +381,7 @@ xfs_compat_attrlist_by_handle(
return PTR_ERR(dentry);
error = -ENOMEM;
- kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
+ kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
if (!kbuf)
goto out_dput;
@@ -500,44 +500,6 @@ xfs_compat_attrmulti_by_handle(
return error;
}
-STATIC int
-xfs_compat_fssetdm_by_handle(
- struct file *parfilp,
- void __user *arg)
-{
- int error;
- struct fsdmidata fsd;
- compat_xfs_fsop_setdm_handlereq_t dmhreq;
- struct dentry *dentry;
-
- if (!capable(CAP_MKNOD))
- return -EPERM;
- if (copy_from_user(&dmhreq, arg,
- sizeof(compat_xfs_fsop_setdm_handlereq_t)))
- return -EFAULT;
-
- dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
-
- if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
- error = -EPERM;
- goto out;
- }
-
- if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
- error = -EFAULT;
- goto out;
- }
-
- error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
- fsd.fsd_dmstate);
-
-out:
- dput(dentry);
- return error;
-}
-
long
xfs_file_compat_ioctl(
struct file *filp,
@@ -547,79 +509,23 @@ xfs_file_compat_ioctl(
struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- void __user *arg = (void __user *)p;
+ void __user *arg = compat_ptr(p);
int error;
trace_xfs_file_compat_ioctl(ip);
switch (cmd) {
- /* No size or alignment issues on any arch */
- case XFS_IOC_DIOINFO:
- case XFS_IOC_FSGEOMETRY_V4:
- case XFS_IOC_FSGEOMETRY:
- case XFS_IOC_AG_GEOMETRY:
- case XFS_IOC_FSGETXATTR:
- case XFS_IOC_FSSETXATTR:
- case XFS_IOC_FSGETXATTRA:
- case XFS_IOC_FSSETDM:
- case XFS_IOC_GETBMAP:
- case XFS_IOC_GETBMAPA:
- case XFS_IOC_GETBMAPX:
- case XFS_IOC_FSCOUNTS:
- case XFS_IOC_SET_RESBLKS:
- case XFS_IOC_GET_RESBLKS:
- case XFS_IOC_FSGROWFSLOG:
- case XFS_IOC_GOINGDOWN:
- case XFS_IOC_ERROR_INJECTION:
- case XFS_IOC_ERROR_CLEARALL:
- case FS_IOC_GETFSMAP:
- case XFS_IOC_SCRUB_METADATA:
- case XFS_IOC_BULKSTAT:
- case XFS_IOC_INUMBERS:
- return xfs_file_ioctl(filp, cmd, p);
-#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
- /*
- * These are handled fine if no alignment issues. To support x32
- * which uses native 64-bit alignment we must emit these cases in
- * addition to the ia-32 compat set below.
- */
- case XFS_IOC_ALLOCSP:
- case XFS_IOC_FREESP:
- case XFS_IOC_RESVSP:
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP64:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP64:
- case XFS_IOC_FSGEOMETRY_V1:
- case XFS_IOC_FSGROWFSDATA:
- case XFS_IOC_FSGROWFSRT:
- case XFS_IOC_ZERO_RANGE:
-#ifdef CONFIG_X86_X32
- /*
- * x32 special: this gets a different cmd number from the ia-32 compat
- * case below; the associated data will match native 64-bit alignment.
- */
- case XFS_IOC_SWAPEXT:
-#endif
- return xfs_file_ioctl(filp, cmd, p);
-#endif
#if defined(BROKEN_X86_ALIGNMENT)
case XFS_IOC_ALLOCSP_32:
case XFS_IOC_FREESP_32:
case XFS_IOC_ALLOCSP64_32:
- case XFS_IOC_FREESP64_32:
- case XFS_IOC_RESVSP_32:
- case XFS_IOC_UNRESVSP_32:
- case XFS_IOC_RESVSP64_32:
- case XFS_IOC_UNRESVSP64_32:
- case XFS_IOC_ZERO_RANGE_32: {
+ case XFS_IOC_FREESP64_32: {
struct xfs_flock64 bf;
if (xfs_compat_flock64_copyin(&bf, arg))
return -EFAULT;
cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
- return xfs_ioc_space(filp, cmd, &bf);
+ return xfs_ioc_space(filp, &bf);
}
case XFS_IOC_FSGEOMETRY_V1_32:
return xfs_compat_ioc_fsgeometry_v1(mp, arg);
@@ -702,9 +608,8 @@ xfs_file_compat_ioctl(
return xfs_compat_attrlist_by_handle(filp, arg);
case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
return xfs_compat_attrmulti_by_handle(filp, arg);
- case XFS_IOC_FSSETDM_BY_HANDLE_32:
- return xfs_compat_fssetdm_by_handle(filp, arg);
default:
- return -ENOIOCTLCMD;
+ /* try the native version */
+ return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
}
}
diff --git a/fs/xfs/xfs_ioctl32.h b/fs/xfs/xfs_ioctl32.h
index 7985344d3aa6..8c7743cd490e 100644
--- a/fs/xfs/xfs_ioctl32.h
+++ b/fs/xfs/xfs_ioctl32.h
@@ -99,7 +99,7 @@ typedef struct compat_xfs_fsop_handlereq {
_IOWR('X', 108, struct compat_xfs_fsop_handlereq)
/* The bstat field in the swapext struct needs translation */
-typedef struct compat_xfs_swapext {
+struct compat_xfs_swapext {
int64_t sx_version; /* version */
int64_t sx_fdtarget; /* fd of target file */
int64_t sx_fdtmp; /* fd of tmp file */
@@ -107,7 +107,7 @@ typedef struct compat_xfs_swapext {
xfs_off_t sx_length; /* leng from offset */
char sx_pad[16]; /* pad space, unused */
struct compat_xfs_bstat sx_stat; /* stat of target b4 copy */
-} __compat_packed compat_xfs_swapext_t;
+} __compat_packed;
#define XFS_IOC_SWAPEXT_32 _IOWR('X', 109, struct compat_xfs_swapext)
@@ -143,15 +143,6 @@ typedef struct compat_xfs_fsop_attrmulti_handlereq {
#define XFS_IOC_ATTRMULTI_BY_HANDLE_32 \
_IOW('X', 123, struct compat_xfs_fsop_attrmulti_handlereq)
-typedef struct compat_xfs_fsop_setdm_handlereq {
- struct compat_xfs_fsop_handlereq hreq; /* handle information */
- /* ptr to struct fsdmidata */
- compat_uptr_t data; /* DMAPI data */
-} compat_xfs_fsop_setdm_handlereq_t;
-
-#define XFS_IOC_FSSETDM_BY_HANDLE_32 \
- _IOW('X', 121, struct compat_xfs_fsop_setdm_handlereq)
-
#ifdef BROKEN_X86_ALIGNMENT
/* on ia32 l_start is on a 32-bit boundary */
typedef struct compat_xfs_flock64 {
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 3a4310d7cb59..28e2d1f37267 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -29,8 +29,8 @@
#include "xfs_reflink.h"
-#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
- << mp->m_writeio_log)
+#define XFS_ALLOC_ALIGN(mp, off) \
+ (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
static int
xfs_alert_fsblock_zero(
@@ -54,11 +54,12 @@ xfs_bmbt_to_iomap(
struct xfs_inode *ip,
struct iomap *iomap,
struct xfs_bmbt_irec *imap,
- bool shared)
+ u16 flags)
{
struct xfs_mount *mp = ip->i_mount;
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
- if (unlikely(!imap->br_startblock && !XFS_IS_REALTIME_INODE(ip)))
+ if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
return xfs_alert_fsblock_zero(ip, imap);
if (imap->br_startblock == HOLESTARTBLOCK) {
@@ -77,14 +78,13 @@ xfs_bmbt_to_iomap(
}
iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
- iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
- iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
+ iomap->bdev = target->bt_bdev;
+ iomap->dax_dev = target->bt_daxdev;
+ iomap->flags = flags;
if (xfs_ipincount(ip) &&
(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
iomap->flags |= IOMAP_F_DIRTY;
- if (shared)
- iomap->flags |= IOMAP_F_SHARED;
return 0;
}
@@ -95,18 +95,30 @@ xfs_hole_to_iomap(
xfs_fileoff_t offset_fsb,
xfs_fileoff_t end_fsb)
{
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
- iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
- iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
+ iomap->bdev = target->bt_bdev;
+ iomap->dax_dev = target->bt_daxdev;
+}
+
+static inline xfs_fileoff_t
+xfs_iomap_end_fsb(
+ struct xfs_mount *mp,
+ loff_t offset,
+ loff_t count)
+{
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+ return min(XFS_B_TO_FSB(mp, offset + count),
+ XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}
-xfs_extlen_t
+static xfs_extlen_t
xfs_eof_alignment(
- struct xfs_inode *ip,
- xfs_extlen_t extsize)
+ struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
xfs_extlen_t align = 0;
@@ -129,111 +141,80 @@ xfs_eof_alignment(
align = 0;
}
- /*
- * Always round up the allocation request to an extent boundary
- * (when file on a real-time subvolume or has di_extsize hint).
- */
- if (extsize) {
- if (align)
- align = roundup_64(align, extsize);
- else
- align = extsize;
- }
-
return align;
}
-STATIC int
+/*
+ * Check if last_fsb is outside the last extent, and if so grow it to the next
+ * stripe unit boundary.
+ */
+xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
struct xfs_inode *ip,
- xfs_extlen_t extsize,
- xfs_fileoff_t *last_fsb)
+ xfs_fileoff_t end_fsb)
{
- xfs_extlen_t align = xfs_eof_alignment(ip, extsize);
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ xfs_extlen_t extsz = xfs_get_extsz_hint(ip);
+ xfs_extlen_t align = xfs_eof_alignment(ip);
+ struct xfs_bmbt_irec irec;
+ struct xfs_iext_cursor icur;
+
+ ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+
+ /*
+ * Always round up the allocation request to the extent hint boundary.
+ */
+ if (extsz) {
+ if (align)
+ align = roundup_64(align, extsz);
+ else
+ align = extsz;
+ }
if (align) {
- xfs_fileoff_t new_last_fsb = roundup_64(*last_fsb, align);
- int eof, error;
+ xfs_fileoff_t aligned_end_fsb = roundup_64(end_fsb, align);
- error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
- if (error)
- return error;
- if (eof)
- *last_fsb = new_last_fsb;
+ xfs_iext_last(ifp, &icur);
+ if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
+ aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
+ return aligned_end_fsb;
}
- return 0;
+
+ return end_fsb;
}
int
xfs_iomap_write_direct(
- xfs_inode_t *ip,
- xfs_off_t offset,
- size_t count,
- xfs_bmbt_irec_t *imap,
- int nmaps)
+ struct xfs_inode *ip,
+ xfs_fileoff_t offset_fsb,
+ xfs_fileoff_t count_fsb,
+ struct xfs_bmbt_irec *imap)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb;
- xfs_fileoff_t last_fsb;
- xfs_filblks_t count_fsb, resaligned;
- xfs_extlen_t extsz;
- int nimaps;
- int quota_flag;
- int rt;
- xfs_trans_t *tp;
- uint qblocks, resblks, resrtextents;
- int error;
- int lockmode;
- int bmapi_flags = XFS_BMAPI_PREALLOC;
- uint tflags = 0;
-
- rt = XFS_IS_REALTIME_INODE(ip);
- extsz = xfs_get_extsz_hint(ip);
- lockmode = XFS_ILOCK_SHARED; /* locked by caller */
-
- ASSERT(xfs_isilocked(ip, lockmode));
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ xfs_filblks_t resaligned;
+ int nimaps;
+ int quota_flag;
+ uint qblocks, resblks;
+ unsigned int resrtextents = 0;
+ int error;
+ int bmapi_flags = XFS_BMAPI_PREALLOC;
+ uint tflags = 0;
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
- if ((offset + count) > XFS_ISIZE(ip)) {
- /*
- * Assert that the in-core extent list is present since this can
- * call xfs_iread_extents() and we only have the ilock shared.
- * This should be safe because the lock was held around a bmapi
- * call in the caller and we only need it to access the in-core
- * list.
- */
- ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
- XFS_IFEXTENTS);
- error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
- if (error)
- goto out_unlock;
- } else {
- if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
- last_fsb = min(last_fsb, (xfs_fileoff_t)
- imap->br_blockcount +
- imap->br_startoff);
- }
- count_fsb = last_fsb - offset_fsb;
ASSERT(count_fsb > 0);
- resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);
- if (unlikely(rt)) {
+ resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
+ xfs_get_extsz_hint(ip));
+ if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
resrtextents = qblocks = resaligned;
resrtextents /= mp->m_sb.sb_rextsize;
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
quota_flag = XFS_QMOPT_RES_RTBLKS;
} else {
- resrtextents = 0;
resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
quota_flag = XFS_QMOPT_RES_REGBLKS;
}
- /*
- * Drop the shared lock acquired by the caller, attach the dquot if
- * necessary and move on to transaction setup.
- */
- xfs_iunlock(ip, lockmode);
error = xfs_qm_dqattach(ip);
if (error)
return error;
@@ -263,8 +244,7 @@ xfs_iomap_write_direct(
if (error)
return error;
- lockmode = XFS_ILOCK_EXCL;
- xfs_ilock(ip, lockmode);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
if (error)
@@ -277,8 +257,8 @@ xfs_iomap_write_direct(
* caller gave to us.
*/
nimaps = 1;
- error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
- bmapi_flags, resblks, imap, &nimaps);
+ error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
+ imap, &nimaps);
if (error)
goto out_res_cancel;
@@ -297,11 +277,11 @@ xfs_iomap_write_direct(
goto out_unlock;
}
- if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
+ if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
error = xfs_alert_fsblock_zero(ip, imap);
out_unlock:
- xfs_iunlock(ip, lockmode);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_res_cancel:
@@ -410,19 +390,19 @@ xfs_iomap_prealloc_size(
if (offset + count <= XFS_ISIZE(ip))
return 0;
- if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
- (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
+ if (!(mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
+ (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks)))
return 0;
/*
* If an explicit allocsize is set, the file is small, or we
* are writing behind a hole, then use the minimum prealloc:
*/
- if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
+ if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) ||
XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
!xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
prev.br_startoff + prev.br_blockcount < offset_fsb)
- return mp->m_writeio_blocks;
+ return mp->m_allocsize_blocks;
/*
* Determine the initial size of the preallocation. We are beyond the
@@ -515,219 +495,13 @@ xfs_iomap_prealloc_size(
while (alloc_blocks && alloc_blocks >= freesp)
alloc_blocks >>= 4;
check_writeio:
- if (alloc_blocks < mp->m_writeio_blocks)
- alloc_blocks = mp->m_writeio_blocks;
+ if (alloc_blocks < mp->m_allocsize_blocks)
+ alloc_blocks = mp->m_allocsize_blocks;
trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
- mp->m_writeio_blocks);
+ mp->m_allocsize_blocks);
return alloc_blocks;
}
-static int
-xfs_file_iomap_begin_delay(
- struct inode *inode,
- loff_t offset,
- loff_t count,
- unsigned flags,
- struct iomap *iomap)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
- xfs_fileoff_t maxbytes_fsb =
- XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
- xfs_fileoff_t end_fsb;
- struct xfs_bmbt_irec imap, cmap;
- struct xfs_iext_cursor icur, ccur;
- xfs_fsblock_t prealloc_blocks = 0;
- bool eof = false, cow_eof = false, shared = false;
- int whichfork = XFS_DATA_FORK;
- int error = 0;
-
- ASSERT(!XFS_IS_REALTIME_INODE(ip));
- ASSERT(!xfs_get_extsz_hint(ip));
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
- error = -EFSCORRUPTED;
- goto out_unlock;
- }
-
- XFS_STATS_INC(mp, xs_blk_mapw);
-
- if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
- error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
- if (error)
- goto out_unlock;
- }
-
- end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
-
- /*
- * Search the data fork fork first to look up our source mapping. We
- * always need the data fork map, as we have to return it to the
- * iomap code so that the higher level write code can read data in to
- * perform read-modify-write cycles for unaligned writes.
- */
- eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
- if (eof)
- imap.br_startoff = end_fsb; /* fake hole until the end */
-
- /* We never need to allocate blocks for zeroing a hole. */
- if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
- xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
- goto out_unlock;
- }
-
- /*
- * Search the COW fork extent list even if we did not find a data fork
- * extent. This serves two purposes: first this implements the
- * speculative preallocation using cowextsize, so that we also unshare
- * block adjacent to shared blocks instead of just the shared blocks
- * themselves. Second the lookup in the extent list is generally faster
- * than going out to the shared extent tree.
- */
- if (xfs_is_cow_inode(ip)) {
- if (!ip->i_cowfp) {
- ASSERT(!xfs_is_reflink_inode(ip));
- xfs_ifork_init_cow(ip);
- }
- cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
- &ccur, &cmap);
- if (!cow_eof && cmap.br_startoff <= offset_fsb) {
- trace_xfs_reflink_cow_found(ip, &cmap);
- whichfork = XFS_COW_FORK;
- goto done;
- }
- }
-
- if (imap.br_startoff <= offset_fsb) {
- /*
- * For reflink files we may need a delalloc reservation when
- * overwriting shared extents. This includes zeroing of
- * existing extents that contain data.
- */
- if (!xfs_is_cow_inode(ip) ||
- ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
- trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
- &imap);
- goto done;
- }
-
- xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
-
- /* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_inode_need_cow(ip, &imap, &shared);
- if (error)
- goto out_unlock;
-
- /* Not shared? Just report the (potentially capped) extent. */
- if (!shared) {
- trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
- &imap);
- goto done;
- }
-
- /*
- * Fork all the shared blocks from our write offset until the
- * end of the extent.
- */
- whichfork = XFS_COW_FORK;
- end_fsb = imap.br_startoff + imap.br_blockcount;
- } else {
- /*
- * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
- * pages to keep the chunks of work done where somewhat
- * symmetric with the work writeback does. This is a completely
- * arbitrary number pulled out of thin air.
- *
- * Note that the values needs to be less than 32-bits wide until
- * the lower level functions are updated.
- */
- count = min_t(loff_t, count, 1024 * PAGE_SIZE);
- end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
-
- if (xfs_is_always_cow_inode(ip))
- whichfork = XFS_COW_FORK;
- }
-
- error = xfs_qm_dqattach_locked(ip, false);
- if (error)
- goto out_unlock;
-
- if (eof) {
- prealloc_blocks = xfs_iomap_prealloc_size(ip, whichfork, offset,
- count, &icur);
- if (prealloc_blocks) {
- xfs_extlen_t align;
- xfs_off_t end_offset;
- xfs_fileoff_t p_end_fsb;
-
- end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
- p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
- prealloc_blocks;
-
- align = xfs_eof_alignment(ip, 0);
- if (align)
- p_end_fsb = roundup_64(p_end_fsb, align);
-
- p_end_fsb = min(p_end_fsb, maxbytes_fsb);
- ASSERT(p_end_fsb > offset_fsb);
- prealloc_blocks = p_end_fsb - end_fsb;
- }
- }
-
-retry:
- error = xfs_bmapi_reserve_delalloc(ip, whichfork, offset_fsb,
- end_fsb - offset_fsb, prealloc_blocks,
- whichfork == XFS_DATA_FORK ? &imap : &cmap,
- whichfork == XFS_DATA_FORK ? &icur : &ccur,
- whichfork == XFS_DATA_FORK ? eof : cow_eof);
- switch (error) {
- case 0:
- break;
- case -ENOSPC:
- case -EDQUOT:
- /* retry without any preallocation */
- trace_xfs_delalloc_enospc(ip, offset, count);
- if (prealloc_blocks) {
- prealloc_blocks = 0;
- goto retry;
- }
- /*FALLTHRU*/
- default:
- goto out_unlock;
- }
-
- /*
- * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
- * them out if the write happens to fail.
- */
- iomap->flags |= IOMAP_F_NEW;
- trace_xfs_iomap_alloc(ip, offset, count, whichfork,
- whichfork == XFS_DATA_FORK ? &imap : &cmap);
-done:
- if (whichfork == XFS_COW_FORK) {
- if (imap.br_startoff > offset_fsb) {
- xfs_trim_extent(&cmap, offset_fsb,
- imap.br_startoff - offset_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
- goto out_unlock;
- }
- /* ensure we only report blocks we have a reservation for */
- xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
- shared = true;
- }
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
-out_unlock:
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return error;
-}
-
int
xfs_iomap_write_unwritten(
xfs_inode_t *ip,
@@ -765,6 +539,11 @@ xfs_iomap_write_unwritten(
*/
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
+ /* Attach dquots so that bmbt splits are accounted correctly. */
+ error = xfs_qm_dqattach(ip);
+ if (error)
+ return error;
+
do {
/*
* Set up a transaction to convert the range of extents
@@ -783,6 +562,11 @@ xfs_iomap_write_unwritten(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
+ error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
+ XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ goto error_on_bmapi_transaction;
+
/*
* Modify the unwritten extent state of the buffer.
*/
@@ -814,7 +598,7 @@ xfs_iomap_write_unwritten(
if (error)
return error;
- if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
+ if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock)))
return xfs_alert_fsblock_zero(ip, &imap);
if ((numblks_fsb = imap.br_blockcount) == 0) {
@@ -840,23 +624,42 @@ error_on_bmapi_transaction:
static inline bool
imap_needs_alloc(
struct inode *inode,
+ unsigned flags,
struct xfs_bmbt_irec *imap,
int nimaps)
{
- return !nimaps ||
- imap->br_startblock == HOLESTARTBLOCK ||
- imap->br_startblock == DELAYSTARTBLOCK ||
- (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
+ /* don't allocate blocks when just zeroing */
+ if (flags & IOMAP_ZERO)
+ return false;
+ if (!nimaps ||
+ imap->br_startblock == HOLESTARTBLOCK ||
+ imap->br_startblock == DELAYSTARTBLOCK)
+ return true;
+ /* we convert unwritten extents before copying the data for DAX */
+ if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
+ return true;
+ return false;
}
static inline bool
-needs_cow_for_zeroing(
+imap_needs_cow(
+ struct xfs_inode *ip,
+ unsigned int flags,
struct xfs_bmbt_irec *imap,
int nimaps)
{
- return nimaps &&
- imap->br_startblock != HOLESTARTBLOCK &&
- imap->br_state != XFS_EXT_UNWRITTEN;
+ if (!xfs_is_cow_inode(ip))
+ return false;
+
+ /* when zeroing we don't have to COW holes or unwritten extents */
+ if (flags & IOMAP_ZERO) {
+ if (!nimaps ||
+ imap->br_startblock == HOLESTARTBLOCK ||
+ imap->br_state == XFS_EXT_UNWRITTEN)
+ return false;
+ }
+
+ return true;
}
static int
@@ -872,15 +675,8 @@ xfs_ilock_for_iomap(
* COW writes may allocate delalloc space or convert unwritten COW
* extents, so we need to make sure to take the lock exclusively here.
*/
- if (xfs_is_cow_inode(ip) && is_write) {
- /*
- * FIXME: It could still overwrite on unshared extents and not
- * need allocation.
- */
- if (flags & IOMAP_NOWAIT)
- return -EAGAIN;
+ if (xfs_is_cow_inode(ip) && is_write)
mode = XFS_ILOCK_EXCL;
- }
/*
* Extents not yet cached requires exclusive access, don't block. This
@@ -917,111 +713,73 @@ relock:
}
static int
-xfs_file_iomap_begin(
+xfs_direct_write_iomap_begin(
struct inode *inode,
loff_t offset,
loff_t length,
unsigned flags,
- struct iomap *iomap)
+ struct iomap *iomap,
+ struct iomap *srcmap)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- struct xfs_bmbt_irec imap;
- xfs_fileoff_t offset_fsb, end_fsb;
+ struct xfs_bmbt_irec imap, cmap;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
int nimaps = 1, error = 0;
bool shared = false;
+ u16 iomap_flags = 0;
unsigned lockmode;
+ ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
+
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) &&
- !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
- /* Reserve delalloc blocks for regular writeback. */
- return xfs_file_iomap_begin_delay(inode, offset, length, flags,
- iomap);
- }
-
/*
- * Lock the inode in the manner required for the specified operation and
- * check for as many conditions that would result in blocking as
- * possible. This removes most of the non-blocking checks from the
- * mapping code below.
+ * Writes that span EOF might trigger an IO size update on completion,
+ * so consider them to be dirty for the purposes of O_DSYNC even if
+ * there are no other metadata changes pending or made here.
*/
+ if (offset + length > i_size_read(inode))
+ iomap_flags |= IOMAP_F_DIRTY;
+
error = xfs_ilock_for_iomap(ip, flags, &lockmode);
if (error)
return error;
- ASSERT(offset <= mp->m_super->s_maxbytes);
- if (offset > mp->m_super->s_maxbytes - length)
- length = mp->m_super->s_maxbytes - offset;
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- end_fsb = XFS_B_TO_FSB(mp, offset + length);
-
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
&nimaps, 0);
if (error)
goto out_unlock;
- if (flags & IOMAP_REPORT) {
- /* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
- if (error)
+ if (imap_needs_cow(ip, flags, &imap, nimaps)) {
+ error = -EAGAIN;
+ if (flags & IOMAP_NOWAIT)
goto out_unlock;
- }
-
- /* Non-modifying mapping requested, so we are done */
- if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
- goto out_found;
-
- /*
- * Break shared extents if necessary. Checks for non-blocking IO have
- * been done up front, so we don't need to do them here.
- */
- if (xfs_is_cow_inode(ip)) {
- struct xfs_bmbt_irec cmap;
- bool directio = (flags & IOMAP_DIRECT);
-
- /* if zeroing doesn't need COW allocation, then we are done. */
- if ((flags & IOMAP_ZERO) &&
- !needs_cow_for_zeroing(&imap, nimaps))
- goto out_found;
/* may drop and re-acquire the ilock */
- cmap = imap;
- error = xfs_reflink_allocate_cow(ip, &cmap, &shared, &lockmode,
- directio);
+ error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
+ &lockmode, flags & IOMAP_DIRECT);
if (error)
goto out_unlock;
-
- /*
- * For buffered writes we need to report the address of the
- * previous block (if there was any) so that the higher level
- * write code can perform read-modify-write operations; we
- * won't need the CoW fork mapping until writeback. For direct
- * I/O, which must be block aligned, we need to report the
- * newly allocated address. If the data fork has a hole, copy
- * the COW fork mapping to avoid allocating to the data fork.
- */
- if (directio || imap.br_startblock == HOLESTARTBLOCK)
- imap = cmap;
-
+ if (shared)
+ goto out_found_cow;
end_fsb = imap.br_startoff + imap.br_blockcount;
length = XFS_FSB_TO_B(mp, end_fsb) - offset;
}
- /* Don't need to allocate over holes when doing zeroing operations. */
- if (flags & IOMAP_ZERO)
- goto out_found;
+ if (imap_needs_alloc(inode, flags, &imap, nimaps))
+ goto allocate_blocks;
- if (!imap_needs_alloc(inode, &imap, nimaps))
- goto out_found;
+ xfs_iunlock(ip, lockmode);
+ trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
- /* If nowait is set bail since we are going to make allocations. */
- if (flags & IOMAP_NOWAIT) {
- error = -EAGAIN;
+allocate_blocks:
+ error = -EAGAIN;
+ if (flags & IOMAP_NOWAIT)
goto out_unlock;
- }
/*
* We cap the maximum length we map to a sane size to keep the chunks
@@ -1033,48 +791,273 @@ xfs_file_iomap_begin(
* lower level functions are updated.
*/
length = min_t(loff_t, length, 1024 * PAGE_SIZE);
+ end_fsb = xfs_iomap_end_fsb(mp, offset, length);
- /*
- * xfs_iomap_write_direct() expects the shared lock. It is unlocked on
- * return.
- */
- if (lockmode == XFS_ILOCK_EXCL)
- xfs_ilock_demote(ip, lockmode);
- error = xfs_iomap_write_direct(ip, offset, length, &imap,
- nimaps);
+ if (offset + length > XFS_ISIZE(ip))
+ end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
+ else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
+ end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
+ xfs_iunlock(ip, lockmode);
+
+ error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
+ &imap);
if (error)
return error;
- iomap->flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);
-out_finish:
- return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
-
-out_found:
- ASSERT(nimaps);
+out_found_cow:
xfs_iunlock(ip, lockmode);
- trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
- goto out_finish;
+ length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
+ trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
+ if (imap.br_startblock != HOLESTARTBLOCK) {
+ error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+ if (error)
+ return error;
+ }
+ return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
out_unlock:
xfs_iunlock(ip, lockmode);
return error;
}
+const struct iomap_ops xfs_direct_write_iomap_ops = {
+ .iomap_begin = xfs_direct_write_iomap_begin,
+};
+
static int
-xfs_file_iomap_end_delalloc(
- struct xfs_inode *ip,
+xfs_buffered_write_iomap_begin(
+ struct inode *inode,
+ loff_t offset,
+ loff_t count,
+ unsigned flags,
+ struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, count);
+ struct xfs_bmbt_irec imap, cmap;
+ struct xfs_iext_cursor icur, ccur;
+ xfs_fsblock_t prealloc_blocks = 0;
+ bool eof = false, cow_eof = false, shared = false;
+ int allocfork = XFS_DATA_FORK;
+ int error = 0;
+
+ /* we can't use delayed allocations when using extent size hints */
+ if (xfs_get_extsz_hint(ip))
+ return xfs_direct_write_iomap_begin(inode, offset, count,
+ flags, iomap, srcmap);
+
+ ASSERT(!XFS_IS_REALTIME_INODE(ip));
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, XFS_DATA_FORK)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
+ error = -EFSCORRUPTED;
+ goto out_unlock;
+ }
+
+ XFS_STATS_INC(mp, xs_blk_mapw);
+
+ if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+ if (error)
+ goto out_unlock;
+ }
+
+ /*
+ * Search the data fork first to look up our source mapping. We
+ * always need the data fork map, as we have to return it to the
+ * iomap code so that the higher level write code can read data in to
+ * perform read-modify-write cycles for unaligned writes.
+ */
+ eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
+ if (eof)
+ imap.br_startoff = end_fsb; /* fake hole until the end */
+
+ /* We never need to allocate blocks for zeroing a hole. */
+ if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
+ xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
+ goto out_unlock;
+ }
+
+ /*
+ * Search the COW fork extent list even if we did not find a data fork
+ * extent. This serves two purposes: first this implements the
+ * speculative preallocation using cowextsize, so that we also unshare
+ * blocks adjacent to shared blocks instead of just the shared blocks
+ * themselves. Second the lookup in the extent list is generally faster
+ * than going out to the shared extent tree.
+ */
+ if (xfs_is_cow_inode(ip)) {
+ if (!ip->i_cowfp) {
+ ASSERT(!xfs_is_reflink_inode(ip));
+ xfs_ifork_init_cow(ip);
+ }
+ cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
+ &ccur, &cmap);
+ if (!cow_eof && cmap.br_startoff <= offset_fsb) {
+ trace_xfs_reflink_cow_found(ip, &cmap);
+ goto found_cow;
+ }
+ }
+
+ if (imap.br_startoff <= offset_fsb) {
+ /*
+ * For reflink files we may need a delalloc reservation when
+ * overwriting shared extents. This includes zeroing of
+ * existing extents that contain data.
+ */
+ if (!xfs_is_cow_inode(ip) ||
+ ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
+ trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
+ &imap);
+ goto found_imap;
+ }
+
+ xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
+
+ /* Trim the mapping to the nearest shared extent boundary. */
+ error = xfs_inode_need_cow(ip, &imap, &shared);
+ if (error)
+ goto out_unlock;
+
+ /* Not shared? Just report the (potentially capped) extent. */
+ if (!shared) {
+ trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
+ &imap);
+ goto found_imap;
+ }
+
+ /*
+ * Fork all the shared blocks from our write offset until the
+ * end of the extent.
+ */
+ allocfork = XFS_COW_FORK;
+ end_fsb = imap.br_startoff + imap.br_blockcount;
+ } else {
+ /*
+ * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
+ * pages to keep the chunks of work done here somewhat
+ * symmetric with the work writeback does. This is a completely
+ * arbitrary number pulled out of thin air.
+ *
+ * Note that the values need to be less than 32 bits wide until
+ * the lower level functions are updated.
+ */
+ count = min_t(loff_t, count, 1024 * PAGE_SIZE);
+ end_fsb = xfs_iomap_end_fsb(mp, offset, count);
+
+ if (xfs_is_always_cow_inode(ip))
+ allocfork = XFS_COW_FORK;
+ }
+
+ error = xfs_qm_dqattach_locked(ip, false);
+ if (error)
+ goto out_unlock;
+
+ if (eof) {
+ prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork, offset,
+ count, &icur);
+ if (prealloc_blocks) {
+ xfs_extlen_t align;
+ xfs_off_t end_offset;
+ xfs_fileoff_t p_end_fsb;
+
+ end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
+ p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
+ prealloc_blocks;
+
+ align = xfs_eof_alignment(ip);
+ if (align)
+ p_end_fsb = roundup_64(p_end_fsb, align);
+
+ p_end_fsb = min(p_end_fsb,
+ XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
+ ASSERT(p_end_fsb > offset_fsb);
+ prealloc_blocks = p_end_fsb - end_fsb;
+ }
+ }
+
+retry:
+ error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
+ end_fsb - offset_fsb, prealloc_blocks,
+ allocfork == XFS_DATA_FORK ? &imap : &cmap,
+ allocfork == XFS_DATA_FORK ? &icur : &ccur,
+ allocfork == XFS_DATA_FORK ? eof : cow_eof);
+ switch (error) {
+ case 0:
+ break;
+ case -ENOSPC:
+ case -EDQUOT:
+ /* retry without any preallocation */
+ trace_xfs_delalloc_enospc(ip, offset, count);
+ if (prealloc_blocks) {
+ prealloc_blocks = 0;
+ goto retry;
+ }
+ /*FALLTHRU*/
+ default:
+ goto out_unlock;
+ }
+
+ if (allocfork == XFS_COW_FORK) {
+ trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
+ goto found_cow;
+ }
+
+ /*
+ * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+ * them out if the write happens to fail.
+ */
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);
+
+found_imap:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+
+found_cow:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (imap.br_startoff <= offset_fsb) {
+ error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+ if (error)
+ return error;
+ } else {
+ xfs_trim_extent(&cmap, offset_fsb,
+ imap.br_startoff - offset_fsb);
+ }
+ return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+}
+
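The -ENOSPC/-EDQUOT fallback above drops the speculative preallocation and retries the reservation once before giving up. A minimal userspace sketch of that control flow follows; reserve_blocks() is an invented stand-in for xfs_bmapi_reserve_delalloc(), not a real kernel interface.

#include <errno.h>
#include <stddef.h>

/* Stand-in reservation: fails whenever preallocation is requested. */
static int reserve_blocks(size_t len, size_t prealloc)
{
	(void)len;
	return prealloc ? -ENOSPC : 0;
}

static int reserve_with_fallback(size_t len, size_t prealloc)
{
	int error;

retry:
	error = reserve_blocks(len, prealloc);
	switch (error) {
	case 0:
		return 0;
	case -ENOSPC:
	case -EDQUOT:
		/* Retry exactly once with the speculative preallocation dropped. */
		if (prealloc) {
			prealloc = 0;
			goto retry;
		}
		/* fall through */
	default:
		return error;
	}
}

int main(void)
{
	return reserve_with_fallback(4096, 64) ? 1 : 0;
}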
+static int
+xfs_buffered_write_iomap_end(
+ struct inode *inode,
loff_t offset,
loff_t length,
ssize_t written,
+ unsigned flags,
struct iomap *iomap)
{
+ struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t start_fsb;
xfs_fileoff_t end_fsb;
int error = 0;
+ if (iomap->type != IOMAP_DELALLOC)
+ return 0;
+
/*
* Behave as if the write failed if drop writes is enabled. Set the NEW
* flag to force delalloc cleanup.
@@ -1119,24 +1102,51 @@ xfs_file_iomap_end_delalloc(
return 0;
}
+const struct iomap_ops xfs_buffered_write_iomap_ops = {
+ .iomap_begin = xfs_buffered_write_iomap_begin,
+ .iomap_end = xfs_buffered_write_iomap_end,
+};
+
static int
-xfs_file_iomap_end(
+xfs_read_iomap_begin(
struct inode *inode,
loff_t offset,
loff_t length,
- ssize_t written,
unsigned flags,
- struct iomap *iomap)
+ struct iomap *iomap,
+ struct iomap *srcmap)
{
- if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
- return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
- length, written, iomap);
- return 0;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_bmbt_irec imap;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
+ int nimaps = 1, error = 0;
+ bool shared = false;
+ unsigned lockmode;
+
+ ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ error = xfs_ilock_for_iomap(ip, flags, &lockmode);
+ if (error)
+ return error;
+ error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
+ &nimaps, 0);
+ if (!error && (flags & IOMAP_REPORT))
+ error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
+ xfs_iunlock(ip, lockmode);
+
+ if (error)
+ return error;
+ trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
}
-const struct iomap_ops xfs_iomap_ops = {
- .iomap_begin = xfs_file_iomap_begin,
- .iomap_end = xfs_file_iomap_end,
+const struct iomap_ops xfs_read_iomap_ops = {
+ .iomap_begin = xfs_read_iomap_begin,
};
static int
@@ -1145,7 +1155,8 @@ xfs_seek_iomap_begin(
loff_t offset,
loff_t length,
unsigned flags,
- struct iomap *iomap)
+ struct iomap *iomap,
+ struct iomap *srcmap)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
@@ -1178,8 +1189,7 @@ xfs_seek_iomap_begin(
/*
* Fake a hole until the end of the file.
*/
- data_fsb = min(XFS_B_TO_FSB(mp, offset + length),
- XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
+ data_fsb = xfs_iomap_end_fsb(mp, offset, length);
}
/*
@@ -1193,7 +1203,7 @@ xfs_seek_iomap_begin(
if (data_fsb < cow_fsb + cmap.br_blockcount)
end_fsb = min(end_fsb, data_fsb);
xfs_trim_extent(&cmap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
/*
* This is a COW extent, so we must probe the page cache
* because there could be dirty page cache being backed
@@ -1215,7 +1225,7 @@ xfs_seek_iomap_begin(
imap.br_state = XFS_EXT_NORM;
done:
xfs_trim_extent(&imap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock:
xfs_iunlock(ip, lockmode);
return error;
@@ -1231,7 +1241,8 @@ xfs_xattr_iomap_begin(
loff_t offset,
loff_t length,
unsigned flags,
- struct iomap *iomap)
+ struct iomap *iomap,
+ struct iomap *srcmap)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
@@ -1261,7 +1272,7 @@ out_unlock:
if (error)
return error;
ASSERT(nimaps);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
}
const struct iomap_ops xfs_xattr_iomap_ops = {
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 5c2f6aa6d78f..7d3703556d0e 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -11,13 +11,14 @@
struct xfs_inode;
struct xfs_bmbt_irec;
-int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
- struct xfs_bmbt_irec *, int);
+int xfs_iomap_write_direct(struct xfs_inode *ip, xfs_fileoff_t offset_fsb,
+ xfs_fileoff_t count_fsb, struct xfs_bmbt_irec *imap);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
+xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip,
+ xfs_fileoff_t end_fsb);
int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
- struct xfs_bmbt_irec *, bool shared);
-xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize);
+ struct xfs_bmbt_irec *, u16);
static inline xfs_filblks_t
xfs_aligned_fsb_count(
@@ -39,7 +40,9 @@ xfs_aligned_fsb_count(
return count_fsb;
}
-extern const struct iomap_ops xfs_iomap_ops;
+extern const struct iomap_ops xfs_buffered_write_iomap_ops;
+extern const struct iomap_ops xfs_direct_write_iomap_ops;
+extern const struct iomap_ops xfs_read_iomap_ops;
extern const struct iomap_ops xfs_seek_iomap_ops;
extern const struct iomap_ops xfs_xattr_iomap_ops;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ff3c1fae5357..8afe69ca188b 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -20,6 +20,7 @@
#include "xfs_symlink.h"
#include "xfs_dir2.h"
#include "xfs_iomap.h"
+#include "xfs_error.h"
#include <linux/xattr.h>
#include <linux/posix_acl.h>
@@ -470,20 +471,57 @@ xfs_vn_get_link_inline(
struct inode *inode,
struct delayed_call *done)
{
+ struct xfs_inode *ip = XFS_I(inode);
char *link;
- ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
+ ASSERT(ip->i_df.if_flags & XFS_IFINLINE);
/*
* The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
* if_data is junk.
*/
- link = XFS_I(inode)->i_df.if_u1.if_data;
- if (!link)
+ link = ip->i_df.if_u1.if_data;
+ if (XFS_IS_CORRUPT(ip->i_mount, !link))
return ERR_PTR(-EFSCORRUPTED);
return link;
}
+static uint32_t
+xfs_stat_blksize(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ /*
+ * If the file blocks are being allocated from a realtime volume, then
+ * always return the realtime extent size.
+ */
+ if (XFS_IS_REALTIME_INODE(ip))
+ return xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
+
+ /*
+ * Allow large block sizes to be reported to userspace programs if the
+ * "largeio" mount option is used.
+ *
+ * If compatibility mode is specified, simply return the basic unit of
+ * caching so that we don't get inefficient read/modify/write I/O from
+ * user apps. Otherwise....
+ *
+ * If the underlying volume is a stripe, then return the stripe width in
+	 * bytes as the recommended I/O size. If it is not a stripe and we've set
+	 * a default buffered I/O size, return that; otherwise return the compat
+	 * default.
+ */
+ if (mp->m_flags & XFS_MOUNT_LARGEIO) {
+ if (mp->m_swidth)
+ return mp->m_swidth << mp->m_sb.sb_blocklog;
+ if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
+ return 1U << mp->m_allocsize_log;
+ }
+
+ return PAGE_SIZE;
+}
+
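The value computed here is what userspace ultimately sees as st_blksize. A small example of a program consuming that hint; nothing XFS-specific is assumed beyond the standard stat(2) interface.

#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;
	char *buf;

	if (argc < 2 || stat(argv[1], &st) != 0) {
		perror("stat");
		return 1;
	}

	/* st_blksize is the preferred I/O size the filesystem reported. */
	printf("preferred I/O size for %s: %ld bytes\n", argv[1],
	       (long)st.st_blksize);

	/* Size read/write buffers from it to avoid partial-stripe I/O. */
	buf = malloc(st.st_blksize);
	if (!buf)
		return 1;
	free(buf);
	return 0;
}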
STATIC int
xfs_vn_getattr(
const struct path *path,
@@ -516,8 +554,7 @@ xfs_vn_getattr(
if (ip->i_d.di_version == 3) {
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
- stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
- stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
+ stat->btime = ip->i_d.di_crtime;
}
}
@@ -543,16 +580,7 @@ xfs_vn_getattr(
stat->rdev = inode->i_rdev;
break;
default:
- if (XFS_IS_REALTIME_INODE(ip)) {
- /*
- * If the file blocks are being allocated from a
- * realtime volume, then return the inode's realtime
- * extent size or the realtime volume's extent size.
- */
- stat->blksize =
- xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
- } else
- stat->blksize = xfs_preferred_iosize(mp);
+ stat->blksize = xfs_stat_blksize(ip);
stat->rdev = 0;
break;
}
@@ -664,7 +692,7 @@ xfs_setattr_nonsize(
ASSERT(gdqp == NULL);
error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid),
xfs_kgid_to_gid(gid),
- xfs_get_projid(ip),
+ ip->i_d.di_projid,
qflags, &udqp, &gdqp, NULL);
if (error)
return error;
@@ -793,6 +821,7 @@ xfs_setattr_nonsize(
out_cancel:
xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_dqrele:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
@@ -882,10 +911,10 @@ xfs_setattr_size(
if (newsize > oldsize) {
trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
error = iomap_zero_range(inode, oldsize, newsize - oldsize,
- &did_zeroing, &xfs_iomap_ops);
+ &did_zeroing, &xfs_buffered_write_iomap_ops);
} else {
error = iomap_truncate_page(inode, newsize, &did_zeroing,
- &xfs_iomap_ops);
+ &xfs_buffered_write_iomap_ops);
}
if (error)
@@ -1113,7 +1142,7 @@ xfs_vn_fiemap(
&xfs_xattr_iomap_ops);
} else {
error = iomap_fiemap(inode, fieinfo, start, length,
- &xfs_iomap_ops);
+ &xfs_read_iomap_ops);
}
xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
@@ -1226,7 +1255,7 @@ xfs_inode_supports_dax(
return false;
/* Device has to support DAX too. */
- return xfs_find_daxdev_for_inode(VFS_I(ip)) != NULL;
+ return xfs_inode_buftarg(ip)->bt_daxdev != NULL;
}
STATIC void
@@ -1289,9 +1318,7 @@ xfs_setup_inode(
lockdep_set_class(&inode->i_rwsem,
&inode->i_sb->s_type->i_mutex_dir_key);
lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
- ip->d_ops = ip->i_mount->m_dir_inode_ops;
} else {
- ip->d_ops = ip->i_mount->m_nondir_inode_ops;
lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
}
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index a8a06bb78ea8..4b31c29b7e6b 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -84,7 +84,7 @@ xfs_bulkstat_one_int(
/* xfs_iget returns the following without needing
* further change.
*/
- buf->bs_projectid = xfs_get_projid(ip);
+ buf->bs_projectid = ip->i_d.di_projid;
buf->bs_ino = ino;
buf->bs_uid = dic->di_uid;
buf->bs_gid = dic->di_gid;
@@ -97,8 +97,8 @@ xfs_bulkstat_one_int(
buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
buf->bs_ctime = inode->i_ctime.tv_sec;
buf->bs_ctime_nsec = inode->i_ctime.tv_nsec;
- buf->bs_btime = dic->di_crtime.t_sec;
- buf->bs_btime_nsec = dic->di_crtime.t_nsec;
+ buf->bs_btime = dic->di_crtime.tv_sec;
+ buf->bs_btime_nsec = dic->di_crtime.tv_nsec;
buf->bs_gen = inode->i_generation;
buf->bs_mode = inode->i_mode;
@@ -137,7 +137,7 @@ xfs_bulkstat_one_int(
xfs_irele(ip);
error = bc->formatter(bc->breq, buf);
- if (error == XFS_IBULK_ABORT)
+ if (error == -ECANCELED)
goto out_advance;
if (error)
goto out;
@@ -169,7 +169,7 @@ xfs_bulkstat_one(
ASSERT(breq->icount == 1);
bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
if (!bc.buf)
return -ENOMEM;
@@ -181,7 +181,7 @@ xfs_bulkstat_one(
* If we reported one inode to userspace then we abort because we hit
* the end of the buffer. Don't leak that back to userspace.
*/
- if (error == XFS_IWALK_ABORT)
+ if (error == -ECANCELED)
error = 0;
return error;
@@ -243,7 +243,7 @@ xfs_bulkstat(
return 0;
bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
if (!bc.buf)
return -ENOMEM;
@@ -272,6 +272,7 @@ xfs_bulkstat_to_bstat(
struct xfs_bstat *bs1,
const struct xfs_bulkstat *bstat)
{
+ /* memset is needed here because of padding holes in the structure. */
memset(bs1, 0, sizeof(struct xfs_bstat));
bs1->bs_ino = bstat->bs_ino;
bs1->bs_mode = bstat->bs_mode;
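Why the memset matters: member-by-member assignment leaves any padding holes in the destination uninitialized, and those bytes would otherwise be copied to userspace as-is. A minimal illustration with an invented structure layout:

#include <stdint.h>
#include <string.h>

struct legacy_stat {
	uint32_t ino;
	/* 4 bytes of padding here on common 64-bit ABIs */
	uint64_t size;
};

static void fill_legacy(struct legacy_stat *out, uint32_t ino, uint64_t size)
{
	memset(out, 0, sizeof(*out));	/* clears the padding hole too */
	out->ino = ino;
	out->size = size;
}

int main(void)
{
	struct legacy_stat s;

	fill_legacy(&s, 42, 4096);
	return s.ino == 42 ? 0 : 1;
}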
@@ -341,7 +342,7 @@ xfs_inumbers_walk(
int error;
error = ic->formatter(ic->breq, &inogrp);
- if (error && error != XFS_IBULK_ABORT)
+ if (error && error != -ECANCELED)
return error;
ic->breq->startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino) +
@@ -388,6 +389,8 @@ xfs_inumbers_to_inogrp(
struct xfs_inogrp *ig1,
const struct xfs_inumbers *ig)
{
+ /* memset is needed here because of padding holes in the structure. */
+ memset(ig1, 0, sizeof(struct xfs_inogrp));
ig1->xi_startino = ig->xi_startino;
ig1->xi_alloccount = ig->xi_alloccount;
ig1->xi_allocmask = ig->xi_allocmask;
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index e90c1fc5b981..96a1e2a9be3f 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -18,9 +18,6 @@ struct xfs_ibulk {
/* Only iterate within the same AG as startino */
#define XFS_IBULK_SAME_AG (XFS_IWALK_SAME_AG)
-/* Return value that means we want to abort the walk. */
-#define XFS_IBULK_ABORT (XFS_IWALK_ABORT)
-
/*
* Advance the user buffer pointer by one record of the given size. If the
* buffer is now full, return the appropriate error code.
@@ -34,13 +31,21 @@ xfs_ibulk_advance(
breq->ubuffer = b + bytes;
breq->ocount++;
- return breq->ocount == breq->icount ? XFS_IBULK_ABORT : 0;
+ return breq->ocount == breq->icount ? -ECANCELED : 0;
}
/*
* Return stat information in bulk (by-inode) for the filesystem.
*/
+/*
+ * Return codes for the formatter function are 0 to continue iterating, and
+ * non-zero to stop iterating. Any non-zero value will be passed up to the
+ * bulkstat/inumbers caller. The special value -ECANCELED can be used to stop
+ * iteration, as neither bulkstat nor inumbers will ever generate that error
+ * code on their own.
+ */
+
typedef int (*bulkstat_one_fmt_pf)(struct xfs_ibulk *breq,
const struct xfs_bulkstat *bstat);
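A userspace sketch of that contract: the formatter returns 0 to keep going and -ECANCELED to stop cleanly, and the driving loop translates -ECANCELED back to success. The context structure and helpers below are invented for illustration only.

#include <errno.h>
#include <stddef.h>

struct walk_ctx {
	size_t seen;
	size_t limit;
};

/* Formatter: 0 continues the walk, -ECANCELED stops it without error. */
static int count_formatter(struct walk_ctx *ctx, const void *record)
{
	(void)record;
	if (++ctx->seen >= ctx->limit)
		return -ECANCELED;
	return 0;
}

/* Driver: any non-zero return stops; -ECANCELED is not reported to the caller. */
static int walk_records(struct walk_ctx *ctx,
			int (*fmt)(struct walk_ctx *, const void *))
{
	for (int i = 0; i < 1000; i++) {
		int error = fmt(ctx, NULL);

		if (error == -ECANCELED)
			return 0;
		if (error)
			return error;
	}
	return 0;
}

int main(void)
{
	struct walk_ctx ctx = { .seen = 0, .limit = 5 };

	return walk_records(&ctx, count_formatter);
}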
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index 8c7d727149ea..233dcc8784db 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -31,7 +31,7 @@
* inode it finds, it calls a walk function with the relevant inode number and
* a pointer to caller-provided data. The walk function can return the usual
* negative error code to stop the iteration; 0 to continue the iteration; or
- * XFS_IWALK_ABORT to stop the iteration. This return value is returned to the
+ * -ECANCELED to stop the iteration. This return value is returned to the
* caller.
*
* Internally, we allow the walk function to do anything, which means that we
@@ -298,7 +298,8 @@ xfs_iwalk_ag_start(
error = xfs_inobt_get_rec(*curpp, irec, has_more);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, *has_more == 1);
+ if (XFS_IS_CORRUPT(mp, *has_more != 1))
+ return -EFSCORRUPTED;
/*
* If the LE lookup yielded an inobt record before the cursor position,
@@ -616,7 +617,7 @@ xfs_iwalk_threaded(
if (xfs_pwork_ctl_want_abort(&pctl))
break;
- iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), KM_SLEEP);
+ iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
iwag->mp = mp;
iwag->iwalk_fn = iwalk_fn;
iwag->data = data;
diff --git a/fs/xfs/xfs_iwalk.h b/fs/xfs/xfs_iwalk.h
index 6c960e10ed4d..37a795f03267 100644
--- a/fs/xfs/xfs_iwalk.h
+++ b/fs/xfs/xfs_iwalk.h
@@ -6,12 +6,17 @@
#ifndef __XFS_IWALK_H__
#define __XFS_IWALK_H__
+/*
+ * Return codes for the inode/inobt walk function are 0 to continue iterating,
+ * and non-zero to stop iterating. Any non-zero value will be passed up to the
+ * iwalk or inobt_walk caller. The special value -ECANCELED can be used to
+ * stop iteration, as neither iwalk nor inobt_walk will ever generate that
+ * error code on their own.
+ */
+
/* Walk all inodes in the filesystem starting from @startino. */
typedef int (*xfs_iwalk_fn)(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_ino_t ino, void *data);
-/* Return values for xfs_iwalk_fn. */
-#define XFS_IWALK_CONTINUE (XFS_ITER_CONTINUE)
-#define XFS_IWALK_ABORT (XFS_ITER_ABORT)
int xfs_iwalk(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t startino,
unsigned int flags, xfs_iwalk_fn iwalk_fn,
@@ -30,8 +35,6 @@ typedef int (*xfs_inobt_walk_fn)(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno,
const struct xfs_inobt_rec_incore *irec,
void *data);
-/* Return value (for xfs_inobt_walk_fn) that aborts the walk immediately. */
-#define XFS_INOBT_WALK_ABORT (XFS_IWALK_ABORT)
int xfs_inobt_walk(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_ino_t startino, unsigned int flags,
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ca15105681ca..8738bb03f253 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -223,26 +223,32 @@ int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
char *data, unsigned int op);
#define ASSERT_ALWAYS(expr) \
- (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))
#ifdef DEBUG
#define ASSERT(expr) \
- (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))
#else /* !DEBUG */
#ifdef XFS_WARN
#define ASSERT(expr) \
- (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : asswarn(NULL, #expr, __FILE__, __LINE__))
#else /* !DEBUG && !XFS_WARN */
-#define ASSERT(expr) ((void)0)
+#define ASSERT(expr) ((void)0)
#endif /* XFS_WARN */
#endif /* DEBUG */
+#define XFS_IS_CORRUPT(mp, expr) \
+ (unlikely(expr) ? xfs_corruption_error(#expr, XFS_ERRLEVEL_LOW, (mp), \
+ NULL, 0, __FILE__, __LINE__, \
+ __this_address), \
+ true : false)
+
#define STATIC static noinline
#ifdef CONFIG_XFS_RT
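XFS_IS_CORRUPT reports the failed check and then evaluates to a boolean, so callers can write a one-line "if (XFS_IS_CORRUPT(mp, cond)) return -EFSCORRUPTED;". A userspace sketch of the same comma-operator idiom; the reporting helper is invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define report_corruption(expr_str) \
	fprintf(stderr, "corruption: %s at %s:%d\n", (expr_str), __FILE__, __LINE__)

/* Logs once and yields true when the corruption test fires, false otherwise. */
#define IS_CORRUPT(expr) \
	((expr) ? (report_corruption(#expr), true) : false)

static int parse_record(const char *buf)
{
	if (IS_CORRUPT(buf == NULL))
		return -1;	/* caller gets a clean error, the log has details */
	return 0;
}

int main(void)
{
	return parse_record(NULL) == -1 ? 0 : 1;
}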
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 00e9f5c388d3..6a147c63a8a6 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -57,10 +57,6 @@ xlog_state_get_iclog_space(
struct xlog_ticket *ticket,
int *continued_write,
int *logoffsetp);
-STATIC int
-xlog_state_release_iclog(
- struct xlog *log,
- struct xlog_in_core *iclog);
STATIC void
xlog_state_switch_iclogs(
struct xlog *log,
@@ -83,7 +79,10 @@ STATIC void
xlog_ungrant_log_space(
struct xlog *log,
struct xlog_ticket *ticket);
-
+STATIC void
+xlog_sync(
+ struct xlog *log,
+ struct xlog_in_core *iclog);
#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
@@ -214,15 +213,42 @@ xlog_grant_head_wake(
{
struct xlog_ticket *tic;
int need_bytes;
+ bool woken_task = false;
list_for_each_entry(tic, &head->waiters, t_queue) {
+
+ /*
+ * There is a chance that the size of the CIL checkpoints in
+ * progress at the last AIL push target calculation resulted in
+ * limiting the target to the log head (l_last_sync_lsn) at the
+ * time. This may not reflect where the log head is now as the
+ * CIL checkpoints may have completed.
+ *
+		 * Hence when we are woken here, it may be the head of the log
+		 * that has moved rather than the tail. As the tail didn't
+ * move, there still won't be space available for the
+ * reservation we require. However, if the AIL has already
+ * pushed to the target defined by the old log head location, we
+ * will hang here waiting for something else to update the AIL
+ * push target.
+ *
+ * Therefore, if there isn't space to wake the first waiter on
+ * the grant head, we need to push the AIL again to ensure the
+ * target reflects both the current log tail and log head
+ * position before we wait for the tail to move again.
+ */
+
need_bytes = xlog_ticket_reservation(log, head, tic);
- if (*free_bytes < need_bytes)
+ if (*free_bytes < need_bytes) {
+ if (!woken_task)
+ xlog_grant_push_ail(log, need_bytes);
return false;
+ }
*free_bytes -= need_bytes;
trace_xfs_log_grant_wake_up(log, tic);
wake_up_process(tic->t_task);
+ woken_task = true;
}
return true;
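A compact sketch of the wake loop above: waiters are satisfied in FIFO order, and when the head of the queue cannot be satisfied the background pusher is kicked, but only if no waiter has been woken in this sweep. The types and the push helper are invented stand-ins, not the XFS structures.

#include <stdbool.h>
#include <stddef.h>

struct waiter {
	struct waiter *next;
	long need_bytes;
	void (*wake)(struct waiter *);
};

static void push_background_reclaim(long need_bytes)
{
	(void)need_bytes;	/* would kick the AIL-pushing analogue here */
}

static bool wake_waiters(struct waiter *head, long *free_bytes)
{
	bool woken = false;

	for (struct waiter *w = head; w; w = w->next) {
		if (*free_bytes < w->need_bytes) {
			/* Push only if nothing was woken: space truly hasn't moved. */
			if (!woken)
				push_background_reclaim(w->need_bytes);
			return false;	/* FIFO: later waiters keep waiting */
		}
		*free_bytes -= w->need_bytes;
		w->wake(w);
		woken = true;
	}
	return true;
}

static void noop_wake(struct waiter *w) { (void)w; }

int main(void)
{
	struct waiter b = { NULL, 300, noop_wake };
	struct waiter a = { &b, 100, noop_wake };
	long free_bytes = 250;

	return wake_waiters(&a, &free_bytes) ? 1 : 0;	/* expect false: b can't fit */
}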
@@ -428,11 +454,7 @@ xfs_log_reserve(
XFS_STATS_INC(mp, xs_try_logspace);
ASSERT(*ticp == NULL);
- tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
- KM_SLEEP | KM_MAYFAIL);
- if (!tic)
- return -ENOMEM;
-
+ tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 0);
*ticp = tic;
xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
@@ -529,16 +551,71 @@ xfs_log_done(
return lsn;
}
+static bool
+__xlog_state_release_iclog(
+ struct xlog *log,
+ struct xlog_in_core *iclog)
+{
+ lockdep_assert_held(&log->l_icloglock);
+
+ if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+ /* update tail before writing to iclog */
+ xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
+
+ iclog->ic_state = XLOG_STATE_SYNCING;
+ iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+ xlog_verify_tail_lsn(log, iclog, tail_lsn);
+ /* cycle incremented when incrementing curr_block */
+ return true;
+ }
+
+ ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+ return false;
+}
+
+/*
+ * Flush iclog to disk if this is the last reference to the given iclog and
+ * it is in the WANT_SYNC state.
+ */
+static int
+xlog_state_release_iclog(
+ struct xlog *log,
+ struct xlog_in_core *iclog)
+{
+ lockdep_assert_held(&log->l_icloglock);
+
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
+ return -EIO;
+
+ if (atomic_dec_and_test(&iclog->ic_refcnt) &&
+ __xlog_state_release_iclog(log, iclog)) {
+ spin_unlock(&log->l_icloglock);
+ xlog_sync(log, iclog);
+ spin_lock(&log->l_icloglock);
+ }
+
+ return 0;
+}
+
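A userspace sketch (pthreads plus C11 atomics) of the pattern used here: the final reference holder decides under the lock whether a sync is needed, then performs the expensive flush with the lock dropped. The types and helpers are illustrative only, not the kernel interfaces.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct logbuf {
	atomic_int refcnt;
	bool want_sync;		/* protected by lock */
	pthread_mutex_t lock;
};

static void flush_to_disk(struct logbuf *b)
{
	(void)b;		/* slow I/O; must not run under b->lock */
}

static void put_logbuf(struct logbuf *b)
{
	bool sync;

	/* Only the thread dropping the last reference considers syncing. */
	if (atomic_fetch_sub(&b->refcnt, 1) != 1)
		return;

	pthread_mutex_lock(&b->lock);
	sync = b->want_sync;
	b->want_sync = false;
	pthread_mutex_unlock(&b->lock);

	if (sync)
		flush_to_disk(b);
}

int main(void)
{
	struct logbuf b = { .refcnt = 1, .want_sync = true,
			    .lock = PTHREAD_MUTEX_INITIALIZER };

	put_logbuf(&b);
	return 0;
}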
int
xfs_log_release_iclog(
- struct xfs_mount *mp,
+ struct xfs_mount *mp,
struct xlog_in_core *iclog)
{
- if (xlog_state_release_iclog(mp->m_log, iclog)) {
+ struct xlog *log = mp->m_log;
+ bool sync;
+
+ if (iclog->ic_state == XLOG_STATE_IOERROR) {
xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
return -EIO;
}
+ if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
+ sync = __xlog_state_release_iclog(log, iclog);
+ spin_unlock(&log->l_icloglock);
+ if (sync)
+ xlog_sync(log, iclog);
+ }
return 0;
}
@@ -843,10 +920,7 @@ out_err:
iclog = log->l_iclog;
atomic_inc(&iclog->ic_refcnt);
xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
error = xlog_state_release_iclog(log, iclog);
-
- spin_lock(&log->l_icloglock);
switch (iclog->ic_state) {
default:
if (!XLOG_FORCED_SHUTDOWN(log)) {
@@ -901,8 +975,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
#ifdef DEBUG
first_iclog = iclog = log->l_iclog;
do {
- if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
- ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
+ if (iclog->ic_state != XLOG_STATE_IOERROR) {
+ ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
ASSERT(iclog->ic_offset == 0);
}
iclog = iclog->ic_next;
@@ -927,21 +1001,17 @@ xfs_log_unmount_write(xfs_mount_t *mp)
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
atomic_inc(&iclog->ic_refcnt);
-
xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
error = xlog_state_release_iclog(log, iclog);
-
- spin_lock(&log->l_icloglock);
-
- if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE
- || iclog->ic_state == XLOG_STATE_DIRTY
- || iclog->ic_state == XLOG_STATE_IOERROR) ) {
-
- xlog_wait(&iclog->ic_force_wait,
- &log->l_icloglock);
- } else {
+ switch (iclog->ic_state) {
+ case XLOG_STATE_ACTIVE:
+ case XLOG_STATE_DIRTY:
+ case XLOG_STATE_IOERROR:
spin_unlock(&log->l_icloglock);
+ break;
+ default:
+ xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+ break;
}
}
@@ -1231,7 +1301,7 @@ xlog_ioend_work(
* didn't succeed.
*/
aborted = true;
- } else if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ } else if (iclog->ic_state == XLOG_STATE_IOERROR) {
aborted = true;
}
@@ -1407,6 +1477,7 @@ xlog_alloc_log(
*/
ASSERT(log->l_iclog_size >= 4096);
for (i = 0; i < log->l_iclog_bufs; i++) {
+ int align_mask = xfs_buftarg_dma_alignment(mp->m_logdev_targp);
size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
sizeof(struct bio_vec);
@@ -1418,8 +1489,8 @@ xlog_alloc_log(
iclog->ic_prev = prev_iclog;
prev_iclog = iclog;
- iclog->ic_data = kmem_alloc_large(log->l_iclog_size,
- KM_MAYFAIL);
+ iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
+ KM_MAYFAIL | KM_ZERO);
if (!iclog->ic_data)
goto out_free_iclog;
#ifdef DEBUG
@@ -1455,7 +1526,7 @@ xlog_alloc_log(
log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI, 0,
- mp->m_fsname);
+ mp->m_super->s_id);
if (!log->l_ioend_workqueue)
goto out_free_iclog;
@@ -1703,7 +1774,7 @@ xlog_write_iclog(
	 * across the log IO to achieve that.
*/
down(&iclog->ic_sema);
- if (unlikely(iclog->ic_state & XLOG_STATE_IOERROR)) {
+ if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR)) {
/*
* It would seem logical to return EIO here, but we rely on
* the log state machine to propagate I/O errors instead of
@@ -1711,13 +1782,11 @@ xlog_write_iclog(
* the buffer manually, the code needs to be kept in sync
* with the I/O completion path.
*/
- xlog_state_done_syncing(iclog, XFS_LI_ABORTED);
+ xlog_state_done_syncing(iclog, true);
up(&iclog->ic_sema);
return;
}
- iclog->ic_io_size = count;
-
bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
@@ -1727,9 +1796,9 @@ xlog_write_iclog(
if (need_flush)
iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
- xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, iclog->ic_io_size);
+ xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count);
if (is_vmalloc_addr(iclog->ic_data))
- flush_kernel_vmap_range(iclog->ic_data, iclog->ic_io_size);
+ flush_kernel_vmap_range(iclog->ic_data, count);
/*
* If this log buffer would straddle the end of the log we will have
@@ -1945,7 +2014,6 @@ xlog_dealloc_log(
/*
* Update counters atomically now that memcpy is done.
*/
-/* ARGSUSED */
static inline void
xlog_state_finish_copy(
struct xlog *log,
@@ -1953,16 +2021,11 @@ xlog_state_finish_copy(
int record_cnt,
int copy_bytes)
{
- spin_lock(&log->l_icloglock);
+ lockdep_assert_held(&log->l_icloglock);
be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
iclog->ic_offset += copy_bytes;
-
- spin_unlock(&log->l_icloglock);
-} /* xlog_state_finish_copy */
-
-
-
+}
/*
* print out info relating to regions written which consume
@@ -2239,15 +2302,18 @@ xlog_write_copy_finish(
int log_offset,
struct xlog_in_core **commit_iclog)
{
+ int error;
+
if (*partial_copy) {
/*
* This iclog has already been marked WANT_SYNC by
* xlog_state_get_iclog_space.
*/
+ spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
*record_cnt = 0;
*data_cnt = 0;
- return xlog_state_release_iclog(log, iclog);
+ goto release_iclog;
}
*partial_copy = 0;
@@ -2255,21 +2321,25 @@ xlog_write_copy_finish(
if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
/* no more space in this iclog - push it. */
+ spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
*record_cnt = 0;
*data_cnt = 0;
- spin_lock(&log->l_icloglock);
xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
-
if (!commit_iclog)
- return xlog_state_release_iclog(log, iclog);
+ goto release_iclog;
+ spin_unlock(&log->l_icloglock);
ASSERT(flags & XLOG_COMMIT_TRANS);
*commit_iclog = iclog;
}
return 0;
+
+release_iclog:
+ error = xlog_state_release_iclog(log, iclog);
+ spin_unlock(&log->l_icloglock);
+ return error;
}
/*
@@ -2331,7 +2401,7 @@ xlog_write(
int contwr = 0;
int record_cnt = 0;
int data_cnt = 0;
- int error;
+ int error = 0;
*start_lsn = 0;
@@ -2482,13 +2552,17 @@ next_lv:
ASSERT(len == 0);
+ spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
- if (!commit_iclog)
- return xlog_state_release_iclog(log, iclog);
+ if (commit_iclog) {
+ ASSERT(flags & XLOG_COMMIT_TRANS);
+ *commit_iclog = iclog;
+ } else {
+ error = xlog_state_release_iclog(log, iclog);
+ }
+ spin_unlock(&log->l_icloglock);
- ASSERT(flags & XLOG_COMMIT_TRANS);
- *commit_iclog = iclog;
- return 0;
+ return error;
}
@@ -2499,21 +2573,35 @@ next_lv:
*****************************************************************************
*/
-/* Clean iclogs starting from the head. This ordering must be
- * maintained, so an iclog doesn't become ACTIVE beyond one that
- * is SYNCING. This is also required to maintain the notion that we use
- * a ordered wait queue to hold off would be writers to the log when every
- * iclog is trying to sync to disk.
+/*
+ * An iclog has just finished IO completion processing, so we need to update
+ * the iclog state and propagate that up into the overall log state. Hence we
+ * prepare the iclog for cleaning, and then clean all the pending dirty iclogs
+ * starting from the head, and then wake up any threads that are waiting for the
+ * iclog to be marked clean.
+ *
+ * The ordering of marking iclogs ACTIVE must be maintained, so an iclog
+ * doesn't become ACTIVE beyond one that is SYNCING. This is also required to
+ * maintain the notion that we use an ordered wait queue to hold off would-be
+ * writers to the log when every iclog is trying to sync to disk.
+ *
+ * Caller must hold the icloglock before calling us.
*
- * State Change: DIRTY -> ACTIVE
+ * State Change: !IOERROR -> DIRTY -> ACTIVE
*/
STATIC void
-xlog_state_clean_log(
- struct xlog *log)
+xlog_state_clean_iclog(
+ struct xlog *log,
+ struct xlog_in_core *dirty_iclog)
{
- xlog_in_core_t *iclog;
- int changed = 0;
+ struct xlog_in_core *iclog;
+ int changed = 0;
+ /* Prepare the completed iclog. */
+ if (dirty_iclog->ic_state != XLOG_STATE_IOERROR)
+ dirty_iclog->ic_state = XLOG_STATE_DIRTY;
+
+ /* Walk all the iclogs to update the ordered active state. */
iclog = log->l_iclog;
do {
if (iclog->ic_state == XLOG_STATE_DIRTY) {
@@ -2551,7 +2639,13 @@ xlog_state_clean_log(
iclog = iclog->ic_next;
} while (iclog != log->l_iclog);
- /* log is locked when we are called */
+
+ /*
+ * Wake up threads waiting in xfs_log_force() for the dirty iclog
+ * to be cleaned.
+ */
+ wake_up_all(&dirty_iclog->ic_force_wait);
+
/*
* Change state for the dummy log recording.
* We usually go to NEED. But we go to NEED2 if the changed indicates
@@ -2585,7 +2679,7 @@ xlog_state_clean_log(
ASSERT(0);
}
}
-} /* xlog_state_clean_log */
+}
STATIC xfs_lsn_t
xlog_get_lowest_lsn(
@@ -2595,7 +2689,8 @@ xlog_get_lowest_lsn(
xfs_lsn_t lowest_lsn = 0, lsn;
do {
- if (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))
+ if (iclog->ic_state == XLOG_STATE_ACTIVE ||
+ iclog->ic_state == XLOG_STATE_DIRTY)
continue;
lsn = be64_to_cpu(iclog->ic_header.h_lsn);
@@ -2606,30 +2701,150 @@ xlog_get_lowest_lsn(
return lowest_lsn;
}
+/*
+ * Completion of an iclog IO does not imply that a transaction has completed, as
+ * transactions can be large enough to span many iclogs. We cannot change the
+ * tail of the log half way through a transaction as this may be the only
+ * transaction in the log and moving the tail to point to the middle of it
+ * will prevent recovery from finding the start of the transaction. Hence we
+ * should only update the last_sync_lsn if this iclog contains transaction
+ * completion callbacks on it.
+ *
+ * We have to do this before we drop the icloglock to ensure we are the only one
+ * that can update it.
+ *
+ * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
+ * the reservation grant head pushing. This is due to the fact that the push
+ * target is bound by the current last_sync_lsn value. Hence if we have a large
+ * amount of log space bound up in this committing transaction then the
+ * last_sync_lsn value may be the limiting factor preventing tail pushing from
+ * freeing space in the log. Hence once we've updated the last_sync_lsn we
+ * should push the AIL to ensure the push target (and hence the grant head) is
+ * no longer bound by the old log head location and can move forwards and make
+ * progress again.
+ */
+static void
+xlog_state_set_callback(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ xfs_lsn_t header_lsn)
+{
+ iclog->ic_state = XLOG_STATE_CALLBACK;
+
+ ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
+ header_lsn) <= 0);
+
+ if (list_empty_careful(&iclog->ic_callbacks))
+ return;
+
+ atomic64_set(&log->l_last_sync_lsn, header_lsn);
+ xlog_grant_push_ail(log, 0);
+}
+
+/*
+ * Return true if we need to stop processing, false to continue to the next
+ * iclog. The caller will need to run callbacks if the iclog is returned in the
+ * XLOG_STATE_CALLBACK state.
+ */
+static bool
+xlog_state_iodone_process_iclog(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ bool *ioerror)
+{
+ xfs_lsn_t lowest_lsn;
+ xfs_lsn_t header_lsn;
+
+ switch (iclog->ic_state) {
+ case XLOG_STATE_ACTIVE:
+ case XLOG_STATE_DIRTY:
+ /*
+ * Skip all iclogs in the ACTIVE & DIRTY states:
+ */
+ return false;
+ case XLOG_STATE_IOERROR:
+ /*
+ * Between marking a filesystem SHUTDOWN and stopping the log,
+ * we do flush all iclogs to disk (if there wasn't a log I/O
+ * error). So, we do want things to go smoothly in case of just
+ * a SHUTDOWN w/o a LOG_IO_ERROR.
+ */
+ *ioerror = true;
+ return false;
+ case XLOG_STATE_DONE_SYNC:
+ /*
+ * Now that we have an iclog that is in the DONE_SYNC state, do
+ * one more check here to see if we have chased our tail around.
+ * If this is not the lowest lsn iclog, then we will leave it
+ * for another completion to process.
+ */
+ header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+ lowest_lsn = xlog_get_lowest_lsn(log);
+ if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
+ return false;
+ xlog_state_set_callback(log, iclog, header_lsn);
+ return false;
+ default:
+ /*
+ * Can only perform callbacks in order. Since this iclog is not
+ * in the DONE_SYNC state, we skip the rest and just try to
+ * clean up.
+ */
+ return true;
+ }
+}
+
+/*
+ * Keep processing entries in the iclog callback list until we come around and
+ * it is empty. We need to atomically see that the list is empty and change the
+ * state to DIRTY so that we don't miss any more callbacks being added.
+ *
+ * This function is called with the icloglock held and returns with it held. We
+ * drop it while running callbacks, however, as holding it over thousands of
+ * callbacks is unnecessary and causes excessive contention if we do.
+ */
+static void
+xlog_state_do_iclog_callbacks(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ bool aborted)
+ __releases(&log->l_icloglock)
+ __acquires(&log->l_icloglock)
+{
+ spin_unlock(&log->l_icloglock);
+ spin_lock(&iclog->ic_callback_lock);
+ while (!list_empty(&iclog->ic_callbacks)) {
+ LIST_HEAD(tmp);
+
+ list_splice_init(&iclog->ic_callbacks, &tmp);
+
+ spin_unlock(&iclog->ic_callback_lock);
+ xlog_cil_process_committed(&tmp, aborted);
+ spin_lock(&iclog->ic_callback_lock);
+ }
+
+ /*
+ * Pick up the icloglock while still holding the callback lock so we
+ * serialise against anyone trying to add more callbacks to this iclog
+ * now we've finished processing.
+ */
+ spin_lock(&log->l_icloglock);
+ spin_unlock(&iclog->ic_callback_lock);
+}
+
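The same "detach the whole pending list under the lock, run the callbacks with the lock dropped" idiom in a self-contained userspace form; the list and callback types are invented for the example.

#include <pthread.h>
#include <stddef.h>

struct cb {
	struct cb *next;
	void (*fn)(void *arg);
	void *arg;
};

struct cb_queue {
	pthread_mutex_t lock;
	struct cb *head;	/* pending callbacks, protected by lock */
};

static void run_callbacks(struct cb_queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->head) {
		/* Detach everything queued so far; new arrivals line up behind us. */
		struct cb *batch = q->head;

		q->head = NULL;
		pthread_mutex_unlock(&q->lock);

		while (batch) {
			struct cb *cur = batch;

			batch = batch->next;
			cur->fn(cur->arg);	/* may block or take a long time */
		}
		pthread_mutex_lock(&q->lock);
	}
	pthread_mutex_unlock(&q->lock);
}

static void say(void *arg) { (void)arg; }

int main(void)
{
	struct cb one = { NULL, say, NULL };
	struct cb_queue q = { PTHREAD_MUTEX_INITIALIZER, &one };

	run_callbacks(&q);
	return 0;
}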
STATIC void
xlog_state_do_callback(
struct xlog *log,
- bool aborted,
- struct xlog_in_core *ciclog)
-{
- xlog_in_core_t *iclog;
- xlog_in_core_t *first_iclog; /* used to know when we've
- * processed all iclogs once */
- int flushcnt = 0;
- xfs_lsn_t lowest_lsn;
- int ioerrors; /* counter: iclogs with errors */
- int loopdidcallbacks; /* flag: inner loop did callbacks*/
- int funcdidcallbacks; /* flag: function did callbacks */
- int repeats; /* for issuing console warnings if
- * looping too many times */
- int wake = 0;
+ bool aborted)
+{
+ struct xlog_in_core *iclog;
+ struct xlog_in_core *first_iclog;
+ bool cycled_icloglock;
+ bool ioerror;
+ int flushcnt = 0;
+ int repeats = 0;
spin_lock(&log->l_icloglock);
- first_iclog = iclog = log->l_iclog;
- ioerrors = 0;
- funcdidcallbacks = 0;
- repeats = 0;
-
do {
/*
* Scan all iclogs starting with the one pointed to by the
@@ -2641,134 +2856,29 @@ xlog_state_do_callback(
*/
first_iclog = log->l_iclog;
iclog = log->l_iclog;
- loopdidcallbacks = 0;
+ cycled_icloglock = false;
+ ioerror = false;
repeats++;
do {
+ if (xlog_state_iodone_process_iclog(log, iclog,
+ &ioerror))
+ break;
- /* skip all iclogs in the ACTIVE & DIRTY states */
- if (iclog->ic_state &
- (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
+ if (iclog->ic_state != XLOG_STATE_CALLBACK &&
+ iclog->ic_state != XLOG_STATE_IOERROR) {
iclog = iclog->ic_next;
continue;
}
/*
- * Between marking a filesystem SHUTDOWN and stopping
- * the log, we do flush all iclogs to disk (if there
- * wasn't a log I/O error). So, we do want things to
- * go smoothly in case of just a SHUTDOWN w/o a
- * LOG_IO_ERROR.
+ * Running callbacks will drop the icloglock which means
+ * we'll have to run at least one more complete loop.
*/
- if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
- /*
- * Can only perform callbacks in order. Since
- * this iclog is not in the DONE_SYNC/
- * DO_CALLBACK state, we skip the rest and
- * just try to clean up. If we set our iclog
- * to DO_CALLBACK, we will not process it when
- * we retry since a previous iclog is in the
- * CALLBACK and the state cannot change since
- * we are holding the l_icloglock.
- */
- if (!(iclog->ic_state &
- (XLOG_STATE_DONE_SYNC |
- XLOG_STATE_DO_CALLBACK))) {
- if (ciclog && (ciclog->ic_state ==
- XLOG_STATE_DONE_SYNC)) {
- ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
- }
- break;
- }
- /*
- * We now have an iclog that is in either the
- * DO_CALLBACK or DONE_SYNC states. The other
- * states (WANT_SYNC, SYNCING, or CALLBACK were
- * caught by the above if and are going to
- * clean (i.e. we aren't doing their callbacks)
- * see the above if.
- */
-
- /*
- * We will do one more check here to see if we
- * have chased our tail around.
- */
-
- lowest_lsn = xlog_get_lowest_lsn(log);
- if (lowest_lsn &&
- XFS_LSN_CMP(lowest_lsn,
- be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
- iclog = iclog->ic_next;
- continue; /* Leave this iclog for
- * another thread */
- }
-
- iclog->ic_state = XLOG_STATE_CALLBACK;
-
-
- /*
- * Completion of a iclog IO does not imply that
- * a transaction has completed, as transactions
- * can be large enough to span many iclogs. We
- * cannot change the tail of the log half way
- * through a transaction as this may be the only
- * transaction in the log and moving th etail to
- * point to the middle of it will prevent
- * recovery from finding the start of the
- * transaction. Hence we should only update the
- * last_sync_lsn if this iclog contains
- * transaction completion callbacks on it.
- *
- * We have to do this before we drop the
- * icloglock to ensure we are the only one that
- * can update it.
- */
- ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
- be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
- if (!list_empty_careful(&iclog->ic_callbacks))
- atomic64_set(&log->l_last_sync_lsn,
- be64_to_cpu(iclog->ic_header.h_lsn));
-
- } else
- ioerrors++;
-
- spin_unlock(&log->l_icloglock);
-
- /*
- * Keep processing entries in the callback list until
- * we come around and it is empty. We need to
- * atomically see that the list is empty and change the
- * state to DIRTY so that we don't miss any more
- * callbacks being added.
- */
- spin_lock(&iclog->ic_callback_lock);
- while (!list_empty(&iclog->ic_callbacks)) {
- LIST_HEAD(tmp);
-
- list_splice_init(&iclog->ic_callbacks, &tmp);
-
- spin_unlock(&iclog->ic_callback_lock);
- xlog_cil_process_committed(&tmp, aborted);
- spin_lock(&iclog->ic_callback_lock);
- }
-
- loopdidcallbacks++;
- funcdidcallbacks++;
-
- spin_lock(&log->l_icloglock);
- spin_unlock(&iclog->ic_callback_lock);
- if (!(iclog->ic_state & XLOG_STATE_IOERROR))
- iclog->ic_state = XLOG_STATE_DIRTY;
-
- /*
- * Transition from DIRTY to ACTIVE if applicable.
- * NOP if STATE_IOERROR.
- */
- xlog_state_clean_log(log);
-
- /* wake up threads waiting in xfs_log_force() */
- wake_up_all(&iclog->ic_force_wait);
+ cycled_icloglock = true;
+ xlog_state_do_iclog_callbacks(log, iclog, aborted);
+ xlog_state_clean_iclog(log, iclog);
iclog = iclog->ic_next;
} while (first_iclog != iclog);
@@ -2779,50 +2889,13 @@ xlog_state_do_callback(
"%s: possible infinite loop (%d iterations)",
__func__, flushcnt);
}
- } while (!ioerrors && loopdidcallbacks);
+ } while (!ioerror && cycled_icloglock);
-#ifdef DEBUG
- /*
- * Make one last gasp attempt to see if iclogs are being left in limbo.
- * If the above loop finds an iclog earlier than the current iclog and
- * in one of the syncing states, the current iclog is put into
- * DO_CALLBACK and the callbacks are deferred to the completion of the
- * earlier iclog. Walk the iclogs in order and make sure that no iclog
- * is in DO_CALLBACK unless an earlier iclog is in one of the syncing
- * states.
- *
- * Note that SYNCING|IOABORT is a valid state so we cannot just check
- * for ic_state == SYNCING.
- */
- if (funcdidcallbacks) {
- first_iclog = iclog = log->l_iclog;
- do {
- ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
- /*
- * Terminate the loop if iclogs are found in states
- * which will cause other threads to clean up iclogs.
- *
- * SYNCING - i/o completion will go through logs
- * DONE_SYNC - interrupt thread should be waiting for
- * l_icloglock
- * IOERROR - give up hope all ye who enter here
- */
- if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
- iclog->ic_state & XLOG_STATE_SYNCING ||
- iclog->ic_state == XLOG_STATE_DONE_SYNC ||
- iclog->ic_state == XLOG_STATE_IOERROR )
- break;
- iclog = iclog->ic_next;
- } while (first_iclog != iclog);
- }
-#endif
+ if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE ||
+ log->l_iclog->ic_state == XLOG_STATE_IOERROR)
+ wake_up_all(&log->l_flush_wait);
- if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
- wake = 1;
spin_unlock(&log->l_icloglock);
-
- if (wake)
- wake_up_all(&log->l_flush_wait);
}
@@ -2848,8 +2921,6 @@ xlog_state_done_syncing(
spin_lock(&log->l_icloglock);
- ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
- iclog->ic_state == XLOG_STATE_IOERROR);
ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
/*
@@ -2858,8 +2929,10 @@ xlog_state_done_syncing(
* and none should ever be attempted to be written to disk
* again.
*/
- if (iclog->ic_state != XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_SYNCING)
iclog->ic_state = XLOG_STATE_DONE_SYNC;
+ else
+ ASSERT(iclog->ic_state == XLOG_STATE_IOERROR);
/*
* Someone could be sleeping prior to writing out the next
@@ -2868,7 +2941,7 @@ xlog_state_done_syncing(
*/
wake_up_all(&iclog->ic_write_wait);
spin_unlock(&log->l_icloglock);
- xlog_state_do_callback(log, aborted, iclog); /* also cleans log */
+ xlog_state_do_callback(log, aborted); /* also cleans log */
} /* xlog_state_done_syncing */
@@ -2902,7 +2975,6 @@ xlog_state_get_iclog_space(
int log_offset;
xlog_rec_header_t *head;
xlog_in_core_t *iclog;
- int error;
restart:
spin_lock(&log->l_icloglock);
@@ -2951,24 +3023,22 @@ restart:
* can fit into remaining data section.
*/
if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
+ int error = 0;
+
xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
/*
- * If I'm the only one writing to this iclog, sync it to disk.
- * We need to do an atomic compare and decrement here to avoid
- * racing with concurrent atomic_dec_and_lock() calls in
+ * If we are the only one writing to this iclog, sync it to
+ * disk. We need to do an atomic compare and decrement here to
+ * avoid racing with concurrent atomic_dec_and_lock() calls in
* xlog_state_release_iclog() when there is more than one
* reference to the iclog.
*/
- if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
- /* we are the only one */
- spin_unlock(&log->l_icloglock);
+ if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
error = xlog_state_release_iclog(log, iclog);
- if (error)
- return error;
- } else {
- spin_unlock(&log->l_icloglock);
- }
+ spin_unlock(&log->l_icloglock);
+ if (error)
+ return error;
goto restart;
}
@@ -3080,60 +3150,6 @@ xlog_ungrant_log_space(
}
/*
- * Flush iclog to disk if this is the last reference to the given iclog and
- * the WANT_SYNC bit is set.
- *
- * When this function is entered, the iclog is not necessarily in the
- * WANT_SYNC state. It may be sitting around waiting to get filled.
- *
- *
- */
-STATIC int
-xlog_state_release_iclog(
- struct xlog *log,
- struct xlog_in_core *iclog)
-{
- int sync = 0; /* do we sync? */
-
- if (iclog->ic_state & XLOG_STATE_IOERROR)
- return -EIO;
-
- ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
- if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
- return 0;
-
- if (iclog->ic_state & XLOG_STATE_IOERROR) {
- spin_unlock(&log->l_icloglock);
- return -EIO;
- }
- ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
- iclog->ic_state == XLOG_STATE_WANT_SYNC);
-
- if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
- /* update tail before writing to iclog */
- xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
- sync++;
- iclog->ic_state = XLOG_STATE_SYNCING;
- iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
- xlog_verify_tail_lsn(log, iclog, tail_lsn);
- /* cycle incremented when incrementing curr_block */
- }
- spin_unlock(&log->l_icloglock);
-
- /*
- * We let the log lock go, so it's possible that we hit a log I/O
- * error or some other SHUTDOWN condition that marks the iclog
- * as XLOG_STATE_IOERROR before the bwrite. However, we know that
- * this iclog has consistent data, so we ignore IOERROR
- * flags after this point.
- */
- if (sync)
- xlog_sync(log, iclog);
- return 0;
-} /* xlog_state_release_iclog */
-
-
-/*
* This routine will mark the current iclog in the ring as WANT_SYNC
* and move the current iclog pointer to the next iclog in the ring.
* When this routine is called from xlog_state_get_iclog_space(), the
@@ -3226,7 +3242,7 @@ xfs_log_force(
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
if (iclog->ic_state == XLOG_STATE_DIRTY ||
@@ -3256,12 +3272,9 @@ xfs_log_force(
atomic_inc(&iclog->ic_refcnt);
lsn = be64_to_cpu(iclog->ic_header.h_lsn);
xlog_state_switch_iclogs(log, iclog, 0);
- spin_unlock(&log->l_icloglock);
-
if (xlog_state_release_iclog(log, iclog))
- return -EIO;
+ goto out_error;
- spin_lock(&log->l_icloglock);
if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
iclog->ic_state == XLOG_STATE_DIRTY)
goto out_unlock;
@@ -3286,11 +3299,11 @@ xfs_log_force(
if (!(flags & XFS_LOG_SYNC))
goto out_unlock;
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
XFS_STATS_INC(mp, xs_log_force_sleep);
xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
return -EIO;
return 0;
@@ -3315,7 +3328,7 @@ __xfs_log_force_lsn(
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
@@ -3344,10 +3357,8 @@ __xfs_log_force_lsn(
* will go out then.
*/
if (!already_slept &&
- (iclog->ic_prev->ic_state &
- (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
- ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
-
+ (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
+ iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
XFS_STATS_INC(mp, xs_log_force_sleep);
xlog_wait(&iclog->ic_prev->ic_write_wait,
@@ -3356,24 +3367,23 @@ __xfs_log_force_lsn(
}
atomic_inc(&iclog->ic_refcnt);
xlog_state_switch_iclogs(log, iclog, 0);
- spin_unlock(&log->l_icloglock);
if (xlog_state_release_iclog(log, iclog))
- return -EIO;
+ goto out_error;
if (log_flushed)
*log_flushed = 1;
- spin_lock(&log->l_icloglock);
}
if (!(flags & XFS_LOG_SYNC) ||
- (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY)))
+ (iclog->ic_state == XLOG_STATE_ACTIVE ||
+ iclog->ic_state == XLOG_STATE_DIRTY))
goto out_unlock;
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
XFS_STATS_INC(mp, xs_log_force_sleep);
xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
return -EIO;
return 0;
@@ -3436,8 +3446,8 @@ xlog_state_want_sync(
if (iclog->ic_state == XLOG_STATE_ACTIVE) {
xlog_state_switch_iclogs(log, iclog, 0);
} else {
- ASSERT(iclog->ic_state &
- (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
+ ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+ iclog->ic_state == XLOG_STATE_IOERROR);
}
}
@@ -3458,7 +3468,7 @@ xfs_log_ticket_put(
{
ASSERT(atomic_read(&ticket->t_ref) > 0);
if (atomic_dec_and_test(&ticket->t_ref))
- kmem_zone_free(xfs_log_ticket_zone, ticket);
+ kmem_cache_free(xfs_log_ticket_zone, ticket);
}
xlog_ticket_t *
@@ -3814,7 +3824,7 @@ xlog_state_ioerror(
xlog_in_core_t *iclog, *ic;
iclog = log->l_iclog;
- if (! (iclog->ic_state & XLOG_STATE_IOERROR)) {
+ if (iclog->ic_state != XLOG_STATE_IOERROR) {
/*
* Mark all the incore logs IOERROR.
* From now on, no log flushes will result.
@@ -3874,7 +3884,7 @@ xfs_log_force_umount(
* Somebody could've already done the hard work for us.
* No need to get locks for this.
*/
- if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
+ if (logerror && log->l_iclog->ic_state == XLOG_STATE_IOERROR) {
ASSERT(XLOG_FORCED_SHUTDOWN(log));
return 1;
}
@@ -3922,22 +3932,11 @@ xfs_log_force_umount(
* item committed callback functions will do this again under lock to
* avoid races.
*/
+ spin_lock(&log->l_cilp->xc_push_lock);
wake_up_all(&log->l_cilp->xc_commit_wait);
- xlog_state_do_callback(log, true, NULL);
+ spin_unlock(&log->l_cilp->xc_push_lock);
+ xlog_state_do_callback(log, true);
-#ifdef XFSERRORDEBUG
- {
- xlog_in_core_t *iclog;
-
- spin_lock(&log->l_icloglock);
- iclog = log->l_iclog;
- do {
- ASSERT(iclog->ic_callback == 0);
- iclog = iclog->ic_next;
- } while (iclog != log->l_iclog);
- spin_unlock(&log->l_icloglock);
- }
-#endif
/* return non-zero if log IOERROR transition had already happened */
return retval;
}
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index fa5602d0fd7f..48435cf2aa16 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -38,7 +38,7 @@ xlog_cil_ticket_alloc(
struct xlog_ticket *tic;
tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
- KM_SLEEP|KM_NOFS);
+ KM_NOFS);
/*
* set the current reservation to zero so we know to steal the basic
@@ -179,14 +179,14 @@ xlog_cil_alloc_shadow_bufs(
/*
* We free and allocate here as a realloc would copy
- * unecessary data. We don't use kmem_zalloc() for the
+ * unnecessary data. We don't use kmem_zalloc() for the
* same reason - we don't need to zero the data area in
* the buffer, only the log vector header and the iovec
* storage.
*/
kmem_free(lip->li_lv_shadow);
- lv = kmem_alloc_large(buf_size, KM_SLEEP | KM_NOFS);
+ lv = kmem_alloc_large(buf_size, KM_NOFS);
memset(lv, 0, xlog_cil_iovec_space(niovecs));
lv->lv_item = lip;
@@ -660,7 +660,7 @@ xlog_cil_push(
if (!cil)
return 0;
- new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
+ new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
new_ctx->ticket = xlog_cil_ticket_alloc(log);
down_write(&cil->xc_ctx_lock);
@@ -682,7 +682,7 @@ xlog_cil_push(
}
- /* check for a previously pushed seqeunce */
+ /* check for a previously pushed sequence */
if (push_seq < cil->xc_ctx->sequence) {
spin_unlock(&cil->xc_push_lock);
goto out_skip;
@@ -847,7 +847,7 @@ restart:
goto out_abort;
spin_lock(&commit_iclog->ic_callback_lock);
- if (commit_iclog->ic_state & XLOG_STATE_IOERROR) {
+ if (commit_iclog->ic_state == XLOG_STATE_IOERROR) {
spin_unlock(&commit_iclog->ic_callback_lock);
goto out_abort;
}
@@ -1179,11 +1179,11 @@ xlog_cil_init(
struct xfs_cil *cil;
struct xfs_cil_ctx *ctx;
- cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
+ cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
if (!cil)
return -ENOMEM;
- ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
+ ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);
if (!ctx) {
kmem_free(cil);
return -ENOMEM;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index b880c23cb6e4..b192c5a9f9fd 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -40,17 +40,15 @@ static inline uint xlog_get_client_id(__be32 i)
/*
* In core log state
*/
-#define XLOG_STATE_ACTIVE 0x0001 /* Current IC log being written to */
-#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */
-#define XLOG_STATE_SYNCING 0x0004 /* This IC log is syncing */
-#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */
-#define XLOG_STATE_DO_CALLBACK \
- 0x0010 /* Process callback functions */
-#define XLOG_STATE_CALLBACK 0x0020 /* Callback functions now */
-#define XLOG_STATE_DIRTY 0x0040 /* Dirty IC log, not ready for ACTIVE status*/
-#define XLOG_STATE_IOERROR 0x0080 /* IO error happened in sync'ing log */
-#define XLOG_STATE_ALL 0x7FFF /* All possible valid flags */
-#define XLOG_STATE_NOTUSED 0x8000 /* This IC log not being used */
+enum xlog_iclog_state {
+ XLOG_STATE_ACTIVE, /* Current IC log being written to */
+ XLOG_STATE_WANT_SYNC, /* Want to sync this iclog; no more writes */
+ XLOG_STATE_SYNCING, /* This IC log is syncing */
+ XLOG_STATE_DONE_SYNC, /* Done syncing to disk */
+ XLOG_STATE_CALLBACK, /* Callback functions now */
+ XLOG_STATE_DIRTY, /* Dirty IC log, not ready for ACTIVE status */
+ XLOG_STATE_IOERROR, /* IO error happened in sync'ing log */
+};
/*
* Flags to log ticket
@@ -179,8 +177,6 @@ typedef struct xlog_ticket {
* - ic_next is the pointer to the next iclog in the ring.
* - ic_log is a pointer back to the global log structure.
* - ic_size is the full size of the log buffer, minus the cycle headers.
- * - ic_io_size is the size of the currently pending log buffer write, which
- * might be smaller than ic_size
* - ic_offset is the current number of bytes written to in this iclog.
* - ic_refcnt is bumped when someone is writing to the log.
* - ic_state is the state of the iclog.
@@ -205,9 +201,8 @@ typedef struct xlog_in_core {
struct xlog_in_core *ic_prev;
struct xlog *ic_log;
u32 ic_size;
- u32 ic_io_size;
u32 ic_offset;
- unsigned short ic_state;
+ enum xlog_iclog_state ic_state;
char *ic_datap; /* pointer to iclog data */
/* Callback structures need their own cacheline */
@@ -399,8 +394,6 @@ struct xlog {
/* The following field are used for debugging; need to hold icloglock */
#ifdef DEBUG
void *l_iclog_bak[XLOG_MAX_ICLOGS];
- /* log record crc error injection factor */
- uint32_t l_badcrc_factor;
#endif
/* log recovery lsn tracking (for buffer submission */
xfs_lsn_t l_recovery_lsn;
@@ -542,7 +535,11 @@ xlog_cil_force(struct xlog *log)
* by a spinlock. This matches the semantics of all the wait queues used in the
* log code.
*/
-static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
+static inline void
+xlog_wait(
+ struct wait_queue_head *wq,
+ struct spinlock *lock)
+ __releases(lock)
{
DECLARE_WAITQUEUE(wait, current);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 13d1d3e95b88..99ec3fba4548 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -97,14 +97,15 @@ xlog_alloc_buffer(
struct xlog *log,
int nbblks)
{
+ int align_mask = xfs_buftarg_dma_alignment(log->l_targ);
+
/*
* Pass log block 0 since we don't have an addr yet, buffer will be
* verified on read.
*/
- if (!xlog_verify_bno(log, 0, nbblks)) {
+ if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return NULL;
}
@@ -125,7 +126,7 @@ xlog_alloc_buffer(
if (nbblks > 1 && log->l_sectBBsize > 1)
nbblks += log->l_sectBBsize;
nbblks = round_up(nbblks, log->l_sectBBsize);
- return kmem_alloc_large(BBTOB(nbblks), KM_MAYFAIL);
+ return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
}
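A userspace sketch of the sizing and alignment this helper performs, using posix_memalign in place of kmem_alloc_io; the sector size and helper name are assumptions made for the example.

#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>
#include <string.h>

#define SECTOR_SIZE	512u

static void *alloc_log_buffer(size_t bytes, size_t dma_align)
{
	void *p;

	/* Round the request up to whole sectors, as the log reads whole sectors. */
	bytes = (bytes + SECTOR_SIZE - 1) & ~(size_t)(SECTOR_SIZE - 1);

	/* Align the start so the block layer can DMA straight into the buffer. */
	if (posix_memalign(&p, dma_align, bytes))
		return NULL;

	memset(p, 0, bytes);	/* mirrors KM_ZERO */
	return p;
}

int main(void)
{
	void *buf = alloc_log_buffer(1000, 4096);

	free(buf);
	return buf ? 0 : 1;
}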
/*
@@ -150,11 +151,10 @@ xlog_do_io(
{
int error;
- if (!xlog_verify_bno(log, blk_no, nbblks)) {
+ if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
xfs_warn(log->l_mp,
"Invalid log block/length (0x%llx, 0x%x) for buffer",
blk_no, nbblks);
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return -EFSCORRUPTED;
}
@@ -242,19 +242,17 @@ xlog_header_check_recover(
* (XLOG_FMT_UNKNOWN). This stops us from trying to recover
* a dirty log created in IRIX.
*/
- if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
+ if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
xfs_warn(mp,
"dirty log written in incompatible format - can't recover");
xlog_header_check_dump(mp, head);
- XFS_ERROR_REPORT("xlog_header_check_recover(1)",
- XFS_ERRLEVEL_HIGH, mp);
return -EFSCORRUPTED;
- } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
+ }
+ if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
+ &head->h_fs_uuid))) {
xfs_warn(mp,
"dirty log entry has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head);
- XFS_ERROR_REPORT("xlog_header_check_recover(2)",
- XFS_ERRLEVEL_HIGH, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -277,11 +275,10 @@ xlog_header_check_mount(
* by IRIX and continue.
*/
xfs_warn(mp, "null uuid in log - IRIX style log");
- } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
+ } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
+ &head->h_fs_uuid))) {
xfs_warn(mp, "log has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head);
- XFS_ERROR_REPORT("xlog_header_check_mount",
- XFS_ERRLEVEL_HIGH, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -469,7 +466,7 @@ xlog_find_verify_log_record(
xfs_warn(log->l_mp,
"Log inconsistent (didn't find previous header)");
ASSERT(0);
- error = -EIO;
+ error = -EFSCORRUPTED;
goto out;
}
@@ -1345,10 +1342,11 @@ xlog_find_tail(
error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
&rhead_blk, &rhead, &wrapped);
if (error < 0)
- return error;
+ goto done;
if (!error) {
xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
- return -EIO;
+ error = -EFSCORRUPTED;
+ goto done;
}
*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
@@ -1697,11 +1695,10 @@ xlog_clear_stale_blocks(
* the distance from the beginning of the log to the
* tail.
*/
- if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
- XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp,
+ head_block < tail_block ||
+ head_block >= log->l_logBBsize))
return -EFSCORRUPTED;
- }
tail_distance = tail_block + (log->l_logBBsize - head_block);
} else {
/*
@@ -1709,11 +1706,10 @@ xlog_clear_stale_blocks(
* so the distance from the head to the tail is just
* the tail block minus the head block.
*/
- if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
- XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp,
+ head_block >= tail_block ||
+ head_cycle != tail_cycle + 1))
return -EFSCORRUPTED;
- }
tail_distance = tail_block - head_block;
}
@@ -1960,7 +1956,7 @@ xlog_recover_buffer_pass1(
}
}
- bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
+ bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
bcp->bc_blkno = buf_f->blf_blkno;
bcp->bc_len = buf_f->blf_len;
bcp->bc_refcount = 1;
@@ -2133,13 +2129,11 @@ xlog_recover_do_inode_buffer(
*/
logged_nextp = item->ri_buf[item_index].i_addr +
next_unlinked_offset - reg_buf_offset;
- if (unlikely(*logged_nextp == 0)) {
+ if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
xfs_alert(mp,
"Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
"Trying to replay bad (0) inode di_next_unlinked field.",
item, bp);
- XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
- XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
@@ -2574,6 +2568,7 @@ xlog_recover_do_reg_buffer(
int bit;
int nbits;
xfs_failaddr_t fa;
+ const size_t size_disk_dquot = sizeof(struct xfs_disk_dquot);
trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
@@ -2616,7 +2611,7 @@ xlog_recover_do_reg_buffer(
"XFS: NULL dquot in %s.", __func__);
goto next;
}
- if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
+ if (item->ri_buf[i].i_len < size_disk_dquot) {
xfs_alert(mp,
"XFS: dquot too small (%d) in %s.",
item->ri_buf[i].i_len, __func__);
@@ -2930,7 +2925,7 @@ xlog_recover_inode_pass2(
if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
in_f = item->ri_buf[0].i_addr;
} else {
- in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
+ in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
need_free = 1;
error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
if (error)
@@ -2967,22 +2962,18 @@ xlog_recover_inode_pass2(
* Make sure the place we're flushing out to really looks
* like an inode!
*/
- if (unlikely(!xfs_verify_magic16(bp, dip->di_magic))) {
+ if (XFS_IS_CORRUPT(mp, !xfs_verify_magic16(bp, dip->di_magic))) {
xfs_alert(mp,
"%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
__func__, dip, bp, in_f->ilf_ino);
- XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
- XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto out_release;
}
ldip = item->ri_buf[1].i_addr;
- if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
+ if (XFS_IS_CORRUPT(mp, ldip->di_magic != XFS_DINODE_MAGIC)) {
xfs_alert(mp,
"%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
__func__, item, in_f->ilf_ino);
- XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
- XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto out_release;
}
@@ -3164,7 +3155,7 @@ xlog_recover_inode_pass2(
default:
xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
ASSERT(0);
- error = -EIO;
+ error = -EFSCORRUPTED;
goto out_release;
}
}
@@ -3245,12 +3236,12 @@ xlog_recover_dquot_pass2(
recddq = item->ri_buf[1].i_addr;
if (recddq == NULL) {
xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
- return -EIO;
+ return -EFSCORRUPTED;
}
- if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
+ if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) {
xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
item->ri_buf[1].i_len, __func__);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -3277,7 +3268,7 @@ xlog_recover_dquot_pass2(
if (fa) {
xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
dq_f->qlf_id, fa);
- return -EIO;
+ return -EFSCORRUPTED;
}
ASSERT(dq_f->qlf_len == 1);
@@ -3535,6 +3526,7 @@ xfs_cui_copy_format(
memcpy(dst_cui_fmt, src_cui_fmt, len);
return 0;
}
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
}
@@ -3599,8 +3591,10 @@ xlog_recover_cud_pass2(
struct xfs_ail *ailp = log->l_ailp;
cud_formatp = item->ri_buf[0].i_addr;
- if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
cui_id = cud_formatp->cud_cui_id;
/*
@@ -3652,6 +3646,7 @@ xfs_bui_copy_format(
memcpy(dst_bui_fmt, src_bui_fmt, len);
return 0;
}
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
}
@@ -3675,8 +3670,10 @@ xlog_recover_bui_pass2(
bui_formatp = item->ri_buf[0].i_addr;
- if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
+ if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
buip = xfs_bui_init(mp);
error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
if (error) {
@@ -3718,8 +3715,10 @@ xlog_recover_bud_pass2(
struct xfs_ail *ailp = log->l_ailp;
bud_formatp = item->ri_buf[0].i_addr;
- if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
bui_id = bud_formatp->bud_bui_id;
/*
@@ -4016,7 +4015,7 @@ xlog_recover_commit_pass1(
xfs_warn(log->l_mp, "%s: invalid item type (%d)",
__func__, ITEM_TYPE(item));
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -4064,7 +4063,7 @@ xlog_recover_commit_pass2(
xfs_warn(log->l_mp, "%s: invalid item type (%d)",
__func__, ITEM_TYPE(item));
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -4161,7 +4160,7 @@ xlog_recover_add_item(
{
xlog_recover_item_t *item;
- item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
+ item = kmem_zalloc(sizeof(xlog_recover_item_t), 0);
INIT_LIST_HEAD(&item->ri_list);
list_add_tail(&item->ri_list, head);
}
@@ -4185,7 +4184,7 @@ xlog_recover_add_to_cont_trans(
ASSERT(len <= sizeof(struct xfs_trans_header));
if (len > sizeof(struct xfs_trans_header)) {
xfs_warn(log->l_mp, "%s: bad header length", __func__);
- return -EIO;
+ return -EFSCORRUPTED;
}
xlog_recover_add_item(&trans->r_itemq);
@@ -4201,7 +4200,7 @@ xlog_recover_add_to_cont_trans(
old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
old_len = item->ri_buf[item->ri_cnt-1].i_len;
- ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
+ ptr = kmem_realloc(old_ptr, len + old_len, 0);
memcpy(&ptr[old_len], dp, len);
item->ri_buf[item->ri_cnt-1].i_len += len;
item->ri_buf[item->ri_cnt-1].i_addr = ptr;
@@ -4241,13 +4240,13 @@ xlog_recover_add_to_trans(
xfs_warn(log->l_mp, "%s: bad header magic number",
__func__);
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
if (len > sizeof(struct xfs_trans_header)) {
xfs_warn(log->l_mp, "%s: bad header length", __func__);
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -4261,7 +4260,7 @@ xlog_recover_add_to_trans(
return 0;
}
- ptr = kmem_alloc(len, KM_SLEEP);
+ ptr = kmem_alloc(len, 0);
memcpy(ptr, dp, len);
in_f = (struct xfs_inode_log_format *)ptr;
@@ -4283,15 +4282,24 @@ xlog_recover_add_to_trans(
in_f->ilf_size);
ASSERT(0);
kmem_free(ptr);
- return -EIO;
+ return -EFSCORRUPTED;
}
item->ri_total = in_f->ilf_size;
item->ri_buf =
kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
- KM_SLEEP);
+ 0);
+ }
+
+ if (item->ri_total <= item->ri_cnt) {
+ xfs_warn(log->l_mp,
+ "log item region count (%d) overflowed size (%d)",
+ item->ri_cnt, item->ri_total);
+ ASSERT(0);
+ kmem_free(ptr);
+ return -EFSCORRUPTED;
}
- ASSERT(item->ri_total > item->ri_cnt);
+
/* Description region is ri_buf[0] */
item->ri_buf[item->ri_cnt].i_addr = ptr;
item->ri_buf[item->ri_cnt].i_len = len;
@@ -4378,7 +4386,7 @@ xlog_recovery_process_trans(
default:
xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
ASSERT(0);
- error = -EIO;
+ error = -EFSCORRUPTED;
break;
}
if (error || freeit)
@@ -4423,7 +4431,7 @@ xlog_recover_ophdr_to_trans(
* This is a new transaction so allocate a new recovery container to
* hold the recovery ops that will follow.
*/
- trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
+ trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
trans->r_log_tid = tid;
trans->r_lsn = be64_to_cpu(rhead->h_lsn);
INIT_LIST_HEAD(&trans->r_itemq);
@@ -4458,7 +4466,7 @@ xlog_recover_process_ophdr(
xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
__func__, ohead->oh_clientid);
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -4468,7 +4476,7 @@ xlog_recover_process_ophdr(
if (dp + len > end) {
xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
WARN_ON(1);
- return -EIO;
+ return -EFSCORRUPTED;
}
trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
@@ -5022,16 +5030,27 @@ xlog_recover_process_one_iunlink(
}
/*
- * xlog_iunlink_recover
+ * Recover AGI unlinked lists
*
- * This is called during recovery to process any inodes which
- * we unlinked but not freed when the system crashed. These
- * inodes will be on the lists in the AGI blocks. What we do
- * here is scan all the AGIs and fully truncate and free any
- * inodes found on the lists. Each inode is removed from the
- * lists when it has been fully truncated and is freed. The
- * freeing of the inode and its removal from the list must be
- * atomic.
+ * This is called during recovery to process any inodes which we unlinked but
+ * not freed when the system crashed. These inodes will be on the lists in the
+ * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
+ * any inodes found on the lists. Each inode is removed from the lists when it
+ * has been fully truncated and is freed. The freeing of the inode and its
+ * removal from the list must be atomic.
+ *
+ * If everything we touch in the agi processing loop is already in memory, this
+ * loop can hold the cpu for a long time. It runs without lock contention,
+ * memory allocation contention, the need to wait for IO, etc., and so will run
+ * until we either run out of inodes to process, run low on memory, or run out
+ * of log space.
+ *
+ * This behaviour is bad for latency on single CPU and non-preemptible kernels,
+ * and can prevent other filesystem work (such as CIL pushes) from running. This
+ * can lead to deadlocks if the recovery process runs out of log reservation
+ * space. Hence we need to yield the CPU when there is other kernel work
+ * scheduled on this CPU to ensure other scheduled work can run without undue
+ * latency.
*/
STATIC void
xlog_recover_process_iunlinks(
@@ -5078,6 +5097,7 @@ xlog_recover_process_iunlinks(
while (agino != NULLAGINO) {
agino = xlog_recover_process_one_iunlink(mp,
agno, agino, bucket);
+ cond_resched();
}
}
xfs_buf_rele(agibp);
@@ -5158,8 +5178,10 @@ xlog_recover_process(
* If the filesystem is CRC enabled, this mismatch becomes a
* fatal log corruption failure.
*/
- if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
+ if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
}
xlog_unpack_data(rhead, dp, log);
@@ -5176,31 +5198,25 @@ xlog_valid_rec_header(
{
int hlen;
- if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
- XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp,
+ rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
return -EFSCORRUPTED;
- }
- if (unlikely(
- (!rhead->h_version ||
- (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
+ if (XFS_IS_CORRUPT(log->l_mp,
+ (!rhead->h_version ||
+ (be32_to_cpu(rhead->h_version) &
+ (~XLOG_VERSION_OKBITS))))) {
xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
__func__, be32_to_cpu(rhead->h_version));
- return -EIO;
+ return -EFSCORRUPTED;
}
/* LR body must have data or it wouldn't have been written */
hlen = be32_to_cpu(rhead->h_len);
- if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
- XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > INT_MAX))
return -EFSCORRUPTED;
- }
- if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
- XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp,
+ blkno > log->l_logBBsize || blkno > INT_MAX))
return -EFSCORRUPTED;
- }
return 0;
}
@@ -5282,8 +5298,12 @@ xlog_do_recovery_pass(
"invalid iclog size (%d bytes), using lsunit (%d bytes)",
h_size, log->l_mp->m_logbsize);
h_size = log->l_mp->m_logbsize;
- } else
- return -EFSCORRUPTED;
+ } else {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
+ log->l_mp);
+ error = -EFSCORRUPTED;
+ goto bread_err1;
+ }
}
if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
@@ -5527,7 +5547,7 @@ xlog_do_log_recovery(
*/
log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
sizeof(struct list_head),
- KM_SLEEP);
+ 0);
for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
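
Most of the log-recovery hunks above convert open-coded unlikely() checks followed by XFS_ERROR_REPORT() into the XFS_IS_CORRUPT() helper, and return -EFSCORRUPTED rather than -EIO for on-disk corruption. The macro itself is defined elsewhere in the series; the following is only a rough sketch of its shape (EXAMPLE_IS_CORRUPT and the exact reporting call are assumptions) to show why callers can test it directly inside an if ():

#define EXAMPLE_IS_CORRUPT(mp, expr)                                    \
        (unlikely(expr) ?                                               \
                (xfs_error_report(#expr, XFS_ERRLEVEL_LOW, (mp),        \
                                  __FILE__, __LINE__, __return_address),\
                 true) : false)

        /* typical caller, mirroring the converted sites above */
        if (EXAMPLE_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT)))
                return -EFSCORRUPTED;
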
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 9804efe525a9..e0f9d3b6abe9 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -20,8 +20,8 @@ __xfs_printk(
const struct xfs_mount *mp,
struct va_format *vaf)
{
- if (mp && mp->m_fsname) {
- printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+ if (mp && mp->m_super) {
+ printk("%sXFS (%s): %pV\n", level, mp->m_super->s_id, vaf);
return;
}
printk("%sXFS: %pV\n", level, vaf);
@@ -86,17 +86,25 @@ xfs_alert_tag(
}
void
-asswarn(char *expr, char *file, int line)
+asswarn(
+ struct xfs_mount *mp,
+ char *expr,
+ char *file,
+ int line)
{
- xfs_warn(NULL, "Assertion failed: %s, file: %s, line: %d",
+ xfs_warn(mp, "Assertion failed: %s, file: %s, line: %d",
expr, file, line);
WARN_ON(1);
}
void
-assfail(char *expr, char *file, int line)
+assfail(
+ struct xfs_mount *mp,
+ char *expr,
+ char *file,
+ int line)
{
- xfs_emerg(NULL, "Assertion failed: %s, file: %s, line: %d",
+ xfs_emerg(mp, "Assertion failed: %s, file: %s, line: %d",
expr, file, line);
if (xfs_globals.bug_on_assert)
BUG();
@@ -105,7 +113,7 @@ assfail(char *expr, char *file, int line)
}
void
-xfs_hex_dump(void *p, int length)
+xfs_hex_dump(const void *p, int length)
{
print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
}
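
With the mount pointer threaded into assfail() and asswarn(), assertion failures can name the filesystem they fired on instead of a bare "XFS:" prefix. A hypothetical wrapper (EXAMPLE_ASSERT_MP is not part of the patch) showing how a caller with an xfs_mount in scope could use the new signature:

#define EXAMPLE_ASSERT_MP(mp, expr)     \
        ((expr) ? (void)0 : assfail((mp), #expr, __FILE__, __LINE__))
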
diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h
index 34447dca97d1..0b05e10995a0 100644
--- a/fs/xfs/xfs_message.h
+++ b/fs/xfs/xfs_message.h
@@ -57,9 +57,9 @@ do { \
#define xfs_debug_ratelimited(dev, fmt, ...) \
xfs_printk_ratelimited(xfs_debug, dev, fmt, ##__VA_ARGS__)
-extern void assfail(char *expr, char *f, int l);
-extern void asswarn(char *expr, char *f, int l);
+void assfail(struct xfs_mount *mp, char *expr, char *f, int l);
+void asswarn(struct xfs_mount *mp, char *expr, char *f, int l);
-extern void xfs_hex_dump(void *p, int length);
+extern void xfs_hex_dump(const void *p, int length);
#endif /* __XFS_MESSAGE_H */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 322da6909290..fca65109cf24 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -82,7 +82,7 @@ xfs_uuid_mount(
if (hole < 0) {
xfs_uuid_table = kmem_realloc(xfs_uuid_table,
(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
- KM_SLEEP);
+ 0);
hole = xfs_uuid_table_size++;
}
xfs_uuid_table[hole] = *uuid;
@@ -214,7 +214,7 @@ xfs_initialize_perag(
spin_lock(&mp->m_perag_lock);
if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
- BUG();
+ WARN_ON_ONCE(1);
spin_unlock(&mp->m_perag_lock);
radix_tree_preload_end();
error = -EEXIST;
@@ -426,45 +426,6 @@ xfs_update_alignment(xfs_mount_t *mp)
}
/*
- * Set the default minimum read and write sizes unless
- * already specified in a mount option.
- * We use smaller I/O sizes when the file system
- * is being used for NFS service (wsync mount option).
- */
-STATIC void
-xfs_set_rw_sizes(xfs_mount_t *mp)
-{
- xfs_sb_t *sbp = &(mp->m_sb);
- int readio_log, writeio_log;
-
- if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
- if (mp->m_flags & XFS_MOUNT_WSYNC) {
- readio_log = XFS_WSYNC_READIO_LOG;
- writeio_log = XFS_WSYNC_WRITEIO_LOG;
- } else {
- readio_log = XFS_READIO_LOG_LARGE;
- writeio_log = XFS_WRITEIO_LOG_LARGE;
- }
- } else {
- readio_log = mp->m_readio_log;
- writeio_log = mp->m_writeio_log;
- }
-
- if (sbp->sb_blocklog > readio_log) {
- mp->m_readio_log = sbp->sb_blocklog;
- } else {
- mp->m_readio_log = readio_log;
- }
- mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
- if (sbp->sb_blocklog > writeio_log) {
- mp->m_writeio_log = sbp->sb_blocklog;
- } else {
- mp->m_writeio_log = writeio_log;
- }
- mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
-}
-
-/*
* precalculate the low space thresholds for dynamic speculative preallocation.
*/
void
@@ -706,7 +667,8 @@ xfs_mountfs(
/* enable fail_at_unmount as default */
mp->m_fail_unmount = true;
- error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
+ error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
+ NULL, mp->m_super->s_id);
if (error)
goto out;
@@ -728,9 +690,12 @@ xfs_mountfs(
goto out_remove_errortag;
/*
- * Set the minimum read and write sizes
+ * Update the preferred write size based on the information from the
+ * on-disk superblock.
*/
- xfs_set_rw_sizes(mp);
+ mp->m_allocsize_log =
+ max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
+ mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);
/* set the low space thresholds for dynamic preallocation */
xfs_set_low_space_thresholds(mp);
@@ -796,9 +761,8 @@ xfs_mountfs(
goto out_free_dir;
}
- if (!sbp->sb_logblocks) {
+ if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
xfs_warn(mp, "no log defined");
- XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto out_free_perag;
}
@@ -836,12 +800,10 @@ xfs_mountfs(
ASSERT(rip != NULL);
- if (unlikely(!S_ISDIR(VFS_I(rip)->i_mode))) {
+ if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
xfs_warn(mp, "corrupted root inode %llu: not a directory",
(unsigned long long)rip->i_ino);
xfs_iunlock(rip, XFS_ILOCK_EXCL);
- XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
- mp);
error = -EFSCORRUPTED;
goto out_rele_rip;
}
@@ -1277,7 +1239,7 @@ xfs_mod_fdblocks(
printk_once(KERN_WARNING
"Filesystem \"%s\": reserve blocks depleted! "
"Consider increasing reserve pool size.",
- mp->m_fsname);
+ mp->m_super->s_id);
fdblocks_enospc:
spin_unlock(&mp->m_sb_lock);
return -ENOSPC;
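
The m_allocsize_log/m_allocsize_blocks computation added to xfs_mountfs() above reduces the old read/write size pair to a single clamp and shift. The same arithmetic in isolation, with sample values in the comment (the helper name and the numbers are illustrative only):

static inline unsigned int
example_allocsize_blocks(
        uint32_t        sb_blocklog,
        uint32_t        allocsize_log)
{
        /* never allow an allocation size smaller than one fs block */
        allocsize_log = max_t(uint32_t, sb_blocklog, allocsize_log);

        /* e.g. 4k blocks (log 12) with a 64k allocsize (log 16) -> 16 blocks */
        return 1U << (allocsize_log - sb_blocklog);
}
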
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 4adb6837439a..88ab09ed29e7 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -9,10 +9,8 @@
struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
-struct xfs_nameops;
struct xfs_ail;
struct xfs_quotainfo;
-struct xfs_dir_ops;
struct xfs_da_geometry;
/* dynamic preallocation free space thresholds, 5% down to 1% */
@@ -59,7 +57,6 @@ struct xfs_error_cfg {
typedef struct xfs_mount {
struct super_block *m_super;
- xfs_tid_t m_tid; /* next unused tid for fs */
/*
* Bitsets of per-fs metadata that have been checked and/or are sick.
@@ -89,8 +86,6 @@ typedef struct xfs_mount {
struct percpu_counter m_delalloc_blks;
struct xfs_buf *m_sb_bp; /* buffer for superblock */
- char *m_fsname; /* filesystem name */
- int m_fsname_len; /* strlen of fs name */
char *m_rtname; /* realtime device name */
char *m_logname; /* external log device name */
int m_bsize; /* fs logical block size */
@@ -98,10 +93,8 @@ typedef struct xfs_mount {
xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */
spinlock_t m_agirotor_lock;/* .. and lock protecting it */
xfs_agnumber_t m_maxagi; /* highest inode alloc group */
- uint m_readio_log; /* min read size log bytes */
- uint m_readio_blocks; /* min read size blocks */
- uint m_writeio_log; /* min write size log bytes */
- uint m_writeio_blocks; /* min write size blocks */
+ uint m_allocsize_log;/* min write size log bytes */
+ uint m_allocsize_blocks; /* min write size blocks */
struct xfs_da_geometry *m_dir_geo; /* directory block geometry */
struct xfs_da_geometry *m_attr_geo; /* attribute block geometry */
struct xlog *m_log; /* log specific stuff */
@@ -159,10 +152,6 @@ typedef struct xfs_mount {
int m_dalign; /* stripe unit */
int m_swidth; /* stripe width */
uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
- const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */
- const struct xfs_dir_ops *m_dir_inode_ops; /* vector of dir inode ops */
- const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */
- uint m_chsize; /* size of next field */
atomic_t m_active_trans; /* number trans frozen */
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
struct delayed_work m_reclaim_work; /* background inode reclaim */
@@ -229,7 +218,7 @@ typedef struct xfs_mount {
#define XFS_MOUNT_ATTR2 (1ULL << 8) /* allow use of attr2 format */
#define XFS_MOUNT_GRPID (1ULL << 9) /* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */
-#define XFS_MOUNT_DFLT_IOSIZE (1ULL << 12) /* set default i/o size */
+#define XFS_MOUNT_ALLOCSIZE (1ULL << 12) /* specified allocation size */
#define XFS_MOUNT_SMALL_INUMS (1ULL << 14) /* user wants 32bit inodes */
#define XFS_MOUNT_32BITINODES (1ULL << 15) /* inode32 allocator active */
#define XFS_MOUNT_NOUUID (1ULL << 16) /* ignore uuid during mount */
@@ -238,7 +227,7 @@ typedef struct xfs_mount {
* allocation */
#define XFS_MOUNT_RDONLY (1ULL << 20) /* read-only fs */
#define XFS_MOUNT_DIRSYNC (1ULL << 21) /* synchronous directory ops */
-#define XFS_MOUNT_COMPAT_IOSIZE (1ULL << 22) /* don't report large preferred
+#define XFS_MOUNT_LARGEIO (1ULL << 22) /* report large preferred
* I/O size in stat() */
#define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams
allocator */
@@ -246,13 +235,6 @@ typedef struct xfs_mount {
#define XFS_MOUNT_DAX (1ULL << 62) /* TEST ONLY! */
-
-/*
- * Default minimum read and write sizes.
- */
-#define XFS_READIO_LOG_LARGE 16
-#define XFS_WRITEIO_LOG_LARGE 16
-
/*
* Max and min values for mount-option defined I/O
* preallocation sizes.
@@ -260,37 +242,6 @@ typedef struct xfs_mount {
#define XFS_MAX_IO_LOG 30 /* 1G */
#define XFS_MIN_IO_LOG PAGE_SHIFT
-/*
- * Synchronous read and write sizes. This should be
- * better for NFSv2 wsync filesystems.
- */
-#define XFS_WSYNC_READIO_LOG 15 /* 32k */
-#define XFS_WSYNC_WRITEIO_LOG 14 /* 16k */
-
-/*
- * Allow large block sizes to be reported to userspace programs if the
- * "largeio" mount option is used.
- *
- * If compatibility mode is specified, simply return the basic unit of caching
- * so that we don't get inefficient read/modify/write I/O from user apps.
- * Otherwise....
- *
- * If the underlying volume is a stripe, then return the stripe width in bytes
- * as the recommended I/O size. It is not a stripe and we've set a default
- * buffered I/O size, return that, otherwise return the compat default.
- */
-static inline unsigned long
-xfs_preferred_iosize(xfs_mount_t *mp)
-{
- if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
- return PAGE_SIZE;
- return (mp->m_swidth ?
- (mp->m_swidth << mp->m_sb.sb_blocklog) :
- ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
- (1 << (int)max(mp->m_readio_log, mp->m_writeio_log)) :
- PAGE_SIZE));
-}
-
#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
@@ -327,13 +278,6 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
}
/* per-AG block reservation data structures*/
-enum xfs_ag_resv_type {
- XFS_AG_RESV_NONE = 0,
- XFS_AG_RESV_AGFL,
- XFS_AG_RESV_METADATA,
- XFS_AG_RESV_RMAPBT,
-};
-
struct xfs_ag_resv {
/* number of blocks originally reserved here */
xfs_extlen_t ar_orig_reserved;
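
xfs_preferred_iosize() and the COMPAT/DFLT_IOSIZE flags are removed above; the stat() I/O size reporting that replaces them lives outside this hunk. As a rough sketch only (the function name and exact placement are assumptions, not shown by this patch), the equivalent decision under the renamed flags looks roughly like:

static inline unsigned int
example_stat_io_size(
        struct xfs_mount        *mp)
{
        if (!(mp->m_flags & XFS_MOUNT_LARGEIO))
                return PAGE_SIZE;
        if (mp->m_swidth)
                return mp->m_swidth << mp->m_sb.sb_blocklog;
        if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
                return 1U << mp->m_allocsize_log;
        return PAGE_SIZE;
}
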
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 74738813f60d..a06661dac5be 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -333,12 +333,12 @@ xfs_mru_cache_create(
if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count))
return -EINVAL;
- if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP)))
+ if (!(mru = kmem_zalloc(sizeof(*mru), 0)))
return -ENOMEM;
/* An extra list is needed to avoid reaping up to a grp_time early. */
mru->grp_count = grp_count + 1;
- mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
+ mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), 0);
if (!mru->lists) {
err = -ENOMEM;
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 0c954cad7449..bb3008d390aa 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -12,6 +12,7 @@
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_iomap.h"
+#include "xfs_pnfs.h"
/*
* Ensure that we do not have any outstanding pNFS layouts that can be used by
@@ -32,7 +33,7 @@ xfs_break_leased_layouts(
struct xfs_inode *ip = XFS_I(inode);
int error;
- while ((error = break_layout(inode, false) == -EWOULDBLOCK)) {
+ while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
xfs_iunlock(ip, *iolock);
*did_unlock = true;
error = break_layout(inode, true);
@@ -59,7 +60,7 @@ xfs_fs_get_uuid(
printk_once(KERN_NOTICE
"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
- mp->m_fsname);
+ mp->m_super->s_id);
if (*len < sizeof(uuid_t))
return -EINVAL;
@@ -142,43 +143,38 @@ xfs_fs_map_blocks(
lock_flags = xfs_ilock_data_map_shared(ip);
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
&imap, &nimaps, bmapi_flags);
- xfs_iunlock(ip, lock_flags);
- if (error)
- goto out_unlock;
+ ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK);
+
+ if (!error && write &&
+ (!nimaps || imap.br_startblock == HOLESTARTBLOCK)) {
+ if (offset + length > XFS_ISIZE(ip))
+ end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
+ else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
+ end_fsb = min(end_fsb, imap.br_startoff +
+ imap.br_blockcount);
+ xfs_iunlock(ip, lock_flags);
+
+ error = xfs_iomap_write_direct(ip, offset_fsb,
+ end_fsb - offset_fsb, &imap);
+ if (error)
+ goto out_unlock;
- if (write) {
- enum xfs_prealloc_flags flags = 0;
-
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
-
- if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
- /*
- * xfs_iomap_write_direct() expects to take ownership of
- * the shared ilock.
- */
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- error = xfs_iomap_write_direct(ip, offset, length,
- &imap, nimaps);
- if (error)
- goto out_unlock;
-
- /*
- * Ensure the next transaction is committed
- * synchronously so that the blocks allocated and
- * handed out to the client are guaranteed to be
- * present even after a server crash.
- */
- flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
- }
-
- error = xfs_update_prealloc_flags(ip, flags);
+ /*
+ * Ensure the next transaction is committed synchronously so
+ * that the blocks allocated and handed out to the client are
+ * guaranteed to be present even after a server crash.
+ */
+ error = xfs_update_prealloc_flags(ip,
+ XFS_PREALLOC_SET | XFS_PREALLOC_SYNC);
if (error)
goto out_unlock;
+ } else {
+ xfs_iunlock(ip, lock_flags);
}
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
*device_generation = mp->m_generation;
return error;
out_unlock:
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 5e7a37f0cf84..0b0909657bad 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -22,6 +22,7 @@
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_error.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -29,10 +30,10 @@
* quota functionality, including maintaining the freelist and hash
* tables of dquots.
*/
-STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
-STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
+STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
+STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
-STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
+STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
* We use the batch lookup interface to iterate over the dquots as it
@@ -243,14 +244,14 @@ xfs_qm_unmount_quotas(
STATIC int
xfs_qm_dqattach_one(
- xfs_inode_t *ip,
- xfs_dqid_t id,
- uint type,
- bool doalloc,
- xfs_dquot_t **IO_idqpp)
+ struct xfs_inode *ip,
+ xfs_dqid_t id,
+ uint type,
+ bool doalloc,
+ struct xfs_dquot **IO_idqpp)
{
- xfs_dquot_t *dqp;
- int error;
+ struct xfs_dquot *dqp;
+ int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
error = 0;
@@ -341,7 +342,7 @@ xfs_qm_dqattach_locked(
}
if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
- error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
+ error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
doalloc, &ip->i_pdquot);
if (error)
goto done;
@@ -539,12 +540,12 @@ xfs_qm_shrink_count(
STATIC void
xfs_qm_set_defquota(
- xfs_mount_t *mp,
- uint type,
- xfs_quotainfo_t *qinf)
+ struct xfs_mount *mp,
+ uint type,
+ struct xfs_quotainfo *qinf)
{
- xfs_dquot_t *dqp;
- struct xfs_def_quota *defq;
+ struct xfs_dquot *dqp;
+ struct xfs_def_quota *defq;
struct xfs_disk_dquot *ddqp;
int error;
@@ -642,7 +643,7 @@ xfs_qm_init_quotainfo(
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
- qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
+ qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
error = list_lru_init(&qinf->qi_lru);
if (error)
@@ -709,9 +710,9 @@ out_free_qinf:
*/
void
xfs_qm_destroy_quotainfo(
- xfs_mount_t *mp)
+ struct xfs_mount *mp)
{
- xfs_quotainfo_t *qi;
+ struct xfs_quotainfo *qi;
qi = mp->m_quotainfo;
ASSERT(qi != NULL);
@@ -754,11 +755,15 @@ xfs_qm_qino_alloc(
if ((flags & XFS_QMOPT_PQUOTA) &&
(mp->m_sb.sb_gquotino != NULLFSINO)) {
ino = mp->m_sb.sb_gquotino;
- ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
+ if (XFS_IS_CORRUPT(mp,
+ mp->m_sb.sb_pquotino != NULLFSINO))
+ return -EFSCORRUPTED;
} else if ((flags & XFS_QMOPT_GQUOTA) &&
(mp->m_sb.sb_pquotino != NULLFSINO)) {
ino = mp->m_sb.sb_pquotino;
- ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
+ if (XFS_IS_CORRUPT(mp,
+ mp->m_sb.sb_gquotino != NULLFSINO))
+ return -EFSCORRUPTED;
}
if (ino != NULLFSINO) {
error = xfs_iget(mp, NULL, ino, 0, 0, ip);
@@ -978,7 +983,7 @@ xfs_qm_reset_dqcounts_buf(
if (qip->i_d.di_nblocks == 0)
return 0;
- map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
+ map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
lblkno = 0;
maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
@@ -1559,7 +1564,7 @@ error_rele:
STATIC void
xfs_qm_destroy_quotainos(
- xfs_quotainfo_t *qi)
+ struct xfs_quotainfo *qi)
{
if (qi->qi_uquotaip) {
xfs_irele(qi->qi_uquotaip);
@@ -1693,7 +1698,7 @@ xfs_qm_vop_dqalloc(
}
}
if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
- if (xfs_get_projid(ip) != prid) {
+ if (ip->i_d.di_projid != prid) {
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
true, &pq);
@@ -1737,14 +1742,14 @@ error_rele:
* Actually transfer ownership, and do dquot modifications.
* These were already reserved.
*/
-xfs_dquot_t *
+struct xfs_dquot *
xfs_qm_vop_chown(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- xfs_dquot_t **IO_olddq,
- xfs_dquot_t *newdq)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct xfs_dquot **IO_olddq,
+ struct xfs_dquot *newdq)
{
- xfs_dquot_t *prevdq;
+ struct xfs_dquot *prevdq;
uint bfield = XFS_IS_REALTIME_INODE(ip) ?
XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
@@ -1827,7 +1832,7 @@ xfs_qm_vop_chown_reserve(
}
if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
- xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
+ ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) {
prjflags = XFS_QMOPT_ENOSPC;
pdq_delblks = pdqp;
if (delblks) {
@@ -1928,7 +1933,7 @@ xfs_qm_vop_create_dqattach(
}
if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
ASSERT(ip->i_pdquot == NULL);
- ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
+ ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id));
ip->i_pdquot = xfs_qm_dqhold(pdqp);
xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index b41b75089548..7823af39008b 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -54,7 +54,7 @@ struct xfs_def_quota {
* Various quota information for individual filesystems.
* The mount structure keeps a pointer to this.
*/
-typedef struct xfs_quotainfo {
+struct xfs_quotainfo {
struct radix_tree_root qi_uquota_tree;
struct radix_tree_root qi_gquota_tree;
struct radix_tree_root qi_pquota_tree;
@@ -76,8 +76,8 @@ typedef struct xfs_quotainfo {
struct xfs_def_quota qi_usr_default;
struct xfs_def_quota qi_grp_default;
struct xfs_def_quota qi_prj_default;
- struct shrinker qi_shrinker;
-} xfs_quotainfo_t;
+ struct shrinker qi_shrinker;
+};
static inline struct radix_tree_root *
xfs_dquot_tree(
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 5d72e88598b4..fc2fa418919f 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -54,13 +54,13 @@ xfs_fill_statvfs_from_dquot(
*/
void
xfs_qm_statvfs(
- xfs_inode_t *ip,
+ struct xfs_inode *ip,
struct kstatfs *statp)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_dquot_t *dqp;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dquot *dqp;
- if (!xfs_qm_dqget(mp, xfs_get_projid(ip), XFS_DQ_PROJ, false, &dqp)) {
+ if (!xfs_qm_dqget(mp, ip->i_d.di_projid, XFS_DQ_PROJ, false, &dqp)) {
xfs_fill_statvfs_from_dquot(statp, dqp);
xfs_qm_dqput(dqp);
}
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index da7ad0383037..1ea82764bf89 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -19,9 +19,72 @@
#include "xfs_qm.h"
#include "xfs_icache.h"
-STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
-STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
- uint);
+STATIC int
+xfs_qm_log_quotaoff(
+ struct xfs_mount *mp,
+ struct xfs_qoff_logitem **qoffstartp,
+ uint flags)
+{
+ struct xfs_trans *tp;
+ int error;
+ struct xfs_qoff_logitem *qoffi;
+
+ *qoffstartp = NULL;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
+ if (error)
+ goto out;
+
+ qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
+ xfs_trans_log_quotaoff_item(tp, qoffi);
+
+ spin_lock(&mp->m_sb_lock);
+ mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
+ spin_unlock(&mp->m_sb_lock);
+
+ xfs_log_sb(tp);
+
+ /*
+ * We have to make sure that the transaction is secure on disk before we
+ * return and actually stop quota accounting. So, make it synchronous.
+ * We don't care about quotoff's performance.
+ */
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp);
+ if (error)
+ goto out;
+
+ *qoffstartp = qoffi;
+out:
+ return error;
+}
+
+STATIC int
+xfs_qm_log_quotaoff_end(
+ struct xfs_mount *mp,
+ struct xfs_qoff_logitem *startqoff,
+ uint flags)
+{
+ struct xfs_trans *tp;
+ int error;
+ struct xfs_qoff_logitem *qoffi;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ qoffi = xfs_trans_get_qoff_item(tp, startqoff,
+ flags & XFS_ALL_QUOTA_ACCT);
+ xfs_trans_log_quotaoff_item(tp, qoffi);
+
+ /*
+ * We have to make sure that the transaction is secure on disk before we
+ * return and actually stop quota accounting. So, make it synchronous.
+ * We don't care about quotaoff's performance.
+ */
+ xfs_trans_set_sync(tp);
+ return xfs_trans_commit(tp);
+}
/*
* Turn off quota accounting and/or enforcement for all udquots and/or
@@ -40,7 +103,7 @@ xfs_qm_scall_quotaoff(
uint dqtype;
int error;
uint inactivate_flags;
- xfs_qoff_logitem_t *qoffstart;
+ struct xfs_qoff_logitem *qoffstart;
/*
* No file system can have quotas enabled on disk but not in core.
@@ -538,74 +601,6 @@ out_unlock:
return error;
}
-STATIC int
-xfs_qm_log_quotaoff_end(
- xfs_mount_t *mp,
- xfs_qoff_logitem_t *startqoff,
- uint flags)
-{
- xfs_trans_t *tp;
- int error;
- xfs_qoff_logitem_t *qoffi;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
- if (error)
- return error;
-
- qoffi = xfs_trans_get_qoff_item(tp, startqoff,
- flags & XFS_ALL_QUOTA_ACCT);
- xfs_trans_log_quotaoff_item(tp, qoffi);
-
- /*
- * We have to make sure that the transaction is secure on disk before we
- * return and actually stop quota accounting. So, make it synchronous.
- * We don't care about quotoff's performance.
- */
- xfs_trans_set_sync(tp);
- return xfs_trans_commit(tp);
-}
-
-
-STATIC int
-xfs_qm_log_quotaoff(
- xfs_mount_t *mp,
- xfs_qoff_logitem_t **qoffstartp,
- uint flags)
-{
- xfs_trans_t *tp;
- int error;
- xfs_qoff_logitem_t *qoffi;
-
- *qoffstartp = NULL;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
- if (error)
- goto out;
-
- qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
- xfs_trans_log_quotaoff_item(tp, qoffi);
-
- spin_lock(&mp->m_sb_lock);
- mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
- spin_unlock(&mp->m_sb_lock);
-
- xfs_log_sb(tp);
-
- /*
- * We have to make sure that the transaction is secure on disk before we
- * return and actually stop quota accounting. So, make it synchronous.
- * We don't care about quotoff's performance.
- */
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp);
- if (error)
- goto out;
-
- *qoffstartp = qoffi;
-out:
- return error;
-}
-
/* Fill out the quota context. */
static void
xfs_qm_scall_getquota_fill_qc(
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index cd6c7210a373..c7de17deeae6 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -201,6 +201,9 @@ xfs_fs_rm_xquota(
if (XFS_IS_QUOTA_ON(mp))
return -EINVAL;
+ if (uflags & ~(FS_USER_QUOTA | FS_GROUP_QUOTA | FS_PROJ_QUOTA))
+ return -EINVAL;
+
if (uflags & FS_USER_QUOTA)
flags |= XFS_DQ_USER;
if (uflags & FS_GROUP_QUOTA)
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index d8288aa0670a..8eeed73928cd 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -17,7 +17,7 @@
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_cui_zone;
kmem_zone_t *xfs_cud_zone;
@@ -34,7 +34,7 @@ xfs_cui_item_free(
if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
kmem_free(cuip);
else
- kmem_zone_free(xfs_cui_zone, cuip);
+ kmem_cache_free(xfs_cui_zone, cuip);
}
/*
@@ -144,9 +144,9 @@ xfs_cui_init(
ASSERT(nextents > 0);
if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
- KM_SLEEP);
+ 0);
else
- cuip = kmem_zone_zalloc(xfs_cui_zone, KM_SLEEP);
+ cuip = kmem_zone_zalloc(xfs_cui_zone, 0);
xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
cuip->cui_format.cui_nextents = nextents;
@@ -206,7 +206,7 @@ xfs_cud_item_release(
struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
xfs_cui_release(cudp->cud_cuip);
- kmem_zone_free(xfs_cud_zone, cudp);
+ kmem_cache_free(xfs_cud_zone, cudp);
}
static const struct xfs_item_ops xfs_cud_item_ops = {
@@ -223,7 +223,7 @@ xfs_trans_get_cud(
{
struct xfs_cud_log_item *cudp;
- cudp = kmem_zone_zalloc(xfs_cud_zone, KM_SLEEP);
+ cudp = kmem_zone_zalloc(xfs_cud_zone, 0);
xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
&xfs_cud_item_ops);
cudp->cud_cuip = cuip;
@@ -497,7 +497,7 @@ xfs_cui_recover(
*/
set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
xfs_cui_release(cuip);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -536,6 +536,7 @@ xfs_cui_recover(
type = refc_type;
break;
default:
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto abort_error;
}
@@ -555,26 +556,24 @@ xfs_cui_recover(
irec.br_blockcount = new_len;
switch (type) {
case XFS_REFCOUNT_INCREASE:
- error = xfs_refcount_increase_extent(tp, &irec);
+ xfs_refcount_increase_extent(tp, &irec);
break;
case XFS_REFCOUNT_DECREASE:
- error = xfs_refcount_decrease_extent(tp, &irec);
+ xfs_refcount_decrease_extent(tp, &irec);
break;
case XFS_REFCOUNT_ALLOC_COW:
- error = xfs_refcount_alloc_cow_extent(tp,
+ xfs_refcount_alloc_cow_extent(tp,
irec.br_startblock,
irec.br_blockcount);
break;
case XFS_REFCOUNT_FREE_COW:
- error = xfs_refcount_free_cow_extent(tp,
+ xfs_refcount_free_cow_extent(tp,
irec.br_startblock,
irec.br_blockcount);
break;
default:
ASSERT(0);
}
- if (error)
- goto abort_error;
requeue_only = true;
}
}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index c4ec7afd1170..de451235c4ee 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -308,13 +308,13 @@ static int
xfs_find_trim_cow_extent(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap,
+ struct xfs_bmbt_irec *cmap,
bool *shared,
bool *found)
{
xfs_fileoff_t offset_fsb = imap->br_startoff;
xfs_filblks_t count_fsb = imap->br_blockcount;
struct xfs_iext_cursor icur;
- struct xfs_bmbt_irec got;
*found = false;
@@ -322,23 +322,22 @@ xfs_find_trim_cow_extent(
* If we don't find an overlapping extent, trim the range we need to
* allocate to fit the hole we found.
*/
- if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
- got.br_startoff = offset_fsb + count_fsb;
- if (got.br_startoff > offset_fsb) {
+ if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, cmap))
+ cmap->br_startoff = offset_fsb + count_fsb;
+ if (cmap->br_startoff > offset_fsb) {
xfs_trim_extent(imap, imap->br_startoff,
- got.br_startoff - imap->br_startoff);
+ cmap->br_startoff - imap->br_startoff);
return xfs_inode_need_cow(ip, imap, shared);
}
*shared = true;
- if (isnullstartblock(got.br_startblock)) {
- xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
+ if (isnullstartblock(cmap->br_startblock)) {
+ xfs_trim_extent(imap, cmap->br_startoff, cmap->br_blockcount);
return 0;
}
/* real extent found - no need to allocate */
- xfs_trim_extent(&got, offset_fsb, count_fsb);
- *imap = got;
+ xfs_trim_extent(cmap, offset_fsb, count_fsb);
*found = true;
return 0;
}
@@ -348,6 +347,7 @@ int
xfs_reflink_allocate_cow(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap,
+ struct xfs_bmbt_irec *cmap,
bool *shared,
uint *lockmode,
bool convert_now)
@@ -367,7 +367,7 @@ xfs_reflink_allocate_cow(
xfs_ifork_init_cow(ip);
}
- error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+ error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
if (error || !*shared)
return error;
if (found)
@@ -392,7 +392,7 @@ xfs_reflink_allocate_cow(
/*
* Check for an overlapping extent again now that we dropped the ilock.
*/
- error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+ error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
if (error || !*shared)
goto out_trans_cancel;
if (found) {
@@ -410,8 +410,8 @@ xfs_reflink_allocate_cow(
/* Allocate the entire reservation as unwritten blocks. */
nimaps = 1;
error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
- XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
- resblks, imap, &nimaps);
+ XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, cmap,
+ &nimaps);
if (error)
goto out_unreserve;
@@ -427,15 +427,15 @@ xfs_reflink_allocate_cow(
if (nimaps == 0)
return -ENOSPC;
convert:
- xfs_trim_extent(imap, offset_fsb, count_fsb);
+ xfs_trim_extent(cmap, offset_fsb, count_fsb);
/*
* COW fork extents are supposed to remain unwritten until we're ready
* to initiate a disk write. For direct I/O we are going to write the
* data and need the conversion, but for buffered writes we're done.
*/
- if (!convert_now || imap->br_state == XFS_EXT_NORM)
+ if (!convert_now || cmap->br_state == XFS_EXT_NORM)
return 0;
- trace_xfs_reflink_convert_cow(ip, imap);
+ trace_xfs_reflink_convert_cow(ip, cmap);
return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
out_unreserve:
@@ -495,10 +495,8 @@ xfs_reflink_cancel_cow_blocks(
ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);
/* Free the CoW orphan record. */
- error = xfs_refcount_free_cow_extent(*tpp,
- del.br_startblock, del.br_blockcount);
- if (error)
- break;
+ xfs_refcount_free_cow_extent(*tpp, del.br_startblock,
+ del.br_blockcount);
xfs_bmap_add_free(*tpp, del.br_startblock,
del.br_blockcount, NULL);
@@ -675,15 +673,10 @@ xfs_reflink_end_cow_extent(
trace_xfs_reflink_cow_remap(ip, &del);
/* Free the CoW orphan record. */
- error = xfs_refcount_free_cow_extent(tp, del.br_startblock,
- del.br_blockcount);
- if (error)
- goto out_cancel;
+ xfs_refcount_free_cow_extent(tp, del.br_startblock, del.br_blockcount);
/* Map the new blocks into the data fork. */
- error = xfs_bmap_map_extent(tp, ip, &del);
- if (error)
- goto out_cancel;
+ xfs_bmap_map_extent(tp, ip, &del);
/* Charge this new data fork mapping to the on-disk quota. */
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
@@ -1070,14 +1063,10 @@ xfs_reflink_remap_extent(
uirec.br_blockcount, uirec.br_startblock);
/* Update the refcount tree */
- error = xfs_refcount_increase_extent(tp, &uirec);
- if (error)
- goto out_cancel;
+ xfs_refcount_increase_extent(tp, &uirec);
/* Map the new blocks into the data fork. */
- error = xfs_bmap_map_extent(tp, ip, &uirec);
- if (error)
- goto out_cancel;
+ xfs_bmap_map_extent(tp, ip, &uirec);
/* Update quota accounting. */
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
@@ -1190,11 +1179,11 @@ xfs_reflink_remap_blocks(
}
/*
- * Grab the exclusive iolock for a data copy from src to dest, making
- * sure to abide vfs locking order (lowest pointer value goes first) and
- * breaking the pnfs layout leases on dest before proceeding. The loop
- * is needed because we cannot call the blocking break_layout() with the
- * src iolock held, and therefore have to back out both locks.
+ * Grab the exclusive iolock for a data copy from src to dest, making sure to
+ * abide by vfs locking order (lowest pointer value goes first) and breaking the
+ * layout leases before proceeding. The loop is needed because we cannot call
+ * the blocking break_layout() with the iolocks held, and therefore have to
+ * back out both locks.
*/
static int
xfs_iolock_two_inodes_and_break_layout(
@@ -1203,33 +1192,44 @@ xfs_iolock_two_inodes_and_break_layout(
{
int error;
-retry:
- if (src < dest) {
- inode_lock_shared(src);
- inode_lock_nested(dest, I_MUTEX_NONDIR2);
- } else {
- /* src >= dest */
- inode_lock(dest);
- }
+ if (src > dest)
+ swap(src, dest);
- error = break_layout(dest, false);
- if (error == -EWOULDBLOCK) {
- inode_unlock(dest);
- if (src < dest)
- inode_unlock_shared(src);
+retry:
+ /* Wait to break both inodes' layouts before we start locking. */
+ error = break_layout(src, true);
+ if (error)
+ return error;
+ if (src != dest) {
error = break_layout(dest, true);
if (error)
return error;
- goto retry;
}
+
+ /* Lock one inode and make sure nobody got in and leased it. */
+ inode_lock(src);
+ error = break_layout(src, false);
+ if (error) {
+ inode_unlock(src);
+ if (error == -EWOULDBLOCK)
+ goto retry;
+ return error;
+ }
+
+ if (src == dest)
+ return 0;
+
+ /* Lock the other inode and make sure nobody got in and leased it. */
+ inode_lock_nested(dest, I_MUTEX_NONDIR2);
+ error = break_layout(dest, false);
if (error) {
+ inode_unlock(src);
inode_unlock(dest);
- if (src < dest)
- inode_unlock_shared(src);
+ if (error == -EWOULDBLOCK)
+ goto retry;
return error;
}
- if (src > dest)
- inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
+
return 0;
}
@@ -1247,10 +1247,10 @@ xfs_reflink_remap_unlock(
xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
if (!same_inode)
- xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+ xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
inode_unlock(inode_out);
if (!same_inode)
- inode_unlock_shared(inode_in);
+ inode_unlock(inode_in);
}
/*
@@ -1270,7 +1270,7 @@ xfs_reflink_zero_posteof(
trace_xfs_zero_eof(ip, isize, pos - isize);
return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
- &xfs_iomap_ops);
+ &xfs_buffered_write_iomap_ops);
}
/*
@@ -1325,7 +1325,7 @@ xfs_reflink_remap_prep(
if (same_inode)
xfs_ilock(src, XFS_MMAPLOCK_EXCL);
else
- xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
+ xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
XFS_MMAPLOCK_EXCL);
/* Check file eligibility and prepare for block sharing. */
@@ -1381,85 +1381,6 @@ out_unlock:
return ret;
}
-/*
- * The user wants to preemptively CoW all shared blocks in this file,
- * which enables us to turn off the reflink flag. Iterate all
- * extents which are not prealloc/delalloc to see which ranges are
- * mentioned in the refcount tree, then read those blocks into the
- * pagecache, dirty them, fsync them back out, and then we can update
- * the inode flag. What happens if we run out of memory? :)
- */
-STATIC int
-xfs_reflink_dirty_extents(
- struct xfs_inode *ip,
- xfs_fileoff_t fbno,
- xfs_filblks_t end,
- xfs_off_t isize)
-{
- struct xfs_mount *mp = ip->i_mount;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_extlen_t aglen;
- xfs_agblock_t rbno;
- xfs_extlen_t rlen;
- xfs_off_t fpos;
- xfs_off_t flen;
- struct xfs_bmbt_irec map[2];
- int nmaps;
- int error = 0;
-
- while (end - fbno > 0) {
- nmaps = 1;
- /*
- * Look for extents in the file. Skip holes, delalloc, or
- * unwritten extents; they can't be reflinked.
- */
- error = xfs_bmapi_read(ip, fbno, end - fbno, map, &nmaps, 0);
- if (error)
- goto out;
- if (nmaps == 0)
- break;
- if (!xfs_bmap_is_real_extent(&map[0]))
- goto next;
-
- map[1] = map[0];
- while (map[1].br_blockcount) {
- agno = XFS_FSB_TO_AGNO(mp, map[1].br_startblock);
- agbno = XFS_FSB_TO_AGBNO(mp, map[1].br_startblock);
- aglen = map[1].br_blockcount;
-
- error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
- aglen, &rbno, &rlen, true);
- if (error)
- goto out;
- if (rbno == NULLAGBLOCK)
- break;
-
- /* Dirty the pages */
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- fpos = XFS_FSB_TO_B(mp, map[1].br_startoff +
- (rbno - agbno));
- flen = XFS_FSB_TO_B(mp, rlen);
- if (fpos + flen > isize)
- flen = isize - fpos;
- error = iomap_file_dirty(VFS_I(ip), fpos, flen,
- &xfs_iomap_ops);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (error)
- goto out;
-
- map[1].br_blockcount -= (rbno - agbno + rlen);
- map[1].br_startoff += (rbno - agbno + rlen);
- map[1].br_startblock += (rbno - agbno + rlen);
- }
-
-next:
- fbno = map[0].br_startoff + map[0].br_blockcount;
- }
-out:
- return error;
-}
-
/* Does this inode need the reflink flag? */
int
xfs_reflink_inode_has_shared_extents(
@@ -1596,10 +1517,7 @@ xfs_reflink_unshare(
xfs_off_t offset,
xfs_off_t len)
{
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t fbno;
- xfs_filblks_t end;
- xfs_off_t isize;
+ struct inode *inode = VFS_I(ip);
int error;
if (!xfs_is_reflink_inode(ip))
@@ -1607,20 +1525,13 @@ xfs_reflink_unshare(
trace_xfs_reflink_unshare(ip, offset, len);
- inode_dio_wait(VFS_I(ip));
+ inode_dio_wait(inode);
- /* Try to CoW the selected ranges */
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- fbno = XFS_B_TO_FSBT(mp, offset);
- isize = i_size_read(VFS_I(ip));
- end = XFS_B_TO_FSB(mp, offset + len);
- error = xfs_reflink_dirty_extents(ip, fbno, end, isize);
+ error = iomap_file_unshare(inode, offset, len,
+ &xfs_buffered_write_iomap_ops);
if (error)
- goto out_unlock;
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- /* Wait for the IO to finish */
- error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+ goto out;
+ error = filemap_write_and_wait(inode->i_mapping);
if (error)
goto out;
@@ -1628,11 +1539,8 @@ xfs_reflink_unshare(
error = xfs_reflink_try_clear_inode_flag(ip);
if (error)
goto out;
-
return 0;
-out_unlock:
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
return error;
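
The removal of xfs_reflink_dirty_extents() above works because the generic iomap layer now provides the unshare walker. A minimal usage sketch, assuming only that the caller supplies a buffered-write-capable struct iomap_ops (the helper name below is illustrative, not from the patch):

#include <linux/fs.h>
#include <linux/iomap.h>

static int
example_unshare_range(
        struct inode            *inode,
        loff_t                  pos,
        loff_t                  len,
        const struct iomap_ops  *write_ops)
{
        int     error;

        /* CoW every shared block in the range via the page cache */
        error = iomap_file_unshare(inode, pos, len, write_ops);
        if (error)
                return error;

        /* push the newly dirtied pages out before clearing reflink state */
        return filemap_write_and_wait(inode->i_mapping);
}
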
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 28a43b7f581d..d18ad7f4fb64 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -25,8 +25,8 @@ extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
bool xfs_inode_need_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
bool *shared);
-extern int xfs_reflink_allocate_cow(struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode,
+int xfs_reflink_allocate_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
+ struct xfs_bmbt_irec *cmap, bool *shared, uint *lockmode,
bool convert_now);
extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 77ed557b6127..4911b68f95dd 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -17,7 +17,7 @@
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_rui_zone;
kmem_zone_t *xfs_rud_zone;
@@ -34,7 +34,7 @@ xfs_rui_item_free(
if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
kmem_free(ruip);
else
- kmem_zone_free(xfs_rui_zone, ruip);
+ kmem_cache_free(xfs_rui_zone, ruip);
}
/*
@@ -142,9 +142,9 @@ xfs_rui_init(
ASSERT(nextents > 0);
if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
- ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), KM_SLEEP);
+ ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
else
- ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP);
+ ruip = kmem_zone_zalloc(xfs_rui_zone, 0);
xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
ruip->rui_format.rui_nextents = nextents;
@@ -171,8 +171,10 @@ xfs_rui_copy_format(
src_rui_fmt = buf->i_addr;
len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);
- if (buf->i_len != len)
+ if (buf->i_len != len) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
+ }
memcpy(dst_rui_fmt, src_rui_fmt, len);
return 0;
@@ -227,7 +229,7 @@ xfs_rud_item_release(
struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
xfs_rui_release(rudp->rud_ruip);
- kmem_zone_free(xfs_rud_zone, rudp);
+ kmem_cache_free(xfs_rud_zone, rudp);
}
static const struct xfs_item_ops xfs_rud_item_ops = {
@@ -244,7 +246,7 @@ xfs_trans_get_rud(
{
struct xfs_rud_log_item *rudp;
- rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP);
+ rudp = kmem_zone_zalloc(xfs_rud_zone, 0);
xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
&xfs_rud_item_ops);
rudp->rud_ruip = ruip;
@@ -539,7 +541,7 @@ xfs_rui_recover(
*/
set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
xfs_rui_release(ruip);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -581,6 +583,7 @@ xfs_rui_recover(
type = XFS_RMAP_FREE;
break;
default:
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
error = -EFSCORRUPTED;
goto abort_error;
}
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 5fa4db3c3e32..d42b5a2047e0 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -792,8 +792,7 @@ xfs_growfs_rt_alloc(
*/
nmap = 1;
error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
- XFS_BMAPI_METADATA, resblks, &map,
- &nmap);
+ XFS_BMAPI_METADATA, 0, &map, &nmap);
if (!error && nmap < 1)
error = -ENOSPC;
if (error)
@@ -865,7 +864,7 @@ xfs_alloc_rsum_cache(
* lower bound on the minimum level with any free extents. We can
* continue without the cache if it couldn't be allocated.
*/
- mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, KM_SLEEP);
+ mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, 0);
if (!mp->m_rsum_cache)
xfs_warn(mp, "could not allocate realtime summary cache");
}
@@ -963,7 +962,7 @@ xfs_growfs_rt(
/*
* Allocate a new (fake) mount/sb.
*/
- nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
+ nmp = kmem_alloc(sizeof(*nmp), 0);
/*
* Loop over the bitmap blocks.
* We will do everything one bitmap block at a time.
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index f9450235533c..d9ae27ddf253 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -37,10 +37,10 @@
#include "xfs_reflink.h"
#include <linux/magic.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
static const struct super_operations xfs_super_operations;
-struct bio_set xfs_ioend_bioset;
static struct kset *xfs_kset; /* top-level xfs sysfs dir */
#ifdef DEBUG
@@ -51,7 +51,7 @@ static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
* Table driven mount option parser.
*/
enum {
- Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
+ Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
@@ -59,382 +59,67 @@ enum {
Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
- Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
+ Opt_discard, Opt_nodiscard, Opt_dax,
};
-static const match_table_t tokens = {
- {Opt_logbufs, "logbufs=%u"}, /* number of XFS log buffers */
- {Opt_logbsize, "logbsize=%s"}, /* size of XFS log buffers */
- {Opt_logdev, "logdev=%s"}, /* log device */
- {Opt_rtdev, "rtdev=%s"}, /* realtime I/O device */
- {Opt_biosize, "biosize=%u"}, /* log2 of preferred buffered io size */
- {Opt_wsync, "wsync"}, /* safe-mode nfs compatible mount */
- {Opt_noalign, "noalign"}, /* turn off stripe alignment */
- {Opt_swalloc, "swalloc"}, /* turn on stripe width allocation */
- {Opt_sunit, "sunit=%u"}, /* data volume stripe unit */
- {Opt_swidth, "swidth=%u"}, /* data volume stripe width */
- {Opt_nouuid, "nouuid"}, /* ignore filesystem UUID */
- {Opt_grpid, "grpid"}, /* group-ID from parent directory */
- {Opt_nogrpid, "nogrpid"}, /* group-ID from current process */
- {Opt_bsdgroups, "bsdgroups"}, /* group-ID from parent directory */
- {Opt_sysvgroups,"sysvgroups"}, /* group-ID from current process */
- {Opt_allocsize, "allocsize=%s"},/* preferred allocation size */
- {Opt_norecovery,"norecovery"}, /* don't run XFS recovery */
- {Opt_inode64, "inode64"}, /* inodes can be allocated anywhere */
- {Opt_inode32, "inode32"}, /* inode allocation limited to
- * XFS_MAXINUMBER_32 */
- {Opt_ikeep, "ikeep"}, /* do not free empty inode clusters */
- {Opt_noikeep, "noikeep"}, /* free empty inode clusters */
- {Opt_largeio, "largeio"}, /* report large I/O sizes in stat() */
- {Opt_nolargeio, "nolargeio"}, /* do not report large I/O sizes
- * in stat(). */
- {Opt_attr2, "attr2"}, /* do use attr2 attribute format */
- {Opt_noattr2, "noattr2"}, /* do not use attr2 attribute format */
- {Opt_filestreams,"filestreams"},/* use filestreams allocator */
- {Opt_quota, "quota"}, /* disk quotas (user) */
- {Opt_noquota, "noquota"}, /* no quotas */
- {Opt_usrquota, "usrquota"}, /* user quota enabled */
- {Opt_grpquota, "grpquota"}, /* group quota enabled */
- {Opt_prjquota, "prjquota"}, /* project quota enabled */
- {Opt_uquota, "uquota"}, /* user quota (IRIX variant) */
- {Opt_gquota, "gquota"}, /* group quota (IRIX variant) */
- {Opt_pquota, "pquota"}, /* project quota (IRIX variant) */
- {Opt_uqnoenforce,"uqnoenforce"},/* user quota limit enforcement */
- {Opt_gqnoenforce,"gqnoenforce"},/* group quota limit enforcement */
- {Opt_pqnoenforce,"pqnoenforce"},/* project quota limit enforcement */
- {Opt_qnoenforce, "qnoenforce"}, /* same as uqnoenforce */
- {Opt_discard, "discard"}, /* Discard unused blocks */
- {Opt_nodiscard, "nodiscard"}, /* Do not discard unused blocks */
- {Opt_dax, "dax"}, /* Enable direct access to bdev pages */
- {Opt_err, NULL},
+static const struct fs_parameter_spec xfs_param_specs[] = {
+ fsparam_u32("logbufs", Opt_logbufs),
+ fsparam_string("logbsize", Opt_logbsize),
+ fsparam_string("logdev", Opt_logdev),
+ fsparam_string("rtdev", Opt_rtdev),
+ fsparam_flag("wsync", Opt_wsync),
+ fsparam_flag("noalign", Opt_noalign),
+ fsparam_flag("swalloc", Opt_swalloc),
+ fsparam_u32("sunit", Opt_sunit),
+ fsparam_u32("swidth", Opt_swidth),
+ fsparam_flag("nouuid", Opt_nouuid),
+ fsparam_flag("grpid", Opt_grpid),
+ fsparam_flag("nogrpid", Opt_nogrpid),
+ fsparam_flag("bsdgroups", Opt_bsdgroups),
+ fsparam_flag("sysvgroups", Opt_sysvgroups),
+ fsparam_string("allocsize", Opt_allocsize),
+ fsparam_flag("norecovery", Opt_norecovery),
+ fsparam_flag("inode64", Opt_inode64),
+ fsparam_flag("inode32", Opt_inode32),
+ fsparam_flag("ikeep", Opt_ikeep),
+ fsparam_flag("noikeep", Opt_noikeep),
+ fsparam_flag("largeio", Opt_largeio),
+ fsparam_flag("nolargeio", Opt_nolargeio),
+ fsparam_flag("attr2", Opt_attr2),
+ fsparam_flag("noattr2", Opt_noattr2),
+ fsparam_flag("filestreams", Opt_filestreams),
+ fsparam_flag("quota", Opt_quota),
+ fsparam_flag("noquota", Opt_noquota),
+ fsparam_flag("usrquota", Opt_usrquota),
+ fsparam_flag("grpquota", Opt_grpquota),
+ fsparam_flag("prjquota", Opt_prjquota),
+ fsparam_flag("uquota", Opt_uquota),
+ fsparam_flag("gquota", Opt_gquota),
+ fsparam_flag("pquota", Opt_pquota),
+ fsparam_flag("uqnoenforce", Opt_uqnoenforce),
+ fsparam_flag("gqnoenforce", Opt_gqnoenforce),
+ fsparam_flag("pqnoenforce", Opt_pqnoenforce),
+ fsparam_flag("qnoenforce", Opt_qnoenforce),
+ fsparam_flag("discard", Opt_discard),
+ fsparam_flag("nodiscard", Opt_nodiscard),
+ fsparam_flag("dax", Opt_dax),
+ {}
};
-
-STATIC int
-suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
-{
- int last, shift_left_factor = 0, _res;
- char *value;
- int ret = 0;
-
- value = match_strdup(s);
- if (!value)
- return -ENOMEM;
-
- last = strlen(value) - 1;
- if (value[last] == 'K' || value[last] == 'k') {
- shift_left_factor = 10;
- value[last] = '\0';
- }
- if (value[last] == 'M' || value[last] == 'm') {
- shift_left_factor = 20;
- value[last] = '\0';
- }
- if (value[last] == 'G' || value[last] == 'g') {
- shift_left_factor = 30;
- value[last] = '\0';
- }
-
- if (kstrtoint(value, base, &_res))
- ret = -EINVAL;
- kfree(value);
- *res = _res << shift_left_factor;
- return ret;
-}
-
-/*
- * This function fills in xfs_mount_t fields based on mount args.
- * Note: the superblock has _not_ yet been read in.
- *
- * Note that this function leaks the various device name allocations on
- * failure. The caller takes care of them.
- *
- * *sb is const because this is also used to test options on the remount
- * path, and we don't want this to have any side effects at remount time.
- * Today this function does not change *sb, but just to future-proof...
- */
-STATIC int
-xfs_parseargs(
- struct xfs_mount *mp,
- char *options)
-{
- const struct super_block *sb = mp->m_super;
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int dsunit = 0;
- int dswidth = 0;
- int iosize = 0;
- uint8_t iosizelog = 0;
-
- /*
- * set up the mount name first so all the errors will refer to the
- * correct device.
- */
- mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
- if (!mp->m_fsname)
- return -ENOMEM;
- mp->m_fsname_len = strlen(mp->m_fsname) + 1;
-
- /*
- * Copy binary VFS mount flags we are interested in.
- */
- if (sb_rdonly(sb))
- mp->m_flags |= XFS_MOUNT_RDONLY;
- if (sb->s_flags & SB_DIRSYNC)
- mp->m_flags |= XFS_MOUNT_DIRSYNC;
- if (sb->s_flags & SB_SYNCHRONOUS)
- mp->m_flags |= XFS_MOUNT_WSYNC;
-
- /*
- * Set some default flags that could be cleared by the mount option
- * parsing.
- */
- mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
-
- /*
- * These can be overridden by the mount option parsing.
- */
- mp->m_logbufs = -1;
- mp->m_logbsize = -1;
-
- if (!options)
- goto done;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_logbufs:
- if (match_int(args, &mp->m_logbufs))
- return -EINVAL;
- break;
- case Opt_logbsize:
- if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
- return -EINVAL;
- break;
- case Opt_logdev:
- kfree(mp->m_logname);
- mp->m_logname = match_strdup(args);
- if (!mp->m_logname)
- return -ENOMEM;
- break;
- case Opt_rtdev:
- kfree(mp->m_rtname);
- mp->m_rtname = match_strdup(args);
- if (!mp->m_rtname)
- return -ENOMEM;
- break;
- case Opt_allocsize:
- case Opt_biosize:
- if (suffix_kstrtoint(args, 10, &iosize))
- return -EINVAL;
- iosizelog = ffs(iosize) - 1;
- break;
- case Opt_grpid:
- case Opt_bsdgroups:
- mp->m_flags |= XFS_MOUNT_GRPID;
- break;
- case Opt_nogrpid:
- case Opt_sysvgroups:
- mp->m_flags &= ~XFS_MOUNT_GRPID;
- break;
- case Opt_wsync:
- mp->m_flags |= XFS_MOUNT_WSYNC;
- break;
- case Opt_norecovery:
- mp->m_flags |= XFS_MOUNT_NORECOVERY;
- break;
- case Opt_noalign:
- mp->m_flags |= XFS_MOUNT_NOALIGN;
- break;
- case Opt_swalloc:
- mp->m_flags |= XFS_MOUNT_SWALLOC;
- break;
- case Opt_sunit:
- if (match_int(args, &dsunit))
- return -EINVAL;
- break;
- case Opt_swidth:
- if (match_int(args, &dswidth))
- return -EINVAL;
- break;
- case Opt_inode32:
- mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
- break;
- case Opt_inode64:
- mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
- break;
- case Opt_nouuid:
- mp->m_flags |= XFS_MOUNT_NOUUID;
- break;
- case Opt_ikeep:
- mp->m_flags |= XFS_MOUNT_IKEEP;
- break;
- case Opt_noikeep:
- mp->m_flags &= ~XFS_MOUNT_IKEEP;
- break;
- case Opt_largeio:
- mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
- break;
- case Opt_nolargeio:
- mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
- break;
- case Opt_attr2:
- mp->m_flags |= XFS_MOUNT_ATTR2;
- break;
- case Opt_noattr2:
- mp->m_flags &= ~XFS_MOUNT_ATTR2;
- mp->m_flags |= XFS_MOUNT_NOATTR2;
- break;
- case Opt_filestreams:
- mp->m_flags |= XFS_MOUNT_FILESTREAMS;
- break;
- case Opt_noquota:
- mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
- mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
- mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
- break;
- case Opt_quota:
- case Opt_uquota:
- case Opt_usrquota:
- mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
- XFS_UQUOTA_ENFD);
- break;
- case Opt_qnoenforce:
- case Opt_uqnoenforce:
- mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
- mp->m_qflags &= ~XFS_UQUOTA_ENFD;
- break;
- case Opt_pquota:
- case Opt_prjquota:
- mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
- XFS_PQUOTA_ENFD);
- break;
- case Opt_pqnoenforce:
- mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
- mp->m_qflags &= ~XFS_PQUOTA_ENFD;
- break;
- case Opt_gquota:
- case Opt_grpquota:
- mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
- XFS_GQUOTA_ENFD);
- break;
- case Opt_gqnoenforce:
- mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
- mp->m_qflags &= ~XFS_GQUOTA_ENFD;
- break;
- case Opt_discard:
- mp->m_flags |= XFS_MOUNT_DISCARD;
- break;
- case Opt_nodiscard:
- mp->m_flags &= ~XFS_MOUNT_DISCARD;
- break;
-#ifdef CONFIG_FS_DAX
- case Opt_dax:
- mp->m_flags |= XFS_MOUNT_DAX;
- break;
-#endif
- default:
- xfs_warn(mp, "unknown mount option [%s].", p);
- return -EINVAL;
- }
- }
-
- /*
- * no recovery flag requires a read-only mount
- */
- if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
- !(mp->m_flags & XFS_MOUNT_RDONLY)) {
- xfs_warn(mp, "no-recovery mounts must be read-only.");
- return -EINVAL;
- }
-
- if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
- xfs_warn(mp,
- "sunit and swidth options incompatible with the noalign option");
- return -EINVAL;
- }
-
-#ifndef CONFIG_XFS_QUOTA
- if (XFS_IS_QUOTA_RUNNING(mp)) {
- xfs_warn(mp, "quota support not available in this kernel.");
- return -EINVAL;
- }
-#endif
-
- if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
- xfs_warn(mp, "sunit and swidth must be specified together");
- return -EINVAL;
- }
-
- if (dsunit && (dswidth % dsunit != 0)) {
- xfs_warn(mp,
- "stripe width (%d) must be a multiple of the stripe unit (%d)",
- dswidth, dsunit);
- return -EINVAL;
- }
-
-done:
- if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
- /*
- * At this point the superblock has not been read
- * in, therefore we do not know the block size.
- * Before the mount call ends we will convert
- * these to FSBs.
- */
- mp->m_dalign = dsunit;
- mp->m_swidth = dswidth;
- }
-
- if (mp->m_logbufs != -1 &&
- mp->m_logbufs != 0 &&
- (mp->m_logbufs < XLOG_MIN_ICLOGS ||
- mp->m_logbufs > XLOG_MAX_ICLOGS)) {
- xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
- mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
- return -EINVAL;
- }
- if (mp->m_logbsize != -1 &&
- mp->m_logbsize != 0 &&
- (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
- mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
- !is_power_of_2(mp->m_logbsize))) {
- xfs_warn(mp,
- "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
- mp->m_logbsize);
- return -EINVAL;
- }
-
- if (iosizelog) {
- if (iosizelog > XFS_MAX_IO_LOG ||
- iosizelog < XFS_MIN_IO_LOG) {
- xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
- iosizelog, XFS_MIN_IO_LOG,
- XFS_MAX_IO_LOG);
- return -EINVAL;
- }
-
- mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
- mp->m_readio_log = iosizelog;
- mp->m_writeio_log = iosizelog;
- }
-
- return 0;
-}
+static const struct fs_parameter_description xfs_fs_parameters = {
+ .name = "xfs",
+ .specs = xfs_param_specs,
+};
struct proc_xfs_info {
uint64_t flag;
char *str;
};
-STATIC void
-xfs_showargs(
- struct xfs_mount *mp,
- struct seq_file *m)
+static int
+xfs_fs_show_options(
+ struct seq_file *m,
+ struct dentry *root)
{
static struct proc_xfs_info xfs_info_set[] = {
/* the few simple ones we can get from the mount struct */
@@ -448,30 +133,24 @@ xfs_showargs(
{ XFS_MOUNT_FILESTREAMS, ",filestreams" },
{ XFS_MOUNT_GRPID, ",grpid" },
{ XFS_MOUNT_DISCARD, ",discard" },
- { XFS_MOUNT_SMALL_INUMS, ",inode32" },
+ { XFS_MOUNT_LARGEIO, ",largeio" },
{ XFS_MOUNT_DAX, ",dax" },
{ 0, NULL }
};
- static struct proc_xfs_info xfs_info_unset[] = {
- /* the few simple ones we can get from the mount struct */
- { XFS_MOUNT_COMPAT_IOSIZE, ",largeio" },
- { XFS_MOUNT_SMALL_INUMS, ",inode64" },
- { 0, NULL }
- };
+ struct xfs_mount *mp = XFS_M(root->d_sb);
struct proc_xfs_info *xfs_infop;
for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
if (mp->m_flags & xfs_infop->flag)
seq_puts(m, xfs_infop->str);
}
- for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
- if (!(mp->m_flags & xfs_infop->flag))
- seq_puts(m, xfs_infop->str);
- }
- if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
+ seq_printf(m, ",inode%d",
+ (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);
+
+ if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
seq_printf(m, ",allocsize=%dk",
- (int)(1 << mp->m_writeio_log) >> 10);
+ (1 << mp->m_allocsize_log) >> 10);
if (mp->m_logbufs > 0)
seq_printf(m, ",logbufs=%d", mp->m_logbufs);
@@ -510,6 +189,8 @@ xfs_showargs(
if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
seq_puts(m, ",noquota");
+
+ return 0;
}
static uint64_t
@@ -808,32 +489,33 @@ xfs_init_mount_workqueues(
struct xfs_mount *mp)
{
mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
if (!mp->m_buf_workqueue)
goto out;
mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
if (!mp->m_unwritten_workqueue)
goto out_destroy_buf;
mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
+ 0, mp->m_super->s_id);
if (!mp->m_cil_workqueue)
goto out_destroy_unwritten;
mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
if (!mp->m_reclaim_workqueue)
goto out_destroy_cil;
mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
if (!mp->m_eofblocks_workqueue)
goto out_destroy_reclaim;
mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
- mp->m_fsname);
+ mp->m_super->s_id);
if (!mp->m_sync_workqueue)
goto out_destroy_eofb;
@@ -1037,13 +719,13 @@ xfs_fs_drop_inode(
return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}
-STATIC void
-xfs_free_fsname(
+static void
+xfs_mount_free(
struct xfs_mount *mp)
{
- kfree(mp->m_fsname);
kfree(mp->m_rtname);
kfree(mp->m_logname);
+ kmem_free(mp);
}
STATIC int
@@ -1204,181 +886,6 @@ xfs_quiesce_attr(
xfs_log_quiesce(mp);
}
-STATIC int
-xfs_test_remount_options(
- struct super_block *sb,
- char *options)
-{
- int error = 0;
- struct xfs_mount *tmp_mp;
-
- tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
- if (!tmp_mp)
- return -ENOMEM;
-
- tmp_mp->m_super = sb;
- error = xfs_parseargs(tmp_mp, options);
- xfs_free_fsname(tmp_mp);
- kmem_free(tmp_mp);
-
- return error;
-}
-
-STATIC int
-xfs_fs_remount(
- struct super_block *sb,
- int *flags,
- char *options)
-{
- struct xfs_mount *mp = XFS_M(sb);
- xfs_sb_t *sbp = &mp->m_sb;
- substring_t args[MAX_OPT_ARGS];
- char *p;
- int error;
-
- /* First, check for complete junk; i.e. invalid options */
- error = xfs_test_remount_options(sb, options);
- if (error)
- return error;
-
- sync_filesystem(sb);
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_inode64:
- mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
- mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
- break;
- case Opt_inode32:
- mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
- mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
- break;
- default:
- /*
- * Logically we would return an error here to prevent
- * users from believing they might have changed
- * mount options using remount which can't be changed.
- *
- * But unfortunately mount(8) adds all options from
- * mtab and fstab to the mount arguments in some cases
- * so we can't blindly reject options, but have to
- * check for each specified option if it actually
- * differs from the currently set option and only
- * reject it if that's the case.
- *
- * Until that is implemented we return success for
- * every remount request, and silently ignore all
- * options that we can't actually change.
- */
-#if 0
- xfs_info(mp,
- "mount option \"%s\" not supported for remount", p);
- return -EINVAL;
-#else
- break;
-#endif
- }
- }
-
- /* ro -> rw */
- if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
- if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
- xfs_warn(mp,
- "ro->rw transition prohibited on norecovery mount");
- return -EINVAL;
- }
-
- if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
- xfs_sb_has_ro_compat_feature(sbp,
- XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
- xfs_warn(mp,
-"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
- (sbp->sb_features_ro_compat &
- XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
- return -EINVAL;
- }
-
- mp->m_flags &= ~XFS_MOUNT_RDONLY;
-
- /*
- * If this is the first remount to writeable state we
- * might have some superblock changes to update.
- */
- if (mp->m_update_sb) {
- error = xfs_sync_sb(mp, false);
- if (error) {
- xfs_warn(mp, "failed to write sb changes");
- return error;
- }
- mp->m_update_sb = false;
- }
-
- /*
- * Fill out the reserve pool if it is empty. Use the stashed
- * value if it is non-zero, otherwise go with the default.
- */
- xfs_restore_resvblks(mp);
- xfs_log_work_queue(mp);
-
- /* Recover any CoW blocks that never got remapped. */
- error = xfs_reflink_recover_cow(mp);
- if (error) {
- xfs_err(mp,
- "Error %d recovering leftover CoW allocations.", error);
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return error;
- }
- xfs_start_block_reaping(mp);
-
- /* Create the per-AG metadata reservation pool .*/
- error = xfs_fs_reserve_ag_blocks(mp);
- if (error && error != -ENOSPC)
- return error;
- }
-
- /* rw -> ro */
- if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
- /*
- * Cancel background eofb scanning so it cannot race with the
- * final log force+buftarg wait and deadlock the remount.
- */
- xfs_stop_block_reaping(mp);
-
- /* Get rid of any leftover CoW reservations... */
- error = xfs_icache_free_cowblocks(mp, NULL);
- if (error) {
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return error;
- }
-
- /* Free the per-AG metadata reservation pool. */
- error = xfs_fs_unreserve_ag_blocks(mp);
- if (error) {
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return error;
- }
-
- /*
- * Before we sync the metadata, we need to free up the reserve
- * block pool so that the used block count in the superblock on
- * disk is correct at the end of the remount. Stash the current
- * reserve pool size so that if we get remounted rw, we can
- * return it to the same size.
- */
- xfs_save_resvblks(mp);
-
- xfs_quiesce_attr(mp);
- mp->m_flags |= XFS_MOUNT_RDONLY;
- }
-
- return 0;
-}
-
/*
* Second stage of a freeze. The data is already frozen so we only
* need to take care of the metadata. Once that's done sync the superblock
@@ -1409,15 +916,6 @@ xfs_fs_unfreeze(
return 0;
}
-STATIC int
-xfs_fs_show_options(
- struct seq_file *m,
- struct dentry *root)
-{
- xfs_showargs(XFS_M(root->d_sb), m);
- return 0;
-}
-
/*
* This function fills in xfs_mount_t fields based on mount args.
* Note: the superblock _has_ now been read in.
@@ -1540,60 +1038,337 @@ xfs_destroy_percpu_counters(
percpu_counter_destroy(&mp->m_delalloc_blks);
}
-static struct xfs_mount *
-xfs_mount_alloc(
+static void
+xfs_fs_put_super(
struct super_block *sb)
{
- struct xfs_mount *mp;
+ struct xfs_mount *mp = XFS_M(sb);
- mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
- if (!mp)
- return NULL;
+ /* if ->fill_super failed, we have no mount to tear down */
+ if (!sb->s_fs_info)
+ return;
- mp->m_super = sb;
- spin_lock_init(&mp->m_sb_lock);
- spin_lock_init(&mp->m_agirotor_lock);
- INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
- spin_lock_init(&mp->m_perag_lock);
- mutex_init(&mp->m_growlock);
- atomic_set(&mp->m_active_trans, 0);
- INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
- INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
- INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
- mp->m_kobj.kobject.kset = xfs_kset;
- /*
- * We don't create the finobt per-ag space reservation until after log
- * recovery, so we must set this to true so that an ifree transaction
- * started during log recovery will not depend on space reservations
- * for finobt expansion.
- */
- mp->m_finobt_nores = true;
- return mp;
+ xfs_notice(mp, "Unmounting Filesystem");
+ xfs_filestream_unmount(mp);
+ xfs_unmountfs(mp);
+
+ xfs_freesb(mp);
+ free_percpu(mp->m_stats.xs_stats);
+ xfs_destroy_percpu_counters(mp);
+ xfs_destroy_mount_workqueues(mp);
+ xfs_close_devices(mp);
+
+ sb->s_fs_info = NULL;
+ xfs_mount_free(mp);
}
+static long
+xfs_fs_nr_cached_objects(
+ struct super_block *sb,
+ struct shrink_control *sc)
+{
+ /* Paranoia: catch incorrect calls during mount setup or teardown */
+ if (WARN_ON_ONCE(!sb->s_fs_info))
+ return 0;
+ return xfs_reclaim_inodes_count(XFS_M(sb));
+}
-STATIC int
-xfs_fs_fill_super(
+static long
+xfs_fs_free_cached_objects(
struct super_block *sb,
- void *data,
- int silent)
+ struct shrink_control *sc)
{
- struct inode *root;
- struct xfs_mount *mp = NULL;
- int flags = 0, error = -ENOMEM;
+ return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
+}
+static const struct super_operations xfs_super_operations = {
+ .alloc_inode = xfs_fs_alloc_inode,
+ .destroy_inode = xfs_fs_destroy_inode,
+ .dirty_inode = xfs_fs_dirty_inode,
+ .drop_inode = xfs_fs_drop_inode,
+ .put_super = xfs_fs_put_super,
+ .sync_fs = xfs_fs_sync_fs,
+ .freeze_fs = xfs_fs_freeze,
+ .unfreeze_fs = xfs_fs_unfreeze,
+ .statfs = xfs_fs_statfs,
+ .show_options = xfs_fs_show_options,
+ .nr_cached_objects = xfs_fs_nr_cached_objects,
+ .free_cached_objects = xfs_fs_free_cached_objects,
+};
+
+static int
+suffix_kstrtoint(
+ const char *s,
+ unsigned int base,
+ int *res)
+{
+ int last, shift_left_factor = 0, _res;
+ char *value;
+ int ret = 0;
+
+ value = kstrdup(s, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ last = strlen(value) - 1;
+ if (value[last] == 'K' || value[last] == 'k') {
+ shift_left_factor = 10;
+ value[last] = '\0';
+ }
+ if (value[last] == 'M' || value[last] == 'm') {
+ shift_left_factor = 20;
+ value[last] = '\0';
+ }
+ if (value[last] == 'G' || value[last] == 'g') {
+ shift_left_factor = 30;
+ value[last] = '\0';
+ }
+
+ if (kstrtoint(value, base, &_res))
+ ret = -EINVAL;
+ kfree(value);
+ *res = _res << shift_left_factor;
+ return ret;
+}
+
+/*
+ * Set mount state from a mount option.
+ *
+ * NOTE: mp->m_super is NULL here!
+ */
+static int
+xfs_fc_parse_param(
+ struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct xfs_mount *mp = fc->s_fs_info;
+ struct fs_parse_result result;
+ int size = 0;
+ int opt;
+
+ opt = fs_parse(fc, &xfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_logbufs:
+ mp->m_logbufs = result.uint_32;
+ return 0;
+ case Opt_logbsize:
+ if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
+ return -EINVAL;
+ return 0;
+ case Opt_logdev:
+ kfree(mp->m_logname);
+ mp->m_logname = kstrdup(param->string, GFP_KERNEL);
+ if (!mp->m_logname)
+ return -ENOMEM;
+ return 0;
+ case Opt_rtdev:
+ kfree(mp->m_rtname);
+ mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
+ if (!mp->m_rtname)
+ return -ENOMEM;
+ return 0;
+ case Opt_allocsize:
+ if (suffix_kstrtoint(param->string, 10, &size))
+ return -EINVAL;
+ mp->m_allocsize_log = ffs(size) - 1;
+ mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
+ return 0;
+ case Opt_grpid:
+ case Opt_bsdgroups:
+ mp->m_flags |= XFS_MOUNT_GRPID;
+ return 0;
+ case Opt_nogrpid:
+ case Opt_sysvgroups:
+ mp->m_flags &= ~XFS_MOUNT_GRPID;
+ return 0;
+ case Opt_wsync:
+ mp->m_flags |= XFS_MOUNT_WSYNC;
+ return 0;
+ case Opt_norecovery:
+ mp->m_flags |= XFS_MOUNT_NORECOVERY;
+ return 0;
+ case Opt_noalign:
+ mp->m_flags |= XFS_MOUNT_NOALIGN;
+ return 0;
+ case Opt_swalloc:
+ mp->m_flags |= XFS_MOUNT_SWALLOC;
+ return 0;
+ case Opt_sunit:
+ mp->m_dalign = result.uint_32;
+ return 0;
+ case Opt_swidth:
+ mp->m_swidth = result.uint_32;
+ return 0;
+ case Opt_inode32:
+ mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
+ return 0;
+ case Opt_inode64:
+ mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
+ return 0;
+ case Opt_nouuid:
+ mp->m_flags |= XFS_MOUNT_NOUUID;
+ return 0;
+ case Opt_ikeep:
+ mp->m_flags |= XFS_MOUNT_IKEEP;
+ return 0;
+ case Opt_noikeep:
+ mp->m_flags &= ~XFS_MOUNT_IKEEP;
+ return 0;
+ case Opt_largeio:
+ mp->m_flags |= XFS_MOUNT_LARGEIO;
+ return 0;
+ case Opt_nolargeio:
+ mp->m_flags &= ~XFS_MOUNT_LARGEIO;
+ return 0;
+ case Opt_attr2:
+ mp->m_flags |= XFS_MOUNT_ATTR2;
+ return 0;
+ case Opt_noattr2:
+ mp->m_flags &= ~XFS_MOUNT_ATTR2;
+ mp->m_flags |= XFS_MOUNT_NOATTR2;
+ return 0;
+ case Opt_filestreams:
+ mp->m_flags |= XFS_MOUNT_FILESTREAMS;
+ return 0;
+ case Opt_noquota:
+ mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
+ mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
+ mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
+ return 0;
+ case Opt_quota:
+ case Opt_uquota:
+ case Opt_usrquota:
+ mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
+ XFS_UQUOTA_ENFD);
+ return 0;
+ case Opt_qnoenforce:
+ case Opt_uqnoenforce:
+ mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
+ mp->m_qflags &= ~XFS_UQUOTA_ENFD;
+ return 0;
+ case Opt_pquota:
+ case Opt_prjquota:
+ mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
+ XFS_PQUOTA_ENFD);
+ return 0;
+ case Opt_pqnoenforce:
+ mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
+ mp->m_qflags &= ~XFS_PQUOTA_ENFD;
+ return 0;
+ case Opt_gquota:
+ case Opt_grpquota:
+ mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
+ XFS_GQUOTA_ENFD);
+ return 0;
+ case Opt_gqnoenforce:
+ mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
+ mp->m_qflags &= ~XFS_GQUOTA_ENFD;
+ return 0;
+ case Opt_discard:
+ mp->m_flags |= XFS_MOUNT_DISCARD;
+ return 0;
+ case Opt_nodiscard:
+ mp->m_flags &= ~XFS_MOUNT_DISCARD;
+ return 0;
+#ifdef CONFIG_FS_DAX
+ case Opt_dax:
+ mp->m_flags |= XFS_MOUNT_DAX;
+ return 0;
+#endif
+ default:
+ xfs_warn(mp, "unknown mount option [%s].", param->key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+xfs_fc_validate_params(
+ struct xfs_mount *mp)
+{
/*
- * allocate mp and do all low-level struct initializations before we
- * attach it to the super
+ * no recovery flag requires a read-only mount
*/
- mp = xfs_mount_alloc(sb);
- if (!mp)
- goto out;
- sb->s_fs_info = mp;
+ if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
+ !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ xfs_warn(mp, "no-recovery mounts must be read-only.");
+ return -EINVAL;
+ }
+
+ if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
+ (mp->m_dalign || mp->m_swidth)) {
+ xfs_warn(mp,
+ "sunit and swidth options incompatible with the noalign option");
+ return -EINVAL;
+ }
+
+ if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
+ xfs_warn(mp, "quota support not available in this kernel.");
+ return -EINVAL;
+ }
+
+ if ((mp->m_dalign && !mp->m_swidth) ||
+ (!mp->m_dalign && mp->m_swidth)) {
+ xfs_warn(mp, "sunit and swidth must be specified together");
+ return -EINVAL;
+ }
+
+ if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
+ xfs_warn(mp,
+ "stripe width (%d) must be a multiple of the stripe unit (%d)",
+ mp->m_swidth, mp->m_dalign);
+ return -EINVAL;
+ }
+
+ if (mp->m_logbufs != -1 &&
+ mp->m_logbufs != 0 &&
+ (mp->m_logbufs < XLOG_MIN_ICLOGS ||
+ mp->m_logbufs > XLOG_MAX_ICLOGS)) {
+ xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
+ mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
+ return -EINVAL;
+ }
+
+ if (mp->m_logbsize != -1 &&
+ mp->m_logbsize != 0 &&
+ (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
+ mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
+ !is_power_of_2(mp->m_logbsize))) {
+ xfs_warn(mp,
+ "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
+ mp->m_logbsize);
+ return -EINVAL;
+ }
+
+ if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
+ (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
+ mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
+ xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
+ mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
+ return -EINVAL;
+ }
- error = xfs_parseargs(mp, (char *)data);
+ return 0;
+}
+
+static int
+xfs_fc_fill_super(
+ struct super_block *sb,
+ struct fs_context *fc)
+{
+ struct xfs_mount *mp = sb->s_fs_info;
+ struct inode *root;
+ int flags = 0, error;
+
+ mp->m_super = sb;
+
+ error = xfs_fc_validate_params(mp);
if (error)
- goto out_free_fsname;
+ goto out_free_names;
sb_min_blocksize(sb, BBSIZE);
sb->s_xattr = xfs_xattr_handlers;
@@ -1615,12 +1390,12 @@ xfs_fs_fill_super(
msleep(xfs_globals.mount_delay * 1000);
}
- if (silent)
+ if (fc->sb_flags & SB_SILENT)
flags |= XFS_MFSI_QUIET;
error = xfs_open_devices(mp);
if (error)
- goto out_free_fsname;
+ goto out_free_names;
error = xfs_init_mount_workqueues(mp);
if (error)
@@ -1663,6 +1438,8 @@ xfs_fs_fill_super(
sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
sb->s_max_links = XFS_MAXLINK;
sb->s_time_gran = 1;
+ sb->s_time_min = S32_MIN;
+ sb->s_time_max = S32_MAX;
sb->s_iflags |= SB_I_CGROUPWB;
set_posix_acl_flag(sb);
@@ -1755,11 +1532,9 @@ xfs_fs_fill_super(
xfs_destroy_mount_workqueues(mp);
out_close_devices:
xfs_close_devices(mp);
- out_free_fsname:
+ out_free_names:
sb->s_fs_info = NULL;
- xfs_free_fsname(mp);
- kfree(mp);
- out:
+ xfs_mount_free(mp);
return error;
out_unmount:
@@ -1768,80 +1543,252 @@ xfs_fs_fill_super(
goto out_free_sb;
}
-STATIC void
-xfs_fs_put_super(
- struct super_block *sb)
+static int
+xfs_fc_get_tree(
+ struct fs_context *fc)
{
- struct xfs_mount *mp = XFS_M(sb);
+ return get_tree_bdev(fc, xfs_fc_fill_super);
+}
- /* if ->fill_super failed, we have no mount to tear down */
- if (!sb->s_fs_info)
- return;
+static int
+xfs_remount_rw(
+ struct xfs_mount *mp)
+{
+ struct xfs_sb *sbp = &mp->m_sb;
+ int error;
- xfs_notice(mp, "Unmounting Filesystem");
- xfs_filestream_unmount(mp);
- xfs_unmountfs(mp);
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
+ xfs_warn(mp,
+ "ro->rw transition prohibited on norecovery mount");
+ return -EINVAL;
+ }
- xfs_freesb(mp);
- free_percpu(mp->m_stats.xs_stats);
- xfs_destroy_percpu_counters(mp);
- xfs_destroy_mount_workqueues(mp);
- xfs_close_devices(mp);
+ if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
+ xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+ "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
+ (sbp->sb_features_ro_compat &
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+ return -EINVAL;
+ }
- sb->s_fs_info = NULL;
- xfs_free_fsname(mp);
- kfree(mp);
+ mp->m_flags &= ~XFS_MOUNT_RDONLY;
+
+ /*
+ * If this is the first remount to writeable state we might have some
+ * superblock changes to update.
+ */
+ if (mp->m_update_sb) {
+ error = xfs_sync_sb(mp, false);
+ if (error) {
+ xfs_warn(mp, "failed to write sb changes");
+ return error;
+ }
+ mp->m_update_sb = false;
+ }
+
+ /*
+ * Fill out the reserve pool if it is empty. Use the stashed value if
+ * it is non-zero, otherwise go with the default.
+ */
+ xfs_restore_resvblks(mp);
+ xfs_log_work_queue(mp);
+
+ /* Recover any CoW blocks that never got remapped. */
+ error = xfs_reflink_recover_cow(mp);
+ if (error) {
+ xfs_err(mp,
+ "Error %d recovering leftover CoW allocations.", error);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return error;
+ }
+ xfs_start_block_reaping(mp);
+
+ /* Create the per-AG metadata reservation pool. */
+ error = xfs_fs_reserve_ag_blocks(mp);
+ if (error && error != -ENOSPC)
+ return error;
+
+ return 0;
}
-STATIC struct dentry *
-xfs_fs_mount(
- struct file_system_type *fs_type,
- int flags,
- const char *dev_name,
- void *data)
+static int
+xfs_remount_ro(
+ struct xfs_mount *mp)
{
- return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
+ int error;
+
+ /*
+ * Cancel background eofb scanning so it cannot race with the final
+ * log force+buftarg wait and deadlock the remount.
+ */
+ xfs_stop_block_reaping(mp);
+
+ /* Get rid of any leftover CoW reservations... */
+ error = xfs_icache_free_cowblocks(mp, NULL);
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return error;
+ }
+
+ /* Free the per-AG metadata reservation pool. */
+ error = xfs_fs_unreserve_ag_blocks(mp);
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return error;
+ }
+
+ /*
+ * Before we sync the metadata, we need to free up the reserve block
+ * pool so that the used block count in the superblock on disk is
+ * correct at the end of the remount. Stash the current reserve pool
+ * size so that if we get remounted rw, we can return it to the same
+ * size.
+ */
+ xfs_save_resvblks(mp);
+
+ xfs_quiesce_attr(mp);
+ mp->m_flags |= XFS_MOUNT_RDONLY;
+
+ return 0;
}
-static long
-xfs_fs_nr_cached_objects(
- struct super_block *sb,
- struct shrink_control *sc)
+/*
+ * Logically we would return an error here to prevent users from believing
+ * they might have changed mount options using remount which can't be changed.
+ *
+ * But unfortunately mount(8) adds all options from mtab and fstab to the mount
+ * arguments in some cases so we can't blindly reject options, but have to
+ * check for each specified option if it actually differs from the currently
+ * set option and only reject it if that's the case.
+ *
+ * Until that is implemented we return success for every remount request, and
+ * silently ignore all options that we can't actually change.
+ */
+static int
+xfs_fc_reconfigure(
+ struct fs_context *fc)
{
- /* Paranoia: catch incorrect calls during mount setup or teardown */
- if (WARN_ON_ONCE(!sb->s_fs_info))
- return 0;
- return xfs_reclaim_inodes_count(XFS_M(sb));
+ struct xfs_mount *mp = XFS_M(fc->root->d_sb);
+ struct xfs_mount *new_mp = fc->s_fs_info;
+ xfs_sb_t *sbp = &mp->m_sb;
+ int flags = fc->sb_flags;
+ int error;
+
+ error = xfs_fc_validate_params(new_mp);
+ if (error)
+ return error;
+
+ sync_filesystem(mp->m_super);
+
+ /* inode32 -> inode64 */
+ if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
+ !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
+ mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
+ mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
+ }
+
+ /* inode64 -> inode32 */
+ if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
+ (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
+ mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
+ mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
+ }
+
+ /* ro -> rw */
+ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
+ error = xfs_remount_rw(mp);
+ if (error)
+ return error;
+ }
+
+ /* rw -> ro */
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
+ error = xfs_remount_ro(mp);
+ if (error)
+ return error;
+ }
+
+ return 0;
}
-static long
-xfs_fs_free_cached_objects(
- struct super_block *sb,
- struct shrink_control *sc)
+static void xfs_fc_free(
+ struct fs_context *fc)
{
- return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
+ struct xfs_mount *mp = fc->s_fs_info;
+
+ /*
+ * mp is stored in the fs_context when it is initialized.
+ * mp is transferred to the superblock on a successful mount,
+ * but if an error occurs before the transfer we have to free
+ * it here.
+ */
+ if (mp)
+ xfs_mount_free(mp);
}
-static const struct super_operations xfs_super_operations = {
- .alloc_inode = xfs_fs_alloc_inode,
- .destroy_inode = xfs_fs_destroy_inode,
- .dirty_inode = xfs_fs_dirty_inode,
- .drop_inode = xfs_fs_drop_inode,
- .put_super = xfs_fs_put_super,
- .sync_fs = xfs_fs_sync_fs,
- .freeze_fs = xfs_fs_freeze,
- .unfreeze_fs = xfs_fs_unfreeze,
- .statfs = xfs_fs_statfs,
- .remount_fs = xfs_fs_remount,
- .show_options = xfs_fs_show_options,
- .nr_cached_objects = xfs_fs_nr_cached_objects,
- .free_cached_objects = xfs_fs_free_cached_objects,
+static const struct fs_context_operations xfs_context_ops = {
+ .parse_param = xfs_fc_parse_param,
+ .get_tree = xfs_fc_get_tree,
+ .reconfigure = xfs_fc_reconfigure,
+ .free = xfs_fc_free,
};
+static int xfs_init_fs_context(
+ struct fs_context *fc)
+{
+ struct xfs_mount *mp;
+
+ mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
+ if (!mp)
+ return -ENOMEM;
+
+ spin_lock_init(&mp->m_sb_lock);
+ spin_lock_init(&mp->m_agirotor_lock);
+ INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
+ spin_lock_init(&mp->m_perag_lock);
+ mutex_init(&mp->m_growlock);
+ atomic_set(&mp->m_active_trans, 0);
+ INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+ INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
+ INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
+ mp->m_kobj.kobject.kset = xfs_kset;
+ /*
+ * We don't create the finobt per-ag space reservation until after log
+ * recovery, so we must set this to true so that an ifree transaction
+ * started during log recovery will not depend on space reservations
+ * for finobt expansion.
+ */
+ mp->m_finobt_nores = true;
+
+ /*
+ * These can be overridden by the mount option parsing.
+ */
+ mp->m_logbufs = -1;
+ mp->m_logbsize = -1;
+ mp->m_allocsize_log = 16; /* 64k */
+
+ /*
+ * Copy binary VFS mount flags we are interested in.
+ */
+ if (fc->sb_flags & SB_RDONLY)
+ mp->m_flags |= XFS_MOUNT_RDONLY;
+ if (fc->sb_flags & SB_DIRSYNC)
+ mp->m_flags |= XFS_MOUNT_DIRSYNC;
+ if (fc->sb_flags & SB_SYNCHRONOUS)
+ mp->m_flags |= XFS_MOUNT_WSYNC;
+
+ fc->s_fs_info = mp;
+ fc->ops = &xfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type xfs_fs_type = {
.owner = THIS_MODULE,
.name = "xfs",
- .mount = xfs_fs_mount,
+ .init_fs_context = xfs_init_fs_context,
+ .parameters = &xfs_fs_parameters,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
@@ -1850,37 +1797,39 @@ MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
- if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
- offsetof(struct xfs_ioend, io_inline_bio),
- BIOSET_NEED_BVECS))
- goto out;
-
- xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
- "xfs_log_ticket");
+ xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
+ sizeof(struct xlog_ticket),
+ 0, 0, NULL);
if (!xfs_log_ticket_zone)
- goto out_free_ioend_bioset;
+ goto out;
- xfs_bmap_free_item_zone = kmem_zone_init(
- sizeof(struct xfs_extent_free_item),
- "xfs_bmap_free_item");
+ xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
+ sizeof(struct xfs_extent_free_item),
+ 0, 0, NULL);
if (!xfs_bmap_free_item_zone)
goto out_destroy_log_ticket_zone;
- xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
- "xfs_btree_cur");
+ xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
+ sizeof(struct xfs_btree_cur),
+ 0, 0, NULL);
if (!xfs_btree_cur_zone)
goto out_destroy_bmap_free_item_zone;
- xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
- "xfs_da_state");
+ xfs_da_state_zone = kmem_cache_create("xfs_da_state",
+ sizeof(struct xfs_da_state),
+ 0, 0, NULL);
if (!xfs_da_state_zone)
goto out_destroy_btree_cur_zone;
- xfs_ifork_zone = kmem_zone_init(sizeof(struct xfs_ifork), "xfs_ifork");
+ xfs_ifork_zone = kmem_cache_create("xfs_ifork",
+ sizeof(struct xfs_ifork),
+ 0, 0, NULL);
if (!xfs_ifork_zone)
goto out_destroy_da_state_zone;
- xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
+ xfs_trans_zone = kmem_cache_create("xfs_trans",
+ sizeof(struct xfs_trans),
+ 0, 0, NULL);
if (!xfs_trans_zone)
goto out_destroy_ifork_zone;
@@ -1890,111 +1839,121 @@ xfs_init_zones(void)
* size possible under XFS. This wastes a little bit of memory,
* but it is much faster.
*/
- xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
- "xfs_buf_item");
+ xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
+ sizeof(struct xfs_buf_log_item),
+ 0, 0, NULL);
if (!xfs_buf_item_zone)
goto out_destroy_trans_zone;
- xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
- ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
- sizeof(xfs_extent_t))), "xfs_efd_item");
+ xfs_efd_zone = kmem_cache_create("xfs_efd_item",
+ (sizeof(struct xfs_efd_log_item) +
+ (XFS_EFD_MAX_FAST_EXTENTS - 1) *
+ sizeof(struct xfs_extent)),
+ 0, 0, NULL);
if (!xfs_efd_zone)
goto out_destroy_buf_item_zone;
- xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
- ((XFS_EFI_MAX_FAST_EXTENTS - 1) *
- sizeof(xfs_extent_t))), "xfs_efi_item");
+ xfs_efi_zone = kmem_cache_create("xfs_efi_item",
+ (sizeof(struct xfs_efi_log_item) +
+ (XFS_EFI_MAX_FAST_EXTENTS - 1) *
+ sizeof(struct xfs_extent)),
+ 0, 0, NULL);
if (!xfs_efi_zone)
goto out_destroy_efd_zone;
- xfs_inode_zone =
- kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
- KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
- KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
+ xfs_inode_zone = kmem_cache_create("xfs_inode",
+ sizeof(struct xfs_inode), 0,
+ (SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD | SLAB_ACCOUNT),
+ xfs_fs_inode_init_once);
if (!xfs_inode_zone)
goto out_destroy_efi_zone;
- xfs_ili_zone =
- kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
- KM_ZONE_SPREAD, NULL);
+ xfs_ili_zone = kmem_cache_create("xfs_ili",
+ sizeof(struct xfs_inode_log_item), 0,
+ SLAB_MEM_SPREAD, NULL);
if (!xfs_ili_zone)
goto out_destroy_inode_zone;
- xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
- "xfs_icr");
+
+ xfs_icreate_zone = kmem_cache_create("xfs_icr",
+ sizeof(struct xfs_icreate_item),
+ 0, 0, NULL);
if (!xfs_icreate_zone)
goto out_destroy_ili_zone;
- xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
- "xfs_rud_item");
+ xfs_rud_zone = kmem_cache_create("xfs_rud_item",
+ sizeof(struct xfs_rud_log_item),
+ 0, 0, NULL);
if (!xfs_rud_zone)
goto out_destroy_icreate_zone;
- xfs_rui_zone = kmem_zone_init(
+ xfs_rui_zone = kmem_cache_create("xfs_rui_item",
xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
- "xfs_rui_item");
+ 0, 0, NULL);
if (!xfs_rui_zone)
goto out_destroy_rud_zone;
- xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
- "xfs_cud_item");
+ xfs_cud_zone = kmem_cache_create("xfs_cud_item",
+ sizeof(struct xfs_cud_log_item),
+ 0, 0, NULL);
if (!xfs_cud_zone)
goto out_destroy_rui_zone;
- xfs_cui_zone = kmem_zone_init(
+ xfs_cui_zone = kmem_cache_create("xfs_cui_item",
xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
- "xfs_cui_item");
+ 0, 0, NULL);
if (!xfs_cui_zone)
goto out_destroy_cud_zone;
- xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
- "xfs_bud_item");
+ xfs_bud_zone = kmem_cache_create("xfs_bud_item",
+ sizeof(struct xfs_bud_log_item),
+ 0, 0, NULL);
if (!xfs_bud_zone)
goto out_destroy_cui_zone;
- xfs_bui_zone = kmem_zone_init(
+ xfs_bui_zone = kmem_cache_create("xfs_bui_item",
xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
- "xfs_bui_item");
+ 0, 0, NULL);
if (!xfs_bui_zone)
goto out_destroy_bud_zone;
return 0;
out_destroy_bud_zone:
- kmem_zone_destroy(xfs_bud_zone);
+ kmem_cache_destroy(xfs_bud_zone);
out_destroy_cui_zone:
- kmem_zone_destroy(xfs_cui_zone);
+ kmem_cache_destroy(xfs_cui_zone);
out_destroy_cud_zone:
- kmem_zone_destroy(xfs_cud_zone);
+ kmem_cache_destroy(xfs_cud_zone);
out_destroy_rui_zone:
- kmem_zone_destroy(xfs_rui_zone);
+ kmem_cache_destroy(xfs_rui_zone);
out_destroy_rud_zone:
- kmem_zone_destroy(xfs_rud_zone);
+ kmem_cache_destroy(xfs_rud_zone);
out_destroy_icreate_zone:
- kmem_zone_destroy(xfs_icreate_zone);
+ kmem_cache_destroy(xfs_icreate_zone);
out_destroy_ili_zone:
- kmem_zone_destroy(xfs_ili_zone);
+ kmem_cache_destroy(xfs_ili_zone);
out_destroy_inode_zone:
- kmem_zone_destroy(xfs_inode_zone);
+ kmem_cache_destroy(xfs_inode_zone);
out_destroy_efi_zone:
- kmem_zone_destroy(xfs_efi_zone);
+ kmem_cache_destroy(xfs_efi_zone);
out_destroy_efd_zone:
- kmem_zone_destroy(xfs_efd_zone);
+ kmem_cache_destroy(xfs_efd_zone);
out_destroy_buf_item_zone:
- kmem_zone_destroy(xfs_buf_item_zone);
+ kmem_cache_destroy(xfs_buf_item_zone);
out_destroy_trans_zone:
- kmem_zone_destroy(xfs_trans_zone);
+ kmem_cache_destroy(xfs_trans_zone);
out_destroy_ifork_zone:
- kmem_zone_destroy(xfs_ifork_zone);
+ kmem_cache_destroy(xfs_ifork_zone);
out_destroy_da_state_zone:
- kmem_zone_destroy(xfs_da_state_zone);
+ kmem_cache_destroy(xfs_da_state_zone);
out_destroy_btree_cur_zone:
- kmem_zone_destroy(xfs_btree_cur_zone);
+ kmem_cache_destroy(xfs_btree_cur_zone);
out_destroy_bmap_free_item_zone:
- kmem_zone_destroy(xfs_bmap_free_item_zone);
+ kmem_cache_destroy(xfs_bmap_free_item_zone);
out_destroy_log_ticket_zone:
- kmem_zone_destroy(xfs_log_ticket_zone);
- out_free_ioend_bioset:
- bioset_exit(&xfs_ioend_bioset);
+ kmem_cache_destroy(xfs_log_ticket_zone);
out:
return -ENOMEM;
}
@@ -2007,25 +1966,24 @@ xfs_destroy_zones(void)
* destroy caches.
*/
rcu_barrier();
- kmem_zone_destroy(xfs_bui_zone);
- kmem_zone_destroy(xfs_bud_zone);
- kmem_zone_destroy(xfs_cui_zone);
- kmem_zone_destroy(xfs_cud_zone);
- kmem_zone_destroy(xfs_rui_zone);
- kmem_zone_destroy(xfs_rud_zone);
- kmem_zone_destroy(xfs_icreate_zone);
- kmem_zone_destroy(xfs_ili_zone);
- kmem_zone_destroy(xfs_inode_zone);
- kmem_zone_destroy(xfs_efi_zone);
- kmem_zone_destroy(xfs_efd_zone);
- kmem_zone_destroy(xfs_buf_item_zone);
- kmem_zone_destroy(xfs_trans_zone);
- kmem_zone_destroy(xfs_ifork_zone);
- kmem_zone_destroy(xfs_da_state_zone);
- kmem_zone_destroy(xfs_btree_cur_zone);
- kmem_zone_destroy(xfs_bmap_free_item_zone);
- kmem_zone_destroy(xfs_log_ticket_zone);
- bioset_exit(&xfs_ioend_bioset);
+ kmem_cache_destroy(xfs_bui_zone);
+ kmem_cache_destroy(xfs_bud_zone);
+ kmem_cache_destroy(xfs_cui_zone);
+ kmem_cache_destroy(xfs_cud_zone);
+ kmem_cache_destroy(xfs_rui_zone);
+ kmem_cache_destroy(xfs_rud_zone);
+ kmem_cache_destroy(xfs_icreate_zone);
+ kmem_cache_destroy(xfs_ili_zone);
+ kmem_cache_destroy(xfs_inode_zone);
+ kmem_cache_destroy(xfs_efi_zone);
+ kmem_cache_destroy(xfs_efd_zone);
+ kmem_cache_destroy(xfs_buf_item_zone);
+ kmem_cache_destroy(xfs_trans_zone);
+ kmem_cache_destroy(xfs_ifork_zone);
+ kmem_cache_destroy(xfs_da_state_zone);
+ kmem_cache_destroy(xfs_btree_cur_zone);
+ kmem_cache_destroy(xfs_bmap_free_item_zone);
+ kmem_cache_destroy(xfs_log_ticket_zone);
}
STATIC int __init
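
The xfs_super.c changes above replace the old string-chopping mount option parser with the new mount API: a fs_parameter_description table, a ->parse_param hook driven by fs_parse(), get_tree_bdev() in place of mount_bdev(), and ->reconfigure in place of ->remount_fs. Below is a compressed sketch of the same shape for a hypothetical filesystem, assuming the fs_context interfaces this patch relies on (fs_parse() taking a fs_parameter_description, get_tree_bdev()); every demofs_* name is invented for illustration. XFS keeps its parsing state directly in the xfs_mount hung off fc->s_fs_info, whereas the sketch uses a small private context in fc->fs_private:

    #include <linux/fs.h>
    #include <linux/fs_context.h>
    #include <linux/fs_parser.h>
    #include <linux/slab.h>

    struct demofs_ctx {
            unsigned int    bufs;
    };

    enum { Opt_bufs };

    static const struct fs_parameter_spec demofs_param_specs[] = {
            fsparam_u32("bufs", Opt_bufs),
            {}
    };

    static const struct fs_parameter_description demofs_parameters = {
            .name   = "demofs",
            .specs  = demofs_param_specs,
    };

    static int demofs_parse_param(struct fs_context *fc,
                                  struct fs_parameter *param)
    {
            struct demofs_ctx *ctx = fc->fs_private;
            struct fs_parse_result result;
            int opt;

            opt = fs_parse(fc, &demofs_parameters, param, &result);
            if (opt < 0)
                    return opt;

            switch (opt) {
            case Opt_bufs:
                    ctx->bufs = result.uint_32;
                    return 0;
            }
            return -EINVAL;
    }

    static int demofs_fill_super(struct super_block *sb,
                                 struct fs_context *fc)
    {
            /* read the on-disk superblock, set sb->s_op and sb->s_root here */
            return -EINVAL;         /* stub for the sketch */
    }

    static int demofs_get_tree(struct fs_context *fc)
    {
            return get_tree_bdev(fc, demofs_fill_super);
    }

    static void demofs_free_fc(struct fs_context *fc)
    {
            kfree(fc->fs_private);
    }

    static const struct fs_context_operations demofs_context_ops = {
            .parse_param    = demofs_parse_param,
            .get_tree       = demofs_get_tree,
            .free           = demofs_free_fc,
    };

    static int demofs_init_fs_context(struct fs_context *fc)
    {
            struct demofs_ctx *ctx;

            ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
            if (!ctx)
                    return -ENOMEM;
            fc->fs_private = ctx;
            fc->ops = &demofs_context_ops;
            return 0;
    }

Mounting such a filesystem with -o bufs=8 would deliver a single "bufs" parameter to demofs_parse_param() before ->get_tree() runs, which is the same ordering the converted XFS code depends on.
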
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 763e43d22dee..b552cf6d3379 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -11,9 +11,11 @@
#ifdef CONFIG_XFS_QUOTA
extern int xfs_qm_init(void);
extern void xfs_qm_exit(void);
+# define XFS_QUOTA_STRING "quota, "
#else
# define xfs_qm_init() (0)
# define xfs_qm_exit() do { } while (0)
+# define XFS_QUOTA_STRING
#endif
#ifdef CONFIG_XFS_POSIX_ACL
@@ -50,6 +52,12 @@ extern void xfs_qm_exit(void);
# define XFS_WARN_STRING
#endif
+#ifdef CONFIG_XFS_ASSERT_FATAL
+# define XFS_ASSERT_FATAL_STRING "fatal assert, "
+#else
+# define XFS_ASSERT_FATAL_STRING
+#endif
+
#ifdef DEBUG
# define XFS_DBG_STRING "debug"
#else
@@ -63,6 +71,8 @@ extern void xfs_qm_exit(void);
XFS_SCRUB_STRING \
XFS_REPAIR_STRING \
XFS_WARN_STRING \
+ XFS_QUOTA_STRING \
+ XFS_ASSERT_FATAL_STRING \
XFS_DBG_STRING /* DBG must be last */
struct xfs_inode;
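
The xfs_super.h hunk extends the compile-time feature string with two new fragments. Each CONFIG-dependent macro expands either to a string literal or to nothing, and C's adjacent-string-literal concatenation glues the surviving pieces together. An illustrative expansion only, for a hypothetical configuration with CONFIG_XFS_QUOTA and CONFIG_XFS_ASSERT_FATAL enabled (the neighbouring fragments depend on the rest of the .config):

    /* ... XFS_QUOTA_STRING XFS_ASSERT_FATAL_STRING XFS_DBG_STRING        */
    /* preprocesses to adjacent literals:                                 */
    /* ... "quota, " "fatal assert, " "debug"                             */
    /* which the compiler joins into "...quota, fatal assert, debug"      */
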
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index ed66fd2de327..a25502bc2071 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -17,6 +17,7 @@
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_quota.h"
+#include "xfs_symlink.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
diff --git a/fs/xfs/xfs_symlink.h b/fs/xfs/xfs_symlink.h
index 9743d8c9394b..b1fa091427e6 100644
--- a/fs/xfs/xfs_symlink.h
+++ b/fs/xfs/xfs_symlink.h
@@ -5,7 +5,7 @@
#ifndef __XFS_SYMLINK_H
#define __XFS_SYMLINK_H 1
-/* Kernel only symlink defintions */
+/* Kernel only symlink definitions */
int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
const char *target_path, umode_t mode, struct xfs_inode **ipp);
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index ddd0bf7a4740..f1bc88f4367c 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -63,19 +63,6 @@ static const struct sysfs_ops xfs_sysfs_ops = {
.store = xfs_sysfs_object_store,
};
-/*
- * xfs_mount kobject. The mp kobject also serves as the per-mount parent object
- * that is identified by the fsname under sysfs.
- */
-
-static inline struct xfs_mount *
-to_mp(struct kobject *kobject)
-{
- struct xfs_kobj *kobj = to_kobj(kobject);
-
- return container_of(kobj, struct xfs_mount, m_kobj);
-}
-
static struct attribute *xfs_mp_attrs[] = {
NULL,
};
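
The xfs_sysfs.c hunk drops the to_mp() helper: with the per-mount attribute list now empty, nothing needs to map the embedded kobject back to its owning xfs_mount. The removed helper was the stock container_of() pattern; a generic sketch of that pattern, with hypothetical names, for reference:

    #include <linux/kernel.h>
    #include <linux/kobject.h>

    struct demo_owner {
            int             state;
            struct kobject  kobj;           /* embedded kobject */
    };

    /* recover the owning structure from a pointer to the embedded member */
    static inline struct demo_owner *to_demo_owner(struct kobject *kobj)
    {
            return container_of(kobj, struct demo_owner, kobj);
    }
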
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 8094b1920eef..c13bb3655e48 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -23,6 +23,7 @@ struct xlog;
struct xlog_ticket;
struct xlog_recover;
struct xlog_recover_item;
+struct xlog_rec_header;
struct xfs_buf_log_format;
struct xfs_inode_log_format;
struct xfs_bmbt_irec;
@@ -30,6 +31,10 @@ struct xfs_btree_cur;
struct xfs_refcount_irec;
struct xfs_fsmap;
struct xfs_rmap_irec;
+struct xfs_icreate_log;
+struct xfs_owner_info;
+struct xfs_trans_res;
+struct xfs_inobt_rec_incore;
DECLARE_EVENT_CLASS(xfs_attr_list_class,
TP_PROTO(struct xfs_attr_list_context *ctx),
@@ -720,7 +725,7 @@ TRACE_EVENT(xfs_iomap_prealloc_size,
__entry->writeio_blocks = writeio_blocks;
),
TP_printk("dev %d:%d ino 0x%llx prealloc blocks %llu shift %d "
- "m_writeio_blocks %u",
+ "m_allocsize_blocks %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
__entry->blocks, __entry->shift, __entry->writeio_blocks)
)
@@ -1153,71 +1158,6 @@ DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_dax_write);
-DECLARE_EVENT_CLASS(xfs_page_class,
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
- unsigned int len),
- TP_ARGS(inode, page, off, len),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(pgoff_t, pgoff)
- __field(loff_t, size)
- __field(unsigned long, offset)
- __field(unsigned int, length)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = XFS_I(inode)->i_ino;
- __entry->pgoff = page_offset(page);
- __entry->size = i_size_read(inode);
- __entry->offset = off;
- __entry->length = len;
- ),
- TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
- "length %x",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __entry->pgoff,
- __entry->size,
- __entry->offset,
- __entry->length)
-)
-
-#define DEFINE_PAGE_EVENT(name) \
-DEFINE_EVENT(xfs_page_class, name, \
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
- unsigned int len), \
- TP_ARGS(inode, page, off, len))
-DEFINE_PAGE_EVENT(xfs_writepage);
-DEFINE_PAGE_EVENT(xfs_releasepage);
-DEFINE_PAGE_EVENT(xfs_invalidatepage);
-
-DECLARE_EVENT_CLASS(xfs_readpage_class,
- TP_PROTO(struct inode *inode, int nr_pages),
- TP_ARGS(inode, nr_pages),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(int, nr_pages)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->nr_pages = nr_pages;
- ),
- TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __entry->nr_pages)
-)
-
-#define DEFINE_READPAGE_EVENT(name) \
-DEFINE_EVENT(xfs_readpage_class, name, \
- TP_PROTO(struct inode *inode, int nr_pages), \
- TP_ARGS(inode, nr_pages))
-DEFINE_READPAGE_EVENT(xfs_vm_readpage);
-DEFINE_READPAGE_EVENT(xfs_vm_readpages);
-
DECLARE_EVENT_CLASS(xfs_imap_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
int whichfork, struct xfs_bmbt_irec *irec),
@@ -1637,8 +1577,11 @@ DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
-DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
-DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur_right);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur_left);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup_done);
DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_near_busy);
@@ -1658,6 +1601,32 @@ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
+TRACE_EVENT(xfs_alloc_cur_check,
+ TP_PROTO(struct xfs_mount *mp, xfs_btnum_t btnum, xfs_agblock_t bno,
+ xfs_extlen_t len, xfs_extlen_t diff, bool new),
+ TP_ARGS(mp, btnum, bno, len, diff, new),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_btnum_t, btnum)
+ __field(xfs_agblock_t, bno)
+ __field(xfs_extlen_t, len)
+ __field(xfs_extlen_t, diff)
+ __field(bool, new)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->btnum = btnum;
+ __entry->bno = bno;
+ __entry->len = len;
+ __entry->diff = diff;
+ __entry->new = new;
+ ),
+ TP_printk("dev %d:%d btree %s bno 0x%x len 0x%x diff 0x%x new %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+ __entry->bno, __entry->len, __entry->diff, __entry->new)
+)
+
DECLARE_EVENT_CLASS(xfs_da_class,
TP_PROTO(struct xfs_da_args *args),
TP_ARGS(args),
@@ -3575,6 +3544,35 @@ TRACE_EVENT(xfs_pwork_init,
__entry->nr_threads, __entry->pid)
)
+DECLARE_EVENT_CLASS(xfs_kmem_class,
+ TP_PROTO(ssize_t size, int flags, unsigned long caller_ip),
+ TP_ARGS(size, flags, caller_ip),
+ TP_STRUCT__entry(
+ __field(ssize_t, size)
+ __field(int, flags)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->size = size;
+ __entry->flags = flags;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("size %zd flags 0x%x caller %pS",
+ __entry->size,
+ __entry->flags,
+ (char *)__entry->caller_ip)
+)
+
+#define DEFINE_KMEM_EVENT(name) \
+DEFINE_EVENT(xfs_kmem_class, name, \
+ TP_PROTO(ssize_t size, int flags, unsigned long caller_ip), \
+ TP_ARGS(size, flags, caller_ip))
+DEFINE_KMEM_EVENT(kmem_alloc);
+DEFINE_KMEM_EVENT(kmem_alloc_io);
+DEFINE_KMEM_EVENT(kmem_alloc_large);
+DEFINE_KMEM_EVENT(kmem_realloc);
+DEFINE_KMEM_EVENT(kmem_zone_alloc);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
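
The new xfs_kmem_class events give the allocation wrappers their own trace points. A sketch of how one of the DEFINE_KMEM_EVENT() points would be fired from its wrapper, presumably in fs/xfs/kmem.c; the body here is illustrative, not the wrapper's actual implementation:

    void *
    kmem_alloc(size_t size, xfs_km_flags_t flags)
    {
            void *ptr;

            /* record size, KM_ flags and the caller before allocating */
            trace_kmem_alloc(size, flags, _RET_IP_);
            ptr = kmalloc(size, GFP_KERNEL);        /* flag translation elided */
            return ptr;
    }

Once defined, the event appears with the rest of the XFS trace points, e.g. under /sys/kernel/debug/tracing/events/xfs/kmem_alloc/, and each hit records the size, the xfs KM_ flags, and the caller resolved through %pS.
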
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index d42a68d8313b..3b208f9a865c 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -71,7 +71,7 @@ xfs_trans_free(
if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
sb_end_intwrite(tp->t_mountp->m_super);
xfs_trans_free_dqinfo(tp);
- kmem_zone_free(xfs_trans_zone, tp);
+ kmem_cache_free(xfs_trans_zone, tp);
}
/*
@@ -90,7 +90,7 @@ xfs_trans_dup(
trace_xfs_trans_dup(tp, _RET_IP_);
- ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+ ntp = kmem_zone_zalloc(xfs_trans_zone, 0);
/*
* Initialize the new transaction structure.
@@ -263,7 +263,7 @@ xfs_trans_alloc(
* GFP_NOFS allocation context so that we avoid lockdep false positives
* by doing GFP_KERNEL allocations inside sb_start_intwrite().
*/
- tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+ tp = kmem_zone_zalloc(xfs_trans_zone, 0);
if (!(flags & XFS_TRANS_NO_WRITECOUNT))
sb_start_intwrite(mp->m_super);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 6ccfd75d3c24..00cc5b8734be 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -427,15 +427,15 @@ xfsaild_push(
case XFS_ITEM_FLUSHING:
/*
- * The item or its backing buffer is already beeing
+ * The item or its backing buffer is already being
* flushed. The typical reason for that is that an
* inode buffer is locked because we already pushed the
* updates to it as part of inode clustering.
*
* We do not want to stop flushing just because lots
- * of items are already beeing flushed, but we need to
+ * of items are already being flushed, but we need to
* re-try the flushing relatively soon if most of the
- * AIL is beeing flushed.
+ * AIL is being flushed.
*/
XFS_STATS_INC(mp, xs_push_ail_flushing);
trace_xfs_ail_flushing(lip);
@@ -612,7 +612,7 @@ xfsaild(
* The push is run asynchronously in a workqueue, which means the caller needs
* to handle waiting on the async flush for space to become available.
* We don't want to interrupt any push that is in progress, hence we only queue
- * work if we set the pushing bit approriately.
+ * work if we set the pushing bit appropriately.
*
* We do this unlocked - we only need to know whether there is anything in the
* AIL at the time we are called. We don't need to access the contents of
@@ -836,7 +836,7 @@ xfs_trans_ail_init(
init_waitqueue_head(&ailp->ail_empty);
ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
- ailp->ail_mount->m_fsname);
+ ailp->ail_mount->m_super->s_id);
if (IS_ERR(ailp->ail_task))
goto out_free_ailp;
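Besides the comment typo fixes, the last hunk stops using the XFS-private copy of the mount name and takes the device name from the VFS superblock instead; both spell the same string, so the xfsaild thread keeps its old name. Illustrative use of the field (hypothetical message, real fields):

	pr_info("xfsaild running for %s\n", mp->m_super->s_id);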
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 1027c9ca6eb8..a6fe2d8dc40f 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -25,8 +25,8 @@ STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
*/
void
xfs_trans_dqjoin(
- xfs_trans_t *tp,
- xfs_dquot_t *dqp)
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(dqp->q_logitem.qli_dquot == dqp);
@@ -49,8 +49,8 @@ xfs_trans_dqjoin(
*/
void
xfs_trans_log_dquot(
- xfs_trans_t *tp,
- xfs_dquot_t *dqp)
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
@@ -486,12 +486,12 @@ xfs_trans_apply_dquot_deltas(
*/
void
xfs_trans_unreserve_and_mod_dquots(
- xfs_trans_t *tp)
+ struct xfs_trans *tp)
{
int i, j;
- xfs_dquot_t *dqp;
+ struct xfs_dquot *dqp;
struct xfs_dqtrx *qtrx, *qa;
- bool locked;
+ bool locked;
if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
return;
@@ -571,21 +571,21 @@ xfs_quota_warn(
*/
STATIC int
xfs_trans_dqresv(
- xfs_trans_t *tp,
- xfs_mount_t *mp,
- xfs_dquot_t *dqp,
- int64_t nblks,
- long ninos,
- uint flags)
+ struct xfs_trans *tp,
+ struct xfs_mount *mp,
+ struct xfs_dquot *dqp,
+ int64_t nblks,
+ long ninos,
+ uint flags)
{
- xfs_qcnt_t hardlimit;
- xfs_qcnt_t softlimit;
- time_t timer;
- xfs_qwarncnt_t warns;
- xfs_qwarncnt_t warnlimit;
- xfs_qcnt_t total_count;
- xfs_qcnt_t *resbcountp;
- xfs_quotainfo_t *q = mp->m_quotainfo;
+ xfs_qcnt_t hardlimit;
+ xfs_qcnt_t softlimit;
+ time_t timer;
+ xfs_qwarncnt_t warns;
+ xfs_qwarncnt_t warnlimit;
+ xfs_qcnt_t total_count;
+ xfs_qcnt_t *resbcountp;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_def_quota *defq;
@@ -824,13 +824,13 @@ xfs_trans_reserve_quota_nblks(
/*
* This routine is called to allocate a quotaoff log item.
*/
-xfs_qoff_logitem_t *
+struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
- xfs_trans_t *tp,
- xfs_qoff_logitem_t *startqoff,
+ struct xfs_trans *tp,
+ struct xfs_qoff_logitem *startqoff,
uint flags)
{
- xfs_qoff_logitem_t *q;
+ struct xfs_qoff_logitem *q;
ASSERT(tp != NULL);
@@ -852,8 +852,8 @@ xfs_trans_get_qoff_item(
*/
void
xfs_trans_log_quotaoff_item(
- xfs_trans_t *tp,
- xfs_qoff_logitem_t *qlp)
+ struct xfs_trans *tp,
+ struct xfs_qoff_logitem *qlp)
{
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
@@ -863,7 +863,7 @@ STATIC void
xfs_trans_alloc_dqinfo(
xfs_trans_t *tp)
{
- tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
+ tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0);
}
void
@@ -872,6 +872,6 @@ xfs_trans_free_dqinfo(
{
if (!tp->t_dqinfo)
return;
- kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
+ kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
tp->t_dqinfo = NULL;
}
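The remaining hunks in this file are mechanical: the old xfs_*_t typedefs are replaced with explicit struct types and local-variable columns are re-aligned, with no behavioural change. The typedefs being retired are plain aliases declared alongside the struct definitions, for example (shown as standalone aliases for clarity):

	typedef struct xfs_trans	xfs_trans_t;
	typedef struct xfs_dquot	xfs_dquot_t;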
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 3123b5aaad2a..383f0203d103 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -11,6 +11,7 @@
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_attr.h"
+#include "xfs_acl.h"
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
@@ -30,7 +31,7 @@ xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
value = NULL;
}
- error = xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags);
+ error = xfs_attr_get(ip, name, (unsigned char **)&value, &asize, xflags);
if (error)
return error;
return asize;
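The xfs_attr_get() call now takes its value buffer by reference rather than by value; elsewhere in this series that indirection lets the attr code allocate the buffer on the caller's behalf, while this caller keeps owning its buffer and only the signature changes. A minimal sketch of the size-probe pattern the surrounding function uses (illustrative; asize and xflags are the caller's locals):

	unsigned char	*value = NULL;	/* no buffer supplied: size query only */

	error = xfs_attr_get(ip, name, &value, &asize, xflags | ATTR_KERNOVAL);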