Diffstat (limited to 'include/linux/fs.h')
-rw-r--r-- | include/linux/fs.h | 102
1 file changed, 77 insertions, 25 deletions
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 42efe13077b6..ec0f1dc66b9b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -34,6 +34,7 @@
 #include <asm/byteorder.h>
 #include <uapi/linux/fs.h>
 
+struct backing_dev_info;
 struct export_operations;
 struct hd_geometry;
 struct iovec;
@@ -394,14 +395,12 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
 				loff_t pos, unsigned len, unsigned copied,
 				struct page *page, void *fsdata);
 
-struct backing_dev_info;
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	page_tree;	/* radix tree of all pages */
 	spinlock_t		tree_lock;	/* and lock protecting it */
 	atomic_t		i_mmap_writable;/* count VM_SHARED mappings */
 	struct rb_root		i_mmap;		/* tree of private and shared mappings */
-	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
 	struct rw_semaphore	i_mmap_rwsem;	/* protect tree, count, list */
 	/* Protected by tree_lock together with the radix tree */
 	unsigned long		nrpages;	/* number of total pages */
@@ -409,7 +408,6 @@ struct address_space {
 	pgoff_t			writeback_index;/* writeback starts here */
 	const struct address_space_operations *a_ops;	/* methods */
 	unsigned long		flags;		/* error bits/gfp mask */
-	struct backing_dev_info *backing_dev_info; /* device readahead, etc */
 	spinlock_t		private_lock;	/* for use by the address_space */
 	struct list_head	private_list;	/* ditto */
 	void			*private_data;	/* ditto */
@@ -493,8 +491,7 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
  */
 static inline int mapping_mapped(struct address_space *mapping)
 {
-	return	!RB_EMPTY_ROOT(&mapping->i_mmap) ||
-		!list_empty(&mapping->i_mmap_nonlinear);
+	return	!RB_EMPTY_ROOT(&mapping->i_mmap);
 }
 
 /*
@@ -625,7 +622,7 @@ struct inode {
 	atomic_t		i_readcount; /* struct files open RO */
 #endif
 	const struct file_operations	*i_fop;	/* former ->i_op->default_file_ops */
-	struct file_lock	*i_flock;
+	struct file_lock_context	*i_flctx;
 	struct address_space	i_data;
 	struct list_head	i_devices;
 	union {
@@ -875,6 +872,7 @@ static inline struct file *get_file(struct file *f)
 #define FL_DOWNGRADE_PENDING	256	/* Lease is being downgraded */
 #define FL_UNLOCK_PENDING	512	/* Lease is being broken */
 #define FL_OFDLCK	1024	/* lock is "owned" by struct file */
+#define FL_LAYOUT	2048	/* outstanding pNFS layout */
 
 /*
  * Special return value from posix_lock_file() and vfs_lock_file() for
@@ -885,6 +883,8 @@ static inline struct file *get_file(struct file *f)
 /* legacy typedef, should eventually be removed */
 typedef void *fl_owner_t;
 
+struct file_lock;
+
 struct file_lock_operations {
 	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
 	void (*fl_release_private)(struct file_lock *);
@@ -898,7 +898,7 @@ struct lock_manager_operations {
 	void (*lm_notify)(struct file_lock *);	/* unblock callback */
 	int (*lm_grant)(struct file_lock *, int);
 	bool (*lm_break)(struct file_lock *);
-	int (*lm_change)(struct file_lock **, int, struct list_head *);
+	int (*lm_change)(struct file_lock *, int, struct list_head *);
 	void (*lm_setup)(struct file_lock *, void **);
 };
 
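The lm_change() change just above means a lock manager's callback now receives the file_lock itself rather than a pointer to its old i_flock linkage. As a rough, hypothetical sketch (the example_* names are made up; only lease_modify() and struct lock_manager_operations come from this header), a lease-style lock manager would now look like:

#include <linux/fs.h>

/* Hypothetical lock manager built on the new lm_change() signature. */
static int example_lm_change(struct file_lock *fl, int arg,
			     struct list_head *dispose)
{
	/*
	 * Forward to the generic lease helper; its prototype changes the
	 * same way further down in this diff.
	 */
	return lease_modify(fl, arg, dispose);
}

static const struct lock_manager_operations example_lm_ops = {
	.lm_change	= example_lm_change,
};
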
@@ -923,17 +923,17 @@ int locks_in_grace(struct net *);
  * FIXME: should we create a separate "struct lock_request" to help distinguish
  * these two uses?
  *
- * The i_flock list is ordered by:
+ * The various i_flctx lists are ordered by:
  *
- * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX
- * 2) lock owner
- * 3) lock range start
- * 4) lock range end
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
  *
  * Obviously, the last two criteria only matter for POSIX locks.
  */
 struct file_lock {
 	struct file_lock *fl_next;	/* singly linked list for this inode */
+	struct list_head fl_list;	/* link into file_lock_context */
 	struct hlist_node fl_link;	/* node in global lists */
 	struct list_head fl_block;	/* circular list of blocked processes */
 	fl_owner_t fl_owner;
@@ -964,6 +964,16 @@ struct file_lock {
 	} fl_u;
 };
 
+struct file_lock_context {
+	spinlock_t		flc_lock;
+	struct list_head	flc_flock;
+	struct list_head	flc_posix;
+	struct list_head	flc_lease;
+	int			flc_flock_cnt;
+	int			flc_posix_cnt;
+	int			flc_lease_cnt;
+};
+
 /* The following constant reflects the upper bound of the file/locking space */
 #ifndef OFFSET_MAX
 #define INT_LIMIT(x)	(~((x)1 << (sizeof(x)*8 - 1)))
@@ -990,6 +1000,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
 extern int fcntl_getlease(struct file *filp);
 
 /* fs/locks.c */
+void locks_free_lock_context(struct file_lock_context *ctx);
 void locks_free_lock(struct file_lock *fl);
 extern void locks_init_lock(struct file_lock *);
 extern struct file_lock * locks_alloc_lock(void);
@@ -1010,7 +1021,7 @@ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int t
 extern void lease_get_mtime(struct inode *, struct timespec *time);
 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
 extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock **, int, struct list_head *);
+extern int lease_modify(struct file_lock *, int, struct list_head *);
 #else /* !CONFIG_FILE_LOCKING */
 static inline int fcntl_getlk(struct file *file, unsigned int cmd,
 			      struct flock __user *user)
@@ -1047,6 +1058,11 @@ static inline int fcntl_getlease(struct file *filp)
 	return F_UNLCK;
 }
+static inline void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+}
+
 static inline void locks_init_lock(struct file_lock *fl)
 {
 	return;
 }
@@ -1137,7 +1153,7 @@ static inline int vfs_setlease(struct file *filp, long arg,
 	return -EINVAL;
 }
-static inline int lease_modify(struct file_lock **before, int arg,
+static inline int lease_modify(struct file_lock *fl, int arg,
 			       struct list_head *dispose)
 {
 	return -EINVAL;
 }
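With struct file_lock_context added above, locks hang off per-type lists (flc_flock, flc_posix, flc_lease) guarded by flc_lock, instead of the single i_flock list. A minimal sketch of a caller, using a hypothetical helper name that is not part of this patch, asking whether an inode currently holds any POSIX locks:

#include <linux/fs.h>

/* Hypothetical helper: does this inode currently have any POSIX locks? */
static bool example_inode_has_posix_locks(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;
	bool ret;

	if (!ctx)	/* no lock has ever been set on this inode */
		return false;

	spin_lock(&ctx->flc_lock);
	ret = !list_empty(&ctx->flc_posix);
	spin_unlock(&ctx->flc_lock);
	return ret;
}
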
@@ -1184,8 +1200,6 @@ struct mm_struct;
 #define UMOUNT_NOFOLLOW	0x00000008	/* Don't follow symlink on umount */
 #define UMOUNT_UNUSED	0x80000000	/* Flag guaranteed to be unused */
 
-extern struct list_head super_blocks;
-extern spinlock_t sb_lock;
 
 /* Possible states of 'frozen' field */
 enum {
@@ -1502,6 +1516,26 @@ struct block_device_operations;
 #define HAVE_COMPAT_IOCTL 1
 #define HAVE_UNLOCKED_IOCTL 1
 
+/*
+ * These flags let !MMU mmap() govern direct device mapping vs immediate
+ * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
+ *
+ * NOMMU_MAP_COPY:	Copy can be mapped (MAP_PRIVATE)
+ * NOMMU_MAP_DIRECT:	Can be mapped directly (MAP_SHARED)
+ * NOMMU_MAP_READ:	Can be mapped for reading
+ * NOMMU_MAP_WRITE:	Can be mapped for writing
+ * NOMMU_MAP_EXEC:	Can be mapped for execution
+ */
+#define NOMMU_MAP_COPY		0x00000001
+#define NOMMU_MAP_DIRECT	0x00000008
+#define NOMMU_MAP_READ		VM_MAYREAD
+#define NOMMU_MAP_WRITE		VM_MAYWRITE
+#define NOMMU_MAP_EXEC		VM_MAYEXEC
+
+#define NOMMU_VMFLAGS \
+	(NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
+
+
 struct iov_iter;
 
 struct file_operations {
@@ -1536,6 +1570,9 @@ struct file_operations {
 	long (*fallocate)(struct file *file, int mode, loff_t offset,
 			  loff_t len);
 	void (*show_fdinfo)(struct seq_file *m, struct file *f);
+#ifndef CONFIG_MMU
+	unsigned (*mmap_capabilities)(struct file *);
+#endif
 };
 
 struct inode_operations {
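Together with the removal of address_space->backing_dev_info earlier in this diff, the mmap_capabilities hook added to file_operations above lets a driver report the NOMMU_MAP_* flags defined in the same hunk range. A hypothetical read-only, execute-in-place backing store (the example_* names are assumptions, not from this patch) might wire it up like this:

#include <linux/fs.h>
#include <linux/mm.h>	/* VM_MAYREAD etc., used by the NOMMU_MAP_* flags */

#ifndef CONFIG_MMU
/* MAP_PRIVATE copies and direct read/exec mappings are fine; writing is not. */
static unsigned example_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT |
	       NOMMU_MAP_READ | NOMMU_MAP_EXEC;
}

static const struct file_operations example_fops = {
	.mmap_capabilities	= example_mmap_capabilities,
};
#endif
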
@@ -1959,7 +1996,7 @@ static inline int locks_verify_truncate(struct inode *inode,
 				    struct file *filp,
 				    loff_t size)
 {
-	if (inode->i_flock && mandatory_lock(inode))
+	if (inode->i_flctx && mandatory_lock(inode))
 		return locks_mandatory_area(
 			FLOCK_VERIFY_WRITE, inode, filp,
 			size < inode->i_size ? size : inode->i_size,
@@ -1973,11 +2010,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
 {
 	/*
 	 * Since this check is lockless, we must ensure that any refcounts
-	 * taken are done before checking inode->i_flock. Otherwise, we could
-	 * end up racing with tasks trying to set a new lease on this file.
+	 * taken are done before checking i_flctx->flc_lease. Otherwise, we
+	 * could end up racing with tasks trying to set a new lease on this
+	 * file.
 	 */
 	smp_mb();
-	if (inode->i_flock)
+	if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
 		return __break_lease(inode, mode, FL_LEASE);
 	return 0;
 }
@@ -1986,11 +2024,12 @@ static inline int break_deleg(struct inode *inode, unsigned int mode)
 {
 	/*
 	 * Since this check is lockless, we must ensure that any refcounts
-	 * taken are done before checking inode->i_flock. Otherwise, we could
-	 * end up racing with tasks trying to set a new lease on this file.
+	 * taken are done before checking i_flctx->flc_lease. Otherwise, we
+	 * could end up racing with tasks trying to set a new lease on this
+	 * file.
 	 */
 	smp_mb();
-	if (inode->i_flock)
+	if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
 		return __break_lease(inode, mode, FL_DELEG);
 	return 0;
 }
@@ -2017,6 +2056,16 @@ static inline int break_deleg_wait(struct inode **delegated_inode)
 	return ret;
 }
 
+static inline int break_layout(struct inode *inode, bool wait)
+{
+	smp_mb();
+	if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+		return __break_lease(inode,
+				wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
+				FL_LAYOUT);
+	return 0;
+}
+
 #else /* !CONFIG_FILE_LOCKING */
 static inline int locks_mandatory_locked(struct file *file)
 {
@@ -2072,6 +2121,11 @@ static inline int break_deleg_wait(struct inode **delegated_inode)
 	return 0;
 }
 
+static inline int break_layout(struct inode *inode, bool wait)
+{
+	return 0;
+}
+
 #endif /* CONFIG_FILE_LOCKING */
 
 /* fs/open.c */
@@ -2481,8 +2535,6 @@ extern int sb_min_blocksize(struct super_block *, int);
 
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
-		unsigned long size, pgoff_t pgoff);
 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
 extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
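For completeness, a sketch of how the new break_layout() above might be used: like break_lease() and break_deleg(), it checks flc_lease locklessly and asks __break_lease() to recall outstanding FL_LAYOUT "leases" before an operation that would invalidate them, for instance on a truncate path. The example_truncate_begin() helper and its placement are assumptions, not part of this patch; break_layout() and inode_newsize_ok() are declarations from this header.

#include <linux/fs.h>

/* Hypothetical truncate preparation in a filesystem that hands out layouts. */
static int example_truncate_begin(struct inode *inode, loff_t newsize)
{
	int error;

	/* wait == true: block until clients have returned their layouts */
	error = break_layout(inode, true);
	if (error)
		return error;

	return inode_newsize_ok(inode, newsize);
}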