Diffstat (limited to 'include/linux/dax.h')
-rw-r--r-- | include/linux/dax.h | 70
1 files changed, 44 insertions, 26 deletions
diff --git a/include/linux/dax.h b/include/linux/dax.h
index add6c4bc568f..d8a3dc042e1c 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -8,25 +8,47 @@
 
 struct iomap_ops;
 
-/* We use lowest available exceptional entry bit for locking */
+/*
+ * We use lowest available bit in exceptional entry for locking, one bit for
+ * the entry size (PMD) and two more to tell us if the entry is a huge zero
+ * page (HZP) or an empty entry that is just used for locking. In total four
+ * special bits.
+ *
+ * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
+ * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
+ * block allocation.
+ */
+#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
 #define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+#define RADIX_DAX_PMD	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
+#define RADIX_DAX_HZP	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
+#define RADIX_DAX_EMPTY	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
 
-ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
-		struct iomap_ops *ops);
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
-		  get_block_t, dio_iodone_t, int flags);
-int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
-int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
-			struct iomap_ops *ops);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+static inline unsigned long dax_radix_sector(void *entry)
+{
+	return (unsigned long)entry >> RADIX_DAX_SHIFT;
+}
+
+static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+{
+	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
+			((unsigned long)sector << RADIX_DAX_SHIFT) |
+			RADIX_DAX_ENTRY_LOCK);
+}
+
+ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
+		const struct iomap_ops *ops);
+int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+		const struct iomap_ops *ops);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+				      pgoff_t index);
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
-		pgoff_t index, bool wake_all);
+		pgoff_t index, void *entry, bool wake_all);
 
 #ifdef CONFIG_FS_DAX
 struct page *read_dax_sector(struct block_device *bdev, sector_t n);
-void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
 int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 		unsigned int offset, unsigned int length);
 #else
@@ -35,12 +57,6 @@ static inline struct page *read_dax_sector(struct block_device *bdev,
 {
 	return ERR_PTR(-ENXIO);
 }
-/* Shouldn't ever be called when dax is disabled. */
-static inline void dax_unlock_mapping_entry(struct address_space *mapping,
-					    pgoff_t index)
-{
-	BUG();
-}
 static inline int __dax_zero_page_range(struct block_device *bdev,
 		sector_t sector, unsigned int offset, unsigned int length)
 {
@@ -48,18 +64,20 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
 }
 #endif
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
-				unsigned int flags, get_block_t);
+#ifdef CONFIG_FS_DAX_PMD
+static inline unsigned int dax_radix_order(void *entry)
+{
+	if ((unsigned long)entry & RADIX_DAX_PMD)
+		return PMD_SHIFT - PAGE_SHIFT;
+	return 0;
+}
 #else
-static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-				pmd_t *pmd, unsigned int flags, get_block_t gb)
+static inline unsigned int dax_radix_order(void *entry)
 {
-	return VM_FAULT_FALLBACK;
+	return 0;
 }
 #endif
-int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb)	dax_fault(vma, vmf, gb)
+int dax_pfn_mkwrite(struct vm_fault *vmf);
 
 static inline bool vma_is_dax(struct vm_area_struct *vma)
 {
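For reference, here is a minimal standalone sketch of how the new radix tree entry encoding above fits together: the sector number lives in the bits above RADIX_DAX_SHIFT, and the lock, PMD, HZP and EMPTY flags occupy the four bits above the exceptional-entry marker. The RADIX_TREE_EXCEPTIONAL_* constants and the PAGE_SHIFT/PMD_SHIFT values below are stand-ins chosen for illustration (exceptional shift of 2, x86-64 page geometry); they are not defined in this header and may differ from a given kernel build.

/*
 * Userspace sketch of the DAX radix tree entry encoding.
 * Constants marked "stand-in" are assumptions for illustration only.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

#define RADIX_TREE_EXCEPTIONAL_ENTRY	2UL	/* stand-in: marks a non-pointer entry */
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2	/* stand-in */

#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1UL << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1UL << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_HZP		(1UL << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1UL << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

#define PAGE_SHIFT	12	/* stand-in: x86-64 */
#define PMD_SHIFT	21	/* stand-in: x86-64 */

/* Pack a sector plus flag bits into a locked exceptional entry. */
static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}

/* Recover the sector number stored above the flag bits. */
static unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

/* PMD entries cover 2^(PMD_SHIFT - PAGE_SHIFT) pages; PTE entries cover one. */
static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

int main(void)
{
	void *pte = dax_radix_locked_entry(0x1234, 0);
	void *pmd = dax_radix_locked_entry(0x8000, RADIX_DAX_PMD);

	printf("pte: sector=%#lx order=%u locked=%d\n",
	       dax_radix_sector(pte), dax_radix_order(pte),
	       !!((unsigned long)pte & RADIX_DAX_ENTRY_LOCK));
	printf("pmd: sector=%#lx order=%u locked=%d\n",
	       dax_radix_sector(pmd), dax_radix_order(pmd),
	       !!((unsigned long)pmd & RADIX_DAX_ENTRY_LOCK));
	return 0;
}

With the stand-in values, the PTE entry decodes back to sector 0x1234 at order 0 and the PMD entry to sector 0x8000 at order 9, both reporting the lock bit set, which mirrors what dax_radix_locked_entry(), dax_radix_sector() and dax_radix_order() are used for in the header above.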