Diffstat (limited to 'include/linux/pagemap.h'):
 include/linux/pagemap.h | 84
 1 file changed, 24 insertions(+), 60 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 92395a0a7dc5..97354102794d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,31 +86,16 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
(__force unsigned long)mask;
}
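For context, a common caller pattern for mapping_set_gfp_mask() (an illustrative sketch, not part of this patch): a filesystem masks __GFP_FS off an inode's mapping so page-cache allocations for it cannot recurse into the filesystem:

	/* Sketch: forbid fs recursion for this mapping's allocations. */
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);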
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT PAGE_SHIFT
-#define PAGE_CACHE_SIZE PAGE_SIZE
-#define PAGE_CACHE_MASK PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page) get_page(page)
-#define page_cache_release(page) put_page(page)
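The macros removed above were identity wrappers (PAGE_CACHE_SHIFT == PAGE_SHIFT, page_cache_get() == get_page(), and so on), so the tree-wide caller conversion they imply is mechanical; illustratively:

	index = pos >> PAGE_CACHE_SHIFT;	/* becomes: pos >> PAGE_SHIFT */
	page_cache_get(page);			/* becomes: get_page(page)    */
	page_cache_release(page);		/* becomes: put_page(page)    */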
void release_pages(struct page **pages, int nr, bool cold);
/*
* speculatively take a reference to a page.
- * If the page is free (_count == 0), then _count is untouched, and 0
- * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
+ * If the page is free (_refcount == 0), then _refcount is untouched, and 0
+ * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
*
* This function must be called inside the same rcu_read_lock() section as has
* been used to lookup the page in the pagecache radix-tree (or page table):
- * this allows allocators to use a synchronize_rcu() to stabilize _count.
+ * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
*
* Unless an RCU grace period has passed, the count of all pages coming out
* of the allocator must be considered unstable. page_count may return higher
@@ -126,7 +111,7 @@ void release_pages(struct page **pages, int nr, bool cold);
* 2. conditionally increment refcount
* 3. check the page is still in pagecache (if no, goto 1)
*
- * Remove-side that cares about stability of _count (eg. reclaim) has the
+ * Remove-side that cares about stability of _refcount (eg. reclaim) has the
* following (with tree_lock held for write):
* A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
* B. remove page from pagecache
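As an illustration of the lookup side sketched in steps 1-3 above, a simplified version of what find_get_page() does in this era (the name lookup_sketch is hypothetical, and real code must also handle exceptional shadow entries):

	static struct page *lookup_sketch(struct address_space *mapping,
					  pgoff_t offset)
	{
		struct page *page;
	repeat:
		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, offset); /* 1 */
		if (page && !page_cache_get_speculative(page)) {       /* 2 */
			rcu_read_unlock();
			goto repeat;
		}
		/* 3: recheck the slot; if the page moved, drop ref and retry */
		if (page && unlikely(page !=
				radix_tree_lookup(&mapping->page_tree, offset))) {
			put_page(page);
			rcu_read_unlock();
			goto repeat;
		}
		rcu_read_unlock();
		return page;
	}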
@@ -165,7 +150,7 @@ static inline int page_cache_get_speculative(struct page *page)
* SMP requires.
*/
VM_BUG_ON_PAGE(page_count(page) == 0, page);
- atomic_inc(&page->_count);
+ page_ref_inc(page);
#else
if (unlikely(!get_page_unless_zero(page))) {
@@ -194,10 +179,10 @@ static inline int page_cache_add_speculative(struct page *page, int count)
VM_BUG_ON(!in_atomic());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
- atomic_add(count, &page->_count);
+ page_ref_add(page, count);
#else
- if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+ if (unlikely(!page_ref_add_unless(page, count, 0)))
return 0;
#endif
VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
@@ -205,19 +190,6 @@ static inline int page_cache_add_speculative(struct page *page, int count)
return 1;
}
-static inline int page_freeze_refs(struct page *page, int count)
-{
- return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
-}
-
-static inline void page_unfreeze_refs(struct page *page, int count)
-{
- VM_BUG_ON_PAGE(page_count(page) != 0, page);
- VM_BUG_ON(count == 0);
-
- atomic_set(&page->_count, count);
-}
-
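The two helpers removed here survive as page_ref_freeze() and page_ref_unfreeze() in <linux/page_ref.h>, with the same semantics on the renamed _refcount field; the reclaim-side caller in __remove_mapping() now reads roughly:

	if (!page_ref_freeze(page, 2))		/* was page_freeze_refs(page, 2) */
		goto cannot_free;
	if (unlikely(PageDirty(page))) {
		page_ref_unfreeze(page, 2);	/* was page_unfreeze_refs(page, 2) */
		goto cannot_free;
	}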
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
@@ -403,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
return page->index << compound_order(page);
if (likely(!PageTransTail(page)))
- return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ return page->index;
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
- pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff = compound_head(page)->index;
pgoff += page - compound_head(page);
return pgoff;
}
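A concrete case of the tail-page path: for a compound page whose head sits at index 512, the third tail page (page == head + 3) resolves to pgoff 512 + 3 = 515, even though the tail's own ->index was never initialized.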
@@ -419,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
*/
static inline loff_t page_offset(struct page *page)
{
- return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+ return ((loff_t)page->index) << PAGE_SHIFT;
}
static inline loff_t page_file_offset(struct page *page)
{
- return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+ return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
}
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -438,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return linear_hugepage_index(vma, address);
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
- return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ return pgoff;
}
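With the PAGE_CACHE_SHIFT adjustment gone, index/offset conversions reduce to plain PAGE_SHIFT arithmetic. An illustrative fault-path use (fault_address is a placeholder name):

	pgoff_t idx = linear_page_index(vma, fault_address);
	loff_t pos = (loff_t)idx << PAGE_SHIFT;	/* byte offset in the file */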
extern void __lock_page(struct page *page);
@@ -546,34 +518,27 @@ void page_endio(struct page *page, int rw, int err);
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
/*
- * Fault a userspace page into pagetables. Return non-zero on a fault.
- *
- * This assumes that two userspace pages are always sufficient. That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * Fault one or two userspace pages into pagetables.
+ * Return -EINVAL if more than two pages would be needed.
+ * Return non-zero on a fault.
*/
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
- int ret;
+ int span, ret;
if (unlikely(size == 0))
return 0;
+ span = offset_in_page(uaddr) + size;
+ if (span > 2 * PAGE_SIZE)
+ return -EINVAL;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
ret = __put_user(0, uaddr);
- if (ret == 0) {
- char __user *end = uaddr + size - 1;
-
- /*
- * If the page was already mapped, this will get a cache miss
- * for sure, so try to avoid doing it.
- */
- if (((unsigned long)uaddr & PAGE_MASK) !=
- ((unsigned long)end & PAGE_MASK))
- ret = __put_user(0, end);
- }
+ if (ret == 0 && span > PAGE_SIZE)
+ ret = __put_user(0, uaddr + size - 1);
return ret;
}
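A typical caller pattern, sketched here with placeholder names (ubuf, kbuf, len): pre-fault the destination so a subsequent atomic copy with page faults disabled is likely to succeed, treating any non-zero return (including the new -EINVAL case) as failure:

	if (fault_in_pages_writeable(ubuf, len))
		return -EFAULT;
	pagefault_disable();
	left = __copy_to_user_inatomic(ubuf, kbuf, len);
	pagefault_enable();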
@@ -663,8 +628,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow,
- struct mem_cgroup *memcg);
+extern void __delete_from_page_cache(struct page *page, void *shadow);
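With the mem_cgroup argument gone, the exported wrapper in mm/filemap.c simplifies accordingly; delete_from_page_cache() now does roughly:

	lock_page_memcg(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);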
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
@@ -685,8 +649,8 @@ static inline int add_to_page_cache(struct page *page,
static inline unsigned long dir_pages(struct inode *inode)
{
- return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
}
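For example, with 4 KiB pages a directory whose i_size is 5000 bytes spans dir_pages() == (5000 + 4095) >> 12 == 2 pages.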
#endif /* _LINUX_PAGEMAP_H */