Diffstat (limited to 'include')
-rw-r--r--  include/linux/atomic.h | 34
-rw-r--r--  include/linux/backing-dev-defs.h | 2
-rw-r--r--  include/linux/bio.h | 2
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/brcmphy.h | 2
-rw-r--r--  include/linux/buffer_head.h | 4
-rw-r--r--  include/linux/ceph/libceph.h | 4
-rw-r--r--  include/linux/compiler-gcc.h | 2
-rw-r--r--  include/linux/configfs.h | 4
-rw-r--r--  include/linux/f2fs_fs.h | 4
-rw-r--r--  include/linux/filter.h | 4
-rw-r--r--  include/linux/fs.h | 4
-rw-r--r--  include/linux/huge_mm.h | 2
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/mm_types.h | 2
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 4
-rw-r--r--  include/linux/nfs_page.h | 6
-rw-r--r--  include/linux/nilfs2_fs.h | 4
-rw-r--r--  include/linux/pagemap.h | 32
-rw-r--r--  include/linux/pmem.h | 22
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/sunrpc/svc.h | 2
-rw-r--r--  include/linux/swap.h | 4
-rw-r--r--  include/target/target_core_fabric.h | 2
-rw-r--r--  include/trace/events/page_isolation.h | 2
-rw-r--r--  include/uapi/linux/bpf.h | 1
-rw-r--r--  include/uapi/linux/stddef.h | 4
28 files changed, 85 insertions, 77 deletions
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index df4f369254c0..506c3531832e 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -559,25 +559,25 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
/**
- * fetch_or - perform *ptr |= mask and return old value of *ptr
- * @ptr: pointer to value
- * @mask: mask to OR on the value
- *
- * cmpxchg based fetch_or, macro so it works for different integer types
+ * atomic_fetch_or - perform *p |= mask and return old value of *p
+ * @p: pointer to atomic_t
+ * @mask: mask to OR on the atomic_t
*/
-#ifndef fetch_or
-#define fetch_or(ptr, mask) \
-({ typeof(*(ptr)) __old, __val = *(ptr); \
- for (;;) { \
- __old = cmpxchg((ptr), __val, __val | (mask)); \
- if (__old == __val) \
- break; \
- __val = __old; \
- } \
- __old; \
-})
-#endif
+#ifndef atomic_fetch_or
+static inline int atomic_fetch_or(atomic_t *p, int mask)
+{
+ int old, val = atomic_read(p);
+
+ for (;;) {
+ old = atomic_cmpxchg(p, val, val | mask);
+ if (old == val)
+ break;
+ val = old;
+ }
+ return old;
+}
+#endif
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
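The new atomic_fetch_or() above is the classic cmpxchg retry loop: read the current value, try to install value | mask, and retry if another CPU raced in between; the old value is returned so callers can tell whether a bit was already set. Below is a minimal userspace sketch of the same pattern using C11 atomics so it can be compiled and run outside the kernel; demo_fetch_or and the flag values are made up for illustration.

/* Userspace sketch (not from the patch): the same cmpxchg retry loop that
 * the new atomic_fetch_or() uses, written with C11 atomics so it can be
 * compiled and run outside the kernel. demo_fetch_or is a hypothetical name. */
#include <stdatomic.h>
#include <stdio.h>

static int demo_fetch_or(atomic_int *p, int mask)
{
	int val = atomic_load(p);

	/* Retry until the compare-and-swap installs val | mask atomically;
	 * on failure atomic_compare_exchange_weak reloads val for us. */
	while (!atomic_compare_exchange_weak(p, &val, val | mask))
		;
	return val;		/* old value, as fetch_or/atomic_fetch_or return */
}

int main(void)
{
	atomic_int flags = 0x01;
	int old = demo_fetch_or(&flags, 0x04);

	printf("old=0x%x new=0x%x\n", old, atomic_load(&flags));	/* old=0x1 new=0x5 */
	return 0;
}
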
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f68c33..3f103076d0bf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@ struct bdi_writeback {
struct backing_dev_info {
struct list_head bdi_list;
- unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
+ unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
unsigned int capabilities; /* Device capabilities */
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 88bc64f00bb5..6b7481f62218 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -41,7 +41,7 @@
#endif
#define BIO_MAX_PAGES 256
-#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT)
#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7e5d7e018bea..669e419d6234 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1372,7 +1372,7 @@ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
static inline void put_dev_sector(Sector p)
{
- page_cache_release(p.v);
+ put_page(p.v);
}
static inline bool __bvec_gap_to_prev(struct request_queue *q,
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index f0ba9c2ec639..e3354b74286c 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -24,6 +24,8 @@
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
+#define PHY_ID_BCM7346 0x600d8650
+#define PHY_ID_BCM7362 0x600d84b0
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7435 0x600d8750
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c67f052cc5e5..d48daa3f6f20 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -43,7 +43,7 @@ enum bh_state_bits {
*/
};
-#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
struct page;
struct buffer_head;
@@ -263,7 +263,7 @@ void buffer_init(void);
static inline void attach_page_buffers(struct page *page,
struct buffer_head *head)
{
- page_cache_get(page);
+ get_page(page);
SetPagePrivate(page);
set_page_private(page, (unsigned long)head);
}
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index e7975e4681e1..db92a8d4926e 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -176,8 +176,8 @@ extern void ceph_put_snap_context(struct ceph_snap_context *sc);
*/
static inline int calc_pages_for(u64 off, u64 len)
{
- return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
- (off >> PAGE_CACHE_SHIFT);
+ return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
+ (off >> PAGE_SHIFT);
}
extern struct kmem_cache *ceph_inode_cachep;
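calc_pages_for() above rounds the end of the byte range up to a page boundary and subtracts the index of the first page, giving the number of PAGE_SIZE pages the range touches. A small userspace sketch of the same arithmetic, assuming 4 KiB pages (the DEMO_* constants and demo_calc_pages_for are stand-ins, not the kernel macros):

/* Userspace sketch of calc_pages_for()'s arithmetic, assuming 4 KiB pages. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

static int demo_calc_pages_for(uint64_t off, uint64_t len)
{
	/* last page index touched (exclusive) minus first page index */
	return ((off + len + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT) -
	       (off >> DEMO_PAGE_SHIFT);
}

int main(void)
{
	printf("%d\n", demo_calc_pages_for(0, 4096));	/* 1: exactly one page   */
	printf("%d\n", demo_calc_pages_for(4095, 2));	/* 2: straddles a border */
	printf("%d\n", demo_calc_pages_for(8192, 0));	/* 0: empty range        */
	return 0;
}
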
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 22ab246feed3..eeae401a2412 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -199,7 +199,7 @@
#define unreachable() __builtin_unreachable()
/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
+#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
#endif /* GCC_VERSION >= 40500 */
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 485fe5519448..d9d6a9d77489 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -188,7 +188,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
}
#define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \
-static struct configfs_attribute _pfx##attr_##_name = { \
+static struct configfs_bin_attribute _pfx##attr_##_name = { \
.cb_attr = { \
.ca_name = __stringify(_name), \
.ca_mode = S_IRUGO, \
@@ -200,7 +200,7 @@ static struct configfs_attribute _pfx##attr_##_name = { \
}
#define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \
-static struct configfs_attribute _pfx##attr_##_name = { \
+static struct configfs_bin_attribute _pfx##attr_##_name = { \
.cb_attr = { \
.ca_name = __stringify(_name), \
.ca_mode = S_IWUSR, \
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 9eb215a155e0..b90e9bdbd1dd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -262,7 +262,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -282,7 +282,7 @@ struct f2fs_nat_block {
 * Not allowed to change this.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
/*
* Note that f2fs_sit_entry->vblocks has the following bit-field information.
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 43aa1f8855c7..a51a5361695f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -465,10 +465,14 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
+ bool locked);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
+int __sk_detach_filter(struct sock *sk, bool locked);
+
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 14a97194b34b..304991a80e23 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -929,7 +929,7 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
#elif BITS_PER_LONG==64
#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
#endif
@@ -2067,7 +2067,7 @@ extern int generic_update_time(struct inode *, struct timespec *, int);
/* /sys/fs */
extern struct kobject *fs_kobj;
-#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
#ifdef CONFIG_MANDATORY_FILE_LOCKING
extern int locks_mandatory_locked(struct file *);
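With PAGE_CACHE_SIZE gone, the 32-bit MAX_LFS_FILESIZE above is expressed directly in PAGE_SIZE. For a 4 KiB page the value works out to (4096 << 31) - 1, i.e. 8 TiB minus one byte. The sketch below just evaluates that expression; the 4096 page size and the 32-bit assumption are inputs to the demo, not taken from the patch.

/* Sketch of the 32-bit MAX_LFS_FILESIZE arithmetic, assuming 4 KiB pages
 * and BITS_PER_LONG == 32; purely illustrative, runs on any host. */
#include <stdio.h>

int main(void)
{
	long long page_size = 4096;		/* assumed PAGE_SIZE */
	int bits_per_long = 32;			/* 32-bit kernel     */
	long long max = (page_size << (bits_per_long - 1)) - 1;

	/* 4096 << 31 = 2^43, so the limit is 8 TiB - 1 */
	printf("%lld bytes (~%lld TiB)\n", max, (max + 1) >> 40);
	return 0;
}
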
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 79b0ef6aaa14..7008623e24b1 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -127,7 +127,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
- return false;
+ return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed6407d1b7b5..ffcff53e3b2b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -623,7 +623,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
*
* A page may belong to an inode's memory mapping. In this case, page->mapping
* is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_CACHE_SIZE.
+ * in units of PAGE_SIZE.
*
* If pagecache pages are not associated with an inode, they are said to be
* anonymous pages. These may become associated with the swapcache, and in that
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 944b2b37313b..c2d75b4fa86c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct vm_area_struct {
/* Information about our backing store: */
unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
- units, *not* PAGE_CACHE_SIZE */
+ units */
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 0e1f433cc4b7..f48b8a664b0f 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -234,6 +234,10 @@ struct ip_set {
spinlock_t lock;
/* References to the set */
u32 ref;
+ /* References to the set for netlink events like dump,
+ * ref can be swapped out by ip_set_swap
+ */
+ u32 ref_netlink;
/* The core set type */
struct ip_set_type *type;
/* The type variant doing the real job */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f2f650f136ee..957049f72290 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@ struct nfs_page {
struct page *wb_page; /* page to read in/write out */
struct nfs_open_context *wb_context; /* File state context info */
struct nfs_lock_context *wb_lock_context; /* lock context info */
- pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */
- unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
+ pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
+ unsigned int wb_offset, /* Offset & ~PAGE_MASK */
wb_pgbase, /* Start of page data */
wb_bytes; /* Length of request */
struct kref wb_kref; /* reference count */
@@ -184,7 +184,7 @@ nfs_list_entry(struct list_head *head)
static inline
loff_t req_offset(struct nfs_page *req)
{
- return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset;
+ return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
}
#endif /* _LINUX_NFS_PAGE_H */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763e4b86..e9fcf90b270d 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
{
unsigned len = le16_to_cpu(dlen);
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
if (len == NILFS_MAX_REC_LEN)
return 1 << 16;
#endif
@@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
static inline __le16 nilfs_rec_len_to_disk(unsigned len)
{
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(NILFS_MAX_REC_LEN);
else if (len > (1 << 16))
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1ebd65c91422..7e1ab155c67c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
(__force unsigned long)mask;
}
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT PAGE_SHIFT
-#define PAGE_CACHE_SIZE PAGE_SIZE
-#define PAGE_CACHE_MASK PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page) get_page(page)
-#define page_cache_release(page) put_page(page)
void release_pages(struct page **pages, int nr, bool cold);
/*
@@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
return page->index << compound_order(page);
if (likely(!PageTransTail(page)))
- return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ return page->index;
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
- pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff = compound_head(page)->index;
pgoff += page - compound_head(page);
return pgoff;
}
@@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
*/
static inline loff_t page_offset(struct page *page)
{
- return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+ return ((loff_t)page->index) << PAGE_SHIFT;
}
static inline loff_t page_file_offset(struct page *page)
{
- return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+ return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
}
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return linear_hugepage_index(vma, address);
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
- return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ return pgoff;
}
extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
/*
* Fault a userspace page into pagetables. Return non-zero on a fault.
*
- * This assumes that two userspace pages are always sufficient. That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
*/
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
@@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
static inline unsigned long dir_pages(struct inode *inode)
{
- return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
}
#endif /* _LINUX_PAGEMAP_H */
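Because PAGE_CACHE_SHIFT was always equal to PAGE_SHIFT, corrections such as index << (PAGE_CACHE_SHIFT - PAGE_SHIFT) were shifts by zero, which is why page_to_pgoff() and linear_page_index() simply drop them above. A userspace sketch of linear_page_index()'s remaining arithmetic, assuming 4 KiB pages; the function name, addresses and vm_pgoff value are invented for the demo:

/* Userspace sketch of linear_page_index()'s arithmetic after the cleanup:
 * the PAGE_CACHE_SHIFT correction is gone, so the file page index is just
 * the VMA-relative page plus vm_pgoff. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT 12

static uint64_t demo_linear_page_index(uint64_t vm_start, uint64_t vm_pgoff,
				       uint64_t address)
{
	uint64_t pgoff = (address - vm_start) >> DEMO_PAGE_SHIFT;

	return pgoff + vm_pgoff;	/* index into the file, in pages */
}

int main(void)
{
	/* a mapping starting at 0x7f0000000000, backed by file page offset 16 */
	uint64_t idx = demo_linear_page_index(0x7f0000000000ULL, 16,
					      0x7f0000003000ULL);

	printf("file page index = %llu\n", (unsigned long long)idx);	/* 19 */
	return 0;
}
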
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 3ec5309e29f3..ac6d872ce067 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -42,6 +42,13 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
BUG();
}
+static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
+ size_t n)
+{
+ BUG();
+ return -EFAULT;
+}
+
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
struct iov_iter *i)
{
@@ -66,14 +73,17 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
#endif
/*
- * Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
- * arch_copy_from_iter_pmem(), arch_clear_pmem(), arch_wb_cache_pmem()
- * and arch_has_wmb_pmem().
+ * memcpy_from_pmem - read from persistent memory with error handling
+ * @dst: destination buffer
+ * @src: source buffer
+ * @size: transfer length
+ *
+ * Returns 0 on success, negative error code on failure.
*/
-static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
+static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
+ size_t size)
{
- memcpy(dst, (void __force const *) src, size);
+ return arch_memcpy_from_pmem(dst, src, size);
}
static inline bool arch_has_pmem_api(void)
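The pmem fallback stubs above now return int, so memcpy_from_pmem() can propagate a media error to its caller instead of silently succeeding. A hedged userspace sketch of that calling convention follows; pmem_read_demo and simulate_media_error are made-up names, and in the kernel the failure would come from the architecture's machine-check-safe copy rather than a flag.

/* Userspace sketch of the new error-returning calling convention for
 * memcpy_from_pmem(): callers check an int instead of assuming success. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int simulate_media_error;	/* stand-in for a poisoned cache line */

static int pmem_read_demo(void *dst, const void *src, size_t n)
{
	if (simulate_media_error)
		return -EFAULT;		/* report the failure to the caller */
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	char src[16] = "persistent data", dst[16];
	int rc = pmem_read_demo(dst, src, sizeof(src));

	if (rc)
		fprintf(stderr, "pmem read failed: %d\n", rc);
	else
		printf("read back: %s\n", dst);
	return rc ? 1 : 0;
}
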
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 60bba7e032dc..52c4847b05e2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -720,7 +720,7 @@ struct signal_struct {
struct task_cputime cputime_expires;
#ifdef CONFIG_NO_HZ_FULL
- unsigned long tick_dep_mask;
+ atomic_t tick_dep_mask;
#endif
struct list_head cpu_timers[3];
@@ -1549,7 +1549,7 @@ struct task_struct {
#endif
#ifdef CONFIG_NO_HZ_FULL
- unsigned long tick_dep_mask;
+ atomic_t tick_dep_mask;
#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
u64 start_time; /* monotonic time in nsec */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 4bcf5a61aada..e6bc30a42a74 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -108,7 +108,6 @@ struct stmmac_axi {
};
struct plat_stmmacenet_data {
- char *phy_bus_name;
int bus_id;
int phy_addr;
int interface;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb82..7ca44fb5b675 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv)
*
* These happen to all be powers of 2, which is not strictly
* necessary but helps enforce the real limitation, which is
- * that they should be multiples of PAGE_CACHE_SIZE.
+ * that they should be multiples of PAGE_SIZE.
*
* For UDP transports, a block plus NFS,RPC, and UDP headers
* has to fit into the IP datagram limit of 64K. The largest
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d18b65c53dbb..2b83359c19ca 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -433,9 +433,9 @@ struct backing_dev_info;
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
- * so leave page_cache_release and release_pages undeclared... */
+ * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
- page_cache_release(page)
+ put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr), false);
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 685a51aa98cc..8ff6d40a294f 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -76,6 +76,7 @@ struct target_core_fabric_ops {
struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
struct config_group *, const char *);
void (*fabric_drop_wwn)(struct se_wwn *);
+ void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
struct config_group *, const char *);
void (*fabric_drop_tpg)(struct se_portal_group *);
@@ -87,7 +88,6 @@ struct target_core_fabric_ops {
struct config_group *, const char *);
void (*fabric_drop_np)(struct se_tpg_np *);
int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
- void (*fabric_cleanup_nodeacl)(struct se_node_acl *);
struct configfs_attribute **tfc_discovery_attrs;
struct configfs_attribute **tfc_wwn_attrs;
diff --git a/include/trace/events/page_isolation.h b/include/trace/events/page_isolation.h
index 6fb644029c80..8738a78e6bf4 100644
--- a/include/trace/events/page_isolation.h
+++ b/include/trace/events/page_isolation.h
@@ -29,7 +29,7 @@ TRACE_EVENT(test_pages_isolated,
TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s",
__entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
- __entry->end_pfn == __entry->fin_pfn ? "success" : "fail")
+ __entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
);
#endif /* _TRACE_PAGE_ISOLATION_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 924f537183fd..23917bb47bf3 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -375,6 +375,7 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
+ __u16 tunnel_ext;
__u32 tunnel_label;
};
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index aa9f10428743..621fa8ac4425 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1 +1,5 @@
#include <linux/compiler.h>
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif