From 0224a4a30b57385a60065aa598181868881d8fc6 Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Wed, 27 Apr 2016 14:04:20 +0300 Subject: device property: Avoid potential dereferences of invalid pointers Since fwnode may hold ERR_PTR(-ENODEV) or it may be NULL, the fwnode type checks is_of_node(), is_acpi_node() and is_pset_node() need to consider it. Use IS_ERR_OR_NULL() to check it. Fixes: 0d67e0fa1664 ("device property: fix for a case of use-after-free") Reported-by: Dan Carpenter Signed-off-by: Heikki Krogerus [ rjw: Subject & changelog ] Signed-off-by: Rafael J. Wysocki --- include/linux/of.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/of.h b/include/linux/of.h index 7fcb681baadf..31758036787c 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -133,7 +133,7 @@ void of_core_init(void); static inline bool is_of_node(struct fwnode_handle *fwnode) { - return fwnode && fwnode->type == FWNODE_OF; + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF; } static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) -- cgit v1.2.3
From 7b7483409f09c15f30ac43242ead1ab3061e1f59 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Mon, 25 Apr 2016 15:13:17 -0300 Subject: net: fix net_gso_ok for new GSO types. Fix casting in net_gso_ok. Otherwise the shift on gso_type << NETIF_F_GSO_SHIFT may hit the 32nd bit and make it look like INT_MIN, which is then sign-extended when promoted to uint64, giving 0xffffffff80000000, resulting in wrong behavior when it is and'ed with the feature itself. This test app demonstrates the problem:

#include <stdio.h>
#include <stdint.h>

int main(int argc, char **argv)
{
	uint64_t feature1;
	uint64_t feature2;
	int gso_type = 1 << 15;

	feature1 = gso_type << 16;
	feature2 = (uint64_t)gso_type << 16;
	printf("%lx %lx\n", feature1, feature2);

	return 0;
}

Gives: ffffffff80000000 80000000 So this: return (features & feature) == feature; actually tests more bits than expected, including invalid ones. The fix is to promote it earlier. Issue noted while rebasing the SCTP GSO patch but posting it separately as someone else may experience this meanwhile. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8395308a2445..b3c46b019ac1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4004,7 +4004,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb); static inline bool net_gso_ok(netdev_features_t features, int gso_type) { - netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; + netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; /* check flags correspondence */ BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); -- cgit v1.2.3
From 92117d8443bc5afacc8d5ba82e541946310f106e Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 27 Apr 2016 18:56:20 -0700 Subject: bpf: fix refcnt overflow On a system with >32Gbyte of physical memory and infinite RLIMIT_MEMLOCK, a malicious application may overflow the 32-bit bpf program refcnt. It's also possible to overflow the map refcnt on a 1Tb system. Impose a 32k hard limit, which means that the same bpf program or map cannot be shared by more than 32k processes.
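The capped-refcount pattern is easy to demonstrate outside the kernel. Below is a minimal userspace sketch using C11 atomics; REF_MAX and obj_get() are illustrative names standing in for BPF_MAX_REFCNT and bpf_prog_inc()/bpf_map_inc() from the patch, not kernel API:

#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>

#define REF_MAX 32768	/* mirrors BPF_MAX_REFCNT below */

struct obj {
	atomic_int refcnt;
};

/* Take a reference, refusing once the cap is reached: bump first,
 * then back out if the new count exceeds the limit, the same shape
 * as bpf_map_inc()/bpf_prog_inc() in this patch. */
static int obj_get(struct obj *o)
{
	if (atomic_fetch_add(&o->refcnt, 1) + 1 > REF_MAX) {
		atomic_fetch_sub(&o->refcnt, 1);
		return -EBUSY;
	}
	return 0;
}

int main(void)
{
	struct obj o = { .refcnt = REF_MAX };	/* already at the cap */

	printf("obj_get() at cap: %d\n", obj_get(&o));	/* -EBUSY */
	return 0;
}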
Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs") Reported-by: Jann Horn Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/bpf.h | 3 ++- kernel/bpf/inode.c | 7 ++++--- kernel/bpf/syscall.c | 24 ++++++++++++++++++++---- kernel/bpf/verifier.c | 11 +++++++---- 4 files changed, 33 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 21ee41b92e8a..f1d5c5acc8dd 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl); void bpf_register_map_type(struct bpf_map_type_list *tl); struct bpf_prog *bpf_prog_get(u32 ufd); +struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); void bpf_prog_put_rcu(struct bpf_prog *prog); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); -void bpf_map_inc(struct bpf_map *map, bool uref); +struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index f2ece3c174a5..8f94ca1860cf 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type) { switch (type) { case BPF_TYPE_PROG: - atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt); + raw = bpf_prog_inc(raw); break; case BPF_TYPE_MAP: - bpf_map_inc(raw, true); + raw = bpf_map_inc(raw, true); break; default: WARN_ON_ONCE(1); @@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname, goto out; raw = bpf_any_get(inode->i_private, *type); - touch_atime(&path); + if (!IS_ERR(raw)) + touch_atime(&path); path_put(&path); return raw; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index adc5e4bd74f8..cf5e9f7ad13a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f) return f.file->private_data; } -void bpf_map_inc(struct bpf_map *map, bool uref) +/* prog's and map's refcnt limit */ +#define BPF_MAX_REFCNT 32768 + +struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref) { - atomic_inc(&map->refcnt); + if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) { + atomic_dec(&map->refcnt); + return ERR_PTR(-EBUSY); + } if (uref) atomic_inc(&map->usercnt); + return map; } struct bpf_map *bpf_map_get_with_uref(u32 ufd) @@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd) if (IS_ERR(map)) return map; - bpf_map_inc(map, true); + map = bpf_map_inc(map, true); fdput(f); return map; @@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f) return f.file->private_data; } +struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) +{ + if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) { + atomic_dec(&prog->aux->refcnt); + return ERR_PTR(-EBUSY); + } + return prog; +} + /* called by sockets/tracing/seccomp before attaching program to an event * pairs with bpf_prog_put() */ @@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd) if (IS_ERR(prog)) return prog; - atomic_inc(&prog->aux->refcnt); + prog = bpf_prog_inc(prog); fdput(f); return prog; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index db2574e7b8b0..89bcaa0966da 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2049,15 +2049,18 @@ static int 
replace_map_fd_with_map_ptr(struct verifier_env *env) return -E2BIG; } - /* remember this map */ - env->used_maps[env->used_map_cnt++] = map; - /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_bpf_prog_info() */ - bpf_map_inc(map, false); + map = bpf_map_inc(map, false); + if (IS_ERR(map)) { + fdput(f); + return PTR_ERR(map); + } + env->used_maps[env->used_map_cnt++] = map; + fdput(f); next_insn: insn++; -- cgit v1.2.3
From 2c94b53738549d81dc7464a32117d1f5112c64d3 Mon Sep 17 00:00:00 2001 From: Tim Bingham Date: Fri, 29 Apr 2016 13:30:23 -0400 Subject: net: Implement net_dbg_ratelimited() for CONFIG_DYNAMIC_DEBUG case Prior to commit d92cff89a0c8 ("net_dbg_ratelimited: turn into no-op when !DEBUG") the implementation of net_dbg_ratelimited() was buggy for both the DEBUG and CONFIG_DYNAMIC_DEBUG cases. The bug was that net_ratelimit() was being called and, despite returning true, nothing was being printed to the console. This resulted in messages like "net_ratelimit: %d callbacks suppressed" with no other output nearby. After commit d92cff89a0c8 ("net_dbg_ratelimited: turn into no-op when !DEBUG") the bug is fixed for the DEBUG case. However, there's no output at all for the CONFIG_DYNAMIC_DEBUG case. This patch restores debug output (if enabled) for the CONFIG_DYNAMIC_DEBUG case by adding a definition of net_dbg_ratelimited() for it. The implementation takes care to check that dynamic debugging is enabled before calling net_ratelimit(). Fixes: d92cff89a0c8 ("net_dbg_ratelimited: turn into no-op when !DEBUG") Signed-off-by: Tim Bingham Signed-off-by: David S. Miller --- include/linux/net.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/net.h b/include/linux/net.h index 49175e4ced11..f840d77c6c31 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -246,7 +246,15 @@ do { \ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) #define net_info_ratelimited(fmt, ...) \ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) -#if defined(DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) +#define net_dbg_ratelimited(fmt, ...) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ + net_ratelimit()) \ + __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ +} while (0) +#elif defined(DEBUG) #define net_dbg_ratelimited(fmt, ...) \ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) #else -- cgit v1.2.3
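The key detail in the new CONFIG_DYNAMIC_DEBUG branch is the ordering: the per-site enable flag is consulted before net_ratelimit(), so a disabled site neither prints nor consumes ratelimit state. Here is a small userspace sketch of that guard pattern; dbg_site, ratelimit_ok() and dbg_ratelimited() are illustrative stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the dynamic-debug descriptor's enable flag. */
struct dbg_site {
	bool enabled;
};

/* Stand-in for net_ratelimit(); the real one consumes ratelimit state. */
static bool ratelimit_ok(void)
{
	return true;
}

/* Like the new net_dbg_ratelimited(): a disabled site short-circuits
 * before ratelimit_ok() runs, so sites that never print cannot burn
 * ratelimit state and cause orphan "callbacks suppressed" messages. */
#define dbg_ratelimited(site, fmt, ...)				\
do {								\
	if ((site)->enabled && ratelimit_ok())			\
		fprintf(stderr, fmt, ##__VA_ARGS__);		\
} while (0)

int main(void)
{
	struct dbg_site site = { .enabled = false };

	dbg_ratelimited(&site, "dropped packet %d\n", 1);	/* silent */
	site.enabled = true;
	dbg_ratelimited(&site, "dropped packet %d\n", 2);	/* prints */
	return 0;
}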
From 689de1d6ca95b3b5bd8ee446863bf81a4883ea25 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 2 May 2016 12:46:42 -0700 Subject: Minimal fix-up of bad hashing behavior of hash_64() This is a fairly minimal fixup to the horribly bad behavior of hash_64() with certain input patterns. In particular, because the multiplicative value used for the 64-bit hash was intentionally bit-sparse (so that the multiply could be done with shifts and adds on architectures without hardware multipliers), some bits did not get spread out very much. In particular, certain fairly common bit ranges in the input (roughly bits 12-20: commonly with the most information in them when you hash things like byte offsets in files or memory that have block factors that mean that the low bits are often zero) would not necessarily show up much in the result. There's a bigger patch-series brewing to fix up things more completely, but this is the fairly minimal fix for the 64-bit hashing problem. It simply picks a much better constant multiplier, spreading the bits out a lot better. NOTE! For 32-bit architectures, the bad old hash_64() remains the same for now, since 64-bit multiplies are expensive. The bigger hashing cleanup will replace the 32-bit case with something better. The new constants were picked by George Spelvin who wrote that bigger cleanup series. I just picked out the constants and part of the comment from that series. Cc: stable@vger.kernel.org Cc: George Spelvin Cc: Thomas Gleixner Signed-off-by: Linus Torvalds --- include/linux/hash.h | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hash.h b/include/linux/hash.h index 1afde47e1528..79c52fa81cac 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -32,12 +32,28 @@ #error Wordsize not 32 or 64 #endif +/* + * The above primes are actively bad for hashing, since they are + * too sparse. The 32-bit one is mostly ok, the 64-bit one causes + * real problems. Besides, the "prime" part is pointless for the + * multiplicative hash. + * + * Although a random odd number will do, it turns out that the golden + * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice + * properties. + * + * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2. + * (See Knuth vol 3, section 6.4, exercise 9.) + */ +#define GOLDEN_RATIO_32 0x61C88647 +#define GOLDEN_RATIO_64 0x61C8864680B583EBull + static __always_inline u64 hash_64(u64 val, unsigned int bits) { u64 hash = val; -#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 - hash = hash * GOLDEN_RATIO_PRIME_64; +#if BITS_PER_LONG == 64 + hash = hash * GOLDEN_RATIO_64; #else /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ u64 n = hash; -- cgit v1.2.3
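As a quick illustration of the new multiplier, here is a userspace sketch of the 64-bit multiplicative hash (a demonstration using the patch's constant, not hash.h verbatim), fed the kind of block-aligned offsets the changelog describes:

#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull

/* Multiplicative hashing: the top bits of the product are the best
 * mixed, so keep the high 'bits' bits, as the kernel's hash_64() does. */
static uint64_t hash_64(uint64_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_64) >> (64 - bits);
}

int main(void)
{
	/* Page-aligned offsets (low 12 bits zero): exactly the inputs
	 * that the old sparse constant spread poorly. */
	for (uint64_t off = 0; off < 4; off++)
		printf("hash_64(%llu << 12, 10) = %llu\n",
		       (unsigned long long)off,
		       (unsigned long long)hash_64(off << 12, 10));
	return 0;
}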
From af67eb9e7e1ab37880459f83153d34b3c42b0075 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 2 May 2016 09:25:16 -0700 Subject: vxlan: Add checksum check to the features check function We need to perform an additional check on the inner headers to determine if we can offload the checksum for them. Previously this check didn't occur so we would generate an invalid frame in the case of an IPv6 header encapsulated inside of an IPv4 tunnel. To fix this I added a secondary check to vxlan_features_check so that we can verify that we can offload the inner checksum. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- include/linux/if_ether.h | 5 +++++ include/net/vxlan.h | 4 +++- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index d5569734f672..548fd535fd02 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb_mac_header(skb); } +static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb_inner_mac_header(skb); +} + int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 73ed2e951c02..35437c779da8 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -252,7 +252,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, (skb->inner_protocol_type != ENCAP_TYPE_ETHER || skb->inner_protocol != htons(ETH_P_TEB) || (skb_inner_mac_header(skb) - skb_transport_header(skb) != - sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) + sizeof(struct udphdr) + sizeof(struct vxlanhdr)) || + (skb->ip_summed != CHECKSUM_NONE && + !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto)))) return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); return features; -- cgit v1.2.3
From 4550c4e157ca3da929593bb6c64080a59141af35 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 5 May 2016 16:22:03 -0700 Subject: mm: memcontrol: let v2 cgroups follow changes in system swappiness Cgroup2 currently doesn't have a per-cgroup swappiness setting. We might want to add one later - that's a different discussion - but until we do, the cgroups should always follow the system setting. Otherwise it will be unchangeably set to whatever the ancestor inherited from the system setting at the time of cgroup creation. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Cc: [4.5] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 2b83359c19ca..0a4cd4703f40 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void) #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { + /* Cgroup2 doesn't have per-cgroup swappiness */ + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + return vm_swappiness; + /* root ?
*/ if (mem_cgroup_disabled() || !memcg->css.parent) return vm_swappiness; -- cgit v1.2.3 From 4e1016dac1ccce6d8a960775526cdc3a5baa690b Mon Sep 17 00:00:00 2001 From: Alexandre Bounine Date: Thu, 5 May 2016 16:22:06 -0700 Subject: rapidio/mport_cdev: fix uapi type definitions Fix problems in uapi definitions reported by Gabriel Laskar: (see https://lkml.org/lkml/2016/4/5/205 for details) - move public header file rio_mport_cdev.h to include/uapi/linux directory - change types in data structures passed as IOCTL parameters - improve parameter checking in some IOCTL service routines Signed-off-by: Alexandre Bounine Reported-by: Gabriel Laskar Tested-by: Barry Wood Cc: Gabriel Laskar Cc: Matt Porter Cc: Aurelien Jacquiot Cc: Andre van Herk Cc: Barry Wood Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/rapidio/devices/rio_mport_cdev.c | 115 +++++++------ include/linux/rio_mport_cdev.h | 271 ------------------------------ include/uapi/linux/rio_mport_cdev.h | 277 +++++++++++++++++++++++++++++++ 3 files changed, 341 insertions(+), 322 deletions(-) delete mode 100644 include/linux/rio_mport_cdev.h create mode 100644 include/uapi/linux/rio_mport_cdev.h (limited to 'include/linux') diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 96168b819044..e165b7ce29d7 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -126,7 +126,7 @@ struct rio_mport_mapping { struct list_head node; struct mport_dev *md; enum rio_mport_map_dir dir; - u32 rioid; + u16 rioid; u64 rio_addr; dma_addr_t phys_addr; /* for mmap */ void *virt_addr; /* kernel address, for dma_free_coherent */ @@ -137,7 +137,7 @@ struct rio_mport_mapping { struct rio_mport_dma_map { int valid; - uint64_t length; + u64 length; void *vaddr; dma_addr_t paddr; }; @@ -208,7 +208,7 @@ struct mport_cdev_priv { struct kfifo event_fifo; wait_queue_head_t event_rx_wait; spinlock_t fifo_lock; - unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ + u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct dma_chan *dmach; struct list_head async_list; @@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, return -EFAULT; if ((maint_io.offset % 4) || - (maint_io.length == 0) || (maint_io.length % 4)) + (maint_io.length == 0) || (maint_io.length % 4) || + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) return -EINVAL; buffer = vmalloc(maint_io.length); @@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, offset += 4; } - if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length))) + if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer, + buffer, maint_io.length))) ret = -EFAULT; out: vfree(buffer); @@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, return -EFAULT; if ((maint_io.offset % 4) || - (maint_io.length == 0) || (maint_io.length % 4)) + (maint_io.length == 0) || (maint_io.length % 4) || + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) return -EINVAL; buffer = vmalloc(maint_io.length); @@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, return -ENOMEM; length = maint_io.length; - if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) { + if (unlikely(copy_from_user(buffer, + (void __user *)(uintptr_t)maint_io.buffer, length))) { ret = -EFAULT; goto out; } @@ -360,7 
+364,7 @@ out: */ static int rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, - u32 rioid, u64 raddr, u32 size, + u16 rioid, u64 raddr, u32 size, dma_addr_t *paddr) { struct rio_mport *mport = md->mport; @@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); + map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; @@ -394,7 +398,7 @@ err_map_outb: static int rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, - u32 rioid, u64 raddr, u32 size, + u16 rioid, u64 raddr, u32 size, dma_addr_t *paddr) { struct rio_mport_mapping *map; @@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg) dma_addr_t paddr; int ret; - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) return -EFAULT; rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", @@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg) map.handle = paddr; - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) return -EFAULT; return 0; } @@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg) if (!md->mport->ops->unmap_outb) return -EPROTONOSUPPORT; - if (copy_from_user(&handle, arg, sizeof(u64))) + if (copy_from_user(&handle, arg, sizeof(handle))) return -EFAULT; rmcd_debug(OBW, "h=0x%llx", handle); @@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg) static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; - uint16_t hdid; + u16 hdid; - if (copy_from_user(&hdid, arg, sizeof(uint16_t))) + if (copy_from_user(&hdid, arg, sizeof(hdid))) return -EFAULT; md->mport->host_deviceid = hdid; @@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; - uint32_t comptag; + u32 comptag; - if (copy_from_user(&comptag, arg, sizeof(uint32_t))) + if (copy_from_user(&comptag, arg, sizeof(comptag))) return -EFAULT; rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); @@ -837,7 +841,7 @@ err_out: * @xfer: data transfer descriptor structure */ static int -rio_dma_transfer(struct file *filp, uint32_t transfer_mode, +rio_dma_transfer(struct file *filp, u32 transfer_mode, enum rio_transfer_sync sync, enum dma_data_direction dir, struct rio_transfer_io *xfer) { @@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode, unsigned long offset; long pinned; - offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK; + offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK; nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; page_list = kmalloc_array(nr_pages, @@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) return -EFAULT; - if (transaction.count != 1) + if (transaction.count != 1) /* only single transfer for now */ return -EINVAL; if ((transaction.transfer_mode & priv->md->properties.transfer_mode) == 0) return -ENODEV; - transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io)); + transfer = vmalloc(transaction.count * 
sizeof(*transfer)); if (!transfer) return -ENOMEM; - if (unlikely(copy_from_user(transfer, transaction.block, - transaction.count * sizeof(struct rio_transfer_io)))) { + if (unlikely(copy_from_user(transfer, + (void __user *)(uintptr_t)transaction.block, + transaction.count * sizeof(*transfer)))) { ret = -EFAULT; goto out_free; } @@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) ret = rio_dma_transfer(filp, transaction.transfer_mode, transaction.sync, dir, &transfer[i]); - if (unlikely(copy_to_user(transaction.block, transfer, - transaction.count * sizeof(struct rio_transfer_io)))) + if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block, + transfer, + transaction.count * sizeof(*transfer)))) ret = -EFAULT; out_free: @@ -1129,11 +1135,11 @@ err_tmo: } static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, - uint64_t size, struct rio_mport_mapping **mapping) + u64 size, struct rio_mport_mapping **mapping) { struct rio_mport_mapping *map; - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); + map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; @@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg) struct rio_mport_mapping *mapping = NULL; int ret; - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem)))) + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) return -EFAULT; ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); @@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg) map.dma_handle = mapping->phys_addr; - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) { + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { mutex_lock(&md->buf_mutex); kref_put(&mapping->ref, mport_release_mapping); mutex_unlock(&md->buf_mutex); @@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg) int ret = -EFAULT; struct rio_mport_mapping *map, *_map; - if (copy_from_user(&handle, arg, sizeof(u64))) + if (copy_from_user(&handle, arg, sizeof(handle))) return -EFAULT; rmcd_debug(EXIT, "filp=%p", filp); @@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg) static int rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, - u64 raddr, u32 size, + u64 raddr, u64 size, struct rio_mport_mapping **mapping) { struct rio_mport *mport = md->mport; struct rio_mport_mapping *map; int ret; - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); + /* rio_map_inb_region() accepts u32 size */ + if (size > 0xffffffff) + return -EINVAL; + + map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; @@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, if (raddr == RIO_MAP_ANY_ADDR) raddr = map->phys_addr; - ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0); + ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0); if (ret < 0) goto err_map_inb; @@ -1288,7 +1298,7 @@ err_dma_alloc: static int rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, - u64 raddr, u32 size, + u64 raddr, u64 size, struct rio_mport_mapping **mapping) { struct rio_mport_mapping *map; @@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg) if (!md->mport->ops->map_inb) return -EPROTONOSUPPORT; - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) + if 
(unlikely(copy_from_user(&map, arg, sizeof(map)))) return -EFAULT; rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); @@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg) map.handle = mapping->phys_addr; map.rio_addr = mapping->rio_addr; - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) { + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { /* Delete mapping if it was created by this request */ if (ret == 0 && mapping->filp == filp) { mutex_lock(&md->buf_mutex); @@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg) if (!md->mport->ops->unmap_inb) return -EPROTONOSUPPORT; - if (copy_from_user(&handle, arg, sizeof(u64))) + if (copy_from_user(&handle, arg, sizeof(handle))) return -EFAULT; mutex_lock(&md->buf_mutex); @@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg) static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; - uint32_t port_idx = md->mport->index; + u32 port_idx = md->mport->index; rmcd_debug(MPORT, "port_index=%d", port_idx); @@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id, handled = 0; spin_lock(&data->db_lock); list_for_each_entry(db_filter, &data->doorbells, data_node) { - if (((db_filter->filter.rioid == 0xffffffff || + if (((db_filter->filter.rioid == RIO_INVALID_DESTID || db_filter->filter.rioid == src)) && info >= db_filter->filter.low && info <= db_filter->filter.high) { @@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv, if (copy_from_user(&filter, arg, sizeof(filter))) return -EFAULT; + if (filter.low > filter.high) + return -EINVAL; + spin_lock_irqsave(&priv->md->db_lock, flags); list_for_each_entry(db_filter, &priv->db_filters, priv_node) { if (db_filter->filter.rioid == filter.rioid && @@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, return -EEXIST; } - size = sizeof(struct rio_dev); + size = sizeof(*rdev); mport = md->mport; - destid = (u16)dev_info.destid; - hopcount = (u8)dev_info.hopcount; + destid = dev_info.destid; + hopcount = dev_info.hopcount; if (rio_mport_read_config_32(mport, destid, hopcount, RIO_PEF_CAR, &rval)) @@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) do { rdev = rio_get_comptag(dev_info.comptag, rdev); if (rdev && rdev->dev.parent == &mport->net->dev && - rdev->destid == (u16)dev_info.destid && - rdev->hopcount == (u8)dev_info.hopcount) + rdev->destid == dev_info.destid && + rdev->hopcount == dev_info.hopcount) break; } while (rdev); } @@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp, return maint_port_idx_get(data, (void __user *)arg); case RIO_MPORT_GET_PROPERTIES: md->properties.hdid = md->mport->host_deviceid; - if (copy_to_user((void __user *)arg, &(data->md->properties), - sizeof(data->md->properties))) + if (copy_to_user((void __user *)arg, &(md->properties), + sizeof(md->properties))) return -EFAULT; return 0; case RIO_ENABLE_DOORBELL_RANGE: @@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp, case RIO_DISABLE_PORTWRITE_RANGE: return rio_mport_remove_pw_filter(data, (void __user *)arg); case RIO_SET_EVENT_MASK: - data->event_mask = arg; + data->event_mask = (u32)arg; return 0; case RIO_GET_EVENT_MASK: if (copy_to_user((void __user *)arg, &data->event_mask, - sizeof(data->event_mask))) + sizeof(u32))) return -EFAULT; 
return 0; case RIO_MAP_OUTBOUND: @@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf, return -EINVAL; ret = rio_mport_send_doorbell(mport, - (u16)event.u.doorbell.rioid, + event.u.doorbell.rioid, event.u.doorbell.payload); if (ret < 0) return ret; @@ -2421,7 +2434,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) struct mport_dev *md; struct rio_mport_attr attr; - md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL); + md = kzalloc(sizeof(*md), GFP_KERNEL); if (!md) { rmcd_error("Unable allocate a device object"); return NULL; @@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) /* The transfer_mode property will be returned through mport query * interface */ -#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */ +#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; #else md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; diff --git a/include/linux/rio_mport_cdev.h b/include/linux/rio_mport_cdev.h deleted file mode 100644 index b65d19df76d2..000000000000 --- a/include/linux/rio_mport_cdev.h +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Copyright (c) 2015-2016, Integrated Device Technology Inc. - * Copyright (c) 2015, Prodrive Technologies - * Copyright (c) 2015, Texas Instruments Incorporated - * Copyright (c) 2015, RapidIO Trade Association - * All rights reserved. - * - * This software is available to you under a choice of one of two licenses. - * You may choose to be licensed under the terms of the GNU General Public - * License(GPL) Version 2, or the BSD-3 Clause license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3. Neither the name of the copyright holder nor the names of its contributors - * may be used to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _RIO_MPORT_CDEV_H_ -#define _RIO_MPORT_CDEV_H_ - -#ifndef __user -#define __user -#endif - -struct rio_mport_maint_io { - uint32_t rioid; /* destID of remote device */ - uint32_t hopcount; /* hopcount to remote device */ - uint32_t offset; /* offset in register space */ - size_t length; /* length in bytes */ - void __user *buffer; /* data buffer */ -}; - -/* - * Definitions for RapidIO data transfers: - * - memory mapped (MAPPED) - * - packet generation from memory (TRANSFER) - */ -#define RIO_TRANSFER_MODE_MAPPED (1 << 0) -#define RIO_TRANSFER_MODE_TRANSFER (1 << 1) -#define RIO_CAP_DBL_SEND (1 << 2) -#define RIO_CAP_DBL_RECV (1 << 3) -#define RIO_CAP_PW_SEND (1 << 4) -#define RIO_CAP_PW_RECV (1 << 5) -#define RIO_CAP_MAP_OUTB (1 << 6) -#define RIO_CAP_MAP_INB (1 << 7) - -struct rio_mport_properties { - uint16_t hdid; - uint8_t id; /* Physical port ID */ - uint8_t index; - uint32_t flags; - uint32_t sys_size; /* Default addressing size */ - uint8_t port_ok; - uint8_t link_speed; - uint8_t link_width; - uint32_t dma_max_sge; - uint32_t dma_max_size; - uint32_t dma_align; - uint32_t transfer_mode; /* Default transfer mode */ - uint32_t cap_sys_size; /* Capable system sizes */ - uint32_t cap_addr_size; /* Capable addressing sizes */ - uint32_t cap_transfer_mode; /* Capable transfer modes */ - uint32_t cap_mport; /* Mport capabilities */ -}; - -/* - * Definitions for RapidIO events; - * - incoming port-writes - * - incoming doorbells - */ -#define RIO_DOORBELL (1 << 0) -#define RIO_PORTWRITE (1 << 1) - -struct rio_doorbell { - uint32_t rioid; - uint16_t payload; -}; - -struct rio_doorbell_filter { - uint32_t rioid; /* 0xffffffff to match all ids */ - uint16_t low; - uint16_t high; -}; - - -struct rio_portwrite { - uint32_t payload[16]; -}; - -struct rio_pw_filter { - uint32_t mask; - uint32_t low; - uint32_t high; -}; - -/* RapidIO base address for inbound requests set to value defined below - * indicates that no specific RIO-to-local address translation is requested - * and driver should use direct (one-to-one) address mapping. -*/ -#define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0)) - -struct rio_mmap { - uint32_t rioid; - uint64_t rio_addr; - uint64_t length; - uint64_t handle; - void *address; -}; - -struct rio_dma_mem { - uint64_t length; /* length of DMA memory */ - uint64_t dma_handle; /* handle associated with this memory */ - void *buffer; /* pointer to this memory */ -}; - - -struct rio_event { - unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ - union { - struct rio_doorbell doorbell; /* header for RIO_DOORBELL */ - struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */ - } u; -}; - -enum rio_transfer_sync { - RIO_TRANSFER_SYNC, /* synchronous transfer */ - RIO_TRANSFER_ASYNC, /* asynchronous transfer */ - RIO_TRANSFER_FAF, /* fire-and-forget transfer */ -}; - -enum rio_transfer_dir { - RIO_TRANSFER_DIR_READ, /* Read operation */ - RIO_TRANSFER_DIR_WRITE, /* Write operation */ -}; - -/* - * RapidIO data exchange transactions are lists of individual transfers. Each - * transfer exchanges data between two RapidIO devices by remote direct memory - * access and has its own completion code. - * - * The RapidIO specification defines four types of data exchange requests: - * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows - * to specify the required type of write operation or combination of them when - * only the last data packet requires response. 
- * - * NREAD: read up to 256 bytes from remote device memory into local memory - * NWRITE: write up to 256 bytes from local memory to remote device memory - * without confirmation - * SWRITE: as NWRITE, but all addresses and payloads must be 64-bit aligned - * NWRITE_R: as NWRITE, but expect acknowledgment from remote device. - * - * The default exchange is chosen from NREAD and any of the WRITE modes as the - * driver sees fit. For write requests the user can explicitly choose between - * any of the write modes for each transaction. - */ -enum rio_exchange { - RIO_EXCHANGE_DEFAULT, /* Default method */ - RIO_EXCHANGE_NWRITE, /* All packets using NWRITE */ - RIO_EXCHANGE_SWRITE, /* All packets using SWRITE */ - RIO_EXCHANGE_NWRITE_R, /* Last packet NWRITE_R, others NWRITE */ - RIO_EXCHANGE_SWRITE_R, /* Last packet NWRITE_R, others SWRITE */ - RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */ -}; - -struct rio_transfer_io { - uint32_t rioid; /* Target destID */ - uint64_t rio_addr; /* Address in target's RIO mem space */ - enum rio_exchange method; /* Data exchange method */ - void __user *loc_addr; - uint64_t handle; - uint64_t offset; /* Offset in buffer */ - uint64_t length; /* Length in bytes */ - uint32_t completion_code; /* Completion code for this transfer */ -}; - -struct rio_transaction { - uint32_t transfer_mode; /* Data transfer mode */ - enum rio_transfer_sync sync; /* Synchronization method */ - enum rio_transfer_dir dir; /* Transfer direction */ - size_t count; /* Number of transfers */ - struct rio_transfer_io __user *block; /* Array of transfers */ -}; - -struct rio_async_tx_wait { - uint32_t token; /* DMA transaction ID token */ - uint32_t timeout; /* Wait timeout in msec, if 0 use default TO */ -}; - -#define RIO_MAX_DEVNAME_SZ 20 - -struct rio_rdev_info { - uint32_t destid; - uint8_t hopcount; - uint32_t comptag; - char name[RIO_MAX_DEVNAME_SZ + 1]; -}; - -/* Driver IOCTL codes */ -#define RIO_MPORT_DRV_MAGIC 'm' - -#define RIO_MPORT_MAINT_HDID_SET \ - _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t) -#define RIO_MPORT_MAINT_COMPTAG_SET \ - _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t) -#define RIO_MPORT_MAINT_PORT_IDX_GET \ - _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t) -#define RIO_MPORT_GET_PROPERTIES \ - _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties) -#define RIO_MPORT_MAINT_READ_LOCAL \ - _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io) -#define RIO_MPORT_MAINT_WRITE_LOCAL \ - _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io) -#define RIO_MPORT_MAINT_READ_REMOTE \ - _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io) -#define RIO_MPORT_MAINT_WRITE_REMOTE \ - _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io) -#define RIO_ENABLE_DOORBELL_RANGE \ - _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter) -#define RIO_DISABLE_DOORBELL_RANGE \ - _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter) -#define RIO_ENABLE_PORTWRITE_RANGE \ - _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter) -#define RIO_DISABLE_PORTWRITE_RANGE \ - _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter) -#define RIO_SET_EVENT_MASK \ - _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int) -#define RIO_GET_EVENT_MASK \ - _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int) -#define RIO_MAP_OUTBOUND \ - _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap) -#define RIO_UNMAP_OUTBOUND \ - _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap) -#define RIO_MAP_INBOUND \ - _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap) -#define RIO_UNMAP_INBOUND \ - _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t) -#define 
RIO_ALLOC_DMA \ - _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem) -#define RIO_FREE_DMA \ - _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t) -#define RIO_TRANSFER \ - _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction) -#define RIO_WAIT_FOR_ASYNC \ - _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait) -#define RIO_DEV_ADD \ - _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info) -#define RIO_DEV_DEL \ - _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info) - -#endif /* _RIO_MPORT_CDEV_H_ */ diff --git a/include/uapi/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h new file mode 100644 index 000000000000..5796bf1d06ad --- /dev/null +++ b/include/uapi/linux/rio_mport_cdev.h @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2015-2016, Integrated Device Technology Inc. + * Copyright (c) 2015, Prodrive Technologies + * Copyright (c) 2015, Texas Instruments Incorporated + * Copyright (c) 2015, RapidIO Trade Association + * All rights reserved. + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License(GPL) Version 2, or the BSD-3 Clause license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _RIO_MPORT_CDEV_H_ +#define _RIO_MPORT_CDEV_H_ + +#include <linux/types.h> +#include <linux/ioctl.h> + +struct rio_mport_maint_io { + __u16 rioid; /* destID of remote device */ + __u8 hopcount; /* hopcount to remote device */ + __u8 pad0[5]; + __u32 offset; /* offset in register space */ + __u32 length; /* length in bytes */ + __u64 buffer; /* pointer to data buffer */ +}; + +/* + * Definitions for RapidIO data transfers: + * - memory mapped (MAPPED) + * - packet generation from memory (TRANSFER) + */ +#define RIO_TRANSFER_MODE_MAPPED (1 << 0) +#define RIO_TRANSFER_MODE_TRANSFER (1 << 1) +#define RIO_CAP_DBL_SEND (1 << 2) +#define RIO_CAP_DBL_RECV (1 << 3) +#define RIO_CAP_PW_SEND (1 << 4) +#define RIO_CAP_PW_RECV (1 << 5) +#define RIO_CAP_MAP_OUTB (1 << 6) +#define RIO_CAP_MAP_INB (1 << 7) + +struct rio_mport_properties { + __u16 hdid; + __u8 id; /* Physical port ID */ + __u8 index; + __u32 flags; + __u32 sys_size; /* Default addressing size */ + __u8 port_ok; + __u8 link_speed; + __u8 link_width; + __u8 pad0; + __u32 dma_max_sge; + __u32 dma_max_size; + __u32 dma_align; + __u32 transfer_mode; /* Default transfer mode */ + __u32 cap_sys_size; /* Capable system sizes */ + __u32 cap_addr_size; /* Capable addressing sizes */ + __u32 cap_transfer_mode; /* Capable transfer modes */ + __u32 cap_mport; /* Mport capabilities */ +}; + +/* + * Definitions for RapidIO events; + * - incoming port-writes + * - incoming doorbells + */ +#define RIO_DOORBELL (1 << 0) +#define RIO_PORTWRITE (1 << 1) + +struct rio_doorbell { + __u16 rioid; + __u16 payload; +}; + +struct rio_doorbell_filter { + __u16 rioid; /* Use RIO_INVALID_DESTID to match all ids */ + __u16 low; + __u16 high; + __u16 pad0; +}; + + +struct rio_portwrite { + __u32 payload[16]; +}; + +struct rio_pw_filter { + __u32 mask; + __u32 low; + __u32 high; + __u32 pad0; +}; + +/* RapidIO base address for inbound requests set to value defined below + * indicates that no specific RIO-to-local address translation is requested + * and driver should use direct (one-to-one) address mapping. +*/ +#define RIO_MAP_ANY_ADDR (__u64)(~((__u64) 0)) + +struct rio_mmap { + __u16 rioid; + __u16 pad0[3]; + __u64 rio_addr; + __u64 length; + __u64 handle; + __u64 address; +}; + +struct rio_dma_mem { + __u64 length; /* length of DMA memory */ + __u64 dma_handle; /* handle associated with this memory */ + __u64 address; +}; + +struct rio_event { + __u32 header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ + union { + struct rio_doorbell doorbell; /* header for RIO_DOORBELL */ + struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */ + } u; + __u32 pad0; +}; + +enum rio_transfer_sync { + RIO_TRANSFER_SYNC, /* synchronous transfer */ + RIO_TRANSFER_ASYNC, /* asynchronous transfer */ + RIO_TRANSFER_FAF, /* fire-and-forget transfer */ +}; + +enum rio_transfer_dir { + RIO_TRANSFER_DIR_READ, /* Read operation */ + RIO_TRANSFER_DIR_WRITE, /* Write operation */ +}; + +/* + * RapidIO data exchange transactions are lists of individual transfers. Each + * transfer exchanges data between two RapidIO devices by remote direct memory + * access and has its own completion code. + * + * The RapidIO specification defines four types of data exchange requests: + * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows + * to specify the required type of write operation or combination of them when + * only the last data packet requires response.
+ * + * NREAD: read up to 256 bytes from remote device memory into local memory + * NWRITE: write up to 256 bytes from local memory to remote device memory + * without confirmation + * SWRITE: as NWRITE, but all addresses and payloads must be 64-bit aligned + * NWRITE_R: as NWRITE, but expect acknowledgment from remote device. + * + * The default exchange is chosen from NREAD and any of the WRITE modes as the + * driver sees fit. For write requests the user can explicitly choose between + * any of the write modes for each transaction. + */ +enum rio_exchange { + RIO_EXCHANGE_DEFAULT, /* Default method */ + RIO_EXCHANGE_NWRITE, /* All packets using NWRITE */ + RIO_EXCHANGE_SWRITE, /* All packets using SWRITE */ + RIO_EXCHANGE_NWRITE_R, /* Last packet NWRITE_R, others NWRITE */ + RIO_EXCHANGE_SWRITE_R, /* Last packet NWRITE_R, others SWRITE */ + RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */ +}; + +struct rio_transfer_io { + __u64 rio_addr; /* Address in target's RIO mem space */ + __u64 loc_addr; + __u64 handle; + __u64 offset; /* Offset in buffer */ + __u64 length; /* Length in bytes */ + __u16 rioid; /* Target destID */ + __u16 method; /* Data exchange method, one of rio_exchange enum */ + __u32 completion_code; /* Completion code for this transfer */ +}; + +struct rio_transaction { + __u64 block; /* Pointer to array of transfers */ + __u32 count; /* Number of transfers */ + __u32 transfer_mode; /* Data transfer mode */ + __u16 sync; /* Synch method, one of rio_transfer_sync enum */ + __u16 dir; /* Transfer direction, one of rio_transfer_dir enum */ + __u32 pad0; +}; + +struct rio_async_tx_wait { + __u32 token; /* DMA transaction ID token */ + __u32 timeout; /* Wait timeout in msec, if 0 use default TO */ +}; + +#define RIO_MAX_DEVNAME_SZ 20 + +struct rio_rdev_info { + __u16 destid; + __u8 hopcount; + __u8 pad0; + __u32 comptag; + char name[RIO_MAX_DEVNAME_SZ + 1]; +}; + +/* Driver IOCTL codes */ +#define RIO_MPORT_DRV_MAGIC 'm' + +#define RIO_MPORT_MAINT_HDID_SET \ + _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16) +#define RIO_MPORT_MAINT_COMPTAG_SET \ + _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32) +#define RIO_MPORT_MAINT_PORT_IDX_GET \ + _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32) +#define RIO_MPORT_GET_PROPERTIES \ + _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties) +#define RIO_MPORT_MAINT_READ_LOCAL \ + _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io) +#define RIO_MPORT_MAINT_WRITE_LOCAL \ + _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io) +#define RIO_MPORT_MAINT_READ_REMOTE \ + _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io) +#define RIO_MPORT_MAINT_WRITE_REMOTE \ + _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io) +#define RIO_ENABLE_DOORBELL_RANGE \ + _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter) +#define RIO_DISABLE_DOORBELL_RANGE \ + _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter) +#define RIO_ENABLE_PORTWRITE_RANGE \ + _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter) +#define RIO_DISABLE_PORTWRITE_RANGE \ + _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter) +#define RIO_SET_EVENT_MASK \ + _IOW(RIO_MPORT_DRV_MAGIC, 13, __u32) +#define RIO_GET_EVENT_MASK \ + _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32) +#define RIO_MAP_OUTBOUND \ + _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap) +#define RIO_UNMAP_OUTBOUND \ + _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap) +#define RIO_MAP_INBOUND \ + _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap) +#define RIO_UNMAP_INBOUND \ + _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64) +#define RIO_ALLOC_DMA \ + 
_IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem) +#define RIO_FREE_DMA \ + _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64) +#define RIO_TRANSFER \ + _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction) +#define RIO_WAIT_FOR_ASYNC \ + _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait) +#define RIO_DEV_ADD \ + _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info) +#define RIO_DEV_DEL \ + _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info) + +#endif /* _RIO_MPORT_CDEV_H_ */ -- cgit v1.2.3
From 127393fbe597dd85863a9bdccaa11007e7d4948f Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Thu, 5 May 2016 16:22:20 -0700 Subject: mm: thp: kvm: fix memory corruption in KVM with THP enabled After the THP refcounting change, obtaining a compound page from get_user_pages() no longer allows us to assume the entire compound page is immediately mappable from a secondary MMU. A secondary MMU doesn't want to call get_user_pages() more than once for each compound page, in order to know if it can map the whole compound page. So a secondary MMU needs to know from a single get_user_pages() invocation when it can immediately map the entire compound page, to avoid a flood of unnecessary secondary MMU faults and spurious atomic_inc()/atomic_dec() (pages don't have to be pinned by MMU notifier users). Ideally instead of the page->_mapcount < 1 check, get_user_pages() should return the granularity of the "page" mapping in the "mm" passed to get_user_pages(). However it's a non-trivial change to pass the "pmd" status belonging to the "mm" walked by get_user_pages up the stack (up to the caller of get_user_pages). So the fix just checks if there is not a single pte mapping on the page returned by get_user_pages, and in turn if the caller can assume that the whole compound page is mapped in the current "mm" (in a pmd_trans_huge()). In such a case the entire compound page is safe to map into the secondary MMU without additional get_user_pages() calls on the surrounding tail/head pages. In addition to being faster, not having to run other get_user_pages() calls also reduces the memory footprint of the secondary MMU fault in case the pmd split happened as a result of memory pressure. Without this fix, after a MADV_DONTNEED (as invoked by QEMU during postcopy live migration or ballooning) or after generic swapping (with a failure in split_huge_page() that would only result in pmd splitting and not a physical page split), KVM would map the whole compound page into the shadow pagetables, even though regular faults or userfaults (like UFFDIO_COPY) may map regular pages into the primary MMU as a result of the pte faults, leading to the guest mode and userland mode going out of sync and not working on the same memory at all times. Any other secondary MMU notifier manager (KVM is just one of the many MMU notifier users) will need the same information if it doesn't want to run a flood of get_user_pages_fast() calls and it can support multiple granularities in the secondary MMU mappings, so I think it is justified to be exposed not just to KVM. The other option would be to move transparent_hugepage_adjust to mm/huge_memory.c but that currently has all kinds of KVM data structures in it, so it's definitely not cut-and-paste work, and I couldn't do a fix as clean as this one for 4.6. Signed-off-by: Andrea Arcangeli Cc: "Dr. David Alan Gilbert" Cc: "Kirill A.
Shutemov" Cc: "Li, Liang Z" Cc: Amit Shah Cc: Paolo Bonzini Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kvm/mmu.c | 2 +- arch/x86/kvm/mmu.c | 4 ++-- include/linux/page-flags.h | 22 ++++++++++++++++++++++ 3 files changed, 25 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 58dbd5c439df..d6d4191e68f2 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1004,7 +1004,7 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap) kvm_pfn_t pfn = *pfnp; gfn_t gfn = *ipap >> PAGE_SHIFT; - if (PageTransCompound(pfn_to_page(pfn))) { + if (PageTransCompoundMap(pfn_to_page(pfn))) { unsigned long mask; /* * The address we faulted on is backed by a transparent huge diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 1ff4dbb73fb7..b6f50e8b0a39 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2823,7 +2823,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, */ if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && - PageTransCompound(pfn_to_page(pfn)) && + PageTransCompoundMap(pfn_to_page(pfn)) && !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { unsigned long mask; /* @@ -4785,7 +4785,7 @@ restart: */ if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && - PageTransCompound(pfn_to_page(pfn))) { + PageTransCompoundMap(pfn_to_page(pfn))) { drop_spte(kvm, sptep); need_tlb_flush = 1; goto restart; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f4ed4f1b0c77..6b052aa7b5b7 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -516,6 +516,27 @@ static inline int PageTransCompound(struct page *page) return PageCompound(page); } +/* + * PageTransCompoundMap is the same as PageTransCompound, but it also + * guarantees the primary MMU has the entire compound page mapped + * through pmd_trans_huge, which in turn guarantees the secondary MMUs + * can also map the entire compound page. This allows the secondary + * MMUs to call get_user_pages() only once for each compound page and + * to immediately map the entire compound page with a single secondary + * MMU fault. If there will be a pmd split later, the secondary MMUs + * will get an update through the MMU notifier invalidation through + * split_huge_pmd(). + * + * Unlike PageTransCompound, this is safe to be called only while + * split_huge_pmd() cannot run from under us, like if protected by the + * MMU notifier, otherwise it may result in page->_mapcount < 0 false + * positives. + */ +static inline int PageTransCompoundMap(struct page *page) +{ + return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0; +} + /* * PageTransTail returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known @@ -559,6 +580,7 @@ static inline int TestClearPageDoubleMap(struct page *page) #else TESTPAGEFLAG_FALSE(TransHuge) TESTPAGEFLAG_FALSE(TransCompound) +TESTPAGEFLAG_FALSE(TransCompoundMap) TESTPAGEFLAG_FALSE(TransTail) TESTPAGEFLAG_FALSE(DoubleMap) TESTSETFLAG_FALSE(DoubleMap) -- cgit v1.2.3 From 229740c63169462a838a8b8e16391ed000934631 Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Tue, 3 May 2016 16:10:21 -0700 Subject: udp_offload: Set encapsulation before inner completes. 
UDP tunnel segmentation code relies on the inner offsets being set for a UDP tunnel GSO packet, but the inner *_complete() functions will set the inner offsets only if 'encapsulation' is set before calling them. Currently, udp_gro_complete() sets 'encapsulation' only after the inner *_complete() functions are done. This causes the inner offsets to have invalid values after udp_gro_complete() returns, which in turn makes it impossible to properly segment the packet in case it needs to be forwarded, which would be visible to the user either as invalid packets being sent or as packet loss. This patch fixes this by setting the skb's 'encapsulation' in udp_gro_complete() before calling into the inner complete functions, and by making each possible UDP tunnel gro_complete() callback set the inner_mac_header to the beginning of the tunnel payload. Signed-off-by: Jarno Rajahalme Reviewed-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/geneve.c | 3 +++ drivers/net/vxlan.c | 3 +++ include/linux/netdevice.h | 3 +++ net/ipv4/fou.c | 4 ++++ net/ipv4/udp_offload.c | 8 +++++--- 5 files changed, 18 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 98f12244714f..7b0a644122eb 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -514,6 +514,9 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff, err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); rcu_read_unlock(); + + skb_set_inner_mac_header(skb, nhoff + gh_len); + return err; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index dd2d032fba5f..8ac261ab7d7d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -616,6 +616,9 @@ out: static int vxlan_gro_complete(struct sk_buff *skb, int nhoff, struct udp_offload *uoff) { + /* Sets 'skb->inner_mac_header' since we are always called with + * 'skb->encapsulation' set. + */ return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b3c46b019ac1..78181a88903b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2164,6 +2164,9 @@ struct packet_offload { struct udp_offload; +/* 'skb->encapsulation' is set before gro_complete() is called. gro_complete() + * must set 'skb->inner_mac_header' to the beginning of tunnel payload. + */ struct udp_offload_callbacks { struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb, diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 305d9ac68bd9..a6962ccad98a 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -236,6 +236,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff, err = ops->callbacks.gro_complete(skb, nhoff); + skb_set_inner_mac_header(skb, nhoff); + out_unlock: rcu_read_unlock(); @@ -412,6 +414,8 @@ static int gue_gro_complete(struct sk_buff *skb, int nhoff, err = ops->callbacks.gro_complete(skb, nhoff + guehlen); + skb_set_inner_mac_header(skb, nhoff + guehlen); + out_unlock: rcu_read_unlock(); return err; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 0ed2dafb7cc4..e330c0e56b11 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -399,6 +399,11 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff) uh->len = newlen; + /* Set encapsulation before calling into inner gro_complete() functions + * to make them set up the inner offsets.
+ */ + skb->encapsulation = 1; + rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); @@ -421,9 +426,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff) if (skb->remcsum_offload) skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; - skb->encapsulation = 1; - skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr)); - return err; } -- cgit v1.2.3 From 8634de6d254462e6953b7dac772a1df4f44c8030 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 6 May 2016 09:22:25 -0500 Subject: compiler-gcc: require gcc 4.8 for powerpc __builtin_bswap16() gcc support for __builtin_bswap16() was supposedly added for powerpc in gcc 4.6, and was then later added for other architectures in gcc 4.8. However, Stephen Rothwell reported that attempting to use it on powerpc in gcc 4.6 fails with: lib/vsprintf.c:160:2: error: initializer element is not constant lib/vsprintf.c:160:2: error: (near initialization for 'decpair[0]') lib/vsprintf.c:160:2: error: initializer element is not constant lib/vsprintf.c:160:2: error: (near initialization for 'decpair[1]') ... I'm not entirely sure what those errors mean, but I don't see them on gcc 4.8. So let's consider gcc 4.8 to be the official starting point for __builtin_bswap16(). Arnd Bergmann adds: "I found the commit in gcc-4.8 that replaced the powerpc-specific implementation of __builtin_bswap16 with an architecture-independent one. Apparently the powerpc version (gcc-4.6 and 4.7) just mapped to the lhbrx/sthbrx instructions, so it ended up not being a constant, though the intent of the patch was mainly to add support for the builtin to x86: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52624 has the patch that went into gcc-4.8 and more information." Fixes: 7322dd755e7d ("byteswap: try to avoid __builtin_constant_p gcc bug") Reported-by: Stephen Rothwell Tested-by: Stephen Rothwell Acked-by: Arnd Bergmann Signed-off-by: Josh Poimboeuf Signed-off-by: Stephen Rothwell Signed-off-by: Linus Torvalds --- include/linux/compiler-gcc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index eeae401a2412..3d5202eda22f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -246,7 +246,7 @@ #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ #endif -#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) +#if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ #endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ -- cgit v1.2.3
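The vsprintf.c failure is easy to reproduce in miniature: __builtin_bswap16() must fold to a constant expression to be usable in a static initializer, which is what gcc 4.6/4.7 on powerpc could not do. A hedged sketch, assuming a gcc-style compiler and the usual GCC_VERSION encoding used by compiler-gcc.h:

#include <stdio.h>
#include <stdint.h>

/* Same version encoding compiler-gcc.h uses (e.g. 4.8.0 -> 40800). */
#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)

#if GCC_VERSION >= 40800
/* Needs the builtin to be a constant expression; on powerpc gcc 4.6/4.7
 * the lhbrx/sthbrx-based builtin was not, giving "initializer element
 * is not constant" exactly as in the lib/vsprintf.c errors above. */
static const uint16_t swapped = __builtin_bswap16(0x1234);
#else
static const uint16_t swapped = 0x3412;	/* open-coded fallback */
#endif

int main(void)
{
	printf("%#x\n", (unsigned int)swapped);	/* prints 0x3412 */
	return 0;
}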