From 17d9ddc72fb8bba0d4f67868c9c612e472a594a9 Mon Sep 17 00:00:00 2001 From: "Pallipadi, Venkatesh" Date: Wed, 10 Feb 2010 15:23:44 -0800 Subject: rbtree: Add support for augmented rbtrees Add support for augmented rbtrees in core rbtree code. This will be used in subsequent patches, in x86 PAT code, which needs interval trees to efficiently keep track of PAT ranges. Signed-off-by: Venkatesh Pallipadi LKML-Reference: <20100210232343.GA11465@linux-os.sc.intel.com> Signed-off-by: Suresh Siddha Signed-off-by: H. Peter Anvin --- include/linux/rbtree.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 9c295411d01f..8e33a256ea0e 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -110,6 +110,7 @@ struct rb_node struct rb_root { struct rb_node *rb_node; + void (*augment_cb)(struct rb_node *node); }; @@ -129,7 +130,9 @@ static inline void rb_set_color(struct rb_node *rb, int color) rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; } -#define RB_ROOT (struct rb_root) { NULL, } +#define RB_ROOT (struct rb_root) { NULL, NULL, } +#define RB_AUGMENT_ROOT(x) (struct rb_root) { NULL, x} + #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) -- cgit v1.2.3 From 4abc14a733f9002c05623db755aaafdd27fa7a91 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 20 Jan 2010 14:52:23 +0100 Subject: iommu-api: Rename ->{un}map function pointers to ->{un}map_range The new function pointer names better match the top-level functions of the iommu-api which use them. The main intention of this change is to make the ->{un}map pointer names free for two new mapping functions. Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 4 ++-- drivers/base/iommu.c | 4 ++-- drivers/pci/intel-iommu.c | 4 ++-- include/linux/iommu.h | 8 ++++---- 4 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index adb0ba025702..59cae7c4df54 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -2515,8 +2515,8 @@ static struct iommu_ops amd_iommu_ops = { .domain_destroy = amd_iommu_domain_destroy, .attach_dev = amd_iommu_attach_device, .detach_dev = amd_iommu_detach_device, - .map = amd_iommu_map_range, - .unmap = amd_iommu_unmap_range, + .map_range = amd_iommu_map_range, + .unmap_range = amd_iommu_unmap_range, .iova_to_phys = amd_iommu_iova_to_phys, .domain_has_cap = amd_iommu_domain_has_cap, }; diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c index 8ad4ffea6920..f4c86c429297 100644 --- a/drivers/base/iommu.c +++ b/drivers/base/iommu.c @@ -83,14 +83,14 @@ EXPORT_SYMBOL_GPL(iommu_detach_device); int iommu_map_range(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { - return iommu_ops->map(domain, iova, paddr, size, prot); + return iommu_ops->map_range(domain, iova, paddr, size, prot); } EXPORT_SYMBOL_GPL(iommu_map_range); void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, size_t size) { - iommu_ops->unmap(domain, iova, size); + iommu_ops->unmap_range(domain, iova, size); } EXPORT_SYMBOL_GPL(iommu_unmap_range); diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 417312528ddf..a714e3db13c1 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -3714,8 +3714,8 @@ static struct iommu_ops intel_iommu_ops = { .domain_destroy =
intel_iommu_domain_destroy, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, - .map = intel_iommu_map_range, - .unmap = intel_iommu_unmap_range, + .map_range = intel_iommu_map_range, + .unmap_range = intel_iommu_unmap_range, .iova_to_phys = intel_iommu_iova_to_phys, .domain_has_cap = intel_iommu_domain_has_cap, }; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 3af4ffd591b9..0f18f37a6503 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -36,10 +36,10 @@ struct iommu_ops { void (*domain_destroy)(struct iommu_domain *domain); int (*attach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev); - int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); - void (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size); + int (*map_range)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + void (*unmap_range)(struct iommu_domain *domain, unsigned long iova, + size_t size); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, unsigned long iova); int (*domain_has_cap)(struct iommu_domain *domain, -- cgit v1.2.3 From cefc53c7f494240d4813c80154c7617452d1904d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 8 Jan 2010 13:35:09 +0100 Subject: iommu-api: Add iommu_map and iommu_unmap functions These two functions provide support for mapping and unmapping physical addresses to I/O virtual addresses. The difference from the iommu_(un)map_range() functions is that the new ones take a gfp_order parameter instead of a size. This allows the IOMMU backend implementations to detect more easily whether a given range can be mapped with larger page sizes. These new functions should replace the old ones in the long term.
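For illustration only (not part of this patch): a minimal sketch of a caller using the new order-based interface. The domain setup, addresses, and function name below are hypothetical; as the diff below shows, gfp_order counts 4K pages, so order 9 maps one 2M region, and both addresses must be aligned to the mapped size.

/* Hypothetical caller sketch. */
static int example_map_2m(struct iommu_domain *domain)
{
	unsigned long iova = 0x200000;		/* 2M-aligned I/O virtual address */
	phys_addr_t paddr = 0x40000000;		/* 2M-aligned physical address */
	int order = 9;				/* 0x1000UL << 9 == 2M */
	int ret;

	ret = iommu_map(domain, iova, paddr, order, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... perform DMA through the mapping ... */

	iommu_unmap(domain, iova, order);
	return 0;
}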
Signed-off-by: Joerg Roedel --- drivers/base/iommu.c | 31 +++++++++++++++++++++++++++++++ include/linux/iommu.h | 16 ++++++++++++++++ 2 files changed, 47 insertions(+) (limited to 'include') diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c index f4c86c429297..cf7cbec116ed 100644 --- a/drivers/base/iommu.c +++ b/drivers/base/iommu.c @@ -107,3 +107,34 @@ int iommu_domain_has_cap(struct iommu_domain *domain, return iommu_ops->domain_has_cap(domain, cap); } EXPORT_SYMBOL_GPL(iommu_domain_has_cap); + +int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, int gfp_order, int prot) +{ + unsigned long invalid_mask; + size_t size; + + size = 0x1000UL << gfp_order; + invalid_mask = size - 1; + + BUG_ON((iova | paddr) & invalid_mask); + + return iommu_ops->map_range(domain, iova, paddr, size, prot); +} +EXPORT_SYMBOL_GPL(iommu_map); + +int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order) +{ + unsigned long invalid_mask; + size_t size; + + size = 0x1000UL << gfp_order; + invalid_mask = size - 1; + + BUG_ON(iova & invalid_mask); + + iommu_ops->unmap_range(domain, iova, size); + + return gfp_order; +} +EXPORT_SYMBOL_GPL(iommu_unmap); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 0f18f37a6503..6d0035bb1a0c 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -60,6 +60,10 @@ extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, size_t size); +extern int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, int gfp_order, int prot); +extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova, + int gfp_order); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, unsigned long iova); extern int iommu_domain_has_cap(struct iommu_domain *domain, @@ -108,6 +112,18 @@ static inline void iommu_unmap_range(struct iommu_domain *domain, { } +static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, int gfp_order, int prot) +{ + return -ENODEV; +} + +static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, + int gfp_order) +{ + return -ENODEV; +} + static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, unsigned long iova) { -- cgit v1.2.3 From 67651786948c360c3122b8a17cb1e59209d50880 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 21 Jan 2010 16:32:27 +0100 Subject: iommu-api: Add ->{un}map callbacks to iommu_ops This patch adds new callbacks for mapping and unmapping pages to the iommu_ops structure. These callbacks are aware of page sizes, which makes them different from the ->{un}map_range callbacks.
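For illustration only (not part of this patch): a hedged sketch of how a backend driver might wire up the new page-size-aware callbacks. Every name here is hypothetical; example_hw_map()/example_hw_unmap() stand in for a driver's real page-table code.

/* Hypothetical backend sketch. */
static int example_map(struct iommu_domain *dom, unsigned long iova,
		       phys_addr_t paddr, int gfp_order, int prot)
{
	size_t size = 0x1000UL << gfp_order;	/* order counts 4K pages */

	/* A real driver would pick the largest hardware page size <= size. */
	return example_hw_map(dom->priv, iova, paddr, size, prot);
}

static int example_unmap(struct iommu_domain *dom, unsigned long iova,
			 int gfp_order)
{
	example_hw_unmap(dom->priv, iova, 0x1000UL << gfp_order);
	return gfp_order;		/* report the order actually unmapped */
}

static struct iommu_ops example_iommu_ops = {
	.map	= example_map,
	.unmap	= example_unmap,
	/* ->{un}map_range may remain during the transition period */
};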
Signed-off-by: Joerg Roedel --- drivers/base/iommu.c | 6 ++++++ include/linux/iommu.h | 4 ++++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c index cf7cbec116ed..55d37e4609eb 100644 --- a/drivers/base/iommu.c +++ b/drivers/base/iommu.c @@ -119,6 +119,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, BUG_ON((iova | paddr) & invalid_mask); + if (iommu_ops->map) + return iommu_ops->map(domain, iova, paddr, gfp_order, prot); + return iommu_ops->map_range(domain, iova, paddr, size, prot); } EXPORT_SYMBOL_GPL(iommu_map); @@ -133,6 +136,9 @@ int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order) BUG_ON(iova & invalid_mask); + if (iommu_ops->unmap) + return iommu_ops->unmap(domain, iova, gfp_order); + iommu_ops->unmap_range(domain, iova, size); return gfp_order; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 6d0035bb1a0c..5a7a3d888dac 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -36,6 +36,10 @@ struct iommu_ops { void (*domain_destroy)(struct iommu_domain *domain); int (*attach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + int (*map)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, int gfp_order, int prot); + int (*unmap)(struct iommu_domain *domain, unsigned long iova, + int gfp_order); int (*map_range)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); void (*unmap_range)(struct iommu_domain *domain, unsigned long iova, -- cgit v1.2.3 From 12c7389abe5786349d3ea6da1961cf78d0c1c7cd Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 21 Jan 2010 11:50:28 +0100 Subject: iommu-api: Remove iommu_{un}map_range functions These functions are no longer used and can be removed safely. Their functionality is now provided by the iommu_{un}map functions, which are also capable of handling multiple page sizes.
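For illustration only (not part of this patch): what the conversion looks like at a call site; the sizes are hypothetical. Since gfp_order counts 4K pages (size = 0x1000UL << order), a 2M range becomes order 9.

/* Hypothetical conversion sketch. */
static int example_convert(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, int prot)
{
	int ret;

	/* Before (removed): iommu_map_range(domain, iova, paddr, 2UL << 20, prot); */
	ret = iommu_map(domain, iova, paddr, 9, prot);	/* 2M == 4K << 9 */
	if (ret)
		return ret;

	iommu_unmap(domain, iova, 9);
	return 0;
}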
Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 48 --------------------------------------------- drivers/base/iommu.c | 26 ++---------------------- include/linux/iommu.h | 20 ------------------- 3 files changed, 2 insertions(+), 92 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 0e068c9ca5f5..d8da9988edd9 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -2506,52 +2506,6 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, return ret; } -static int amd_iommu_map_range(struct iommu_domain *dom, - unsigned long iova, phys_addr_t paddr, - size_t size, int iommu_prot) -{ - struct protection_domain *domain = dom->priv; - unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE); - int prot = 0; - int ret; - - if (iommu_prot & IOMMU_READ) - prot |= IOMMU_PROT_IR; - if (iommu_prot & IOMMU_WRITE) - prot |= IOMMU_PROT_IW; - - iova &= PAGE_MASK; - paddr &= PAGE_MASK; - - for (i = 0; i < npages; ++i) { - ret = iommu_map_page(domain, iova, paddr, prot, PAGE_SIZE); - if (ret) - return ret; - - iova += PAGE_SIZE; - paddr += PAGE_SIZE; - } - - return 0; -} - -static void amd_iommu_unmap_range(struct iommu_domain *dom, - unsigned long iova, size_t size) -{ - - struct protection_domain *domain = dom->priv; - unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE); - - iova &= PAGE_MASK; - - for (i = 0; i < npages; ++i) { - iommu_unmap_page(domain, iova, PAGE_SIZE); - iova += PAGE_SIZE; - } - - iommu_flush_tlb_pde(domain); -} - static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, phys_addr_t paddr, int gfp_order, int iommu_prot) { @@ -2616,8 +2570,6 @@ static struct iommu_ops amd_iommu_ops = { .detach_dev = amd_iommu_detach_device, .map = amd_iommu_map, .unmap = amd_iommu_unmap, - .map_range = amd_iommu_map_range, - .unmap_range = amd_iommu_unmap_range, .iova_to_phys = amd_iommu_iova_to_phys, .domain_has_cap = amd_iommu_domain_has_cap, }; diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c index 55d37e4609eb..6e6b6a11b3ce 100644 --- a/drivers/base/iommu.c +++ b/drivers/base/iommu.c @@ -80,20 +80,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev) } EXPORT_SYMBOL_GPL(iommu_detach_device); -int iommu_map_range(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) -{ - return iommu_ops->map_range(domain, iova, paddr, size, prot); -} -EXPORT_SYMBOL_GPL(iommu_map_range); - -void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, - size_t size) -{ - iommu_ops->unmap_range(domain, iova, size); -} -EXPORT_SYMBOL_GPL(iommu_unmap_range); - phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, unsigned long iova) { @@ -119,10 +105,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, BUG_ON((iova | paddr) & invalid_mask); - if (iommu_ops->map) - return iommu_ops->map(domain, iova, paddr, gfp_order, prot); - - return iommu_ops->map_range(domain, iova, paddr, size, prot); + return iommu_ops->map(domain, iova, paddr, gfp_order, prot); } EXPORT_SYMBOL_GPL(iommu_map); @@ -136,11 +119,6 @@ int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order) BUG_ON(iova & invalid_mask); - if (iommu_ops->unmap) - return iommu_ops->unmap(domain, iova, gfp_order); - - iommu_ops->unmap_range(domain, iova, size); - - return gfp_order; + return iommu_ops->unmap(domain, iova, gfp_order); } EXPORT_SYMBOL_GPL(iommu_unmap); diff --git a/include/linux/iommu.h 
b/include/linux/iommu.h index 5a7a3d888dac..be22ad83689c 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -40,10 +40,6 @@ struct iommu_ops { phys_addr_t paddr, int gfp_order, int prot); int (*unmap)(struct iommu_domain *domain, unsigned long iova, int gfp_order); - int (*map_range)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); - void (*unmap_range)(struct iommu_domain *domain, unsigned long iova, - size_t size); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, unsigned long iova); int (*domain_has_cap)(struct iommu_domain *domain, @@ -60,10 +56,6 @@ extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); -extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); -extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, - size_t size); extern int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, int gfp_order, int prot); extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova, @@ -104,18 +96,6 @@ static inline void iommu_detach_device(struct iommu_domain *domain, { } -static inline int iommu_map_range(struct iommu_domain *domain, - unsigned long iova, phys_addr_t paddr, - size_t size, int prot) -{ - return -ENODEV; -} - -static inline void iommu_unmap_range(struct iommu_domain *domain, - unsigned long iova, size_t size) -{ -} - static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, int gfp_order, int prot) { -- cgit v1.2.3 From 1527bc8b928dd1399c3d3467dd47d9ede210978a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 1 Feb 2010 15:03:07 +0100 Subject: bitops: Optimize hweight() by making use of compile-time evaluation Rename the existing runtime hweight() implementations to __arch_hweight(), rename the compile-time versions to __const_hweight() and then have hweight() pick between them. Suggested-by: H. Peter Anvin Signed-off-by: Peter Zijlstra LKML-Reference: <20100318111929.GB11152@aftab> Acked-by: H. Peter Anvin LKML-Reference: <1265028224.24455.154.camel@laptop> Signed-off-by: H. Peter Anvin --- arch/alpha/include/asm/bitops.h | 18 +++++++------ arch/ia64/include/asm/bitops.h | 11 ++++---- arch/sparc/include/asm/bitops_64.h | 11 ++++---- include/asm-generic/bitops/arch_hweight.h | 11 ++++++++ include/asm-generic/bitops/const_hweight.h | 42 ++++++++++++++++++++++++++++++ include/asm-generic/bitops/hweight.h | 8 ++---- include/linux/bitops.h | 25 ------------------ lib/hweight.c | 19 +++++++------- 8 files changed, 87 insertions(+), 58 deletions(-) create mode 100644 include/asm-generic/bitops/arch_hweight.h create mode 100644 include/asm-generic/bitops/const_hweight.h (limited to 'include') diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h index 15f3ae25c511..296da1d5ed57 100644 --- a/arch/alpha/include/asm/bitops.h +++ b/arch/alpha/include/asm/bitops.h @@ -405,29 +405,31 @@ static inline int fls(int x) #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) /* Whee. EV67 can calculate it directly.
*/ -static inline unsigned long hweight64(unsigned long w) +static inline unsigned long __arch_hweight64(unsigned long w) { return __kernel_ctpop(w); } -static inline unsigned int hweight32(unsigned int w) +static inline unsigned int __arch_hweight32(unsigned int w) { - return hweight64(w); + return __arch_hweight64(w); } -static inline unsigned int hweight16(unsigned int w) +static inline unsigned int __arch_hweight16(unsigned int w) { - return hweight64(w & 0xffff); + return __arch_hweight64(w & 0xffff); } -static inline unsigned int hweight8(unsigned int w) +static inline unsigned int __arch_hweight8(unsigned int w) { - return hweight64(w & 0xff); + return __arch_hweight64(w & 0xff); } #else -#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/arch_hweight.h> #endif +#include <asm-generic/bitops/const_hweight.h> + #endif /* __KERNEL__ */ #include diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 6ebc229a1c51..9da3df6f1a52 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -437,17 +437,18 @@ __fls (unsigned long x) * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ -static __inline__ unsigned long -hweight64 (unsigned long x) +static __inline__ unsigned long __arch_hweight64(unsigned long x) { unsigned long result; result = ia64_popcnt(x); return result; } -#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful) -#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) -#define hweight8(x) (unsigned int) hweight64((x) & 0xfful) +#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful)) +#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful)) +#define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful)) + +#include <asm-generic/bitops/const_hweight.h> #endif /* __KERNEL__ */ diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h index e72ac9cdfb98..766121a67a24 100644 --- a/arch/sparc/include/asm/bitops_64.h +++ b/arch/sparc/include/asm/bitops_64.h @@ -44,7 +44,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr); #ifdef ULTRA_HAS_POPULATION_COUNT -static inline unsigned int hweight64(unsigned long w) +static inline unsigned int __arch_hweight64(unsigned long w) { unsigned int res; @@ -52,7 +52,7 @@ static inline unsigned int hweight64(unsigned long w) return res; } -static inline unsigned int hweight32(unsigned int w) +static inline unsigned int __arch_hweight32(unsigned int w) { unsigned int res; @@ -60,7 +60,7 @@ static inline unsigned int hweight32(unsigned int w) return res; } -static inline unsigned int hweight16(unsigned int w) +static inline unsigned int __arch_hweight16(unsigned int w) { unsigned int res; @@ -68,7 +68,7 @@ static inline unsigned int hweight16(unsigned int w) return res; } -static inline unsigned int hweight8(unsigned int w) +static inline unsigned int __arch_hweight8(unsigned int w) { unsigned int res; @@ -78,9 +78,10 @@ static inline unsigned int hweight8(unsigned int w) return res; } #else -#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/arch_hweight.h> #endif +#include <asm-generic/bitops/const_hweight.h> #include #endif /* __KERNEL__ */ diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h new file mode 100644 index 000000000000..3a7be842cdce --- /dev/null +++ b/include/asm-generic/bitops/arch_hweight.h @@ -0,0 +1,11 @@ +#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ + +#include <asm/types.h> + +extern unsigned int __arch_hweight32(unsigned int w); +extern unsigned int __arch_hweight16(unsigned int w); +extern unsigned int __arch_hweight8(unsigned int w); +extern unsigned
long __arch_hweight64(__u64 w); + +#endif /* _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h new file mode 100644 index 000000000000..fa2a50b7ee66 --- /dev/null +++ b/include/asm-generic/bitops/const_hweight.h @@ -0,0 +1,42 @@ +#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ + +/* + * Compile time versions of __arch_hweightN() + */ +#define __const_hweight8(w) \ + ( (!!((w) & (1ULL << 0))) + \ + (!!((w) & (1ULL << 1))) + \ + (!!((w) & (1ULL << 2))) + \ + (!!((w) & (1ULL << 3))) + \ + (!!((w) & (1ULL << 4))) + \ + (!!((w) & (1ULL << 5))) + \ + (!!((w) & (1ULL << 6))) + \ + (!!((w) & (1ULL << 7))) ) + +#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 )) +#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) +#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32)) + +/* + * Generic interface. + */ +#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w)) +#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w)) +#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w)) +#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w)) + +/* + * Interface for known constant arguments + */ +#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w)) +#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w)) +#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w)) +#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w)) + +/* + * Type invariant interface to the compile time constant hweight functions. + */ +#define HWEIGHT(w) HWEIGHT64((u64)w) + +#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h index fbbc383771da..a94d6519c7ed 100644 --- a/include/asm-generic/bitops/hweight.h +++ b/include/asm-generic/bitops/hweight.h @@ -1,11 +1,7 @@ #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ #define _ASM_GENERIC_BITOPS_HWEIGHT_H_ -#include <asm/types.h> - -extern unsigned int hweight32(unsigned int w); -extern unsigned int hweight16(unsigned int w); -extern unsigned int hweight8(unsigned int w); -extern unsigned long hweight64(__u64 w); +#include <asm-generic/bitops/arch_hweight.h> +#include <asm-generic/bitops/const_hweight.h> #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index b79389879238..c55d5bc4ee58 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -47,31 +47,6 @@ static inline unsigned long hweight_long(unsigned long w) return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } -/* - * Clearly slow versions of the hweightN() functions, their benefit is - * of course compile time evaluation of constant arguments.
- */ -#define HWEIGHT8(w) \ - ( BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + \ - (!!((w) & (1ULL << 0))) + \ - (!!((w) & (1ULL << 1))) + \ - (!!((w) & (1ULL << 2))) + \ - (!!((w) & (1ULL << 3))) + \ - (!!((w) & (1ULL << 4))) + \ - (!!((w) & (1ULL << 5))) + \ - (!!((w) & (1ULL << 6))) + \ - (!!((w) & (1ULL << 7))) ) - -#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8((w) >> 8)) -#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16)) -#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32)) - -/* - * Type invariant version that simply casts things to the - * largest type. - */ -#define HWEIGHT(w) HWEIGHT64((u64)(w)) - /** * rol32 - rotate a 32-bit value left * @word: value to rotate diff --git a/lib/hweight.c b/lib/hweight.c index 63ee4eb1228d..a6927e76840f 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -9,7 +9,7 @@ * The Hamming Weight of a number is the total number of bits set in it. */ -unsigned int hweight32(unsigned int w) +unsigned int __arch_hweight32(unsigned int w) { #ifdef ARCH_HAS_FAST_MULTIPLIER w -= (w >> 1) & 0x55555555; @@ -24,29 +24,30 @@ return (res + (res >> 16)) & 0x000000FF; #endif } -EXPORT_SYMBOL(hweight32); +EXPORT_SYMBOL(__arch_hweight32); -unsigned int hweight16(unsigned int w) +unsigned int __arch_hweight16(unsigned int w) { unsigned int res = w - ((w >> 1) & 0x5555); res = (res & 0x3333) + ((res >> 2) & 0x3333); res = (res + (res >> 4)) & 0x0F0F; return (res + (res >> 8)) & 0x00FF; } -EXPORT_SYMBOL(hweight16); +EXPORT_SYMBOL(__arch_hweight16); -unsigned int hweight8(unsigned int w) +unsigned int __arch_hweight8(unsigned int w) { unsigned int res = w - ((w >> 1) & 0x55); res = (res & 0x33) + ((res >> 2) & 0x33); return (res + (res >> 4)) & 0x0F; } -EXPORT_SYMBOL(hweight8); +EXPORT_SYMBOL(__arch_hweight8); -unsigned long hweight64(__u64 w) +unsigned long __arch_hweight64(__u64 w) { #if BITS_PER_LONG == 32 - return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); + return __arch_hweight32((unsigned int)(w >> 32)) + + __arch_hweight32((unsigned int)w); #elif BITS_PER_LONG == 64 #ifdef ARCH_HAS_FAST_MULTIPLIER w -= (w >> 1) & 0x5555555555555555ul; @@ -63,4 +64,4 @@ #endif #endif } -EXPORT_SYMBOL(hweight64); +EXPORT_SYMBOL(__arch_hweight64); -- cgit v1.2.3 From d61931d89be506372d01a90d1755f6d0a9fafe2d Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 5 Mar 2010 17:34:46 +0100 Subject: x86: Add optimized popcnt variants Add support for the hardware version of the Hamming weight function, popcnt, present in CPUs which advertise it under CPUID, Function 0x0000_0001_ECX[23]. On CPUs which don't support it, we fall back to the default lib/hweight.c software versions. A synthetic benchmark comparing popcnt with __sw_hweight64 showed almost a 3x speedup on an F10h machine. Signed-off-by: Borislav Petkov LKML-Reference: <20100318112015.GC11152@aftab> Signed-off-by: H.
Peter Anvin --- arch/x86/Kconfig | 5 +++ arch/x86/include/asm/alternative.h | 9 +++-- arch/x86/include/asm/arch_hweight.h | 59 +++++++++++++++++++++++++++++++ arch/x86/include/asm/bitops.h | 4 ++- include/asm-generic/bitops/arch_hweight.h | 22 +++++++++--- lib/Makefile | 3 ++ lib/hweight.c | 20 +++++------ scripts/Makefile.lib | 4 +++ 8 files changed, 108 insertions(+), 18 deletions(-) create mode 100644 arch/x86/include/asm/arch_hweight.h (limited to 'include') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0eacb1ffb421..89d8c54cdd37 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -238,6 +238,11 @@ config X86_32_LAZY_GS def_bool y depends on X86_32 && !CC_STACKPROTECTOR +config ARCH_HWEIGHT_CFLAGS + string + default "-fcall-saved-ecx -fcall-saved-edx" if X86_32 + default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64 + config KTIME_SCALAR def_bool X86_32 source "init/Kconfig" diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index b09ec55650b3..67dae51e7fd0 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -39,9 +39,6 @@ #define LOCK_PREFIX "" #endif -/* This must be included *after* the definition of LOCK_PREFIX */ -#include <asm/cpufeature.h> - struct alt_instr { u8 *instr; /* original instruction */ u8 *replacement; @@ -95,6 +92,12 @@ static inline int alternatives_text_reserved(void *start, void *end) "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" +/* + * This must be included *after* the definition of ALTERNATIVE due to + * <asm/arch_hweight.h> + */ +#include <asm/cpufeature.h> + /* * Alternative instructions for different CPU types or capabilities. * diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h new file mode 100644 index 000000000000..d1fc3c219ae6 --- /dev/null +++ b/arch/x86/include/asm/arch_hweight.h @@ -0,0 +1,59 @@ +#ifndef _ASM_X86_HWEIGHT_H +#define _ASM_X86_HWEIGHT_H + +#ifdef CONFIG_64BIT +/* popcnt %rdi, %rax */ +#define POPCNT ".byte 0xf3,0x48,0x0f,0xb8,0xc7" +#define REG_IN "D" +#define REG_OUT "a" +#else +/* popcnt %eax, %eax */ +#define POPCNT ".byte 0xf3,0x0f,0xb8,0xc0" +#define REG_IN "a" +#define REG_OUT "a" +#endif + +/* + * __sw_hweightXX are called from within the alternatives below + * and callee-clobbered registers need to be taken care of. See + * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective + * compiler switches.
+ */ +static inline unsigned int __arch_hweight32(unsigned int w) +{ + unsigned int res = 0; + + asm (ALTERNATIVE("call __sw_hweight32", POPCNT, X86_FEATURE_POPCNT) + : "="REG_OUT (res) + : REG_IN (w)); + + return res; +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __arch_hweight32(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __arch_hweight32(w & 0xff); +} + +static inline unsigned long __arch_hweight64(__u64 w) +{ + unsigned long res = 0; + +#ifdef CONFIG_X86_32 + return __arch_hweight32((u32)w) + + __arch_hweight32((u32)(w >> 32)); +#else + asm (ALTERNATIVE("call __sw_hweight64", POPCNT, X86_FEATURE_POPCNT) + : "="REG_OUT (res) + : REG_IN (w)); +#endif /* CONFIG_X86_32 */ + + return res; +} + +#endif diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 02b47a603fc8..545776efeb16 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -444,7 +444,9 @@ static inline int fls(int x) #define ARCH_HAS_FAST_MULTIPLIER 1 -#include <asm-generic/bitops/hweight.h> +#include <asm/arch_hweight.h> + +#include <asm-generic/bitops/const_hweight.h> #endif /* __KERNEL__ */ diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h index 3a7be842cdce..9a81c1e9436c 100644 --- a/include/asm-generic/bitops/arch_hweight.h +++ b/include/asm-generic/bitops/arch_hweight.h @@ -3,9 +3,23 @@ #include <asm/types.h> -extern unsigned int __arch_hweight32(unsigned int w); -extern unsigned int __arch_hweight16(unsigned int w); -extern unsigned int __arch_hweight8(unsigned int w); -extern unsigned long __arch_hweight64(__u64 w); +inline unsigned int __arch_hweight32(unsigned int w) +{ + return __sw_hweight32(w); +} +inline unsigned int __arch_hweight16(unsigned int w) +{ + return __sw_hweight16(w); +} + +inline unsigned int __arch_hweight8(unsigned int w) +{ + return __sw_hweight8(w); +} + +inline unsigned long __arch_hweight64(__u64 w) +{ + return __sw_hweight64(w); +} #endif /* _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ */ diff --git a/lib/Makefile b/lib/Makefile index 2e152aed7198..abe63a8ad143 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -39,7 +39,10 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o + +CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o + obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_BTREE) += btree.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o diff --git a/lib/hweight.c b/lib/hweight.c index a6927e76840f..3c79d50814cf 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -9,7 +9,7 @@ * The Hamming Weight of a number is the total number of bits set in it.
*/ -unsigned int __arch_hweight32(unsigned int w) +unsigned int __sw_hweight32(unsigned int w) { #ifdef ARCH_HAS_FAST_MULTIPLIER w -= (w >> 1) & 0x55555555; @@ -24,30 +24,30 @@ return (res + (res >> 16)) & 0x000000FF; #endif } -EXPORT_SYMBOL(__arch_hweight32); +EXPORT_SYMBOL(__sw_hweight32); -unsigned int __arch_hweight16(unsigned int w) +unsigned int __sw_hweight16(unsigned int w) { unsigned int res = w - ((w >> 1) & 0x5555); res = (res & 0x3333) + ((res >> 2) & 0x3333); res = (res + (res >> 4)) & 0x0F0F; return (res + (res >> 8)) & 0x00FF; } -EXPORT_SYMBOL(__arch_hweight16); +EXPORT_SYMBOL(__sw_hweight16); -unsigned int __arch_hweight8(unsigned int w) +unsigned int __sw_hweight8(unsigned int w) { unsigned int res = w - ((w >> 1) & 0x55); res = (res & 0x33) + ((res >> 2) & 0x33); return (res + (res >> 4)) & 0x0F; } -EXPORT_SYMBOL(__arch_hweight8); +EXPORT_SYMBOL(__sw_hweight8); -unsigned long __arch_hweight64(__u64 w) +unsigned long __sw_hweight64(__u64 w) { #if BITS_PER_LONG == 32 - return __arch_hweight32((unsigned int)(w >> 32)) + - __arch_hweight32((unsigned int)w); + return __sw_hweight32((unsigned int)(w >> 32)) + + __sw_hweight32((unsigned int)w); #elif BITS_PER_LONG == 64 #ifdef ARCH_HAS_FAST_MULTIPLIER w -= (w >> 1) & 0x5555555555555555ul; @@ -64,4 +64,4 @@ #endif #endif } -EXPORT_SYMBOL(__arch_hweight64); +EXPORT_SYMBOL(__sw_hweight64); diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index f9bdf264473d..cbcd654215e6 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -245,3 +245,7 @@ quiet_cmd_lzo = LZO $@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) + +# misc stuff # --------------------------------------------------------------------------- +quote:=" -- cgit v1.2.3 From 6dad2a29646ce3792c40cfc52d77e9b65a7bb143 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 31 Mar 2010 21:56:46 +0200 Subject: cpufreq: Unify sysfs attribute definition macros Multiple modules used to define these attribute macros with identical functionality; they were needlessly replicated among the different cpufreq drivers. Push them into the header and remove the duplication. Signed-off-by: Borislav Petkov LKML-Reference: <1270065406-1814-7-git-send-email-bp@amd64.org> Reviewed-by: Thomas Renninger Signed-off-by: H.
Peter Anvin --- drivers/cpufreq/cpufreq.c | 40 ++++++++++------------------ drivers/cpufreq/cpufreq_conservative.c | 48 ++++++++++++---------------------- drivers/cpufreq/cpufreq_ondemand.c | 40 +++++++++------------------- include/linux/cpufreq.h | 30 +++++++++++++++++++++ 4 files changed, 72 insertions(+), 86 deletions(-) (limited to 'include') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2d5d575e889d..e02e4174c2c8 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -662,32 +662,20 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); } -#define define_one_ro(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) - -#define define_one_ro0400(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0400, show_##_name, NULL) - -#define define_one_rw(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_ro0400(cpuinfo_cur_freq); -define_one_ro(cpuinfo_min_freq); -define_one_ro(cpuinfo_max_freq); -define_one_ro(cpuinfo_transition_latency); -define_one_ro(scaling_available_governors); -define_one_ro(scaling_driver); -define_one_ro(scaling_cur_freq); -define_one_ro(bios_limit); -define_one_ro(related_cpus); -define_one_ro(affected_cpus); -define_one_rw(scaling_min_freq); -define_one_rw(scaling_max_freq); -define_one_rw(scaling_governor); -define_one_rw(scaling_setspeed); +cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); +cpufreq_freq_attr_ro(cpuinfo_min_freq); +cpufreq_freq_attr_ro(cpuinfo_max_freq); +cpufreq_freq_attr_ro(cpuinfo_transition_latency); +cpufreq_freq_attr_ro(scaling_available_governors); +cpufreq_freq_attr_ro(scaling_driver); +cpufreq_freq_attr_ro(scaling_cur_freq); +cpufreq_freq_attr_ro(bios_limit); +cpufreq_freq_attr_ro(related_cpus); +cpufreq_freq_attr_ro(affected_cpus); +cpufreq_freq_attr_rw(scaling_min_freq); +cpufreq_freq_attr_rw(scaling_max_freq); +cpufreq_freq_attr_rw(scaling_governor); +cpufreq_freq_attr_rw(scaling_setspeed); static struct attribute *default_attrs[] = { &cpuinfo_min_freq.attr, diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 599a40b25cb0..ce5248e04218 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -178,12 +178,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj, return sprintf(buf, "%u\n", min_sampling_rate); } -#define define_one_ro(_name) \ -static struct global_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) - -define_one_ro(sampling_rate_max); -define_one_ro(sampling_rate_min); +define_one_global_ro(sampling_rate_max); +define_one_global_ro(sampling_rate_min); /* cpufreq_conservative Governor Tunables */ #define show_one(file_name, object) \ @@ -221,12 +217,8 @@ show_one_old(freq_step); show_one_old(sampling_rate_min); show_one_old(sampling_rate_max); -#define define_one_ro_old(object, _name) \ -static struct freq_attr object = \ -__ATTR(_name, 0444, show_##_name##_old, NULL) - -define_one_ro_old(sampling_rate_min_old, sampling_rate_min); -define_one_ro_old(sampling_rate_max_old, sampling_rate_max); +cpufreq_freq_attr_ro_old(sampling_rate_min); +cpufreq_freq_attr_ro_old(sampling_rate_max); /*** delete after deprecation time ***/ @@ -364,16 +356,12 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b, return count; } -#define define_one_rw(_name) \ -static struct global_attr _name = \ -__ATTR(_name, 
0644, show_##_name, store_##_name) - -define_one_rw(sampling_rate); -define_one_rw(sampling_down_factor); -define_one_rw(up_threshold); -define_one_rw(down_threshold); -define_one_rw(ignore_nice_load); -define_one_rw(freq_step); +define_one_global_rw(sampling_rate); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(up_threshold); +define_one_global_rw(down_threshold); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(freq_step); static struct attribute *dbs_attributes[] = { &sampling_rate_max.attr, @@ -409,16 +397,12 @@ write_one_old(down_threshold); write_one_old(ignore_nice_load); write_one_old(freq_step); -#define define_one_rw_old(object, _name) \ -static struct freq_attr object = \ -__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old) - -define_one_rw_old(sampling_rate_old, sampling_rate); -define_one_rw_old(sampling_down_factor_old, sampling_down_factor); -define_one_rw_old(up_threshold_old, up_threshold); -define_one_rw_old(down_threshold_old, down_threshold); -define_one_rw_old(ignore_nice_load_old, ignore_nice_load); -define_one_rw_old(freq_step_old, freq_step); +cpufreq_freq_attr_rw_old(sampling_rate); +cpufreq_freq_attr_rw_old(sampling_down_factor); +cpufreq_freq_attr_rw_old(up_threshold); +cpufreq_freq_attr_rw_old(down_threshold); +cpufreq_freq_attr_rw_old(ignore_nice_load); +cpufreq_freq_attr_rw_old(freq_step); static struct attribute *dbs_attributes_old[] = { &sampling_rate_max_old.attr, diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index bd444dc93cf2..c00b25f4d243 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -234,12 +234,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj, return sprintf(buf, "%u\n", min_sampling_rate); } -#define define_one_ro(_name) \ -static struct global_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) - -define_one_ro(sampling_rate_max); -define_one_ro(sampling_rate_min); +define_one_global_ro(sampling_rate_max); +define_one_global_ro(sampling_rate_min); /* cpufreq_ondemand Governor Tunables */ #define show_one(file_name, object) \ @@ -274,12 +270,8 @@ show_one_old(powersave_bias); show_one_old(sampling_rate_min); show_one_old(sampling_rate_max); -#define define_one_ro_old(object, _name) \ -static struct freq_attr object = \ -__ATTR(_name, 0444, show_##_name##_old, NULL) - -define_one_ro_old(sampling_rate_min_old, sampling_rate_min); -define_one_ro_old(sampling_rate_max_old, sampling_rate_max); +cpufreq_freq_attr_ro_old(sampling_rate_min); +cpufreq_freq_attr_ro_old(sampling_rate_max); /*** delete after deprecation time ***/ @@ -376,14 +368,10 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, return count; } -#define define_one_rw(_name) \ -static struct global_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_rw(sampling_rate); -define_one_rw(up_threshold); -define_one_rw(ignore_nice_load); -define_one_rw(powersave_bias); +define_one_global_rw(sampling_rate); +define_one_global_rw(up_threshold); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); static struct attribute *dbs_attributes[] = { &sampling_rate_max.attr, @@ -415,14 +403,10 @@ write_one_old(up_threshold); write_one_old(ignore_nice_load); write_one_old(powersave_bias); -#define define_one_rw_old(object, _name) \ -static struct freq_attr object = \ -__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old) - -define_one_rw_old(sampling_rate_old, sampling_rate); 
-define_one_rw_old(up_threshold_old, up_threshold); -define_one_rw_old(ignore_nice_load_old, ignore_nice_load); -define_one_rw_old(powersave_bias_old, powersave_bias); +cpufreq_freq_attr_rw_old(sampling_rate); +cpufreq_freq_attr_rw_old(up_threshold); +cpufreq_freq_attr_rw_old(ignore_nice_load); +cpufreq_freq_attr_rw_old(powersave_bias); static struct attribute *dbs_attributes_old[] = { &sampling_rate_max_old.attr, diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 4de02b10007f..9f15150ce8d6 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -278,6 +278,27 @@ struct freq_attr { ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count); }; +#define cpufreq_freq_attr_ro(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define cpufreq_freq_attr_ro_perm(_name, _perm) \ +static struct freq_attr _name = \ +__ATTR(_name, _perm, show_##_name, NULL) + +#define cpufreq_freq_attr_ro_old(_name) \ +static struct freq_attr _name##_old = \ +__ATTR(_name, 0444, show_##_name##_old, NULL) + +#define cpufreq_freq_attr_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +#define cpufreq_freq_attr_rw_old(_name) \ +static struct freq_attr _name##_old = \ +__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old) + + struct global_attr { struct attribute attr; ssize_t (*show)(struct kobject *kobj, @@ -286,6 +307,15 @@ struct global_attr { const char *c, size_t count); }; +#define define_one_global_ro(_name) \ +static struct global_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define define_one_global_rw(_name) \ +static struct global_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + + /********************************************************************* * CPUFREQ 2.6. INTERFACE * *********************************************************************/ -- cgit v1.2.3 From 1ce7e4ff24fe338438bc7837e02780f202bf202b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 23 Apr 2010 10:35:52 +0800 Subject: cgroup: Check task_lock in task_subsys_state() Expand task_subsys_state()'s rcu_dereference_check() to include the full locking rule as documented in Documentation/cgroups/cgroups.txt by adding a check for task->alloc_lock being held. This fixes an RCU false positive when resuming from suspend. The warning comes from freezer cgroup in cgroup_freezing_or_frozen(). Signed-off-by: Li Zefan Acked-by: Matt Helsley Signed-off-by: Paul E. McKenney --- include/linux/cgroup.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b8ad1ea99586..8f78073d7caa 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -530,6 +530,7 @@ static inline struct cgroup_subsys_state *task_subsys_state( { return rcu_dereference_check(task->cgroups->subsys[subsys_id], rcu_read_lock_held() || + lockdep_is_held(&task->alloc_lock) || cgroup_lock_is_held()); } -- cgit v1.2.3 From 4677d4a53e0d565742277e8913e91c821453e63e Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 3 May 2010 14:57:11 +0200 Subject: arch, hweight: Fix compilation errors Fix function prototype visibility issues when compiling for non-x86 architectures. Tested with crosstool (ftp://ftp.kernel.org/pub/tools/crosstool/) with alpha, ia64 and sparc targets. Signed-off-by: Borislav Petkov LKML-Reference: <20100503130736.GD26107@aftab> Signed-off-by: H. 
Peter Anvin --- include/asm-generic/bitops/arch_hweight.h | 8 ++++---- include/linux/bitops.h | 5 +++++ 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h index 9a81c1e9436c..6a211f40665c 100644 --- a/include/asm-generic/bitops/arch_hweight.h +++ b/include/asm-generic/bitops/arch_hweight.h @@ -3,22 +3,22 @@ #include <asm/types.h> -inline unsigned int __arch_hweight32(unsigned int w) +static inline unsigned int __arch_hweight32(unsigned int w) { return __sw_hweight32(w); } -inline unsigned int __arch_hweight16(unsigned int w) +static inline unsigned int __arch_hweight16(unsigned int w) { return __sw_hweight16(w); } -inline unsigned int __arch_hweight8(unsigned int w) +static inline unsigned int __arch_hweight8(unsigned int w) { return __sw_hweight8(w); } -inline unsigned long __arch_hweight64(__u64 w) +static inline unsigned long __arch_hweight64(__u64 w) { return __sw_hweight64(w); } diff --git a/include/linux/bitops.h b/include/linux/bitops.h index c55d5bc4ee58..26caa608ccd9 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -10,6 +10,11 @@ #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) #endif +extern unsigned int __sw_hweight8(unsigned int w); +extern unsigned int __sw_hweight16(unsigned int w); +extern unsigned int __sw_hweight32(unsigned int w); +extern unsigned long __sw_hweight64(__u64 w); + /* * Include this here because some architectures need generic_ffs/fls in * scope -- cgit v1.2.3 From 2c2df8418ac7908eec4558407b83f16739006c54 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 30 Mar 2010 01:07:02 -0700 Subject: x86, acpi/irq: Introduce acpi_isa_irq_to_gsi There are a number of cases where the current code makes the assumption that isa irqs identity map to the first 16 ACPI global system interrupts. In most instances that assumption is correct, as that is the required behavior in dual i8259 mode and the default behavior in ioapic mode. However there are some systems out there that take advantage of ACPI's interrupt remapping for the isa irqs to have a completely different mapping of isa_irq to gsi. Introduce acpi_isa_irq_to_gsi to perform this mapping explicitly in the code that needs it. Initially this will be just the current assumed identity mapping to ensure its introduction does not cause regressions. Signed-off-by: Eric W. Biederman LKML-Reference: <1269936436-7039-1-git-send-email-ebiederm@xmission.com> Signed-off-by: H.
Peter Anvin --- arch/ia64/kernel/acpi.c | 8 ++++++++ arch/x86/kernel/acpi/boot.c | 8 ++++++++ include/linux/acpi.h | 1 + 3 files changed, 17 insertions(+) (limited to 'include') diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 4d1a7e9314cf..c6c90f39f4d9 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -785,6 +785,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) return 0; } +int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) +{ + if (isa_irq >= 16) + return -1; + *gsi = isa_irq; + return 0; +} + /* * ACPI based hotplug CPU support */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index cd40aba6aa95..da718d672596 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -458,6 +458,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) return 0; } +int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) +{ + if (isa_irq >= 16) + return -1; + *gsi = isa_irq; + return 0; +} + /* * success: return IRQ number (>=0) * failure: return < 0 diff --git a/include/linux/acpi.h b/include/linux/acpi.h index b926afe8c03e..7a937dabcc4a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -116,6 +116,7 @@ extern unsigned long acpi_realmode_flags; int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); +int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity); -- cgit v1.2.3 From 9a0a91bb56d2915cdb8585717de38376ad20fef9 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 30 Mar 2010 01:07:03 -0700 Subject: x86, acpi/irq: Teach acpi_get_override_irq to take a gsi not an isa_irq In perverse ACPI implementations the isa irqs are not identity mapped to the first 16 gsis. Furthermore, at least the extended interrupt resource capability may return gsis and not isa irqs. So, since what we get from ACPI is a gsi, teach acpi_get_override_irq to operate on a gsi instead of an isa_irq. Signed-off-by: Eric W. Biederman LKML-Reference: <1269936436-7039-2-git-send-email-ebiederm@xmission.com> Signed-off-by: H.
Peter Anvin --- arch/x86/kernel/apic/io_apic.c | 23 ++++++++++++++--------- include/linux/acpi.h | 4 ++-- 2 files changed, 16 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 127b8718abfb..73ec92838d83 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -4082,22 +4082,27 @@ int __init io_apic_get_version(int ioapic) return reg_01.bits.version; } -int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) +int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) { - int i; + int ioapic, pin, idx; if (skip_ioapic_setup) return -1; - for (i = 0; i < mp_irq_entries; i++) - if (mp_irqs[i].irqtype == mp_INT && - mp_irqs[i].srcbusirq == bus_irq) - break; - if (i >= mp_irq_entries) + ioapic = mp_find_ioapic(gsi); + if (ioapic < 0) + return -1; + + pin = mp_find_ioapic_pin(ioapic, gsi); + if (pin < 0) + return -1; + + idx = find_irq_entry(ioapic, pin, mp_INT); + if (idx < 0) return -1; - *trigger = irq_trigger(i); - *polarity = irq_polarity(i); + *trigger = irq_trigger(idx); + *polarity = irq_polarity(idx); return 0; } diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7a937dabcc4a..3da73f5f0ae9 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -119,9 +119,9 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); #ifdef CONFIG_X86_IO_APIC -extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity); +extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else -#define acpi_get_override_irq(bus, trigger, polarity) (-1) +#define acpi_get_override_irq(gsi, trigger, polarity) (-1) #endif /* * This function undoes the effect of one call to acpi_register_gsi(). -- cgit v1.2.3 From 50b5d6ad63821cea324a5a7a19854d4de1a0a819 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Thu, 6 May 2010 00:56:07 -0700 Subject: sctp: Fix a race between ICMP protocol unreachable and connect() ICMP protocol unreachable handling completely disregarded the fact that the user may have locked the socket. It proceeded to destroy the association, even though the user may have held the lock and had a ref on the association. This resulted in the following: Attempt to release alive inet socket f6afcc00 ========================= [ BUG: held lock freed! ] ------------------------- somenu/2672 is freeing memory f6afcc00-f6afcfff, with a lock still held there! (sk_lock-AF_INET){+.+.+.}, at: [] sctp_connect+0x13/0x4c 1 lock held by somenu/2672: #0: (sk_lock-AF_INET){+.+.+.}, at: [] sctp_connect+0x13/0x4c stack backtrace: Pid: 2672, comm: somenu Not tainted 2.6.32-telco #55 Call Trace: [] ? printk+0xf/0x11 [] debug_check_no_locks_freed+0xce/0xff [] kmem_cache_free+0x21/0x66 [] __sk_free+0x9d/0xab [] sk_free+0x1c/0x1e [] sctp_association_put+0x32/0x89 [] __sctp_connect+0x36d/0x3f4 [] ? sctp_connect+0x13/0x4c [] ? autoremove_wake_function+0x0/0x33 [] sctp_connect+0x31/0x4c [] inet_dgram_connect+0x4b/0x55 [] sys_connect+0x54/0x71 [] ? lock_release_non_nested+0x88/0x239 [] ? might_fault+0x42/0x7c [] ? might_fault+0x42/0x7c [] sys_socketcall+0x6d/0x178 [] ? trace_hardirqs_on_thunk+0xc/0x10 [] syscall_call+0x7/0xb This was because sctp_wait_for_connect() would acquire the socket lock and then proceed to release the last reference count on the association, thus causing the full destruction path to finish freeing the socket.
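The fix, described next, defers the teardown with a short timer whenever the socket lock is held by a user context. In sketch form (simplified from the diff below, where sk, t and asoc are the socket, transport and association in sctp_icmp_proto_unreachable()):

/* Simplified sketch of the deferral pattern this patch introduces. */
if (sock_owned_by_user(sk)) {
	/*
	 * mod_timer() returns 0 only when the timer was not already
	 * pending, i.e. this call armed it; take a reference on the
	 * association in exactly that case, so the timer handler can
	 * drop it once it finally runs.
	 */
	if (!mod_timer(&t->proto_unreach_timer, jiffies + (HZ / 20)))
		sctp_association_hold(asoc);
} else {
	/* Socket lock not held by a user: act immediately. */
	sctp_do_sm(SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, t, GFP_ATOMIC);
}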
The simplest solution is to start a very short timer in case the socket is owned by the user. When the timer expires, we can do some verification and be able to do the release properly. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/sm.h | 1 + include/net/sctp/structs.h | 3 +++ net/sctp/input.c | 22 ++++++++++++++++++---- net/sctp/sm_sideeffect.c | 35 +++++++++++++++++++++++++++++++++++ net/sctp/transport.c | 2 ++ 5 files changed, 59 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 851c813adb3a..61d73e37d543 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -279,6 +279,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, /* 2nd level prototypes */ void sctp_generate_t3_rtx_event(unsigned long peer); void sctp_generate_heartbeat_event(unsigned long peer); +void sctp_generate_proto_unreach_event(unsigned long peer); void sctp_ootb_pkt_free(struct sctp_packet *); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 597f8e27aaf6..219043a67bf7 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1010,6 +1010,9 @@ struct sctp_transport { /* Heartbeat timer is per destination. */ struct timer_list hb_timer; + /* Timer to handle ICMP proto unreachable events */ + struct timer_list proto_unreach_timer; + /* Since we're using per-destination retransmission timers * (see above), we're also using per-destination "transmitted" * queues. This probably ought to be a private struct diff --git a/net/sctp/input.c b/net/sctp/input.c index 2a570184e5a9..ea2192444ce6 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -440,11 +440,25 @@ void sctp_icmp_proto_unreachable(struct sock *sk, { SCTP_DEBUG_PRINTK("%s\n", __func__); - sctp_do_sm(SCTP_EVENT_T_OTHER, - SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), - asoc->state, asoc->ep, asoc, t, - GFP_ATOMIC); + if (sock_owned_by_user(sk)) { + if (timer_pending(&t->proto_unreach_timer)) + return; + else { + if (!mod_timer(&t->proto_unreach_timer, + jiffies + (HZ/20))) + sctp_association_hold(asoc); + } + + } else { + if (timer_pending(&t->proto_unreach_timer) && + del_timer(&t->proto_unreach_timer)) + sctp_association_put(asoc); + sctp_do_sm(SCTP_EVENT_T_OTHER, + SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), + asoc->state, asoc->ep, asoc, t, + GFP_ATOMIC); + } } /* Common lookup code for icmp/icmpv6 error handler. */ diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index d5ae450b6f02..eb1f42f45fdd 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -397,6 +397,41 @@ out_unlock: sctp_transport_put(transport); } +/* Handle the timeout of the ICMP protocol unreachable timer. Trigger + * the correct state machine transition that will close the association. + */ +void sctp_generate_proto_unreach_event(unsigned long data) +{ + struct sctp_transport *transport = (struct sctp_transport *) data; + struct sctp_association *asoc = transport->asoc; + + sctp_bh_lock_sock(asoc->base.sk); + if (sock_owned_by_user(asoc->base.sk)) { + SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); + + /* Try again later. */ + if (!mod_timer(&transport->proto_unreach_timer, + jiffies + (HZ/20))) + sctp_association_hold(asoc); + goto out_unlock; + } + + /* Is this structure just waiting around for us to actually + * get destroyed?
+ */ + if (asoc->base.dead) + goto out_unlock; + + sctp_do_sm(SCTP_EVENT_T_OTHER, + SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), + asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); + +out_unlock: + sctp_bh_unlock_sock(asoc->base.sk); + sctp_association_put(asoc); +} + + /* Inject a SACK Timeout event into the state machine. */ static void sctp_generate_sack_event(unsigned long data) { diff --git a/net/sctp/transport.c b/net/sctp/transport.c index be4d63d5a5cc..4a368038d46f 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -108,6 +108,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, (unsigned long)peer); setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, (unsigned long)peer); + setup_timer(&peer->proto_unreach_timer, + sctp_generate_proto_unreach_event, (unsigned long)peer); /* Initialize the 64-bit random nonce sent with heartbeat. */ get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); -- cgit v1.2.3 From ee84b8243b07c33a5c8aed42b4b2da60cb16d1d2 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 6 May 2010 09:28:41 -0700 Subject: rcu: create rcu_my_thread_group_empty() wrapper Some RCU-lockdep splat repairs need to know whether they are running in a single-threaded process. Unfortunately, the thread_group_empty() primitive is defined in sched.h, and can induce #include hell. This commit therefore introduces a rcu_my_thread_group_empty() wrapper that is defined in rcupdate.c, thus avoiding the need to include sched.h everywhere. Signed-off-by: "Paul E. McKenney" --- include/linux/rcupdate.h | 2 ++ kernel/rcupdate.c | 11 +++++++++++ 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 07db2feb8572..db266bbed23f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -190,6 +190,8 @@ static inline int rcu_read_lock_sched_held(void) #ifdef CONFIG_PROVE_RCU +extern int rcu_my_thread_group_empty(void); + /** * rcu_dereference_check - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 03a7ea1579f6..49d808e833b0 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -122,3 +122,14 @@ void wakeme_after_rcu(struct rcu_head *head) rcu = container_of(head, struct rcu_synchronize, head); complete(&rcu->completion); } + +#ifdef CONFIG_PROVE_RCU +/* + * wrapper function to avoid #include problems. + */ +int rcu_my_thread_group_empty(void) +{ + return thread_group_empty(current); +} +EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty); +#endif /* #ifdef CONFIG_PROVE_RCU */ -- cgit v1.2.3 From 03b1930efd3c2320b1dcba76c8af15f7e454919d Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 24 Mar 2010 19:09:55 -0300 Subject: V4L/DVB: saa7146: fix regression of the av7110/budget-av driver An earlier regression fix for the mxb driver (V4L/DVB: saa7146_vv: fix regression where v4l2_device was registered too late) caused a new regression in the av7110 driver. Reverted the old fix and fixed the problem in the mxb driver instead. Tested on mxb and budget-av cards. The real problem is that the saa7146 framework has separate probe() and attach() driver callbacks which should be rolled into one. This is now done for the mxb driver, but others should do the same. Lack of hardware makes this hard to do, though. I hope to get hold of some hexium cards and then I can try to improve the framework to prevent this from happening again. 
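In sketch form, the consolidation the mxb diff below applies -- attach() performs its own probe and unwinds on failure, since saa7146_vv_init() now registers the v4l2_device itself. A driver following suit might look roughly like this (names follow mxb; the error handling is a simplified assumption, not the exact mxb code):

/* Sketch of the attach-calls-probe pattern applied to mxb below. */
static int example_attach(struct saa7146_dev *dev,
			  struct saa7146_pci_extension_data *info)
{
	/* saa7146_vv_init() now registers the v4l2_device and can fail. */
	if (saa7146_vv_init(dev, &vv_data))
		return -1;

	if (mxb_probe(dev)) {
		/* Probe failed after vv_init: release what was set up. */
		saa7146_vv_release(dev);
		return -1;
	}

	/* ... set up ops and register the video devices ... */
	return 0;
}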
Signed-off-by: Hans Verkuil
Signed-off-by: Mauro Carvalho Chehab
---
 drivers/media/common/saa7146_fops.c | 11 +++++------
 drivers/media/video/hexium_gemini.c |  3 ---
 drivers/media/video/hexium_orion.c  |  4 ----
 drivers/media/video/mxb.c           | 17 ++++++++---------
 include/media/saa7146_vv.h          |  1 -
 5 files changed, 13 insertions(+), 23 deletions(-)
(limited to 'include')

diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index fd8e1f45be36..7364b9642d00 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -423,15 +423,14 @@ static void vv_callback(struct saa7146_dev *dev, unsigned long status)
 	}
 }
 
-int saa7146_vv_devinit(struct saa7146_dev *dev)
-{
-	return v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev);
-}
-EXPORT_SYMBOL_GPL(saa7146_vv_devinit);
-
 int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
 {
 	struct saa7146_vv *vv;
+	int err;
+
+	err = v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev);
+	if (err)
+		return err;
 
 	vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL);
 	if (vv == NULL) {
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index e620a3a92f25..ad2c232baa6d 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -356,9 +356,6 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
 
 	DEB_EE((".\n"));
 
-	ret = saa7146_vv_devinit(dev);
-	if (ret)
-		return ret;
 	hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
 	if (NULL == hexium) {
 		printk("hexium_gemini: not enough kernel memory in hexium_attach().\n");
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index fe596a1c12a8..938a1f8f880a 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -216,10 +216,6 @@ static int hexium_probe(struct saa7146_dev *dev)
 		return -EFAULT;
 	}
 
-	err = saa7146_vv_devinit(dev);
-	if (err)
-		return err;
-
 	hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
 	if (NULL == hexium) {
 		printk("hexium_orion: hexium_probe: not enough kernel memory.\n");
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 9f01f14e4aa2..ef0c8178f255 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -169,11 +169,7 @@ static struct saa7146_extension extension;
 static int mxb_probe(struct saa7146_dev *dev)
 {
 	struct mxb *mxb = NULL;
-	int err;
 
-	err = saa7146_vv_devinit(dev);
-	if (err)
-		return err;
 	mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL);
 	if (mxb == NULL) {
 		DEB_D(("not enough kernel memory.\n"));
@@ -699,14 +695,17 @@ static struct saa7146_ext_vv vv_data;
 /* this function only gets called when the probing was successful */
 static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
 {
-	struct mxb *mxb = (struct mxb *)dev->ext_priv;
+	struct mxb *mxb;
 
 	DEB_EE(("dev:%p\n", dev));
 
-	/* checking for i2c-devices can be omitted here, because we
-	   already did this in "mxb_vl42_probe" */
 	saa7146_vv_init(dev, &vv_data);
+	if (mxb_probe(dev)) {
+		saa7146_vv_release(dev);
+		return -1;
+	}
+	mxb = (struct mxb *)dev->ext_priv;
+
 	vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
 	vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
 	vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
@@ -726,6 +725,7 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data
 	vv_data.ops.vidioc_default = vidioc_default;
 	if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
 		ERR(("cannot register capture v4l2 device. skipping.\n"));
+		saa7146_vv_release(dev);
 		return -1;
 	}
 
@@ -846,7 +846,6 @@ static struct saa7146_extension extension = {
 	.pci_tbl = &pci_tbl[0],
 	.module = THIS_MODULE,
 
-	.probe = mxb_probe,
 	.attach = mxb_attach,
 	.detach = mxb_detach,
 
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index b9da1f5591e7..4aeff96ff7d8 100644
--- a/include/media/saa7146_vv.h
+++ b/include/media/saa7146_vv.h
@@ -188,7 +188,6 @@ void saa7146_buffer_timeout(unsigned long data);
 void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q,
 						struct saa7146_buf *buf);
 
-int saa7146_vv_devinit(struct saa7146_dev *dev);
 int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv);
 int saa7146_vv_release(struct saa7146_dev* dev);
 
--
cgit v1.2.3

From 8cfe92d683a0041ac8e016a0b0a487c99a78f6c1 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Wed, 28 Apr 2010 11:33:25 +0200
Subject: drm/ttm: Remove the ttm_bo_block_reservation() function.

It's unused and buggy in its current form, since it can place a bo
in the reserved state without removing it from lru lists.

Signed-off-by: Thomas Hellstrom
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/ttm/ttm_bo.c    | 30 +-----------------------------
 include/drm/ttm/ttm_bo_driver.h | 28 ----------------------------
 2 files changed, 1 insertion(+), 57 deletions(-)
(limited to 'include')

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index dd47b2a9a791..0e3754a3a303 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1716,40 +1716,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_wait);
 
-void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
-{
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
-}
-
-int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
-			     bool no_wait)
-{
-	int ret;
-
-	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
-		if (no_wait)
-			return -EBUSY;
-		else if (interruptible) {
-			ret = wait_event_interruptible
-				(bo->event_queue, atomic_read(&bo->reserved) == 0);
-			if (unlikely(ret != 0))
-				return ret;
-		} else {
-			wait_event(bo->event_queue,
-				   atomic_read(&bo->reserved) == 0);
-		}
-	}
-	return 0;
-}
-
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
 	int ret = 0;
 
 	/*
-	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
-	 * makes sure the lru lists are updated.
+	 * Using ttm_bo_reserve makes sure the lru lists are updated.
 	 */
 
 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e929c27ede22..6b9db917e717 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -789,34 +789,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
 extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
 				  bool interruptible);
 
-/**
- * ttm_bo_block_reservation
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Use interruptible sleep when waiting.
- * @no_wait: Don't sleep, but rather return -EBUSY.
- *
- * Block reservation for validation by simply reserving the buffer.
- * This is intended for single buffer use only without eviction,
- * and thus needs no deadlock protection.
- *
- * Returns:
- * -EBUSY: If no_wait == 1 and the buffer is already reserved.
- * -ERESTARTSYS: If interruptible == 1 and the process received a signal
- * while sleeping.
- */
-extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
-				    bool interruptible, bool no_wait);
-
-/**
- * ttm_bo_unblock_reservation
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Unblocks reservation leaving lru lists untouched.
- */
-extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
-
 /*
  * ttm_bo_util.c
  */
--
cgit v1.2.3

From 2b3fc35f6919344e3cf722dde8308f47235c0b70 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 20 Apr 2010 16:23:07 +0800
Subject: rcu: optionally leave lockdep enabled after RCU lockdep splat

There is no need to disable lockdep after an RCU lockdep splat, so
remove the debug_lockdeps_off() from lockdep_rcu_dereference().  To
avoid repeated lockdep splats, use a static variable in the inlined
rcu_dereference_check() and rcu_dereference_protected() macros so that
a given instance splats only once, but so that multiple instances can
be detected per boot.

This is controlled by a new config variable CONFIG_PROVE_RCU_REPEATEDLY,
which is disabled by default.  This provides the normal lockdep behavior
by default, but permits people who want to find multiple RCU-lockdep
splats per boot to easily do so.

Requested-by: Eric Paris
Signed-off-by: Lai Jiangshan
Tested-by: Eric Paris
Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h | 15 +++++++++++----
 kernel/lockdep.c         |  3 +++
 lib/Kconfig.debug        | 12 ++++++++++++
 3 files changed, 26 insertions(+), 4 deletions(-)
(limited to 'include')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index db266bbed23f..4dca2752cfde 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -192,6 +192,15 @@ static inline int rcu_read_lock_sched_held(void)
 
 extern int rcu_my_thread_group_empty(void);
 
+#define __do_rcu_dereference_check(c) \
+	do { \
+		static bool __warned; \
+		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
+			__warned = true; \
+			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		} \
+	} while (0)
+
 /**
  * rcu_dereference_check - rcu_dereference with debug checking
  * @p: The pointer to read, prior to dereferencing
@@ -221,8 +230,7 @@ extern int rcu_my_thread_group_empty(void);
  */
 #define rcu_dereference_check(p, c) \
 	({ \
-		if (debug_lockdep_rcu_enabled() && !(c)) \
-			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		__do_rcu_dereference_check(c); \
 		rcu_dereference_raw(p); \
 	})
 
@@ -239,8 +247,7 @@ extern int rcu_my_thread_group_empty(void);
  */
 #define rcu_dereference_protected(p, c) \
 	({ \
-		if (debug_lockdep_rcu_enabled() && !(c)) \
-			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		__do_rcu_dereference_check(c); \
 		(p); \
 	})
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 2594e1ce41cb..3a756ba8d5d8 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3801,8 +3801,11 @@ void lockdep_rcu_dereference(const char *file, const int line)
 {
 	struct task_struct *curr = current;
 
+#ifndef CONFIG_PROVE_RCU_REPEATEDLY
 	if (!debug_locks_off())
 		return;
+#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
+	/* Note: the following can be executed concurrently, so be careful. */
 	printk("\n===================================================\n");
 	printk(  "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
 	printk(  "---------------------------------------------------\n");
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 935248bdbc47..94090b4bb7d2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -512,6 +512,18 @@ config PROVE_RCU
 
 	 Say N if you are unsure.
 
+config PROVE_RCU_REPEATEDLY
+	bool "RCU debugging: don't disable PROVE_RCU on first splat"
+	depends on PROVE_RCU
+	default n
+	help
+	 By itself, PROVE_RCU will disable checking upon issuing the
+	 first warning (or "splat").  This feature prevents such
+	 disabling, allowing multiple RCU-lockdep warnings to be printed
+	 on a single reboot.
+
+	 Say N if you are unsure.
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
--
cgit v1.2.3

From d20200b591f59847ab6a5c23507084a7d29e23c5 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 30 Mar 2010 10:52:21 -0700
Subject: rcu: Fix bogus CONFIG_PROVE_LOCKING in comments to reflect reality.

It is CONFIG_DEBUG_LOCK_ALLOC rather than CONFIG_PROVE_LOCKING, so fix it.

Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h | 15 ++++++++-------
 include/linux/srcu.h     |  4 ++--
 2 files changed, 10 insertions(+), 9 deletions(-)
(limited to 'include')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4dca2752cfde..a150af0e5cd5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -106,8 +106,8 @@ extern int debug_lockdep_rcu_enabled(void);
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
+ * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.
 *
@@ -129,11 +129,12 @@ extern int rcu_read_lock_bh_held(void);
 /**
  * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
- * RCU-sched read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-sched read-side critical section unless it
- * can prove otherwise.  Note that disabling of preemption (including
- * disabling irqs) counts as an RCU-sched read-side critical section.
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise.  Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.
 *
 * Check rcu_scheduler_active to prevent false positives during boot.
 */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4d5ecb222af9..9c01f1022428 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -84,8 +84,8 @@ long srcu_batches_completed(struct srcu_struct *sp);
 /**
  * srcu_read_lock_held - might we be in SRCU read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an SRCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
+ * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 */
--
cgit v1.2.3

From 32c141a0a1dfa29e0a07d78bec0c0919fc4b9f88 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 30 Mar 2010 10:59:28 -0700
Subject: rcu: fix now-bogus rcu_scheduler_active comments.
The rcu_scheduler_active check has been wrapped into the new
debug_lockdep_rcu_enabled() function, so update the comments to reflect
this new reality.

Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a150af0e5cd5..02537a72aaa4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -111,7 +111,8 @@ extern int debug_lockdep_rcu_enabled(void);
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.
 *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
 */
 static inline int rcu_read_lock_held(void)
 {
@@ -136,7 +137,8 @@ extern int rcu_read_lock_bh_held(void);
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.
 *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
 */
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
--
cgit v1.2.3

From da848c47bc6e873a54a445ea1960423a495b6b32 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 30 Mar 2010 15:46:01 -0700
Subject: rcu: shrink rcutiny by making synchronize_rcu_bh() be inline

Because synchronize_rcu_bh() is identical to synchronize_sched(),
make the former a static inline invoking the latter, saving the
overhead of an EXPORT_SYMBOL_GPL() and the duplicate code.

Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h |  2 --
 include/linux/rcutiny.h  | 12 +++++++++++-
 include/linux/rcutree.h  |  2 ++
 kernel/rcutiny.c         |  9 ++-------
 4 files changed, 15 insertions(+), 10 deletions(-)
(limited to 'include')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 02537a72aaa4..d8fb2abcf303 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -56,8 +56,6 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
-extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
 extern void rcu_barrier(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a5195875480a..bbeb55b7709b 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -74,7 +74,17 @@ static inline void rcu_sched_force_quiescent_state(void)
 {
 }
 
-#define synchronize_rcu synchronize_sched
+extern void synchronize_sched(void);
+
+static inline void synchronize_rcu(void)
+{
+	synchronize_sched();
+}
+
+static inline void synchronize_rcu_bh(void)
+{
+	synchronize_sched();
+}
 
 static inline void synchronize_rcu_expedited(void)
 {
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 42cc3a04779e..7484fe66a3aa 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -86,6 +86,8 @@ static inline void __rcu_read_unlock_bh(void)
 
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
+extern void synchronize_rcu_bh(void);
+extern void synchronize_sched(void);
 extern void synchronize_rcu_expedited(void);
 
 static inline void synchronize_rcu_bh_expedited(void)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9f6d9ff2572c..272c6d21a75f 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -187,7 +187,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
- * But we want to make this a static inline later.
+ * But we want to make this a static inline later.  The cond_resched()
+ * currently makes this problematic.
 */
 void synchronize_sched(void)
 {
@@ -195,12 +196,6 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
-void synchronize_rcu_bh(void)
-{
-	synchronize_sched();
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
 /*
  * Helper function for call_rcu() and call_rcu_bh().
  */
--
cgit v1.2.3

From 25502a6c13745f4650cc59322bd198194f55e796 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 1 Apr 2010 17:37:01 -0700
Subject: rcu: refactor RCU's context-switch handling

The addition of preemptible RCU to treercu resulted in a bit of
confusion and inefficiency surrounding the handling of context switches
for RCU-sched and for RCU-preempt.  For RCU-sched, a context switch
is a quiescent state, pure and simple, just like it always has been.
For RCU-preempt, a context switch is in no way a quiescent state, but
special handling is required when a task blocks in an RCU read-side
critical section.

However, the callout from the scheduler and the outer loop in ksoftirqd
still calls something named rcu_sched_qs(), whose name is no longer
accurate.  Furthermore, when rcu_check_callbacks() notes an RCU-sched
quiescent state, it ends up unnecessarily (though harmlessly, aside
from the performance hit) enqueuing the current task if it happens to
be running in an RCU-preempt read-side critical section.  This not only
increases the maximum latency of scheduler_tick(), it also needlessly
increases the overhead of the next outermost rcu_read_unlock()
invocation.

This patch addresses this situation by separating the notion of RCU's
context-switch handling from that of RCU-sched's quiescent states.
The context-switch handling is covered by rcu_note_context_switch() in
general and by rcu_preempt_note_context_switch() for preemptible RCU.
This permits rcu_sched_qs() to handle quiescent states and only
quiescent states.  It also reduces the maximum latency of
scheduler_tick(), though probably by much less than a microsecond.
Finally, it means that tasks within preemptible-RCU read-side critical
sections avoid incurring the overhead of queuing unless there really
is a context switch.

Suggested-by: Lai Jiangshan
Acked-by: Lai Jiangshan
Signed-off-by: Paul E. McKenney
Cc: Ingo Molnar
Cc: Peter Zijlstra
---
 include/linux/rcutiny.h |  4 ++++
 include/linux/rcutree.h |  1 +
 kernel/rcutree.c        | 17 ++++++++++++-----
 kernel/rcutree_plugin.h | 11 +++++++----
 kernel/sched.c          |  2 +-
 kernel/softirq.c        |  2 +-
 6 files changed, 26 insertions(+), 11 deletions(-)
(limited to 'include')

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index bbeb55b7709b..ff22b97fb979 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -29,6 +29,10 @@
 void rcu_sched_qs(int cpu);
 void rcu_bh_qs(int cpu);
 
+static inline void rcu_note_context_switch(int cpu)
+{
+	rcu_sched_qs(cpu);
+}
 
 #define __rcu_read_lock()	preempt_disable()
 #define __rcu_read_unlock()	preempt_enable()
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 7484fe66a3aa..b9f74606f320 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,6 +34,7 @@ struct notifier_block;
 
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
+extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
 extern int rcu_expedited_torture_stats(char *page);
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 86bb9499aae6..e33631354b69 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -97,25 +97,32 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
 */
 void rcu_sched_qs(int cpu)
 {
-	struct rcu_data *rdp;
+	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
 
-	rdp = &per_cpu(rcu_sched_data, cpu);
 	rdp->passed_quiesc_completed = rdp->gpnum - 1;
 	barrier();
 	rdp->passed_quiesc = 1;
-	rcu_preempt_note_context_switch(cpu);
 }
 
 void rcu_bh_qs(int cpu)
 {
-	struct rcu_data *rdp;
+	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
 
-	rdp = &per_cpu(rcu_bh_data, cpu);
 	rdp->passed_quiesc_completed = rdp->gpnum - 1;
 	barrier();
 	rdp->passed_quiesc = 1;
 }
 
+/*
+ * Note a context switch.  This is a quiescent state for RCU-sched,
+ * and requires special handling for preemptible RCU.
+ */
+void rcu_note_context_switch(int cpu)
+{
+	rcu_sched_qs(cpu);
+	rcu_preempt_note_context_switch(cpu);
+}
+
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = 1,
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 687c4e90722e..f9bc83a047da 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -75,13 +75,19 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
+ *
+ * Unlike the other rcu_*_qs() functions, callers to this function
+ * must disable irqs in order to protect the assignment to
+ * ->rcu_read_unlock_special.
 */
 static void rcu_preempt_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
+
 	rdp->passed_quiesc_completed = rdp->gpnum - 1;
 	barrier();
 	rdp->passed_quiesc = 1;
+	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
 
 /*
@@ -144,9 +150,8 @@ static void rcu_preempt_note_context_switch(int cpu)
 	 * grace period, then the fact that the task has been enqueued
 	 * means that we continue to block the current grace period.
 	 */
-	rcu_preempt_qs(cpu);
 	local_irq_save(flags);
-	t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+	rcu_preempt_qs(cpu);
 	local_irq_restore(flags);
 }
 
@@ -236,7 +241,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	 */
 	special = t->rcu_read_unlock_special;
 	if (special & RCU_READ_UNLOCK_NEED_QS) {
-		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 		rcu_preempt_qs(smp_processor_id());
 	}
 
@@ -473,7 +477,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 	struct task_struct *t = current;
 
 	if (t->rcu_read_lock_nesting == 0) {
-		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 		rcu_preempt_qs(cpu);
 		return;
 	}
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c2a54f70ffe..d8a213ccdc3b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3706,7 +3706,7 @@ need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_sched_qs(cpu);
+	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 	switch_count = &prev->nivcsw;
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 7c1a67ef0274..0db913a5c60f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -716,7 +716,7 @@ static int run_ksoftirqd(void * __bind_cpu)
 			preempt_enable_no_resched();
 			cond_resched();
 			preempt_disable();
-			rcu_sched_qs((long)__bind_cpu);
+			rcu_note_context_switch((long)__bind_cpu);
 		}
 		preempt_enable();
 		set_current_state(TASK_INTERRUPTIBLE);
--
cgit v1.2.3

From bbad937983147c017c25406860287cb94da9af7c Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 2 Apr 2010 16:17:17 -0700
Subject: rcu: slim down rcutiny by removing rcu_scheduler_active and friends

TINY_RCU does not need rcu_scheduler_active unless
CONFIG_DEBUG_LOCK_ALLOC.  So conditionally compile rcu_scheduler_active
in order to slim down rcutiny a bit more.  Also gets rid of an
EXPORT_SYMBOL_GPL, which is responsible for most of the slimming.

Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h |  4 +---
 include/linux/rcutiny.h  | 13 +++++++++++++
 include/linux/rcutree.h  |  3 +++
 kernel/rcupdate.c        | 19 -------------------
 kernel/rcutiny.c         |  7 +++++++
 kernel/rcutiny_plugin.h  | 39 +++++++++++++++++++++++++++++++++++++++
 kernel/rcutree.c         | 19 +++++++++++++++++++
 7 files changed, 82 insertions(+), 22 deletions(-)
 create mode 100644 kernel/rcutiny_plugin.h
(limited to 'include')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d8fb2abcf303..23be3a702516 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -64,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page);
 
 /* Internal to kernel */
 extern void rcu_init(void);
-extern int rcu_scheduler_active;
-extern void rcu_scheduler_starting(void);
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include
@@ -178,7 +176,7 @@ static inline int rcu_read_lock_bh_held(void)
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
+	return preempt_count() != 0 || irqs_disabled();
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index ff22b97fb979..14e5a76b2c06 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -128,4 +128,17 @@ static inline int rcu_preempt_depth(void)
 	return 0;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+extern int rcu_scheduler_active __read_mostly;
+extern void rcu_scheduler_starting(void);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rcu_scheduler_starting(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index b9f74606f320..48282055e83d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -123,4 +123,7 @@ static inline int rcu_blocking_is_gp(void)
 	return num_online_cpus() == 1;
 }
 
+extern void rcu_scheduler_starting(void);
+extern int rcu_scheduler_active __read_mostly;
+
 #endif /* __LINUX_RCUTREE_H */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 49d808e833b0..72a8dc9567f5 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,7 +44,6 @@
 #include
 #include
 #include
-#include
 #include
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -64,9 +63,6 @@ struct lockdep_map rcu_sched_lock_map =
 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 #endif
 
-int rcu_scheduler_active __read_mostly;
-EXPORT_SYMBOL_GPL(rcu_scheduler_active);
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 int debug_lockdep_rcu_enabled(void)
@@ -96,21 +92,6 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-/*
- * This function is invoked towards the end of the scheduler's initialization
- * process.  Before this is called, the idle task might contain
- * RCU read-side critical sections (during which time, this idle
- * task is booting the system).  After this function is called, the
- * idle tasks are prohibited from containing RCU read-side critical
- * sections.
- */
-void rcu_scheduler_starting(void)
-{
-	WARN_ON(num_online_cpus() != 1);
-	WARN_ON(nr_context_switches() > 0);
-	rcu_scheduler_active = 1;
-}
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index d9f8a623c9fa..b1804ff83d5e 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -54,6 +54,11 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = {
 	.curtail	= &rcu_bh_ctrlblk.rcucblist,
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+int rcu_scheduler_active __read_mostly;
+EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 #ifdef CONFIG_NO_HZ
 
 static long rcu_dynticks_nesting = 1;
@@ -276,3 +281,5 @@ void __init rcu_init(void)
 {
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
+
+#include "rcutiny_plugin.h"
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
new file mode 100644
index 000000000000..d223a92bc742
--- /dev/null
+++ b/kernel/rcutiny_plugin.h
@@ -0,0 +1,39 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Internal non-public definitions that provide either classic
+ * or preemptable semantics.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2009
+ *
+ * Author: Paul E. McKenney
+ */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+#include
+
+/*
+ * During boot, we forgive RCU lockdep issues.  After this function is
+ * invoked, we start taking RCU lockdep issues seriously.
+ */
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}
+
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e33631354b69..3623f8e10220 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include
 
 #include "rcutree.h"
 
@@ -80,6 +81,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+int rcu_scheduler_active __read_mostly;
+EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+
 /*
 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
@@ -1783,6 +1787,21 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
+/*
+ * This function is invoked towards the end of the scheduler's initialization
+ * process.  Before this is called, the idle task might contain
+ * RCU read-side critical sections (during which time, this idle
+ * task is booting the system).  After this function is called, the
+ * idle tasks are prohibited from containing RCU read-side critical
+ * sections.  This function also enables RCU lockdep checking.
+ */
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}
+
 /*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
--
cgit v1.2.3

From d14aada8e20bdf81ffd43f433b123972cf575b32 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 19 Apr 2010 22:24:22 -0700
Subject: rcu: make SRCU usable in modules

Add a #include for mutex.h to allow SRCU to be more easily used in
kernel modules.

Signed-off-by: Lai Jiangshan
Signed-off-by: Paul E. McKenney
---
 include/linux/srcu.h | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'include')

diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 9c01f1022428..4d5d2f546dbf 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -27,6 +27,8 @@
 #ifndef _LINUX_SRCU_H
 #define _LINUX_SRCU_H
 
+#include <linux/mutex.h>
+
 struct srcu_struct_array {
 	int c[2];
 };
--
cgit v1.2.3

From a5d8e467f83f6672104f276223a88e3b50cbd375 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Sat, 17 Apr 2010 08:48:38 -0400
Subject: Debugobjects transition check

Implement a basic state machine checker in the debugobjects.

This state machine checker detects races and inconsistencies within
the "active" life of a debugobject.  The checker only keeps track of
the current state; all the state machine logic is kept at the object
instance level.

The checker works by adding a supplementary "unsigned int astate" field
to the debug_obj structure.  It keeps track of the current "active
state" of the object.

The only constraints that are imposed on the states by the debugobjects
system are that:

- activation of an object sets the current active state to 0,

- deactivation of an object expects the current active state to be 0.

For the rest of the states, the state mapping is determined by the
specific object instance.  Therefore, the logic keeping track of the
state machine is within the specialized instance, without any need to
know about it at the debugobject level.

The current object active state is changed by calling:

	debug_object_active_state(addr, descr, expect, next)

where "expect" is the expected state and "next" is the next state to
move to if the expected state is found.  A warning is generated if the
expected state is not found.

Signed-off-by: Mathieu Desnoyers
Reviewed-by: Thomas Gleixner
Acked-by: David S. Miller
CC: "Paul E. McKenney"
CC: akpm@linux-foundation.org
CC: mingo@elte.hu
CC: laijs@cn.fujitsu.com
CC: dipankar@in.ibm.com
CC: josh@joshtriplett.org
CC: dvhltc@us.ibm.com
CC: niv@us.ibm.com
CC: peterz@infradead.org
CC: rostedt@goodmis.org
CC: Valdis.Kletnieks@vt.edu
CC: dhowells@redhat.com
CC: eric.dumazet@gmail.com
CC: Alexey Dobriyan
Signed-off-by: Paul E. McKenney
---
 include/linux/debugobjects.h | 11 +++++++++
 lib/debugobjects.c           | 59 +++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 67 insertions(+), 3 deletions(-)
(limited to 'include')

diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 8c243aaa86a7..597692f1fc8d 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -20,12 +20,14 @@ struct debug_obj_descr;
 * struct debug_obj - representation of a tracked object
 * @node:	hlist node to link the object into the tracker list
 * @state:	tracked object state
+ * @astate:	current active state
 * @object:	pointer to the real object
 * @descr:	pointer to an object type specific debug description structure
 */
 struct debug_obj {
 	struct hlist_node	node;
 	enum debug_obj_state	state;
+	unsigned int		astate;
 	void			*object;
 	struct debug_obj_descr	*descr;
 };
@@ -60,6 +62,15 @@ extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy   (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free      (void *addr, struct debug_obj_descr *descr);
 
+/*
+ * Active state:
+ * - Set at 0 upon initialization.
+ * - Must return to 0 before deactivation.
+ */
+extern void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next);
+
 extern void debug_objects_early_init(void);
 extern void debug_objects_mem_init(void);
 #else
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index b862b30369ff..076464fd2072 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -141,6 +141,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 		obj->object = addr;
 		obj->descr  = descr;
 		obj->state  = ODEBUG_STATE_NONE;
+		obj->astate = 0;
 		hlist_del(&obj->node);
 
 		hlist_add_head(&obj->node, &b->list);
@@ -252,8 +253,10 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
 
 	if (limit < 5 && obj->descr != descr_test) {
 		limit++;
-		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
-		       obj_states[obj->state], obj->descr->name);
+		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
+		       "object type: %s\n",
+			msg, obj_states[obj->state], obj->astate,
+			obj->descr->name);
 	}
 	debug_objects_warnings++;
 }
@@ -447,7 +450,10 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 		case ODEBUG_STATE_INIT:
 		case ODEBUG_STATE_INACTIVE:
 		case ODEBUG_STATE_ACTIVE:
-			obj->state = ODEBUG_STATE_INACTIVE;
+			if (!obj->astate)
+				obj->state = ODEBUG_STATE_INACTIVE;
+			else
+				debug_print_object(obj, "deactivate");
 			break;
 
 		case ODEBUG_STATE_DESTROYED:
@@ -553,6 +559,53 @@ out_unlock:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 
+/**
+ * debug_object_active_state - debug checks object usage state machine
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ * @expect:	expected state
+ * @next:	state to move to if expected state is found
+ */
+void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	raw_spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (obj) {
+		switch (obj->state) {
+		case ODEBUG_STATE_ACTIVE:
+			if (obj->astate == expect)
+				obj->astate = next;
+			else
+				debug_print_object(obj, "active_state");
+			break;
+
+		default:
+			debug_print_object(obj, "active_state");
+			break;
+		}
+	} else {
+		struct debug_obj o = { .object = addr,
+				       .state = ODEBUG_STATE_NOTAVAILABLE,
+				       .descr = descr };
+
+		debug_print_object(&o, "active_state");
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
 #ifdef CONFIG_DEBUG_OBJECTS_FREE
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
--
cgit v1.2.3

From 4376030a54860dedab9d848dfa7cc700a6025c0b Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Sat, 17 Apr 2010 08:48:39 -0400
Subject: rcu head introduce rcu head init on stack

PEM:
o	Would it be possible to make this bisectable as follows?

	a.	Insert a new patch after current patch 4/6 that
		defines destroy_rcu_head_on_stack(),
		init_rcu_head_on_stack(), and init_rcu_head() with
		their !CONFIG_DEBUG_OBJECTS_RCU_HEAD definitions.

This patch performs this transition.

Signed-off-by: Mathieu Desnoyers
CC: "Paul E. McKenney"
CC: David S. Miller
CC: akpm@linux-foundation.org
CC: mingo@elte.hu
CC: laijs@cn.fujitsu.com
CC: dipankar@in.ibm.com
CC: josh@joshtriplett.org
CC: dvhltc@us.ibm.com
CC: niv@us.ibm.com
CC: tglx@linutronix.de
CC: peterz@infradead.org
CC: rostedt@goodmis.org
CC: Valdis.Kletnieks@vt.edu
CC: dhowells@redhat.com
CC: eric.dumazet@gmail.com
CC: Alexey Dobriyan
Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h | 8 ++++++++
 1 file changed, 8 insertions(+)
(limited to 'include')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 23be3a702516..b653b4aaa8a6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -79,6 +79,14 @@ extern void rcu_init(void);
 	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
 
+static inline void init_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 extern struct lockdep_map rcu_lock_map;
--
cgit v1.2.3

From 72d5a9f7a9542f88397558c65bcfc3b115a65e34 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 10 May 2010 17:12:17 -0700
Subject: rcu: remove all rcu head initializations, except on_stack initializations

Remove all rcu head inits.  We don't care about the RCU head state
before passing it to call_rcu() anyway.  Only leave the "on_stack"
variants so debugobjects can keep track of objects on stack.

Signed-off-by: Mathieu Desnoyers
Signed-off-by: Paul E. McKenney
---
 include/linux/init_task.h | 1 -
 kernel/rcutiny.c          | 6 ++++++
 kernel/rcutorture.c       | 2 ++
 kernel/rcutree.c          | 4 ++++
 kernel/rcutree_plugin.h   | 2 ++
 5 files changed, 14 insertions(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b1ed1cd8e2a8..7996fc2c9ba9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -49,7 +49,6 @@ extern struct group_info init_groups;
 		{ .first = &init_task.pids[PIDTYPE_PGID].node },	\
 		{ .first = &init_task.pids[PIDTYPE_SID].node },		\
 	},								\
-	.rcu		= RCU_HEAD_INIT,				\
 	.level		= 0,						\
 	.numbers	= { {						\
 		.nr		= 0,					\
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index b1804ff83d5e..38729d3cd236 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -245,11 +245,13 @@ void rcu_barrier(void)
 {
 	struct rcu_synchronize rcu;
 
+	init_rcu_head_on_stack(&rcu.head);
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu(&rcu.head, wakeme_after_rcu);
 	/* Wait for it. */
 	wait_for_completion(&rcu.completion);
+	destroy_rcu_head_on_stack(&rcu.head);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
@@ -257,11 +259,13 @@ void rcu_barrier_bh(void)
 {
 	struct rcu_synchronize rcu;
 
+	init_rcu_head_on_stack(&rcu.head);
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu_bh(&rcu.head, wakeme_after_rcu);
 	/* Wait for it. */
 	wait_for_completion(&rcu.completion);
+	destroy_rcu_head_on_stack(&rcu.head);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
 
@@ -269,11 +273,13 @@ void rcu_barrier_sched(void)
 {
 	struct rcu_synchronize rcu;
 
+	init_rcu_head_on_stack(&rcu.head);
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu_sched(&rcu.head, wakeme_after_rcu);
 	/* Wait for it. */
 	wait_for_completion(&rcu.completion);
+	destroy_rcu_head_on_stack(&rcu.head);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 58df55bf83ed..077defb34571 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -464,9 +464,11 @@ static void rcu_bh_torture_synchronize(void)
 {
 	struct rcu_bh_torture_synchronize rcu;
 
+	init_rcu_head_on_stack(&rcu.head);
 	init_completion(&rcu.completion);
 	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
 	wait_for_completion(&rcu.completion);
+	destroy_rcu_head_on_stack(&rcu.head);
 }
 
 static struct rcu_torture_ops rcu_bh_ops = {
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ba6996943e28..d4437345706f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1484,11 +1484,13 @@ void synchronize_sched(void)
 	if (rcu_blocking_is_gp())
 		return;
 
+	init_rcu_head_on_stack(&rcu.head);
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu_sched(&rcu.head, wakeme_after_rcu);
 	/* Wait for it. */
 	wait_for_completion(&rcu.completion);
+	destroy_rcu_head_on_stack(&rcu.head);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
@@ -1508,11 +1510,13 @@ void synchronize_rcu_bh(void)
 	if (rcu_blocking_is_gp())
 		return;
 
+	init_rcu_head_on_stack(&rcu.head);
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu_bh(&rcu.head, wakeme_after_rcu);
 	/* Wait for it. */
 	wait_for_completion(&rcu.completion);
+	destroy_rcu_head_on_stack(&rcu.head);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ac7d80fa895c..0e4f420245d9 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -557,11 +557,13 @@ void synchronize_rcu(void)
 	if (!rcu_scheduler_active)
 		return;
 
+	init_rcu_head_on_stack(&rcu.head);
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu(&rcu.head, wakeme_after_rcu);
 	/* Wait for it. */
 	wait_for_completion(&rcu.completion);
+	destroy_rcu_head_on_stack(&rcu.head);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
--
cgit v1.2.3

From f33d7e2d2d113a63772bbc993cdec3b5327f0ef1 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Tue, 11 May 2010 14:06:43 -0700
Subject: dma-mapping: fix dma_sync_single_range_*

dma_sync_single_range_for_cpu() and dma_sync_single_range_for_device()
use a wrong address with a partial synchronization.
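The effect is easiest to see from the caller's side.  In this
hypothetical driver fragment (dev, handle, buf and len are placeholders,
not code from the patch), only a sub-range of a streaming mapping is
synced, so the fallback must offset the bus address or the wrong
cachelines get flushed and invalidated:

	/* Sync bytes [64, 64 + len) of a larger mapping around a CPU read.
	 * With this fix the fallback path is equivalent to
	 *	dma_sync_single_for_cpu(dev, handle + 64, len, DMA_FROM_DEVICE);
	 * i.e. the offset is applied to the DMA address instead of dropped.
	 */
	dma_sync_single_range_for_cpu(dev, handle, 64, len, DMA_FROM_DEVICE);
	/* ... CPU reads buf + 64 ... */
	dma_sync_single_range_for_device(dev, handle, 64, len, DMA_FROM_DEVICE);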
Signed-off-by: FUJITA Tomonori
Reviewed-by: Konrad Rzeszutek Wilk
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-generic/dma-mapping-common.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index e694263445f7..69206957b72c 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -131,7 +131,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		debug_dma_sync_single_range_for_cpu(dev, addr, offset,
 						    size, dir);
 	} else
-		dma_sync_single_for_cpu(dev, addr, size, dir);
+		dma_sync_single_for_cpu(dev, addr + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -148,7 +148,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 		debug_dma_sync_single_range_for_device(dev, addr, offset,
 						       size, dir);
 	} else
-		dma_sync_single_for_device(dev, addr, size, dir);
+		dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
 static inline void
--
cgit v1.2.3

From 34441427aab4bdb3069a4ffcda69a99357abcb2e Mon Sep 17 00:00:00 2001
From: Robin Holt
Date: Tue, 11 May 2010 14:06:46 -0700
Subject: revert "procfs: provide stack information for threads" and its fixup commits

Originally, commit d899bf7b ("procfs: provide stack information for
threads") attempted to introduce a new feature for showing where the
threadstack was located and how many pages are being utilized by the
stack.

Commit c44972f1 ("procfs: disable per-task stack usage on NOMMU") was
applied to fix the NO_MMU case.

Commit 89240ba0 ("x86, fs: Fix x86 procfs stack information for threads
on 64-bit") was applied to fix a bug in ia32 executables being loaded.

Commit 9ebd4eba7 ("procfs: fix /proc//stat stack pointer for kernel
threads") was applied to fix a bug which had kernel threads printing a
userland stack address.

Commit 1306d603f ('proc: partially revert "procfs: provide stack
information for threads"') was then applied to revert the stack pages
being used to solve a significant performance regression.

This patch nearly undoes the effect of all these patches.

The reason for reverting these is it provides an unusable value in
field 28.  For x86_64, a fork will result in the task->stack_start
value being updated to the current user top of stack and not the stack
start address.  This unpredictability of the stack_start value makes
it worthless.  That includes the intended use of showing how much stack
space a thread has.

Other architectures will get different values.  As an example, ia64
gets 0.  The do_fork() and copy_process() functions appear to treat the
stack_start and stack_size parameters as architecture specific.

I only partially reverted c44972f1 ("procfs: disable per-task stack usage
on NOMMU").  If I had completely reverted it, I would have had to change
mm/Makefile to only build pagewalk.o when CONFIG_PROC_PAGE_MONITOR is
configured.  Since I could not test the builds without significant
effort, I decided to not change mm/Makefile.

I only partially reverted 89240ba0 ("x86, fs: Fix x86 procfs stack
information for threads on 64-bit").  I left the KSTK_ESP() change in
place as that seemed worthwhile.
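For reference, the field in question is easy to inspect from userspace.
This small standalone program (an illustration only, not part of the
patch) prints field 28 of /proc/self/stat, which after the revert comes
from mm->start_stack again:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[4096];
		FILE *f = fopen("/proc/self/stat", "r");

		if (!f || !fgets(buf, sizeof(buf), f))
			return 1;
		fclose(f);

		/* Field 2 (comm) is parenthesized and may contain spaces,
		 * so resume tokenizing after the last ')'. */
		char *p = strrchr(buf, ')');
		if (!p)
			return 1;

		int field = 2;	/* next token is field 3 (state) */
		for (char *tok = strtok(p + 1, " "); tok; tok = strtok(NULL, " ")) {
			if (++field == 28) {
				printf("startstack: %s\n", tok);
				return 0;
			}
		}
		return 1;
	}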
Signed-off-by: Robin Holt
Cc: Stefani Seibold
Cc: KOSAKI Motohiro
Cc: Michal Simek
Cc: Ingo Molnar
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 Documentation/filesystems/proc.txt |  3 +--
 fs/compat.c                        |  2 --
 fs/exec.c                          |  2 --
 fs/proc/array.c                    |  3 +--
 fs/proc/task_mmu.c                 | 19 -------------------
 include/linux/sched.h              |  1 -
 kernel/fork.c                      |  2 --
 7 files changed, 2 insertions(+), 30 deletions(-)
(limited to 'include')

diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a4f30faa4f1f..1e359b62c40a 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -316,7 +316,7 @@ address           perms offset  dev   inode      pathname
 08049000-0804a000 rw-p 00001000 03:00 8312       /opt/test
 0804a000-0806b000 rw-p 00000000 00:00 0          [heap]
 a7cb1000-a7cb2000 ---p 00000000 00:00 0
-a7cb2000-a7eb2000 rw-p 00000000 00:00 0          [threadstack:001ff4b4]
+a7cb2000-a7eb2000 rw-p 00000000 00:00 0
 a7eb2000-a7eb3000 ---p 00000000 00:00 0
 a7eb3000-a7ed5000 rw-p 00000000 00:00 0
 a7ed5000-a8008000 r-xp 00000000 03:00 4222       /lib/libc.so.6
@@ -352,7 +352,6 @@ is not associated with a file:
 [stack]                  = the stack of the main process
 [vdso]                   = the "virtual dynamic shared object",
                            the kernel system call handler
- [threadstack:xxxxxxxx]   = the stack of the thread, xxxxxxxx is the stack size
 
 or if empty, the mapping is anonymous.
 
diff --git a/fs/compat.c b/fs/compat.c
index 4b6ed03cc478..05448730f840 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1531,8 +1531,6 @@ int compat_do_execve(char * filename,
 	if (retval < 0)
 		goto out;
 
-	current->stack_start = current->mm->start_stack;
-
 	/* execve succeeded */
 	current->fs->in_exec = 0;
 	current->in_execve = 0;
diff --git a/fs/exec.c b/fs/exec.c
index 49cdaa19e5b9..e6e94c626c2c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1387,8 +1387,6 @@ int do_execve(char * filename,
 	if (retval < 0)
 		goto out;
 
-	current->stack_start = current->mm->start_stack;
-
 	/* execve succeeded */
 	current->fs->in_exec = 0;
 	current->in_execve = 0;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index e51f2ec2c5e5..885ab5513ac5 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -81,7 +81,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -495,7 +494,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		rsslim,
 		mm ? mm->start_code : 0,
 		mm ? mm->end_code : 0,
-		(permitted && mm) ? task->stack_start : 0,
+		(permitted && mm) ? mm->start_stack : 0,
 		esp,
 		eip,
 		/* The signal information here is obsolete.
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 070553427dd5..47f5b145f56e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -247,25 +247,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 		} else if (vma->vm_start <= mm->start_stack &&
 			   vma->vm_end >= mm->start_stack) {
 			name = "[stack]";
-		} else {
-			unsigned long stack_start;
-			struct proc_maps_private *pmp;
-
-			pmp = m->private;
-			stack_start = pmp->task->stack_start;
-
-			if (vma->vm_start <= stack_start &&
-			    vma->vm_end >= stack_start) {
-				pad_len_spaces(m, len);
-				seq_printf(m,
-				 "[threadstack:%08lx]",
-#ifdef CONFIG_STACK_GROWSUP
-				 vma->vm_end - stack_start
-#else
-				 stack_start - vma->vm_start
-#endif
-				);
-			}
 		}
 	} else {
 		name = "[vdso]";
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dad7f668ebf7..2b7b81df78b3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1497,7 +1497,6 @@ struct task_struct {
 	/* bitmask of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
-	unsigned long stack_start;
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
 	struct memcg_batch_info {
 		int do_batch;	/* incremented when batch uncharge started */
diff --git a/kernel/fork.c b/kernel/fork.c
index 44b0791b0a2e..4c14942a0ee3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1114,8 +1114,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	p->bts = NULL;
 
-	p->stack_start = stack_start;
-
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
 
--
cgit v1.2.3

From d83c49f3e36cecd2e8823b6c48ffba083b8a5704 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Fri, 30 Apr 2010 17:17:09 -0400
Subject: Fix the regression created by "set S_DEAD on unlink()..." commit

1) i_flags simply doesn't work for mount/unlink race prevention; we may
have many links to a file and rm on one of those obviously shouldn't
prevent bind on top of another later on.  To fix it the right way we
need to mark _dentry_ as unsuitable for mounting upon; new flag
(DCACHE_CANT_MOUNT) is protected by d_flags and i_mutex on the inode in
question.  Set it (with dont_mount(dentry)) in unlink/rmdir/etc., check
(with cant_mount(dentry)) in places in namespace.c that used to check
for S_DEAD.  Setting S_DEAD is still needed in places where we used to
set it (for directories getting killed), since we rely on it for
readdir/rmdir race prevention.

2) rename()/mount() protection has another bogosity - we unhash the
target before we'd checked that it's not a mountpoint.  Fixed.

3) ancient bogosity in pivot_root() - we locked i_mutex on the right
directory, but checked S_DEAD on a different (and wrong) one.  Noticed
and fixed.
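The resulting protocol is small enough to restate as a sketch (it uses
only the helpers this patch introduces; the surrounding locking context
and the out_unlock label are abbreviated from the real call sites):

	/* unlink()/rmdir() side: once the fs operation has succeeded,
	 * forbid new mounts on top of the dentry. */
	if (!error)
		dont_mount(dentry);	/* sets DCACHE_CANT_MOUNT under d_lock */

	/* mount side: under the parent's i_mutex, refuse a dentry that
	 * was unlinked while we waited for the lock. */
	mutex_lock(&path->dentry->d_inode->i_mutex);
	err = -ENOENT;
	if (cant_mount(path->dentry))
		goto out_unlock;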
Signed-off-by: Al Viro
---
 drivers/usb/core/inode.c |  1 +
 fs/configfs/dir.c        |  4 ++++
 fs/namei.c               | 21 +++++++++++++--------
 fs/namespace.c           |  6 +++---
 include/linux/dcache.h   | 14 ++++++++++++++
 5 files changed, 35 insertions(+), 11 deletions(-)
(limited to 'include')

diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 4a6366a42129..111a01a747fc 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -380,6 +380,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
 	mutex_lock(&inode->i_mutex);
 	dentry_unhash(dentry);
 	if (usbfs_empty(dentry)) {
+		dont_mount(dentry);
 		drop_nlink(dentry->d_inode);
 		drop_nlink(dentry->d_inode);
 		dput(dentry);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8e48b52205aa..0b502f80c691 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -645,6 +645,7 @@ static void detach_groups(struct config_group *group)
 
 		configfs_detach_group(sd->s_element);
 		child->d_inode->i_flags |= S_DEAD;
+		dont_mount(child);
 
 		mutex_unlock(&child->d_inode->i_mutex);
 
@@ -840,6 +841,7 @@ static int configfs_attach_item(struct config_item *parent_item,
 			mutex_lock(&dentry->d_inode->i_mutex);
 			configfs_remove_dir(item);
 			dentry->d_inode->i_flags |= S_DEAD;
+			dont_mount(dentry);
 			mutex_unlock(&dentry->d_inode->i_mutex);
 			d_delete(dentry);
 		}
@@ -882,6 +884,7 @@ static int configfs_attach_group(struct config_item *parent_item,
 		if (ret) {
 			configfs_detach_item(item);
 			dentry->d_inode->i_flags |= S_DEAD;
+			dont_mount(dentry);
 		}
 		configfs_adjust_dir_dirent_depth_after_populate(sd);
 		mutex_unlock(&dentry->d_inode->i_mutex);
@@ -1725,6 +1728,7 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
 	mutex_unlock(&configfs_symlink_mutex);
 	configfs_detach_group(&group->cg_item);
 	dentry->d_inode->i_flags |= S_DEAD;
+	dont_mount(dentry);
 	mutex_unlock(&dentry->d_inode->i_mutex);
 
 	d_delete(dentry);
diff --git a/fs/namei.c b/fs/namei.c
index 16df7277a92e..b86b96fe1dc3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2176,8 +2176,10 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 		error = security_inode_rmdir(dir, dentry);
 		if (!error) {
 			error = dir->i_op->rmdir(dir, dentry);
-			if (!error)
+			if (!error) {
 				dentry->d_inode->i_flags |= S_DEAD;
+				dont_mount(dentry);
+			}
 		}
 	}
 	mutex_unlock(&dentry->d_inode->i_mutex);
@@ -2261,7 +2263,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
 		if (!error) {
 			error = dir->i_op->unlink(dir, dentry);
 			if (!error)
-				dentry->d_inode->i_flags |= S_DEAD;
+				dont_mount(dentry);
 		}
 	}
 	mutex_unlock(&dentry->d_inode->i_mutex);
@@ -2572,17 +2574,20 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
 		return error;
 
 	target = new_dentry->d_inode;
-	if (target) {
+	if (target)
 		mutex_lock(&target->i_mutex);
-		dentry_unhash(new_dentry);
-	}
 	if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
 		error = -EBUSY;
-	else
+	else {
+		if (target)
+			dentry_unhash(new_dentry);
 		error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
+	}
 	if (target) {
-		if (!error)
+		if (!error) {
 			target->i_flags |= S_DEAD;
+			dont_mount(new_dentry);
+		}
 		mutex_unlock(&target->i_mutex);
 		if (d_unhashed(new_dentry))
 			d_rehash(new_dentry);
@@ -2614,7 +2619,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
 		error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
 	if (!error) {
 		if (target)
-			target->i_flags |= S_DEAD;
+			dont_mount(new_dentry);
 		if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
 			d_move(old_dentry, new_dentry);
 	}
diff --git a/fs/namespace.c b/fs/namespace.c
index 8174c8ab5c70..f20cb57d1067 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1432,7 +1432,7 @@ static int graft_tree(struct vfsmount *mnt, struct path *path)
 
 	err = -ENOENT;
 	mutex_lock(&path->dentry->d_inode->i_mutex);
-	if (IS_DEADDIR(path->dentry->d_inode))
+	if (cant_mount(path->dentry))
 		goto out_unlock;
 
 	err = security_sb_check_sb(mnt, path);
@@ -1623,7 +1623,7 @@ static int do_move_mount(struct path *path, char *old_name)
 
 	err = -ENOENT;
 	mutex_lock(&path->dentry->d_inode->i_mutex);
-	if (IS_DEADDIR(path->dentry->d_inode))
+	if (cant_mount(path->dentry))
 		goto out1;
 
 	if (d_unlinked(path->dentry))
@@ -2234,7 +2234,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	if (!check_mnt(root.mnt))
 		goto out2;
 	error = -ENOENT;
-	if (IS_DEADDIR(new.dentry->d_inode))
+	if (cant_mount(old.dentry))
 		goto out2;
 	if (d_unlinked(new.dentry))
 		goto out2;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 30b93b2a01a4..eebb617c17d8 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -186,6 +186,8 @@ d_iput:		no		no		no       yes
 
 #define DCACHE_FSNOTIFY_PARENT_WATCHED	0x0080 /* Parent inode is watched by some fsnotify listener */
 
+#define DCACHE_CANT_MOUNT	0x0100
+
 extern spinlock_t dcache_lock;
 extern seqlock_t rename_lock;
 
@@ -358,6 +360,18 @@ static inline int d_unlinked(struct dentry *dentry)
 	return d_unhashed(dentry) && !IS_ROOT(dentry);
 }
 
+static inline int cant_mount(struct dentry *dentry)
+{
+	return (dentry->d_flags & DCACHE_CANT_MOUNT);
+}
+
+static inline void dont_mount(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= DCACHE_CANT_MOUNT;
+	spin_unlock(&dentry->d_lock);
+}
+
 static inline struct dentry *dget_parent(struct dentry *dentry)
 {
 	struct dentry *ret;
--
cgit v1.2.3

From 35790c0421121364883a167bab8a2e37e1f67f78 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Sun, 16 May 2010 00:34:04 -0700
Subject: tcp: fix MD5 (RFC2385) support

TCP MD5 support uses percpu data for temporary storage.  It currently
disables preemption so that the same storage cannot be reclaimed by
another thread on the same cpu.

We also have to make sure a softirq handler won't try to also use the
same context.  Various bug reports demonstrated corruptions.

The fix is to disable preemption and BH.

Reported-by: Bhaskar Dutta
Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 include/net/tcp.h | 21 +++------------------
 net/ipv4/tcp.c    | 34 ++++++++++++++++++++++----------
 2 files changed, 27 insertions(+), 28 deletions(-)
(limited to 'include')

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 75be5a28815d..aa04b9a5093b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1197,30 +1197,15 @@ extern int tcp_v4_md5_do_del(struct sock *sk,
 
 extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
 extern void tcp_free_md5sig_pool(void);
-extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
-extern void __tcp_put_md5sig_pool(void);
+extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
+extern void tcp_put_md5sig_pool(void);
+
 extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
 extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
 				 unsigned header_len);
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 			    struct tcp_md5sig_key *key);
 
-static inline
-struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
-{
-	int cpu = get_cpu();
-	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
-	if (!ret)
-		put_cpu();
-	return ret;
-}
-
-static inline void tcp_put_md5sig_pool(void)
-{
-	__tcp_put_md5sig_pool();
-	put_cpu();
-}
-
 /* write queue abstraction */
 static inline void tcp_write_queue_purge(struct sock *sk)
 {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0f8caf64caa3..296150b2a62f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2839,7 +2839,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
 			if (p->md5_desc.tfm)
 				crypto_free_hash(p->md5_desc.tfm);
 			kfree(p);
-			p = NULL;
 		}
 	}
 	free_percpu(pool);
@@ -2937,25 +2936,40 @@ retry:
 
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
-struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+
+/**
+ *	tcp_get_md5sig_pool - get md5sig_pool for this user
+ *
+ *	We use percpu structure, so if we succeed, we exit with preemption
+ *	and BH disabled, to make sure another thread or softirq handling
+ *	won't try to get the same context.
+ */
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
 	struct tcp_md5sig_pool * __percpu *p;
-	spin_lock_bh(&tcp_md5sig_pool_lock);
+
+	local_bh_disable();
+
+	spin_lock(&tcp_md5sig_pool_lock);
 	p = tcp_md5sig_pool;
 	if (p)
 		tcp_md5sig_users++;
-	spin_unlock_bh(&tcp_md5sig_pool_lock);
-	return (p ? *per_cpu_ptr(p, cpu) : NULL);
-}
+	spin_unlock(&tcp_md5sig_pool_lock);
+
+	if (p)
+		return *per_cpu_ptr(p, smp_processor_id());
 
-EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+	local_bh_enable();
+	return NULL;
+}
+EXPORT_SYMBOL(tcp_get_md5sig_pool);
 
-void __tcp_put_md5sig_pool(void)
+void tcp_put_md5sig_pool(void)
 {
+	local_bh_enable();
 	tcp_free_md5sig_pool();
 }
-
-EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+EXPORT_SYMBOL(tcp_put_md5sig_pool);
 
 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
 			struct tcphdr *th)
--
cgit v1.2.3

From c02db8c6290bb992442fec1407643c94cc414375 Mon Sep 17 00:00:00 2001
From: Chris Wright
Date: Sun, 16 May 2010 01:05:45 -0700
Subject: rtnetlink: make SR-IOV VF interface symmetric

Now we have a set of nested attributes:

IFLA_VFINFO_LIST (NESTED)
	IFLA_VF_INFO (NESTED)
		IFLA_VF_MAC
		IFLA_VF_VLAN
		IFLA_VF_TX_RATE

This allows a single set to operate on multiple attributes if desired.
Among other things, it means a dump can be replayed to set state.

The current interface has yet to be released, so this seems like
something to consider for 2.6.34.

Signed-off-by: Chris Wright
Signed-off-by: David S. Miller
---
 include/linux/if_link.h |  23 +++++--
 net/core/rtnetlink.c    | 159 +++++++++++++++++++++++++++++++++---------------
 2 files changed, 129 insertions(+), 53 deletions(-)
(limited to 'include')

diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index c9bf92cd7653..d94963b379d9 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -79,10 +79,7 @@ enum {
 	IFLA_NET_NS_PID,
 	IFLA_IFALIAS,
 	IFLA_NUM_VF,		/* Number of VFs if device is SR-IOV PF */
-	IFLA_VF_MAC,		/* Hardware queue specific attributes */
-	IFLA_VF_VLAN,
-	IFLA_VF_TX_RATE,	/* TX Bandwidth Allocation */
-	IFLA_VFINFO,
+	IFLA_VFINFO_LIST,
 	__IFLA_MAX
 };
 
@@ -203,6 +200,24 @@ enum macvlan_mode {
 
 /* SR-IOV virtual function management section */
 
+enum {
+	IFLA_VF_INFO_UNSPEC,
+	IFLA_VF_INFO,
+	__IFLA_VF_INFO_MAX,
+};
+
+#define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1)
+
+enum {
+	IFLA_VF_UNSPEC,
+	IFLA_VF_MAC,		/* Hardware queue specific attributes */
+	IFLA_VF_VLAN,
+	IFLA_VF_TX_RATE,	/* TX Bandwidth Allocation */
+	__IFLA_VF_MAX,
+};
+
+#define IFLA_VF_MAX (__IFLA_VF_MAX - 1)
+
 struct ifla_vf_mac {
 	__u32 vf;
 	__u8 mac[32]; /* MAX_ADDR_LEN */
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fe776c9ddeca..31e85d327aa2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -602,12 +602,19 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
 	a->tx_compressed = b->tx_compressed;
 };
 
+/* All VF info */
 static inline int rtnl_vfinfo_size(const struct net_device *dev)
 {
-	if (dev->dev.parent && dev_is_pci(dev->dev.parent))
-		return dev_num_vf(dev->dev.parent) *
-			sizeof(struct ifla_vf_info);
-	else
+	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
+
+		int num_vfs = dev_num_vf(dev->dev.parent);
+		size_t size = nlmsg_total_size(sizeof(struct nlattr));
+		size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
+		size += num_vfs * (sizeof(struct ifla_vf_mac) +
+				   sizeof(struct ifla_vf_vlan) +
+				   sizeof(struct ifla_vf_tx_rate));
+		return size;
+	} else
 		return 0;
 }
 
@@ -629,7 +636,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(4) /* IFLA_NUM_VF */
-	       + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
+	       + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
 	       + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
 }
 
@@ -700,14 +707,37 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 
 	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
 		int i;
-		struct ifla_vf_info ivi;
 
-		NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
-		for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
+		struct nlattr *vfinfo, *vf;
+		int num_vfs = dev_num_vf(dev->dev.parent);
+
+		NLA_PUT_U32(skb, IFLA_NUM_VF, num_vfs);
+		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
+		if (!vfinfo)
+			goto nla_put_failure;
+		for (i = 0; i < num_vfs; i++) {
+			struct ifla_vf_info ivi;
+			struct ifla_vf_mac vf_mac;
+			struct ifla_vf_vlan vf_vlan;
+			struct ifla_vf_tx_rate vf_tx_rate;
 			if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
 				break;
-			NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
+			vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf;
+			memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
+			vf_vlan.vlan = ivi.vlan;
+			vf_vlan.qos = ivi.qos;
+			vf_tx_rate.rate = ivi.tx_rate;
+			vf = nla_nest_start(skb, IFLA_VF_INFO);
+			if (!vf) {
+				nla_nest_cancel(skb, vfinfo);
+				goto nla_put_failure;
+			}
+			NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
+			NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan),
&vf_vlan); + NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate); + nla_nest_end(skb, vf); } + nla_nest_end(skb, vfinfo); } if (dev->rtnl_link_ops) { if (rtnl_link_fill(skb, dev) < 0) @@ -769,12 +799,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_LINKINFO] = { .type = NLA_NESTED }, [IFLA_NET_NS_PID] = { .type = NLA_U32 }, [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, - [IFLA_VF_MAC] = { .type = NLA_BINARY, - .len = sizeof(struct ifla_vf_mac) }, - [IFLA_VF_VLAN] = { .type = NLA_BINARY, - .len = sizeof(struct ifla_vf_vlan) }, - [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, - .len = sizeof(struct ifla_vf_tx_rate) }, + [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, }; EXPORT_SYMBOL(ifla_policy); @@ -783,6 +808,19 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { [IFLA_INFO_DATA] = { .type = NLA_NESTED }, }; +static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { + [IFLA_VF_INFO] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { + [IFLA_VF_MAC] = { .type = NLA_BINARY, + .len = sizeof(struct ifla_vf_mac) }, + [IFLA_VF_VLAN] = { .type = NLA_BINARY, + .len = sizeof(struct ifla_vf_vlan) }, + [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, + .len = sizeof(struct ifla_vf_tx_rate) }, +}; + struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) { struct net *net; @@ -812,6 +850,52 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) return 0; } +static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) +{ + int rem, err = -EINVAL; + struct nlattr *vf; + const struct net_device_ops *ops = dev->netdev_ops; + + nla_for_each_nested(vf, attr, rem) { + switch (nla_type(vf)) { + case IFLA_VF_MAC: { + struct ifla_vf_mac *ivm; + ivm = nla_data(vf); + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_mac) + err = ops->ndo_set_vf_mac(dev, ivm->vf, + ivm->mac); + break; + } + case IFLA_VF_VLAN: { + struct ifla_vf_vlan *ivv; + ivv = nla_data(vf); + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_vlan) + err = ops->ndo_set_vf_vlan(dev, ivv->vf, + ivv->vlan, + ivv->qos); + break; + } + case IFLA_VF_TX_RATE: { + struct ifla_vf_tx_rate *ivt; + ivt = nla_data(vf); + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_tx_rate) + err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, + ivt->rate); + break; + } + default: + err = -EINVAL; + break; + } + if (err) + break; + } + return err; +} + static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, struct nlattr **tb, char *ifname, int modified) { @@ -942,40 +1026,17 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, write_unlock_bh(&dev_base_lock); } - if (tb[IFLA_VF_MAC]) { - struct ifla_vf_mac *ivm; - ivm = nla_data(tb[IFLA_VF_MAC]); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_mac) - err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac); - if (err < 0) - goto errout; - modified = 1; - } - - if (tb[IFLA_VF_VLAN]) { - struct ifla_vf_vlan *ivv; - ivv = nla_data(tb[IFLA_VF_VLAN]); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_vlan) - err = ops->ndo_set_vf_vlan(dev, ivv->vf, - ivv->vlan, - ivv->qos); - if (err < 0) - goto errout; - modified = 1; - } - err = 0; - - if (tb[IFLA_VF_TX_RATE]) { - struct ifla_vf_tx_rate *ivt; - ivt = nla_data(tb[IFLA_VF_TX_RATE]); - err = -EOPNOTSUPP; - if (ops->ndo_set_vf_tx_rate) - err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate); - if (err < 0) - goto errout; - modified = 1; + if (tb[IFLA_VFINFO_LIST]) { + struct nlattr *attr; + int rem; + nla_for_each_nested(attr, 
tb[IFLA_VFINFO_LIST], rem) { + if (nla_type(attr) != IFLA_VF_INFO) + goto errout; + err = do_setvfinfo(dev, attr); + if (err < 0) + goto errout; + modified = 1; + } } err = 0; -- cgit v1.2.3 From f3d46f9d3194e0329216002a8724d4c0957abc79 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 17 May 2010 14:33:53 +1000 Subject: atomic_t: Cast to volatile when accessing atomic variables In preparation for removing volatile from the atomic_t definition, this patch adds a volatile cast to all the atomic read functions. Signed-off-by: Anton Blanchard Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/atomic.h | 4 ++-- arch/arm/include/asm/atomic.h | 2 +- arch/avr32/include/asm/atomic.h | 2 +- arch/cris/include/asm/atomic.h | 2 +- arch/frv/include/asm/atomic.h | 2 +- arch/h8300/include/asm/atomic.h | 2 +- arch/ia64/include/asm/atomic.h | 4 ++-- arch/m32r/include/asm/atomic.h | 2 +- arch/m68k/include/asm/atomic_mm.h | 2 +- arch/m68k/include/asm/atomic_no.h | 2 +- arch/mips/include/asm/atomic.h | 4 ++-- arch/mn10300/include/asm/atomic.h | 2 +- arch/parisc/include/asm/atomic.h | 4 ++-- arch/sh/include/asm/atomic.h | 2 +- arch/sparc/include/asm/atomic_32.h | 2 +- arch/sparc/include/asm/atomic_64.h | 4 ++-- arch/x86/include/asm/atomic.h | 2 +- arch/x86/include/asm/atomic64_64.h | 2 +- arch/xtensa/include/asm/atomic.h | 2 +- include/asm-generic/atomic.h | 2 +- 20 files changed, 25 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 610dff44d94b..e756d04b6cd5 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -17,8 +17,8 @@ #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) #define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) -#define atomic_read(v) ((v)->counter + 0) -#define atomic64_read(v) ((v)->counter + 0) +#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) #define atomic64_set(v,i) ((v)->counter = (i)) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index e8ddec2cb158..a0162fa94564 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -24,7 +24,7 @@ * strex/ldrex monitor on some implementations. The reason we can use it for * atomic_set() is the clrex or dummy strex done on every exception return. */ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) #if __LINUX_ARM_ARCH__ >= 6 diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index b131c27ddf57..bbce6a1c6bb6 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -19,7 +19,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) /* diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index a6aca819e9f3..88dc9b9c4ba0 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h @@ -15,7 +15,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) /* These should be written in asm but we do it in C for now. 
*/ diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 00a57af79afc..fae32c7fdcb6 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -36,7 +36,7 @@ #define smp_mb__after_atomic_inc() barrier() #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = (i)) #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index 33c8c0fa9583..e936804b7508 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -10,7 +10,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) #include diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 88405cb0832a..4e1948447a00 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -21,8 +21,8 @@ #define ATOMIC_INIT(i) ((atomic_t) { (i) }) #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) -#define atomic_read(v) ((v)->counter) -#define atomic64_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) #define atomic64_set(v,i) (((v)->counter) = (i)) diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 63f0cf0f50dd..d44a51e5271b 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -26,7 +26,7 @@ * * Atomically reads the value of @v. */ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /** * atomic_set - set atomic variable diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h index d9d2ed647435..6a223b3f7e74 100644 --- a/arch/m68k/include/asm/atomic_mm.h +++ b/arch/m68k/include/asm/atomic_mm.h @@ -15,7 +15,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) static inline void atomic_add(int i, atomic_t *v) diff --git a/arch/m68k/include/asm/atomic_no.h b/arch/m68k/include/asm/atomic_no.h index 5674cb9449bd..289310c63a8a 100644 --- a/arch/m68k/include/asm/atomic_no.h +++ b/arch/m68k/include/asm/atomic_no.h @@ -15,7 +15,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) static __inline__ void atomic_add(int i, atomic_t *v) diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 519197ede089..59dc0c7ef733 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -29,7 +29,7 @@ * * Atomically reads the value of @v. 
*/ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /* * atomic_set - set atomic variable @@ -410,7 +410,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) * @v: pointer of type atomic64_t * */ -#define atomic64_read(v) ((v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) /* * atomic64_set - set atomic variable diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 5bf5be9566de..e41222d6c2fd 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -31,7 +31,7 @@ * Atomically reads the value of @v. Note that the guaranteed * useful range of an atomic_t is only 24 bits. */ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /** * atomic_set - set atomic variable diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 716634d1f546..f81955934aeb 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -189,7 +189,7 @@ static __inline__ void atomic_set(atomic_t *v, int i) static __inline__ int atomic_read(const atomic_t *v) { - return v->counter; + return (*(volatile int *)&(v)->counter); } /* exported interface */ @@ -286,7 +286,7 @@ atomic64_set(atomic64_t *v, s64 i) static __inline__ s64 atomic64_read(const atomic64_t *v) { - return v->counter; + return (*(volatile long *)&(v)->counter); } #define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v)))) diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 275a448ae8c2..c7983124d99d 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -13,7 +13,7 @@ #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) #if defined(CONFIG_GUSA_RB) diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index f0d343c3b956..7ae128b19d3f 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -25,7 +25,7 @@ extern int atomic_cmpxchg(atomic_t *, int, int); extern int atomic_add_unless(atomic_t *, int, int); extern void atomic_set(atomic_t *, int); -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v))) #define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v))) diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index f2e48009989e..2050ca02c423 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -13,8 +13,8 @@ #define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) -#define atomic64_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) #define atomic64_set(v, i) (((v)->counter) = i) diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 8f8217b9bdac..37b39d27abe0 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -22,7 +22,7 @@ */ static inline int atomic_read(const atomic_t *v) { - return v->counter; + return (*(volatile int *)&(v)->counter); } /** diff --git a/arch/x86/include/asm/atomic64_64.h 
b/arch/x86/include/asm/atomic64_64.h index 51c5b4056929..b014e235ea8d 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -18,7 +18,7 @@ */ static inline long atomic64_read(const atomic64_t *v) { - return v->counter; + return (*(volatile long *)&(v)->counter); } /** diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 22d6dde42619..a96a0619d0b7 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -46,7 +46,7 @@ * * Atomically reads the value of @v. */ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /** * atomic_set - set atomic variable diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index c99c64dc5f3d..c33749f95b32 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -33,7 +33,7 @@ * Atomically reads the value of @v. Note that the guaranteed * useful range of an atomic_t is only 24 bits. */ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /** * atomic_set - set atomic variable -- cgit v1.2.3 From 81880d603d00c645e0890d0a44d50711c503b72b Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 17 May 2010 14:34:57 +1000 Subject: atomic_t: Remove volatile from atomic_t definition When looking at a performance problem on PowerPC, I noticed some awful code generation: c00000000051fc98: 3b 60 00 01 li r27,1 ... c00000000051fca0: 3b 80 00 00 li r28,0 ... c00000000051fcdc: 93 61 00 70 stw r27,112(r1) c00000000051fce0: 93 81 00 74 stw r28,116(r1) c00000000051fce4: 81 21 00 70 lwz r9,112(r1) c00000000051fce8: 80 01 00 74 lwz r0,116(r1) c00000000051fcec: 7d 29 07 b4 extsw r9,r9 c00000000051fcf0: 7c 00 07 b4 extsw r0,r0 c00000000051fcf4: 7c 20 04 ac lwsync c00000000051fcf8: 7d 60 f8 28 lwarx r11,0,r31 c00000000051fcfc: 7c 0b 48 00 cmpw r11,r9 c00000000051fd00: 40 c2 00 10 bne- c00000000051fd10 c00000000051fd04: 7c 00 f9 2d stwcx. r0,0,r31 c00000000051fd08: 40 c2 ff f0 bne+ c00000000051fcf8 c00000000051fd0c: 4c 00 01 2c isync We create two constants, write them out to the stack, read them straight back in and sign extend them. What a mess. It turns out this bad code is a result of us defining atomic_t as a volatile int. We removed the volatile attribute from the powerpc atomic_t definition years ago, but commit ea435467500612636f8f4fb639ff6e76b2496e4b (atomic_t: unify all arch definitions) added it back in. To dig up an old quote from Linus: > The fact is, volatile on data structures is a bug. It's a wart in the C > language. It shouldn't be used. > > Volatile accesses in *code* can be ok, and if we have "atomic_read()" > expand to a "*(volatile int *)&(x)->value", then I'd be ok with that. > > But marking data structures volatile just makes the compiler screw up > totally, and makes code for initialization sequences etc much worse. And screw up it does :) With the volatile removed, we see much more reasonable code generation: c00000000051f5b8: 3b 60 00 01 li r27,1 ... c00000000051f5c0: 3b 80 00 00 li r28,0 ... c00000000051fc7c: 7c 20 04 ac lwsync c00000000051fc80: 7c 00 f8 28 lwarx r0,0,r31 c00000000051fc84: 7c 00 d8 00 cmpw r0,r27 c00000000051fc88: 40 c2 00 10 bne- c00000000051fc98 c00000000051fc8c: 7f 80 f9 2d stwcx. r28,0,r31 c00000000051fc90: 40 c2 ff f0 bne+ c00000000051fc80 c00000000051fc94: 4c 00 01 2c isync Six instructions less. 
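For illustration only, here is the same idiom in a minimal, self-contained user-space form (the type and macro names here are hypothetical; this sketch is not part of the patch): keep the structure member plain, and force a volatile access only at the read site.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's atomic_t */
typedef struct { int counter; } my_atomic_t;

/* Volatile cast at the access site only, mirroring the new atomic_read() */
#define my_atomic_read(v)   (*(volatile int *)&(v)->counter)
#define my_atomic_set(v, i) ((v)->counter = (i))

int main(void)
{
    my_atomic_t a;

    my_atomic_set(&a, 42);              /* plain store, freely optimizable */
    printf("%d\n", my_atomic_read(&a)); /* forced load from memory */
    return 0;
}

Because only the explicit read is forced to memory, initialization sequences and ordinary arithmetic on the structure remain fully optimizable by the compiler.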
Signed-off-by: Anton Blanchard Signed-off-by: Linus Torvalds --- include/linux/types.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/types.h b/include/linux/types.h index c42724f8c802..23d237a075e2 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -188,12 +188,12 @@ typedef u32 phys_addr_t; typedef phys_addr_t resource_size_t; typedef struct { - volatile int counter; + int counter; } atomic_t; #ifdef CONFIG_64BIT typedef struct { - volatile long counter; + long counter; } atomic64_t; #endif -- cgit v1.2.3 From 0b7f1a7efb38b551f5948a13d0b36e876ba536db Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 28 Jan 2009 21:01:02 +0100 Subject: platform: Make platform resource input parameters const Make the platform resource input parameters of platform_device_add_resources() and platform_device_register_simple() const, as the resources are copied and never modified. Signed-off-by: Geert Uytterhoeven Acked-by: Greg Kroah-Hartman --- drivers/base/platform.c | 4 ++-- include/linux/platform_device.h | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 4b4b565c835f..c5fbe198fbdb 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(platform_device_alloc); * released. */ int platform_device_add_resources(struct platform_device *pdev, - struct resource *res, unsigned int num) + const struct resource *res, unsigned int num) { struct resource *r; @@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister); */ struct platform_device *platform_device_register_simple(const char *name, int id, - struct resource *res, + const struct resource *res, unsigned int num) { struct platform_device *pdev; diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 212da17d06af..5417944d3687 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -44,12 +44,14 @@ extern int platform_get_irq_byname(struct platform_device *, const char *); extern int platform_add_devices(struct platform_device **, int); extern struct platform_device *platform_device_register_simple(const char *, int id, - struct resource *, unsigned int); + const struct resource *, unsigned int); extern struct platform_device *platform_device_register_data(struct device *, const char *, int, const void *, size_t); extern struct platform_device *platform_device_alloc(const char *name, int id); -extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num); +extern int platform_device_add_resources(struct platform_device *pdev, + const struct resource *res, + unsigned int num); extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); -- cgit v1.2.3 From bf54a2b3c0dbf76136f00ff785bf6d8f6291311d Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 18 Nov 2008 21:13:53 +0100 Subject: m68k: amiga - Zorro bus modalias support Add Amiga Zorro bus modalias and uevent support Signed-off-by: Geert Uytterhoeven --- drivers/net/a2065.c | 1 + drivers/net/ariadne.c | 1 + drivers/net/hydra.c | 1 + drivers/net/zorro8390.c | 1 + drivers/scsi/zorro7xx.c | 1 + drivers/video/cirrusfb.c | 1 + drivers/video/fm2fb.c | 1 + drivers/zorro/zorro-driver.c | 24 
++++++++++++++++++++++++ drivers/zorro/zorro-sysfs.c | 11 +++++++++++ include/linux/mod_devicetable.h | 9 +++++++++ include/linux/zorro.h | 13 +------------ scripts/mod/file2alias.c | 14 ++++++++++++++ 12 files changed, 66 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index ed5e9742be2c..a8f0512bad38 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c @@ -674,6 +674,7 @@ static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = { { ZORRO_PROD_AMERISTAR_A2065 }, { 0 } }; +MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl); static struct zorro_driver a2065_driver = { .name = "a2065", diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index fa1a2354f5f9..4b30a46486e2 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c @@ -145,6 +145,7 @@ static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = { { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, { 0 } }; +MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl); static struct zorro_driver ariadne_driver = { .name = "ariadne", diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index 24724b4ad709..07d8e5b634f3 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c @@ -71,6 +71,7 @@ static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = { { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, { 0 } }; +MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl); static struct zorro_driver hydra_driver = { .name = "hydra", diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index 81c753a617ab..9548cbb5012a 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c @@ -102,6 +102,7 @@ static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = { { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, { 0 } }; +MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl); static struct zorro_driver zorro8390_driver = { .name = "zorro8390", diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c index 105449c15fa9..e17764d71476 100644 --- a/drivers/scsi/zorro7xx.c +++ b/drivers/scsi/zorro7xx.c @@ -69,6 +69,7 @@ static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = { }, { 0 } }; +MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl); static int __devinit zorro7xx_init_one(struct zorro_dev *z, const struct zorro_device_id *ent) diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c index 8d8dfda2f868..6df7c54db0a3 100644 --- a/drivers/video/cirrusfb.c +++ b/drivers/video/cirrusfb.c @@ -299,6 +299,7 @@ static const struct zorro_device_id cirrusfb_zorro_table[] = { }, { 0 } }; +MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table); static const struct { zorro_id id2; diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c index 6c91c61cdb63..1b0feb8e7244 100644 --- a/drivers/video/fm2fb.c +++ b/drivers/video/fm2fb.c @@ -219,6 +219,7 @@ static struct zorro_device_id fm2fb_devices[] __devinitdata = { { ZORRO_PROD_HELFRICH_RAINBOW_II }, { 0 } }; +MODULE_DEVICE_TABLE(zorro, fm2fb_devices); static struct zorro_driver fm2fb_driver = { .name = "fm2fb", diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c index 53180a37cc9a..7ee2b6e71786 100644 --- a/drivers/zorro/zorro-driver.c +++ b/drivers/zorro/zorro-driver.c @@ -137,10 +137,34 @@ static int zorro_bus_match(struct device *dev, struct device_driver *drv) return 0; } +static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env) +{ +#ifdef CONFIG_HOTPLUG + struct zorro_dev *z; + + if (!dev) + return -ENODEV; + + z = to_zorro_dev(dev); + if (!z) + return -ENODEV; + + if (add_uevent_var(env, 
"ZORRO_ID=%08X", z->id) || + add_uevent_var(env, "ZORRO_SLOT_NAME=%s", dev_name(dev)) || + add_uevent_var(env, "ZORRO_SLOT_ADDR=%04X", z->slotaddr) || + add_uevent_var(env, "MODALIAS=" ZORRO_DEVICE_MODALIAS_FMT, z->id)) + return -ENOMEM; + + return 0; +#else /* !CONFIG_HOTPLUG */ + return -ENODEV; +#endif /* !CONFIG_HOTPLUG */ +} struct bus_type zorro_bus_type = { .name = "zorro", .match = zorro_bus_match, + .uevent = zorro_uevent, .probe = zorro_device_probe, .remove = zorro_device_remove, }; diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c index 1d2a772ea14c..eb924e0a64ce 100644 --- a/drivers/zorro/zorro-sysfs.c +++ b/drivers/zorro/zorro-sysfs.c @@ -77,6 +77,16 @@ static struct bin_attribute zorro_config_attr = { .read = zorro_read_config, }; +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct zorro_dev *z = to_zorro_dev(dev); + + return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id); +} + +static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL); + int zorro_create_sysfs_dev_files(struct zorro_dev *z) { struct device *dev = &z->dev; @@ -89,6 +99,7 @@ int zorro_create_sysfs_dev_files(struct zorro_dev *z) (error = device_create_file(dev, &dev_attr_slotaddr)) || (error = device_create_file(dev, &dev_attr_slotsize)) || (error = device_create_file(dev, &dev_attr_resource)) || + (error = device_create_file(dev, &dev_attr_modalias)) || (error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr))) return error; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index f58e9d836f32..56fde4364e4c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -474,4 +474,13 @@ struct platform_device_id { __attribute__((aligned(sizeof(kernel_ulong_t)))); }; +struct zorro_device_id { + __u32 id; /* Device ID or ZORRO_WILDCARD */ + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + +#define ZORRO_WILDCARD (0xffffffff) /* not official */ + +#define ZORRO_DEVICE_MODALIAS_FMT "zorro:i%08X" + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/zorro.h b/include/linux/zorro.h index 913bfc226dda..908db1b36d6c 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h @@ -38,8 +38,6 @@ typedef __u32 zorro_id; -#define ZORRO_WILDCARD (0xffffffff) /* not official */ - /* Include the ID list */ #include @@ -116,6 +114,7 @@ struct ConfigDev { #include #include +#include #include @@ -154,16 +153,6 @@ extern struct zorro_bus zorro_bus; /* single Zorro bus */ extern struct bus_type zorro_bus_type; - /* - * Zorro device IDs - */ - -struct zorro_device_id { - zorro_id id; /* Device ID or ZORRO_WILDCARD */ - unsigned long driver_data; /* Data private to the driver */ -}; - - /* * Zorro device drivers */ diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 220213e603db..df90f31d14bf 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -796,6 +796,16 @@ static int do_platform_entry(const char *filename, return 1; } +/* Looks like: zorro:iN. */ +static int do_zorro_entry(const char *filename, struct zorro_device_id *id, + char *alias) +{ + id->id = TO_NATIVE(id->id); + strcpy(alias, "zorro:"); + ADD(alias, "i", id->id != ZORRO_WILDCARD, id->id); + return 1; +} + /* Ignore any prefix, eg. 
some architectures prepend _ */ static inline int sym_is(const char *symbol, const char *name) { @@ -943,6 +953,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, do_table(symval, sym->st_size, sizeof(struct platform_device_id), "platform", do_platform_entry, mod); + else if (sym_is(symname, "__mod_zorro_device_table")) + do_table(symval, sym->st_size, + sizeof(struct zorro_device_id), "zorro", + do_zorro_entry, mod); free(zeros); } -- cgit v1.2.3 From 0d305464aefff342c85b4db8b3d7a4345246e5a1 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Sun, 5 Apr 2009 12:40:41 +0200 Subject: m68k: amiga - Zorro host bridge platform device conversion Signed-off-by: Geert Uytterhoeven --- arch/m68k/amiga/Makefile | 2 +- arch/m68k/amiga/platform.c | 58 +++++++++++ drivers/zorro/proc.c | 6 +- drivers/zorro/zorro.c | 243 ++++++++++++++++++++++++--------------------- include/linux/zorro.h | 9 -- 5 files changed, 190 insertions(+), 128 deletions(-) create mode 100644 arch/m68k/amiga/platform.c (limited to 'include') diff --git a/arch/m68k/amiga/Makefile b/arch/m68k/amiga/Makefile index 6a0d7650f980..11dd30b16b3b 100644 --- a/arch/m68k/amiga/Makefile +++ b/arch/m68k/amiga/Makefile @@ -2,6 +2,6 @@ # Makefile for Linux arch/m68k/amiga source directory # -obj-y := config.o amiints.o cia.o chipram.o amisound.o +obj-y := config.o amiints.o cia.o chipram.o amisound.o platform.o obj-$(CONFIG_AMIGA_PCMCIA) += pcmcia.o diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c new file mode 100644 index 000000000000..33a7669b4415 --- /dev/null +++ b/arch/m68k/amiga/platform.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2007-2009 Geert Uytterhoeven + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#include +#include +#include + +#include + + +#ifdef CONFIG_ZORRO + +static const struct resource zorro_resources[] __initconst = { + /* Zorro II regions (on Zorro II/III) */ + { + .name = "Zorro II exp", + .start = 0x00e80000, + .end = 0x00efffff, + .flags = IORESOURCE_MEM, + }, { + .name = "Zorro II mem", + .start = 0x00200000, + .end = 0x009fffff, + .flags = IORESOURCE_MEM, + }, + /* Zorro III regions (on Zorro III only) */ + { + .name = "Zorro III exp", + .start = 0xff000000, + .end = 0xffffffff, + .flags = IORESOURCE_MEM, + }, { + .name = "Zorro III cfg", + .start = 0x40000000, + .end = 0x7fffffff, + .flags = IORESOURCE_MEM, + } +}; + + +static int __init amiga_init_bus(void) +{ + if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO)) + return -ENODEV; + + platform_device_register_simple("amiga-zorro", -1, zorro_resources, + AMIGAHW_PRESENT(ZORRO3) ? 
4 : 2); + return 0; +} + +subsys_initcall(amiga_init_bus); + +#endif /* CONFIG_ZORRO */ diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c index d47c47fc048f..3c7046d79654 100644 --- a/drivers/zorro/proc.c +++ b/drivers/zorro/proc.c @@ -97,7 +97,7 @@ static void zorro_seq_stop(struct seq_file *m, void *v) static int zorro_seq_show(struct seq_file *m, void *v) { - u_int slot = *(loff_t *)v; + unsigned int slot = *(loff_t *)v; struct zorro_dev *z = &zorro_autocon[slot]; seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id, @@ -129,7 +129,7 @@ static const struct file_operations zorro_devices_proc_fops = { static struct proc_dir_entry *proc_bus_zorro_dir; -static int __init zorro_proc_attach_device(u_int slot) +static int __init zorro_proc_attach_device(unsigned int slot) { struct proc_dir_entry *entry; char name[4]; @@ -146,7 +146,7 @@ static int __init zorro_proc_attach_device(u_int slot) static int __init zorro_proc_init(void) { - u_int slot; + unsigned int slot; if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL); diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c index d45fb34e2d23..6455f3a244c5 100644 --- a/drivers/zorro/zorro.c +++ b/drivers/zorro/zorro.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include #include @@ -26,24 +28,17 @@ * Zorro Expansion Devices */ -u_int zorro_num_autocon = 0; +unsigned int zorro_num_autocon; struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; /* - * Single Zorro bus + * Zorro bus */ -struct zorro_bus zorro_bus = {\ - .resources = { - /* Zorro II regions (on Zorro II/III) */ - { .name = "Zorro II exp", .start = 0x00e80000, .end = 0x00efffff }, - { .name = "Zorro II mem", .start = 0x00200000, .end = 0x009fffff }, - /* Zorro III regions (on Zorro III only) */ - { .name = "Zorro III exp", .start = 0xff000000, .end = 0xffffffff }, - { .name = "Zorro III cfg", .start = 0x40000000, .end = 0x7fffffff } - }, - .name = "Zorro bus" +struct zorro_bus { + struct list_head devices; /* list of devices on this bus */ + struct device dev; }; @@ -53,18 +48,19 @@ struct zorro_bus zorro_bus = {\ struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from) { - struct zorro_dev *z; + struct zorro_dev *z; - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO)) - return NULL; + if (!zorro_num_autocon) + return NULL; - for (z = from ? from+1 : &zorro_autocon[0]; - z < zorro_autocon+zorro_num_autocon; - z++) - if (id == ZORRO_WILDCARD || id == z->id) - return z; - return NULL; + for (z = from ? from+1 : &zorro_autocon[0]; + z < zorro_autocon+zorro_num_autocon; + z++) + if (id == ZORRO_WILDCARD || id == z->id) + return z; + return NULL; } +EXPORT_SYMBOL(zorro_find_device); /* @@ -83,121 +79,138 @@ struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from) */ DECLARE_BITMAP(zorro_unused_z2ram, 128); +EXPORT_SYMBOL(zorro_unused_z2ram); static void __init mark_region(unsigned long start, unsigned long end, int flag) { - if (flag) - start += Z2RAM_CHUNKMASK; - else - end += Z2RAM_CHUNKMASK; - start &= ~Z2RAM_CHUNKMASK; - end &= ~Z2RAM_CHUNKMASK; - - if (end <= Z2RAM_START || start >= Z2RAM_END) - return; - start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START; - end = end > Z2RAM_END ? 
Z2RAM_SIZE : end-Z2RAM_START; - while (start < end) { - u32 chunk = start>>Z2RAM_CHUNKSHIFT; if (flag) - set_bit(chunk, zorro_unused_z2ram); + start += Z2RAM_CHUNKMASK; else - clear_bit(chunk, zorro_unused_z2ram); - start += Z2RAM_CHUNKSIZE; - } + end += Z2RAM_CHUNKMASK; + start &= ~Z2RAM_CHUNKMASK; + end &= ~Z2RAM_CHUNKMASK; + + if (end <= Z2RAM_START || start >= Z2RAM_END) + return; + start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START; + end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START; + while (start < end) { + u32 chunk = start>>Z2RAM_CHUNKSHIFT; + if (flag) + set_bit(chunk, zorro_unused_z2ram); + else + clear_bit(chunk, zorro_unused_z2ram); + start += Z2RAM_CHUNKSIZE; + } } -static struct resource __init *zorro_find_parent_resource(struct zorro_dev *z) +static struct resource __init *zorro_find_parent_resource( + struct platform_device *bridge, struct zorro_dev *z) { - int i; + int i; - for (i = 0; i < zorro_bus.num_resources; i++) - if (zorro_resource_start(z) >= zorro_bus.resources[i].start && - zorro_resource_end(z) <= zorro_bus.resources[i].end) - return &zorro_bus.resources[i]; - return &iomem_resource; + for (i = 0; i < bridge->num_resources; i++) { + struct resource *r = &bridge->resource[i]; + if (zorro_resource_start(z) >= r->start && + zorro_resource_end(z) <= r->end) + return r; + } + return &iomem_resource; } - /* - * Initialization - */ -static int __init zorro_init(void) +static int __init amiga_zorro_probe(struct platform_device *pdev) { - struct zorro_dev *z; - unsigned int i; - int error; - - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO)) - return 0; - - pr_info("Zorro: Probing AutoConfig expansion devices: %d device%s\n", - zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); - - /* Initialize the Zorro bus */ - INIT_LIST_HEAD(&zorro_bus.devices); - dev_set_name(&zorro_bus.dev, "zorro"); - error = device_register(&zorro_bus.dev); - if (error) { - pr_err("Zorro: Error registering zorro_bus\n"); - return error; - } - - /* Request the resources */ - zorro_bus.num_resources = AMIGAHW_PRESENT(ZORRO3) ? 
4 : 2; - for (i = 0; i < zorro_bus.num_resources; i++) - request_resource(&iomem_resource, &zorro_bus.resources[i]); - - /* Register all devices */ - for (i = 0; i < zorro_num_autocon; i++) { - z = &zorro_autocon[i]; - z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); - if (z->id == ZORRO_PROD_GVP_EPC_BASE) { - /* GVP quirk */ - unsigned long magic = zorro_resource_start(z)+0x8000; - z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK; - } - sprintf(z->name, "Zorro device %08x", z->id); - zorro_name_device(z); - z->resource.name = z->name; - if (request_resource(zorro_find_parent_resource(z), &z->resource)) - pr_err("Zorro: Address space collision on device %s %pR\n", - z->name, &z->resource); - dev_set_name(&z->dev, "%02x", i); - z->dev.parent = &zorro_bus.dev; - z->dev.bus = &zorro_bus_type; - error = device_register(&z->dev); + struct zorro_bus *bus; + struct zorro_dev *z; + struct resource *r; + unsigned int i; + int error; + + /* Initialize the Zorro bus */ + bus = kzalloc(sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + + INIT_LIST_HEAD(&bus->devices); + bus->dev.parent = &pdev->dev; + dev_set_name(&bus->dev, "zorro"); + error = device_register(&bus->dev); if (error) { - pr_err("Zorro: Error registering device %s\n", z->name); - continue; + pr_err("Zorro: Error registering zorro_bus\n"); + kfree(bus); + return error; } - error = zorro_create_sysfs_dev_files(z); - if (error) - dev_err(&z->dev, "Error creating sysfs files\n"); - } - - /* Mark all available Zorro II memory */ - zorro_for_each_dev(z) { - if (z->rom.er_Type & ERTF_MEMLIST) - mark_region(zorro_resource_start(z), zorro_resource_end(z)+1, 1); - } - - /* Unmark all used Zorro II memory */ - for (i = 0; i < m68k_num_memory; i++) - if (m68k_memory[i].addr < 16*1024*1024) - mark_region(m68k_memory[i].addr, - m68k_memory[i].addr+m68k_memory[i].size, 0); - - return 0; + platform_set_drvdata(pdev, bus); + + /* Register all devices */ + pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n", + zorro_num_autocon, zorro_num_autocon == 1 ? 
"" : "s"); + + for (i = 0; i < zorro_num_autocon; i++) { + z = &zorro_autocon[i]; + z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); + if (z->id == ZORRO_PROD_GVP_EPC_BASE) { + /* GVP quirk */ + unsigned long magic = zorro_resource_start(z)+0x8000; + z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK; + } + sprintf(z->name, "Zorro device %08x", z->id); + zorro_name_device(z); + z->resource.name = z->name; + r = zorro_find_parent_resource(pdev, z); + error = request_resource(r, &z->resource); + if (error) + dev_err(&bus->dev, + "Address space collision on device %s %pR\n", + z->name, &z->resource); + dev_set_name(&z->dev, "%02x", i); + z->dev.parent = &bus->dev; + z->dev.bus = &zorro_bus_type; + error = device_register(&z->dev); + if (error) { + dev_err(&bus->dev, "Error registering device %s\n", + z->name); + continue; + } + error = zorro_create_sysfs_dev_files(z); + if (error) + dev_err(&z->dev, "Error creating sysfs files\n"); + } + + /* Mark all available Zorro II memory */ + zorro_for_each_dev(z) { + if (z->rom.er_Type & ERTF_MEMLIST) + mark_region(zorro_resource_start(z), + zorro_resource_end(z)+1, 1); + } + + /* Unmark all used Zorro II memory */ + for (i = 0; i < m68k_num_memory; i++) + if (m68k_memory[i].addr < 16*1024*1024) + mark_region(m68k_memory[i].addr, + m68k_memory[i].addr+m68k_memory[i].size, + 0); + + return 0; } -subsys_initcall(zorro_init); +static struct platform_driver amiga_zorro_driver = { + .driver = { + .name = "amiga-zorro", + .owner = THIS_MODULE, + }, +}; -EXPORT_SYMBOL(zorro_find_device); -EXPORT_SYMBOL(zorro_unused_z2ram); +static int __init amiga_zorro_init(void) +{ + return platform_driver_probe(&amiga_zorro_driver, amiga_zorro_probe); +} + +module_init(amiga_zorro_init); MODULE_LICENSE("GPL"); diff --git a/include/linux/zorro.h b/include/linux/zorro.h index 908db1b36d6c..7bf9db525e9e 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h @@ -141,15 +141,6 @@ struct zorro_dev { * Zorro bus */ -struct zorro_bus { - struct list_head devices; /* list of devices on this bus */ - unsigned int num_resources; /* number of resources */ - struct resource resources[4]; /* address space routed to this bus */ - struct device dev; - char name[10]; -}; - -extern struct zorro_bus zorro_bus; /* single Zorro bus */ extern struct bus_type zorro_bus_type; -- cgit v1.2.3 From 4f41c013f553957765902fb01475972f0af3e8e7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 18 May 2010 18:08:32 +0200 Subject: perf/ftrace: Optimize perf/tracepoint interaction for single events When we've got but a single event per tracepoint there is no reason to try and multiplex it so don't. 
Signed-off-by: Peter Zijlstra Tested-by: Ingo Molnar Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/ftrace_event.h | 8 +++++--- include/linux/perf_event.h | 2 +- include/trace/ftrace.h | 3 ++- kernel/perf_event.c | 15 ++++++++++----- kernel/trace/trace_event_perf.c | 11 +++++++---- kernel/trace/trace_kprobe.c | 4 ++-- kernel/trace/trace_syscalls.c | 6 ++++-- 7 files changed, 31 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index c0f4b364c711..c8091001b943 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -132,6 +132,7 @@ struct ftrace_event_call { void *data; int perf_refcount; + void *perf_data; int (*perf_event_enable)(struct ftrace_event_call *); void (*perf_event_disable)(struct ftrace_event_call *); }; @@ -190,7 +191,7 @@ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -extern int perf_trace_enable(int event_id); +extern int perf_trace_enable(int event_id, void *data); extern void perf_trace_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); @@ -201,11 +202,12 @@ perf_trace_buf_prepare(int size, unsigned short type, int *rctxp, static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, unsigned long irq_flags, struct pt_regs *regs) + u64 count, unsigned long irq_flags, struct pt_regs *regs, + void *event) { struct trace_entry *entry = raw_data; - perf_tp_event(entry->type, addr, count, raw_data, size, regs); + perf_tp_event(entry->type, addr, count, raw_data, size, regs, event); perf_swevent_put_recursion_context(rctx); local_irq_restore(irq_flags); } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3fd5c82e0e18..0b521fc8f5b0 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -994,7 +994,7 @@ static inline bool perf_paranoid_kernel(void) extern void perf_event_init(void); extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size, struct pt_regs *regs); + int entry_size, struct pt_regs *regs, void *event); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 882c64832ffe..0a29df092922 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -785,7 +785,8 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \ { assign; } \ \ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, irq_flags, __regs); \ + __count, irq_flags, __regs, \ + event_call->perf_data); \ } #undef DEFINE_EVENT diff --git a/kernel/perf_event.c b/kernel/perf_event.c index a4fa381db3c2..17ac47f4bce6 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4468,8 +4468,9 @@ static int swevent_hlist_get(struct perf_event *event) #ifdef CONFIG_EVENT_TRACING void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size, struct pt_regs *regs) + int entry_size, struct pt_regs *regs, void *event) { + const int type = PERF_TYPE_TRACEPOINT; struct perf_sample_data data; struct perf_raw_record raw = { .size = entry_size, @@ -4479,9 +4480,13 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, perf_sample_data_init(&data, addr); data.raw = &raw; - /* Trace events already protected against recursion */ - 
do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, - &data, regs); + if (!event) { + do_perf_sw_event(type, event_id, count, 1, &data, regs); + return; + } + + if (perf_swevent_match(event, type, event_id, &data, regs)) + perf_swevent_add(event, count, 1, &data, regs); } EXPORT_SYMBOL_GPL(perf_tp_event); @@ -4514,7 +4519,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); - if (perf_trace_enable(event->attr.config)) + if (perf_trace_enable(event->attr.config, event)) return NULL; event->destroy = tp_perf_event_destroy; diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 0565bb42566f..89b780a7c522 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -27,13 +27,15 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)]) /* Count the events in use (per event id, not per instance) */ static int total_ref_count; -static int perf_trace_event_enable(struct ftrace_event_call *event) +static int perf_trace_event_enable(struct ftrace_event_call *event, void *data) { char *buf; int ret = -ENOMEM; - if (event->perf_refcount++ > 0) + if (event->perf_refcount++ > 0) { + event->perf_data = NULL; return 0; + } if (!total_ref_count) { buf = (char *)alloc_percpu(perf_trace_t); @@ -51,6 +53,7 @@ static int perf_trace_event_enable(struct ftrace_event_call *event) ret = event->perf_event_enable(event); if (!ret) { + event->perf_data = data; total_ref_count++; return 0; } @@ -68,7 +71,7 @@ fail_buf: return ret; } -int perf_trace_enable(int event_id) +int perf_trace_enable(int event_id, void *data) { struct ftrace_event_call *event; int ret = -EINVAL; @@ -77,7 +80,7 @@ int perf_trace_enable(int event_id) list_for_each_entry(event, &ftrace_events, list) { if (event->id == event_id && event->perf_event_enable && try_module_get(event->mod)) { - ret = perf_trace_event_enable(event); + ret = perf_trace_event_enable(event, data); break; } } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index a7514326052b..2d7bf4146be8 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1362,7 +1362,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); - perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs, call->perf_data); } /* Kretprobe profile handler */ @@ -1395,7 +1395,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, - irq_flags, regs); + irq_flags, regs, call->perf_data); } static int probe_perf_enable(struct ftrace_event_call *call) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 4d6d711717f2..9eff1a4b49b9 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -468,7 +468,8 @@ static void perf_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs, + sys_data->enter_event->perf_data); } int perf_sysenter_enable(struct ftrace_event_call *call) @@ -543,7 +544,8 @@ static void 
perf_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs, + sys_data->exit_event->perf_data); } int perf_sysexit_enable(struct ftrace_event_call *call) -- cgit v1.2.3 From ef60777c9abd999db5eb4e338aae3eb593ae8e10 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 18 May 2010 10:50:41 +0200 Subject: perf: Optimize the perf_output() path by removing IRQ-disables Since we can now assume there is only a single writer to each buffer, we can remove the per-cpu lock and use a simple nest count to the same effect. This removes the need to disable IRQs. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 5 +-- kernel/perf_event.c | 94 ++++++++++++++-------------------------------- 2 files changed, 30 insertions(+), 69 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 0b521fc8f5b0..f1f853a9d5eb 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -597,12 +597,12 @@ struct perf_mmap_data { atomic_t events; /* event_id limit */ atomic_long_t head; /* write position */ - atomic_long_t done_head; /* completed head */ - atomic_t lock; /* concurrent writes */ atomic_t wakeup; /* needs a wakeup */ atomic_t lost; /* nr records lost */ + atomic_t nest; /* nested writers */ + long watermark; /* wakeup watermark */ struct perf_event_mmap_page *user_page; @@ -807,7 +807,6 @@ struct perf_output_handle { unsigned long offset; int nmi; int sample; - int locked; }; #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/perf_event.c b/kernel/perf_event.c index ff5d430d45a7..8cf737da3ec4 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2519,8 +2519,6 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) { long max_size = perf_data_size(data); - atomic_set(&data->lock, -1); - if (event->attr.watermark) { data->watermark = min_t(long, max_size, event->attr.wakeup_watermark); @@ -2906,82 +2904,56 @@ static void perf_output_wakeup(struct perf_output_handle *handle) } /* - * Curious locking construct. - * * We need to ensure a later event_id doesn't publish a head when a former - * event_id isn't done writing. However since we need to deal with NMIs we + * event isn't done writing. However since we need to deal with NMIs we * cannot fully serialize things. * - * What we do is serialize between CPUs so we only have to deal with NMI - * nesting on a single CPU. - * * We only publish the head (and generate a wakeup) when the outer-most - * event_id completes. + * event completes.
*/ -static void perf_output_lock(struct perf_output_handle *handle) +static void perf_output_get_handle(struct perf_output_handle *handle) { struct perf_mmap_data *data = handle->data; - int cur, cpu = get_cpu(); - handle->locked = 0; - - for (;;) { - cur = atomic_cmpxchg(&data->lock, -1, cpu); - if (cur == -1) { - handle->locked = 1; - break; - } - if (cur == cpu) - break; - - cpu_relax(); - } + preempt_disable(); + atomic_inc(&data->nest); } -static void perf_output_unlock(struct perf_output_handle *handle) +static void perf_output_put_handle(struct perf_output_handle *handle) { struct perf_mmap_data *data = handle->data; unsigned long head; - int cpu; - - data->done_head = data->head; - - if (!handle->locked) - goto out; again: - /* - * The xchg implies a full barrier that ensures all writes are done - * before we publish the new head, matched by a rmb() in userspace when - * reading this position. - */ - while ((head = atomic_long_xchg(&data->done_head, 0))) - data->user_page->data_head = head; + head = atomic_long_read(&data->head); /* - * NMI can happen here, which means we can miss a done_head update. + * IRQ/NMI can happen here, which means we can miss a head update. */ - cpu = atomic_xchg(&data->lock, -1); - WARN_ON_ONCE(cpu != smp_processor_id()); + if (!atomic_dec_and_test(&data->nest)) + return; /* - * Therefore we have to validate we did not indeed do so. + * Publish the known good head. Rely on the full barrier implied + * by atomic_dec_and_test() order the data->head read and this + * write. */ - if (unlikely(atomic_long_read(&data->done_head))) { - /* - * Since we had it locked, we can lock it again. - */ - while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) - cpu_relax(); + data->user_page->data_head = head; + /* + * Now check if we missed an update, rely on the (compiler) + * barrier in atomic_dec_and_test() to re-read data->head. + */ + if (unlikely(head != atomic_long_read(&data->head))) { + atomic_inc(&data->nest); goto again; } if (atomic_xchg(&data->wakeup, 0)) perf_output_wakeup(handle); -out: - put_cpu(); + + preempt_enable(); } void perf_output_copy(struct perf_output_handle *handle, @@ -3063,7 +3035,7 @@ int perf_output_begin(struct perf_output_handle *handle, if (have_lost) size += sizeof(lost_event); - perf_output_lock(handle); + perf_output_get_handle(handle); do { /* @@ -3083,7 +3055,7 @@ int perf_output_begin(struct perf_output_handle *handle, handle->head = head; if (head - tail > data->watermark) - atomic_set(&data->wakeup, 1); + atomic_inc(&data->wakeup); if (have_lost) { lost_event.header.type = PERF_RECORD_LOST; @@ -3099,7 +3071,7 @@ int perf_output_begin(struct perf_output_handle *handle, fail: atomic_inc(&data->lost); - perf_output_unlock(handle); + perf_output_put_handle(handle); out: rcu_read_unlock(); @@ -3117,11 +3089,11 @@ void perf_output_end(struct perf_output_handle *handle) int events = atomic_inc_return(&data->events); if (events >= wakeup_events) { atomic_sub(wakeup_events, &data->events); - atomic_set(&data->wakeup, 1); + atomic_inc(&data->wakeup); } } - perf_output_unlock(handle); + perf_output_put_handle(handle); rcu_read_unlock(); } @@ -3457,22 +3429,13 @@ static void perf_event_task_output(struct perf_event *event, { struct perf_output_handle handle; struct task_struct *task = task_event->task; - unsigned long flags; int size, ret; - /* - * If this CPU attempts to acquire an rq lock held by a CPU spinning - * in perf_output_lock() from interrupt context, it's game over. 
- */ - local_irq_save(flags); - size = task_event->event_id.header.size; ret = perf_output_begin(&handle, event, size, 0, 0); - if (ret) { - local_irq_restore(flags); + if (ret) return; - } task_event->event_id.pid = perf_event_pid(event, task); task_event->event_id.ppid = perf_event_pid(event, current); @@ -3483,7 +3446,6 @@ static void perf_event_task_output(struct perf_event *event, perf_output_put(&handle, task_event->event_id); perf_output_end(&handle); - local_irq_restore(flags); } static int perf_event_task_match(struct perf_event *event) -- cgit v1.2.3 From fa5881514ef9c9bcb29319aad85cf2d8889d91f1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 18 May 2010 10:54:20 +0200 Subject: perf: Optimize the hotpath by converting the perf output buffer to local_t Since there is now only a single writer, we can use local_t instead and avoid all these pesky LOCK insns. Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 15 +++++++-------- kernel/perf_event.c | 30 +++++++++++++++--------------- 2 files changed, 22 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f1f853a9d5eb..ce7667616fcb 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -485,6 +485,7 @@ struct perf_guest_info_callbacks { #include #include #include +#include <asm/local.h> #define PERF_MAX_STACK_DEPTH 255 @@ -588,20 +589,18 @@ struct perf_mmap_data { #ifdef CONFIG_PERF_USE_VMALLOC struct work_struct work; #endif - int data_order; + int data_order; /* allocation order */ int nr_pages; /* nr of data pages */ int writable; /* are we writable */ int nr_locked; /* nr pages mlocked */ atomic_t poll; /* POLL_ for wakeups */ - atomic_t events; /* event_id limit */ - atomic_long_t head; /* write position */ - - atomic_t wakeup; /* needs a wakeup */ - atomic_t lost; /* nr records lost */ - - atomic_t nest; /* nested writers */ + local_t head; /* write position */ + local_t nest; /* nested writers */ + local_t events; /* event limit */ + local_t wakeup; /* needs a wakeup */ + local_t lost; /* nr records lost */ long watermark; /* wakeup watermark */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 8cf737da3ec4..1f98c78c3343 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2916,7 +2916,7 @@ static void perf_output_get_handle(struct perf_output_handle *handle) struct perf_mmap_data *data = handle->data; preempt_disable(); - atomic_inc(&data->nest); + local_inc(&data->nest); } static void perf_output_put_handle(struct perf_output_handle *handle) @@ -2925,13 +2925,13 @@ static void perf_output_put_handle(struct perf_output_handle *handle) unsigned long head; again: - head = atomic_long_read(&data->head); + head = local_read(&data->head); /* * IRQ/NMI can happen here, which means we can miss a head update. */ - if (!atomic_dec_and_test(&data->nest)) + if (!local_dec_and_test(&data->nest)) return; /* @@ -2945,12 +2945,12 @@ again: * Now check if we missed an update, rely on the (compiler) * barrier in atomic_dec_and_test() to re-read data->head.
*/ - if (unlikely(head != atomic_long_read(&data->head))) { - atomic_inc(&data->nest); + if (unlikely(head != local_read(&data->head))) { + local_inc(&data->nest); goto again; } - if (atomic_xchg(&data->wakeup, 0)) + if (local_xchg(&data->wakeup, 0)) perf_output_wakeup(handle); preempt_enable(); @@ -3031,7 +3031,7 @@ int perf_output_begin(struct perf_output_handle *handle, if (!data->nr_pages) goto out; - have_lost = atomic_read(&data->lost); + have_lost = local_read(&data->lost); if (have_lost) size += sizeof(lost_event); @@ -3045,24 +3045,24 @@ int perf_output_begin(struct perf_output_handle *handle, */ tail = ACCESS_ONCE(data->user_page->data_tail); smp_rmb(); - offset = head = atomic_long_read(&data->head); + offset = head = local_read(&data->head); head += size; if (unlikely(!perf_output_space(data, tail, offset, head))) goto fail; - } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); + } while (local_cmpxchg(&data->head, offset, head) != offset); handle->offset = offset; handle->head = head; if (head - tail > data->watermark) - atomic_inc(&data->wakeup); + local_inc(&data->wakeup); if (have_lost) { lost_event.header.type = PERF_RECORD_LOST; lost_event.header.misc = 0; lost_event.header.size = sizeof(lost_event); lost_event.id = event->id; - lost_event.lost = atomic_xchg(&data->lost, 0); + lost_event.lost = local_xchg(&data->lost, 0); perf_output_put(handle, lost_event); } @@ -3070,7 +3070,7 @@ int perf_output_begin(struct perf_output_handle *handle, return 0; fail: - atomic_inc(&data->lost); + local_inc(&data->lost); perf_output_put_handle(handle); out: rcu_read_unlock(); @@ -3086,10 +3086,10 @@ void perf_output_end(struct perf_output_handle *handle) int wakeup_events = event->attr.wakeup_events; if (handle->sample && wakeup_events) { - int events = atomic_inc_return(&data->events); + int events = local_inc_return(&data->events); if (events >= wakeup_events) { - atomic_sub(wakeup_events, &data->events); - atomic_inc(&data->wakeup); + local_sub(wakeup_events, &data->events); + local_inc(&data->wakeup); } } -- cgit v1.2.3 From 6d1acfd5c6bfd5231c13a8f2858d7f2afbaa1b62 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 18 May 2010 11:12:48 +0200 Subject: perf: Optimize perf_output_*() by avoiding local_xchg() Since the x86 XCHG instruction implies LOCK, avoid it by using a sequence count instead.
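
To make the trick concrete, here is a minimal user-space sketch of the snapshot-and-compare pattern the patch below adopts; wakeup_seq, out_handle and the function names are illustrative only, not the kernel's:

#include <stdio.h>

/* Bumped by any writer that wants a wakeup delivered. */
static unsigned long wakeup_seq;

struct out_handle {
	unsigned long wakeup;		/* sequence observed at get time */
};

static void get_handle(struct out_handle *h)
{
	h->wakeup = wakeup_seq;		/* plain load, no LOCK prefix */
}

static void put_handle(struct out_handle *h)
{
	/* A moved sequence count means a wakeup became due meanwhile. */
	if (h->wakeup != wakeup_seq)
		printf("wakeup\n");
}

int main(void)
{
	struct out_handle h;

	get_handle(&h);
	wakeup_seq++;			/* a writer crossed the watermark */
	put_handle(&h);			/* prints "wakeup" */
	return 0;
}
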
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 1 + kernel/perf_event.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ce7667616fcb..fe50347dc645 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -804,6 +804,7 @@ struct perf_output_handle { struct perf_mmap_data *data; unsigned long head; unsigned long offset; + unsigned long wakeup; int nmi; int sample; }; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 1f98c78c3343..7e3bcf1a29f0 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2917,6 +2917,7 @@ static void perf_output_get_handle(struct perf_output_handle *handle) preempt_disable(); local_inc(&data->nest); + handle->wakeup = local_read(&data->wakeup); } static void perf_output_put_handle(struct perf_output_handle *handle) @@ -2950,7 +2951,7 @@ again: goto again; } - if (local_xchg(&data->wakeup, 0)) + if (handle->wakeup != local_read(&data->wakeup)) perf_output_wakeup(handle); preempt_enable(); -- cgit v1.2.3 From b7e2ecef92d2e7785e6d76b41e5ba8bcbc45259d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 19 May 2010 10:52:27 +0200 Subject: perf, trace: Optimize tracepoints by removing IRQ-disable from perf/tracepoint interaction Improves performance. Acked-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <1274259525.5605.10352.camel@twins> Signed-off-by: Ingo Molnar --- include/linux/ftrace_event.h | 9 ++--- include/trace/ftrace.h | 17 ++++------ kernel/trace/trace_event_perf.c | 73 ++++++++++++++++------------------------- kernel/trace/trace_kprobe.c | 10 +++--- kernel/trace/trace_syscalls.c | 10 +++--- 5 files changed, 47 insertions(+), 72 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index a9775dd7f7fe..126071bc90ab 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -197,20 +197,17 @@ extern void perf_trace_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); -extern void * -perf_trace_buf_prepare(int size, unsigned short type, int *rctxp, - unsigned long *irq_flags); +extern void *perf_trace_buf_prepare(int size, unsigned short type, + struct pt_regs *regs, int *rctxp); static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, unsigned long irq_flags, struct pt_regs *regs, - void *event) + u64 count, struct pt_regs *regs, void *event) { struct trace_entry *entry = raw_data; perf_tp_event(entry->type, addr, count, raw_data, size, regs, event); perf_swevent_put_recursion_context(rctx); - local_irq_restore(irq_flags); } #endif diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 1016b2162935..f282885057dd 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -768,7 +768,6 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ - unsigned long irq_flags; \ int __entry_size; \ int __data_size; \ int rctx; \ @@ -781,17 +780,18 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \ if 
(WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ "profile buffer not large enough")) \ return; \ + \ entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ - __entry_size, event_call->id, &rctx, &irq_flags); \ + __entry_size, event_call->id, __regs, &rctx); \ if (!entry) \ return; \ + \ tstruct \ \ { assign; } \ \ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, irq_flags, __regs, \ - event_call->perf_data); \ + __count, __regs, event_call->perf_data); \ } #undef DEFINE_EVENT @@ -799,13 +799,10 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \ static notrace void perf_trace_##call(proto) \ { \ struct ftrace_event_call *event_call = &event_##call; \ - struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \ - \ - perf_fetch_caller_regs(__regs, 1); \ - \ - perf_trace_templ_##template(event_call, __regs, args); \ + struct pt_regs __regs; \ \ - put_cpu_var(perf_trace_regs); \ + perf_fetch_caller_regs(&__regs, 1); \ + perf_trace_templ_##template(event_call, &__regs, args); \ } #undef DEFINE_EVENT_PRINT diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 89b780a7c522..a1304f8c4440 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -9,13 +9,9 @@ #include #include "trace.h" -DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); -EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs); - EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); -static char *perf_trace_buf; -static char *perf_trace_buf_nmi; +static char *perf_trace_buf[4]; /* * Force it to be aligned to unsigned long to avoid misaligned accesses @@ -29,7 +25,6 @@ static int total_ref_count; static int perf_trace_event_enable(struct ftrace_event_call *event, void *data) { - char *buf; int ret = -ENOMEM; if (event->perf_refcount++ > 0) { @@ -38,17 +33,16 @@ static int perf_trace_event_enable(struct ftrace_event_call *event, void *data) } if (!total_ref_count) { - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf; - - rcu_assign_pointer(perf_trace_buf, buf); + char *buf; + int i; - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf_nmi; + for (i = 0; i < 4; i++) { + buf = (char *)alloc_percpu(perf_trace_t); + if (!buf) + goto fail_buf; - rcu_assign_pointer(perf_trace_buf_nmi, buf); + rcu_assign_pointer(perf_trace_buf[i], buf); + } } ret = event->perf_event_enable(event); @@ -58,14 +52,15 @@ static int perf_trace_event_enable(struct ftrace_event_call *event, void *data) return 0; } -fail_buf_nmi: +fail_buf: if (!total_ref_count) { - free_percpu(perf_trace_buf_nmi); - free_percpu(perf_trace_buf); - perf_trace_buf_nmi = NULL; - perf_trace_buf = NULL; + int i; + + for (i = 0; i < 4; i++) { + free_percpu(perf_trace_buf[i]); + perf_trace_buf[i] = NULL; + } } -fail_buf: event->perf_refcount--; return ret; @@ -91,19 +86,19 @@ int perf_trace_enable(int event_id, void *data) static void perf_trace_event_disable(struct ftrace_event_call *event) { - char *buf, *nmi_buf; - if (--event->perf_refcount > 0) return; event->perf_event_disable(event); if (!--total_ref_count) { - buf = perf_trace_buf; - rcu_assign_pointer(perf_trace_buf, NULL); + char *buf[4]; + int i; - nmi_buf = perf_trace_buf_nmi; - rcu_assign_pointer(perf_trace_buf_nmi, NULL); + for (i = 0; i < 4; i++) { + buf[i] = perf_trace_buf[i]; + rcu_assign_pointer(perf_trace_buf[i], NULL); + } /* * Ensure every events in profiling have finished before @@ -111,8 +106,8 @@ static void perf_trace_event_disable(struct ftrace_event_call *event) */ synchronize_sched(); - 
free_percpu(buf); - free_percpu(nmi_buf); + for (i = 0; i < 4; i++) + free_percpu(buf[i]); } } @@ -132,47 +127,37 @@ void perf_trace_disable(int event_id) } __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, - int *rctxp, unsigned long *irq_flags) + struct pt_regs *regs, int *rctxp) { struct trace_entry *entry; char *trace_buf, *raw_data; - int pc, cpu; + int pc; BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); pc = preempt_count(); - /* Protect the per cpu buffer, begin the rcu read side */ - local_irq_save(*irq_flags); - *rctxp = perf_swevent_get_recursion_context(); if (*rctxp < 0) goto err_recursion; - cpu = smp_processor_id(); - - if (in_nmi()) - trace_buf = rcu_dereference_sched(perf_trace_buf_nmi); - else - trace_buf = rcu_dereference_sched(perf_trace_buf); - + trace_buf = rcu_dereference_sched(perf_trace_buf[*rctxp]); if (!trace_buf) goto err; - raw_data = per_cpu_ptr(trace_buf, cpu); + raw_data = per_cpu_ptr(trace_buf, smp_processor_id()); /* zero the dead bytes from align to not leak stack to user */ memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64)); entry = (struct trace_entry *)raw_data; - tracing_generic_entry_update(entry, *irq_flags, pc); + tracing_generic_entry_update(entry, regs->flags, pc); entry->type = type; return raw_data; err: perf_swevent_put_recursion_context(*rctxp); err_recursion: - local_irq_restore(*irq_flags); return NULL; } EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 2d7bf4146be8..20c96de0aea0 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1343,7 +1343,6 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, struct kprobe_trace_entry_head *entry; u8 *data; int size, __size, i; - unsigned long irq_flags; int rctx; __size = sizeof(*entry) + tp->size; @@ -1353,7 +1352,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, "profile buffer not large enough")) return; - entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, regs, &rctx); if (!entry) return; @@ -1362,7 +1361,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); - perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs, call->perf_data); + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, call->perf_data); } /* Kretprobe profile handler */ @@ -1374,7 +1373,6 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, struct kretprobe_trace_entry_head *entry; u8 *data; int size, __size, i; - unsigned long irq_flags; int rctx; __size = sizeof(*entry) + tp->size; @@ -1384,7 +1382,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, "profile buffer not large enough")) return; - entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, regs, &rctx); if (!entry) return; @@ -1395,7 +1393,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, - irq_flags, regs, call->perf_data); + regs, call->perf_data); } static int probe_perf_enable(struct ftrace_event_call *call) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 9eff1a4b49b9..a657cefbb137 100644 --- a/kernel/trace/trace_syscalls.c +++ 
b/kernel/trace/trace_syscalls.c @@ -438,7 +438,6 @@ static void perf_syscall_enter(struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; - unsigned long flags; int syscall_nr; int rctx; int size; @@ -461,14 +460,14 @@ static void perf_syscall_enter(struct pt_regs *regs, long id) return; rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, - sys_data->enter_event->id, &rctx, &flags); + sys_data->enter_event->id, regs, &rctx); if (!rec) return; rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs, + perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, sys_data->enter_event->perf_data); } @@ -511,7 +510,6 @@ static void perf_syscall_exit(struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; - unsigned long flags; int syscall_nr; int rctx; int size; @@ -537,14 +535,14 @@ static void perf_syscall_exit(struct pt_regs *regs, long ret) return; rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, - sys_data->exit_event->id, &rctx, &flags); + sys_data->exit_event->id, regs, &rctx); if (!rec) return; rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs, + perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, sys_data->exit_event->perf_data); } -- cgit v1.2.3 From 1c024eca51fdc965290acf342ae16a476c2189d0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 19 May 2010 14:02:22 +0200 Subject: perf, trace: Optimize tracepoints by using per-tracepoint-per-cpu hlist to track events Avoid the swevent hash-table by using per-tracepoint hlists. Also, avoid conditionals on the fast path by ordering with probe unregister so that we should never get on the callback path without the data being there. 
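
A rough user-space sketch of the shape of this change (illustrative names only, not the kernel API): each tracepoint owns one list head per CPU, so the callback walks a short local list instead of probing a global hash table.

#include <stdio.h>

#define NR_CPUS 4

struct consumer {
	const char *name;
	struct consumer *next;
};

struct tracepoint {
	struct consumer *per_cpu_head[NR_CPUS];	/* one list per CPU */
};

static void attach(struct tracepoint *tp, int cpu, struct consumer *c)
{
	/* The kernel uses hlist_add_head_rcu() so readers stay lockless. */
	c->next = tp->per_cpu_head[cpu];
	tp->per_cpu_head[cpu] = c;
}

static void fire(struct tracepoint *tp, int cpu)
{
	/* Only the local CPU's consumers are visited. */
	for (struct consumer *c = tp->per_cpu_head[cpu]; c; c = c->next)
		printf("deliver to %s on cpu%d\n", c->name, cpu);
}

int main(void)
{
	struct tracepoint tp = { .per_cpu_head = { NULL } };
	struct consumer a = { .name = "perf-event-A", .next = NULL };

	attach(&tp, 0, &a);
	fire(&tp, 0);	/* delivers */
	fire(&tp, 1);	/* empty list, nothing to do */
	return 0;
}
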
Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <20100521090710.473188012@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/ftrace_event.h | 16 ++--- include/linux/perf_event.h | 6 +- include/trace/ftrace.h | 4 +- kernel/perf_event.c | 94 ++++++++++++++--------------- kernel/trace/trace_event_perf.c | 127 +++++++++++++++++++++------------------- kernel/trace/trace_kprobe.c | 9 ++- kernel/trace/trace_syscalls.c | 11 ++-- 7 files changed, 143 insertions(+), 124 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 126071bc90ab..7024b7d1126f 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -133,7 +133,7 @@ struct ftrace_event_call { void *data; int perf_refcount; - void *perf_data; + struct hlist_head *perf_events; int (*perf_event_enable)(struct ftrace_event_call *); void (*perf_event_disable)(struct ftrace_event_call *); }; @@ -192,9 +192,11 @@ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -extern int perf_trace_enable(int event_id, void *data); -extern void perf_trace_disable(int event_id); -extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, +extern int perf_trace_init(struct perf_event *event); +extern void perf_trace_destroy(struct perf_event *event); +extern int perf_trace_enable(struct perf_event *event); +extern void perf_trace_disable(struct perf_event *event); +extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); extern void *perf_trace_buf_prepare(int size, unsigned short type, @@ -202,11 +204,9 @@ extern void *perf_trace_buf_prepare(int size, unsigned short type, static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, struct pt_regs *regs, void *event) + u64 count, struct pt_regs *regs, void *head) { - struct trace_entry *entry = raw_data; - - perf_tp_event(entry->type, addr, count, raw_data, size, regs, event); + perf_tp_event(addr, count, raw_data, size, regs, head); perf_swevent_put_recursion_context(rctx); } #endif diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index fe50347dc645..7cd7b356447d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -727,6 +727,7 @@ struct perf_event { perf_overflow_handler_t overflow_handler; #ifdef CONFIG_EVENT_TRACING + struct ftrace_event_call *tp_event; struct event_filter *filter; #endif @@ -992,8 +993,9 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size, struct pt_regs *regs, void *event); +extern void perf_tp_event(u64 addr, u64 count, void *record, + int entry_size, struct pt_regs *regs, + struct hlist_head *head); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index f282885057dd..4eb2148f1321 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -768,6 +768,7 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ + struct hlist_head *head; \ int __entry_size; \ int __data_size; \ int rctx; \ @@ -790,8 +791,9 @@ 
perf_trace_templ_##call(struct ftrace_event_call *event_call, \ \ { assign; } \ \ + head = per_cpu_ptr(event_call->perf_events, smp_processor_id());\ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, __regs, event_call->perf_data); \ + __count, __regs, head); \ } #undef DEFINE_EVENT diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 45b7aec55458..3f2cc313ee25 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4005,9 +4005,6 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, perf_swevent_overflow(event, 0, nmi, data, regs); } -static int perf_tp_event_match(struct perf_event *event, - struct perf_sample_data *data); - static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { @@ -4037,10 +4034,6 @@ static int perf_swevent_match(struct perf_event *event, if (perf_exclude_event(event, regs)) return 0; - if (event->attr.type == PERF_TYPE_TRACEPOINT && - !perf_tp_event_match(event, data)) - return 0; - return 1; } @@ -4122,7 +4115,7 @@ end: int perf_swevent_get_recursion_context(void) { - struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); int rctx; if (in_nmi()) @@ -4134,10 +4127,8 @@ int perf_swevent_get_recursion_context(void) else rctx = 0; - if (cpuctx->recursion[rctx]) { - put_cpu_var(perf_cpu_context); + if (cpuctx->recursion[rctx]) return -1; - } cpuctx->recursion[rctx]++; barrier(); @@ -4151,7 +4142,6 @@ void perf_swevent_put_recursion_context(int rctx) struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); barrier(); cpuctx->recursion[rctx]--; - put_cpu_var(perf_cpu_context); } EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); @@ -4162,6 +4152,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi, struct perf_sample_data data; int rctx; + preempt_disable_notrace(); rctx = perf_swevent_get_recursion_context(); if (rctx < 0) return; @@ -4171,6 +4162,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi, do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); perf_swevent_put_recursion_context(rctx); + preempt_enable_notrace(); } static void perf_swevent_read(struct perf_event *event) @@ -4486,11 +4478,43 @@ static int swevent_hlist_get(struct perf_event *event) #ifdef CONFIG_EVENT_TRACING -void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size, struct pt_regs *regs, void *event) +static const struct pmu perf_ops_tracepoint = { + .enable = perf_trace_enable, + .disable = perf_trace_disable, + .read = perf_swevent_read, + .unthrottle = perf_swevent_unthrottle, +}; + +static int perf_tp_filter_match(struct perf_event *event, + struct perf_sample_data *data) +{ + void *record = data->raw->data; + + if (likely(!event->filter) || filter_match_preds(event->filter, record)) + return 1; + return 0; +} + +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + if (perf_exclude_event(event, regs)) + return 0; + + if (!perf_tp_filter_match(event, data)) + return 0; + + return 1; +} + +void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, + struct pt_regs *regs, struct hlist_head *head) { - const int type = PERF_TYPE_TRACEPOINT; struct perf_sample_data data; + struct perf_event *event; + struct hlist_node *node; + struct perf_raw_record raw = { .size = entry_size, .data = record, @@ -4499,30 +4523,18 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, perf_sample_data_init(&data, 
addr); data.raw = &raw; - if (!event) { - do_perf_sw_event(type, event_id, count, 1, &data, regs); - return; + rcu_read_lock(); + hlist_for_each_entry_rcu(event, node, head, hlist_entry) { + if (perf_tp_event_match(event, &data, regs)) + perf_swevent_add(event, count, 1, &data, regs); } - - if (perf_swevent_match(event, type, event_id, &data, regs)) - perf_swevent_add(event, count, 1, &data, regs); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(perf_tp_event); -static int perf_tp_event_match(struct perf_event *event, - struct perf_sample_data *data) -{ - void *record = data->raw->data; - - if (likely(!event->filter) || filter_match_preds(event->filter, record)) - return 1; - return 0; -} - static void tp_perf_event_destroy(struct perf_event *event) { - perf_trace_disable(event->attr.config); - swevent_hlist_put(event); + perf_trace_destroy(event); } static const struct pmu *tp_perf_event_init(struct perf_event *event) @@ -4538,17 +4550,13 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); - if (perf_trace_enable(event->attr.config, event)) + err = perf_trace_init(event); + if (err) return NULL; event->destroy = tp_perf_event_destroy; - err = swevent_hlist_get(event); - if (err) { - perf_trace_disable(event->attr.config); - return ERR_PTR(err); - } - return &perf_ops_generic; + return &perf_ops_tracepoint; } static int perf_event_set_filter(struct perf_event *event, void __user *arg) @@ -4576,12 +4584,6 @@ static void perf_event_free_filter(struct perf_event *event) #else -static int perf_tp_event_match(struct perf_event *event, - struct perf_sample_data *data) -{ - return 1; -} - static const struct pmu *tp_perf_event_init(struct perf_event *event) { return NULL; diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index a1304f8c4440..39d5ea7b0653 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -23,14 +23,25 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)]) /* Count the events in use (per event id, not per instance) */ static int total_ref_count; -static int perf_trace_event_enable(struct ftrace_event_call *event, void *data) +static int perf_trace_event_init(struct ftrace_event_call *tp_event, + struct perf_event *p_event) { + struct hlist_head *list; int ret = -ENOMEM; + int cpu; - if (event->perf_refcount++ > 0) { - event->perf_data = NULL; + p_event->tp_event = tp_event; + if (tp_event->perf_refcount++ > 0) return 0; - } + + list = alloc_percpu(struct hlist_head); + if (!list) + goto fail; + + for_each_possible_cpu(cpu) + INIT_HLIST_HEAD(per_cpu_ptr(list, cpu)); + + tp_event->perf_events = list; if (!total_ref_count) { char *buf; @@ -39,20 +50,20 @@ static int perf_trace_event_enable(struct ftrace_event_call *event, void *data) for (i = 0; i < 4; i++) { buf = (char *)alloc_percpu(perf_trace_t); if (!buf) - goto fail_buf; + goto fail; - rcu_assign_pointer(perf_trace_buf[i], buf); + perf_trace_buf[i] = buf; } } - ret = event->perf_event_enable(event); - if (!ret) { - event->perf_data = data; - total_ref_count++; - return 0; - } + ret = tp_event->perf_event_enable(tp_event); + if (ret) + goto fail; -fail_buf: + total_ref_count++; + return 0; + +fail: if (!total_ref_count) { int i; @@ -61,21 +72,26 @@ fail_buf: perf_trace_buf[i] = NULL; } } - event->perf_refcount--; + + if (!--tp_event->perf_refcount) { + free_percpu(tp_event->perf_events); + tp_event->perf_events = NULL; + } return ret; } -int perf_trace_enable(int event_id, void 
*data) +int perf_trace_init(struct perf_event *p_event) { - struct ftrace_event_call *event; + struct ftrace_event_call *tp_event; + int event_id = p_event->attr.config; int ret = -EINVAL; mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id && event->perf_event_enable && - try_module_get(event->mod)) { - ret = perf_trace_event_enable(event, data); + list_for_each_entry(tp_event, &ftrace_events, list) { + if (tp_event->id == event_id && tp_event->perf_event_enable && + try_module_get(tp_event->mod)) { + ret = perf_trace_event_init(tp_event, p_event); break; } } @@ -84,53 +100,52 @@ int perf_trace_enable(int event_id, void *data) return ret; } -static void perf_trace_event_disable(struct ftrace_event_call *event) +int perf_trace_enable(struct perf_event *p_event) { - if (--event->perf_refcount > 0) - return; + struct ftrace_event_call *tp_event = p_event->tp_event; + struct hlist_head *list; - event->perf_event_disable(event); + list = tp_event->perf_events; + if (WARN_ON_ONCE(!list)) + return -EINVAL; - if (!--total_ref_count) { - char *buf[4]; - int i; - - for (i = 0; i < 4; i++) { - buf[i] = perf_trace_buf[i]; - rcu_assign_pointer(perf_trace_buf[i], NULL); - } + list = per_cpu_ptr(list, smp_processor_id()); + hlist_add_head_rcu(&p_event->hlist_entry, list); - /* - * Ensure every events in profiling have finished before - * releasing the buffers - */ - synchronize_sched(); + return 0; +} - for (i = 0; i < 4; i++) - free_percpu(buf[i]); - } +void perf_trace_disable(struct perf_event *p_event) +{ + hlist_del_rcu(&p_event->hlist_entry); } -void perf_trace_disable(int event_id) +void perf_trace_destroy(struct perf_event *p_event) { - struct ftrace_event_call *event; + struct ftrace_event_call *tp_event = p_event->tp_event; + int i; - mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id) { - perf_trace_event_disable(event); - module_put(event->mod); - break; + if (--tp_event->perf_refcount > 0) + return; + + tp_event->perf_event_disable(tp_event); + + free_percpu(tp_event->perf_events); + tp_event->perf_events = NULL; + + if (!--total_ref_count) { + for (i = 0; i < 4; i++) { + free_percpu(perf_trace_buf[i]); + perf_trace_buf[i] = NULL; } } - mutex_unlock(&event_mutex); } __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, struct pt_regs *regs, int *rctxp) { struct trace_entry *entry; - char *trace_buf, *raw_data; + char *raw_data; int pc; BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); @@ -139,13 +154,9 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, *rctxp = perf_swevent_get_recursion_context(); if (*rctxp < 0) - goto err_recursion; - - trace_buf = rcu_dereference_sched(perf_trace_buf[*rctxp]); - if (!trace_buf) - goto err; + return NULL; - raw_data = per_cpu_ptr(trace_buf, smp_processor_id()); + raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id()); /* zero the dead bytes from align to not leak stack to user */ memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64)); @@ -155,9 +166,5 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, entry->type = type; return raw_data; -err: - perf_swevent_put_recursion_context(*rctxp); -err_recursion: - return NULL; } EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 20c96de0aea0..4681f60dac00 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1341,6 +1341,7 @@ static 
__kprobes void kprobe_perf_func(struct kprobe *kp, struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct ftrace_event_call *call = &tp->call; struct kprobe_trace_entry_head *entry; + struct hlist_head *head; u8 *data; int size, __size, i; int rctx; @@ -1361,7 +1362,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); - perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, call->perf_data); + head = per_cpu_ptr(call->perf_events, smp_processor_id()); + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); } /* Kretprobe profile handler */ @@ -1371,6 +1373,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct ftrace_event_call *call = &tp->call; struct kretprobe_trace_entry_head *entry; + struct hlist_head *head; u8 *data; int size, __size, i; int rctx; @@ -1392,8 +1395,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); - perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, - regs, call->perf_data); + head = per_cpu_ptr(call->perf_events, smp_processor_id()); + perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); } static int probe_perf_enable(struct ftrace_event_call *call) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index a657cefbb137..eb769f270291 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -438,6 +438,7 @@ static void perf_syscall_enter(struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; + struct hlist_head *head; int syscall_nr; int rctx; int size; @@ -467,8 +468,9 @@ static void perf_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, - sys_data->enter_event->perf_data); + + head = per_cpu_ptr(sys_data->enter_event->perf_events, smp_processor_id()); + perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); } int perf_sysenter_enable(struct ftrace_event_call *call) @@ -510,6 +512,7 @@ static void perf_syscall_exit(struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; + struct hlist_head *head; int syscall_nr; int rctx; int size; @@ -542,8 +545,8 @@ static void perf_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, - sys_data->exit_event->perf_data); + head = per_cpu_ptr(sys_data->exit_event->perf_events, smp_processor_id()); + perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); } int perf_sysexit_enable(struct ftrace_event_call *call) -- cgit v1.2.3 From adb8e118f288dc4c569ac9a89010b81a4745fbf0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 20 May 2010 16:21:55 +0200 Subject: perf: Fix wakeup storm for RO mmap()s RO mmap()s don't update the tail pointer, so comparing against it for determining the written data size doesn't really do any good. Keep track of when we last did a wakeup, and compare against that. 
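
A compact user-space sketch of the stamp this patch introduces (names illustrative): the wakeup stamp advances by one watermark per wakeup, so a reader that never moves the tail still triggers one wakeup per watermark of new data rather than one per record.

#include <stdio.h>

static unsigned long head, wakeup_stamp;
static const unsigned long watermark = 4096;

static void output_begin(unsigned long size)
{
	head += size;
	if (head - wakeup_stamp > watermark) {
		wakeup_stamp += watermark;	/* kernel: local_add(watermark, &wakeup) */
		printf("wakeup at head=%lu\n", head);
	}
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		output_begin(128);	/* 12800 bytes written: ~3 wakeups, not 100 */
	return 0;
}
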
Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <20100521090710.684479310@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 2 +- kernel/perf_event.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7cd7b356447d..7098ebbb3b3a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -599,7 +599,7 @@ struct perf_mmap_data { local_t head; /* write position */ local_t nest; /* nested writers */ local_t events; /* event limit */ - local_t wakeup; /* needs a wakeup */ + local_t wakeup; /* wakeup stamp */ local_t lost; /* nr records lost */ long watermark; /* wakeup watermark */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 7a932526946f..1531e0b409a5 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3056,8 +3056,8 @@ int perf_output_begin(struct perf_output_handle *handle, handle->offset = offset; handle->head = head; - if (head - tail > data->watermark) - local_inc(&data->wakeup); + if (head - local_read(&data->wakeup) > data->watermark) + local_add(data->watermark, &data->wakeup); if (have_lost) { lost_event.header.type = PERF_RECORD_LOST; -- cgit v1.2.3 From 5d967a8be636a4f301a8daad642bd1007299d9ec Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 20 May 2010 16:46:39 +0200 Subject: perf: Optimize perf_output_copy() Reduce the clutter in perf_output_copy() by keeping an iterator in perf_output_handle. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <20100521090710.742809176@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 3 +++ kernel/perf_event.c | 54 ++++++++++++++++++++++------------------------ 2 files changed, 29 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7098ebbb3b3a..7bd17f0488f8 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -806,6 +806,9 @@ struct perf_output_handle { unsigned long head; unsigned long offset; unsigned long wakeup; + unsigned long size; + void *addr; + int page; int nmi; int sample; }; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 1531e0b409a5..b67549a08626 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2961,39 +2961,30 @@ again: void perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len) { - unsigned int pages_mask; - unsigned long offset; - unsigned int size; - void **pages; - - offset = handle->offset; - pages_mask = handle->data->nr_pages - 1; - pages = handle->data->data_pages; - - do { - unsigned long page_offset; - unsigned long page_size; - int nr; - - nr = (offset >> PAGE_SHIFT) & pages_mask; - page_size = 1UL << (handle->data->data_order + PAGE_SHIFT); - page_offset = offset & (page_size - 1); - size = min_t(unsigned int, page_size - page_offset, len); - - memcpy(pages[nr] + page_offset, buf, size); - - len -= size; - buf += size; - offset += size; - } while (len); - - handle->offset = offset; + handle->offset += len; /* * Check we didn't copy past our reservation window, taking the * possible unsigned int wrap into account.
*/ - WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); + if (WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0)) + return; + + do { + unsigned long size = min(handle->size, len); + + memcpy(handle->addr, buf, size); + + len -= size; + handle->addr += size; + handle->size -= size; + if (!handle->size) { + handle->page++; + handle->page &= handle->data->nr_pages - 1; + handle->addr = handle->data->data_pages[handle->page]; + handle->size = PAGE_SIZE << handle->data->data_order; + } + } while (len); } int perf_output_begin(struct perf_output_handle *handle, @@ -3059,6 +3050,13 @@ int perf_output_begin(struct perf_output_handle *handle, if (head - local_read(&data->wakeup) > data->watermark) local_add(data->watermark, &data->wakeup); + handle->page = handle->offset >> (PAGE_SHIFT + data->data_order); + handle->page &= data->nr_pages - 1; + handle->size = handle->offset & ((PAGE_SIZE << data->data_order) - 1); + handle->addr = data->data_pages[handle->page]; + handle->addr += handle->size; + handle->size = (PAGE_SIZE << data->data_order) - handle->size; + if (have_lost) { lost_event.header.type = PERF_RECORD_LOST; lost_event.header.misc = 0; -- cgit v1.2.3 From 3cafa9fbb5c1d564b7b8e7224f493effbf04ffee Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 20 May 2010 19:07:56 +0200 Subject: perf: Optimize the !vmalloc backed buffer Reduce code and data by using the knowledge that for !PERF_USE_VMALLOC data_order is always 0. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <20100521090710.795019386@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 2 +- kernel/perf_event.c | 41 ++++++++++++++++++++++++++--------------- 2 files changed, 27 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7bd17f0488f8..09cd9c1abfda 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -588,8 +588,8 @@ struct perf_mmap_data { struct rcu_head rcu_head; #ifdef CONFIG_PERF_USE_VMALLOC struct work_struct work; + int page_order; /* allocation order */ #endif - int data_order; /* allocation order */ int nr_pages; /* nr of data pages */ int writable; /* are we writable */ int nr_locked; /* nr pages mlocked */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index b67549a08626..953ce46d7b2f 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2297,11 +2297,6 @@ unlock: rcu_read_unlock(); } -static unsigned long perf_data_size(struct perf_mmap_data *data) -{ - return data->nr_pages << (PAGE_SHIFT + data->data_order); -} - #ifndef CONFIG_PERF_USE_VMALLOC /* @@ -2359,7 +2354,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages) goto fail_data_pages; } - data->data_order = 0; data->nr_pages = nr_pages; return data; @@ -2395,6 +2389,11 @@ static void perf_mmap_data_free(struct perf_mmap_data *data) kfree(data); } +static inline int page_order(struct perf_mmap_data *data) +{ + return 0; +} + #else /* @@ -2403,10 +2402,15 @@ static void perf_mmap_data_free(struct perf_mmap_data *data) * Required for architectures that have d-cache aliasing issues. 
*/ +static inline int page_order(struct perf_mmap_data *data) +{ + return data->page_order; +} + static struct page * perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) { - if (pgoff > (1UL << data->data_order)) + if (pgoff > (1UL << page_order(data))) return NULL; return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE); @@ -2426,7 +2430,7 @@ static void perf_mmap_data_free_work(struct work_struct *work) int i, nr; data = container_of(work, struct perf_mmap_data, work); - nr = 1 << data->data_order; + nr = 1 << page_order(data); base = data->user_page; for (i = 0; i < nr + 1; i++) @@ -2465,7 +2469,7 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages) data->user_page = all_buf; data->data_pages[0] = all_buf + PAGE_SIZE; - data->data_order = ilog2(nr_pages); + data->page_order = ilog2(nr_pages); data->nr_pages = 1; return data; @@ -2479,6 +2483,11 @@ fail: #endif +static unsigned long perf_data_size(struct perf_mmap_data *data) +{ + return data->nr_pages << (PAGE_SHIFT + page_order(data)); +} + static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct perf_event *event = vma->vm_file->private_data; @@ -2979,10 +2988,12 @@ void perf_output_copy(struct perf_output_handle *handle, handle->addr += size; handle->size -= size; if (!handle->size) { + struct perf_mmap_data *data = handle->data; + handle->page++; - handle->page &= handle->data->nr_pages - 1; - handle->addr = handle->data->data_pages[handle->page]; - handle->size = PAGE_SIZE << handle->data->data_order; + handle->page &= data->nr_pages - 1; + handle->addr = data->data_pages[handle->page]; + handle->size = PAGE_SIZE << page_order(data); } } while (len); } @@ -3050,12 +3061,12 @@ int perf_output_begin(struct perf_output_handle *handle, if (head - local_read(&data->wakeup) > data->watermark) local_add(data->watermark, &data->wakeup); - handle->page = handle->offset >> (PAGE_SHIFT + data->data_order); + handle->page = handle->offset >> (PAGE_SHIFT + page_order(data)); handle->page &= data->nr_pages - 1; - handle->size = handle->offset & ((PAGE_SIZE << data->data_order) - 1); + handle->size = handle->offset & ((PAGE_SIZE << page_order(data)) - 1); handle->addr = data->data_pages[handle->page]; handle->addr += handle->size; - handle->size = (PAGE_SIZE << data->data_order) - handle->size; + handle->size = (PAGE_SIZE << page_order(data)) - handle->size; if (have_lost) { lost_event.header.type = PERF_RECORD_LOST; -- cgit v1.2.3 From a94ffaaf55552769af328eaca9260fe6291c66c7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 20 May 2010 19:50:07 +0200 Subject: perf: Remove more code from the fastpath Sanity checks cost instructions. 
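
This patch and the perf_output_copy() one before it converge on a cached page iterator in the output handle. A self-contained user-space approximation (illustrative names, power-of-two page count assumed): the masking and shifting is hoisted out of the loop, so the inner copy is just a memcpy plus a page advance.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define NR_PAGES  4		/* must be a power of two */

static char pages[NR_PAGES][PAGE_SIZE];

struct out_handle {
	int page;		/* current page index */
	char *addr;		/* next byte to write */
	unsigned long size;	/* bytes left in the current page */
};

static void out_copy(struct out_handle *h, const void *buf, unsigned long len)
{
	do {
		unsigned long n = len < h->size ? len : h->size;

		memcpy(h->addr, buf, n);
		buf = (const char *)buf + n;
		len -= n;
		h->addr += n;
		h->size -= n;
		if (!h->size) {		/* crossed into the next page */
			h->page = (h->page + 1) & (NR_PAGES - 1);
			h->addr = pages[h->page];
			h->size = PAGE_SIZE;
		}
	} while (len);
}

int main(void)
{
	struct out_handle h = { .page = 0, .addr = pages[0], .size = PAGE_SIZE };
	char rec[6000];

	memset(rec, 0xab, sizeof(rec));
	out_copy(&h, rec, sizeof(rec));	/* spans one page boundary */
	printf("now at page %d, %lu bytes left\n", h.page, h.size);
	return 0;
}
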
Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <20100521090710.852926930@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 2 -- kernel/perf_event.c | 20 ++++---------------- 2 files changed, 4 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 09cd9c1abfda..fb6c91eac7e3 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -803,8 +803,6 @@ struct perf_cpu_context { struct perf_output_handle { struct perf_event *event; struct perf_mmap_data *data; - unsigned long head; - unsigned long offset; unsigned long wakeup; unsigned long size; void *addr; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 953ce46d7b2f..d25c864cadbf 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2967,20 +2967,11 @@ again: preempt_enable(); } -void perf_output_copy(struct perf_output_handle *handle, +__always_inline void perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len) { - handle->offset += len; - - /* - * Check we didn't copy past our reservation window, taking the - * possible unsigned int wrap into account. - */ - if (WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0)) - return; - do { - unsigned long size = min(handle->size, len); + unsigned long size = min_t(unsigned long, handle->size, len); memcpy(handle->addr, buf, size); @@ -3055,15 +3046,12 @@ int perf_output_begin(struct perf_output_handle *handle, goto fail; } while (local_cmpxchg(&data->head, offset, head) != offset); - handle->offset = offset; - handle->head = head; - if (head - local_read(&data->wakeup) > data->watermark) local_add(data->watermark, &data->wakeup); - handle->page = handle->offset >> (PAGE_SHIFT + page_order(data)); + handle->page = offset >> (PAGE_SHIFT + page_order(data)); handle->page &= data->nr_pages - 1; - handle->size = handle->offset & ((PAGE_SIZE << page_order(data)) - 1); + handle->size = offset & ((PAGE_SIZE << page_order(data)) - 1); handle->addr = data->data_pages[handle->page]; handle->addr += handle->size; handle->size = (PAGE_SIZE << page_order(data)) - handle->size; -- cgit v1.2.3
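
Stepping back, the common thread of the series is the nest-count publish protocol that replaced the old cross-CPU lock. A single-threaded user-space demo of the idea (illustrative names; the kernel versions use local_t and barriers): only the outermost writer publishes the head, and the final re-check catches a nested writer that slipped in between the read and the publish.

#include <stdio.h>

static unsigned long head;		/* private write position */
static unsigned long data_head;		/* what the user page would expose */
static int nest;			/* nested writers on this CPU */

static void get_handle(void)
{
	nest++;		/* kernel: preempt_disable() + local_inc(&data->nest) */
}

static void put_handle(void)
{
	unsigned long h;
again:
	h = head;
	if (--nest)			/* inner writer: outermost publishes */
		return;

	data_head = h;			/* publish the known-good head */

	if (h != head) {		/* a nested writer moved head meanwhile */
		nest++;
		goto again;
	}
}

int main(void)
{
	get_handle();
	head += 64;			/* write one record */
	put_handle();
	printf("data_head=%lu\n", data_head);
	return 0;
}
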