From 3340289ddf29ca75c3acfb3a6b72f234b2f74d5c Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 6 Jan 2009 14:38:54 -0800 Subject: mm: report the MMU pagesize in /proc/pid/smaps The KernelPageSize entry in /proc/pid/smaps is the pagesize used by the kernel to back a VMA. This matches the size used by the MMU in the majority of cases. However, one counter-example occurs on PPC64 kernels, where a kernel using 64K as a base pagesize may still use 4K pages for the MMU on older processors. To distinguish the two, this patch reports MMUPageSize as the pagesize used by the MMU in /proc/pid/smaps. Signed-off-by: Mel Gorman Cc: "KOSAKI Motohiro" Cc: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/hugetlb.h | 6 ++++++ arch/powerpc/mm/hugetlbpage.c | 7 +++++++ 2 files changed, 13 insertions(+) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 26f0d0ab27a5..b1dafb6a9743 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -17,6 +17,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +/* + * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs + * to override the version in mm/hugetlb.c + */ +#define vma_mmu_pagesize vma_mmu_pagesize + /* * If the arch doesn't supply something else, assume that hugepage * size aligned regions are ok without further preparation. diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 201c7a5486cb..9920d6a7cf29 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -512,6 +512,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); } +unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) +{ + unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); + + return 1UL << mmu_psize_to_shift(psize); +} + /* * Called by asm hashtable.S for doing lazy icache flush */ -- cgit v1.2.3 From c04fc586c1a480ba198f03ae7b6cbd7b57380b91 Mon Sep 17 00:00:00 2001 From: Gary Hade Date: Tue, 6 Jan 2009 14:39:14 -0800 Subject: mm: show node to memory section relationship with symlinks in sysfs Add /sys/devices/system/node/nodeX/memoryY symlinks for all the memory sections located on nodeX. For example: /sys/devices/system/node/node1/memory135 -> ../../memory/memory135 indicates that memory section 135 resides on node1. Also revises the documentation to cover this change and updates Documentation/ABI/testing/sysfs-devices-memory to include descriptions of the memory hot-remove files 'phys_device', 'phys_index', and 'state', which were previously not described there. In addition to it always being a good policy to provide users with the maximum possible amount of physical location information for resources that can be hot-added and/or hot-removed, the following are some (but likely not all) of the user benefits provided by this change. Immediate: - Provides information needed to determine the specific node on which a defective DIMM is located. This will reduce system downtime when the node or defective DIMM is swapped out. - Prevents unintended onlining of a memory section that was previously offlined due to a defective DIMM. 
This could happen during node hot-add when the user or node hot-add assist script onlines _all_ offlined sections due to user or script inability to identify the specific memory sections located on the hot-added node. The consequences of reintroducing the defective memory could be ugly. - Provides information needed to vary the amount and distribution of memory on specific nodes for testing or debugging purposes. Future: - Will provide information needed to identify the memory sections that need to be offlined prior to physical removal of a specific node. Symlink creation during boot was tested on 2-node x86_64, 2-node ppc64, and 2-node ia64 systems. Symlink creation during physical memory hot-add was tested on a 2-node x86_64 system. Signed-off-by: Gary Hade Signed-off-by: Badari Pulavarty Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ABI/testing/sysfs-devices-memory | 51 +++++++++++- Documentation/memory-hotplug.txt | 16 +++- arch/ia64/mm/init.c | 2 +- arch/powerpc/mm/mem.c | 2 +- arch/s390/mm/init.c | 2 +- arch/sh/mm/init.c | 3 +- arch/x86/mm/init_32.c | 2 +- arch/x86/mm/init_64.c | 2 +- drivers/base/memory.c | 19 +++-- drivers/base/node.c | 103 +++++++++++++++++++++++++ include/linux/memory.h | 6 +- include/linux/memory_hotplug.h | 2 +- include/linux/node.h | 13 ++++ mm/memory_hotplug.c | 11 +-- 14 files changed, 209 insertions(+), 25 deletions(-) (limited to 'arch/powerpc') diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory index 7a16fe1e2270..9fe91c02ee40 100644 --- a/Documentation/ABI/testing/sysfs-devices-memory +++ b/Documentation/ABI/testing/sysfs-devices-memory @@ -6,7 +6,6 @@ Description: internal state of the kernel memory blocks. Files could be added or removed dynamically to represent hot-add/remove operations. - Users: hotplug memory add/remove tools https://w3.opensource.ibm.com/projects/powerpc-utils/ @@ -19,6 +18,56 @@ Description: This is useful for a user-level agent to determine identify removable sections of the memory before attempting potentially expensive hot-remove memory operation +Users: hotplug memory remove tools + https://w3.opensource.ibm.com/projects/powerpc-utils/ + +What: /sys/devices/system/memory/memoryX/phys_device +Date: September 2008 +Contact: Badari Pulavarty +Description: + The file /sys/devices/system/memory/memoryX/phys_device + is read-only and is designed to show the name of the + physical memory device. Implementation is currently incomplete. +What: /sys/devices/system/memory/memoryX/phys_index +Date: September 2008 +Contact: Badari Pulavarty +Description: + The file /sys/devices/system/memory/memoryX/phys_index + is read-only and contains the section ID in hexadecimal + which is equivalent to decimal X contained in the + memory section directory name. + +What: /sys/devices/system/memory/memoryX/state +Date: September 2008 +Contact: Badari Pulavarty +Description: + The file /sys/devices/system/memory/memoryX/state + is read-write. When read, its contents show the + online/offline state of the memory section. When written, + root can toggle the online/offline state of a removable + memory section (see removable file description above) + using the following commands. 
+ # echo online > /sys/devices/system/memory/memoryX/state + # echo offline > /sys/devices/system/memory/memoryX/state + + For example, if /sys/devices/system/memory/memory22/removable + contains a value of 1 and + /sys/devices/system/memory/memory22/state contains the + string "online", the following command can be executed + by root to offline that section. + # echo offline > /sys/devices/system/memory/memory22/state Users: hotplug memory remove tools https://w3.opensource.ibm.com/projects/powerpc-utils/ + +What: /sys/devices/system/node/nodeX/memoryY +Date: September 2008 +Contact: Gary Hade +Description: + When CONFIG_NUMA is enabled, + /sys/devices/system/node/nodeX/memoryY is a symbolic link that + points to the corresponding /sys/devices/system/memory/memoryY + memory section directory. For example, the following symbolic + link is created for memory section 9 on node0. + /sys/devices/system/node/node0/memory9 -> ../../memory/memory9 + diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt index 168117bd6ee8..4c2ecf537a4a 100644 --- a/Documentation/memory-hotplug.txt +++ b/Documentation/memory-hotplug.txt @@ -124,7 +124,7 @@ config options. This option can be kernel module too. -------------------------------- -3 sysfs files for memory hotplug +4 sysfs files for memory hotplug -------------------------------- All sections have their device information under /sys/devices/system/memory as @@ -138,11 +138,12 @@ For example, assume 1GiB section size. A device for a memory starting at (0x100000000 / 1Gib = 4) This device covers address range [0x100000000 ... 0x140000000) -Under each section, you can see 3 files. +Under each section, you can see 4 files. /sys/devices/system/memory/memoryXXX/phys_index /sys/devices/system/memory/memoryXXX/phys_device /sys/devices/system/memory/memoryXXX/state +/sys/devices/system/memory/memoryXXX/removable 'phys_index' : read-only and contains section id, same as XXX. 'state' : read-write @@ -150,10 +151,20 @@ Under each section, you can see 3 files. at write: user can specify "online", "offline" command 'phys_device': read-only: designed to show the name of physical memory device. This is not well implemented now. +'removable' : read-only: contains an integer value indicating + whether the memory section is removable or + not. A value of 1 indicates that the memory + section is removable and a value of 0 indicates that + it is not removable. NOTE: These directories/files appear after physical memory hotplug phase. +If CONFIG_NUMA is enabled, the +/sys/devices/system/memory/memoryXXX memory section +directories can also be accessed via symbolic links located in +the /sys/devices/system/node/node* directories. For example: +/sys/devices/system/node/node0/memory9 -> ../../memory/memory9 -------------------------------- 4. Physical memory hot-add phase @@ -365,7 +376,6 @@ node if necessary. - allowing memory hot-add to ZONE_MOVABLE. maybe we need some switch like sysctl or new control file. - showing memory section and physical device relationship. - - showing memory section and node relationship (maybe good for NUMA) - showing memory section is under ZONE_MOVABLE or not - test and make it better memory offlining. - support HugeTLB page migration and offlining. 
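As an aside, the sketch below illustrates how a userspace hot-remove tool might combine the new nodeX/memoryY symlinks with the existing 'removable' and 'state' files to offline every removable section on one node. This is a hypothetical example, not part of the patch; the hard-coded node number and the minimal error handling are placeholders.

#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* Offline every removable memory section that the kernel links
 * under node1 via the nodeX/memoryY symlinks (must run as root). */
int main(void)
{
	DIR *node = opendir("/sys/devices/system/node/node1");
	struct dirent *d;
	char path[256];

	if (!node)
		return 1;
	while ((d = readdir(node)) != NULL) {
		FILE *f;
		int removable = 0;

		/* the symlinks are named memoryY, e.g. memory135 */
		if (strncmp(d->d_name, "memory", 6) != 0)
			continue;

		/* skip sections whose 'removable' file reads 0 */
		snprintf(path, sizeof(path),
			 "/sys/devices/system/memory/%s/removable",
			 d->d_name);
		f = fopen(path, "r");
		if (!f || fscanf(f, "%d", &removable) != 1 || !removable) {
			if (f)
				fclose(f);
			continue;
		}
		fclose(f);

		/* request the offline transition through 'state' */
		snprintf(path, sizeof(path),
			 "/sys/devices/system/memory/%s/state", d->d_name);
		f = fopen(path, "w");
		if (f) {
			fprintf(f, "offline\n");
			fclose(f);
		}
	}
	closedir(node);
	return 0;
}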
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 054bcd9439aa..56e12903973c 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -692,7 +692,7 @@ int arch_add_memory(int nid, u64 start, u64 size) pgdat = NODE_DATA(nid); zone = pgdat->node_zones + ZONE_NORMAL; - ret = __add_pages(zone, start_pfn, nr_pages); + ret = __add_pages(nid, zone, start_pfn, nr_pages); if (ret) printk("%s: Problem encountered in __add_pages() as ret=%d\n", diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 53b06ebb3f2f..f00f09a77f12 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -132,7 +132,7 @@ int arch_add_memory(int nid, u64 start, u64 size) /* this should work for most non-highmem platforms */ zone = pgdata->node_zones; - return __add_pages(zone, start_pfn, nr_pages); + return __add_pages(nid, zone, start_pfn, nr_pages); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 158b0d6d7046..f0258ca3b17e 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -183,7 +183,7 @@ int arch_add_memory(int nid, u64 start, u64 size) rc = vmem_add_mapping(start, size); if (rc) return rc; - rc = __add_pages(zone, PFN_DOWN(start), PFN_DOWN(size)); + rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size)); if (rc) vmem_remove_mapping(start, size); return rc; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 6cbef8caeb56..3edf297c829b 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -311,7 +311,8 @@ int arch_add_memory(int nid, u64 start, u64 size) pgdat = NODE_DATA(nid); /* We only have ZONE_NORMAL, so this is easy.. */ - ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages); + ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL, + start_pfn, nr_pages); if (unlikely(ret)) printk("%s: Failed, __add_pages() == %d\n", __func__, ret); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index f99a6c6c432e..544d724caeee 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -1079,7 +1079,7 @@ int arch_add_memory(int nid, u64 start, u64 size) unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - return __add_pages(zone, start_pfn, nr_pages); + return __add_pages(nid, zone, start_pfn, nr_pages); } #endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9f7a0d24d42a..54c437e96541 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -857,7 +857,7 @@ int arch_add_memory(int nid, u64 start, u64 size) if (last_mapped_pfn > max_pfn_mapped) max_pfn_mapped = last_mapped_pfn; - ret = __add_pages(zone, start_pfn, nr_pages); + ret = __add_pages(nid, zone, start_pfn, nr_pages); WARN_ON_ONCE(ret); return ret; diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 5260e9e0df48..989429cfed88 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -347,8 +347,9 @@ static inline int memory_probe_init(void) * section belongs to... 
*/ -static int add_memory_block(unsigned long node_id, struct mem_section *section, - unsigned long state, int phys_device) +static int add_memory_block(int nid, struct mem_section *section, + unsigned long state, int phys_device, + enum mem_add_context context) { struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); int ret = 0; @@ -370,6 +371,10 @@ static int add_memory_block(unsigned long node_id, struct mem_section *section, ret = mem_create_simple_file(mem, phys_device); if (!ret) ret = mem_create_simple_file(mem, removable); + if (!ret) { + if (context == HOTPLUG) + ret = register_mem_sect_under_node(mem, nid); + } return ret; } @@ -382,7 +387,7 @@ static int add_memory_block(unsigned long node_id, struct mem_section *section, * * This could be made generic for all sysdev classes. */ -static struct memory_block *find_memory_block(struct mem_section *section) +struct memory_block *find_memory_block(struct mem_section *section) { struct kobject *kobj; struct sys_device *sysdev; @@ -411,6 +416,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, struct memory_block *mem; mem = find_memory_block(section); + unregister_mem_sect_under_nodes(mem); mem_remove_simple_file(mem, phys_index); mem_remove_simple_file(mem, state); mem_remove_simple_file(mem, phys_device); @@ -424,9 +430,9 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, * need an interface for the VM to add new memory regions, * but without onlining it. */ -int register_new_memory(struct mem_section *section) +int register_new_memory(int nid, struct mem_section *section) { - return add_memory_block(0, section, MEM_OFFLINE, 0); + return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG); } int unregister_memory_section(struct mem_section *section) @@ -458,7 +464,8 @@ int __init memory_dev_init(void) for (i = 0; i < NR_MEM_SECTIONS; i++) { if (!present_section_nr(i)) continue; - err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0); + err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, + 0, BOOT); if (!ret) ret = err; } diff --git a/drivers/base/node.c b/drivers/base/node.c index 91636cd8b6c9..43fa90b837ee 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -248,6 +249,105 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) return 0; } +#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +#define page_initialized(page) (page->lru.next) + +static int get_nid_for_pfn(unsigned long pfn) +{ + struct page *page; + + if (!pfn_valid_within(pfn)) + return -1; + page = pfn_to_page(pfn); + if (!page_initialized(page)) + return -1; + return pfn_to_nid(pfn); +} + +/* register memory section under specified node if it spans that node */ +int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) +{ + unsigned long pfn, sect_start_pfn, sect_end_pfn; + + if (!mem_blk) + return -EFAULT; + if (!node_online(nid)) + return 0; + sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); + sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; + for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { + int page_nid; + + page_nid = get_nid_for_pfn(pfn); + if (page_nid < 0) + continue; + if (page_nid != nid) + continue; + return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, + &mem_blk->sysdev.kobj, + kobject_name(&mem_blk->sysdev.kobj)); + } + /* mem section does not span the specified node */ + return 0; +} + +/* unregister memory section under all nodes that 
it spans */ +int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) +{ + nodemask_t unlinked_nodes; + unsigned long pfn, sect_start_pfn, sect_end_pfn; + + if (!mem_blk) + return -EFAULT; + nodes_clear(unlinked_nodes); + sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); + sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; + for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { + unsigned int nid; + + nid = get_nid_for_pfn(pfn); + if (nid < 0) + continue; + if (!node_online(nid)) + continue; + if (node_test_and_set(nid, unlinked_nodes)) + continue; + sysfs_remove_link(&node_devices[nid].sysdev.kobj, + kobject_name(&mem_blk->sysdev.kobj)); + } + return 0; +} + +static int link_mem_sections(int nid) +{ + unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn; + unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages; + unsigned long pfn; + int err = 0; + + for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { + unsigned long section_nr = pfn_to_section_nr(pfn); + struct mem_section *mem_sect; + struct memory_block *mem_blk; + int ret; + + if (!present_section_nr(section_nr)) + continue; + mem_sect = __nr_to_section(section_nr); + mem_blk = find_memory_block(mem_sect); + ret = register_mem_sect_under_node(mem_blk, nid); + if (!err) + err = ret; + + /* discard ref obtained in find_memory_block() */ + kobject_put(&mem_blk->sysdev.kobj); + } + return err; +} +#else +static int link_mem_sections(int nid) { return 0; } +#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ + int register_one_node(int nid) { int error = 0; @@ -267,6 +367,9 @@ int register_one_node(int nid) if (cpu_to_node(cpu) == nid) register_cpu_under_node(cpu, nid); } + + /* link memory sections under this node */ + error = link_mem_sections(nid); } return error; diff --git a/include/linux/memory.h b/include/linux/memory.h index 36c82c9e6ea7..3fdc10806d31 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -79,14 +79,14 @@ static inline int memory_notify(unsigned long val, void *v) #else extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); -extern int register_new_memory(struct mem_section *); +extern int register_new_memory(int, struct mem_section *); extern int unregister_memory_section(struct mem_section *); extern int memory_dev_init(void); extern int remove_memory_block(unsigned long, struct mem_section *, int); extern int memory_notify(unsigned long val, void *v); +extern struct memory_block *find_memory_block(struct mem_section *); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION< Date: Tue, 6 Jan 2009 14:40:39 -0800 Subject: atomic_t: unify all arch definitions The atomic_t type cannot currently be used in some header files because it would create an include loop with asm/atomic.h. Move the type definition to linux/types.h to break the loop. 
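For illustration, a minimal sketch of what the move enables (hypothetical header content, not part of this patch): a header can now embed an atomic_t after including only <linux/types.h>, while code that actually operates on the counter still includes <asm/atomic.h> for the operations.

#include <linux/types.h>	/* now provides the atomic_t type itself */

struct frag_stats {
	atomic_t allocs;	/* embedding the type no longer drags in
				 * <asm/atomic.h> and its include chain */
};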
Signed-off-by: Matthew Wilcox Cc: Huang Ying Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/atomic.h | 9 +-------- arch/arm/include/asm/atomic.h | 3 +-- arch/avr32/include/asm/atomic.h | 2 +- arch/blackfin/include/asm/atomic.h | 4 +--- arch/cris/include/asm/atomic.h | 4 +--- arch/h8300/include/asm/atomic.h | 3 ++- arch/ia64/include/asm/atomic.h | 6 ------ arch/m68knommu/include/asm/atomic.h | 2 +- arch/mips/include/asm/atomic.h | 5 +---- arch/parisc/include/asm/atomic.h | 11 +++-------- arch/powerpc/include/asm/atomic.h | 4 +--- arch/s390/include/asm/atomic.h | 7 +------ arch/sh/include/asm/atomic.h | 7 +++---- arch/sparc/include/asm/atomic_32.h | 2 -- arch/sparc/include/asm/atomic_64.h | 3 --- arch/x86/include/asm/atomic_32.h | 10 +--------- arch/x86/include/asm/atomic_64.h | 18 ++---------------- include/asm-frv/atomic.h | 4 ---- include/asm-m32r/atomic.h | 8 +------- include/asm-m68k/atomic.h | 3 +-- include/asm-mn10300/atomic.h | 9 --------- include/asm-xtensa/atomic.h | 3 +-- include/linux/types.h | 10 ++++++++++ 23 files changed, 33 insertions(+), 104 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index ca88e54dec93..62b363584b2b 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -1,6 +1,7 @@ #ifndef _ALPHA_ATOMIC_H #define _ALPHA_ATOMIC_H +#include #include #include @@ -13,14 +14,6 @@ */ -/* - * Counter is volatile to make sure gcc doesn't try to be clever - * and move things around on us. We need to use _exactly_ the address - * the user gave us, not some alias that contains the same information. - */ -typedef struct { volatile int counter; } atomic_t; -typedef struct { volatile long counter; } atomic64_t; - #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) #define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 325f881ccb50..ee99723b3a6c 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -12,10 +12,9 @@ #define __ASM_ARM_ATOMIC_H #include +#include #include -typedef struct { volatile int counter; } atomic_t; - #define ATOMIC_INIT(i) { (i) } #ifdef __KERNEL__ diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index 7ef3862a73d0..318815107748 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -14,9 +14,9 @@ #ifndef __ASM_AVR32_ATOMIC_H #define __ASM_AVR32_ATOMIC_H +#include #include -typedef struct { volatile int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index 7cf508718605..25776c19064b 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h @@ -1,6 +1,7 @@ #ifndef __ARCH_BLACKFIN_ATOMIC__ #define __ARCH_BLACKFIN_ATOMIC__ +#include #include /* local_irq_XXX() */ /* @@ -13,9 +14,6 @@ * Tony Kou (tonyko@lineo.ca) Lineo Inc. 2001 */ -typedef struct { - int counter; -} atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index f71ea686a2ea..5718dd8902a1 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h @@ -4,7 +4,7 @@ #define __ASM_CRIS_ATOMIC__ #include - +#include #include #include @@ -13,8 +13,6 @@ * resource counting etc.. 
*/ -typedef struct { volatile int counter; } atomic_t; - #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index b4cf0ea97ede..833186c8dc3b 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -1,12 +1,13 @@ #ifndef __ARCH_H8300_ATOMIC__ #define __ARCH_H8300_ATOMIC__ +#include + /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. */ -typedef struct { int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 50c2b83fd5a0..d37292bd9875 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -17,12 +17,6 @@ #include #include -/* - * On IA-64, counter must always be volatile to ensure that that the - * memory accesses are ordered. - */ -typedef struct { volatile __s32 counter; } atomic_t; -typedef struct { volatile __s64 counter; } atomic64_t; #define ATOMIC_INIT(i) ((atomic_t) { (i) }) #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) diff --git a/arch/m68knommu/include/asm/atomic.h b/arch/m68knommu/include/asm/atomic.h index d5632a305dae..6bb674855a3f 100644 --- a/arch/m68knommu/include/asm/atomic.h +++ b/arch/m68knommu/include/asm/atomic.h @@ -1,6 +1,7 @@ #ifndef __ARCH_M68KNOMMU_ATOMIC__ #define __ARCH_M68KNOMMU_ATOMIC__ +#include #include /* @@ -12,7 +13,6 @@ * We do not have SMP m68k systems, so we don't have to deal with that. */ -typedef struct { int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 1232be3885b0..c996c3b4d074 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -15,13 +15,12 @@ #define _ASM_ATOMIC_H #include +#include #include #include #include #include -typedef struct { volatile int counter; } atomic_t; - #define ATOMIC_INIT(i) { (i) } /* @@ -404,8 +403,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -typedef struct { volatile long counter; } atomic64_t; - #define ATOMIC64_INIT(i) { (i) } /* diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 57fcc4a5ebb4..edbfe25c5fc1 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -155,14 +155,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #endif -/* Note that we need not lock read accesses - aligned word writes/reads - * are atomic, so a reader never sees unconsistent values. - * - * Cache-line alignment would conflict with, for example, linux/module.h +/* + * Note that we need not lock read accesses - aligned word writes/reads + * are atomic, so a reader never sees inconsistent values. */ -typedef struct { volatile int counter; } atomic_t; - /* It's possible to reduce all atomic operations to either * __atomic_add_return, atomic_set and atomic_read (the latter * is there only for consistency). 
@@ -260,8 +257,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -typedef struct { volatile s64 counter; } atomic64_t; - #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) static __inline__ int diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 499be5bdd6fa..b401950f5259 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -5,7 +5,7 @@ * PowerPC atomic operations */ -typedef struct { int counter; } atomic_t; +#include #ifdef __KERNEL__ #include @@ -251,8 +251,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) #ifdef __powerpc64__ -typedef struct { long counter; } atomic64_t; - #define ATOMIC64_INIT(i) { (i) } static __inline__ long atomic64_read(const atomic64_t *v) diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 2d184655bc5d..de432f2de2d2 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -2,6 +2,7 @@ #define __ARCH_S390_ATOMIC__ #include +#include /* * include/asm-s390/atomic.h @@ -23,9 +24,6 @@ * S390 uses 'Compare And Swap' for atomicity in SMP enviroment */ -typedef struct { - int counter; -} __attribute__ ((aligned (4))) atomic_t; #define ATOMIC_INIT(i) { (i) } #ifdef __KERNEL__ @@ -149,9 +147,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #undef __CS_LOOP #ifdef __s390x__ -typedef struct { - long long counter; -} __attribute__ ((aligned (8))) atomic64_t; #define ATOMIC64_INIT(i) { (i) } #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index c043ef003028..6327ffbb1992 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -7,16 +7,15 @@ * */ -typedef struct { volatile int counter; } atomic_t; +#include +#include +#include #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) #define atomic_read(v) ((v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) -#include -#include - #if defined(CONFIG_GUSA_RB) #include #elif defined(CONFIG_CPU_SH4A) diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 5c944b5a8040..ce465975a6a5 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -13,8 +13,6 @@ #include -typedef struct { volatile int counter; } atomic_t; - #ifdef __KERNEL__ #define ATOMIC_INIT(i) { (i) } diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 5982c5ae7f07..a0a706492696 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -10,9 +10,6 @@ #include #include -typedef struct { volatile int counter; } atomic_t; -typedef struct { volatile __s64 counter; } atomic64_t; - #define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h index ad5b9f6ecddf..85b46fba4229 100644 --- a/arch/x86/include/asm/atomic_32.h +++ b/arch/x86/include/asm/atomic_32.h @@ -2,6 +2,7 @@ #define _ASM_X86_ATOMIC_32_H #include +#include #include #include @@ -10,15 +11,6 @@ * resource counting etc.. */ -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. 
- */ -typedef struct { - int counter; -} atomic_t; - #define ATOMIC_INIT(i) { (i) } /** diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h index 279d2a731f3f..8c21731984da 100644 --- a/arch/x86/include/asm/atomic_64.h +++ b/arch/x86/include/asm/atomic_64.h @@ -1,25 +1,15 @@ #ifndef _ASM_X86_ATOMIC_64_H #define _ASM_X86_ATOMIC_64_H +#include #include #include -/* atomic_t should be 32 bit signed type */ - /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. */ -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. - */ -typedef struct { - int counter; -} atomic_t; - #define ATOMIC_INIT(i) { (i) } /** @@ -191,11 +181,7 @@ static inline int atomic_sub_return(int i, atomic_t *v) #define atomic_inc_return(v) (atomic_add_return(1, v)) #define atomic_dec_return(v) (atomic_sub_return(1, v)) -/* An 64bit atomic type */ - -typedef struct { - long counter; -} atomic64_t; +/* The 64-bit atomic type */ #define ATOMIC64_INIT(i) { (i) } diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h index 46d696b331e7..296c35cfb207 100644 --- a/include/asm-frv/atomic.h +++ b/include/asm-frv/atomic.h @@ -35,10 +35,6 @@ #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -typedef struct { - int counter; -} atomic_t; - #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) #define atomic_set(v, i) (((v)->counter) = (i)) diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h index 3a38ffe4a4f4..2eed30f84080 100644 --- a/include/asm-m32r/atomic.h +++ b/include/asm-m32r/atomic.h @@ -9,6 +9,7 @@ * Copyright (C) 2004 Hirokazu Takata */ +#include #include #include @@ -17,13 +18,6 @@ * resource counting etc.. */ -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. - */ -typedef struct { volatile int counter; } atomic_t; - #define ATOMIC_INIT(i) { (i) } /** diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h index 4915294fea63..eb0ab9d4ee77 100644 --- a/include/asm-m68k/atomic.h +++ b/include/asm-m68k/atomic.h @@ -1,7 +1,7 @@ #ifndef __ARCH_M68K_ATOMIC__ #define __ARCH_M68K_ATOMIC__ - +#include #include /* @@ -13,7 +13,6 @@ * We do not have SMP m68k systems, so we don't have to deal with that. */ -typedef struct { int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/include/asm-mn10300/atomic.h b/include/asm-mn10300/atomic.h index 27c9690b9574..bc064825f9b1 100644 --- a/include/asm-mn10300/atomic.h +++ b/include/asm-mn10300/atomic.h @@ -20,15 +20,6 @@ * resource counting etc.. */ -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. 
- */ -typedef struct { - int counter; -} atomic_t; - #define ATOMIC_INIT(i) { (i) } #ifdef __KERNEL__ diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h index b3b23540f14d..67ad67bed8c1 100644 --- a/include/asm-xtensa/atomic.h +++ b/include/asm-xtensa/atomic.h @@ -14,8 +14,7 @@ #define _XTENSA_ATOMIC_H #include - -typedef struct { volatile int counter; } atomic_t; +#include #ifdef __KERNEL__ #include diff --git a/include/linux/types.h b/include/linux/types.h index 121f349cb7ec..3b864f2d9560 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -195,6 +195,16 @@ typedef u32 phys_addr_t; typedef phys_addr_t resource_size_t; +typedef struct { + volatile int counter; +} atomic_t; + +#ifdef CONFIG_64BIT +typedef struct { + volatile long counter; +} atomic64_t; +#endif + struct ustat { __kernel_daddr_t f_tfree; __kernel_ino_t f_tinode; -- cgit v1.2.3 From 129415607845d4daea11ddcba706005c69dcb942 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 6 Jan 2009 14:41:50 -0800 Subject: kprobes: add kprobe_insn_mutex and cleanup arch_remove_kprobe() Add kprobe_insn_mutex for protecting kprobe_insn_pages hlist, and remove kprobe_mutex from architecture dependent code. This allows us to call arch_remove_kprobe() (and free_insn_slot) while holding kprobe_mutex. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Russell King Cc: "Luck, Tony" Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kernel/kprobes.c | 2 -- arch/ia64/kernel/kprobes.c | 8 +++++--- arch/powerpc/kernel/kprobes.c | 7 ++++--- arch/s390/kernel/kprobes.c | 7 ++++--- arch/x86/kernel/kprobes.c | 7 ++++--- include/linux/kprobes.h | 1 - kernel/kprobes.c | 25 +++++++++++++++++++++---- 7 files changed, 38 insertions(+), 19 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 3f9abe0e9aff..f692efddd449 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -92,9 +92,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { - mutex_lock(&kprobe_mutex); free_insn_slot(p->ainsn.insn, 0); - mutex_unlock(&kprobe_mutex); p->ainsn.insn = NULL; } } diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index f07688da947c..097b84d54e73 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -670,9 +670,11 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); - mutex_unlock(&kprobe_mutex); + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, + p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); + p->ainsn.insn = NULL; + } } /* * We are resuming execution after a single step fault, so the pt_regs diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index de79915452c8..989edcdf0297 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -96,9 +96,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, 0); - mutex_unlock(&kprobe_mutex); + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } } static void 
__kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 569079ec4ff0..9b92856632cf 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -218,9 +218,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, 0); - mutex_unlock(&kprobe_mutex); + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } } static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 6c27679ec6aa..eead6f8f9218 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -376,9 +376,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); - mutex_unlock(&kprobe_mutex); + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); + p->ainsn.insn = NULL; + } } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 497b1d1f7a05..b93e44ce2284 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -201,7 +201,6 @@ static inline int init_test_probes(void) } #endif /* CONFIG_KPROBES_SANITY_TEST */ -extern struct mutex kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 3afd354c46f1..29e87921437d 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -69,7 +69,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; /* NOTE: change this value only with kprobe_mutex held */ static bool kprobe_enabled; -DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ +static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; static struct { spinlock_t lock ____cacheline_aligned_in_smp; @@ -115,6 +115,7 @@ enum kprobe_slot_state { SLOT_USED = 2, }; +static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ static struct hlist_head kprobe_insn_pages; static int kprobe_garbage_slots; static int collect_garbage_slots(void); @@ -144,10 +145,10 @@ loop_end: } /** - * get_insn_slot() - Find a slot on an executable page for an instruction. + * __get_insn_slot() - Find a slot on an executable page for an instruction. * We allocate an executable page if there's no room on existing ones. */ -kprobe_opcode_t __kprobes *get_insn_slot(void) +static kprobe_opcode_t __kprobes *__get_insn_slot(void) { struct kprobe_insn_page *kip; struct hlist_node *pos; @@ -196,6 +197,15 @@ kprobe_opcode_t __kprobes *get_insn_slot(void) return kip->insns; } +kprobe_opcode_t __kprobes *get_insn_slot(void) +{ + kprobe_opcode_t *ret; + mutex_lock(&kprobe_insn_mutex); + ret = __get_insn_slot(); + mutex_unlock(&kprobe_insn_mutex); + return ret; +} + /* Return 1 if all garbages are collected, otherwise 0. 
*/ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) { @@ -226,9 +236,13 @@ static int __kprobes collect_garbage_slots(void) { struct kprobe_insn_page *kip; struct hlist_node *pos, *next; + int safety; /* Ensure no-one is preepmted on the garbages */ - if (check_safety() != 0) + mutex_unlock(&kprobe_insn_mutex); + safety = check_safety(); + mutex_lock(&kprobe_insn_mutex); + if (safety != 0) return -EAGAIN; hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { @@ -251,6 +265,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) struct kprobe_insn_page *kip; struct hlist_node *pos; + mutex_lock(&kprobe_insn_mutex); hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { if (kip->insns <= slot && slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { @@ -267,6 +282,8 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE) collect_garbage_slots(); + + mutex_unlock(&kprobe_insn_mutex); } #endif -- cgit v1.2.3 From 156ca2bbf6503a02d7d6829886ce381d572de66e Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Tue, 6 Jan 2009 14:56:23 -0800 Subject: powerpc: introduce asm/swab.h Signed-off-by: Harvey Harrison Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/Kbuild | 1 + arch/powerpc/include/asm/byteorder.h | 83 +-------------------------------- arch/powerpc/include/asm/swab.h | 90 ++++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 81 deletions(-) create mode 100644 arch/powerpc/include/asm/swab.h (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 5ab7d7fe198c..9268602de5d0 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -35,3 +35,4 @@ unifdef-y += spu_info.h unifdef-y += termios.h unifdef-y += types.h unifdef-y += unistd.h +unifdef-y += swab.h diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h index d5de325472e9..5cca27a41532 100644 --- a/arch/powerpc/include/asm/byteorder.h +++ b/arch/powerpc/include/asm/byteorder.h @@ -8,86 +8,7 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include -#include - -#define __BIG_ENDIAN - -#ifdef __GNUC__ -#ifdef __KERNEL__ - -static __inline__ __u16 ld_le16(const volatile __u16 *addr) -{ - __u16 val; - - __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); - return val; -} -#define __arch_swab16p ld_le16 - -static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) -{ - __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); -} - -static inline void __arch_swab16s(__u16 *addr) -{ - st_le16(addr, *addr); -} -#define __arch_swab16s __arch_swab16s - -static __inline__ __u32 ld_le32(const volatile __u32 *addr) -{ - __u32 val; - - __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); - return val; -} -#define __arch_swab32p ld_le32 - -static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) -{ - __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); -} - -static inline void __arch_swab32s(__u32 *addr) -{ - st_le32(addr, *addr); -} -#define __arch_swab32s __arch_swab32s - -static inline __attribute_const__ __u16 __arch_swab16(__u16 value) -{ - __u16 result; - - __asm__("rlwimi %0,%1,8,16,23" - : "=r" (result) - : "r" (value), "0" (value >> 8)); - return result; -} -#define __arch_swab16 __arch_swab16 - -static inline __attribute_const__ __u32 __arch_swab32(__u32 value) -{ - __u32 result; - - __asm__("rlwimi %0,%1,24,16,23\n\t" - "rlwimi %0,%1,8,8,15\n\t" - "rlwimi %0,%1,24,0,7" - : "=r" (result) - : "r" (value), "0" (value >> 24)); - return result; -} -#define __arch_swab32 __arch_swab32 - -#endif /* __KERNEL__ */ - -#ifndef __powerpc64__ -#define __SWAB_64_THRU_32__ -#endif /* __powerpc64__ */ - -#endif /* __GNUC__ */ - -#include +#include +#include #endif /* _ASM_POWERPC_BYTEORDER_H */ diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h new file mode 100644 index 000000000000..ef824ae4b79c --- /dev/null +++ b/arch/powerpc/include/asm/swab.h @@ -0,0 +1,90 @@ +#ifndef _ASM_POWERPC_SWAB_H +#define _ASM_POWERPC_SWAB_H + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include + +#ifdef __GNUC__ + +#ifndef __powerpc64__ +#define __SWAB_64_THRU_32__ +#endif /* __powerpc64__ */ + +#ifdef __KERNEL__ + +static __inline__ __u16 ld_le16(const volatile __u16 *addr) +{ + __u16 val; + + __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); + return val; +} +#define __arch_swab16p ld_le16 + +static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) +{ + __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); +} + +static inline void __arch_swab16s(__u16 *addr) +{ + st_le16(addr, *addr); +} +#define __arch_swab16s __arch_swab16s + +static __inline__ __u32 ld_le32(const volatile __u32 *addr) +{ + __u32 val; + + __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); + return val; +} +#define __arch_swab32p ld_le32 + +static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) +{ + __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); +} + +static inline void __arch_swab32s(__u32 *addr) +{ + st_le32(addr, *addr); +} +#define __arch_swab32s __arch_swab32s + +static inline __attribute_const__ __u16 __arch_swab16(__u16 value) +{ + __u16 result; + + __asm__("rlwimi %0,%1,8,16,23" + : "=r" (result) + : "r" (value), "0" (value >> 8)); + return result; +} +#define __arch_swab16 __arch_swab16 + +static inline __attribute_const__ __u32 __arch_swab32(__u32 value) +{ + __u32 result; + + __asm__("rlwimi %0,%1,24,16,23\n\t" + "rlwimi %0,%1,8,8,15\n\t" + "rlwimi %0,%1,24,0,7" + : "=r" (result) + : "r" (value), "0" (value >> 24)); + return result; +} +#define __arch_swab32 __arch_swab32 + +#endif /* __KERNEL__ */ + +#endif /* __GNUC__ */ + +#endif /* _ASM_POWERPC_SWAB_H */ -- cgit v1.2.3
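As a closing illustration, a short hedged sketch of how existing powerpc code continues to use these helpers after the split; <asm/byteorder.h> still exposes them by pulling in the new <asm/swab.h>. The descriptor field below is hypothetical, for illustration only.

#include <asm/byteorder.h>

/* A length field that a device keeps little-endian in shared memory. */
static __u16 get_frame_len(const volatile __u16 *len_field)
{
	return ld_le16(len_field);	/* lhbrx: byte-reversed load */
}

static void set_frame_len(volatile __u16 *len_field, __u16 len)
{
	st_le16(len_field, len);	/* sthbrx: byte-reversed store */
}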