From f1d4c3a76981addcd7669f404f75041435a04e6a Mon Sep 17 00:00:00 2001
From: Grant Likely
Date: Fri, 25 Jun 2010 12:16:52 -0600
Subject: of/flattree: Use common ALIGN() macro instead of arch specific _ALIGN

There's no reason to use the powerpc-specific _ALIGN macro in the fdt
code. Replace it with ALIGN() from kernel.h.

Signed-off-by: Grant Likely
Acked-by: Jeremy Kerr
Acked-by: Benjamin Herrenschmidt
---
 arch/microblaze/include/asm/page.h | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'arch/microblaze/include/asm/page.h')

diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 464ff32bee3d..2fd476126260 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -39,13 +39,6 @@
 #define PAGE_UP(addr)   (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
 #define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
 
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr, size)   (((addr)+((size)-1))&(~((size)-1)))
-#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr, size)      _ALIGN_UP(addr, size)
-
 #ifndef CONFIG_MMU
 /*
  * PAGE_OFFSET -- the first address of the first page of memory. When not
--
cgit v1.2.3
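For reference, the _ALIGN_UP/_ALIGN_DOWN macros deleted above are the standard power-of-two round-up/round-down idiom, and ALIGN() in linux/kernel.h performs the same add-then-mask arithmetic, which is what makes the substitution in the fdt code safe. A minimal standalone C sketch of the idiom (the ALIGN_UP/ALIGN_DOWN names are local to this example, not the kernel's; size must be a power of two):

/*
 * Round addr up or down to a power-of-two boundary, exactly as the
 * removed _ALIGN_UP/_ALIGN_DOWN did: add (size - 1) then clear the
 * low bits, or just clear the low bits.
 */
#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(addr, size)   (((addr) + ((size) - 1)) & ~((uintptr_t)(size) - 1))
#define ALIGN_DOWN(addr, size) ((addr) & ~((uintptr_t)(size) - 1))

int main(void)
{
        assert(ALIGN_UP(0x1001, 0x1000) == 0x2000);   /* rounds up */
        assert(ALIGN_UP(0x2000, 0x1000) == 0x2000);   /* already aligned */
        assert(ALIGN_DOWN(0x1fff, 0x1000) == 0x1000); /* rounds down */
        return 0;
}

ALIGN() in kernel.h is built from the same expression (via a mask helper), so the two agree for every power-of-two size.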
From ba9c4f88d747836bf35c3eee36aa18d2e164f493 Mon Sep 17 00:00:00 2001
From: "Steven J. Magnani"
Date: Thu, 13 May 2010 10:48:27 -0500
Subject: microblaze: Allow PAGE_SIZE configuration

Allow the developer to configure the memory page size at compile time.
Larger pages can improve performance on some workloads.

Based on PowerPC code.

Signed-off-by: Steven J. Magnani
Signed-off-by: Michal Simek
---
 arch/microblaze/Kconfig              | 30 ++++++++++++++++++++++++++++++
 arch/microblaze/include/asm/elf.h    |  2 +-
 arch/microblaze/include/asm/page.h   | 12 ++++++++++--
 arch/microblaze/kernel/cpu/mb.c      |  1 +
 arch/microblaze/kernel/head.S        |  4 ++--
 arch/microblaze/kernel/vmlinux.lds.S | 12 ++++++------
 6 files changed, 50 insertions(+), 11 deletions(-)

(limited to 'arch/microblaze/include/asm/page.h')

diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 505a08592423..a51742190c12 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -223,6 +223,36 @@ config TASK_SIZE
 	hex "Size of user task space" if TASK_SIZE_BOOL
 	default "0x80000000"
 
+choice
+	prompt "Page size"
+	default MICROBLAZE_4K_PAGES
+	depends on ADVANCED_OPTIONS && !MMU
+	help
+	  Select the kernel logical page size. Increasing the page size
+	  will reduce software overhead at each page boundary, allow
+	  hardware prefetch mechanisms to be more effective, and allow
+	  larger DMA transfers, increasing IO efficiency and reducing
+	  overhead. However, the utilization of memory will increase.
+	  For example, each cached file will use a multiple of the
+	  page size to hold its contents, and the space between the
+	  end of file and the end of page is wasted.
+
+	  If unsure, choose 4K_PAGES.
+
+config MICROBLAZE_4K_PAGES
+	bool "4k page size"
+
+config MICROBLAZE_8K_PAGES
+	bool "8k page size"
+
+config MICROBLAZE_16K_PAGES
+	bool "16k page size"
+
+config MICROBLAZE_32K_PAGES
+	bool "32k page size"
+
+endchoice
+
 endmenu
 
 source "mm/Kconfig"
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index 7d4acf2b278e..732caf1be741 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -77,7 +77,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #define ELF_DATA	ELFDATA2MSB
 #endif
 
-#define ELF_EXEC_PAGESIZE	4096
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
 #define ELF_CORE_COPY_REGS(_dest, _regs) \
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 464ff32bee3d..c12c6dfafd9f 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -23,8 +23,16 @@
 #ifdef __KERNEL__
 
 /* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT	(12)
-#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
+#if defined(CONFIG_MICROBLAZE_32K_PAGES)
+#define PAGE_SHIFT	15
+#elif defined(CONFIG_MICROBLAZE_16K_PAGES)
+#define PAGE_SHIFT	14
+#elif defined(CONFIG_MICROBLAZE_8K_PAGES)
+#define PAGE_SHIFT	13
+#else
+#define PAGE_SHIFT	12
+#endif
+#define PAGE_SIZE	(ASM_CONST(1) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
 #define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_KERNEL_BASE_ADDR))
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index 4216eb1eaa32..7086e3564281 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -126,6 +126,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		cpuinfo.pvr_user1,
 		cpuinfo.pvr_user2);
 
+	count += seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
 	return 0;
 }
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 1bf739888260..42434008209e 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -43,10 +43,10 @@
 .global empty_zero_page
 .align 12
 empty_zero_page:
-	.space	4096
+	.space	PAGE_SIZE
 .global swapper_pg_dir
 swapper_pg_dir:
-	.space	4096
+	.space	PAGE_SIZE
 
 #endif /* CONFIG_MMU */
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index db72d7124602..b0de1a6b5513 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -55,7 +55,7 @@ SECTIONS {
 	 */
 	.sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
 		_ssrw = .;
-		. = ALIGN(4096); /* page aligned when MMU used - origin 0x8 */
+		. = ALIGN(PAGE_SIZE); /* page aligned when MMU used */
 		*(.sdata2)
 		. = ALIGN(8);
 		_essrw = .;
@@ -70,7 +70,7 @@ SECTIONS {
 	/* Reserve some low RAM for r0 based memory references */
 	. = ALIGN(0x4) ;
 	r0_ram = . ;
-	. = . + 4096;	/* a page should be enough */
+	. = . + PAGE_SIZE;	/* a page should be enough */
 
 	/* Under the microblaze ABI, .sdata and .sbss must be contiguous */
 	. = ALIGN(8);
@@ -120,7 +120,7 @@ SECTIONS {
 
 	__init_end_before_initramfs = .;
 
-	.init.ramfs ALIGN(4096) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+	.init.ramfs ALIGN(PAGE_SIZE) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
 		__initramfs_start = .;
 		*(.init.ramfs)
 		__initramfs_end = .;
@@ -132,11 +132,11 @@ SECTIONS {
 	 * so that __init_end == __bss_start. This will make image.elf
 	 * consistent with the image.bin
 	 */
-	/* . = ALIGN(4096); */
+	/* . = ALIGN(PAGE_SIZE); */
 	}
 	__init_end = .;
 
-	.bss ALIGN (4096) : AT(ADDR(.bss) - LOAD_OFFSET) {
+	.bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
 		/* page aligned when MMU used */
 		__bss_start = . ;
 		*(.bss*)
@@ -145,7 +145,7 @@
 		__bss_stop = . ;
 		_ebss = . ;
 	}
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	_end = .;
 
 	DISCARDS
--
cgit v1.2.3
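The page.h hunk above is a pure compile-time derivation: Kconfig selects one CONFIG_MICROBLAZE_*_PAGES symbol, that fixes PAGE_SHIFT, and PAGE_SIZE and PAGE_MASK fall out as a shift and a mask. A standalone sketch of the same logic, with plain C standing in for the kernel's ASM_CONST() wrapper and the Kconfig symbol defined by hand (both are stand-ins for the real build machinery):

#include <stdio.h>

#define CONFIG_MICROBLAZE_16K_PAGES     /* stand-in for the Kconfig choice */

#if defined(CONFIG_MICROBLAZE_32K_PAGES)
#define PAGE_SHIFT 15
#elif defined(CONFIG_MICROBLAZE_16K_PAGES)
#define PAGE_SHIFT 14
#elif defined(CONFIG_MICROBLAZE_8K_PAGES)
#define PAGE_SHIFT 13
#else
#define PAGE_SHIFT 12                   /* 4k default */
#endif

#define PAGE_SIZE (1UL << PAGE_SHIFT)   /* the kernel uses ASM_CONST(1) here */
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        /* with 16k pages: PAGE_SHIFT=14, PAGE_SIZE=16384, mask clears low 14 bits */
        printf("PAGE_SHIFT=%d PAGE_SIZE=%lu PAGE_MASK=%#lx\n",
               PAGE_SHIFT, PAGE_SIZE, PAGE_MASK);
        return 0;
}

ASM_CONST() keeps the same constant usable from assembly as well as C, which is why head.S can say .space PAGE_SIZE instead of a hard-coded 4096.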
From a6eb9fe105d5de0053b261148cee56c94b4720ca Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Tue, 10 Aug 2010 18:03:22 -0700
Subject: dma-mapping: rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN

Now each architecture has its own dma_get_cache_alignment
implementation. dma_get_cache_alignment returns the minimum DMA
alignment. Architectures define it as ARCH_KMALLOC_MINALIGN (it's used
to make sure that a malloc'ed buffer is DMA-safe; the buffer doesn't
share a cache line with other data). So we can unify the
dma_get_cache_alignment implementations.

This patch:

dma_get_cache_alignment() needs to know whether an architecture defines
ARCH_KMALLOC_MINALIGN or not (that is, whether the architecture has a
DMA alignment restriction). However, slab.h defines
ARCH_KMALLOC_MINALIGN itself if the architecture doesn't define it.

Let's rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN.
ARCH_KMALLOC_MINALIGN is used only in the internals of slab/slob/slub
(except for crypto).

Signed-off-by: FUJITA Tomonori
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/cache.h                 | 2 +-
 arch/avr32/include/asm/cache.h               | 2 +-
 arch/blackfin/include/asm/cache.h            | 2 +-
 arch/frv/include/asm/mem-layout.h            | 2 +-
 arch/m68k/include/asm/cache.h                | 2 +-
 arch/microblaze/include/asm/page.h           | 2 +-
 arch/mips/include/asm/mach-generic/kmalloc.h | 2 +-
 arch/mips/include/asm/mach-ip27/kmalloc.h    | 2 +-
 arch/mips/include/asm/mach-ip32/kmalloc.h    | 4 ++--
 arch/mn10300/include/asm/cache.h             | 2 +-
 arch/powerpc/include/asm/page_32.h           | 2 +-
 arch/sh/include/asm/page.h                   | 2 +-
 arch/xtensa/include/asm/cache.h              | 2 +-
 include/linux/slab_def.h                     | 4 +++-
 include/linux/slob_def.h                     | 4 +++-
 include/linux/slub_def.h                     | 8 +++++---
 16 files changed, 25 insertions(+), 19 deletions(-)

(limited to 'arch/microblaze/include/asm/page.h')

diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 66c160b8547f..9d6122096fbe 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -14,7 +14,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 /*
  * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
index d3cf35ab11ab..c3a58a189a91 100644
--- a/arch/avr32/include/asm/cache.h
+++ b/arch/avr32/include/asm/cache.h
@@ -11,7 +11,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 #ifndef __ASSEMBLER__
 struct cache_info {
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 93f6c634fdf4..bd0641a267f1 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -15,7 +15,7 @@
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES	L1_CACHE_BYTES
 
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 #ifdef CONFIG_SMP
 #define __cacheline_aligned
diff --git a/arch/frv/include/asm/mem-layout.h b/arch/frv/include/asm/mem-layout.h
index ccae981876fa..e9a0ec85a402 100644
--- a/arch/frv/include/asm/mem-layout.h
+++ b/arch/frv/include/asm/mem-layout.h
@@ -35,7 +35,7 @@
  * the slab must be aligned such that load- and store-double instructions don't
  * fault if used
 */
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 #define ARCH_SLAB_MINALIGN	L1_CACHE_BYTES
 
 /*****************************************************************************/
diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
index ecafbe1718c3..0395c51e46a6 100644
--- a/arch/m68k/include/asm/cache.h
+++ b/arch/m68k/include/asm/cache.h
@@ -8,6 +8,6 @@
 #define L1_CACHE_SHIFT	4
 #define L1_CACHE_BYTES	(1<< L1_CACHE_SHIFT)
 
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 #endif
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 4f268faa0126..cf377d91da71 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -40,7 +40,7 @@
 #ifndef __ASSEMBLY__
 
 /* MS be sure that SLAB allocates aligned objects */
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 #define ARCH_SLAB_MINALIGN	L1_CACHE_BYTES
diff --git a/arch/mips/include/asm/mach-generic/kmalloc.h b/arch/mips/include/asm/mach-generic/kmalloc.h
index b8e6deba352f..a5d669086ed9 100644
--- a/arch/mips/include/asm/mach-generic/kmalloc.h
+++ b/arch/mips/include/asm/mach-generic/kmalloc.h
@@ -7,7 +7,7 @@
  * Total overkill for most systems but need as a safe default.
  * Set this one if any device in the system might do non-coherent DMA.
 */
-#define ARCH_KMALLOC_MINALIGN	128
+#define ARCH_DMA_MINALIGN	128
 #endif
 
 #endif /* __ASM_MACH_GENERIC_KMALLOC_H */
diff --git a/arch/mips/include/asm/mach-ip27/kmalloc.h b/arch/mips/include/asm/mach-ip27/kmalloc.h
index 426bd049b2d7..82c23ce2afa7 100644
--- a/arch/mips/include/asm/mach-ip27/kmalloc.h
+++ b/arch/mips/include/asm/mach-ip27/kmalloc.h
@@ -2,7 +2,7 @@
 #define __ASM_MACH_IP27_KMALLOC_H
 
 /*
- * All happy, no need to define ARCH_KMALLOC_MINALIGN
+ * All happy, no need to define ARCH_DMA_MINALIGN
 */
 
 #endif /* __ASM_MACH_IP27_KMALLOC_H */
diff --git a/arch/mips/include/asm/mach-ip32/kmalloc.h b/arch/mips/include/asm/mach-ip32/kmalloc.h
index b1e0be60f720..042ca926c48f 100644
--- a/arch/mips/include/asm/mach-ip32/kmalloc.h
+++ b/arch/mips/include/asm/mach-ip32/kmalloc.h
@@ -3,9 +3,9 @@
 
 #if defined(CONFIG_CPU_R5000) || defined(CONFIG_CPU_RM7000)
-#define ARCH_KMALLOC_MINALIGN	32
+#define ARCH_DMA_MINALIGN	32
 #else
-#define ARCH_KMALLOC_MINALIGN	128
+#define ARCH_DMA_MINALIGN	128
 #endif
 
 #endif /* __ASM_MACH_IP32_KMALLOC_H */
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 6e2fe28dde4e..781bf613366d 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -21,7 +21,7 @@
 #define L1_CACHE_DISPARITY	L1_CACHE_NENTRIES * L1_CACHE_BYTES
 #endif
 
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 /* data cache purge registers
  * - read from the register to unconditionally purge that cache line
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index bd0849dbcaaa..68d73b2a7bfc 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -10,7 +10,7 @@
 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 #endif
 
 #ifdef CONFIG_PTE_64BIT
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index fb703d120d09..c4e0b3d472b9 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -180,7 +180,7 @@ typedef struct page *pgtable_t;
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 #ifdef CONFIG_SUPERH64
 /*
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index ed8cd3cbd499..d2fd932fdb4d 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -29,6 +29,6 @@
 # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
 #endif
 
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 #endif	/* _XTENSA_CACHE_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 1acfa73ce2ac..791a502f6906 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -17,7 +17,6 @@
 #include
 
-#ifndef ARCH_KMALLOC_MINALIGN
 /*
  * Enforce a minimum alignment for the kmalloc caches.
  * Usually, the kmalloc caches are cache_line_size() aligned, except when
  * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
  * ARCH_KMALLOC_MINALIGN allows that.
  * Note that increasing this value may disable some debug features.
 */
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 62667f72c2ef..4382db09df4f 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,7 +1,9 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
-#ifndef ARCH_KMALLOC_MINALIGN
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
 #endif
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 6447a723ecb1..6d14409c4d9a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -106,15 +106,17 @@ struct kmem_cache {
 /*
  * Kmalloc subsystem.
 */
-#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
-#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
 #else
 #define KMALLOC_MIN_SIZE 8
 #endif
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
-#ifndef ARCH_KMALLOC_MINALIGN
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
--
cgit v1.2.3
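The slab_def.h/slob_def.h/slub_def.h hunks install one shared fallback pattern: an architecture with a DMA alignment restriction defines ARCH_DMA_MINALIGN and ARCH_KMALLOC_MINALIGN inherits it; otherwise kmalloc objects only need the natural alignment of an unsigned long long (unsigned long for slob). A standalone C sketch of that fallback (uncomment the first define to model a DMA-restricted machine such as the MIPS IP32 R5000 case above):

#include <stdio.h>

/* #define ARCH_DMA_MINALIGN 32 */      /* what e.g. mach-ip32 would provide */

#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN /* DMA-safe minimum wins */
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) /* gcc extension */
#endif

int main(void)
{
        /* prints 8 on typical hosts; 32 once the define above is enabled */
        printf("kmalloc minimum alignment: %zu bytes\n",
               (size_t)ARCH_KMALLOC_MINALIGN);
        return 0;
}

Keeping the slab-internal name distinct from the architecture-supplied one is what lets a unified dma_get_cache_alignment() test for ARCH_DMA_MINALIGN directly; with the old name, slab.h's default made that test meaningless.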