Diffstat (limited to 'arch')
257 files changed, 6005 insertions, 7135 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index f1cf895c040f..80bbb8ccd0d1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -336,6 +336,73 @@ config SECCOMP_FILTER See Documentation/prctl/seccomp_filter.txt for details. +config HAVE_CC_STACKPROTECTOR + bool + help + An arch should select this symbol if: + - its compiler supports the -fstack-protector option + - it has implemented a stack canary (e.g. __stack_chk_guard) + +config CC_STACKPROTECTOR + def_bool n + help + Set when a stack-protector mode is enabled, so that the build + can enable kernel-side support for the GCC feature. + +choice + prompt "Stack Protector buffer overflow detection" + depends on HAVE_CC_STACKPROTECTOR + default CC_STACKPROTECTOR_NONE + help + This option turns on the "stack-protector" GCC feature. This + feature puts, at the beginning of functions, a canary value on + the stack just before the return address, and validates + the value just before actually returning. Stack based buffer + overflows (that need to overwrite this return address) now also + overwrite the canary, which gets detected and the attack is then + neutralized via a kernel panic. + +config CC_STACKPROTECTOR_NONE + bool "None" + help + Disable "stack-protector" GCC feature. + +config CC_STACKPROTECTOR_REGULAR + bool "Regular" + select CC_STACKPROTECTOR + help + Functions will have the stack-protector canary logic added if they + have an 8-byte or larger character array on the stack. + + This feature requires gcc version 4.2 or above, or a distribution + gcc with the feature backported ("-fstack-protector"). + + On an x86 "defconfig" build, this feature adds canary checks to + about 3% of all kernel functions, which increases kernel code size + by about 0.3%. + +config CC_STACKPROTECTOR_STRONG + bool "Strong" + select CC_STACKPROTECTOR + help + Functions will have the stack-protector canary logic added in any + of the following conditions: + + - local variable's address used as part of the right hand side of an + assignment or function argument + - local variable is an array (or union containing an array), + regardless of array type or length + - uses register local variables + + This feature requires gcc version 4.9 or above, or a distribution + gcc with the feature backported ("-fstack-protector-strong"). + + On an x86 "defconfig" build, this feature adds canary checks to + about 20% of all kernel functions, which increases the kernel code + size by about 2%. 
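To make the difference between the two modes concrete, here is a small illustrative C file (not part of the patch; the file name and both functions are made up). With GCC's default --param=ssp-buffer-size=8, copy_name() gets a canary under both -fstack-protector and -fstack-protector-strong because of its character array, while read_value() is instrumented only by the strong variant, since all it does is let a local variable's address escape as a function argument:

/* stackprotector-demo.c -- illustrative only, not part of this patch.
 * Try:  gcc -fstack-protector-strong stackprotector-demo.c
 */
#include <string.h>

/* Character array of 8+ bytes: instrumented by "Regular" and "Strong". */
void copy_name(const char *src)
{
	char buf[16];		/* canary sits between buf and the return address */
	strcpy(buf, src);	/* an overflow here tramples the canary ... */
}				/* ... and __stack_chk_fail() fires on return */

/* No array, but a local's address is passed on: only "Strong" covers this. */
int read_value(int (*get)(int *))
{
	int v;
	return get(&v) ? v : -1;
}

int main(void)
{
	copy_name("ok");	/* well within bounds, so the canary survives */
	return 0;
}

Whether a given function was actually instrumented can be confirmed by looking for references to __stack_chk_guard and __stack_chk_fail in the generated assembly, the same two symbols the decompressor hunk further down provides for itself.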
+ +endchoice + config HAVE_CONTEXT_TRACKING bool help diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h index ce8860a0b32d..3832bdb794fe 100644 --- a/arch/alpha/include/asm/barrier.h +++ b/arch/alpha/include/asm/barrier.h @@ -3,33 +3,18 @@ #include <asm/compiler.h> -#define mb() \ -__asm__ __volatile__("mb": : :"memory") +#define mb() __asm__ __volatile__("mb": : :"memory") +#define rmb() __asm__ __volatile__("mb": : :"memory") +#define wmb() __asm__ __volatile__("wmb": : :"memory") -#define rmb() \ -__asm__ __volatile__("mb": : :"memory") - -#define wmb() \ -__asm__ __volatile__("wmb": : :"memory") - -#define read_barrier_depends() \ -__asm__ __volatile__("mb": : :"memory") +#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory") #ifdef CONFIG_SMP #define __ASM_SMP_MB "\tmb\n" -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() #else #define __ASM_SMP_MB -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) #endif -#define set_mb(var, value) \ -do { var = value; mb(); } while (0) +#include <asm-generic/barrier.h> #endif /* __BARRIER_H */ diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 5943f7f9d325..9ae21c198007 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -1,4 +1,5 @@ generic-y += auxvec.h +generic-y += barrier.h generic-y += bugs.h generic-y += bitsperlong.h generic-y += clkdev.h diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 83f03ca6caf6..03e494f695d1 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #endif /* !CONFIG_ARC_HAS_LLSC */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + /** * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h index f6cb7c4ffb35..c32245c3d1e9 100644 --- a/arch/arc/include/asm/barrier.h +++ b/arch/arc/include/asm/barrier.h @@ -30,11 +30,6 @@ #define smp_wmb() barrier() #endif -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #define smp_read_barrier_depends() do { } while (0) #endif diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c1f1a7eee953..9c909fc29272 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -30,6 +30,7 @@ config ARM select HAVE_BPF_JIT select HAVE_CONTEXT_TRACKING select HAVE_C_RECORDMCOUNT + select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_ATTRS @@ -1856,18 +1857,6 @@ config SECCOMP and the task is only allowed to execute a few safe syscalls defined by each seccomp mode. -config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. 
Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - This feature requires gcc version 4.2 or above. - config SWIOTLB def_bool y diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c99b1086d83d..55b4255ad6ed 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -40,10 +40,6 @@ ifeq ($(CONFIG_FRAME_POINTER),y) KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog endif -ifeq ($(CONFIG_CC_STACKPROTECTOR),y) -KBUILD_CFLAGS +=-fstack-protector -endif - ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) KBUILD_CPPFLAGS += -mbig-endian AS += -EB diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index 31bd43b82095..d4f891f56996 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -127,6 +127,18 @@ asmlinkage void __div0(void) error("Attempting division by 0!"); } +unsigned long __stack_chk_guard; + +void __stack_chk_guard_setup(void) +{ + __stack_chk_guard = 0x000a0dff; +} + +void __stack_chk_fail(void) +{ + error("stack-protector: Kernel stack is corrupted\n"); +} + extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)); @@ -137,6 +149,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p, { int ret; + __stack_chk_guard_setup(); + output_data = (unsigned char *)output_start; free_mem_ptr = free_mem_ptr_p; free_mem_end_ptr = free_mem_ptr_end_p; diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 60f15e274e6d..2f59f7443396 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -59,6 +59,21 @@ #define smp_wmb() dmb(ishst) #endif +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 141baa3f9a72..acabef1a75df 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -15,7 +15,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls (380) +#define __NR_syscalls (384) #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) #define __ARCH_WANT_STAT64 diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index af33b44990ed..fb5584d0cc05 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -406,6 +406,8 @@ #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) #define __NR_kcmp (__NR_SYSCALL_BASE+378) #define __NR_finit_module (__NR_SYSCALL_BASE+379) +#define __NR_sched_setattr (__NR_SYSCALL_BASE+380) +#define __NR_sched_getattr (__NR_SYSCALL_BASE+381) /* * This may need to be greater than __NR_last_syscall+1 in order to diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index c6ca7e376773..166e945de832 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -389,6 +389,8 @@ CALL(sys_process_vm_writev) CALL(sys_kcmp) CALL(sys_finit_module) +/* 380 */ CALL(sys_sched_setattr) + CALL(sys_sched_getattr) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index 
739c3dfc1da2..34d5fd585bbb 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void) bool arch_match_cpu_phys_id(int cpu, u64 phys_id) { - return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu); + return phys_id == cpu_logical_map(cpu); } static const void * __init arch_get_next_mach(const char *const **match) diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index bc3f2efa0d86..789d846a9184 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -99,10 +99,6 @@ int armpmu_event_set_period(struct perf_event *event) s64 period = hwc->sample_period; int ret = 0; - /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ - if (unlikely(period != hwc->last_period)) - left = period - (hwc->last_period - left); - if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index d85055cd24ba..20d553c9f5e2 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu) static int cpu_pmu_device_probe(struct platform_device *pdev) { const struct of_device_id *of_id; - int (*init_fn)(struct arm_pmu *); + const int (*init_fn)(struct arm_pmu *); struct device_node *node = pdev->dev.of_node; struct arm_pmu *pmu; int ret = -ENODEV; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 6eda3bf85c52..4636d56af2db 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -431,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) instr2 = __mem_to_opcode_thumb16(instr2); instr = __opcode_thumb32_compose(instr, instr2); } - } else if (get_user(instr, (u32 __user *)pc)) { + } else { + if (get_user(instr, (u32 __user *)pc)) + goto die_sig; instr = __mem_to_opcode_arm(instr); - goto die_sig; } if (call_undef_hook(regs, instr) == 0) diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index bd3bf66ce344..c7de89b263dd 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -53,6 +53,7 @@ static void __init highbank_scu_map_io(void) static void highbank_l2x0_disable(void) { + outer_flush_all(); /* Disable PL310 L2 Cache controller */ highbank_smc1(0x102, 0x0); } diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index b39efd46abf9..c0ab9b26be3d 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void) static void omap4_l2x0_disable(void) { + outer_flush_all(); /* Disable PL310 L2 Cache controller */ omap_smc1(0x102, 0x0); } diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 1f7b19a47060..3e8f106ee5fe 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc) #ifdef CONFIG_ZONE_DMA if (mdesc->dma_zone_size) { arm_dma_zone_size = mdesc->dma_zone_size; - arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1; + arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; } else arm_dma_limit = 0xffffffff; arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 9ed155ad0f97..271b5e971568 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -641,10 +641,10 @@ load_ind: emit(ARM_MUL(r_A, r_A, 
r_X), ctx); break; case BPF_S_ALU_DIV_K: - /* current k == reciprocal_value(userspace k) */ + if (k == 1) + break; emit_mov_i(r_scratch, k, ctx); - /* A = top 32 bits of the product */ - emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx); + emit_udiv(r_A, r_A, r_scratch, ctx); break; case BPF_S_ALU_DIV_X: update_on_xread(ctx); diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index d4a63338a53c..78e20ba8806b 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -35,10 +35,60 @@ #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #else + #define smp_mb() asm volatile("dmb ish" : : : "memory") #define smp_rmb() asm volatile("dmb ishld" : : : "memory") #define smp_wmb() asm volatile("dmb ishst" : : : "memory") + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 4: \ + asm volatile ("stlr %w1, %0" \ + : "=Q" (*p) : "r" (v) : "memory"); \ + break; \ + case 8: \ + asm volatile ("stlr %1, %0" \ + : "=Q" (*p) : "r" (v) : "memory"); \ + break; \ + } \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 4: \ + asm volatile ("ldar %w0, %1" \ + : "=r" (___p1) : "Q" (*p) : "memory"); \ + break; \ + case 8: \ + asm volatile ("ldar %0, %1" \ + : "=r" (___p1) : "Q" (*p) : "memory"); \ + break; \ + } \ + ___p1; \ +}) + #endif #define read_barrier_depends() do { } while(0) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 572769727227..4cc813eddacb 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot extern void __iounmap(volatile void __iomem *addr); extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); -#define PROT_DEFAULT (pgprot_default | PTE_DIRTY) +#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) #define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h index 0961275373db..715100790fd0 100644 --- a/arch/avr32/include/asm/barrier.h +++ b/arch/avr32/include/asm/barrier.h @@ -8,22 +8,15 @@ #ifndef __ASM_AVR32_BARRIER_H #define __ASM_AVR32_BARRIER_H -#define nop() asm volatile("nop") - -#define mb() asm volatile("" : : : "memory") -#define rmb() mb() -#define wmb() asm volatile("sync 0" : : : "memory") -#define read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; mb(); } while(0) +/* + * Weirdest thing ever.. no full barrier, but it has a write barrier! 
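The smp_store_release()/smp_load_acquire() helpers introduced for ARM further up, and for arm64 in the hunk above, provide one-way barrier semantics: every store issued before the release is visible to any CPU that observes the released value, and no access after the acquire may be reordered before it. A rough userspace analogue of the intended publish/consume usage, written with C11 atomics purely for illustration (the kernel primitives are the macros in the hunks, not this code):

/* acquire-release-demo.c -- userspace sketch, illustrative only.
 * In the kernel, producer() and consumer() would run on different CPUs and
 * use smp_store_release()/smp_load_acquire() instead of C11 atomics.
 */
#include <stdatomic.h>
#include <stdio.h>

static int payload;		/* plain data being published */
static atomic_int ready;	/* the "release" flag */

static void producer(void)
{
	payload = 42;
	/* release: the payload store is ordered before the flag store */
	atomic_store_explicit(&ready, 1, memory_order_release);
}

static int consumer(void)
{
	/* acquire: the payload read cannot be hoisted above the flag check */
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;			/* spin until published */
	return payload;			/* guaranteed to see 42 */
}

int main(void)
{
	producer();
	printf("%d\n", consumer());
	return 0;
}

On SMP arm64 these map straight onto the stlr and ldar instructions; 32-bit ARM and the non-SMP arm64 configuration fall back to smp_mb() paired with a plain ACCESS_ONCE() access, exactly as the macros above spell out.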
+ */ +#define wmb() asm volatile("sync 0" : : : "memory") #ifdef CONFIG_SMP # error "The AVR32 port does not support SMP" -#else -# define smp_mb() barrier() -# define smp_rmb() barrier() -# define smp_wmb() barrier() -# define smp_read_barrier_depends() do { } while(0) #endif +#include <asm-generic/barrier.h> #endif /* __ASM_AVR32_BARRIER_H */ diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h index ebb189507dd7..19283a16ac08 100644 --- a/arch/blackfin/include/asm/barrier.h +++ b/arch/blackfin/include/asm/barrier.h @@ -23,26 +23,10 @@ # define rmb() do { barrier(); smp_check_barrier(); } while (0) # define wmb() do { barrier(); smp_mark_barrier(); } while (0) # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) -#else -# define mb() barrier() -# define rmb() barrier() -# define wmb() barrier() -# define read_barrier_depends() do { } while (0) #endif -#else /* !CONFIG_SMP */ - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define read_barrier_depends() do { } while (0) - #endif /* !CONFIG_SMP */ -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define smp_read_barrier_depends() read_barrier_depends() +#include <asm-generic/barrier.h> #endif /* _BLACKFIN_BARRIER_H */ diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b06caf649a95..199b1a9dab89 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -3,6 +3,7 @@ header-y += arch-v10/ header-y += arch-v32/ +generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h generic-y += kvm_para.h diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h deleted file mode 100644 index 198ad7fa6b25..000000000000 --- a/arch/cris/include/asm/barrier.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __ASM_CRIS_BARRIER_H -#define __ASM_CRIS_BARRIER_H - -#define nop() __asm__ __volatile__ ("nop"); - -#define barrier() __asm__ __volatile__("": : :"memory") -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#endif /* __ASM_CRIS_BARRIER_H */ diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h index 06776ad9f5e9..abbef470154c 100644 --- a/arch/frv/include/asm/barrier.h +++ b/arch/frv/include/asm/barrier.h @@ -17,13 +17,7 @@ #define mb() asm volatile ("membar" : : :"memory") #define rmb() asm volatile ("membar" : : :"memory") #define wmb() asm volatile ("membar" : : :"memory") -#define read_barrier_depends() do { } while (0) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do {} while(0) -#define set_mb(var, value) \ - do { var = (value); barrier(); } while (0) +#include <asm-generic/barrier.h> #endif /* _ASM_BARRIER_H */ diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 67c3450309b7..ada843c701ef 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -2,6 +2,7 @@ header-y += ucontext.h generic-y += auxvec.h +generic-y += 
barrier.h generic-y += bug.h generic-y += bugs.h generic-y += clkdev.h diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 8a64ff2337f6..7aae4cb2a29a 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0) #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0) - #define atomic_inc_return(v) (atomic_add_return(1, v)) #define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + #endif diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h index 1041a8e70ce8..4e863daea25b 100644 --- a/arch/hexagon/include/asm/barrier.h +++ b/arch/hexagon/include/asm/barrier.h @@ -29,10 +29,6 @@ #define smp_read_barrier_depends() barrier() #define smp_wmb() barrier() #define smp_mb() barrier() -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() /* Set a value and use a memory barrier. Used by the scheduler somewhere. */ #define set_mb(var, value) \ diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 4e4119b0e691..a8c3a11dc5ab 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -147,9 +147,6 @@ config PARAVIRT over full virtualization. However, when run without a hypervisor the kernel is theoretically slower and slightly larger. - -source "arch/ia64/xen/Kconfig" - endif choice @@ -175,7 +172,6 @@ config IA64_GENERIC SGI-SN2 For SGI Altix systems SGI-UV For SGI UV systems Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> - Xen-domU For xen domU system If you don't know what to do, choose "generic". @@ -231,14 +227,6 @@ config IA64_HP_SIM bool "Ski-simulator" select SWIOTLB -config IA64_XEN_GUEST - bool "Xen guest" - select SWIOTLB - depends on XEN - help - Build a kernel that runs on Xen guest domain. At this moment only - 16KB page size in supported. 
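A pattern repeated in several hunks above (alpha, avr32, blackfin, cris, frv, hexagon) is that an architecture now defines only the barriers it truly implements differently and then includes asm-generic/barrier.h, whose defaults are wrapped in #ifndef so they quietly step aside wherever the arch has already supplied its own. A compilable toy sketch of that scheme, with made-up macros standing in for the real headers:

/* barrier-consolidation-sketch.c -- illustrative only, not the real headers.
 * Shows the #ifndef-guarded default scheme behind the asm-generic/barrier.h
 * conversions in this series.
 */
#define barrier()	__asm__ __volatile__("" : : : "memory")

/* "arch" half: override only what is special (here, just a write barrier). */
#define wmb()		__asm__ __volatile__("" : : : "memory")	/* stands in for e.g. "sync 0" */

/* "asm-generic" half: fill in whatever the arch left undefined. */
#ifndef mb
#define mb()		barrier()
#endif
#ifndef rmb
#define rmb()		mb()
#endif
#ifndef wmb
#define wmb()		mb()	/* skipped: the arch already defined wmb() above */
#endif

int main(void)
{
	mb();
	rmb();
	wmb();
	return 0;
}

Architectures that need nothing special at all can go one step further and drop their asm/barrier.h entirely, listing it as generic-y in Kbuild instead, which is exactly what the cris hunks above do.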
- endchoice choice diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index be7bfa12b705..f37238f45bcd 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -51,11 +51,9 @@ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ -core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ core-$(CONFIG_KVM) += arch/ia64/kvm/ -core-$(CONFIG_XEN) += arch/ia64/xen/ drivers-$(CONFIG_PCI) += arch/ia64/pci/ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig deleted file mode 100644 index b025acfde5c1..000000000000 --- a/arch/ia64/configs/xen_domu_defconfig +++ /dev/null @@ -1,199 +0,0 @@ -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=20 -CONFIG_SYSFS_DEPRECATED_V2=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARAVIRT_GUEST=y -CONFIG_IA64_XEN_GUEST=y -CONFIG_MCKINLEY=y -CONFIG_IA64_CYCLONE=y -CONFIG_SMP=y -CONFIG_NR_CPUS=16 -CONFIG_HOTPLUG_CPU=y -CONFIG_PERMIT_BSP_REMOVE=y -CONFIG_FORCE_CPEI_RETARGET=y -CONFIG_IA64_MCA_RECOVERY=y -CONFIG_PERFMON=y -CONFIG_IA64_PALINFO=y -CONFIG_KEXEC=y -CONFIG_EFI_VARS=y -CONFIG_BINFMT_MISC=m -CONFIG_ACPI_PROCFS=y -CONFIG_ACPI_BUTTON=m -CONFIG_ACPI_FAN=m -CONFIG_ACPI_PROCESSOR=m -CONFIG_ACPI_CONTAINER=m -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=m -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_ARPD=y -CONFIG_SYN_COOKIES=y -# CONFIG_INET_LRO is not set -# CONFIG_IPV6 is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_CMD64X=y -CONFIG_BLK_DEV_PIIX=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_SYM53C8XX_2=y -CONFIG_SCSI_QLOGIC_1280=y -CONFIG_MD=y -CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_MULTIPATH=m -CONFIG_BLK_DEV_DM=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_MIRROR=m -CONFIG_DM_ZERO=m -CONFIG_FUSION=y -CONFIG_FUSION_SPI=y -CONFIG_FUSION_FC=y -CONFIG_FUSION_CTL=y -CONFIG_NETDEVICES=y -CONFIG_DUMMY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_TULIP=y -CONFIG_TULIP=m -CONFIG_NET_PCI=y -CONFIG_NET_VENDOR_INTEL=y -CONFIG_E100=m -CONFIG_E1000=y -CONFIG_TIGON3=y -CONFIG_NETCONSOLE=y -# CONFIG_SERIO_SERPORT is not set -CONFIG_GAMEPORT=m -CONFIG_SERIAL_NONSTANDARD=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=6 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_HW_RANDOM is not set -CONFIG_EFI_RTC=y -CONFIG_RAW_DRIVER=m -CONFIG_HPET=y -CONFIG_AGP=m -CONFIG_DRM=m -CONFIG_DRM_TDFX=m -CONFIG_DRM_R128=m -CONFIG_DRM_RADEON=m -CONFIG_DRM_MGA=m -CONFIG_DRM_SIS=m -CONFIG_HID_GYRATION=y -CONFIG_HID_NTRIG=y -CONFIG_HID_PANTHERLORD=y -CONFIG_HID_PETALYNX=y -CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y -CONFIG_HID_SUNPLUS=y -CONFIG_HID_TOPSEED=y -CONFIG_USB=y -CONFIG_USB_DEVICEFS=y -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_UHCI_HCD=y -CONFIG_USB_STORAGE=m 
-CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_REISERFS_FS=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -CONFIG_XFS_FS=y -CONFIG_AUTOFS_FS=y -CONFIG_AUTOFS4_FS=y -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_UDF_FS=m -CONFIG_VFAT_FS=y -CONFIG_NTFS_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_HUGETLBFS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFS_V4=y -CONFIG_NFSD=m -CONFIG_NFSD_V4=y -CONFIG_SMB_FS=m -CONFIG_SMB_NLS_DEFAULT=y -CONFIG_CIFS=m -CONFIG_PARTITION_ADVANCED=y -CONFIG_SGI_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=m -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MUTEXES=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_IA64_GRANULE_16MB=y -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index faa1bf0da815..d651102a4d45 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -111,8 +111,6 @@ static inline const char *acpi_get_sysname (void) return "uv"; # elif defined (CONFIG_IA64_DIG) return "dig"; -# elif defined (CONFIG_IA64_XEN_GUEST) - return "xen"; # elif defined(CONFIG_IA64_DIG_VTD) return "dig_vtd"; # else diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index 60576e06b6fb..d0a69aa35e27 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -45,14 +45,37 @@ # define smp_rmb() rmb() # define smp_wmb() wmb() # define smp_read_barrier_depends() read_barrier_depends() + #else + # define smp_mb() barrier() # define smp_rmb() barrier() # define smp_wmb() barrier() # define smp_read_barrier_depends() do { } while(0) + #endif /* + * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no + * need for asm trickery! + */ + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + +/* * XXX check on this ---I suspect what Linus really wants here is * acquire vs release semantics but we can't discuss this stuff with * Linus just yet. Grrr... 
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 2d1ad4b11a85..9c39bdfc2da8 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h @@ -113,8 +113,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); # include <asm/machvec_sn2.h> # elif defined (CONFIG_IA64_SGI_UV) # include <asm/machvec_uv.h> -# elif defined (CONFIG_IA64_XEN_GUEST) -# include <asm/machvec_xen.h> # elif defined (CONFIG_IA64_GENERIC) # ifdef MACHVEC_PLATFORM_HEADER diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h deleted file mode 100644 index 8b8bd0eb3923..000000000000 --- a/arch/ia64/include/asm/machvec_xen.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _ASM_IA64_MACHVEC_XEN_h -#define _ASM_IA64_MACHVEC_XEN_h - -extern ia64_mv_setup_t dig_setup; -extern ia64_mv_cpu_init_t xen_cpu_init; -extern ia64_mv_irq_init_t xen_irq_init; -extern ia64_mv_send_ipi_t xen_platform_send_ipi; - -/* - * This stuff has dual use! - * - * For a generic kernel, the macros are used to initialize the - * platform's machvec structure. When compiling a non-generic kernel, - * the macros are used directly. - */ -#define ia64_platform_name "xen" -#define platform_setup dig_setup -#define platform_cpu_init xen_cpu_init -#define platform_irq_init xen_irq_init -#define platform_send_ipi xen_platform_send_ipi - -#endif /* _ASM_IA64_MACHVEC_XEN_h */ diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h index 61c7b1750b16..092f1c91b36c 100644 --- a/arch/ia64/include/asm/meminit.h +++ b/arch/ia64/include/asm/meminit.h @@ -18,7 +18,6 @@ * - crash dumping code reserved region * - Kernel memory map built from EFI memory map * - ELF core header - * - xen start info if CONFIG_XEN * * More could be added if necessary */ diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h index b149b88ea795..b53518a98026 100644 --- a/arch/ia64/include/asm/paravirt.h +++ b/arch/ia64/include/asm/paravirt.h @@ -75,7 +75,6 @@ void *paravirt_get_gate_section(void); #ifdef CONFIG_PARAVIRT_GUEST #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 -#define PARAVIRT_HYPERVISOR_TYPE_XEN 1 #ifndef __ASSEMBLY__ diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h index 44ef9ef8f5b3..42b233bedeb5 100644 --- a/arch/ia64/include/asm/pvclock-abi.h +++ b/arch/ia64/include/asm/pvclock-abi.h @@ -11,7 +11,7 @@ /* * These structs MUST NOT be changed. * They are the ABI between hypervisor and guest OS. - * Both Xen and KVM are using this. + * KVM is using this. * * pvclock_vcpu_time_info holds the system time and the tsc timestamp * of the last update. So the guest can use the tsc delta to get a diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h deleted file mode 100644 index 593c12eeb270..000000000000 --- a/arch/ia64/include/asm/sync_bitops.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef _ASM_IA64_SYNC_BITOPS_H -#define _ASM_IA64_SYNC_BITOPS_H - -/* - * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp> - * - * Based on synch_bitops.h which Dan Magenhaimer wrote. - * - * bit operations which provide guaranteed strong synchronisation - * when communicating with Xen or other guest OSes running on other CPUs. 
- */ - -static inline void sync_set_bit(int nr, volatile void *addr) -{ - set_bit(nr, addr); -} - -static inline void sync_clear_bit(int nr, volatile void *addr) -{ - clear_bit(nr, addr); -} - -static inline void sync_change_bit(int nr, volatile void *addr) -{ - change_bit(nr, addr); -} - -static inline int sync_test_and_set_bit(int nr, volatile void *addr) -{ - return test_and_set_bit(nr, addr); -} - -static inline int sync_test_and_clear_bit(int nr, volatile void *addr) -{ - return test_and_clear_bit(nr, addr); -} - -static inline int sync_test_and_change_bit(int nr, volatile void *addr) -{ - return test_and_change_bit(nr, addr); -} - -static inline int sync_test_bit(int nr, const volatile void *addr) -{ - return test_bit(nr, addr); -} - -#define sync_cmpxchg(ptr, old, new) \ - ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new))) - -#endif /* _ASM_IA64_SYNC_BITOPS_H */ diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h deleted file mode 100644 index baa74c82aa71..000000000000 --- a/arch/ia64/include/asm/xen/events.h +++ /dev/null @@ -1,41 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/events.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ -#ifndef _ASM_IA64_XEN_EVENTS_H -#define _ASM_IA64_XEN_EVENTS_H - -enum ipi_vector { - XEN_RESCHEDULE_VECTOR, - XEN_IPI_VECTOR, - XEN_CMCP_VECTOR, - XEN_CPEP_VECTOR, - - XEN_NR_IPIS, -}; - -static inline int xen_irqs_disabled(struct pt_regs *regs) -{ - return !(ia64_psr(regs)->i); -} - -#define irq_ctx_init(cpu) do { } while (0) - -#endif /* _ASM_IA64_XEN_EVENTS_H */ diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h deleted file mode 100644 index ed28bcd5bb85..000000000000 --- a/arch/ia64/include/asm/xen/hypercall.h +++ /dev/null @@ -1,265 +0,0 @@ -/****************************************************************************** - * hypercall.h - * - * Linux-specific hypervisor handling. 
- * - * Copyright (c) 2002-2004, K A Fraser - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation; or, when distributed - * separately from the Linux kernel or incorporated into other - * software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef _ASM_IA64_XEN_HYPERCALL_H -#define _ASM_IA64_XEN_HYPERCALL_H - -#include <xen/interface/xen.h> -#include <xen/interface/physdev.h> -#include <xen/interface/sched.h> -#include <asm/xen/xcom_hcall.h> -struct xencomm_handle; -extern unsigned long __hypercall(unsigned long a1, unsigned long a2, - unsigned long a3, unsigned long a4, - unsigned long a5, unsigned long cmd); - -/* - * Assembler stubs for hyper-calls. 
- */ - -#define _hypercall0(type, name) \ -({ \ - long __res; \ - __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\ - (type)__res; \ -}) - -#define _hypercall1(type, name, a1) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - 0, 0, 0, 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall2(type, name, a1, a2) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - 0, 0, 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall3(type, name, a1, a2, a3) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - (unsigned long)a3, \ - 0, 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall4(type, name, a1, a2, a3, a4) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - (unsigned long)a3, \ - (unsigned long)a4, \ - 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - (unsigned long)a3, \ - (unsigned long)a4, \ - (unsigned long)a5, \ - __HYPERVISOR_##name); \ - (type)__res; \ -}) - - -static inline int -xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, sched_op, cmd, arg); -} - -static inline long -HYPERVISOR_set_timer_op(u64 timeout) -{ - unsigned long timeout_hi = (unsigned long)(timeout >> 32); - unsigned long timeout_lo = (unsigned long)timeout; - return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); -} - -static inline int -xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list, - int nr_calls) -{ - return _hypercall2(int, multicall, call_list, nr_calls); -} - -static inline int -xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, memory_op, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, event_channel_op, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, xen_version, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_console_io(int cmd, int count, - struct xencomm_handle *str) -{ - return _hypercall3(int, console_io, cmd, count, str); -} - -static inline int -xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, physdev_op, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_grant_table_op(unsigned int cmd, - struct xencomm_handle *uop, - unsigned int count) -{ - return _hypercall3(int, grant_table_op, cmd, uop, count); -} - -int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); - -extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg); - -static inline int -xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, callback_op, cmd, arg); -} - -static inline long -xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg) -{ - return _hypercall3(long, vcpu_op, cmd, cpu, arg); -} - -static inline int -HYPERVISOR_physdev_op(int cmd, void *arg) -{ - switch (cmd) { - case PHYSDEVOP_eoi: - return _hypercall1(int, ia64_fast_eoi, - ((struct physdev_eoi *)arg)->irq); - default: - return xencomm_hypercall_physdev_op(cmd, arg); - } -} - -static inline long -xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg) -{ - return 
_hypercall1(long, opt_feature, arg); -} - -/* for balloon driver */ -#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0) - -/* Use xencomm to do hypercalls. */ -#define HYPERVISOR_sched_op xencomm_hypercall_sched_op -#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op -#define HYPERVISOR_callback_op xencomm_hypercall_callback_op -#define HYPERVISOR_multicall xencomm_hypercall_multicall -#define HYPERVISOR_xen_version xencomm_hypercall_xen_version -#define HYPERVISOR_console_io xencomm_hypercall_console_io -#define HYPERVISOR_memory_op xencomm_hypercall_memory_op -#define HYPERVISOR_suspend xencomm_hypercall_suspend -#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op -#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature - -/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */ -#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; }) - -static inline int -HYPERVISOR_shutdown( - unsigned int reason) -{ - struct sched_shutdown sched_shutdown = { - .reason = reason - }; - - int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); - - return rc; -} - -/* for netfront.c, netback.c */ -#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */ - -static inline void -MULTI_update_va_mapping( - struct multicall_entry *mcl, unsigned long va, - pte_t new_val, unsigned long flags) -{ - mcl->op = __HYPERVISOR_update_va_mapping; - mcl->result = 0; -} - -static inline void -MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd, - void *uop, unsigned int count) -{ - mcl->op = __HYPERVISOR_grant_table_op; - mcl->args[0] = cmd; - mcl->args[1] = (unsigned long)uop; - mcl->args[2] = count; -} - -static inline void -MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, - int count, int *success_count, domid_t domid) -{ - mcl->op = __HYPERVISOR_mmu_update; - mcl->args[0] = (unsigned long)req; - mcl->args[1] = count; - mcl->args[2] = (unsigned long)success_count; - mcl->args[3] = domid; -} - -#endif /* _ASM_IA64_XEN_HYPERCALL_H */ diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h deleted file mode 100644 index 67455c2ed2b1..000000000000 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ /dev/null @@ -1,61 +0,0 @@ -/****************************************************************************** - * hypervisor.h - * - * Linux-specific hypervisor handling. - * - * Copyright (c) 2002-2004, K A Fraser - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation; or, when distributed - * separately from the Linux kernel or incorporated into other - * software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef _ASM_IA64_XEN_HYPERVISOR_H -#define _ASM_IA64_XEN_HYPERVISOR_H - -#include <linux/err.h> -#include <xen/interface/xen.h> -#include <xen/interface/version.h> /* to compile feature.c */ -#include <xen/features.h> /* to comiple xen-netfront.c */ -#include <xen/xen.h> -#include <asm/xen/hypercall.h> - -#ifdef CONFIG_XEN -extern struct shared_info *HYPERVISOR_shared_info; -extern struct start_info *xen_start_info; - -void __init xen_setup_vcpu_info_placement(void); -void force_evtchn_callback(void); - -/* for drivers/xen/balloon/balloon.c */ -#ifdef CONFIG_XEN_SCRUB_PAGES -#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT) -#else -#define scrub_pages(_p, _n) ((void)0) -#endif - -/* For setup_arch() in arch/ia64/kernel/setup.c */ -void xen_ia64_enable_opt_feature(void); -#endif - -#endif /* _ASM_IA64_XEN_HYPERVISOR_H */ diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h deleted file mode 100644 index c53a47611208..000000000000 --- a/arch/ia64/include/asm/xen/inst.h +++ /dev/null @@ -1,486 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/inst.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <asm/xen/privop.h> - -#define ia64_ivt xen_ivt -#define DO_SAVE_MIN XEN_DO_SAVE_MIN - -#define __paravirt_switch_to xen_switch_to -#define __paravirt_leave_syscall xen_leave_syscall -#define __paravirt_work_processed_syscall xen_work_processed_syscall -#define __paravirt_leave_kernel xen_leave_kernel -#define __paravirt_pending_syscall_end xen_work_pending_syscall_end -#define __paravirt_work_processed_syscall_target \ - xen_work_processed_syscall - -#define paravirt_fsyscall_table xen_fsyscall_table -#define paravirt_fsys_bubble_down xen_fsys_bubble_down - -#define MOV_FROM_IFA(reg) \ - movl reg = XSI_IFA; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_ITIR(reg) \ - movl reg = XSI_ITIR; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_ISR(reg) \ - movl reg = XSI_ISR; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_IHA(reg) \ - movl reg = XSI_IHA; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_IPSR(pred, reg) \ -(pred) movl reg = XSI_IPSR; \ - ;; \ -(pred) ld8 reg = [reg] - -#define MOV_FROM_IIM(reg) \ - movl reg = XSI_IIM; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_IIP(reg) \ - movl reg = XSI_IIP; \ - ;; \ - ld8 reg = [reg] - -.macro __MOV_FROM_IVR reg, clob - .ifc "\reg", "r8" - XEN_HYPER_GET_IVR - .exitm - .endif - .ifc "\clob", "r8" - XEN_HYPER_GET_IVR - ;; - mov \reg = r8 - .exitm - .endif - - mov \clob = r8 - ;; - XEN_HYPER_GET_IVR - ;; - mov \reg = r8 - ;; - mov r8 = \clob -.endm -#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob - -.macro __MOV_FROM_PSR pred, reg, clob - .ifc "\reg", "r8" - (\pred) XEN_HYPER_GET_PSR; - .exitm - .endif - .ifc "\clob", "r8" - (\pred) XEN_HYPER_GET_PSR - ;; - (\pred) mov \reg = r8 - .exitm - .endif - - (\pred) mov \clob = r8 - (\pred) XEN_HYPER_GET_PSR - ;; - (\pred) mov \reg = r8 - (\pred) mov r8 = \clob -.endm -#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob - -/* assuming ar.itc is read with interrupt disabled. 
*/ -#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ -(pred) movl clob = XSI_ITC_OFFSET; \ - ;; \ -(pred) ld8 clob = [clob]; \ -(pred) mov reg = ar.itc; \ - ;; \ -(pred) add reg = reg, clob; \ - ;; \ -(pred) movl clob = XSI_ITC_LAST; \ - ;; \ -(pred) ld8 clob = [clob]; \ - ;; \ -(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \ - ;; \ -(pred_clob) add reg = 1, clob; \ - ;; \ -(pred) movl clob = XSI_ITC_LAST; \ - ;; \ -(pred) st8 [clob] = reg - - -#define MOV_TO_IFA(reg, clob) \ - movl clob = XSI_IFA; \ - ;; \ - st8 [clob] = reg \ - -#define MOV_TO_ITIR(pred, reg, clob) \ -(pred) movl clob = XSI_ITIR; \ - ;; \ -(pred) st8 [clob] = reg - -#define MOV_TO_IHA(pred, reg, clob) \ -(pred) movl clob = XSI_IHA; \ - ;; \ -(pred) st8 [clob] = reg - -#define MOV_TO_IPSR(pred, reg, clob) \ -(pred) movl clob = XSI_IPSR; \ - ;; \ -(pred) st8 [clob] = reg; \ - ;; - -#define MOV_TO_IFS(pred, reg, clob) \ -(pred) movl clob = XSI_IFS; \ - ;; \ -(pred) st8 [clob] = reg; \ - ;; - -#define MOV_TO_IIP(reg, clob) \ - movl clob = XSI_IIP; \ - ;; \ - st8 [clob] = reg - -.macro ____MOV_TO_KR kr, reg, clob0, clob1 - .ifc "\clob0", "r9" - .error "clob0 \clob0 must not be r9" - .endif - .ifc "\clob1", "r8" - .error "clob1 \clob1 must not be r8" - .endif - - .ifnc "\reg", "r9" - .ifnc "\clob1", "r9" - mov \clob1 = r9 - .endif - mov r9 = \reg - .endif - .ifnc "\clob0", "r8" - mov \clob0 = r8 - .endif - mov r8 = \kr - ;; - XEN_HYPER_SET_KR - - .ifnc "\reg", "r9" - .ifnc "\clob1", "r9" - mov r9 = \clob1 - .endif - .endif - .ifnc "\clob0", "r8" - mov r8 = \clob0 - .endif -.endm - -.macro __MOV_TO_KR kr, reg, clob0, clob1 - .ifc "\clob0", "r9" - ____MOV_TO_KR \kr, \reg, \clob1, \clob0 - .exitm - .endif - .ifc "\clob1", "r8" - ____MOV_TO_KR \kr, \reg, \clob1, \clob0 - .exitm - .endif - - ____MOV_TO_KR \kr, \reg, \clob0, \clob1 -.endm - -#define MOV_TO_KR(kr, reg, clob0, clob1) \ - __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1 - - -.macro __ITC_I pred, reg, clob - .ifc "\reg", "r8" - (\pred) XEN_HYPER_ITC_I - .exitm - .endif - .ifc "\clob", "r8" - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_I - .exitm - .endif - - (\pred) mov \clob = r8 - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_I - ;; - (\pred) mov r8 = \clob - ;; -.endm -#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob - -.macro __ITC_D pred, reg, clob - .ifc "\reg", "r8" - (\pred) XEN_HYPER_ITC_D - ;; - .exitm - .endif - .ifc "\clob", "r8" - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_D - ;; - .exitm - .endif - - (\pred) mov \clob = r8 - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_D - ;; - (\pred) mov r8 = \clob - ;; -.endm -#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob - -.macro __ITC_I_AND_D pred_i, pred_d, reg, clob - .ifc "\reg", "r8" - (\pred_i)XEN_HYPER_ITC_I - ;; - (\pred_d)XEN_HYPER_ITC_D - ;; - .exitm - .endif - .ifc "\clob", "r8" - mov r8 = \reg - ;; - (\pred_i)XEN_HYPER_ITC_I - ;; - (\pred_d)XEN_HYPER_ITC_D - ;; - .exitm - .endif - - mov \clob = r8 - mov r8 = \reg - ;; - (\pred_i)XEN_HYPER_ITC_I - ;; - (\pred_d)XEN_HYPER_ITC_D - ;; - mov r8 = \clob - ;; -.endm -#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \ - __ITC_I_AND_D pred_i, pred_d, reg, clob - -.macro __THASH pred, reg0, reg1, clob - .ifc "\reg0", "r8" - (\pred) mov r8 = \reg1 - (\pred) XEN_HYPER_THASH - .exitm - .endc - .ifc "\reg1", "r8" - (\pred) XEN_HYPER_THASH - ;; - (\pred) mov \reg0 = r8 - ;; - .exitm - .endif - .ifc "\clob", "r8" - (\pred) mov r8 = \reg1 - (\pred) XEN_HYPER_THASH - ;; - (\pred) mov \reg0 = r8 - ;; - .exitm - .endif - - (\pred) mov 
\clob = r8 - (\pred) mov r8 = \reg1 - (\pred) XEN_HYPER_THASH - ;; - (\pred) mov \reg0 = r8 - (\pred) mov r8 = \clob - ;; -.endm -#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob - -#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \ - mov clob0 = 1; \ - movl clob1 = XSI_PSR_IC; \ - ;; \ - st4 [clob1] = clob0 \ - ;; - -#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \ - ;; \ - srlz.d; \ - mov clob1 = 1; \ - movl clob0 = XSI_PSR_IC; \ - ;; \ - st4 [clob0] = clob1 - -#define RSM_PSR_IC(clob) \ - movl clob = XSI_PSR_IC; \ - ;; \ - st4 [clob] = r0; \ - ;; - -/* pred will be clobbered */ -#define MASK_TO_PEND_OFS (-1) -#define SSM_PSR_I(pred, pred_clob, clob) \ -(pred) movl clob = XSI_PSR_I_ADDR \ - ;; \ -(pred) ld8 clob = [clob] \ - ;; \ - /* if (pred) vpsr.i = 1 */ \ - /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \ -(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \ - ;; \ - /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \ -(pred) ld1 clob = [clob] \ - ;; \ -(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \ - ;; \ -(pred_clob)XEN_HYPER_SSM_I /* do areal ssm psr.i */ - -#define RSM_PSR_I(pred, clob0, clob1) \ - movl clob0 = XSI_PSR_I_ADDR; \ - mov clob1 = 1; \ - ;; \ - ld8 clob0 = [clob0]; \ - ;; \ -(pred) st1 [clob0] = clob1 - -#define RSM_PSR_I_IC(clob0, clob1, clob2) \ - movl clob0 = XSI_PSR_I_ADDR; \ - movl clob1 = XSI_PSR_IC; \ - ;; \ - ld8 clob0 = [clob0]; \ - mov clob2 = 1; \ - ;; \ - /* note: clears both vpsr.i and vpsr.ic! */ \ - st1 [clob0] = clob2; \ - st4 [clob1] = r0; \ - ;; - -#define RSM_PSR_DT \ - XEN_HYPER_RSM_PSR_DT - -#define RSM_PSR_BE_I(clob0, clob1) \ - RSM_PSR_I(p0, clob0, clob1); \ - rum psr.be - -#define SSM_PSR_DT_AND_SRLZ_I \ - XEN_HYPER_SSM_PSR_DT - -#define BSW_0(clob0, clob1, clob2) \ - ;; \ - /* r16-r31 all now hold bank1 values */ \ - mov clob2 = ar.unat; \ - movl clob0 = XSI_BANK1_R16; \ - movl clob1 = XSI_BANK1_R16 + 8; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \ - ;; \ - mov clob1 = ar.unat; \ - movl clob0 = XSI_B1NAT; \ - ;; \ - st8 [clob0] = clob1; \ - mov ar.unat = clob2; \ - movl clob0 = XSI_BANKNUM; \ - ;; \ - st4 [clob0] = r0 - - - /* FIXME: THIS CODE IS NOT NaT SAFE! 
*/ -#define XEN_BSW_1(clob) \ - mov clob = ar.unat; \ - movl r30 = XSI_B1NAT; \ - ;; \ - ld8 r30 = [r30]; \ - mov r31 = 1; \ - ;; \ - mov ar.unat = r30; \ - movl r30 = XSI_BANKNUM; \ - ;; \ - st4 [r30] = r31; \ - movl r30 = XSI_BANK1_R16; \ - movl r31 = XSI_BANK1_R16+8; \ - ;; \ - ld8.fill r16 = [r30], 16; \ - ld8.fill r17 = [r31], 16; \ - ;; \ - ld8.fill r18 = [r30], 16; \ - ld8.fill r19 = [r31], 16; \ - ;; \ - ld8.fill r20 = [r30], 16; \ - ld8.fill r21 = [r31], 16; \ - ;; \ - ld8.fill r22 = [r30], 16; \ - ld8.fill r23 = [r31], 16; \ - ;; \ - ld8.fill r24 = [r30], 16; \ - ld8.fill r25 = [r31], 16; \ - ;; \ - ld8.fill r26 = [r30], 16; \ - ld8.fill r27 = [r31], 16; \ - ;; \ - ld8.fill r28 = [r30], 16; \ - ld8.fill r29 = [r31], 16; \ - ;; \ - ld8.fill r30 = [r30]; \ - ld8.fill r31 = [r31]; \ - ;; \ - mov ar.unat = clob - -#define BSW_1(clob0, clob1) XEN_BSW_1(clob1) - - -#define COVER \ - XEN_HYPER_COVER - -#define RFI \ - XEN_HYPER_RFI; \ - dv_serialize_data diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h deleted file mode 100644 index e88c5de27410..000000000000 --- a/arch/ia64/include/asm/xen/interface.h +++ /dev/null @@ -1,363 +0,0 @@ -/****************************************************************************** - * arch-ia64/hypervisor-if.h - * - * Guest OS interface to IA64 Xen. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Copyright by those who contributed. 
(in alphabetical order) - * - * Anthony Xu <anthony.xu@intel.com> - * Eddie Dong <eddie.dong@intel.com> - * Fred Yang <fred.yang@intel.com> - * Kevin Tian <kevin.tian@intel.com> - * Alex Williamson <alex.williamson@hp.com> - * Chris Wright <chrisw@sous-sol.org> - * Christian Limpach <Christian.Limpach@cl.cam.ac.uk> - * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com> - * Hollis Blanchard <hollisb@us.ibm.com> - * Isaku Yamahata <yamahata@valinux.co.jp> - * Jan Beulich <jbeulich@novell.com> - * John Levon <john.levon@sun.com> - * Kazuhiro Suzuki <kaz@jp.fujitsu.com> - * Keir Fraser <keir.fraser@citrix.com> - * Kouya Shimura <kouya@jp.fujitsu.com> - * Masaki Kanno <kanno.masaki@jp.fujitsu.com> - * Matt Chapman <matthewc@hp.com> - * Matthew Chapman <matthewc@hp.com> - * Samuel Thibault <samuel.thibault@eu.citrix.com> - * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com> - * Tristan Gingold <tgingold@free.fr> - * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com> - * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com> - * Zhang Xin <xing.z.zhang@intel.com> - * Zhang xiantao <xiantao.zhang@intel.com> - * dan.magenheimer@hp.com - * ian.pratt@cl.cam.ac.uk - * michael.fetterman@cl.cam.ac.uk - */ - -#ifndef _ASM_IA64_XEN_INTERFACE_H -#define _ASM_IA64_XEN_INTERFACE_H - -#define __DEFINE_GUEST_HANDLE(name, type) \ - typedef struct { type *p; } __guest_handle_ ## name - -#define DEFINE_GUEST_HANDLE_STRUCT(name) \ - __DEFINE_GUEST_HANDLE(name, struct name) -#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) -#define GUEST_HANDLE(name) __guest_handle_ ## name -#define GUEST_HANDLE_64(name) GUEST_HANDLE(name) -#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) - -#ifndef __ASSEMBLY__ -/* Explicitly size integers that represent pfns in the public interface - * with Xen so that we could have one ABI that works for 32 and 64 bit - * guests. */ -typedef unsigned long xen_pfn_t; -typedef unsigned long xen_ulong_t; -/* Guest handles for primitive C types. */ -__DEFINE_GUEST_HANDLE(uchar, unsigned char); -__DEFINE_GUEST_HANDLE(uint, unsigned int); -__DEFINE_GUEST_HANDLE(ulong, unsigned long); - -DEFINE_GUEST_HANDLE(char); -DEFINE_GUEST_HANDLE(int); -DEFINE_GUEST_HANDLE(long); -DEFINE_GUEST_HANDLE(void); -DEFINE_GUEST_HANDLE(uint64_t); -DEFINE_GUEST_HANDLE(uint32_t); - -DEFINE_GUEST_HANDLE(xen_pfn_t); -#define PRI_xen_pfn "lx" -#endif - -/* Arch specific VIRQs definition */ -#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ -#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ -#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ - -/* Maximum number of virtual CPUs in multi-processor guests. */ -/* keep sizeof(struct shared_page) <= PAGE_SIZE. - * this is checked in arch/ia64/xen/hypervisor.c. 
*/ -#define MAX_VIRT_CPUS 64 - -#ifndef __ASSEMBLY__ - -#define INVALID_MFN (~0UL) - -union vac { - unsigned long value; - struct { - int a_int:1; - int a_from_int_cr:1; - int a_to_int_cr:1; - int a_from_psr:1; - int a_from_cpuid:1; - int a_cover:1; - int a_bsw:1; - long reserved:57; - }; -}; - -union vdc { - unsigned long value; - struct { - int d_vmsw:1; - int d_extint:1; - int d_ibr_dbr:1; - int d_pmc:1; - int d_to_pmd:1; - int d_itm:1; - long reserved:58; - }; -}; - -struct mapped_regs { - union vac vac; - union vdc vdc; - unsigned long virt_env_vaddr; - unsigned long reserved1[29]; - unsigned long vhpi; - unsigned long reserved2[95]; - union { - unsigned long vgr[16]; - unsigned long bank1_regs[16]; /* bank1 regs (r16-r31) - when bank0 active */ - }; - union { - unsigned long vbgr[16]; - unsigned long bank0_regs[16]; /* bank0 regs (r16-r31) - when bank1 active */ - }; - unsigned long vnat; - unsigned long vbnat; - unsigned long vcpuid[5]; - unsigned long reserved3[11]; - unsigned long vpsr; - unsigned long vpr; - unsigned long reserved4[76]; - union { - unsigned long vcr[128]; - struct { - unsigned long dcr; /* CR0 */ - unsigned long itm; - unsigned long iva; - unsigned long rsv1[5]; - unsigned long pta; /* CR8 */ - unsigned long rsv2[7]; - unsigned long ipsr; /* CR16 */ - unsigned long isr; - unsigned long rsv3; - unsigned long iip; - unsigned long ifa; - unsigned long itir; - unsigned long iipa; - unsigned long ifs; - unsigned long iim; /* CR24 */ - unsigned long iha; - unsigned long rsv4[38]; - unsigned long lid; /* CR64 */ - unsigned long ivr; - unsigned long tpr; - unsigned long eoi; - unsigned long irr[4]; - unsigned long itv; /* CR72 */ - unsigned long pmv; - unsigned long cmcv; - unsigned long rsv5[5]; - unsigned long lrr0; /* CR80 */ - unsigned long lrr1; - unsigned long rsv6[46]; - }; - }; - union { - unsigned long reserved5[128]; - struct { - unsigned long precover_ifs; - unsigned long unat; /* not sure if this is needed - until NaT arch is done */ - int interrupt_collection_enabled; /* virtual psr.ic */ - - /* virtual interrupt deliverable flag is - * evtchn_upcall_mask in shared info area now. - * interrupt_mask_addr is the address - * of evtchn_upcall_mask for current vcpu - */ - unsigned char *interrupt_mask_addr; - int pending_interruption; - unsigned char vpsr_pp; - unsigned char vpsr_dfh; - unsigned char hpsr_dfh; - unsigned char hpsr_mfh; - unsigned long reserved5_1[4]; - int metaphysical_mode; /* 1 = use metaphys mapping - 0 = use virtual */ - int banknum; /* 0 or 1, which virtual - register bank is active */ - unsigned long rrs[8]; /* region registers */ - unsigned long krs[8]; /* kernel registers */ - unsigned long tmp[16]; /* temp registers - (e.g. for hyperprivops) */ - - /* itc paravirtualization - * vAR.ITC = mAR.ITC + itc_offset - * itc_last is one which was lastly passed to - * the guest OS in order to prevent it from - * going backwords. - */ - unsigned long itc_offset; - unsigned long itc_last; - }; - }; -}; - -struct arch_vcpu_info { - /* nothing */ -}; - -/* - * This structure is used for magic page in domain pseudo physical address - * space and the result of XENMEM_machine_memory_map. - * As the XENMEM_machine_memory_map result, - * xen_memory_map::nr_entries indicates the size in bytes - * including struct xen_ia64_memmap_info. Not the number of entries. 
- */ -struct xen_ia64_memmap_info { - uint64_t efi_memmap_size; /* size of EFI memory map */ - uint64_t efi_memdesc_size; /* size of an EFI memory map - * descriptor */ - uint32_t efi_memdesc_version; /* memory descriptor version */ - void *memdesc[0]; /* array of efi_memory_desc_t */ -}; - -struct arch_shared_info { - /* PFN of the start_info page. */ - unsigned long start_info_pfn; - - /* Interrupt vector for event channel. */ - int evtchn_vector; - - /* PFN of memmap_info page */ - unsigned int memmap_info_num_pages; /* currently only = 1 case is - supported. */ - unsigned long memmap_info_pfn; - - uint64_t pad[31]; -}; - -struct xen_callback { - unsigned long ip; -}; -typedef struct xen_callback xen_callback_t; - -#endif /* !__ASSEMBLY__ */ - -#include <asm/pvclock-abi.h> - -/* Size of the shared_info area (this is not related to page size). */ -#define XSI_SHIFT 14 -#define XSI_SIZE (1 << XSI_SHIFT) -/* Log size of mapped_regs area (64 KB - only 4KB is used). */ -#define XMAPPEDREGS_SHIFT 12 -#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) -/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ -#define XMAPPEDREGS_OFS XSI_SIZE - -/* Hyperprivops. */ -#define HYPERPRIVOP_START 0x1 -#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) -#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) -#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) -#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) -#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) -#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) -#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) -#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) -#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) -#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) -#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) -#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) -#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) -#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) -#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) -#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) -#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) -#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) -#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) -#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) -#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) -#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) -#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) -#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) -#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) -#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19) -#define HYPERPRIVOP_MAX (0x1a) - -/* Fast and light hypercalls. */ -#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 - -/* Xencomm macros. */ -#define XENCOMM_INLINE_MASK 0xf800000000000000UL -#define XENCOMM_INLINE_FLAG 0x8000000000000000UL - -#ifndef __ASSEMBLY__ - -/* - * Optimization features. - * The hypervisor may do some special optimizations for guests. This hypercall - * can be used to switch on/of these special optimizations. - */ -#define __HYPERVISOR_opt_feature 0x700UL - -#define XEN_IA64_OPTF_OFF 0x0 -#define XEN_IA64_OPTF_ON 0x1 - -/* - * If this feature is switched on, the hypervisor inserts the - * tlb entries without calling the guests traphandler. - * This is useful in guests using region 7 for identity mapping - * like the linux kernel does. - */ -#define XEN_IA64_OPTF_IDENT_MAP_REG7 1 - -/* Identity mapping of region 4 addresses in HVM. 
*/ -#define XEN_IA64_OPTF_IDENT_MAP_REG4 2 - -/* Identity mapping of region 5 addresses in HVM. */ -#define XEN_IA64_OPTF_IDENT_MAP_REG5 3 - -#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0) - -struct xen_ia64_opt_feature { - unsigned long cmd; /* Which feature */ - unsigned char on; /* Switch feature on/off */ - union { - struct { - /* The page protection bit mask of the pte. - * This will be or'ed with the pte. */ - unsigned long pgprot; - unsigned long key; /* A protection key for itir.*/ - }; - }; -}; - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_IA64_XEN_INTERFACE_H */ diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h deleted file mode 100644 index a90450983003..000000000000 --- a/arch/ia64/include/asm/xen/irq.h +++ /dev/null @@ -1,44 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/irq.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef _ASM_IA64_XEN_IRQ_H -#define _ASM_IA64_XEN_IRQ_H - -/* - * The flat IRQ space is divided into two regions: - * 1. A one-to-one mapping of real physical IRQs. This space is only used - * if we have physical device-access privilege. This region is at the - * start of the IRQ space so that existing device drivers do not need - * to be modified to translate physical IRQ numbers into our IRQ space. - * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These - * are bound using the provided bind/unbind functions. - */ - -#define XEN_PIRQ_BASE 0 -#define XEN_NR_PIRQS 256 - -#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS) -#define XEN_NR_DYNIRQS (NR_CPUS * 8) - -#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS) - -#endif /* _ASM_IA64_XEN_IRQ_H */ diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h deleted file mode 100644 index 00cf03e0cb82..000000000000 --- a/arch/ia64/include/asm/xen/minstate.h +++ /dev/null @@ -1,143 +0,0 @@ - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -/* read ar.itc in advance, and use it before leaving bank 0 */ -#define XEN_ACCOUNT_GET_STAMP \ - MOV_FROM_ITC(pUStk, p6, r20, r2); -#else -#define XEN_ACCOUNT_GET_STAMP -#endif - -/* - * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves - * the minimum state necessary that allows us to turn psr.ic back - * on. 
- * - * Assumed state upon entry: - * psr.ic: off - * r31: contains saved predicates (pr) - * - * Upon exit, the state is as follows: - * psr.ic: off - * r2 = points to &pt_regs.r16 - * r8 = contents of ar.ccv - * r9 = contents of ar.csd - * r10 = contents of ar.ssd - * r11 = FPSR_DEFAULT - * r12 = kernel sp (kernel virtual address) - * r13 = points to current task_struct (kernel virtual address) - * p15 = TRUE if psr.i is set in cr.ipsr - * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: - * preserved - * CONFIG_XEN note: p6/p7 are not preserved - * - * Note that psr.ic is NOT turned on by this macro. This is so that - * we can pass interruption state as arguments to a handler. - */ -#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \ - mov r16=IA64_KR(CURRENT); /* M */ \ - mov r27=ar.rsc; /* M */ \ - mov r20=r1; /* A */ \ - mov r25=ar.unat; /* M */ \ - MOV_FROM_IPSR(p0,r29); /* M */ \ - MOV_FROM_IIP(r28); /* M */ \ - mov r21=ar.fpsr; /* M */ \ - mov r26=ar.pfs; /* I */ \ - __COVER; /* B;; (or nothing) */ \ - adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ - ;; \ - ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \ - st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \ - adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \ - /* switch from user to kernel RBS: */ \ - ;; \ - invala; /* M */ \ - /* SAVE_IFS;*/ /* see xen special handling below */ \ - cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ - ;; \ -(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ - ;; \ -(pUStk) mov.m r24=ar.rnat; \ -(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ -(pKStk) mov r1=sp; /* get sp */ \ - ;; \ -(pUStk) lfetch.fault.excl.nt1 [r22]; \ -(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ -(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ - ;; \ -(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ -(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ - ;; \ -(pUStk) mov r18=ar.bsp; \ -(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ - adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ - adds r16=PT(CR_IPSR),r1; \ - ;; \ - lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ - st8 [r16]=r29; /* save cr.ipsr */ \ - ;; \ - lfetch.fault.excl.nt1 [r17]; \ - tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ - mov r29=b0 \ - ;; \ - WORKAROUND; \ - adds r16=PT(R8),r1; /* initialize first base pointer */ \ - adds r17=PT(R9),r1; /* initialize second base pointer */ \ -(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r8,16; \ -.mem.offset 8,0; st8.spill [r17]=r9,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r10,24; \ - movl r8=XSI_PRECOVER_IFS; \ -.mem.offset 8,0; st8.spill [r17]=r11,24; \ - ;; \ - /* xen special handling for possibly lazy cover */ \ - /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \ - ld8 r30=[r8]; \ -(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ - st8 [r16]=r28,16; /* save cr.iip */ \ - ;; \ - st8 [r17]=r30,16; /* save cr.ifs */ \ - mov r8=ar.ccv; \ - mov r9=ar.csd; \ - mov r10=ar.ssd; \ - movl r11=FPSR_DEFAULT; /* L-unit */ \ - ;; \ - st8 [r16]=r25,16; /* save ar.unat */ \ - st8 [r17]=r26,16; /* save ar.pfs */ \ - shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ - ;; \ - st8 [r16]=r27,16; /* save ar.rsc */ \ -(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \ -(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \ - ;; /* 
avoid RAW on r16 & r17 */ \ -(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \ - st8 [r17]=r31,16; /* save predicates */ \ -(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \ - ;; \ - st8 [r16]=r29,16; /* save b0 */ \ - st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ - cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ -.mem.offset 8,0; st8.spill [r17]=r12,16; \ - adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r13,16; \ -.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \ - mov r13=IA64_KR(CURRENT); /* establish `current' */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r15,16; \ -.mem.offset 8,0; st8.spill [r17]=r14,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r2,16; \ -.mem.offset 8,0; st8.spill [r17]=r3,16; \ - XEN_ACCOUNT_GET_STAMP \ - adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ - ;; \ - EXTRA; \ - movl r1=__gp; /* establish kernel global pointer */ \ - ;; \ - ACCOUNT_SYS_ENTER \ - BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \ - ;; diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h deleted file mode 100644 index 96e42f97fa1f..000000000000 --- a/arch/ia64/include/asm/xen/page-coherent.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H -#define _ASM_IA64_XEN_PAGE_COHERENT_H - -#include <asm/page.h> -#include <linux/dma-attrs.h> -#include <linux/dma-mapping.h> - -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) -{ - void *vstart = (void*)__get_free_pages(flags, get_order(size)); - *dma_handle = virt_to_phys(vstart); - return vstart; -} - -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - free_pages((unsigned long) cpu_addr, get_order(size)); -} - -static inline void xen_dma_map_page(struct device *hwdev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) { } - -static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) { } - -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) { } - -static inline void xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) { } - -#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */ diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h deleted file mode 100644 index 03441a780b5b..000000000000 --- a/arch/ia64/include/asm/xen/page.h +++ /dev/null @@ -1,65 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/page.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef _ASM_IA64_XEN_PAGE_H -#define _ASM_IA64_XEN_PAGE_H - -#define INVALID_P2M_ENTRY (~0UL) - -static inline unsigned long mfn_to_pfn(unsigned long mfn) -{ - return mfn; -} - -static inline unsigned long pfn_to_mfn(unsigned long pfn) -{ - return pfn; -} - -#define phys_to_machine_mapping_valid(_x) (1) - -static inline void *mfn_to_virt(unsigned long mfn) -{ - return __va(mfn << PAGE_SHIFT); -} - -static inline unsigned long virt_to_mfn(void *virt) -{ - return __pa(virt) >> PAGE_SHIFT; -} - -/* for tpmfront.c */ -static inline unsigned long virt_to_machine(void *virt) -{ - return __pa(virt); -} - -static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) -{ - /* nothing */ -} - -#define pte_mfn(_x) pte_pfn(_x) -#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */ -#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */ - -#endif /* _ASM_IA64_XEN_PAGE_H */ diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h deleted file mode 100644 index eae944e88846..000000000000 --- a/arch/ia64/include/asm/xen/patchlist.h +++ /dev/null @@ -1,38 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/patchlist.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#define __paravirt_start_gate_fsyscall_patchlist \ - __xen_start_gate_fsyscall_patchlist -#define __paravirt_end_gate_fsyscall_patchlist \ - __xen_end_gate_fsyscall_patchlist -#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ - __xen_start_gate_brl_fsys_bubble_down_patchlist -#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ - __xen_end_gate_brl_fsys_bubble_down_patchlist -#define __paravirt_start_gate_vtop_patchlist \ - __xen_start_gate_vtop_patchlist -#define __paravirt_end_gate_vtop_patchlist \ - __xen_end_gate_vtop_patchlist -#define __paravirt_start_gate_mckinley_e9_patchlist \ - __xen_start_gate_mckinley_e9_patchlist -#define __paravirt_end_gate_mckinley_e9_patchlist \ - __xen_end_gate_mckinley_e9_patchlist diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h deleted file mode 100644 index fb4ec5e0b066..000000000000 --- a/arch/ia64/include/asm/xen/privop.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef _ASM_IA64_XEN_PRIVOP_H -#define _ASM_IA64_XEN_PRIVOP_H - -/* - * Copyright (C) 2005 Hewlett-Packard Co - * Dan Magenheimer <dan.magenheimer@hp.com> - * - * Paravirtualizations of privileged operations for Xen/ia64 - * - * - * inline privop and paravirt_alt support - * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - */ - -#ifndef __ASSEMBLY__ -#include <linux/types.h> /* arch-ia64.h requires uint64_t */ -#endif -#include <asm/xen/interface.h> - -/* At 1 MB, before per-cpu space but still addressable using addl instead - of movl. */ -#define XSI_BASE 0xfffffffffff00000 - -/* Address of mapped regs. 
*/ -#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE) - -#ifdef __ASSEMBLY__ -#define XEN_HYPER_RFI break HYPERPRIVOP_RFI -#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT -#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT -#define XEN_HYPER_COVER break HYPERPRIVOP_COVER -#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D -#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I -#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I -#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR -#define XEN_HYPER_THASH break HYPERPRIVOP_THASH -#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D -#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR -#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR -#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4 - -#define XSI_IFS (XSI_BASE + XSI_IFS_OFS) -#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS) -#define XSI_IFA (XSI_BASE + XSI_IFA_OFS) -#define XSI_ISR (XSI_BASE + XSI_ISR_OFS) -#define XSI_IIM (XSI_BASE + XSI_IIM_OFS) -#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS) -#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) -#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS) -#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS) -#define XSI_IIP (XSI_BASE + XSI_IIP_OFS) -#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS) -#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) -#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) -#define XSI_IHA (XSI_BASE + XSI_IHA_OFS) -#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS) -#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS) -#endif - -#ifndef __ASSEMBLY__ - -/************************************************/ -/* Instructions paravirtualized for correctness */ -/************************************************/ - -/* "fc" and "thash" are privilege-sensitive instructions, meaning they - * may have different semantics depending on whether they are executed - * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't - * be allowed to execute directly, lest incorrect semantics result. */ -extern void xen_fc(void *addr); -extern unsigned long xen_thash(unsigned long addr); - -/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" - * is not currently used (though it may be in a long-format VHPT system!) - * and the semantics of cover only change if psr.ic is off which is very - * rare (and currently non-existent outside of assembly code */ - -/* There are also privilege-sensitive registers. These registers are - * readable at any privilege level but only writable at PL0. */ -extern unsigned long xen_get_cpuid(int index); -extern unsigned long xen_get_pmd(int index); - -#ifndef ASM_SUPPORTED -extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ -extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ -#endif - -/************************************************/ -/* Instructions paravirtualized for performance */ -/************************************************/ - -/* Xen uses memory-mapped virtual privileged registers for access to many - * performance-sensitive privileged registers. Some, like the processor - * status register (psr), are broken up into multiple memory locations. - * Others, like "pend", are abstractions based on privileged registers. - * "Pend" is guaranteed to be set if reading cr.ivr would return a - * (non-spurious) interrupt. 
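/*
 * Aside: a rough, self-contained model of the virtual psr.i scheme described
 * just above and implemented by the XSI_PSR_I accessors that follow.  The
 * mask byte (evtchn_upcall_mask) is inverted with respect to psr.i: 0 means
 * interrupts enabled.  When the guest re-enables interrupts it also checks
 * the pending byte sitting just before the mask (MASK_TO_PEND_OFS is -1 in
 * the removed inst.h) and, if set, falls back to a real SSM_I hyperprivop.
 * Everything below is a stand-in for illustration, not a kernel API.
 */
#include <stdio.h>
#include <stdint.h>

struct fake_vcpu_info {
	uint8_t evtchn_upcall_pending;	/* byte at mask_addr - 1 */
	uint8_t evtchn_upcall_mask;	/* byte pointed to by interrupt_mask_addr */
};

static struct fake_vcpu_info vcpu;
static uint8_t *interrupt_mask_addr = &vcpu.evtchn_upcall_mask;

static int get_virtual_psr_i(void)
{
	return !*interrupt_mask_addr;		/* mask == 0  <=>  vpsr.i == 1 */
}

static void set_virtual_psr_i(int val)
{
	*interrupt_mask_addr = val ? 0 : 1;	/* note the inversion */
	if (val && *(interrupt_mask_addr - 1))	/* event arrived while masked? */
		printf("would issue HYPERPRIVOP_SSM_I here\n");
}

int main(void)
{
	set_virtual_psr_i(0);			/* "rsm psr.i" */
	vcpu.evtchn_upcall_pending = 1;		/* an event arrives while masked */
	printf("vpsr.i = %d\n", get_virtual_psr_i());
	set_virtual_psr_i(1);			/* "ssm psr.i": must check pend */
	printf("vpsr.i = %d\n", get_virtual_psr_i());
	return 0;
}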
*/ -#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE) - -#define XSI_PSR_I \ - (*XEN_MAPPEDREGS->interrupt_mask_addr) -#define xen_get_virtual_psr_i() \ - (!XSI_PSR_I) -#define xen_set_virtual_psr_i(_val) \ - ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; }) -#define xen_set_virtual_psr_ic(_val) \ - ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; }) -#define xen_get_virtual_pend() \ - (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) - -#ifndef ASM_SUPPORTED -/* Although all privileged operations can be left to trap and will - * be properly handled by Xen, some are frequent enough that we use - * hyperprivops for performance. */ -extern unsigned long xen_get_psr(void); -extern unsigned long xen_get_ivr(void); -extern unsigned long xen_get_tpr(void); -extern void xen_hyper_ssm_i(void); -extern void xen_set_itm(unsigned long); -extern void xen_set_tpr(unsigned long); -extern void xen_eoi(unsigned long); -extern unsigned long xen_get_rr(unsigned long index); -extern void xen_set_rr(unsigned long index, unsigned long val); -extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, - unsigned long val2, unsigned long val3, - unsigned long val4); -extern void xen_set_kr(unsigned long index, unsigned long val); -extern void xen_ptcga(unsigned long addr, unsigned long size); -#endif /* !ASM_SUPPORTED */ - -#endif /* !__ASSEMBLY__ */ - -#endif /* _ASM_IA64_XEN_PRIVOP_H */ diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h deleted file mode 100644 index 20b2950c71b6..000000000000 --- a/arch/ia64/include/asm/xen/xcom_hcall.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_IA64_XEN_XCOM_HCALL_H -#define _ASM_IA64_XEN_XCOM_HCALL_H - -/* These function creates inline or mini descriptor for the parameters and - calls the corresponding xencomm_arch_hypercall_X. - Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless - they want to use their own wrapper. 
*/ -extern int xencomm_hypercall_console_io(int cmd, int count, char *str); - -extern int xencomm_hypercall_event_channel_op(int cmd, void *op); - -extern int xencomm_hypercall_xen_version(int cmd, void *arg); - -extern int xencomm_hypercall_physdev_op(int cmd, void *op); - -extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, - unsigned int count); - -extern int xencomm_hypercall_sched_op(int cmd, void *arg); - -extern int xencomm_hypercall_multicall(void *call_list, int nr_calls); - -extern int xencomm_hypercall_callback_op(int cmd, void *arg); - -extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg); - -extern int xencomm_hypercall_suspend(unsigned long srec); - -extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg); - -extern long xencomm_hypercall_opt_feature(void *arg); - -#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */ diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h deleted file mode 100644 index cded677bebf2..000000000000 --- a/arch/ia64/include/asm/xen/xencomm.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_IA64_XEN_XENCOMM_H -#define _ASM_IA64_XEN_XENCOMM_H - -#include <xen/xencomm.h> -#include <asm/pgtable.h> - -/* Must be called before any hypercall. */ -extern void xencomm_initialize(void); -extern int xencomm_is_initialized(void); - -/* Check if virtual contiguity means physical contiguity - * where the passed address is a pointer value in virtual address. 
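/*
 * Aside: a tiny stand-alone mock of the aliasing pattern the xcom_hcall.h
 * comment above describes -- the generic HYPERVISOR_xxx entry point is simply
 * defined to the corresponding xencomm_hypercall_xxx wrapper, which is what
 * builds the inline or mini descriptor before trapping into Xen.  The body
 * below is a printf stub for illustration only, not the real wrapper.
 */
#include <stdio.h>

/* stand-in for the real wrapper declared above in xcom_hcall.h */
static int xencomm_hypercall_xen_version(int cmd, void *arg)
{
	printf("xen_version: cmd=%d arg=%p\n", cmd, arg);  /* real code wraps arg */
	return 0;
}

/* what an arch header would do, per the comment above */
#define HYPERVISOR_xen_version xencomm_hypercall_xen_version

int main(void)
{
	int buf = 0;
	return HYPERVISOR_xen_version(0, &buf);
}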
- * On ia64, identity mapping area in region 7 or the piece of region 5 - * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL] - */ -static inline int xencomm_is_phys_contiguous(unsigned long addr) -{ - return (PAGE_OFFSET <= addr && - addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) || - (KERNEL_START <= addr && - addr < KERNEL_START + KERNEL_TR_PAGE_SIZE); -} - -#endif /* _ASM_IA64_XEN_XENCOMM_H */ diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h index e90c40ec9edf..f03402039896 100644 --- a/arch/ia64/include/uapi/asm/break.h +++ b/arch/ia64/include/uapi/asm/break.h @@ -20,13 +20,4 @@ */ #define __IA64_BREAK_SYSCALL 0x100000 -/* - * Xen specific break numbers: - */ -#define __IA64_XEN_HYPERCALL 0x1000 -/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used - for xen hyperprivops */ -#define __IA64_XEN_HYPERPRIVOP_START 0x1 -#define __IA64_XEN_HYPERPRIVOP_MAX 0x1a - #endif /* _ASM_IA64_BREAK_H */ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 59d52e3aef12..bfa19311e09c 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -53,7 +53,6 @@ #include <asm/numa.h> #include <asm/sal.h> #include <asm/cyclone.h> -#include <asm/xen/hypervisor.h> #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ @@ -120,8 +119,6 @@ acpi_get_sysname(void) return "uv"; else return "sn2"; - } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) { - return "xen"; } #ifdef CONFIG_INTEL_IOMMU diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 46c9e3007315..60ef83e6db71 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -16,9 +16,6 @@ #include <asm/sigcontext.h> #include <asm/mca.h> -#include <asm/xen/interface.h> -#include <asm/xen/hypervisor.h> - #include "../kernel/sigframe.h" #include "../kernel/fsyscall_gtod_data.h" @@ -290,33 +287,4 @@ void foo(void) DEFINE(IA64_ITC_LASTCYCLE_OFFSET, offsetof (struct itc_jitter_data_t, itc_lastcycle)); -#ifdef CONFIG_XEN - BLANK(); - - DEFINE(XEN_NATIVE_ASM, XEN_NATIVE); - DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN); - -#define DEFINE_MAPPED_REG_OFS(sym, field) \ - DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field))) - - DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr); - DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr); - DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip); - DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs); - DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs); - DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr); - DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa); - DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa); - DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim); - DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha); - DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir); - DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled); - DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum); - DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]); - DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); - DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); - DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); - DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset); - DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last); -#endif /* CONFIG_XEN */ } diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 991ca336b8a2..e6f80fcf013b 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -416,8 +416,6 @@ start_ap: default_setup_hook = 0 // Currently nothing needs to be done. 
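/*
 * Aside: as I read the xencomm scheme above, a buffer that
 * xencomm_is_phys_contiguous() accepts can be handed to the hypervisor as an
 * "inline" descriptor -- its physical address with XENCOMM_INLINE_FLAG (from
 * the removed interface.h) or'ed in -- while anything else needs a separate
 * mini/paged descriptor.  A self-contained mock of that decision; the
 * contiguity test and address translation are faked so this compiles and
 * runs on its own, and the helper name is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define XENCOMM_INLINE_FLAG 0x8000000000000000UL	/* value from interface.h */

/* stand-ins for the kernel helpers used in the removed code */
static int fake_is_phys_contiguous(unsigned long vaddr)	{ return vaddr < 0x100000; }
static unsigned long fake_virt_to_phys(unsigned long vaddr)	{ return vaddr; }

/* returns an "inline" handle when possible, 0 when a descriptor is needed */
static uint64_t xencomm_inline_handle(unsigned long vaddr)
{
	if (!fake_is_phys_contiguous(vaddr))
		return 0;	/* caller must build a mini descriptor instead */
	return fake_virt_to_phys(vaddr) | XENCOMM_INLINE_FLAG;
}

int main(void)
{
	printf("contiguous:     %#lx\n", (unsigned long)xencomm_inline_handle(0x4000));
	printf("not contiguous: %#lx\n", (unsigned long)xencomm_inline_handle(0x200000));
	return 0;
}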
- .weak xen_setup_hook - .global hypervisor_type hypervisor_type: data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT @@ -426,7 +424,6 @@ hypervisor_type: hypervisor_setup_hooks: data8 default_setup_hook - data8 xen_setup_hook num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8 .previous diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c index ee564575148e..f6769cd54bd9 100644 --- a/arch/ia64/kernel/nr-irqs.c +++ b/arch/ia64/kernel/nr-irqs.c @@ -10,15 +10,11 @@ #include <linux/kbuild.h> #include <linux/threads.h> #include <asm/native/irq.h> -#include <asm/xen/irq.h> void foo(void) { union paravirt_nr_irqs_max { char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS]; -#ifdef CONFIG_XEN - char xen_nr_irqs[XEN_NR_IRQS]; -#endif }; DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max)); diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h index 64d6d810c64b..1ad7512b5f65 100644 --- a/arch/ia64/kernel/paravirt_inst.h +++ b/arch/ia64/kernel/paravirt_inst.h @@ -22,9 +22,6 @@ #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK #include <asm/native/pvchk_inst.h> -#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN) -#include <asm/xen/inst.h> -#include <asm/xen/minstate.h> #else #include <asm/native/inst.h> #endif diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h index 0684aa6c6507..67cffc3643a3 100644 --- a/arch/ia64/kernel/paravirt_patchlist.h +++ b/arch/ia64/kernel/paravirt_patchlist.h @@ -20,9 +20,5 @@ * */ -#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) -#include <asm/xen/patchlist.h> -#else #include <asm/native/patchlist.h> -#endif diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 0ccb28fab27e..84f8a52ac5ae 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -182,12 +182,6 @@ SECTIONS { __start_gate_section = .; *(.data..gate) __stop_gate_section = .; -#ifdef CONFIG_XEN - . = ALIGN(PAGE_SIZE); - __xen_start_gate_section = .; - *(.data..gate.xen) - __xen_stop_gate_section = .; -#endif } /* * make sure the gate page doesn't expose diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig deleted file mode 100644 index 5d8a06b0ddf7..000000000000 --- a/arch/ia64/xen/Kconfig +++ /dev/null @@ -1,25 +0,0 @@ -# -# This Kconfig describes xen/ia64 options -# - -config XEN - bool "Xen hypervisor support" - default y - depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB - select XEN_XENCOMM - select NO_IDLE_HZ - # followings are required to save/restore. - select ARCH_SUSPEND_POSSIBLE - select SUSPEND - select PM_SLEEP - help - Enable Xen hypervisor support. Resulting kernel runs - both as a guest OS on Xen and natively on hardware. - -config XEN_XENCOMM - depends on XEN - bool - -config NO_IDLE_HZ - depends on XEN - bool diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile deleted file mode 100644 index e6f4a0a74228..000000000000 --- a/arch/ia64/xen/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# -# Makefile for Xen components -# - -obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ - hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \ - gate-data.o - -obj-$(CONFIG_IA64_GENERIC) += machvec.o - -# The gate DSO image is built using a special linker script. -include $(srctree)/arch/ia64/kernel/Makefile.gate - -# tell compiled for xen -CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN -AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN - -# use same file of native. 
-$(obj)/gate.o: $(src)/../kernel/gate.S FORCE - $(call if_changed_dep,as_o_S) -$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE - $(call if_changed_dep,cpp_lds_S) - - -AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN - -# xen multi compile -ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S -ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) -obj-y += $(ASM_PARAVIRT_OBJS) -define paravirtualized_xen -AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN -endef -$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o)))) - -$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE - $(call if_changed_dep,as_o_S) diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S deleted file mode 100644 index 6f95b6b32a4e..000000000000 --- a/arch/ia64/xen/gate-data.S +++ /dev/null @@ -1,3 +0,0 @@ - .section .data..gate.xen, "aw" - - .incbin "arch/ia64/xen/gate.so" diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c deleted file mode 100644 index c18281332f84..000000000000 --- a/arch/ia64/xen/grant-table.c +++ /dev/null @@ -1,94 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/grant-table.c - * - * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/module.h> -#include <linux/vmalloc.h> -#include <linux/slab.h> -#include <linux/mm.h> - -#include <xen/interface/xen.h> -#include <xen/interface/memory.h> -#include <xen/grant_table.h> - -#include <asm/xen/hypervisor.h> - -/**************************************************************************** - * grant table hack - * cmd: GNTTABOP_xxx - */ - -int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, - unsigned long max_nr_gframes, - struct grant_entry **__shared) -{ - *__shared = __va(frames[0] << PAGE_SHIFT); - return 0; -} - -void arch_gnttab_unmap_shared(struct grant_entry *shared, - unsigned long nr_gframes) -{ - /* nothing */ -} - -static void -gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop) -{ - uint32_t flags; - - flags = uop->flags; - - if (flags & GNTMAP_host_map) { - if (flags & GNTMAP_application_map) { - printk(KERN_DEBUG - "GNTMAP_application_map is not supported yet: " - "flags 0x%x\n", flags); - BUG(); - } - if (flags & GNTMAP_contains_pte) { - printk(KERN_DEBUG - "GNTMAP_contains_pte is not supported yet: " - "flags 0x%x\n", flags); - BUG(); - } - } else if (flags & GNTMAP_device_map) { - printk("GNTMAP_device_map is not supported yet 0x%x\n", flags); - BUG(); /* not yet. actually this flag is not used. 
*/ - } else { - BUG(); - } -} - -int -HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) -{ - if (cmd == GNTTABOP_map_grant_ref) { - unsigned int i; - for (i = 0; i < count; i++) { - gnttab_map_grant_ref_pre( - (struct gnttab_map_grant_ref *)uop + i); - } - } - return xencomm_hypercall_grant_table_op(cmd, uop, count); -} - -EXPORT_SYMBOL(HYPERVISOR_grant_table_op); diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S deleted file mode 100644 index 08847aa12583..000000000000 --- a/arch/ia64/xen/hypercall.S +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Support routines for Xen hypercalls - * - * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> - * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com> - */ - -#include <asm/asmmacro.h> -#include <asm/intrinsics.h> -#include <asm/xen/privop.h> - -#ifdef __INTEL_COMPILER -/* - * Hypercalls without parameter. - */ -#define __HCALL0(name,hcall) \ - GLOBAL_ENTRY(name); \ - break hcall; \ - br.ret.sptk.many rp; \ - END(name) - -/* - * Hypercalls with 1 parameter. - */ -#define __HCALL1(name,hcall) \ - GLOBAL_ENTRY(name); \ - mov r8=r32; \ - break hcall; \ - br.ret.sptk.many rp; \ - END(name) - -/* - * Hypercalls with 2 parameters. - */ -#define __HCALL2(name,hcall) \ - GLOBAL_ENTRY(name); \ - mov r8=r32; \ - mov r9=r33; \ - break hcall; \ - br.ret.sptk.many rp; \ - END(name) - -__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR) -__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR) -__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR) -__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I) - -__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR) -__HCALL1(xen_eoi, HYPERPRIVOP_EOI) -__HCALL1(xen_thash, HYPERPRIVOP_THASH) -__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM) -__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR) -__HCALL1(xen_fc, HYPERPRIVOP_FC) -__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID) -__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD) - -__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) -__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) -__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) - -GLOBAL_ENTRY(xen_set_rr0_to_rr4) - mov r8=r32 - mov r9=r33 - mov r10=r34 - mov r11=r35 - mov r14=r36 - XEN_HYPER_SET_RR0_TO_RR4 - br.ret.sptk.many rp - ;; -END(xen_set_rr0_to_rr4) -#endif - -GLOBAL_ENTRY(xen_send_ipi) - mov r14=r32 - mov r15=r33 - mov r2=0x400 - break 0x1000 - ;; - br.ret.sptk.many rp - ;; -END(xen_send_ipi) - -GLOBAL_ENTRY(__hypercall) - mov r2=r37 - break 0x1000 - br.ret.sptk.many b0 - ;; -END(__hypercall) diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c deleted file mode 100644 index fab62528a80b..000000000000 --- a/arch/ia64/xen/hypervisor.c +++ /dev/null @@ -1,97 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/hypervisor.c - * - * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/efi.h> -#include <linux/export.h> -#include <asm/xen/hypervisor.h> -#include <asm/xen/privop.h> - -#include "irq_xen.h" - -struct shared_info *HYPERVISOR_shared_info __read_mostly = - (struct shared_info *)XSI_BASE; -EXPORT_SYMBOL(HYPERVISOR_shared_info); - -DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); - -struct start_info *xen_start_info; -EXPORT_SYMBOL(xen_start_info); - -EXPORT_SYMBOL(xen_domain_type); - -EXPORT_SYMBOL(__hypercall); - -/* Stolen from arch/x86/xen/enlighten.c */ -/* - * Flag to determine whether vcpu info placement is available on all - * VCPUs. We assume it is to start with, and then set it to zero on - * the first failure. This is because it can succeed on some VCPUs - * and not others, since it can involve hypervisor memory allocation, - * or because the guest failed to guarantee all the appropriate - * constraints on all VCPUs (ie buffer can't cross a page boundary). - * - * Note that any particular CPU may be using a placed vcpu structure, - * but we can only optimise if the all are. - * - * 0: not available, 1: available - */ - -static void __init xen_vcpu_setup(int cpu) -{ - /* - * WARNING: - * before changing MAX_VIRT_CPUS, - * check that shared_info fits on a page - */ - BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE); - per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; -} - -void __init xen_setup_vcpu_info_placement(void) -{ - int cpu; - - for_each_possible_cpu(cpu) - xen_vcpu_setup(cpu); -} - -void -xen_cpu_init(void) -{ - xen_smp_intr_init(); -} - -/************************************************************************** - * opt feature - */ -void -xen_ia64_enable_opt_feature(void) -{ - /* Enable region 7 identity map optimizations in Xen */ - struct xen_ia64_opt_feature optf; - - optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7; - optf.on = XEN_IA64_OPTF_ON; - optf.pgprot = pgprot_val(PAGE_KERNEL); - optf.key = 0; /* No key on linux. */ - HYPERVISOR_opt_feature(&optf); -} diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c deleted file mode 100644 index efb74dafec4d..000000000000 --- a/arch/ia64/xen/irq_xen.c +++ /dev/null @@ -1,443 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/irq_xen.c - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/cpu.h> - -#include <xen/interface/xen.h> -#include <xen/interface/callback.h> -#include <xen/events.h> - -#include <asm/xen/privop.h> - -#include "irq_xen.h" - -/*************************************************************************** - * pv_irq_ops - * irq operations - */ - -static int -xen_assign_irq_vector(int irq) -{ - struct physdev_irq irq_op; - - irq_op.irq = irq; - if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) - return -ENOSPC; - - return irq_op.vector; -} - -static void -xen_free_irq_vector(int vector) -{ - struct physdev_irq irq_op; - - if (vector < IA64_FIRST_DEVICE_VECTOR || - vector > IA64_LAST_DEVICE_VECTOR) - return; - - irq_op.vector = vector; - if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op)) - printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n", - __func__, vector); -} - - -static DEFINE_PER_CPU(int, xen_timer_irq) = -1; -static DEFINE_PER_CPU(int, xen_ipi_irq) = -1; -static DEFINE_PER_CPU(int, xen_resched_irq) = -1; -static DEFINE_PER_CPU(int, xen_cmc_irq) = -1; -static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1; -static DEFINE_PER_CPU(int, xen_cpep_irq) = -1; -#define NAME_SIZE 15 -static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name); -#undef NAME_SIZE - -struct saved_irq { - unsigned int irq; - struct irqaction *action; -}; -/* 16 should be far optimistic value, since only several percpu irqs - * are registered early. - */ -#define MAX_LATE_IRQ 16 -static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ]; -static unsigned short late_irq_cnt; -static unsigned short saved_irq_cnt; -static int xen_slab_ready; - -#ifdef CONFIG_SMP -#include <linux/sched.h> - -/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, - * it ends up to issue several memory accesses upon percpu data and - * thus adds unnecessary traffic to other paths. - */ -static irqreturn_t -xen_dummy_handler(int irq, void *dev_id) -{ - return IRQ_HANDLED; -} - -static irqreturn_t -xen_resched_handler(int irq, void *dev_id) -{ - scheduler_ipi(); - return IRQ_HANDLED; -} - -static struct irqaction xen_ipi_irqaction = { - .handler = handle_IPI, - .flags = IRQF_DISABLED, - .name = "IPI" -}; - -static struct irqaction xen_resched_irqaction = { - .handler = xen_resched_handler, - .flags = IRQF_DISABLED, - .name = "resched" -}; - -static struct irqaction xen_tlb_irqaction = { - .handler = xen_dummy_handler, - .flags = IRQF_DISABLED, - .name = "tlb_flush" -}; -#endif - -/* - * This is xen version percpu irq registration, which needs bind - * to xen specific evtchn sub-system. One trick here is that xen - * evtchn binding interface depends on kmalloc because related - * port needs to be freed at device/cpu down. So we cache the - * registration on BSP before slab is ready and then deal them - * at later point. For rest instances happening after slab ready, - * we hook them to xen evtchn immediately. - * - * FIXME: MCA is not supported by far, and thus "nomca" boot param is - * required. 
- */ -static void -__xen_register_percpu_irq(unsigned int cpu, unsigned int vec, - struct irqaction *action, int save) -{ - int irq = 0; - - if (xen_slab_ready) { - switch (vec) { - case IA64_TIMER_VECTOR: - snprintf(per_cpu(xen_timer_name, cpu), - sizeof(per_cpu(xen_timer_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, - action->handler, action->flags, - per_cpu(xen_timer_name, cpu), action->dev_id); - per_cpu(xen_timer_irq, cpu) = irq; - break; - case IA64_IPI_RESCHEDULE: - snprintf(per_cpu(xen_resched_name, cpu), - sizeof(per_cpu(xen_resched_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, - action->handler, action->flags, - per_cpu(xen_resched_name, cpu), action->dev_id); - per_cpu(xen_resched_irq, cpu) = irq; - break; - case IA64_IPI_VECTOR: - snprintf(per_cpu(xen_ipi_name, cpu), - sizeof(per_cpu(xen_ipi_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, - action->handler, action->flags, - per_cpu(xen_ipi_name, cpu), action->dev_id); - per_cpu(xen_ipi_irq, cpu) = irq; - break; - case IA64_CMC_VECTOR: - snprintf(per_cpu(xen_cmc_name, cpu), - sizeof(per_cpu(xen_cmc_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, - action->handler, - action->flags, - per_cpu(xen_cmc_name, cpu), - action->dev_id); - per_cpu(xen_cmc_irq, cpu) = irq; - break; - case IA64_CMCP_VECTOR: - snprintf(per_cpu(xen_cmcp_name, cpu), - sizeof(per_cpu(xen_cmcp_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, - action->handler, - action->flags, - per_cpu(xen_cmcp_name, cpu), - action->dev_id); - per_cpu(xen_cmcp_irq, cpu) = irq; - break; - case IA64_CPEP_VECTOR: - snprintf(per_cpu(xen_cpep_name, cpu), - sizeof(per_cpu(xen_cpep_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, - action->handler, - action->flags, - per_cpu(xen_cpep_name, cpu), - action->dev_id); - per_cpu(xen_cpep_irq, cpu) = irq; - break; - case IA64_CPE_VECTOR: - case IA64_MCA_RENDEZ_VECTOR: - case IA64_PERFMON_VECTOR: - case IA64_MCA_WAKEUP_VECTOR: - case IA64_SPURIOUS_INT_VECTOR: - /* No need to complain, these aren't supported. */ - break; - default: - printk(KERN_WARNING "Percpu irq %d is unsupported " - "by xen!\n", vec); - break; - } - BUG_ON(irq < 0); - - if (irq > 0) { - /* - * Mark percpu. Without this, migrate_irqs() will - * mark the interrupt for migrations and trigger it - * on cpu hotplug. - */ - irq_set_status_flags(irq, IRQ_PER_CPU); - } - } - - /* For BSP, we cache registered percpu irqs, and then re-walk - * them when initializing APs - */ - if (!cpu && save) { - BUG_ON(saved_irq_cnt == MAX_LATE_IRQ); - saved_percpu_irqs[saved_irq_cnt].irq = vec; - saved_percpu_irqs[saved_irq_cnt].action = action; - saved_irq_cnt++; - if (!xen_slab_ready) - late_irq_cnt++; - } -} - -static void -xen_register_percpu_irq(ia64_vector vec, struct irqaction *action) -{ - __xen_register_percpu_irq(smp_processor_id(), vec, action, 1); -} - -static void -xen_bind_early_percpu_irq(void) -{ - int i; - - xen_slab_ready = 1; - /* There's no race when accessing this cached array, since only - * BSP will face with such step shortly - */ - for (i = 0; i < late_irq_cnt; i++) - __xen_register_percpu_irq(smp_processor_id(), - saved_percpu_irqs[i].irq, - saved_percpu_irqs[i].action, 0); -} - -/* FIXME: There's no obvious point to check whether slab is ready. 
So - * a hack is used here by utilizing a late time hook. - */ - -#ifdef CONFIG_HOTPLUG_CPU -static int unbind_evtchn_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - if (action == CPU_DEAD) { - /* Unregister evtchn. */ - if (per_cpu(xen_cpep_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu), - NULL); - per_cpu(xen_cpep_irq, cpu) = -1; - } - if (per_cpu(xen_cmcp_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu), - NULL); - per_cpu(xen_cmcp_irq, cpu) = -1; - } - if (per_cpu(xen_cmc_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL); - per_cpu(xen_cmc_irq, cpu) = -1; - } - if (per_cpu(xen_ipi_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL); - per_cpu(xen_ipi_irq, cpu) = -1; - } - if (per_cpu(xen_resched_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), - NULL); - per_cpu(xen_resched_irq, cpu) = -1; - } - if (per_cpu(xen_timer_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu), - NULL); - per_cpu(xen_timer_irq, cpu) = -1; - } - } - return NOTIFY_OK; -} - -static struct notifier_block unbind_evtchn_notifier = { - .notifier_call = unbind_evtchn_callback, - .priority = 0 -}; -#endif - -void xen_smp_intr_init_early(unsigned int cpu) -{ -#ifdef CONFIG_SMP - unsigned int i; - - for (i = 0; i < saved_irq_cnt; i++) - __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq, - saved_percpu_irqs[i].action, 0); -#endif -} - -void xen_smp_intr_init(void) -{ -#ifdef CONFIG_SMP - unsigned int cpu = smp_processor_id(); - struct callback_register event = { - .type = CALLBACKTYPE_event, - .address = { .ip = (unsigned long)&xen_event_callback }, - }; - - if (cpu == 0) { - /* Initialization was already done for boot cpu. */ -#ifdef CONFIG_HOTPLUG_CPU - /* Register the notifier only once. */ - register_cpu_notifier(&unbind_evtchn_notifier); -#endif - return; - } - - /* This should be piggyback when setup vcpu guest context */ - BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); -#endif /* CONFIG_SMP */ -} - -void __init -xen_irq_init(void) -{ - struct callback_register event = { - .type = CALLBACKTYPE_event, - .address = { .ip = (unsigned long)&xen_event_callback }, - }; - - xen_init_IRQ(); - BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); - late_time_init = xen_bind_early_percpu_irq; -} - -void -xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect) -{ -#ifdef CONFIG_SMP - /* TODO: we need to call vcpu_up here */ - if (unlikely(vector == ap_wakeup_vector)) { - /* XXX - * This should be in __cpu_up(cpu) in ia64 smpboot.c - * like x86. But don't want to modify it, - * keep it untouched. 
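/*
 * Aside: a generic, self-contained mock of the "record now, bind later"
 * pattern that __xen_register_percpu_irq() and xen_bind_early_percpu_irq()
 * above implement -- registrations arriving before the event-channel layer
 * can allocate memory are cached on the boot CPU and replayed from the
 * late_time_init hook.  Names and the ready flag below are stand-ins only.
 */
#include <stdio.h>

#define MAX_EARLY 16

struct pending { int vec; const char *name; };

static struct pending early[MAX_EARLY];
static int early_cnt;
static int backend_ready;		/* plays the role of xen_slab_ready */

static void bind_now(int vec, const char *name)
{
	printf("binding vec %d (%s)\n", vec, name);	/* real code binds an evtchn */
}

static void register_percpu(int vec, const char *name)
{
	if (!backend_ready) {				/* too early: just remember it */
		if (early_cnt < MAX_EARLY) {
			early[early_cnt].vec = vec;
			early[early_cnt].name = name;
			early_cnt++;
		}
		return;
	}
	bind_now(vec, name);
}

static void late_init(void)				/* plays the late_time_init role */
{
	int i;

	backend_ready = 1;
	for (i = 0; i < early_cnt; i++)			/* replay cached registrations */
		bind_now(early[i].vec, early[i].name);
}

int main(void)
{
	register_percpu(239, "timer");			/* arrives before the backend is up */
	register_percpu(254, "ipi");
	late_init();
	register_percpu(30, "cmc");			/* later ones bind immediately */
	return 0;
}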
- */ - xen_smp_intr_init_early(cpu); - - xen_send_ipi(cpu, vector); - /* vcpu_prepare_and_up(cpu); */ - return; - } -#endif - - switch (vector) { - case IA64_IPI_VECTOR: - xen_send_IPI_one(cpu, XEN_IPI_VECTOR); - break; - case IA64_IPI_RESCHEDULE: - xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); - break; - case IA64_CMCP_VECTOR: - xen_send_IPI_one(cpu, XEN_CMCP_VECTOR); - break; - case IA64_CPEP_VECTOR: - xen_send_IPI_one(cpu, XEN_CPEP_VECTOR); - break; - case IA64_TIMER_VECTOR: { - /* this is used only once by check_sal_cache_flush() - at boot time */ - static int used = 0; - if (!used) { - xen_send_ipi(cpu, IA64_TIMER_VECTOR); - used = 1; - break; - } - /* fallthrough */ - } - default: - printk(KERN_WARNING "Unsupported IPI type 0x%x\n", - vector); - notify_remote_via_irq(0); /* defaults to 0 irq */ - break; - } -} - -static void __init -xen_register_ipi(void) -{ -#ifdef CONFIG_SMP - register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction); - register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction); - register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction); -#endif -} - -static void -xen_resend_irq(unsigned int vector) -{ - (void)resend_irq_on_evtchn(vector); -} - -const struct pv_irq_ops xen_irq_ops __initconst = { - .register_ipi = xen_register_ipi, - - .assign_irq_vector = xen_assign_irq_vector, - .free_irq_vector = xen_free_irq_vector, - .register_percpu_irq = xen_register_percpu_irq, - - .resend_irq = xen_resend_irq, -}; diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h deleted file mode 100644 index 1778517b90fe..000000000000 --- a/arch/ia64/xen/irq_xen.h +++ /dev/null @@ -1,34 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/irq_xen.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef IRQ_XEN_H -#define IRQ_XEN_H - -extern void (*late_time_init)(void); -extern char xen_event_callback; -void __init xen_init_IRQ(void); - -extern const struct pv_irq_ops xen_irq_ops __initconst; -extern void xen_smp_intr_init(void); -extern void xen_send_ipi(int cpu, int vec); - -#endif /* IRQ_XEN_H */ diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c deleted file mode 100644 index 4ad588a7c279..000000000000 --- a/arch/ia64/xen/machvec.c +++ /dev/null @@ -1,4 +0,0 @@ -#define MACHVEC_PLATFORM_NAME xen -#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h> -#include <asm/machvec_init.h> - diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c deleted file mode 100644 index 419c8620945a..000000000000 --- a/arch/ia64/xen/suspend.c +++ /dev/null @@ -1,59 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/suspend.c - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * suspend/resume - */ - -#include <xen/xen-ops.h> -#include <asm/xen/hypervisor.h> -#include "time.h" - -void -xen_mm_pin_all(void) -{ - /* nothing */ -} - -void -xen_mm_unpin_all(void) -{ - /* nothing */ -} - -void -xen_arch_pre_suspend() -{ - /* nothing */ -} - -void -xen_arch_post_suspend(int suspend_cancelled) -{ - if (suspend_cancelled) - return; - - xen_ia64_enable_opt_feature(); - /* add more if necessary */ -} - -void xen_arch_resume(void) -{ - xen_timer_resume_on_aps(); -} diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c deleted file mode 100644 index 1f8244a78bee..000000000000 --- a/arch/ia64/xen/time.c +++ /dev/null @@ -1,257 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/time.c - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/delay.h> -#include <linux/kernel_stat.h> -#include <linux/posix-timers.h> -#include <linux/irq.h> -#include <linux/clocksource.h> - -#include <asm/timex.h> - -#include <asm/xen/hypervisor.h> - -#include <xen/interface/vcpu.h> - -#include "../kernel/fsyscall_gtod_data.h" - -static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); -static DEFINE_PER_CPU(unsigned long, xen_stolen_time); -static DEFINE_PER_CPU(unsigned long, xen_blocked_time); - -/* taken from i386/kernel/time-xen.c */ -static void xen_init_missing_ticks_accounting(int cpu) -{ - struct vcpu_register_runstate_memory_area area; - struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu); - int rc; - - memset(runstate, 0, sizeof(*runstate)); - - area.addr.v = runstate; - rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, - &area); - WARN_ON(rc && rc != -ENOSYS); - - per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; - per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] - + runstate->time[RUNSTATE_offline]; -} - -/* - * Runstate accounting - */ -/* stolen from arch/x86/xen/time.c */ -static void get_runstate_snapshot(struct vcpu_runstate_info *res) -{ - u64 state_time; - struct vcpu_runstate_info *state; - - BUG_ON(preemptible()); - - state = &__get_cpu_var(xen_runstate); - - /* - * The runstate info is always updated by the hypervisor on - * the current CPU, so there's no need to use anything - * stronger than a compiler barrier when fetching it. - */ - do { - state_time = state->state_entry_time; - rmb(); - *res = *state; - rmb(); - } while (state->state_entry_time != state_time); -} - -#define NS_PER_TICK (1000000000LL/HZ) - -static unsigned long -consider_steal_time(unsigned long new_itm) -{ - unsigned long stolen, blocked; - unsigned long delta_itm = 0, stolentick = 0; - int cpu = smp_processor_id(); - struct vcpu_runstate_info runstate; - struct task_struct *p = current; - - get_runstate_snapshot(&runstate); - - /* - * Check for vcpu migration effect - * In this case, itc value is reversed. - * This causes huge stolen value. - * This function just checks and reject this effect. 
- */ - if (!time_after_eq(runstate.time[RUNSTATE_blocked], - per_cpu(xen_blocked_time, cpu))) - blocked = 0; - - if (!time_after_eq(runstate.time[RUNSTATE_runnable] + - runstate.time[RUNSTATE_offline], - per_cpu(xen_stolen_time, cpu))) - stolen = 0; - - if (!time_after(delta_itm + new_itm, ia64_get_itc())) - stolentick = ia64_get_itc() - new_itm; - - do_div(stolentick, NS_PER_TICK); - stolentick++; - - do_div(stolen, NS_PER_TICK); - - if (stolen > stolentick) - stolen = stolentick; - - stolentick -= stolen; - do_div(blocked, NS_PER_TICK); - - if (blocked > stolentick) - blocked = stolentick; - - if (stolen > 0 || blocked > 0) { - account_steal_ticks(stolen); - account_idle_ticks(blocked); - run_local_timers(); - - rcu_check_callbacks(cpu, user_mode(get_irq_regs())); - - scheduler_tick(); - run_posix_cpu_timers(p); - delta_itm += local_cpu_data->itm_delta * (stolen + blocked); - - if (cpu == time_keeper_id) - xtime_update(stolen + blocked); - - local_cpu_data->itm_next = delta_itm + new_itm; - - per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; - per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; - } - return delta_itm; -} - -static int xen_do_steal_accounting(unsigned long *new_itm) -{ - unsigned long delta_itm; - delta_itm = consider_steal_time(*new_itm); - *new_itm += delta_itm; - if (time_after(*new_itm, ia64_get_itc()) && delta_itm) - return 1; - - return 0; -} - -static void xen_itc_jitter_data_reset(void) -{ - u64 lcycle, ret; - - do { - lcycle = itc_jitter_data.itc_lastcycle; - ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0); - } while (unlikely(ret != lcycle)); -} - -/* based on xen_sched_clock() in arch/x86/xen/time.c. */ -/* - * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined, - * something similar logic should be implemented here. - */ -/* - * Xen sched_clock implementation. Returns the number of unstolen - * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED - * states. - */ -static unsigned long long xen_sched_clock(void) -{ - struct vcpu_runstate_info runstate; - - unsigned long long now; - unsigned long long offset; - unsigned long long ret; - - /* - * Ideally sched_clock should be called on a per-cpu basis - * anyway, so preempt should already be disabled, but that's - * not current practice at the moment. - */ - preempt_disable(); - - /* - * both ia64_native_sched_clock() and xen's runstate are - * based on mAR.ITC. So difference of them makes sense. - */ - now = ia64_native_sched_clock(); - - get_runstate_snapshot(&runstate); - - WARN_ON(runstate.state != RUNSTATE_running); - - offset = 0; - if (now > runstate.state_entry_time) - offset = now - runstate.state_entry_time; - ret = runstate.time[RUNSTATE_blocked] + - runstate.time[RUNSTATE_running] + - offset; - - preempt_enable(); - - return ret; -} - -struct pv_time_ops xen_time_ops __initdata = { - .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, - .do_steal_accounting = xen_do_steal_accounting, - .clocksource_resume = xen_itc_jitter_data_reset, - .sched_clock = xen_sched_clock, -}; - -/* Called after suspend, to resume time. */ -static void xen_local_tick_resume(void) -{ - /* Just trigger a tick. 
*/ - ia64_cpu_local_tick(); - touch_softlockup_watchdog(); -} - -void -xen_timer_resume(void) -{ - unsigned int cpu; - - xen_local_tick_resume(); - - for_each_online_cpu(cpu) - xen_init_missing_ticks_accounting(cpu); -} - -static void ia64_cpu_local_tick_fn(void *unused) -{ - xen_local_tick_resume(); - xen_init_missing_ticks_accounting(smp_processor_id()); -} - -void -xen_timer_resume_on_aps(void) -{ - smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1); -} diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h deleted file mode 100644 index f98d7e1a42f0..000000000000 --- a/arch/ia64/xen/time.h +++ /dev/null @@ -1,24 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/time.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -extern struct pv_time_ops xen_time_ops __initdata; -void xen_timer_resume_on_aps(void); diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c deleted file mode 100644 index ccaf7431f7c8..000000000000 --- a/arch/ia64/xen/xcom_hcall.c +++ /dev/null @@ -1,441 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * - * Tristan Gingold <tristan.gingold@bull.net> - * - * Copyright (c) 2007 - * Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * consolidate mini and inline version. - */ - -#include <linux/module.h> -#include <xen/interface/xen.h> -#include <xen/interface/memory.h> -#include <xen/interface/grant_table.h> -#include <xen/interface/callback.h> -#include <xen/interface/vcpu.h> -#include <asm/xen/hypervisor.h> -#include <asm/xen/xencomm.h> - -/* Xencomm notes: - * This file defines hypercalls to be used by xencomm. 
The hypercalls simply - * create inlines or mini descriptors for pointers and then call the raw arch - * hypercall xencomm_arch_hypercall_XXX - * - * If the arch wants to directly use these hypercalls, simply define macros - * in asm/xen/hypercall.h, eg: - * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op - * - * The arch may also define HYPERVISOR_xxx as a function and do more operations - * before/after doing the hypercall. - * - * Note: because only inline or mini descriptors are created these functions - * must only be called with in kernel memory parameters. - */ - -int -xencomm_hypercall_console_io(int cmd, int count, char *str) -{ - /* xen early printk uses console io hypercall before - * xencomm initialization. In that case, we just ignore it. - */ - if (!xencomm_is_initialized()) - return 0; - - return xencomm_arch_hypercall_console_io - (cmd, count, xencomm_map_no_alloc(str, count)); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io); - -int -xencomm_hypercall_event_channel_op(int cmd, void *op) -{ - struct xencomm_handle *desc; - desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op)); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_event_channel_op(cmd, desc); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op); - -int -xencomm_hypercall_xen_version(int cmd, void *arg) -{ - struct xencomm_handle *desc; - unsigned int argsize; - - switch (cmd) { - case XENVER_version: - /* do not actually pass an argument */ - return xencomm_arch_hypercall_xen_version(cmd, 0); - case XENVER_extraversion: - argsize = sizeof(struct xen_extraversion); - break; - case XENVER_compile_info: - argsize = sizeof(struct xen_compile_info); - break; - case XENVER_capabilities: - argsize = sizeof(struct xen_capabilities_info); - break; - case XENVER_changeset: - argsize = sizeof(struct xen_changeset_info); - break; - case XENVER_platform_parameters: - argsize = sizeof(struct xen_platform_parameters); - break; - case XENVER_get_features: - argsize = (arg == NULL) ? 
0 : sizeof(struct xen_feature_info); - break; - - default: - printk(KERN_DEBUG - "%s: unknown version op %d\n", __func__, cmd); - return -ENOSYS; - } - - desc = xencomm_map_no_alloc(arg, argsize); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_xen_version(cmd, desc); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version); - -int -xencomm_hypercall_physdev_op(int cmd, void *op) -{ - unsigned int argsize; - - switch (cmd) { - case PHYSDEVOP_apic_read: - case PHYSDEVOP_apic_write: - argsize = sizeof(struct physdev_apic); - break; - case PHYSDEVOP_alloc_irq_vector: - case PHYSDEVOP_free_irq_vector: - argsize = sizeof(struct physdev_irq); - break; - case PHYSDEVOP_irq_status_query: - argsize = sizeof(struct physdev_irq_status_query); - break; - - default: - printk(KERN_DEBUG - "%s: unknown physdev op %d\n", __func__, cmd); - return -ENOSYS; - } - - return xencomm_arch_hypercall_physdev_op - (cmd, xencomm_map_no_alloc(op, argsize)); -} - -static int -xencommize_grant_table_op(struct xencomm_mini **xc_area, - unsigned int cmd, void *op, unsigned int count, - struct xencomm_handle **desc) -{ - struct xencomm_handle *desc1; - unsigned int argsize; - - switch (cmd) { - case GNTTABOP_map_grant_ref: - argsize = sizeof(struct gnttab_map_grant_ref); - break; - case GNTTABOP_unmap_grant_ref: - argsize = sizeof(struct gnttab_unmap_grant_ref); - break; - case GNTTABOP_setup_table: - { - struct gnttab_setup_table *setup = op; - - argsize = sizeof(*setup); - - if (count != 1) - return -EINVAL; - desc1 = __xencomm_map_no_alloc - (xen_guest_handle(setup->frame_list), - setup->nr_frames * - sizeof(*xen_guest_handle(setup->frame_list)), - *xc_area); - if (desc1 == NULL) - return -EINVAL; - (*xc_area)++; - set_xen_guest_handle(setup->frame_list, (void *)desc1); - break; - } - case GNTTABOP_dump_table: - argsize = sizeof(struct gnttab_dump_table); - break; - case GNTTABOP_transfer: - argsize = sizeof(struct gnttab_transfer); - break; - case GNTTABOP_copy: - argsize = sizeof(struct gnttab_copy); - break; - case GNTTABOP_query_size: - argsize = sizeof(struct gnttab_query_size); - break; - default: - printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n", - __func__, cmd); - BUG(); - } - - *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area); - if (*desc == NULL) - return -EINVAL; - (*xc_area)++; - - return 0; -} - -int -xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, - unsigned int count) -{ - int rc; - struct xencomm_handle *desc; - XENCOMM_MINI_ALIGNED(xc_area, 2); - - rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc); - if (rc) - return rc; - - return xencomm_arch_hypercall_grant_table_op(cmd, desc, count); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op); - -int -xencomm_hypercall_sched_op(int cmd, void *arg) -{ - struct xencomm_handle *desc; - unsigned int argsize; - - switch (cmd) { - case SCHEDOP_yield: - case SCHEDOP_block: - argsize = 0; - break; - case SCHEDOP_shutdown: - argsize = sizeof(struct sched_shutdown); - break; - case SCHEDOP_poll: - { - struct sched_poll *poll = arg; - struct xencomm_handle *ports; - - argsize = sizeof(struct sched_poll); - ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports), - sizeof(*xen_guest_handle(poll->ports))); - - set_xen_guest_handle(poll->ports, (void *)ports); - break; - } - default: - printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd); - return -ENOSYS; - } - - desc = xencomm_map_no_alloc(arg, argsize); - if (desc == NULL) - return -EINVAL; - - return 
xencomm_arch_hypercall_sched_op(cmd, desc); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op); - -int -xencomm_hypercall_multicall(void *call_list, int nr_calls) -{ - int rc; - int i; - struct multicall_entry *mce; - struct xencomm_handle *desc; - XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2); - - for (i = 0; i < nr_calls; i++) { - mce = (struct multicall_entry *)call_list + i; - - switch (mce->op) { - case __HYPERVISOR_update_va_mapping: - case __HYPERVISOR_mmu_update: - /* No-op on ia64. */ - break; - case __HYPERVISOR_grant_table_op: - rc = xencommize_grant_table_op - (&xc_area, - mce->args[0], (void *)mce->args[1], - mce->args[2], &desc); - if (rc) - return rc; - mce->args[1] = (unsigned long)desc; - break; - case __HYPERVISOR_memory_op: - default: - printk(KERN_DEBUG - "%s: unhandled multicall op entry op %lu\n", - __func__, mce->op); - return -ENOSYS; - } - } - - desc = xencomm_map_no_alloc(call_list, - nr_calls * sizeof(struct multicall_entry)); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_multicall(desc, nr_calls); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall); - -int -xencomm_hypercall_callback_op(int cmd, void *arg) -{ - unsigned int argsize; - switch (cmd) { - case CALLBACKOP_register: - argsize = sizeof(struct callback_register); - break; - case CALLBACKOP_unregister: - argsize = sizeof(struct callback_unregister); - break; - default: - printk(KERN_DEBUG - "%s: unknown callback op %d\n", __func__, cmd); - return -ENOSYS; - } - - return xencomm_arch_hypercall_callback_op - (cmd, xencomm_map_no_alloc(arg, argsize)); -} - -static int -xencommize_memory_reservation(struct xencomm_mini *xc_area, - struct xen_memory_reservation *mop) -{ - struct xencomm_handle *desc; - - desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start), - mop->nr_extents * - sizeof(*xen_guest_handle(mop->extent_start)), - xc_area); - if (desc == NULL) - return -EINVAL; - - set_xen_guest_handle(mop->extent_start, (void *)desc); - return 0; -} - -int -xencomm_hypercall_memory_op(unsigned int cmd, void *arg) -{ - GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} }; - struct xen_memory_reservation *xmr = NULL; - int rc; - struct xencomm_handle *desc; - unsigned int argsize; - XENCOMM_MINI_ALIGNED(xc_area, 2); - - switch (cmd) { - case XENMEM_increase_reservation: - case XENMEM_decrease_reservation: - case XENMEM_populate_physmap: - xmr = (struct xen_memory_reservation *)arg; - set_xen_guest_handle(extent_start_va[0], - xen_guest_handle(xmr->extent_start)); - - argsize = sizeof(*xmr); - rc = xencommize_memory_reservation(xc_area, xmr); - if (rc) - return rc; - xc_area++; - break; - - case XENMEM_maximum_ram_page: - argsize = 0; - break; - - case XENMEM_add_to_physmap: - argsize = sizeof(struct xen_add_to_physmap); - break; - - default: - printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd); - return -ENOSYS; - } - - desc = xencomm_map_no_alloc(arg, argsize); - if (desc == NULL) - return -EINVAL; - - rc = xencomm_arch_hypercall_memory_op(cmd, desc); - - switch (cmd) { - case XENMEM_increase_reservation: - case XENMEM_decrease_reservation: - case XENMEM_populate_physmap: - set_xen_guest_handle(xmr->extent_start, - xen_guest_handle(extent_start_va[0])); - break; - } - - return rc; -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op); - -int -xencomm_hypercall_suspend(unsigned long srec) -{ - struct sched_shutdown arg; - - arg.reason = SHUTDOWN_suspend; - - return xencomm_arch_hypercall_sched_op( - SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg))); -} 
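
Aside, not part of the patch: every wrapper being removed from xcom_hcall.c follows the same shape, which the per-command cases above can obscure — size the argument for the given command, wrap the in-kernel pointer in an inline/mini xencomm descriptor, and pass the descriptor rather than the raw pointer down to the raw arch hypercall. A minimal sketch of that shape, using only helpers visible in the deleted code; the "foo" command, its argument struct and xencomm_arch_hypercall_foo_op() are hypothetical stand-ins, not a real hypercall:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/xen/xencomm.h>

#define FOOOP_query     0               /* hypothetical command number */

struct foo_arg {                        /* hypothetical argument layout */
        unsigned long val;
};

/* hypothetical raw arch call, standing in for the xencomm_arch_hypercall_*() family */
extern int xencomm_arch_hypercall_foo_op(int cmd, struct xencomm_handle *desc);

int xencomm_hypercall_foo_op(int cmd, void *arg)
{
        struct xencomm_handle *desc;
        unsigned int argsize;

        switch (cmd) {
        case FOOOP_query:
                argsize = sizeof(struct foo_arg);
                break;
        default:
                printk(KERN_DEBUG "%s: unknown foo op %d\n", __func__, cmd);
                return -ENOSYS;
        }

        /* Describe the in-kernel buffer with an inline/mini descriptor... */
        desc = xencomm_map_no_alloc(arg, argsize);
        if (desc == NULL)
                return -EINVAL;

        /* ...and hand the descriptor, never the raw pointer, to the hypervisor. */
        return xencomm_arch_hypercall_foo_op(cmd, desc);
}

The real wrappers above differ only in how they size their arguments and, where an argument embeds guest handles (grant-table setup, sched poll, memory reservations), in wrapping those inner pointers with additional mini descriptors first.
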
- -long -xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg) -{ - unsigned int argsize; - switch (cmd) { - case VCPUOP_register_runstate_memory_area: { - struct vcpu_register_runstate_memory_area *area = - (struct vcpu_register_runstate_memory_area *)arg; - argsize = sizeof(*arg); - set_xen_guest_handle(area->addr.h, - (void *)xencomm_map_no_alloc(area->addr.v, - sizeof(area->addr.v))); - break; - } - - default: - printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd); - return -ENOSYS; - } - - return xencomm_arch_hypercall_vcpu_op(cmd, cpu, - xencomm_map_no_alloc(arg, argsize)); -} - -long -xencomm_hypercall_opt_feature(void *arg) -{ - return xencomm_arch_hypercall_opt_feature( - xencomm_map_no_alloc(arg, - sizeof(struct xen_ia64_opt_feature))); -} diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c deleted file mode 100644 index 3e8d350fdf39..000000000000 --- a/arch/ia64/xen/xen_pv_ops.c +++ /dev/null @@ -1,1141 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/xen_pv_ops.c - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/console.h> -#include <linux/irq.h> -#include <linux/kernel.h> -#include <linux/pm.h> -#include <linux/unistd.h> - -#include <asm/xen/hypervisor.h> -#include <asm/xen/xencomm.h> -#include <asm/xen/privop.h> - -#include "irq_xen.h" -#include "time.h" - -/*************************************************************************** - * general info - */ -static struct pv_info xen_info __initdata = { - .kernel_rpl = 2, /* or 1: determin at runtime */ - .paravirt_enabled = 1, - .name = "Xen/ia64", -}; - -#define IA64_RSC_PL_SHIFT 2 -#define IA64_RSC_PL_BIT_SIZE 2 -#define IA64_RSC_PL_MASK \ - (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT) - -static void __init -xen_info_init(void) -{ - /* Xenified Linux/ia64 may run on pl = 1 or 2. - * determin at run time. */ - unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC); - unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT; - xen_info.kernel_rpl = rpl; -} - -/*************************************************************************** - * pv_init_ops - * initialization hooks. - */ - -static void -xen_panic_hypercall(struct unw_frame_info *info, void *arg) -{ - current->thread.ksp = (__u64)info->sw - 16; - HYPERVISOR_shutdown(SHUTDOWN_crash); - /* we're never actually going to get here... */ -} - -static int -xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) -{ - unw_init_running(xen_panic_hypercall, NULL); - /* we're never actually going to get here... 
*/ - return NOTIFY_DONE; -} - -static struct notifier_block xen_panic_block = { - xen_panic_event, NULL, 0 /* try to go last */ -}; - -static void xen_pm_power_off(void) -{ - local_irq_disable(); - HYPERVISOR_shutdown(SHUTDOWN_poweroff); -} - -static void __init -xen_banner(void) -{ - printk(KERN_INFO - "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld " - "flags=0x%x\n", - xen_info.kernel_rpl, - HYPERVISOR_shared_info->arch.start_info_pfn, - xen_start_info->nr_pages, xen_start_info->flags); -} - -static int __init -xen_reserve_memory(struct rsvd_region *region) -{ - region->start = (unsigned long)__va( - (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT)); - region->end = region->start + PAGE_SIZE; - return 1; -} - -static void __init -xen_arch_setup_early(void) -{ - struct shared_info *s; - BUG_ON(!xen_pv_domain()); - - s = HYPERVISOR_shared_info; - xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT); - - /* Must be done before any hypercall. */ - xencomm_initialize(); - - xen_setup_features(); - /* Register a call for panic conditions. */ - atomic_notifier_chain_register(&panic_notifier_list, - &xen_panic_block); - pm_power_off = xen_pm_power_off; - - xen_ia64_enable_opt_feature(); -} - -static void __init -xen_arch_setup_console(char **cmdline_p) -{ - add_preferred_console("xenboot", 0, NULL); - add_preferred_console("tty", 0, NULL); - /* use hvc_xen */ - add_preferred_console("hvc", 0, NULL); - -#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE) - conswitchp = NULL; -#endif -} - -static int __init -xen_arch_setup_nomca(void) -{ - return 1; -} - -static void __init -xen_post_smp_prepare_boot_cpu(void) -{ - xen_setup_vcpu_info_placement(); -} - -#ifdef ASM_SUPPORTED -static unsigned long __init_or_module -xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type); -#endif -static void __init -xen_patch_branch(unsigned long tag, unsigned long type); - -static const struct pv_init_ops xen_init_ops __initconst = { - .banner = xen_banner, - - .reserve_memory = xen_reserve_memory, - - .arch_setup_early = xen_arch_setup_early, - .arch_setup_console = xen_arch_setup_console, - .arch_setup_nomca = xen_arch_setup_nomca, - - .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, -#ifdef ASM_SUPPORTED - .patch_bundle = xen_patch_bundle, -#endif - .patch_branch = xen_patch_branch, -}; - -/*************************************************************************** - * pv_fsys_data - * addresses for fsys - */ - -extern unsigned long xen_fsyscall_table[NR_syscalls]; -extern char xen_fsys_bubble_down[]; -struct pv_fsys_data xen_fsys_data __initdata = { - .fsyscall_table = (unsigned long *)xen_fsyscall_table, - .fsys_bubble_down = (void *)xen_fsys_bubble_down, -}; - -/*************************************************************************** - * pv_patchdata - * patchdata addresses - */ - -#define DECLARE(name) \ - extern unsigned long __xen_start_gate_##name##_patchlist[]; \ - extern unsigned long __xen_end_gate_##name##_patchlist[] - -DECLARE(fsyscall); -DECLARE(brl_fsys_bubble_down); -DECLARE(vtop); -DECLARE(mckinley_e9); - -extern unsigned long __xen_start_gate_section[]; - -#define ASSIGN(name) \ - .start_##name##_patchlist = \ - (unsigned long)__xen_start_gate_##name##_patchlist, \ - .end_##name##_patchlist = \ - (unsigned long)__xen_end_gate_##name##_patchlist - -static struct pv_patchdata xen_patchdata __initdata = { - ASSIGN(fsyscall), - ASSIGN(brl_fsys_bubble_down), - ASSIGN(vtop), - ASSIGN(mckinley_e9), - - .gate_section = 
(void*)__xen_start_gate_section, -}; - -/*************************************************************************** - * pv_cpu_ops - * intrinsics hooks. - */ - -#ifndef ASM_SUPPORTED -static void -xen_set_itm_with_offset(unsigned long val) -{ - /* ia64_cpu_local_tick() calls this with interrupt enabled. */ - /* WARN_ON(!irqs_disabled()); */ - xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); -} - -static unsigned long -xen_get_itm_with_offset(void) -{ - /* unused at this moment */ - printk(KERN_DEBUG "%s is called.\n", __func__); - - WARN_ON(!irqs_disabled()); - return ia64_native_getreg(_IA64_REG_CR_ITM) + - XEN_MAPPEDREGS->itc_offset; -} - -/* ia64_set_itc() is only called by - * cpu_init() with ia64_set_itc(0) and ia64_sync_itc(). - * So XEN_MAPPEDRESG->itc_offset cal be considered as almost constant. - */ -static void -xen_set_itc(unsigned long val) -{ - unsigned long mitc; - - WARN_ON(!irqs_disabled()); - mitc = ia64_native_getreg(_IA64_REG_AR_ITC); - XEN_MAPPEDREGS->itc_offset = val - mitc; - XEN_MAPPEDREGS->itc_last = val; -} - -static unsigned long -xen_get_itc(void) -{ - unsigned long res; - unsigned long itc_offset; - unsigned long itc_last; - unsigned long ret_itc_last; - - itc_offset = XEN_MAPPEDREGS->itc_offset; - do { - itc_last = XEN_MAPPEDREGS->itc_last; - res = ia64_native_getreg(_IA64_REG_AR_ITC); - res += itc_offset; - if (itc_last >= res) - res = itc_last + 1; - ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, - itc_last, res); - } while (unlikely(ret_itc_last != itc_last)); - return res; - -#if 0 - /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled. - Should it be paravirtualized instead? */ - WARN_ON(!irqs_disabled()); - itc_offset = XEN_MAPPEDREGS->itc_offset; - itc_last = XEN_MAPPEDREGS->itc_last; - res = ia64_native_getreg(_IA64_REG_AR_ITC); - res += itc_offset; - if (itc_last >= res) - res = itc_last + 1; - XEN_MAPPEDREGS->itc_last = res; - return res; -#endif -} - -static void xen_setreg(int regnum, unsigned long val) -{ - switch (regnum) { - case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: - xen_set_kr(regnum - _IA64_REG_AR_KR0, val); - break; - case _IA64_REG_AR_ITC: - xen_set_itc(val); - break; - case _IA64_REG_CR_TPR: - xen_set_tpr(val); - break; - case _IA64_REG_CR_ITM: - xen_set_itm_with_offset(val); - break; - case _IA64_REG_CR_EOI: - xen_eoi(val); - break; - default: - ia64_native_setreg_func(regnum, val); - break; - } -} - -static unsigned long xen_getreg(int regnum) -{ - unsigned long res; - - switch (regnum) { - case _IA64_REG_PSR: - res = xen_get_psr(); - break; - case _IA64_REG_AR_ITC: - res = xen_get_itc(); - break; - case _IA64_REG_CR_ITM: - res = xen_get_itm_with_offset(); - break; - case _IA64_REG_CR_IVR: - res = xen_get_ivr(); - break; - case _IA64_REG_CR_TPR: - res = xen_get_tpr(); - break; - default: - res = ia64_native_getreg_func(regnum); - break; - } - return res; -} - -/* turning on interrupts is a bit more complicated.. write to the - * memory-mapped virtual psr.i bit first (to avoid race condition), - * then if any interrupts were pending, we have to execute a hyperprivop - * to ensure the pending interrupt gets delivered; else we're done! 
*/ -static void -xen_ssm_i(void) -{ - int old = xen_get_virtual_psr_i(); - xen_set_virtual_psr_i(1); - barrier(); - if (!old && xen_get_virtual_pend()) - xen_hyper_ssm_i(); -} - -/* turning off interrupts can be paravirtualized simply by writing - * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */ -static void -xen_rsm_i(void) -{ - xen_set_virtual_psr_i(0); - barrier(); -} - -static unsigned long -xen_get_psr_i(void) -{ - return xen_get_virtual_psr_i() ? IA64_PSR_I : 0; -} - -static void -xen_intrin_local_irq_restore(unsigned long mask) -{ - if (mask & IA64_PSR_I) - xen_ssm_i(); - else - xen_rsm_i(); -} -#else -#define __DEFINE_FUNC(name, code) \ - extern const char xen_ ## name ## _direct_start[]; \ - extern const char xen_ ## name ## _direct_end[]; \ - asm (".align 32\n" \ - ".proc xen_" #name "\n" \ - "xen_" #name ":\n" \ - "xen_" #name "_direct_start:\n" \ - code \ - "xen_" #name "_direct_end:\n" \ - "br.cond.sptk.many b6\n" \ - ".endp xen_" #name "\n") - -#define DEFINE_VOID_FUNC0(name, code) \ - extern void \ - xen_ ## name (void); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_VOID_FUNC1(name, code) \ - extern void \ - xen_ ## name (unsigned long arg); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_VOID_FUNC1_VOID(name, code) \ - extern void \ - xen_ ## name (void *arg); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_VOID_FUNC2(name, code) \ - extern void \ - xen_ ## name (unsigned long arg0, \ - unsigned long arg1); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_FUNC0(name, code) \ - extern unsigned long \ - xen_ ## name (void); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_FUNC1(name, type, code) \ - extern unsigned long \ - xen_ ## name (type arg); \ - __DEFINE_FUNC(name, code) - -#define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) - -/* - * static void xen_set_itm_with_offset(unsigned long val) - * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); - */ -/* 2 bundles */ -DEFINE_VOID_FUNC1(set_itm_with_offset, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "ld8 r3 = [r2]\n" - ";;\n" - "sub r8 = r8, r3\n" - "break " __stringify(HYPERPRIVOP_SET_ITM) "\n"); - -/* - * static unsigned long xen_get_itm_with_offset(void) - * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset; - */ -/* 2 bundles */ -DEFINE_FUNC0(get_itm_with_offset, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "ld8 r3 = [r2]\n" - "mov r8 = cr.itm\n" - ";;\n" - "add r8 = r8, r2\n"); - -/* - * static void xen_set_itc(unsigned long val) - * unsigned long mitc; - * - * WARN_ON(!irqs_disabled()); - * mitc = ia64_native_getreg(_IA64_REG_AR_ITC); - * XEN_MAPPEDREGS->itc_offset = val - mitc; - * XEN_MAPPEDREGS->itc_last = val; - */ -/* 2 bundles */ -DEFINE_VOID_FUNC1(set_itc, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_LAST_OFS) "\n" - "mov r3 = ar.itc\n" - ";;\n" - "sub r3 = r8, r3\n" - "st8 [r2] = r8, " - __stringify(XSI_ITC_LAST_OFS) " - " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "st8 [r2] = r3\n"); - -/* - * static unsigned long xen_get_itc(void) - * unsigned long res; - * unsigned long itc_offset; - * unsigned long itc_last; - * unsigned long ret_itc_last; - * - * itc_offset = XEN_MAPPEDREGS->itc_offset; - * do { - * itc_last = XEN_MAPPEDREGS->itc_last; - * res = ia64_native_getreg(_IA64_REG_AR_ITC); - * res += itc_offset; - * if (itc_last >= res) - * res = itc_last + 1; - * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, - * itc_last, res); - * } 
while (unlikely(ret_itc_last != itc_last)); - * return res; - */ -/* 5 bundles */ -DEFINE_FUNC0(get_itc, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - /* r9 = itc_offset */ - /* r2 = XSI_ITC_OFFSET */ - "888:\n" - "mov r8 = ar.itc\n" /* res = ar.itc */ - ";;\n" - "ld8 r3 = [r2]\n" /* r3 = itc_last */ - "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */ - ";;\n" - "cmp.gtu p6, p0 = r3, r8\n" - ";;\n" - "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */ - ";;\n" - "mov ar.ccv = r8\n" - ";;\n" - "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n" - ";;\n" - "cmp.ne p6, p0 = r10, r3\n" - "(p6) hint @pause\n" - "(p6) br.cond.spnt 888b\n"); - -DEFINE_VOID_FUNC1_VOID(fc, - "break " __stringify(HYPERPRIVOP_FC) "\n"); - -/* - * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR - * masked_addr = *psr_i_addr_addr - * pending_intr_addr = masked_addr - 1 - * if (val & IA64_PSR_I) { - * masked = *masked_addr - * *masked_addr = 0:xen_set_virtual_psr_i(1) - * compiler barrier - * if (masked) { - * uint8_t pending = *pending_intr_addr; - * if (pending) - * XEN_HYPER_SSM_I - * } - * } else { - * *masked_addr = 1:xen_set_virtual_psr_i(0) - * } - */ -/* 6 bundles */ -DEFINE_VOID_FUNC1(intrin_local_irq_restore, - /* r8 = input value: 0 or IA64_PSR_I - * p6 = (flags & IA64_PSR_I) - * = if clause - * p7 = !(flags & IA64_PSR_I) - * = else clause - */ - "cmp.ne p6, p7 = r8, r0\n" - "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - /* r9 = XEN_PSR_I_ADDR */ - "ld8 r9 = [r9]\n" - ";;\n" - - /* r10 = masked previous value */ - "(p6) ld1.acq r10 = [r9]\n" - ";;\n" - - /* p8 = !masked interrupt masked previously? */ - "(p6) cmp.ne.unc p8, p0 = r10, r0\n" - - /* p7 = else clause */ - "(p7) mov r11 = 1\n" - ";;\n" - /* masked = 1 */ - "(p7) st1.rel [r9] = r11\n" - - /* p6 = if clause */ - /* masked = 0 - * r9 = masked_addr - 1 - * = pending_intr_addr - */ - "(p8) st1.rel [r9] = r0, -1\n" - ";;\n" - /* r8 = pending_intr */ - "(p8) ld1.acq r11 = [r9]\n" - ";;\n" - /* p9 = interrupt pending? */ - "(p8) cmp.ne.unc p9, p10 = r11, r0\n" - ";;\n" - "(p10) mf\n" - /* issue hypercall to trigger interrupt */ - "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"); - -DEFINE_VOID_FUNC2(ptcga, - "break " __stringify(HYPERPRIVOP_PTC_GA) "\n"); -DEFINE_VOID_FUNC2(set_rr, - "break " __stringify(HYPERPRIVOP_SET_RR) "\n"); - -/* - * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; - * tmp = *tmp - * tmp = *tmp; - * psr_i = tmp? 
0: IA64_PSR_I; - */ -/* 4 bundles */ -DEFINE_FUNC0(get_psr_i, - "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */ - "mov r8 = 0\n" /* psr_i = 0 */ - ";;\n" - "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */ - ";;\n" - "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */ - ";;\n" - "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n"); - -DEFINE_FUNC1(thash, unsigned long, - "break " __stringify(HYPERPRIVOP_THASH) "\n"); -DEFINE_FUNC1(get_cpuid, int, - "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n"); -DEFINE_FUNC1(get_pmd, int, - "break " __stringify(HYPERPRIVOP_GET_PMD) "\n"); -DEFINE_FUNC1(get_rr, unsigned long, - "break " __stringify(HYPERPRIVOP_GET_RR) "\n"); - -/* - * void xen_privop_ssm_i(void) - * - * int masked = !xen_get_virtual_psr_i(); - * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) - * xen_set_virtual_psr_i(1) - * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 - * // compiler barrier - * if (masked) { - * uint8_t* pend_int_addr = - * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; - * uint8_t pending = *pend_int_addr; - * if (pending) - * XEN_HYPER_SSM_I - * } - */ -/* 4 bundles */ -DEFINE_VOID_FUNC0(ssm_i, - "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */ - ";;\n" - "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */ - ";;\n" - "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt - * r8 = XEN_PSR_I_ADDR - 1 - * = pend_int_addr - */ - "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I - * previously interrupt - * masked? - */ - ";;\n" - "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */ - ";;\n" - "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/ - ";;\n" - /* issue hypercall to get interrupt */ - "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n" - ";;\n"); - -/* - * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr - * = XEN_PSR_I_ADDR_ADDR; - * psr_i_addr = *psr_i_addr_addr; - * *psr_i_addr = 1; - */ -/* 2 bundles */ -DEFINE_VOID_FUNC0(rsm_i, - "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - /* r8 = XEN_PSR_I_ADDR */ - "mov r9 = 1\n" - ";;\n" - "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */ - ";;\n" - "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */ - -extern void -xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, - unsigned long val2, unsigned long val3, - unsigned long val4); -__DEFINE_FUNC(set_rr0_to_rr4, - "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n"); - - -extern unsigned long xen_getreg(int regnum); -#define __DEFINE_GET_REG(id, privop) \ - "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ - ";;\n" \ - "cmp.eq p6, p0 = r2, r8\n" \ - ";;\n" \ - "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \ - "(p6) br.cond.sptk.many b6\n" \ - ";;\n" - -__DEFINE_FUNC(getreg, - __DEFINE_GET_REG(PSR, PSR) - - /* get_itc */ - "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_get_itc\n" - ";;\n" - - /* get itm */ - "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_get_itm_with_offset\n" - ";;\n" - - __DEFINE_GET_REG(CR_IVR, IVR) - __DEFINE_GET_REG(CR_TPR, TPR) - - /* fall back */ - "movl r2 = ia64_native_getreg_func\n" - ";;\n" - "mov b7 = r2\n" - ";;\n" - "br.cond.sptk.many b7\n"); - -extern void xen_setreg(int regnum, unsigned long val); -#define __DEFINE_SET_REG(id, privop) \ - "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ - ";;\n" \ - "cmp.eq p6, p0 = r2, r9\n" \ - ";;\n" \ - "(p6) break " 
__stringify(HYPERPRIVOP_ ## privop) "\n" \ - "(p6) br.cond.sptk.many b6\n" \ - ";;\n" - -__DEFINE_FUNC(setreg, - /* kr0 .. kr 7*/ - /* - * if (_IA64_REG_AR_KR0 <= regnum && - * regnum <= _IA64_REG_AR_KR7) { - * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0 - * register __val asm ("r9") = val - * "break HYPERPRIVOP_SET_KR" - * } - */ - "mov r17 = r9\n" - "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n" - ";;\n" - "cmp.ge p6, p0 = r9, r2\n" - "sub r17 = r17, r2\n" - ";;\n" - "(p6) cmp.ge.unc p7, p0 = " - __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0) - ", r17\n" - ";;\n" - "(p7) mov r9 = r8\n" - ";;\n" - "(p7) mov r8 = r17\n" - "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n" - - /* set itm */ - "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_set_itm_with_offset\n" - - /* set itc */ - "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_set_itc\n" - - __DEFINE_SET_REG(CR_TPR, SET_TPR) - __DEFINE_SET_REG(CR_EOI, EOI) - - /* fall back */ - "movl r2 = ia64_native_setreg_func\n" - ";;\n" - "mov b7 = r2\n" - ";;\n" - "br.cond.sptk.many b7\n"); -#endif - -static const struct pv_cpu_ops xen_cpu_ops __initconst = { - .fc = xen_fc, - .thash = xen_thash, - .get_cpuid = xen_get_cpuid, - .get_pmd = xen_get_pmd, - .getreg = xen_getreg, - .setreg = xen_setreg, - .ptcga = xen_ptcga, - .get_rr = xen_get_rr, - .set_rr = xen_set_rr, - .set_rr0_to_rr4 = xen_set_rr0_to_rr4, - .ssm_i = xen_ssm_i, - .rsm_i = xen_rsm_i, - .get_psr_i = xen_get_psr_i, - .intrin_local_irq_restore - = xen_intrin_local_irq_restore, -}; - -/****************************************************************************** - * replacement of hand written assembly codes. - */ - -extern char xen_switch_to; -extern char xen_leave_syscall; -extern char xen_work_processed_syscall; -extern char xen_leave_kernel; - -const struct pv_cpu_asm_switch xen_cpu_asm_switch = { - .switch_to = (unsigned long)&xen_switch_to, - .leave_syscall = (unsigned long)&xen_leave_syscall, - .work_processed_syscall = (unsigned long)&xen_work_processed_syscall, - .leave_kernel = (unsigned long)&xen_leave_kernel, -}; - -/*************************************************************************** - * pv_iosapic_ops - * iosapic read/write hooks. 
- */ -static void -xen_pcat_compat_init(void) -{ - /* nothing */ -} - -static struct irq_chip* -xen_iosapic_get_irq_chip(unsigned long trigger) -{ - return NULL; -} - -static unsigned int -xen_iosapic_read(char __iomem *iosapic, unsigned int reg) -{ - struct physdev_apic apic_op; - int ret; - - apic_op.apic_physbase = (unsigned long)iosapic - - __IA64_UNCACHED_OFFSET; - apic_op.reg = reg; - ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); - if (ret) - return ret; - return apic_op.value; -} - -static void -xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) -{ - struct physdev_apic apic_op; - - apic_op.apic_physbase = (unsigned long)iosapic - - __IA64_UNCACHED_OFFSET; - apic_op.reg = reg; - apic_op.value = val; - HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); -} - -static struct pv_iosapic_ops xen_iosapic_ops __initdata = { - .pcat_compat_init = xen_pcat_compat_init, - .__get_irq_chip = xen_iosapic_get_irq_chip, - - .__read = xen_iosapic_read, - .__write = xen_iosapic_write, -}; - -/*************************************************************************** - * pv_ops initialization - */ - -void __init -xen_setup_pv_ops(void) -{ - xen_info_init(); - pv_info = xen_info; - pv_init_ops = xen_init_ops; - pv_fsys_data = xen_fsys_data; - pv_patchdata = xen_patchdata; - pv_cpu_ops = xen_cpu_ops; - pv_iosapic_ops = xen_iosapic_ops; - pv_irq_ops = xen_irq_ops; - pv_time_ops = xen_time_ops; - - paravirt_cpu_asm_init(&xen_cpu_asm_switch); -} - -#ifdef ASM_SUPPORTED -/*************************************************************************** - * binary pacthing - * pv_init_ops.patch_bundle - */ - -#define DEFINE_FUNC_GETREG(name, privop) \ - DEFINE_FUNC0(get_ ## name, \ - "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n") - -DEFINE_FUNC_GETREG(psr, PSR); -DEFINE_FUNC_GETREG(eflag, EFLAG); -DEFINE_FUNC_GETREG(ivr, IVR); -DEFINE_FUNC_GETREG(tpr, TPR); - -#define DEFINE_FUNC_SET_KR(n) \ - DEFINE_VOID_FUNC0(set_kr ## n, \ - ";;\n" \ - "mov r9 = r8\n" \ - "mov r8 = " #n "\n" \ - "break " __stringify(HYPERPRIVOP_SET_KR) "\n") - -DEFINE_FUNC_SET_KR(0); -DEFINE_FUNC_SET_KR(1); -DEFINE_FUNC_SET_KR(2); -DEFINE_FUNC_SET_KR(3); -DEFINE_FUNC_SET_KR(4); -DEFINE_FUNC_SET_KR(5); -DEFINE_FUNC_SET_KR(6); -DEFINE_FUNC_SET_KR(7); - -#define __DEFINE_FUNC_SETREG(name, privop) \ - DEFINE_VOID_FUNC0(name, \ - "break "__stringify(HYPERPRIVOP_ ## privop) "\n") - -#define DEFINE_FUNC_SETREG(name, privop) \ - __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop) - -DEFINE_FUNC_SETREG(eflag, EFLAG); -DEFINE_FUNC_SETREG(tpr, TPR); -__DEFINE_FUNC_SETREG(eoi, EOI); - -extern const char xen_check_events[]; -extern const char __xen_intrin_local_irq_restore_direct_start[]; -extern const char __xen_intrin_local_irq_restore_direct_end[]; -extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc; - -asm ( - ".align 32\n" - ".proc xen_check_events\n" - "xen_check_events:\n" - /* masked = 0 - * r9 = masked_addr - 1 - * = pending_intr_addr - */ - "st1.rel [r9] = r0, -1\n" - ";;\n" - /* r8 = pending_intr */ - "ld1.acq r11 = [r9]\n" - ";;\n" - /* p9 = interrupt pending? 
*/ - "cmp.ne p9, p10 = r11, r0\n" - ";;\n" - "(p10) mf\n" - /* issue hypercall to trigger interrupt */ - "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n" - "br.cond.sptk.many b6\n" - ".endp xen_check_events\n" - "\n" - ".align 32\n" - ".proc __xen_intrin_local_irq_restore_direct\n" - "__xen_intrin_local_irq_restore_direct:\n" - "__xen_intrin_local_irq_restore_direct_start:\n" - "1:\n" - "{\n" - "cmp.ne p6, p7 = r8, r0\n" - "mov r17 = ip\n" /* get ip to calc return address */ - "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - "}\n" - "{\n" - /* r9 = XEN_PSR_I_ADDR */ - "ld8 r9 = [r9]\n" - ";;\n" - /* r10 = masked previous value */ - "(p6) ld1.acq r10 = [r9]\n" - "adds r17 = 1f - 1b, r17\n" /* calculate return address */ - ";;\n" - "}\n" - "{\n" - /* p8 = !masked interrupt masked previously? */ - "(p6) cmp.ne.unc p8, p0 = r10, r0\n" - "\n" - /* p7 = else clause */ - "(p7) mov r11 = 1\n" - ";;\n" - "(p8) mov b6 = r17\n" /* set return address */ - "}\n" - "{\n" - /* masked = 1 */ - "(p7) st1.rel [r9] = r11\n" - "\n" - "[99:]\n" - "(p8) brl.cond.dptk.few xen_check_events\n" - "}\n" - /* pv calling stub is 5 bundles. fill nop to adjust return address */ - "{\n" - "nop 0\n" - "nop 0\n" - "nop 0\n" - "}\n" - "1:\n" - "__xen_intrin_local_irq_restore_direct_end:\n" - ".endp __xen_intrin_local_irq_restore_direct\n" - "\n" - ".align 8\n" - "__xen_intrin_local_irq_restore_direct_reloc:\n" - "data8 99b\n" -); - -static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[] -__initdata_or_module = -{ -#define XEN_PATCH_BUNDLE_ELEM(name, type) \ - { \ - (void*)xen_ ## name ## _direct_start, \ - (void*)xen_ ## name ## _direct_end, \ - PARAVIRT_PATCH_TYPE_ ## type, \ - } - - XEN_PATCH_BUNDLE_ELEM(fc, FC), - XEN_PATCH_BUNDLE_ELEM(thash, THASH), - XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), - XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), - XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA), - XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR), - XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR), - XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), - XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), - XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), - XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), - { - (void*)__xen_intrin_local_irq_restore_direct_start, - (void*)__xen_intrin_local_irq_restore_direct_end, - PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE, - }, - -#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ - { \ - xen_get_ ## name ## _direct_start, \ - xen_get_ ## name ## _direct_end, \ - PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ - } - - XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), - XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG), - - XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR), - XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR), - - XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC), - XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM), - - -#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ - { \ - xen_ ## name ## _direct_start, \ - xen_ ## name ## _direct_end, \ - PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ - } - -#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ - __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg) - - XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7), - - XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG), - XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, 
CR_TPR), - __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI), - - XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC), - XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM), -}; - -static unsigned long __init_or_module -xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type) -{ - const unsigned long nelems = sizeof(xen_patch_bundle_elems) / - sizeof(xen_patch_bundle_elems[0]); - unsigned long used; - const struct paravirt_patch_bundle_elem *found; - - used = __paravirt_patch_apply_bundle(sbundle, ebundle, type, - xen_patch_bundle_elems, nelems, - &found); - - if (found == NULL) - /* fallback */ - return ia64_native_patch_bundle(sbundle, ebundle, type); - if (used == 0) - return used; - - /* relocation */ - switch (type) { - case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: { - unsigned long reloc = - __xen_intrin_local_irq_restore_direct_reloc; - unsigned long reloc_offset = reloc - (unsigned long) - __xen_intrin_local_irq_restore_direct_start; - unsigned long tag = (unsigned long)sbundle + reloc_offset; - paravirt_patch_reloc_brl(tag, xen_check_events); - break; - } - default: - /* nothing */ - break; - } - return used; -} -#endif /* ASM_SUPPOTED */ - -const struct paravirt_patch_branch_target xen_branch_target[] -__initconst = { -#define PARAVIRT_BR_TARGET(name, type) \ - { \ - &xen_ ## name, \ - PARAVIRT_PATCH_TYPE_BR_ ## type, \ - } - PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), - PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), - PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), - PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), -}; - -static void __init -xen_patch_branch(unsigned long tag, unsigned long type) -{ - __paravirt_patch_apply_branch(tag, type, xen_branch_target, - ARRAY_SIZE(xen_branch_target)); -} diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c deleted file mode 100644 index 73d903ca2d64..000000000000 --- a/arch/ia64/xen/xencomm.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/mm.h> -#include <linux/err.h> - -static unsigned long kernel_virtual_offset; -static int is_xencomm_initialized; - -/* for xen early printk. It uses console io hypercall which uses xencomm. - * However early printk may use it before xencomm initialization. - */ -int -xencomm_is_initialized(void) -{ - return is_xencomm_initialized; -} - -void -xencomm_initialize(void) -{ - kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START); - is_xencomm_initialized = 1; -} - -/* Translate virtual address to physical address. 
*/ -unsigned long -xencomm_vtop(unsigned long vaddr) -{ - struct page *page; - struct vm_area_struct *vma; - - if (vaddr == 0) - return 0UL; - - if (REGION_NUMBER(vaddr) == 5) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *ptep; - - /* On ia64, TASK_SIZE refers to current. It is not initialized - during boot. - Furthermore the kernel is relocatable and __pa() doesn't - work on addresses. */ - if (vaddr >= KERNEL_START - && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) - return vaddr - kernel_virtual_offset; - - /* In kernel area -- virtually mapped. */ - pgd = pgd_offset_k(vaddr); - if (pgd_none(*pgd) || pgd_bad(*pgd)) - return ~0UL; - - pud = pud_offset(pgd, vaddr); - if (pud_none(*pud) || pud_bad(*pud)) - return ~0UL; - - pmd = pmd_offset(pud, vaddr); - if (pmd_none(*pmd) || pmd_bad(*pmd)) - return ~0UL; - - ptep = pte_offset_kernel(pmd, vaddr); - if (!ptep) - return ~0UL; - - return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK); - } - - if (vaddr > TASK_SIZE) { - /* percpu variables */ - if (REGION_NUMBER(vaddr) == 7 && - REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS)) - ia64_tpa(vaddr); - - /* kernel address */ - return __pa(vaddr); - } - - /* XXX double-check (lack of) locking */ - vma = find_extend_vma(current->mm, vaddr); - if (!vma) - return ~0UL; - - /* We assume the page is modified. */ - page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH); - if (IS_ERR_OR_NULL(page)) - return ~0UL; - - return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); -} diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S deleted file mode 100644 index 3e71d50584d9..000000000000 --- a/arch/ia64/xen/xenivt.S +++ /dev/null @@ -1,52 +0,0 @@ -/* - * arch/ia64/xen/ivt.S - * - * Copyright (C) 2005 Hewlett-Packard Co - * Dan Magenheimer <dan.magenheimer@hp.com> - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * pv_ops. - */ - -#include <asm/asmmacro.h> -#include <asm/kregs.h> -#include <asm/pgtable.h> - -#include "../kernel/minstate.h" - - .section .text,"ax" -GLOBAL_ENTRY(xen_event_callback) - mov r31=pr // prepare to save predicates - ;; - SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 - ;; - movl r3=XSI_PSR_IC - mov r14=1 - ;; - st4 [r3]=r14 - ;; - adds r3=8,r2 // set up second base pointer for SAVE_REST - srlz.i // ensure everybody knows psr.ic is back on - ;; - SAVE_REST - ;; -1: - alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group - add out0=16,sp // pass pointer to pt_regs as first arg - ;; - br.call.sptk.many b0=xen_evtchn_do_upcall - ;; - movl r20=XSI_PSR_I_ADDR - ;; - ld8 r20=[r20] - ;; - adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending - ;; - ld1 r20=[r20] - ;; - cmp.ne p6,p0=r20,r0 // if there are pending events, - (p6) br.spnt.few 1b // call evtchn_do_upcall again. 
- br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is - // paravirtualized as xen_leave_kernel -END(xen_event_callback) diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S deleted file mode 100644 index e29519ebe2d2..000000000000 --- a/arch/ia64/xen/xensetup.S +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Support routines for Xen - * - * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> - */ - -#include <asm/processor.h> -#include <asm/asmmacro.h> -#include <asm/pgtable.h> -#include <asm/paravirt.h> -#include <asm/xen/privop.h> -#include <linux/elfnote.h> -#include <linux/init.h> -#include <xen/interface/elfnote.h> - - .section .data..read_mostly - .align 8 - .global xen_domain_type -xen_domain_type: - data4 XEN_NATIVE_ASM - .previous - - __INIT -ENTRY(startup_xen) - // Calculate load offset. - // The constant, LOAD_OFFSET, can't be used because the boot - // loader doesn't always load to the LMA specified by the vmlinux.lds. - mov r9=ip // must be the first instruction to make sure - // that r9 = the physical address of startup_xen. - // Usually r9 = startup_xen - LOAD_OFFSET - movl r8=startup_xen - ;; - sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET. - - mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN - movl r11=_start - ;; - add r11=r11,r9 - movl r8=hypervisor_type - ;; - add r8=r8,r9 - mov b0=r11 - ;; - st8 [r8]=r10 - br.cond.sptk.many b0 - ;; -END(startup_xen) - - ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") - ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") - ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") - ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET) - -#define isBP p3 // are we the Bootstrap Processor? - -GLOBAL_ENTRY(xen_setup_hook) - mov r8=XEN_PV_DOMAIN_ASM -(isBP) movl r9=xen_domain_type;; -(isBP) st4 [r9]=r8 - movl r10=xen_ivt;; - - mov cr.iva=r10 - - /* Set xsi base. */ -#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600 -(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA -(isBP) movl r28=XSI_BASE;; -(isBP) break 0x1000;; - - /* setup pv_ops */ -(isBP) mov r4=rp - ;; -(isBP) br.call.sptk.many rp=xen_setup_pv_ops - ;; -(isBP) mov rp=r4 - ;; - - br.ret.sptk.many rp - ;; -END(xen_setup_hook) diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h index 6976621efd3f..1a40265e8d88 100644 --- a/arch/m32r/include/asm/barrier.h +++ b/arch/m32r/include/asm/barrier.h @@ -11,84 +11,6 @@ #define nop() __asm__ __volatile__ ("nop" : : ) -/* - * Memory barrier. - * - * mb() prevents loads and stores being reordered across this point. - * rmb() prevents loads being reordered across this point. - * wmb() prevents stores being reordered across this point. - */ -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() - -/** - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. 
See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. - **/ - -#define read_barrier_depends() do { } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#define set_mb(var, value) do { var = value; barrier(); } while (0) -#endif +#include <asm-generic/barrier.h> #endif /* _ASM_M32R_BARRIER_H */ diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 75f25a8e3001..dbdd2231c75d 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -87,6 +87,30 @@ config MMU_SUN3 bool depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE +config KEXEC + bool "kexec system call" + depends on M68KCLASSIC + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. + + The name comes from the similarity to the exec system call. + + It is an ongoing process to be certain the hardware in a machine + is properly shutdown, so do not be surprised if this code does not + initially work for you. As of this writing the exact hardware + interface is strongly in flux, so no good recommendation can be + made. + +config BOOTINFO_PROC + bool "Export bootinfo in procfs" + depends on KEXEC && M68KCLASSIC + help + Say Y to export the bootinfo used to boot the kernel in a + "bootinfo" file in procfs. This is useful with kexec. 
+ menu "Platform setup" source arch/m68k/Kconfig.cpu diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c index 99449fbf9a72..ba03cec3f711 100644 --- a/arch/m68k/amiga/chipram.c +++ b/arch/m68k/amiga/chipram.c @@ -87,7 +87,7 @@ void *amiga_chip_alloc_res(unsigned long size, struct resource *res) atomic_sub(size, &chipavail); pr_debug("amiga_chip_alloc_res: returning %pR\n", res); - return (void *)ZTWO_VADDR(res->start); + return ZTWO_VADDR(res->start); } void amiga_chip_free(void *ptr) diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index b819390e29cd..9625b7132227 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c @@ -28,6 +28,8 @@ #include <linux/keyboard.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-amiga.h> +#include <asm/byteorder.h> #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/amigahw.h> @@ -140,46 +142,46 @@ static struct resource ram_resource[NUM_MEMINFO]; * Parse an Amiga-specific record in the bootinfo */ -int amiga_parse_bootinfo(const struct bi_record *record) +int __init amiga_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_AMIGA_MODEL: - amiga_model = *data; + amiga_model = be32_to_cpup(data); break; case BI_AMIGA_ECLOCK: - amiga_eclock = *data; + amiga_eclock = be32_to_cpup(data); break; case BI_AMIGA_CHIPSET: - amiga_chipset = *data; + amiga_chipset = be32_to_cpup(data); break; case BI_AMIGA_CHIP_SIZE: - amiga_chip_size = *(const int *)data; + amiga_chip_size = be32_to_cpup(data); break; case BI_AMIGA_VBLANK: - amiga_vblank = *(const unsigned char *)data; + amiga_vblank = *(const __u8 *)data; break; case BI_AMIGA_PSFREQ: - amiga_psfreq = *(const unsigned char *)data; + amiga_psfreq = *(const __u8 *)data; break; case BI_AMIGA_AUTOCON: #ifdef CONFIG_ZORRO if (zorro_num_autocon < ZORRO_NUM_AUTO) { - const struct ConfigDev *cd = (struct ConfigDev *)data; - struct zorro_dev *dev = &zorro_autocon[zorro_num_autocon++]; + const struct ConfigDev *cd = data; + struct zorro_dev_init *dev = &zorro_autocon_init[zorro_num_autocon++]; dev->rom = cd->cd_Rom; - dev->slotaddr = cd->cd_SlotAddr; - dev->slotsize = cd->cd_SlotSize; - dev->resource.start = (unsigned long)cd->cd_BoardAddr; - dev->resource.end = dev->resource.start + cd->cd_BoardSize - 1; + dev->slotaddr = be16_to_cpu(cd->cd_SlotAddr); + dev->slotsize = be16_to_cpu(cd->cd_SlotSize); + dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr); + dev->boardsize = be32_to_cpu(cd->cd_BoardSize); } else printk("amiga_parse_bootinfo: too many AutoConfig devices\n"); #endif /* CONFIG_ZORRO */ @@ -358,6 +360,14 @@ static void __init amiga_identify(void) #undef AMIGAHW_ANNOUNCE } + +static unsigned long amiga_random_get_entropy(void) +{ + /* VPOSR/VHPOSR provide at least 17 bits of data changing at 1.79 MHz */ + return *(unsigned long *)&amiga_custom.vposr; +} + + /* * Setup the Amiga configuration info */ @@ -395,6 +405,8 @@ void __init config_amiga(void) mach_heartbeat = amiga_heartbeat; #endif + mach_random_get_entropy = amiga_random_get_entropy; + /* Fill in the clock value (based on the 700 kHz E-Clock) */ amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */ @@ -608,6 +620,8 @@ static void amiga_mem_console_write(struct console *co, const char *s, static int __init amiga_savekmsg_setup(char *arg) { + bool registered; + if (!MACH_IS_AMIGA || strcmp(arg, "mem")) return 0; @@ -618,14 +632,16 @@ static int __init 
amiga_savekmsg_setup(char *arg) /* Just steal the block, the chipram allocator isn't functional yet */ amiga_chip_size -= SAVEKMSG_MAXMEM; - savekmsg = (void *)ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size); + savekmsg = ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size); savekmsg->magic1 = SAVEKMSG_MAGIC1; savekmsg->magic2 = SAVEKMSG_MAGIC2; savekmsg->magicptr = ZTWO_PADDR(savekmsg); savekmsg->size = 0; + registered = !!amiga_console_driver.write; amiga_console_driver.write = amiga_mem_console_write; - register_console(&amiga_console_driver); + if (!registered) + register_console(&amiga_console_driver); return 0; } @@ -707,11 +723,16 @@ void amiga_serial_gets(struct console *co, char *s, int len) static int __init amiga_debug_setup(char *arg) { - if (MACH_IS_AMIGA && !strcmp(arg, "ser")) { - /* no initialization required (?) */ - amiga_console_driver.write = amiga_serial_console_write; + bool registered; + + if (!MACH_IS_AMIGA || strcmp(arg, "ser")) + return 0; + + /* no initialization required (?) */ + registered = !!amiga_console_driver.write; + amiga_console_driver.write = amiga_serial_console_write; + if (!registered) register_console(&amiga_console_driver); - } return 0; } diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c index dacd9f911f71..d34029d7b058 100644 --- a/arch/m68k/amiga/platform.c +++ b/arch/m68k/amiga/platform.c @@ -13,6 +13,7 @@ #include <asm/amigahw.h> #include <asm/amigayle.h> +#include <asm/byteorder.h> #ifdef CONFIG_ZORRO @@ -66,10 +67,12 @@ static int __init z_dev_present(zorro_id id) { unsigned int i; - for (i = 0; i < zorro_num_autocon; i++) - if (zorro_autocon[i].rom.er_Manufacturer == ZORRO_MANUF(id) && - zorro_autocon[i].rom.er_Product == ZORRO_PROD(id)) + for (i = 0; i < zorro_num_autocon; i++) { + const struct ExpansionRom *rom = &zorro_autocon_init[i].rom; + if (be16_to_cpu(rom->er_Manufacturer) == ZORRO_MANUF(id) && + rom->er_Product == ZORRO_PROD(id)) return 1; + } return 0; } diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c index 3ea56b90e718..9268c0f96376 100644 --- a/arch/m68k/apollo/config.c +++ b/arch/m68k/apollo/config.c @@ -1,3 +1,4 @@ +#include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> @@ -9,6 +10,8 @@ #include <asm/setup.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-apollo.h> +#include <asm/byteorder.h> #include <asm/pgtable.h> #include <asm/apollohw.h> #include <asm/irq.h> @@ -43,26 +46,25 @@ static const char *apollo_models[] = { [APOLLO_DN4500-APOLLO_DN3000] = "DN4500 (Roadrunner)" }; -int apollo_parse_bootinfo(const struct bi_record *record) { - +int __init apollo_parse_bootinfo(const struct bi_record *record) +{ int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; - switch(record->tag) { - case BI_APOLLO_MODEL: - apollo_model=*data; - break; + switch (be16_to_cpu(record->tag)) { + case BI_APOLLO_MODEL: + apollo_model = be32_to_cpup(data); + break; - default: - unknown=1; + default: + unknown=1; } return unknown; } -void dn_setup_model(void) { - - +static void __init dn_setup_model(void) +{ printk("Apollo hardware found: "); printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]); diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c index 20cde4e9fc77..3e73a63c066f 100644 --- a/arch/m68k/atari/ataints.c +++ b/arch/m68k/atari/ataints.c @@ -333,6 +333,9 @@ void __init atari_init_IRQ(void) m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq, IRQ_MFP_TIMER1, 8); + 
irq_set_status_flags(IRQ_MFP_TIMER1, IRQ_IS_POLLED); + irq_set_status_flags(IRQ_MFP_TIMER2, IRQ_IS_POLLED); + /* prepare timer D data for use as poll interrupt */ /* set Timer D data Register - needs to be > 0 */ st_mfp.tim_dt_d = 254; /* < 100 Hz */ diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c index fb2d0bd9b3ad..01a62161b08a 100644 --- a/arch/m68k/atari/config.c +++ b/arch/m68k/atari/config.c @@ -37,6 +37,8 @@ #include <linux/module.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-atari.h> +#include <asm/byteorder.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> @@ -129,14 +131,14 @@ static int __init scc_test(volatile char *ctla) int __init atari_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const u_long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_ATARI_MCH_COOKIE: - atari_mch_cookie = *data; + atari_mch_cookie = be32_to_cpup(data); break; case BI_ATARI_MCH_TYPE: - atari_mch_type = *data; + atari_mch_type = be32_to_cpup(data); break; default: unknown = 1; diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c index a547ba9683d1..03cb5e08d7cf 100644 --- a/arch/m68k/atari/debug.c +++ b/arch/m68k/atari/debug.c @@ -287,6 +287,8 @@ static void __init atari_init_midi_port(int cflag) static int __init atari_debug_setup(char *arg) { + bool registered; + if (!MACH_IS_ATARI) return 0; @@ -294,6 +296,7 @@ static int __init atari_debug_setup(char *arg) /* defaults to ser2 for a Falcon and ser1 otherwise */ arg = MACH_IS_FALCON ? "ser2" : "ser1"; + registered = !!atari_console_driver.write; if (!strcmp(arg, "ser1")) { /* ST-MFP Modem1 serial port */ atari_init_mfp_port(B9600|CS8); @@ -317,7 +320,7 @@ static int __init atari_debug_setup(char *arg) sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x20; /* strobe H */ atari_console_driver.write = atari_par_console_write; } - if (atari_console_driver.write) + if (atari_console_driver.write && !registered) register_console(&atari_console_driver); return 0; diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 8943aa4c18e6..478623dbb209 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c @@ -28,6 +28,8 @@ #include <linux/bcd.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-vme.h> +#include <asm/byteorder.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> @@ -50,9 +52,9 @@ void bvme6000_set_vectors (void); static irq_handler_t tick_handler; -int bvme6000_parse_bootinfo(const struct bi_record *bi) +int __init bvme6000_parse_bootinfo(const struct bi_record *bi) { - if (bi->tag == BI_VME_TYPE) + if (be16_to_cpu(bi->tag) == BI_VME_TYPE) return 0; else return 1; diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 19325e117eea..559ff3af8ff7 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -52,7 +52,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -63,11 +62,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -85,6 +84,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m 
CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -98,6 +108,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -130,6 +141,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -144,11 +156,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -156,6 +175,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -170,6 +190,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -183,11 +206,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -195,10 +220,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -216,6 +244,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -262,6 +291,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -271,10 +301,10 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_3COM is not set CONFIG_A2065=y CONFIG_ARIADNE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_FUJITSU is not set # 
CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -285,6 +315,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -311,7 +342,6 @@ CONFIG_JOYSTICK_AMIGA=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m # CONFIG_SERIO is not set -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m @@ -345,10 +375,6 @@ CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y CONFIG_AMIGA_BUILTIN_SERIAL=y CONFIG_SERIAL_CONSOLE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -385,7 +411,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -444,10 +470,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -480,6 +506,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 14dc6ccda7f4..cb1f55df69b6 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m 
+CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -193,10 +218,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -244,12 +273,14 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -258,6 +289,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -279,7 +311,6 @@ CONFIG_INPUT_EVDEV=m # CONFIG_MOUSE_PS2 is not set CONFIG_MOUSE_SERIAL=m CONFIG_SERIO=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -302,10 +333,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -342,7 +369,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -401,10 +428,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -437,6 +464,8 @@ 
CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 6d5370c914b2..e880cfbb62d9 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -192,10 +217,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y 
CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -211,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -249,10 +278,10 @@ CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m CONFIG_EQUALIZER=m -CONFIG_MII=y CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -260,6 +289,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_ATARILANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -267,6 +297,7 @@ CONFIG_ATARILANCE=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -291,7 +322,6 @@ CONFIG_MOUSE_ATARI=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m # CONFIG_SERIO is not set -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m @@ -320,10 +350,6 @@ CONFIG_NFBLOCK=y CONFIG_NFCON=y CONFIG_NFETH=y CONFIG_ATARI_DSP56K=m -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -360,7 +386,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -419,10 +445,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -455,6 +481,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index c015ddb6fd80..4aa4f45e52a8 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m 
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -191,10 +216,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_BVME6000_NET=y @@ -257,6 +288,7 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y 
-CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ec7382d8afff..7cd9d9f456fb 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m 
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -193,10 +218,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -244,6 +273,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -251,6 +281,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_HPLANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -259,6 +290,7 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -282,7 +314,6 @@ CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_HP_SDC_RTC=m CONFIG_SERIO_SERPORT=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -304,10 +335,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -344,7 +371,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -403,10 +430,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -439,6 +466,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 7d46fbec7042..31f5bd061d14 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m 
CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -195,11 +220,13 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -212,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -261,6 +289,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 
CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -268,6 +297,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y @@ -279,6 +309,7 @@ CONFIG_MAC8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -302,7 +333,6 @@ CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_SERIO=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_PMACZILOG=y @@ -327,10 +357,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -367,7 +393,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -426,10 +452,11 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -462,6 +489,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index b17a8837f0e1..4e5adff326ee 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -58,7 +58,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -69,11 +68,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -91,6 +90,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -104,6 +114,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -136,6 +147,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -150,11 +162,18 @@ 
CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -162,6 +181,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -176,6 +196,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -189,11 +212,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -204,11 +229,13 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -230,6 +257,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -290,10 +318,10 @@ CONFIG_MAC_EMUMOUSEBTN=y CONFIG_NETDEVICES=y CONFIG_DUMMY=m CONFIG_EQUALIZER=m -CONFIG_MII=y CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -308,10 +336,10 @@ CONFIG_HPLANCE=y CONFIG_MVME147_NET=y CONFIG_SUN3LANCE=y CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y -# CONFIG_NET_VENDOR_FUJITSU is not set # CONFIG_NET_VENDOR_HP is not set CONFIG_BVME6000_NET=y CONFIG_MVME16x_NET=y @@ -325,6 +353,7 @@ CONFIG_APNE=y CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m @@ -357,7 +386,6 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_HP_SDC_RTC=m CONFIG_SERIO_Q40KBD=y -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_PMACZILOG=y @@ -405,10 +433,6 @@ CONFIG_NFETH=y CONFIG_ATARI_DSP56K=m CONFIG_AMIGA_BUILTIN_SERIAL=y CONFIG_SERIAL_CONSOLE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -445,7 +469,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y 
+CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -504,10 +528,11 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -540,6 +565,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 5586c6529fce..02cdbac5565e 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -47,7 +47,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -58,11 +57,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -80,6 +79,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -93,6 +103,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -125,6 +136,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -139,11 +151,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -151,6 +170,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -165,6 +185,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -178,11 +201,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m 
CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -190,10 +215,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -205,6 +233,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -242,6 +271,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -249,6 +279,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_MVME147_NET=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -257,6 +288,7 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index e5e8262bbacd..05a990a9dbd4 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m 
+CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -191,10 +216,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MVME16x_NET=y @@ -257,6 +288,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m 
CONFIG_PPP_BSDCOMP=m @@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -393,10 +421,11 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -429,6 +458,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index be1496ed9b66..568e2a98f976 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m 
CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -191,10 +216,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -209,6 +237,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -249,6 +278,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -257,10 +287,10 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_FUJITSU is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -269,6 +299,7 @@ CONFIG_NE2000=m # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m @@ -293,7 +324,6 @@ CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_SERIO_Q40KBD=y -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m @@ -318,10 +348,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -358,7 +384,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -417,10 +443,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -453,6 +479,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 54674d61e001..60b0aeac5742 100644 
--- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -123,6 +134,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -188,10 +213,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m 
CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_MARVELL is not set @@ -255,6 +286,7 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m CONFIG_KEYBOARD_SUNKBD=y # CONFIG_MOUSE_PS2 is not set CONFIG_MOUSE_SERIAL=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -395,10 +422,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 832d9539f441..21bda331eebb 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -123,6 +134,7 @@ 
CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -188,10 +213,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -255,6 +286,7 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m CONFIG_KEYBOARD_SUNKBD=y # CONFIG_MOUSE_PS2 is not set CONFIG_MOUSE_SERIAL=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y 
CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -395,10 +422,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 121a6660ad4e..71b78ecee75c 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c @@ -9,6 +9,7 @@ * the GNU General Public License (GPL), incorporated herein by reference. */ +#include <linux/init.h> #include <linux/types.h> #include <linux/console.h> #include <linux/string.h> @@ -70,7 +71,7 @@ static void nf_poweroff(void) nf_call(id); } -void nf_init(void) +void __init nf_init(void) { unsigned long id, version; char buf[256]; diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c index b7609f791522..2e5a787ea11b 100644 --- a/arch/m68k/hp300/config.c +++ b/arch/m68k/hp300/config.c @@ -14,6 +14,8 @@ #include <linux/console.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-hp300.h> +#include <asm/byteorder.h> #include <asm/machdep.h> #include <asm/blinken.h> #include <asm/io.h> /* readb() and writeb() */ @@ -70,15 +72,15 @@ extern int hp300_setup_serial_console(void) __init; int __init hp300_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_HP300_MODEL: - hp300_model = *data; + hp300_model = be32_to_cpup(data); break; case BI_HP300_UART_SCODE: - hp300_uart_scode = *data; + hp300_uart_scode = be32_to_cpup(data); break; case BI_HP300_UART_ADDR: diff --git a/arch/m68k/include/asm/amigahw.h b/arch/m68k/include/asm/amigahw.h index 7a19b5686a4a..5ad568110f17 100644 --- a/arch/m68k/include/asm/amigahw.h +++ b/arch/m68k/include/asm/amigahw.h @@ -18,26 +18,7 @@ #include <linux/ioport.h> - /* - * Different Amiga models - */ - -#define AMI_UNKNOWN (0) -#define AMI_500 (1) -#define AMI_500PLUS (2) -#define AMI_600 (3) -#define AMI_1000 (4) -#define AMI_1200 (5) -#define AMI_2000 (6) -#define AMI_2500 (7) -#define AMI_3000 (8) -#define AMI_3000T (9) -#define AMI_3000PLUS (10) -#define AMI_4000 (11) -#define AMI_4000T (12) -#define AMI_CDTV (13) -#define AMI_CD32 (14) -#define AMI_DRACO (15) +#include <asm/bootinfo-amiga.h> /* @@ -46,11 +27,6 @@ extern unsigned long amiga_chipset; -#define CS_STONEAGE (0) -#define CS_OCS (1) -#define CS_ECS (2) -#define CS_AGA (3) - /* * Miscellaneous @@ -266,7 +242,7 @@ struct CIA { #define zTwoBase (0x80000000) #define ZTWO_PADDR(x) (((unsigned long)(x))-zTwoBase) -#define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase) +#define ZTWO_VADDR(x) ((void __iomem *)(((unsigned long)(x))+zTwoBase)) #define CUSTOM_PHYSADDR (0xdff000) #define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR))) diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h index 6c19e0c22411..87fc899d32ee 100644 --- a/arch/m68k/include/asm/apollohw.h +++ 
b/arch/m68k/include/asm/apollohw.h @@ -5,18 +5,11 @@ #include <linux/types.h> -/* - apollo models -*/ +#include <asm/bootinfo-apollo.h> + extern u_long apollo_model; -#define APOLLO_UNKNOWN (0) -#define APOLLO_DN3000 (1) -#define APOLLO_DN3010 (2) -#define APOLLO_DN3500 (3) -#define APOLLO_DN4000 (4) -#define APOLLO_DN4500 (5) /* see scn2681 data sheet for more info. diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h index d887050e6da6..972c8f33f055 100644 --- a/arch/m68k/include/asm/atarihw.h +++ b/arch/m68k/include/asm/atarihw.h @@ -21,7 +21,7 @@ #define _LINUX_ATARIHW_H_ #include <linux/types.h> -#include <asm/bootinfo.h> +#include <asm/bootinfo-atari.h> #include <asm/raw_io.h> extern u_long atari_mch_cookie; diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h index 445ce22c23cb..15c5f77c1614 100644 --- a/arch/m68k/include/asm/barrier.h +++ b/arch/m68k/include/asm/barrier.h @@ -1,20 +1,8 @@ #ifndef _M68K_BARRIER_H #define _M68K_BARRIER_H -/* - * Force strict CPU ordering. - * Not really required on m68k... - */ #define nop() do { asm volatile ("nop"); barrier(); } while (0) -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define read_barrier_depends() ((void)0) -#define set_mb(var, value) ({ (var) = (value); wmb(); }) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() ((void)0) +#include <asm-generic/barrier.h> #endif /* _M68K_BARRIER_H */ diff --git a/arch/m68k/include/asm/bootinfo.h b/arch/m68k/include/asm/bootinfo.h index 67e7a78ad96b..8e213267f8e7 100644 --- a/arch/m68k/include/asm/bootinfo.h +++ b/arch/m68k/include/asm/bootinfo.h @@ -6,373 +6,23 @@ ** This file is subject to the terms and conditions of the GNU General Public ** License. See the file COPYING in the main directory of this archive ** for more details. -** -** Created 09/29/92 by Greg Harp -** -** 5/2/94 Roman Hodek: -** Added bi_atari part of the machine dependent union bi_un; for now it -** contains just a model field to distinguish between TT and Falcon. -** 26/7/96 Roman Zippel: -** Renamed to setup.h; added some useful macros to allow gcc some -** optimizations if possible. -** 5/10/96 Geert Uytterhoeven: -** Redesign of the boot information structure; renamed to bootinfo.h again -** 27/11/96 Geert Uytterhoeven: -** Backwards compatibility with bootinfo interface version 1.0 */ #ifndef _M68K_BOOTINFO_H #define _M68K_BOOTINFO_H +#include <uapi/asm/bootinfo.h> - /* - * Bootinfo definitions - * - * This is an easily parsable and extendable structure containing all - * information to be passed from the bootstrap to the kernel. - * - * This way I hope to keep all future changes back/forewards compatible. - * Thus, keep your fingers crossed... - * - * This structure is copied right after the kernel bss by the bootstrap - * routine. 
- */ #ifndef __ASSEMBLY__ -struct bi_record { - unsigned short tag; /* tag ID */ - unsigned short size; /* size of record (in bytes) */ - unsigned long data[0]; /* data */ -}; - -#endif /* __ASSEMBLY__ */ - - - /* - * Tag Definitions - * - * Machine independent tags start counting from 0x0000 - * Machine dependent tags start counting from 0x8000 - */ - -#define BI_LAST 0x0000 /* last record (sentinel) */ -#define BI_MACHTYPE 0x0001 /* machine type (u_long) */ -#define BI_CPUTYPE 0x0002 /* cpu type (u_long) */ -#define BI_FPUTYPE 0x0003 /* fpu type (u_long) */ -#define BI_MMUTYPE 0x0004 /* mmu type (u_long) */ -#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */ - /* (struct mem_info) */ -#define BI_RAMDISK 0x0006 /* ramdisk address and size */ - /* (struct mem_info) */ -#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */ - /* (string) */ - - /* - * Amiga-specific tags - */ - -#define BI_AMIGA_MODEL 0x8000 /* model (u_long) */ -#define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */ - /* (struct ConfigDev) */ -#define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (u_long) */ -#define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (u_char) */ -#define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (u_char) */ -#define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (u_long) */ -#define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (u_long) */ -#define BI_AMIGA_SERPER 0x8007 /* serial port period (u_short) */ - - /* - * Atari-specific tags - */ - -#define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (u_long) */ -#define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (u_long) */ - /* (values are ATARI_MACH_* defines */ - -/* mch_cookie values (upper word) */ -#define ATARI_MCH_ST 0 -#define ATARI_MCH_STE 1 -#define ATARI_MCH_TT 2 -#define ATARI_MCH_FALCON 3 - -/* mch_type values */ -#define ATARI_MACH_NORMAL 0 /* no special machine type */ -#define ATARI_MACH_MEDUSA 1 /* Medusa 040 */ -#define ATARI_MACH_HADES 2 /* Hades 040 or 060 */ -#define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */ - - /* - * VME-specific tags - */ - -#define BI_VME_TYPE 0x8000 /* VME sub-architecture (u_long) */ -#define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */ - -/* BI_VME_TYPE codes */ -#define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */ -#define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */ -#define VME_TYPE_MVME162 0x0162 /* Motorola MVME162 */ -#define VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */ -#define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */ -#define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */ -#define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */ -#define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */ -#define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */ - -/* BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on - * Motorola VME boards. Contains board number, Bug version, board - * configuration options, etc. See include/asm/mvme16xhw.h for details. 
- */ - - - /* - * Macintosh-specific tags (all u_long) - */ - -#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */ -#define BI_MAC_VADDR 0x8001 /* Mac video base address */ -#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */ -#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */ -#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */ -#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */ -#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */ -#define BI_MAC_BTIME 0x8007 /* Mac boot time */ -#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */ -#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */ -#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */ -#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */ - - /* - * Macintosh hardware profile data - unused, see macintosh.h for - * reasonable type values - */ - -#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */ -#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */ -#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */ -#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */ -#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */ -#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */ -#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */ -#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */ -#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */ -#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */ -#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */ -#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */ -#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */ -#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE */ -#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */ -#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */ -#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */ -#define BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */ - - /* - * Mac: compatibility with old booter data format (temporarily) - * Fields unused with the new bootinfo can be deleted now; instead of - * adding new fields the struct might be splitted into a hardware address - * part and a hardware type part - */ - -#ifndef __ASSEMBLY__ - -struct mac_booter_data -{ - unsigned long videoaddr; - unsigned long videorow; - unsigned long videodepth; - unsigned long dimensions; - unsigned long args; - unsigned long boottime; - unsigned long gmtbias; - unsigned long bootver; - unsigned long videological; - unsigned long sccbase; - unsigned long id; - unsigned long memsize; - unsigned long serialmf; - unsigned long serialhsk; - unsigned long serialgpi; - unsigned long printmf; - unsigned long printhsk; - unsigned long printgpi; - unsigned long cpuid; - unsigned long rombase; - unsigned long adbdelay; - unsigned long timedbra; -}; - -extern struct mac_booter_data - mac_bi_data; - +#ifdef CONFIG_BOOTINFO_PROC +extern void save_bootinfo(const struct bi_record *bi); +#else +static inline void save_bootinfo(const struct bi_record *bi) {} #endif - /* - * Apollo-specific tags - */ - -#define BI_APOLLO_MODEL 0x8000 /* model (u_long) */ - - /* - * HP300-specific tags - */ - -#define BI_HP300_MODEL 0x8000 /* model (u_long) */ -#define BI_HP300_UART_SCODE 0x8001 /* UART select code (u_long) */ -#define BI_HP300_UART_ADDR 0x8002 /* phys. 
addr of UART (u_long) */ - - /* - * Stuff for bootinfo interface versioning - * - * At the start of kernel code, a 'struct bootversion' is located. - * bootstrap checks for a matching version of the interface before booting - * a kernel, to avoid user confusion if kernel and bootstrap don't work - * together :-) - * - * If incompatible changes are made to the bootinfo interface, the major - * number below should be stepped (and the minor reset to 0) for the - * appropriate machine. If a change is backward-compatible, the minor - * should be stepped. "Backwards-compatible" means that booting will work, - * but certain features may not. - */ - -#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */ -#define MK_BI_VERSION(major,minor) (((major)<<16)+(minor)) -#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff) -#define BI_VERSION_MINOR(v) ((v) & 0xffff) - -#ifndef __ASSEMBLY__ - -struct bootversion { - unsigned short branch; - unsigned long magic; - struct { - unsigned long machtype; - unsigned long version; - } machversions[0]; -}; - #endif /* __ASSEMBLY__ */ -#define AMIGA_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define ATARI_BOOTI_VERSION MK_BI_VERSION( 2, 1 ) -#define MAC_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define MVME147_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define MVME16x_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define BVME6000_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define Q40_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define HP300_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) - -#ifdef BOOTINFO_COMPAT_1_0 - - /* - * Backwards compatibility with bootinfo interface version 1.0 - */ - -#define COMPAT_AMIGA_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) -#define COMPAT_ATARI_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) -#define COMPAT_MAC_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) - -#include <linux/zorro.h> - -#define COMPAT_NUM_AUTO 16 - -struct compat_bi_Amiga { - int model; - int num_autocon; - struct ConfigDev autocon[COMPAT_NUM_AUTO]; - unsigned long chip_size; - unsigned char vblank; - unsigned char psfreq; - unsigned long eclock; - unsigned long chipset; - unsigned long hw_present; -}; - -struct compat_bi_Atari { - unsigned long hw_present; - unsigned long mch_cookie; -}; - -#ifndef __ASSEMBLY__ - -struct compat_bi_Macintosh -{ - unsigned long videoaddr; - unsigned long videorow; - unsigned long videodepth; - unsigned long dimensions; - unsigned long args; - unsigned long boottime; - unsigned long gmtbias; - unsigned long bootver; - unsigned long videological; - unsigned long sccbase; - unsigned long id; - unsigned long memsize; - unsigned long serialmf; - unsigned long serialhsk; - unsigned long serialgpi; - unsigned long printmf; - unsigned long printhsk; - unsigned long printgpi; - unsigned long cpuid; - unsigned long rombase; - unsigned long adbdelay; - unsigned long timedbra; -}; - -#endif - -struct compat_mem_info { - unsigned long addr; - unsigned long size; -}; - -#define COMPAT_NUM_MEMINFO 4 - -#define COMPAT_CPUB_68020 0 -#define COMPAT_CPUB_68030 1 -#define COMPAT_CPUB_68040 2 -#define COMPAT_CPUB_68060 3 -#define COMPAT_FPUB_68881 5 -#define COMPAT_FPUB_68882 6 -#define COMPAT_FPUB_68040 7 -#define COMPAT_FPUB_68060 8 - -#define COMPAT_CPU_68020 (1<<COMPAT_CPUB_68020) -#define COMPAT_CPU_68030 (1<<COMPAT_CPUB_68030) -#define COMPAT_CPU_68040 (1<<COMPAT_CPUB_68040) -#define COMPAT_CPU_68060 (1<<COMPAT_CPUB_68060) -#define COMPAT_CPU_MASK (31) -#define COMPAT_FPU_68881 (1<<COMPAT_FPUB_68881) -#define COMPAT_FPU_68882 (1<<COMPAT_FPUB_68882) -#define COMPAT_FPU_68040 (1<<COMPAT_FPUB_68040) -#define COMPAT_FPU_68060 
(1<<COMPAT_FPUB_68060) -#define COMPAT_FPU_MASK (0xfe0) - -#define COMPAT_CL_SIZE (256) - -struct compat_bootinfo { - unsigned long machtype; - unsigned long cputype; - struct compat_mem_info memory[COMPAT_NUM_MEMINFO]; - int num_memory; - unsigned long ramdisk_size; - unsigned long ramdisk_addr; - char command_line[COMPAT_CL_SIZE]; - union { - struct compat_bi_Amiga bi_ami; - struct compat_bi_Atari bi_ata; - struct compat_bi_Macintosh bi_mac; - } bi_un; -}; - -#define bi_amiga bi_un.bi_ami -#define bi_atari bi_un.bi_ata -#define bi_mac bi_un.bi_mac - -#endif /* BOOTINFO_COMPAT_1_0 */ - #endif /* _M68K_BOOTINFO_H */ diff --git a/arch/m68k/include/asm/hp300hw.h b/arch/m68k/include/asm/hp300hw.h index d998ea67c19c..64f5271dd7be 100644 --- a/arch/m68k/include/asm/hp300hw.h +++ b/arch/m68k/include/asm/hp300hw.h @@ -1,25 +1,9 @@ #ifndef _M68K_HP300HW_H #define _M68K_HP300HW_H -extern unsigned long hp300_model; +#include <asm/bootinfo-hp300.h> -/* This information was taken from NetBSD */ -#define HP_320 (0) /* 16MHz 68020+HP MMU+16K external cache */ -#define HP_330 (1) /* 16MHz 68020+68851 MMU */ -#define HP_340 (2) /* 16MHz 68030 */ -#define HP_345 (3) /* 50MHz 68030+32K external cache */ -#define HP_350 (4) /* 25MHz 68020+HP MMU+32K external cache */ -#define HP_360 (5) /* 25MHz 68030 */ -#define HP_370 (6) /* 33MHz 68030+64K external cache */ -#define HP_375 (7) /* 50MHz 68030+32K external cache */ -#define HP_380 (8) /* 25MHz 68040 */ -#define HP_385 (9) /* 33MHz 68040 */ -#define HP_400 (10) /* 50MHz 68030+32K external cache */ -#define HP_425T (11) /* 25MHz 68040 - model 425t */ -#define HP_425S (12) /* 25MHz 68040 - model 425s */ -#define HP_425E (13) /* 25MHz 68040 - model 425e */ -#define HP_433T (14) /* 33MHz 68040 - model 433t */ -#define HP_433S (15) /* 33MHz 68040 - model 433s */ +extern unsigned long hp300_model; #endif /* _M68K_HP300HW_H */ diff --git a/arch/m68k/include/asm/kexec.h b/arch/m68k/include/asm/kexec.h new file mode 100644 index 000000000000..3df97abac147 --- /dev/null +++ b/arch/m68k/include/asm/kexec.h @@ -0,0 +1,29 @@ +#ifndef _ASM_M68K_KEXEC_H +#define _ASM_M68K_KEXEC_H + +#ifdef CONFIG_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 4096 + +#define KEXEC_ARCH KEXEC_ARCH_68K + +#ifndef __ASSEMBLY__ + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + /* Dummy implementation for now */ +} + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_KEXEC */ + +#endif /* _ASM_M68K_KEXEC_H */ diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index aeeedf8b2d25..fe3fc9ae1b69 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h @@ -254,6 +254,8 @@ extern volatile __u8 *via1,*via2; extern int rbv_present,via_alt_mapping; +struct irq_desc; + extern void via_register_interrupts(void); extern void via_irq_enable(int); extern void via_irq_disable(int); diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h index 682a1a2ff55f..d323b2c2d07d 100644 --- a/arch/m68k/include/asm/macintosh.h +++ b/arch/m68k/include/asm/macintosh.h @@ -4,6 +4,9 @@ #include <linux/seq_file.h> #include <linux/interrupt.h> +#include <asm/bootinfo-mac.h> + + /* * Apple Macintoshisms */ 
@@ -74,65 +77,29 @@ struct mac_model #define MAC_FLOPPY_SWIM_IOP 3 #define MAC_FLOPPY_AV 4 -/* - * Gestalt numbers - */ +extern struct mac_model *macintosh_config; -#define MAC_MODEL_II 6 -#define MAC_MODEL_IIX 7 -#define MAC_MODEL_IICX 8 -#define MAC_MODEL_SE30 9 -#define MAC_MODEL_IICI 11 -#define MAC_MODEL_IIFX 13 /* And well numbered it is too */ -#define MAC_MODEL_IISI 18 -#define MAC_MODEL_LC 19 -#define MAC_MODEL_Q900 20 -#define MAC_MODEL_PB170 21 -#define MAC_MODEL_Q700 22 -#define MAC_MODEL_CLII 23 /* aka: P200 */ -#define MAC_MODEL_PB140 25 -#define MAC_MODEL_Q950 26 /* aka: WGS95 */ -#define MAC_MODEL_LCIII 27 /* aka: P450 */ -#define MAC_MODEL_PB210 29 -#define MAC_MODEL_C650 30 -#define MAC_MODEL_PB230 32 -#define MAC_MODEL_PB180 33 -#define MAC_MODEL_PB160 34 -#define MAC_MODEL_Q800 35 /* aka: WGS80 */ -#define MAC_MODEL_Q650 36 -#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */ -#define MAC_MODEL_PB250 38 -#define MAC_MODEL_IIVI 44 -#define MAC_MODEL_P600 45 /* aka: P600CD */ -#define MAC_MODEL_IIVX 48 -#define MAC_MODEL_CCL 49 /* aka: P250 */ -#define MAC_MODEL_PB165C 50 -#define MAC_MODEL_C610 52 /* aka: WGS60 */ -#define MAC_MODEL_Q610 53 -#define MAC_MODEL_PB145 54 /* aka: PB145B */ -#define MAC_MODEL_P520 56 /* aka: LC520 */ -#define MAC_MODEL_C660 60 -#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */ -#define MAC_MODEL_PB180C 71 -#define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */ -#define MAC_MODEL_PB270C 77 -#define MAC_MODEL_Q840 78 -#define MAC_MODEL_P550 80 /* aka: LC550, P560 */ -#define MAC_MODEL_CCLII 83 /* aka: P275 */ -#define MAC_MODEL_PB165 84 -#define MAC_MODEL_PB190 85 /* aka: PB190CS */ -#define MAC_MODEL_TV 88 -#define MAC_MODEL_P475 89 /* aka: LC475, P476 */ -#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */ -#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */ -#define MAC_MODEL_Q605 94 -#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */ -#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */ -#define MAC_MODEL_P588 99 /* aka: LC580, P580 */ -#define MAC_MODEL_PB280 102 -#define MAC_MODEL_PB280C 103 -#define MAC_MODEL_PB150 115 -extern struct mac_model *macintosh_config; + /* + * Internal representation of the Mac hardware, filled in from bootinfo + */ + +struct mac_booter_data +{ + unsigned long videoaddr; + unsigned long videorow; + unsigned long videodepth; + unsigned long dimensions; + unsigned long boottime; + unsigned long gmtbias; + unsigned long videological; + unsigned long sccbase; + unsigned long id; + unsigned long memsize; + unsigned long cpuid; + unsigned long rombase; +}; + +extern struct mac_booter_data mac_bi_data; #endif diff --git a/arch/m68k/include/asm/mc146818rtc.h b/arch/m68k/include/asm/mc146818rtc.h index 9f70a01f73dc..05b43bf5cdf3 100644 --- a/arch/m68k/include/asm/mc146818rtc.h +++ b/arch/m68k/include/asm/mc146818rtc.h @@ -10,16 +10,16 @@ #include <asm/atarihw.h> -#define RTC_PORT(x) (TT_RTC_BAS + 2*(x)) +#define ATARI_RTC_PORT(x) (TT_RTC_BAS + 2*(x)) #define RTC_ALWAYS_BCD 0 #define CMOS_READ(addr) ({ \ -atari_outb_p((addr),RTC_PORT(0)); \ -atari_inb_p(RTC_PORT(1)); \ +atari_outb_p((addr), ATARI_RTC_PORT(0)); \ +atari_inb_p(ATARI_RTC_PORT(1)); \ }) #define CMOS_WRITE(val, addr) ({ \ -atari_outb_p((addr),RTC_PORT(0)); \ -atari_outb_p((val),RTC_PORT(1)); \ +atari_outb_p((addr), ATARI_RTC_PORT(0)); \ +atari_outb_p((val), ATARI_RTC_PORT(1)); \ }) #endif /* CONFIG_ATARI */ diff --git a/arch/m68k/include/asm/mvme16xhw.h b/arch/m68k/include/asm/mvme16xhw.h index 
6117f56653d2..1eb89de631e5 100644 --- a/arch/m68k/include/asm/mvme16xhw.h +++ b/arch/m68k/include/asm/mvme16xhw.h @@ -3,23 +3,6 @@ #include <asm/irq.h> -/* Board ID data structure - pointer to this retrieved from Bug by head.S */ - -/* Note, bytes 12 and 13 are board no in BCD (0162,0166,0167,0177,etc) */ - -extern long mvme_bdid_ptr; - -typedef struct { - char bdid[4]; - u_char rev, mth, day, yr; - u_short size, reserved; - u_short brdno; - char brdsuffix[2]; - u_long options; - u_short clun, dlun, ctype, dnum; - u_long option2; -} t_bdid, *p_bdid; - typedef struct { u_char ack_icr, diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h index 65e78a2dad64..8f2023f8c1c4 100644 --- a/arch/m68k/include/asm/setup.h +++ b/arch/m68k/include/asm/setup.h @@ -22,6 +22,7 @@ #ifndef _M68K_SETUP_H #define _M68K_SETUP_H +#include <uapi/asm/bootinfo.h> #include <uapi/asm/setup.h> @@ -297,14 +298,14 @@ extern int m68k_is040or060; #define NUM_MEMINFO 4 #ifndef __ASSEMBLY__ -struct mem_info { +struct m68k_mem_info { unsigned long addr; /* physical address of memory chunk */ unsigned long size; /* length of memory chunk (in bytes) */ }; extern int m68k_num_memory; /* # of memory blocks found (and used) */ extern int m68k_realnum_memory; /* real # of memory blocks found */ -extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */ +extern struct m68k_mem_info m68k_memory[NUM_MEMINFO];/* memory description */ #endif #endif /* _M68K_SETUP_H */ diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h index 6759dad954f6..efc1f4892357 100644 --- a/arch/m68k/include/asm/timex.h +++ b/arch/m68k/include/asm/timex.h @@ -28,4 +28,14 @@ static inline cycles_t get_cycles(void) return 0; } +extern unsigned long (*mach_random_get_entropy)(void); + +static inline unsigned long random_get_entropy(void) +{ + if (mach_random_get_entropy) + return mach_random_get_entropy(); + return 0; +} +#define random_get_entropy random_get_entropy + #endif diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index 1fef45ada097..6a2d257bdfb2 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild @@ -11,6 +11,14 @@ generic-y += termbits.h generic-y += termios.h header-y += a.out.h +header-y += bootinfo.h +header-y += bootinfo-amiga.h +header-y += bootinfo-apollo.h +header-y += bootinfo-atari.h +header-y += bootinfo-hp300.h +header-y += bootinfo-mac.h +header-y += bootinfo-q40.h +header-y += bootinfo-vme.h header-y += byteorder.h header-y += cachectl.h header-y += fcntl.h diff --git a/arch/m68k/include/uapi/asm/bootinfo-amiga.h b/arch/m68k/include/uapi/asm/bootinfo-amiga.h new file mode 100644 index 000000000000..daad3c58d2da --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-amiga.h @@ -0,0 +1,63 @@ +/* +** asm/bootinfo-amiga.h -- Amiga-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_AMIGA_H +#define _UAPI_ASM_M68K_BOOTINFO_AMIGA_H + + + /* + * Amiga-specific tags + */ + +#define BI_AMIGA_MODEL 0x8000 /* model (__be32) */ +#define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */ + /* (AmigaOS struct ConfigDev) */ +#define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (__be32) */ +#define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (__u8) */ +#define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (__u8) */ +#define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (__be32) */ +#define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (__be32) */ +#define BI_AMIGA_SERPER 0x8007 /* serial port period 
(__be16) */ + + + /* + * Amiga models (BI_AMIGA_MODEL) + */ + +#define AMI_UNKNOWN 0 +#define AMI_500 1 +#define AMI_500PLUS 2 +#define AMI_600 3 +#define AMI_1000 4 +#define AMI_1200 5 +#define AMI_2000 6 +#define AMI_2500 7 +#define AMI_3000 8 +#define AMI_3000T 9 +#define AMI_3000PLUS 10 +#define AMI_4000 11 +#define AMI_4000T 12 +#define AMI_CDTV 13 +#define AMI_CD32 14 +#define AMI_DRACO 15 + + + /* + * Amiga chipsets (BI_AMIGA_CHIPSET) + */ + +#define CS_STONEAGE 0 +#define CS_OCS 1 +#define CS_ECS 2 +#define CS_AGA 3 + + + /* + * Latest Amiga bootinfo version + */ + +#define AMIGA_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_AMIGA_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-apollo.h b/arch/m68k/include/uapi/asm/bootinfo-apollo.h new file mode 100644 index 000000000000..a93e0af1c6fe --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-apollo.h @@ -0,0 +1,28 @@ +/* +** asm/bootinfo-apollo.h -- Apollo-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_APOLLO_H +#define _UAPI_ASM_M68K_BOOTINFO_APOLLO_H + + + /* + * Apollo-specific tags + */ + +#define BI_APOLLO_MODEL 0x8000 /* model (__be32) */ + + + /* + * Apollo models (BI_APOLLO_MODEL) + */ + +#define APOLLO_UNKNOWN 0 +#define APOLLO_DN3000 1 +#define APOLLO_DN3010 2 +#define APOLLO_DN3500 3 +#define APOLLO_DN4000 4 +#define APOLLO_DN4500 5 + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_APOLLO_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-atari.h b/arch/m68k/include/uapi/asm/bootinfo-atari.h new file mode 100644 index 000000000000..a817854049bb --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-atari.h @@ -0,0 +1,44 @@ +/* +** asm/bootinfo-atari.h -- Atari-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_ATARI_H +#define _UAPI_ASM_M68K_BOOTINFO_ATARI_H + + + /* + * Atari-specific tags + */ + +#define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (__be32) */ +#define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (__be32) */ + + + /* + * mch_cookie values (upper word of BI_ATARI_MCH_COOKIE) + */ + +#define ATARI_MCH_ST 0 +#define ATARI_MCH_STE 1 +#define ATARI_MCH_TT 2 +#define ATARI_MCH_FALCON 3 + + + /* + * Atari machine types (BI_ATARI_MCH_TYPE) + */ + +#define ATARI_MACH_NORMAL 0 /* no special machine type */ +#define ATARI_MACH_MEDUSA 1 /* Medusa 040 */ +#define ATARI_MACH_HADES 2 /* Hades 040 or 060 */ +#define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */ + + + /* + * Latest Atari bootinfo version + */ + +#define ATARI_BOOTI_VERSION MK_BI_VERSION(2, 1) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_ATARI_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-hp300.h b/arch/m68k/include/uapi/asm/bootinfo-hp300.h new file mode 100644 index 000000000000..c90cb71ed89a --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-hp300.h @@ -0,0 +1,50 @@ +/* +** asm/bootinfo-hp300.h -- HP9000/300-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_HP300_H +#define _UAPI_ASM_M68K_BOOTINFO_HP300_H + + + /* + * HP9000/300-specific tags + */ + +#define BI_HP300_MODEL 0x8000 /* model (__be32) */ +#define BI_HP300_UART_SCODE 0x8001 /* UART select code (__be32) */ +#define BI_HP300_UART_ADDR 0x8002 /* phys. 
addr of UART (__be32) */ + + + /* + * HP9000/300 and /400 models (BI_HP300_MODEL) + * + * This information was taken from NetBSD + */ + +#define HP_320 0 /* 16MHz 68020+HP MMU+16K external cache */ +#define HP_330 1 /* 16MHz 68020+68851 MMU */ +#define HP_340 2 /* 16MHz 68030 */ +#define HP_345 3 /* 50MHz 68030+32K external cache */ +#define HP_350 4 /* 25MHz 68020+HP MMU+32K external cache */ +#define HP_360 5 /* 25MHz 68030 */ +#define HP_370 6 /* 33MHz 68030+64K external cache */ +#define HP_375 7 /* 50MHz 68030+32K external cache */ +#define HP_380 8 /* 25MHz 68040 */ +#define HP_385 9 /* 33MHz 68040 */ + +#define HP_400 10 /* 50MHz 68030+32K external cache */ +#define HP_425T 11 /* 25MHz 68040 - model 425t */ +#define HP_425S 12 /* 25MHz 68040 - model 425s */ +#define HP_425E 13 /* 25MHz 68040 - model 425e */ +#define HP_433T 14 /* 33MHz 68040 - model 433t */ +#define HP_433S 15 /* 33MHz 68040 - model 433s */ + + + /* + * Latest HP9000/300 bootinfo version + */ + +#define HP300_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_HP300_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-mac.h b/arch/m68k/include/uapi/asm/bootinfo-mac.h new file mode 100644 index 000000000000..b44ff73898a9 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-mac.h @@ -0,0 +1,119 @@ +/* +** asm/bootinfo-mac.h -- Macintosh-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_MAC_H +#define _UAPI_ASM_M68K_BOOTINFO_MAC_H + + + /* + * Macintosh-specific tags (all __be32) + */ + +#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */ +#define BI_MAC_VADDR 0x8001 /* Mac video base address */ +#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */ +#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */ +#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */ +#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */ +#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */ +#define BI_MAC_BTIME 0x8007 /* Mac boot time */ +#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */ +#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */ +#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */ +#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */ + + + /* + * Macintosh hardware profile data - unused, see macintosh.h for + * reasonable type values + */ + +#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */ +#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */ +#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */ +#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */ +#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */ +#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */ +#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */ +#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */ +#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */ +#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */ +#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */ +#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */ +#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */ +#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE */ +#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */ +#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */ +#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */ +#define 
BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */ + + + /* + * Macintosh Gestalt numbers (BI_MAC_MODEL) + */ + +#define MAC_MODEL_II 6 +#define MAC_MODEL_IIX 7 +#define MAC_MODEL_IICX 8 +#define MAC_MODEL_SE30 9 +#define MAC_MODEL_IICI 11 +#define MAC_MODEL_IIFX 13 /* And well numbered it is too */ +#define MAC_MODEL_IISI 18 +#define MAC_MODEL_LC 19 +#define MAC_MODEL_Q900 20 +#define MAC_MODEL_PB170 21 +#define MAC_MODEL_Q700 22 +#define MAC_MODEL_CLII 23 /* aka: P200 */ +#define MAC_MODEL_PB140 25 +#define MAC_MODEL_Q950 26 /* aka: WGS95 */ +#define MAC_MODEL_LCIII 27 /* aka: P450 */ +#define MAC_MODEL_PB210 29 +#define MAC_MODEL_C650 30 +#define MAC_MODEL_PB230 32 +#define MAC_MODEL_PB180 33 +#define MAC_MODEL_PB160 34 +#define MAC_MODEL_Q800 35 /* aka: WGS80 */ +#define MAC_MODEL_Q650 36 +#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */ +#define MAC_MODEL_PB250 38 +#define MAC_MODEL_IIVI 44 +#define MAC_MODEL_P600 45 /* aka: P600CD */ +#define MAC_MODEL_IIVX 48 +#define MAC_MODEL_CCL 49 /* aka: P250 */ +#define MAC_MODEL_PB165C 50 +#define MAC_MODEL_C610 52 /* aka: WGS60 */ +#define MAC_MODEL_Q610 53 +#define MAC_MODEL_PB145 54 /* aka: PB145B */ +#define MAC_MODEL_P520 56 /* aka: LC520 */ +#define MAC_MODEL_C660 60 +#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */ +#define MAC_MODEL_PB180C 71 +#define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */ +#define MAC_MODEL_PB270C 77 +#define MAC_MODEL_Q840 78 +#define MAC_MODEL_P550 80 /* aka: LC550, P560 */ +#define MAC_MODEL_CCLII 83 /* aka: P275 */ +#define MAC_MODEL_PB165 84 +#define MAC_MODEL_PB190 85 /* aka: PB190CS */ +#define MAC_MODEL_TV 88 +#define MAC_MODEL_P475 89 /* aka: LC475, P476 */ +#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */ +#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */ +#define MAC_MODEL_Q605 94 +#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */ +#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */ +#define MAC_MODEL_P588 99 /* aka: LC580, P580 */ +#define MAC_MODEL_PB280 102 +#define MAC_MODEL_PB280C 103 +#define MAC_MODEL_PB150 115 + + + /* + * Latest Macintosh bootinfo version + */ + +#define MAC_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_MAC_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-q40.h b/arch/m68k/include/uapi/asm/bootinfo-q40.h new file mode 100644 index 000000000000..c79fea7e555b --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-q40.h @@ -0,0 +1,16 @@ +/* +** asm/bootinfo-q40.h -- Q40-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_Q40_H +#define _UAPI_ASM_M68K_BOOTINFO_Q40_H + + + /* + * Latest Q40 bootinfo version + */ + +#define Q40_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_Q40_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-vme.h b/arch/m68k/include/uapi/asm/bootinfo-vme.h new file mode 100644 index 000000000000..a135eb41d672 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-vme.h @@ -0,0 +1,70 @@ +/* +** asm/bootinfo-vme.h -- VME-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_VME_H +#define _UAPI_ASM_M68K_BOOTINFO_VME_H + + +#include <linux/types.h> + + + /* + * VME-specific tags + */ + +#define BI_VME_TYPE 0x8000 /* VME sub-architecture (__be32) */ +#define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */ + + + /* + * VME models (BI_VME_TYPE) + */ + +#define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */ +#define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */ +#define VME_TYPE_MVME162 
0x0162 /* Motorola MVME162 */ +#define VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */ +#define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */ +#define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */ +#define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */ +#define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */ +#define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */ + + +#ifndef __ASSEMBLY__ + +/* + * Board ID data structure - pointer to this retrieved from Bug by head.S + * + * BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on + * Motorola VME boards. Contains board number, Bug version, board + * configuration options, etc. + * + * Note, bytes 12 and 13 are board no in BCD (0162,0166,0167,0177,etc) + */ + +typedef struct { + char bdid[4]; + __u8 rev, mth, day, yr; + __be16 size, reserved; + __be16 brdno; + char brdsuffix[2]; + __be32 options; + __be16 clun, dlun, ctype, dnum; + __be32 option2; +} t_bdid, *p_bdid; + +#endif /* __ASSEMBLY__ */ + + + /* + * Latest VME bootinfo versions + */ + +#define MVME147_BOOTI_VERSION MK_BI_VERSION(2, 0) +#define MVME16x_BOOTI_VERSION MK_BI_VERSION(2, 0) +#define BVME6000_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_VME_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo.h b/arch/m68k/include/uapi/asm/bootinfo.h new file mode 100644 index 000000000000..cdeb26a015b0 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo.h @@ -0,0 +1,174 @@ +/* + * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure + * + * Copyright 1992 by Greg Harp + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_H +#define _UAPI_ASM_M68K_BOOTINFO_H + + +#include <linux/types.h> + + +#ifndef __ASSEMBLY__ + + /* + * Bootinfo definitions + * + * This is an easily parsable and extendable structure containing all + * information to be passed from the bootstrap to the kernel. + * + * This way I hope to keep all future changes back/forewards compatible. + * Thus, keep your fingers crossed... + * + * This structure is copied right after the kernel by the bootstrap + * routine. 
+ */ + +struct bi_record { + __be16 tag; /* tag ID */ + __be16 size; /* size of record (in bytes) */ + __be32 data[0]; /* data */ +}; + + +struct mem_info { + __be32 addr; /* physical address of memory chunk */ + __be32 size; /* length of memory chunk (in bytes) */ +}; + +#endif /* __ASSEMBLY__ */ + + + /* + * Tag Definitions + * + * Machine independent tags start counting from 0x0000 + * Machine dependent tags start counting from 0x8000 + */ + +#define BI_LAST 0x0000 /* last record (sentinel) */ +#define BI_MACHTYPE 0x0001 /* machine type (__be32) */ +#define BI_CPUTYPE 0x0002 /* cpu type (__be32) */ +#define BI_FPUTYPE 0x0003 /* fpu type (__be32) */ +#define BI_MMUTYPE 0x0004 /* mmu type (__be32) */ +#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */ + /* (struct mem_info) */ +#define BI_RAMDISK 0x0006 /* ramdisk address and size */ + /* (struct mem_info) */ +#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */ + /* (string) */ + + + /* + * Linux/m68k Architectures (BI_MACHTYPE) + */ + +#define MACH_AMIGA 1 +#define MACH_ATARI 2 +#define MACH_MAC 3 +#define MACH_APOLLO 4 +#define MACH_SUN3 5 +#define MACH_MVME147 6 +#define MACH_MVME16x 7 +#define MACH_BVME6000 8 +#define MACH_HP300 9 +#define MACH_Q40 10 +#define MACH_SUN3X 11 +#define MACH_M54XX 12 + + + /* + * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE) + * + * Note: we may rely on the following equalities: + * + * CPU_68020 == MMU_68851 + * CPU_68030 == MMU_68030 + * CPU_68040 == FPU_68040 == MMU_68040 + * CPU_68060 == FPU_68060 == MMU_68060 + */ + +#define CPUB_68020 0 +#define CPUB_68030 1 +#define CPUB_68040 2 +#define CPUB_68060 3 +#define CPUB_COLDFIRE 4 + +#define CPU_68020 (1 << CPUB_68020) +#define CPU_68030 (1 << CPUB_68030) +#define CPU_68040 (1 << CPUB_68040) +#define CPU_68060 (1 << CPUB_68060) +#define CPU_COLDFIRE (1 << CPUB_COLDFIRE) + +#define FPUB_68881 0 +#define FPUB_68882 1 +#define FPUB_68040 2 /* Internal FPU */ +#define FPUB_68060 3 /* Internal FPU */ +#define FPUB_SUNFPA 4 /* Sun-3 FPA */ +#define FPUB_COLDFIRE 5 /* ColdFire FPU */ + +#define FPU_68881 (1 << FPUB_68881) +#define FPU_68882 (1 << FPUB_68882) +#define FPU_68040 (1 << FPUB_68040) +#define FPU_68060 (1 << FPUB_68060) +#define FPU_SUNFPA (1 << FPUB_SUNFPA) +#define FPU_COLDFIRE (1 << FPUB_COLDFIRE) + +#define MMUB_68851 0 +#define MMUB_68030 1 /* Internal MMU */ +#define MMUB_68040 2 /* Internal MMU */ +#define MMUB_68060 3 /* Internal MMU */ +#define MMUB_APOLLO 4 /* Custom Apollo */ +#define MMUB_SUN3 5 /* Custom Sun-3 */ +#define MMUB_COLDFIRE 6 /* Internal MMU */ + +#define MMU_68851 (1 << MMUB_68851) +#define MMU_68030 (1 << MMUB_68030) +#define MMU_68040 (1 << MMUB_68040) +#define MMU_68060 (1 << MMUB_68060) +#define MMU_SUN3 (1 << MMUB_SUN3) +#define MMU_APOLLO (1 << MMUB_APOLLO) +#define MMU_COLDFIRE (1 << MMUB_COLDFIRE) + + + /* + * Stuff for bootinfo interface versioning + * + * At the start of kernel code, a 'struct bootversion' is located. + * bootstrap checks for a matching version of the interface before booting + * a kernel, to avoid user confusion if kernel and bootstrap don't work + * together :-) + * + * If incompatible changes are made to the bootinfo interface, the major + * number below should be stepped (and the minor reset to 0) for the + * appropriate machine. If a change is backward-compatible, the minor + * should be stepped. "Backwards-compatible" means that booting will work, + * but certain features may not. 
+ */ + +#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */ +#define MK_BI_VERSION(major, minor) (((major) << 16) + (minor)) +#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff) +#define BI_VERSION_MINOR(v) ((v) & 0xffff) + +#ifndef __ASSEMBLY__ + +struct bootversion { + __be16 branch; + __be32 magic; + struct { + __be32 machtype; + __be32 version; + } machversions[0]; +} __packed; + +#endif /* __ASSEMBLY__ */ + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_H */ diff --git a/arch/m68k/include/uapi/asm/setup.h b/arch/m68k/include/uapi/asm/setup.h index 85579bff455c..6a6dc636761e 100644 --- a/arch/m68k/include/uapi/asm/setup.h +++ b/arch/m68k/include/uapi/asm/setup.h @@ -6,98 +6,11 @@ ** This file is subject to the terms and conditions of the GNU General Public ** License. See the file COPYING in the main directory of this archive ** for more details. -** -** Created 09/29/92 by Greg Harp -** -** 5/2/94 Roman Hodek: -** Added bi_atari part of the machine dependent union bi_un; for now it -** contains just a model field to distinguish between TT and Falcon. -** 26/7/96 Roman Zippel: -** Renamed to setup.h; added some useful macros to allow gcc some -** optimizations if possible. -** 5/10/96 Geert Uytterhoeven: -** Redesign of the boot information structure; moved boot information -** structure to bootinfo.h */ #ifndef _UAPI_M68K_SETUP_H #define _UAPI_M68K_SETUP_H - - - /* - * Linux/m68k Architectures - */ - -#define MACH_AMIGA 1 -#define MACH_ATARI 2 -#define MACH_MAC 3 -#define MACH_APOLLO 4 -#define MACH_SUN3 5 -#define MACH_MVME147 6 -#define MACH_MVME16x 7 -#define MACH_BVME6000 8 -#define MACH_HP300 9 -#define MACH_Q40 10 -#define MACH_SUN3X 11 -#define MACH_M54XX 12 - #define COMMAND_LINE_SIZE 256 - - - /* - * CPU, FPU and MMU types - * - * Note: we may rely on the following equalities: - * - * CPU_68020 == MMU_68851 - * CPU_68030 == MMU_68030 - * CPU_68040 == FPU_68040 == MMU_68040 - * CPU_68060 == FPU_68060 == MMU_68060 - */ - -#define CPUB_68020 0 -#define CPUB_68030 1 -#define CPUB_68040 2 -#define CPUB_68060 3 -#define CPUB_COLDFIRE 4 - -#define CPU_68020 (1<<CPUB_68020) -#define CPU_68030 (1<<CPUB_68030) -#define CPU_68040 (1<<CPUB_68040) -#define CPU_68060 (1<<CPUB_68060) -#define CPU_COLDFIRE (1<<CPUB_COLDFIRE) - -#define FPUB_68881 0 -#define FPUB_68882 1 -#define FPUB_68040 2 /* Internal FPU */ -#define FPUB_68060 3 /* Internal FPU */ -#define FPUB_SUNFPA 4 /* Sun-3 FPA */ -#define FPUB_COLDFIRE 5 /* ColdFire FPU */ - -#define FPU_68881 (1<<FPUB_68881) -#define FPU_68882 (1<<FPUB_68882) -#define FPU_68040 (1<<FPUB_68040) -#define FPU_68060 (1<<FPUB_68060) -#define FPU_SUNFPA (1<<FPUB_SUNFPA) -#define FPU_COLDFIRE (1<<FPUB_COLDFIRE) - -#define MMUB_68851 0 -#define MMUB_68030 1 /* Internal MMU */ -#define MMUB_68040 2 /* Internal MMU */ -#define MMUB_68060 3 /* Internal MMU */ -#define MMUB_APOLLO 4 /* Custom Apollo */ -#define MMUB_SUN3 5 /* Custom Sun-3 */ -#define MMUB_COLDFIRE 6 /* Internal MMU */ - -#define MMU_68851 (1<<MMUB_68851) -#define MMU_68030 (1<<MMUB_68030) -#define MMU_68040 (1<<MMUB_68040) -#define MMU_68060 (1<<MMUB_68060) -#define MMU_SUN3 (1<<MMUB_SUN3) -#define MMU_APOLLO (1<<MMUB_APOLLO) -#define MMU_COLDFIRE (1<<MMUB_COLDFIRE) - - #endif /* _UAPI_M68K_SETUP_H */ diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile index 655347d80780..2d5d9be16273 100644 --- a/arch/m68k/kernel/Makefile +++ b/arch/m68k/kernel/Makefile @@ -22,3 +22,6 @@ obj-$(CONFIG_PCI) += pcibios.o obj-$(CONFIG_HAS_DMA) += dma.o +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 
+obj-$(CONFIG_BOOTINFO_PROC) += bootinfo_proc.o + diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c index 8b7b22846366..3a386341aa6e 100644 --- a/arch/m68k/kernel/asm-offsets.c +++ b/arch/m68k/kernel/asm-offsets.c @@ -98,6 +98,9 @@ int main(void) DEFINE(CIABBASE, &ciab); DEFINE(C_PRA, offsetof(struct CIA, pra)); DEFINE(ZTWOBASE, zTwoBase); + + /* enum m68k_fixup_type */ + DEFINE(M68K_FIXUP_MEMOFFSET, m68k_fixup_memoffset); #endif return 0; diff --git a/arch/m68k/kernel/bootinfo_proc.c b/arch/m68k/kernel/bootinfo_proc.c new file mode 100644 index 000000000000..7ee853e1432b --- /dev/null +++ b/arch/m68k/kernel/bootinfo_proc.c @@ -0,0 +1,80 @@ +/* + * Based on arch/arm/kernel/atags_proc.c + */ + +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/printk.h> +#include <linux/proc_fs.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include <asm/bootinfo.h> +#include <asm/byteorder.h> + + +static char bootinfo_tmp[1536] __initdata; + +static void *bootinfo_copy; +static size_t bootinfo_size; + +static ssize_t bootinfo_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(buf, count, ppos, bootinfo_copy, + bootinfo_size); +} + +static const struct file_operations bootinfo_fops = { + .read = bootinfo_read, + .llseek = default_llseek, +}; + +void __init save_bootinfo(const struct bi_record *bi) +{ + const void *start = bi; + size_t size = sizeof(bi->tag); + + while (be16_to_cpu(bi->tag) != BI_LAST) { + uint16_t n = be16_to_cpu(bi->size); + size += n; + bi = (struct bi_record *)((unsigned long)bi + n); + } + + if (size > sizeof(bootinfo_tmp)) { + pr_err("Cannot save %zu bytes of bootinfo\n", size); + return; + } + + pr_info("Saving %zu bytes of bootinfo\n", size); + memcpy(bootinfo_tmp, start, size); + bootinfo_size = size; +} + +static int __init init_bootinfo_procfs(void) +{ + /* + * This cannot go into save_bootinfo() because kmalloc and proc don't + * work yet when it is called. 
+ */ + struct proc_dir_entry *pde; + + if (!bootinfo_size) + return -EINVAL; + + bootinfo_copy = kmalloc(bootinfo_size, GFP_KERNEL); + if (!bootinfo_copy) + return -ENOMEM; + + memcpy(bootinfo_copy, bootinfo_tmp, bootinfo_size); + + pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL); + if (!pde) { + kfree(bootinfo_copy); + return -ENOMEM; + } + + return 0; +} + +arch_initcall(init_bootinfo_procfs); diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S index ac85f16534af..4c99bab7e664 100644 --- a/arch/m68k/kernel/head.S +++ b/arch/m68k/kernel/head.S @@ -23,7 +23,7 @@ ** 98/04/25 Phil Blundell: added HP300 support ** 1998/08/30 David Kilzer: Added support for font_desc structures ** for linux-2.1.115 -** 9/02/11 Richard Zidlicky: added Q40 support (initial vesion 99/01/01) +** 1999/02/11 Richard Zidlicky: added Q40 support (initial version 99/01/01) ** 2004/05/13 Kars de Jong: Finalised HP300 support ** ** This file is subject to the terms and conditions of the GNU General Public @@ -257,6 +257,12 @@ #include <linux/linkage.h> #include <linux/init.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-amiga.h> +#include <asm/bootinfo-atari.h> +#include <asm/bootinfo-hp300.h> +#include <asm/bootinfo-mac.h> +#include <asm/bootinfo-q40.h> +#include <asm/bootinfo-vme.h> #include <asm/setup.h> #include <asm/entry.h> #include <asm/pgtable.h> @@ -1532,7 +1538,7 @@ L(cache_done): /* * Find a tag record in the bootinfo structure - * The bootinfo structure is located right after the kernel bss + * The bootinfo structure is located right after the kernel * Returns: d0: size (-1 if not found) * a0: data pointer (end-of-records if not found) */ @@ -2909,7 +2915,9 @@ func_start serial_init,%d0/%d1/%a0/%a1 #if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) movel %pc@(L(mac_sccbase)),%a0 - /* Reset SCC device */ + /* Reset SCC register pointer */ + moveb %a0@(mac_scc_cha_a_ctrl_offset),%d0 + /* Reset SCC device: write register pointer then register value */ moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ @@ -3896,8 +3904,6 @@ BVME_SCC_DATA_A = 0xffb0000f #endif #if defined(CONFIG_MAC) -L(mac_booter_data): - .long 0 L(mac_videobase): .long 0 L(mac_videodepth): diff --git a/arch/m68k/kernel/machine_kexec.c b/arch/m68k/kernel/machine_kexec.c new file mode 100644 index 000000000000..d4affc917d9d --- /dev/null +++ b/arch/m68k/kernel/machine_kexec.c @@ -0,0 +1,58 @@ +/* + * machine_kexec.c - handle transition of Linux booting another kernel + */ +#include <linux/compiler.h> +#include <linux/kexec.h> +#include <linux/mm.h> +#include <linux/delay.h> + +#include <asm/cacheflush.h> +#include <asm/page.h> +#include <asm/setup.h> + +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; + +int machine_kexec_prepare(struct kimage *kimage) +{ + return 0; +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void machine_shutdown(void) +{ +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ +} + +typedef void (*relocate_kernel_t)(unsigned long ptr, + unsigned long start, + unsigned long cpu_mmu_flags) __noreturn; + +void machine_kexec(struct kimage *image) +{ + void *reboot_code_buffer; + unsigned long cpu_mmu_flags; + + reboot_code_buffer = page_address(image->control_code_page); + + memcpy(reboot_code_buffer, relocate_new_kernel, + relocate_new_kernel_size); + + /* + * we do not want to be bothered. 
+ */ + local_irq_disable(); + + pr_info("Will call new kernel at 0x%08lx. Bye...\n", image->start); + __flush_cache_all(); + cpu_mmu_flags = m68k_cputype | m68k_mmutype << 8; + ((relocate_kernel_t) reboot_code_buffer)(image->head & PAGE_MASK, + image->start, + cpu_mmu_flags); +} diff --git a/arch/m68k/kernel/relocate_kernel.S b/arch/m68k/kernel/relocate_kernel.S new file mode 100644 index 000000000000..3e09a89067ad --- /dev/null +++ b/arch/m68k/kernel/relocate_kernel.S @@ -0,0 +1,159 @@ +#include <linux/linkage.h> + +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm/setup.h> + + +#define MMU_BASE 8 /* MMU flags base in cpu_mmu_flags */ + +.text + +ENTRY(relocate_new_kernel) + movel %sp@(4),%a0 /* a0 = ptr */ + movel %sp@(8),%a1 /* a1 = start */ + movel %sp@(12),%d1 /* d1 = cpu_mmu_flags */ + movew #PAGE_MASK,%d2 /* d2 = PAGE_MASK */ + + /* Disable MMU */ + + btst #MMU_BASE + MMUB_68851,%d1 + jeq 3f + +1: /* 68851 or 68030 */ + + lea %pc@(.Lcopy),%a4 +2: addl #0x00000000,%a4 /* virt_to_phys() */ + + .section ".m68k_fixup","aw" + .long M68K_FIXUP_MEMOFFSET, 2b+2 + .previous + + .chip 68030 + pmove %tc,%d0 /* Disable MMU */ + bclr #7,%d0 + pmove %d0,%tc + jmp %a4@ /* Jump to physical .Lcopy */ + .chip 68k + +3: + btst #MMU_BASE + MMUB_68030,%d1 + jne 1b + + btst #MMU_BASE + MMUB_68040,%d1 + jeq 6f + +4: /* 68040 or 68060 */ + + lea %pc@(.Lcont040),%a4 +5: addl #0x00000000,%a4 /* virt_to_phys() */ + + .section ".m68k_fixup","aw" + .long M68K_FIXUP_MEMOFFSET, 5b+2 + .previous + + movel %a4,%d0 + andl #0xff000000,%d0 + orw #0xe020,%d0 /* Map 16 MiB, enable, cacheable */ + .chip 68040 + movec %d0,%itt0 + movec %d0,%dtt0 + .chip 68k + jmp %a4@ /* Jump to physical .Lcont040 */ + +.Lcont040: + moveq #0,%d0 + .chip 68040 + movec %d0,%tc /* Disable MMU */ + movec %d0,%itt0 + movec %d0,%itt1 + movec %d0,%dtt0 + movec %d0,%dtt1 + .chip 68k + jra .Lcopy + +6: + btst #MMU_BASE + MMUB_68060,%d1 + jne 4b + +.Lcopy: + movel %a0@+,%d0 /* d0 = entry = *ptr */ + jeq .Lflush + + btst #2,%d0 /* entry & IND_DONE? */ + jne .Lflush + + btst #1,%d0 /* entry & IND_INDIRECTION? */ + jeq 1f + andw %d2,%d0 + movel %d0,%a0 /* ptr = entry & PAGE_MASK */ + jra .Lcopy + +1: + btst #0,%d0 /* entry & IND_DESTINATION? */ + jeq 2f + andw %d2,%d0 + movel %d0,%a2 /* a2 = dst = entry & PAGE_MASK */ + jra .Lcopy + +2: + btst #3,%d0 /* entry & IND_SOURCE? 
*/ + jeq .Lcopy + + andw %d2,%d0 + movel %d0,%a3 /* a3 = src = entry & PAGE_MASK */ + movew #PAGE_SIZE/32 - 1,%d0 /* d0 = PAGE_SIZE/32 - 1 */ +3: + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + dbf %d0, 3b + jra .Lcopy + +.Lflush: + /* Flush all caches */ + + btst #CPUB_68020,%d1 + jeq 2f + +1: /* 68020 or 68030 */ + .chip 68030 + movec %cacr,%d0 + orw #0x808,%d0 + movec %d0,%cacr + .chip 68k + jra .Lreincarnate + +2: + btst #CPUB_68030,%d1 + jne 1b + + btst #CPUB_68040,%d1 + jeq 4f + +3: /* 68040 or 68060 */ + .chip 68040 + nop + cpusha %bc + nop + cinva %bc + nop + .chip 68k + jra .Lreincarnate + +4: + btst #CPUB_68060,%d1 + jne 3b + +.Lreincarnate: + jmp %a1@ + +relocate_new_kernel_end: + +ENTRY(relocate_new_kernel_size) + .long relocate_new_kernel_end - relocate_new_kernel diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index e67e53159573..5b8ec4d5f8e8 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -26,6 +26,7 @@ #include <linux/initrd.h> #include <asm/bootinfo.h> +#include <asm/byteorder.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/fpu.h> @@ -71,12 +72,12 @@ EXPORT_SYMBOL(m68k_num_memory); int m68k_realnum_memory; EXPORT_SYMBOL(m68k_realnum_memory); unsigned long m68k_memoffset; -struct mem_info m68k_memory[NUM_MEMINFO]; +struct m68k_mem_info m68k_memory[NUM_MEMINFO]; EXPORT_SYMBOL(m68k_memory); -struct mem_info m68k_ramdisk; +static struct m68k_mem_info m68k_ramdisk __initdata; -static char m68k_command_line[CL_SIZE]; +static char m68k_command_line[CL_SIZE] __initdata; void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL; /* machine dependent irq functions */ @@ -143,11 +144,16 @@ extern void paging_init(void); static void __init m68k_parse_bootinfo(const struct bi_record *record) { - while (record->tag != BI_LAST) { + uint16_t tag; + + save_bootinfo(record); + + while ((tag = be16_to_cpu(record->tag)) != BI_LAST) { int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; + uint16_t size = be16_to_cpu(record->size); - switch (record->tag) { + switch (tag) { case BI_MACHTYPE: case BI_CPUTYPE: case BI_FPUTYPE: @@ -157,20 +163,27 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) case BI_MEMCHUNK: if (m68k_num_memory < NUM_MEMINFO) { - m68k_memory[m68k_num_memory].addr = data[0]; - m68k_memory[m68k_num_memory].size = data[1]; + const struct mem_info *m = data; + m68k_memory[m68k_num_memory].addr = + be32_to_cpu(m->addr); + m68k_memory[m68k_num_memory].size = + be32_to_cpu(m->size); m68k_num_memory++; } else - printk("m68k_parse_bootinfo: too many memory chunks\n"); + pr_warn("%s: too many memory chunks\n", + __func__); break; case BI_RAMDISK: - m68k_ramdisk.addr = data[0]; - m68k_ramdisk.size = data[1]; + { + const struct mem_info *m = data; + m68k_ramdisk.addr = be32_to_cpu(m->addr); + m68k_ramdisk.size = be32_to_cpu(m->size); + } break; case BI_COMMAND_LINE: - strlcpy(m68k_command_line, (const char *)data, + strlcpy(m68k_command_line, data, sizeof(m68k_command_line)); break; @@ -197,17 +210,16 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) unknown = 1; } if (unknown) - printk("m68k_parse_bootinfo: unknown tag 0x%04x 
ignored\n", - record->tag); - record = (struct bi_record *)((unsigned long)record + - record->size); + pr_warn("%s: unknown tag 0x%04x ignored\n", __func__, + tag); + record = (struct bi_record *)((unsigned long)record + size); } m68k_realnum_memory = m68k_num_memory; #ifdef CONFIG_SINGLE_MEMORY_CHUNK if (m68k_num_memory > 1) { - printk("Ignoring last %i chunks of physical memory\n", - (m68k_num_memory - 1)); + pr_warn("%s: ignoring last %i chunks of physical memory\n", + __func__, (m68k_num_memory - 1)); m68k_num_memory = 1; } #endif @@ -219,7 +231,7 @@ void __init setup_arch(char **cmdline_p) int i; #endif - /* The bootinfo is located right after the kernel bss */ + /* The bootinfo is located right after the kernel */ if (!CPU_IS_COLDFIRE) m68k_parse_bootinfo((const struct bi_record *)_end); @@ -247,7 +259,7 @@ void __init setup_arch(char **cmdline_p) asm (".chip 68060; movec %%pcr,%0; .chip 68k" : "=d" (pcr)); if (((pcr >> 8) & 0xff) <= 5) { - printk("Enabling workaround for errata I14\n"); + pr_warn("Enabling workaround for errata I14\n"); asm (".chip 68060; movec %0,%%pcr; .chip 68k" : : "d" (pcr | 0x20)); } @@ -336,12 +348,12 @@ void __init setup_arch(char **cmdline_p) panic("No configuration setup"); } + paging_init(); + #ifdef CONFIG_NATFEAT nf_init(); #endif - paging_init(); - #ifndef CONFIG_SUN3 for (i = 1; i < m68k_num_memory; i++) free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr, @@ -353,7 +365,7 @@ void __init setup_arch(char **cmdline_p) BOOTMEM_DEFAULT); initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr); initrd_end = initrd_start + m68k_ramdisk.size; - printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end); + pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end); } #endif @@ -538,9 +550,9 @@ void check_bugs(void) { #ifndef CONFIG_M68KFPU_EMU if (m68k_fputype == 0) { - printk(KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, " + pr_emerg("*** YOU DO NOT HAVE A FLOATING POINT UNIT, " "WHICH IS REQUIRED BY LINUX/M68K ***\n"); - printk(KERN_EMERG "Upgrade your hardware or join the FPU " + pr_emerg("Upgrade your hardware or join the FPU " "emulation project\n"); panic("no FPU"); } diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 7eb9792009f8..958f1adb9d0c 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -28,6 +28,10 @@ #include <linux/timex.h> #include <linux/profile.h> + +unsigned long (*mach_random_get_entropy)(void); + + /* * timer_interrupt() needs to keep up the real-time clock, * as well as call the "xtime_update()" routine every clocktick diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 88fcd8c70e7b..6c9ca24830e9 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -133,9 +133,7 @@ static inline void access_error060 (struct frame *fp) { unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */ -#ifdef DEBUG - printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); -#endif + pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); if (fslw & MMU060_BPE) { /* branch prediction error -> clear branch cache */ @@ -162,9 +160,7 @@ static inline void access_error060 (struct frame *fp) } if (fslw & MMU060_W) errorcode |= 2; -#ifdef DEBUG - printk("errorcode = %d\n", errorcode ); -#endif + pr_debug("errorcode = %ld\n", errorcode); do_page_fault(&fp->ptregs, addr, errorcode); } else if (fslw & (MMU060_SEE)){ /* Software Emulation Error. 
@@ -173,8 +169,9 @@ static inline void access_error060 (struct frame *fp) send_fault_sig(&fp->ptregs); } else if (!(fslw & (MMU060_RE|MMU060_WE)) || send_fault_sig(&fp->ptregs) > 0) { - printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr); - printk( "68060 access error, fslw=%lx\n", fslw ); + pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, + fp->un.fmt4.effaddr); + pr_err("68060 access error, fslw=%lx\n", fslw); trap_c( fp ); } } @@ -225,9 +222,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba, set_fs(old_fs); -#ifdef DEBUG - printk("do_040writeback1, res=%d\n",res); -#endif + pr_debug("do_040writeback1, res=%d\n", res); return res; } @@ -249,7 +244,7 @@ static inline void do_040writebacks(struct frame *fp) int res = 0; #if 0 if (fp->un.fmt7.wb1s & WBV_040) - printk("access_error040: cannot handle 1st writeback. oops.\n"); + pr_err("access_error040: cannot handle 1st writeback. oops.\n"); #endif if ((fp->un.fmt7.wb2s & WBV_040) && @@ -302,14 +297,12 @@ static inline void access_error040(struct frame *fp) unsigned short ssw = fp->un.fmt7.ssw; unsigned long mmusr; -#ifdef DEBUG - printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); - printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, + pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); + pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, fp->un.fmt7.wb2s, fp->un.fmt7.wb3s); - printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", + pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", fp->un.fmt7.wb2a, fp->un.fmt7.wb3a, fp->un.fmt7.wb2d, fp->un.fmt7.wb3d); -#endif if (ssw & ATC_040) { unsigned long addr = fp->un.fmt7.faddr; @@ -324,9 +317,7 @@ static inline void access_error040(struct frame *fp) /* MMU error, get the MMUSR info for this access */ mmusr = probe040(!(ssw & RW_040), addr, ssw); -#ifdef DEBUG - printk("mmusr = %lx\n", mmusr); -#endif + pr_debug("mmusr = %lx\n", mmusr); errorcode = 1; if (!(mmusr & MMU_R_040)) { /* clear the invalid atc entry */ @@ -340,14 +331,10 @@ static inline void access_error040(struct frame *fp) errorcode |= 2; if (do_page_fault(&fp->ptregs, addr, errorcode)) { -#ifdef DEBUG - printk("do_page_fault() !=0\n"); -#endif + pr_debug("do_page_fault() !=0\n"); if (user_mode(&fp->ptregs)){ /* delay writebacks after signal delivery */ -#ifdef DEBUG - printk(".. was usermode - return\n"); -#endif + pr_debug(".. was usermode - return\n"); return; } /* disable writeback into user space from kernel @@ -355,9 +342,7 @@ static inline void access_error040(struct frame *fp) * the writeback won't do good) */ disable_wb: -#ifdef DEBUG - printk(".. disabling wb2\n"); -#endif + pr_debug(".. disabling wb2\n"); if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr) fp->un.fmt7.wb2s &= ~WBV_040; if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr) @@ -371,7 +356,7 @@ disable_wb: current->thread.signo = SIGBUS; current->thread.faddr = fp->un.fmt7.faddr; if (send_fault_sig(&fp->ptregs) >= 0) - printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, + pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, fp->un.fmt7.faddr); goto disable_wb; } @@ -394,19 +379,17 @@ static inline void bus_error030 (struct frame *fp) unsigned short ssw = fp->un.fmtb.ssw; extern unsigned long _sun3_map_test_start, _sun3_map_test_end; -#ifdef DEBUG if (ssw & (FC | FB)) - printk ("Instruction fault at %#010lx\n", + pr_debug("Instruction fault at %#010lx\n", ssw & FC ? fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 : fp->ptregs.format == 0xa ? 
fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF) - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); -#endif /* * Check if this page should be demand-mapped. This needs to go before @@ -429,7 +412,7 @@ static inline void bus_error030 (struct frame *fp) return; /* instruction fault or kernel data fault! */ if (ssw & (FC | FB)) - printk ("Instruction fault at %#010lx\n", + pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc); if (ssw & DF) { /* was this fault incurred testing bus mappings? */ @@ -439,12 +422,12 @@ static inline void bus_error030 (struct frame *fp) return; } - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); } - printk ("BAD KERNEL BUSERR\n"); + pr_err("BAD KERNEL BUSERR\n"); die_if_kernel("Oops", &fp->ptregs,0); force_sig(SIGKILL, current); @@ -473,12 +456,11 @@ static inline void bus_error030 (struct frame *fp) else if (buserr_type & SUN3_BUSERR_INVALID) errorcode = 0x00; else { -#ifdef DEBUG - printk ("*** unexpected busfault type=%#04x\n", buserr_type); - printk ("invalid %s access at %#lx from pc %#lx\n", - !(ssw & RW) ? "write" : "read", addr, - fp->ptregs.pc); -#endif + pr_debug("*** unexpected busfault type=%#04x\n", + buserr_type); + pr_debug("invalid %s access at %#lx from pc %#lx\n", + !(ssw & RW) ? "write" : "read", addr, + fp->ptregs.pc); die_if_kernel ("Oops", &fp->ptregs, buserr_type); force_sig (SIGBUS, current); return; @@ -509,9 +491,7 @@ static inline void bus_error030 (struct frame *fp) if (!mmu_emu_handle_fault(addr, 1, 0)) do_page_fault (&fp->ptregs, addr, 0); } else { -#ifdef DEBUG - printk ("protection fault on insn access (segv).\n"); -#endif + pr_debug("protection fault on insn access (segv).\n"); force_sig (SIGSEGV, current); } } @@ -525,22 +505,22 @@ static inline void bus_error030 (struct frame *fp) unsigned short ssw = fp->un.fmtb.ssw; #ifdef DEBUG unsigned long desc; +#endif - printk ("pid = %x ", current->pid); - printk ("SSW=%#06x ", ssw); + pr_debug("pid = %x ", current->pid); + pr_debug("SSW=%#06x ", ssw); if (ssw & (FC | FB)) - printk ("Instruction fault at %#010lx\n", + pr_debug("Instruction fault at %#010lx\n", ssw & FC ? fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 : fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF) - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); -#endif /* ++andreas: If a data fault and an instruction fault happen at the same time map in both pages. 
*/ @@ -554,27 +534,23 @@ static inline void bus_error030 (struct frame *fp) "pmove %%psr,%1" : "=a&" (desc), "=m" (temp) : "a" (addr), "d" (ssw)); + pr_debug("mmusr is %#x for addr %#lx in task %p\n", + temp, addr, current); + pr_debug("descriptor address is 0x%p, contents %#lx\n", + __va(desc), *(unsigned long *)__va(desc)); #else asm volatile ("ptestr %2,%1@,#7\n\t" "pmove %%psr,%0" : "=m" (temp) : "a" (addr), "d" (ssw)); #endif mmusr = temp; - -#ifdef DEBUG - printk("mmusr is %#x for addr %#lx in task %p\n", - mmusr, addr, current); - printk("descriptor address is %#lx, contents %#lx\n", - __va(desc), *(unsigned long *)__va(desc)); -#endif - errorcode = (mmusr & MMU_I) ? 0 : 1; if (!(ssw & RW) || (ssw & RM)) errorcode |= 2; if (mmusr & (MMU_I | MMU_WP)) { if (ssw & 4) { - printk("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); @@ -587,9 +563,10 @@ static inline void bus_error030 (struct frame *fp) } else if (!(mmusr & MMU_I)) { /* probably a 020 cas fault */ if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0) - printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr); + pr_err("unexpected bus error (%#x,%#x)\n", ssw, + mmusr); } else if (mmusr & (MMU_B|MMU_L|MMU_S)) { - printk("invalid %s access at %#lx from pc %#lx\n", + pr_err("invalid %s access at %#lx from pc %#lx\n", !(ssw & RW) ? "write" : "read", addr, fp->ptregs.pc); die_if_kernel("Oops",&fp->ptregs,mmusr); @@ -600,7 +577,7 @@ static inline void bus_error030 (struct frame *fp) static volatile long tlong; #endif - printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", + pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", !(ssw & RW) ? "write" : "read", addr, fp->ptregs.pc, ssw); asm volatile ("ptestr #1,%1@,#0\n\t" @@ -609,18 +586,16 @@ static inline void bus_error030 (struct frame *fp) : "a" (addr)); mmusr = temp; - printk ("level 0 mmusr is %#x\n", mmusr); + pr_err("level 0 mmusr is %#x\n", mmusr); #if 0 asm volatile ("pmove %%tt0,%0" : "=m" (tlong)); - printk("tt0 is %#lx, ", tlong); + pr_debug("tt0 is %#lx, ", tlong); asm volatile ("pmove %%tt1,%0" : "=m" (tlong)); - printk("tt1 is %#lx\n", tlong); -#endif -#ifdef DEBUG - printk("Unknown SIGSEGV - 1\n"); + pr_debug("tt1 is %#lx\n", tlong); #endif + pr_debug("Unknown SIGSEGV - 1\n"); die_if_kernel("Oops",&fp->ptregs,mmusr); force_sig(SIGSEGV, current); return; @@ -641,10 +616,9 @@ static inline void bus_error030 (struct frame *fp) return; if (fp->ptregs.sr & PS_S) { - printk("Instruction fault at %#010lx\n", - fp->ptregs.pc); + pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc); buserr: - printk ("BAD KERNEL BUSERR\n"); + pr_err("BAD KERNEL BUSERR\n"); die_if_kernel("Oops",&fp->ptregs,0); force_sig(SIGKILL, current); return; @@ -668,28 +642,22 @@ static inline void bus_error030 (struct frame *fp) "pmove %%psr,%1" : "=a&" (desc), "=m" (temp) : "a" (addr)); + pr_debug("mmusr is %#x for addr %#lx in task %p\n", + temp, addr, current); + pr_debug("descriptor address is 0x%p, contents %#lx\n", + __va(desc), *(unsigned long *)__va(desc)); #else asm volatile ("ptestr #1,%1@,#7\n\t" "pmove %%psr,%0" : "=m" (temp) : "a" (addr)); #endif mmusr = temp; - -#ifdef DEBUG - printk ("mmusr is %#x for addr %#lx in task %p\n", - mmusr, addr, current); - printk ("descriptor address is %#lx, contents %#lx\n", - __va(desc), *(unsigned long *)__va(desc)); -#endif - if (mmusr & MMU_I) do_page_fault (&fp->ptregs, addr, 0); else if (mmusr & 
(MMU_B|MMU_L|MMU_S)) { - printk ("invalid insn access at %#lx from pc %#lx\n", + pr_err("invalid insn access at %#lx from pc %#lx\n", addr, fp->ptregs.pc); -#ifdef DEBUG - printk("Unknown SIGSEGV - 2\n"); -#endif + pr_debug("Unknown SIGSEGV - 2\n"); die_if_kernel("Oops",&fp->ptregs,mmusr); force_sig(SIGSEGV, current); return; @@ -791,9 +759,7 @@ asmlinkage void buserr_c(struct frame *fp) if (user_mode(&fp->ptregs)) current->thread.esp0 = (unsigned long) fp; -#ifdef DEBUG - printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format); -#endif + pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format); #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) if (CPU_IS_COLDFIRE) { @@ -836,9 +802,7 @@ asmlinkage void buserr_c(struct frame *fp) #endif default: die_if_kernel("bad frame format",&fp->ptregs,0); -#ifdef DEBUG - printk("Unknown SIGSEGV - 4\n"); -#endif + pr_debug("Unknown SIGSEGV - 4\n"); force_sig(SIGSEGV, current); } } @@ -852,7 +816,7 @@ void show_trace(unsigned long *stack) unsigned long addr; int i; - printk("Call Trace:"); + pr_info("Call Trace:"); addr = (unsigned long)stack + THREAD_SIZE - 1; endstack = (unsigned long *)(addr & -THREAD_SIZE); i = 0; @@ -869,13 +833,13 @@ void show_trace(unsigned long *stack) if (__kernel_text_address(addr)) { #ifndef CONFIG_KALLSYMS if (i % 5 == 0) - printk("\n "); + pr_cont("\n "); #endif - printk(" [<%08lx>] %pS\n", addr, (void *)addr); + pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr); i++; } } - printk("\n"); + pr_cont("\n"); } void show_registers(struct pt_regs *regs) @@ -887,81 +851,87 @@ void show_registers(struct pt_regs *regs) int i; print_modules(); - printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); - printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); - printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", + pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); + pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); + pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", regs->d0, regs->d1, regs->d2, regs->d3); - printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", + pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", regs->d4, regs->d5, regs->a0, regs->a1); - printk("Process %s (pid: %d, task=%p)\n", + pr_info("Process %s (pid: %d, task=%p)\n", current->comm, task_pid_nr(current), current); addr = (unsigned long)&fp->un; - printk("Frame format=%X ", regs->format); + pr_info("Frame format=%X ", regs->format); switch (regs->format) { case 0x2: - printk("instr addr=%08lx\n", fp->un.fmt2.iaddr); + pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr); addr += sizeof(fp->un.fmt2); break; case 0x3: - printk("eff addr=%08lx\n", fp->un.fmt3.effaddr); + pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr); addr += sizeof(fp->un.fmt3); break; case 0x4: - printk((CPU_IS_060 ? 
"fault addr=%08lx fslw=%08lx\n" - : "eff addr=%08lx pc=%08lx\n"), - fp->un.fmt4.effaddr, fp->un.fmt4.pc); + if (CPU_IS_060) + pr_cont("fault addr=%08lx fslw=%08lx\n", + fp->un.fmt4.effaddr, fp->un.fmt4.pc); + else + pr_cont("eff addr=%08lx pc=%08lx\n", + fp->un.fmt4.effaddr, fp->un.fmt4.pc); addr += sizeof(fp->un.fmt4); break; case 0x7: - printk("eff addr=%08lx ssw=%04x faddr=%08lx\n", + pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n", fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr); - printk("wb 1 stat/addr/data: %04x %08lx %08lx\n", + pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n", fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0); - printk("wb 2 stat/addr/data: %04x %08lx %08lx\n", + pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n", fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d); - printk("wb 3 stat/addr/data: %04x %08lx %08lx\n", + pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n", fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d); - printk("push data: %08lx %08lx %08lx %08lx\n", + pr_info("push data: %08lx %08lx %08lx %08lx\n", fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2, fp->un.fmt7.pd3); addr += sizeof(fp->un.fmt7); break; case 0x9: - printk("instr addr=%08lx\n", fp->un.fmt9.iaddr); + pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr); addr += sizeof(fp->un.fmt9); break; case 0xa: - printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", + pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb, fp->un.fmta.daddr, fp->un.fmta.dobuf); addr += sizeof(fp->un.fmta); break; case 0xb: - printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", + pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb, fp->un.fmtb.daddr, fp->un.fmtb.dobuf); - printk("baddr=%08lx dibuf=%08lx ver=%x\n", + pr_info("baddr=%08lx dibuf=%08lx ver=%x\n", fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver); addr += sizeof(fp->un.fmtb); break; default: - printk("\n"); + pr_cont("\n"); } show_stack(NULL, (unsigned long *)addr); - printk("Code:"); + pr_info("Code:"); set_fs(KERNEL_DS); cp = (u16 *)regs->pc; for (i = -8; i < 16; i++) { if (get_user(c, cp + i) && i >= 0) { - printk(" Bad PC value."); + pr_cont(" Bad PC value."); break; } - printk(i ? 
" %04x" : " <%04x>", c); + if (i) + pr_cont(" %04x", c); + else + pr_cont(" <%04x>", c); } set_fs(old_fs); - printk ("\n"); + pr_cont("\n"); } void show_stack(struct task_struct *task, unsigned long *stack) @@ -978,16 +948,16 @@ void show_stack(struct task_struct *task, unsigned long *stack) } endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE); - printk("Stack from %08lx:", (unsigned long)stack); + pr_info("Stack from %08lx:", (unsigned long)stack); p = stack; for (i = 0; i < kstack_depth_to_print; i++) { if (p + 1 > endstack) break; if (i % 8 == 0) - printk("\n "); - printk(" %08lx", *p++); + pr_cont("\n "); + pr_cont(" %08lx", *p++); } - printk("\n"); + pr_cont("\n"); show_trace(stack); } @@ -1005,32 +975,32 @@ void bad_super_trap (struct frame *fp) console_verbose(); if (vector < ARRAY_SIZE(vec_names)) - printk ("*** %s *** FORMAT=%X\n", + pr_err("*** %s *** FORMAT=%X\n", vec_names[vector], fp->ptregs.format); else - printk ("*** Exception %d *** FORMAT=%X\n", + pr_err("*** Exception %d *** FORMAT=%X\n", vector, fp->ptregs.format); if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) { unsigned short ssw = fp->un.fmtb.ssw; - printk ("SSW=%#06x ", ssw); + pr_err("SSW=%#06x ", ssw); if (ssw & RC) - printk ("Pipe stage C instruction fault at %#010lx\n", + pr_err("Pipe stage C instruction fault at %#010lx\n", (fp->ptregs.format) == 0xA ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2); if (ssw & RB) - printk ("Pipe stage B instruction fault at %#010lx\n", + pr_err("Pipe stage B instruction fault at %#010lx\n", (fp->ptregs.format) == 0xA ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF) - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); } - printk ("Current process id is %d\n", task_pid_nr(current)); + pr_err("Current process id is %d\n", task_pid_nr(current)); die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); } @@ -1162,7 +1132,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr) return; console_verbose(); - printk("%s: %08x\n",str,nr); + pr_crit("%s: %08x\n", str, nr); show_registers(fp); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); do_exit(SIGSEGV); diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index afb95d5fb26b..982c3fe73c4a 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -26,9 +26,10 @@ #include <linux/adb.h> #include <linux/cuda.h> -#define BOOTINFO_COMPAT_1_0 #include <asm/setup.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-mac.h> +#include <asm/byteorder.h> #include <asm/io.h> #include <asm/irq.h> @@ -107,45 +108,46 @@ static void __init mac_sched_init(irq_handler_t vector) int __init mac_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const u_long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_MAC_MODEL: - mac_bi_data.id = *data; + mac_bi_data.id = be32_to_cpup(data); break; case BI_MAC_VADDR: - mac_bi_data.videoaddr = *data; + mac_bi_data.videoaddr = be32_to_cpup(data); break; case BI_MAC_VDEPTH: - mac_bi_data.videodepth = *data; + mac_bi_data.videodepth = be32_to_cpup(data); break; case BI_MAC_VROW: - mac_bi_data.videorow = *data; + mac_bi_data.videorow = be32_to_cpup(data); break; case BI_MAC_VDIM: - mac_bi_data.dimensions = *data; + mac_bi_data.dimensions = be32_to_cpup(data); break; case BI_MAC_VLOGICAL: - mac_bi_data.videological = VIDEOMEMBASE + 
(*data & ~VIDEOMEMMASK); - mac_orig_videoaddr = *data; + mac_orig_videoaddr = be32_to_cpup(data); + mac_bi_data.videological = + VIDEOMEMBASE + (mac_orig_videoaddr & ~VIDEOMEMMASK); break; case BI_MAC_SCCBASE: - mac_bi_data.sccbase = *data; + mac_bi_data.sccbase = be32_to_cpup(data); break; case BI_MAC_BTIME: - mac_bi_data.boottime = *data; + mac_bi_data.boottime = be32_to_cpup(data); break; case BI_MAC_GMTBIAS: - mac_bi_data.gmtbias = *data; + mac_bi_data.gmtbias = be32_to_cpup(data); break; case BI_MAC_MEMSIZE: - mac_bi_data.memsize = *data; + mac_bi_data.memsize = be32_to_cpup(data); break; case BI_MAC_CPUID: - mac_bi_data.cpuid = *data; + mac_bi_data.cpuid = be32_to_cpup(data); break; case BI_MAC_ROMBASE: - mac_bi_data.rombase = *data; + mac_bi_data.rombase = be32_to_cpup(data); break; default: unknown = 1; diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 7d8d46127ad9..4d2adfb32a2a 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c @@ -111,16 +111,15 @@ #include <linux/init.h> #include <linux/interrupt.h> -#include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_iop.h> /*#define DEBUG_IOP*/ -/* Set to non-zero if the IOPs are present. Set by iop_init() */ +/* Non-zero if the IOPs are present */ -int iop_scc_present,iop_ism_present; +int iop_scc_present, iop_ism_present; /* structure for tracking channel listeners */ diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 5e085554ac7f..707b61aea203 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -25,8 +25,6 @@ #include <asm/mac_via.h> #include <asm/mac_oss.h> -#define BOOTINFO_COMPAT_1_0 -#include <asm/bootinfo.h> #include <asm/machdep.h> /* Offset between Unix time (1970-based) and Mac time (1904-based) */ diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c index 6c4c882c126e..54037125ebf8 100644 --- a/arch/m68k/mac/oss.c +++ b/arch/m68k/mac/oss.c @@ -21,7 +21,6 @@ #include <linux/init.h> #include <linux/irq.h> -#include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c index 6f026fc302fa..835fa04511c8 100644 --- a/arch/m68k/mac/psc.c +++ b/arch/m68k/mac/psc.c @@ -21,7 +21,6 @@ #include <linux/irq.h> #include <asm/traps.h> -#include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_psc.h> @@ -54,7 +53,7 @@ static void psc_debug_dump(void) * expanded to cover what I think are the other 7 channels. 
*/ -static void psc_dma_die_die_die(void) +static __init void psc_dma_die_die_die(void) { int i; diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 5d1458bb871b..e198dec868e4 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -30,7 +30,6 @@ #include <linux/module.h> #include <linux/irq.h> -#include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index eb1d61f68725..2bd7487440c4 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -25,9 +25,8 @@ int send_fault_sig(struct pt_regs *regs) siginfo.si_signo = current->thread.signo; siginfo.si_code = current->thread.code; siginfo.si_addr = (void *)current->thread.faddr; -#ifdef DEBUG - printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code); -#endif + pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, + siginfo.si_signo, siginfo.si_code); if (user_mode(regs)) { force_sig_info(siginfo.si_signo, @@ -45,10 +44,10 @@ int send_fault_sig(struct pt_regs *regs) * terminate things with extreme prejudice. */ if ((unsigned long)siginfo.si_addr < PAGE_SIZE) - printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); + pr_alert("Unable to handle kernel NULL pointer dereference"); else - printk(KERN_ALERT "Unable to handle kernel access"); - printk(" at virtual address %p\n", siginfo.si_addr); + pr_alert("Unable to handle kernel access"); + pr_cont(" at virtual address %p\n", siginfo.si_addr); die_if_kernel("Oops", regs, 0 /*error_code*/); do_exit(SIGKILL); } @@ -75,11 +74,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; -#ifdef DEBUG - printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", - regs->sr, regs->pc, address, error_code, - current->mm->pgd); -#endif + pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", + regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL); /* * If we're in an interrupt or have no user @@ -118,9 +114,7 @@ retry: * we can handle it.. 
*/ good_area: -#ifdef DEBUG - printk("do_page_fault: good_area\n"); -#endif + pr_debug("do_page_fault: good_area\n"); switch (error_code & 3) { default: /* 3: write, present */ /* fall through */ @@ -143,9 +137,7 @@ good_area: */ fault = handle_mm_fault(mm, vma, address, flags); -#ifdef DEBUG - printk("handle_mm_fault returns %d\n",fault); -#endif + pr_debug("handle_mm_fault returns %d\n", fault); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return 0; diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 6b4baa6e4d31..acaff6a49e35 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -59,7 +59,7 @@ EXPORT_SYMBOL(pg_data_table); void __init m68k_setup_node(int node) { #ifndef CONFIG_SINGLE_MEMORY_CHUNK - struct mem_info *info = m68k_memory + node; + struct m68k_mem_info *info = m68k_memory + node; int i, end; i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift(); diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index 568cfad3ceb8..6e4955bc542b 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c @@ -27,9 +27,9 @@ /* * For 040/060 we can use the virtual memory area like other architectures, - * but for 020/030 we want to use early termination page descriptor and we + * but for 020/030 we want to use early termination page descriptors and we * can't mix this with normal page descriptors, so we have to copy that code - * (mm/vmalloc.c) and return appriorate aligned addresses. + * (mm/vmalloc.c) and return appropriately aligned addresses. */ #ifdef CPU_M68040_OR_M68060_ONLY @@ -224,7 +224,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla EXPORT_SYMBOL(__ioremap); /* - * Unmap a ioremap()ed region again + * Unmap an ioremap()ed region again */ void iounmap(void __iomem *addr) { @@ -241,8 +241,8 @@ EXPORT_SYMBOL(iounmap); /* * __iounmap unmaps nearly everything, so be careful - * it doesn't free currently pointer/page tables anymore but it - * wans't used anyway and might be added later. + * Currently it doesn't free pointer/page tables anymore but this + * wasn't used anyway and might be added later. 
*/ void __iounmap(void *addr, unsigned long size) { diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 251c5437787b..7d4024432163 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -233,7 +233,7 @@ void __init paging_init(void) printk("Fix your bootloader or use a memfile to make use of this area!\n"); m68k_num_memory--; memmove(m68k_memory + i, m68k_memory + i + 1, - (m68k_num_memory - i) * sizeof(struct mem_info)); + (m68k_num_memory - i) * sizeof(struct m68k_mem_info)); continue; } addr = m68k_memory[i].addr + m68k_memory[i].size; diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 1c6262803b94..1bb3ce6634d3 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -26,6 +26,8 @@ #include <linux/interrupt.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-vme.h> +#include <asm/byteorder.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> @@ -51,9 +53,10 @@ static int bcd2int (unsigned char b); irq_handler_t tick_handler; -int mvme147_parse_bootinfo(const struct bi_record *bi) +int __init mvme147_parse_bootinfo(const struct bi_record *bi) { - if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO) + uint16_t tag = be16_to_cpu(bi->tag); + if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO) return 0; else return 1; diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index 080a342458a1..eab7d342757e 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -29,6 +29,8 @@ #include <linux/module.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-vme.h> +#include <asm/byteorder.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> @@ -60,9 +62,10 @@ unsigned short mvme16x_config; EXPORT_SYMBOL(mvme16x_config); -int mvme16x_parse_bootinfo(const struct bi_record *bi) +int __init mvme16x_parse_bootinfo(const struct bi_record *bi) { - if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO) + uint16_t tag = be16_to_cpu(bi->tag); + if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO) return 0; else return 1; @@ -87,15 +90,15 @@ static void mvme16x_get_model(char *model) suf[3] = '\0'; suf[0] = suf[1] ? '-' : '\0'; - sprintf(model, "Motorola MVME%x%s", p->brdno, suf); + sprintf(model, "Motorola MVME%x%s", be16_to_cpu(p->brdno), suf); } static void mvme16x_get_hardware_list(struct seq_file *m) { - p_bdid p = &mvme_bdid; + uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); - if (p->brdno == 0x0162 || p->brdno == 0x0172) + if (brdno == 0x0162 || brdno == 0x0172) { unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; @@ -285,6 +288,7 @@ void __init config_mvme16x(void) { p_bdid p = &mvme_bdid; char id[40]; + uint16_t brdno = be16_to_cpu(p->brdno); mach_max_dma_address = 0xffffffff; mach_sched_init = mvme16x_sched_init; @@ -306,18 +310,18 @@ void __init config_mvme16x(void) } /* Board type is only set by newer versions of vmelilo/tftplilo */ if (vme_brdtype == 0) - vme_brdtype = p->brdno; + vme_brdtype = brdno; mvme16x_get_model(id); printk ("\nBRD_ID: %s BUG %x.%x %02x/%02x/%02x\n", id, p->rev>>4, p->rev&0xf, p->yr, p->mth, p->day); - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) { unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; mvme16x_config = rev | MVME16x_CONFIG_GOT_SCCA; - printk ("MVME%x Hardware status:\n", p->brdno); + printk ("MVME%x Hardware status:\n", brdno); printk (" CPU Type 68%s040\n", rev & MVME16x_CONFIG_GOT_FPU ? 
"" : "LC"); printk (" CPU clock %dMHz\n", @@ -347,12 +351,12 @@ void __init config_mvme16x(void) static irqreturn_t mvme16x_abort_int (int irq, void *dev_id) { - p_bdid p = &mvme_bdid; unsigned long *new = (unsigned long *)vectors; unsigned long *old = (unsigned long *)0xffe00000; volatile unsigned char uc, *ucp; + uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) { ucp = (volatile unsigned char *)0xfff42043; uc = *ucp | 8; @@ -366,7 +370,7 @@ static irqreturn_t mvme16x_abort_int (int irq, void *dev_id) *(new+9) = *(old+9); /* Trace */ *(new+47) = *(old+47); /* Trap #15 */ - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) *(new+0x5e) = *(old+0x5e); /* ABORT switch */ else *(new+0x6e) = *(old+0x6e); /* ABORT switch */ @@ -381,7 +385,7 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id) void mvme16x_sched_init (irq_handler_t timer_routine) { - p_bdid p = &mvme_bdid; + uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); int irq; tick_handler = timer_routine; @@ -394,7 +398,7 @@ void mvme16x_sched_init (irq_handler_t timer_routine) "timer", mvme16x_timer_int)) panic ("Couldn't register timer int"); - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) irq = MVME162_IRQ_ABORT; else irq = MVME167_IRQ_ABORT; diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index 078bb744b5fe..e90fe903613e 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c @@ -154,7 +154,7 @@ static unsigned int serports[] = 0x3f8,0x2f8,0x3e8,0x2e8,0 }; -static void q40_disable_irqs(void) +static void __init q40_disable_irqs(void) { unsigned i, j; @@ -198,7 +198,7 @@ void __init config_q40(void) } -int q40_parse_bootinfo(const struct bi_record *rec) +int __init q40_parse_bootinfo(const struct bi_record *rec) { return 1; } diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c index d522eaab4551..d95506e06c2a 100644 --- a/arch/m68k/sun3/dvma.c +++ b/arch/m68k/sun3/dvma.c @@ -7,6 +7,7 @@ * */ +#include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bootmem.h> @@ -62,10 +63,7 @@ int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, } -void sun3_dvma_init(void) +void __init sun3_dvma_init(void) { - memset(ptelist, 0, sizeof(ptelist)); - - } diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index 8edc510a21be..3f258e230ba5 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c @@ -6,6 +6,7 @@ ** Started 1/16/98 @ 2:22 am */ +#include <linux/init.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/kernel.h> @@ -122,7 +123,7 @@ void print_pte_vaddr (unsigned long vaddr) /* * Initialise the MMU emulator. */ -void mmu_emu_init(unsigned long bootmem_end) +void __init mmu_emu_init(unsigned long bootmem_end) { unsigned long seg, num; int i,j; diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c index cab54482ca34..b37521a5259d 100644 --- a/arch/m68k/sun3/sun3dvma.c +++ b/arch/m68k/sun3/sun3dvma.c @@ -6,6 +6,8 @@ * Contains common routines for sun3/sun3x DVMA management. 
*/ +#include <linux/bootmem.h> +#include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/gfp.h> @@ -30,7 +32,7 @@ static inline void dvma_unmap_iommu(unsigned long a, int b) extern void sun3_dvma_init(void); #endif -static unsigned long iommu_use[IOMMU_TOTAL_ENTRIES]; +static unsigned long *iommu_use; #define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT) @@ -245,7 +247,7 @@ static inline int free_baddr(unsigned long baddr) } -void dvma_init(void) +void __init dvma_init(void) { struct hole *hole; @@ -265,7 +267,7 @@ void dvma_init(void) list_add(&(hole->list), &hole_list); - memset(iommu_use, 0, sizeof(iommu_use)); + iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long)); dvma_unmap_iommu(DVMA_START, DVMA_SIZE); diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c index a7b7e818d627..0898c3f81508 100644 --- a/arch/m68k/sun3x/prom.c +++ b/arch/m68k/sun3x/prom.c @@ -10,7 +10,6 @@ #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/bootinfo.h> #include <asm/setup.h> #include <asm/traps.h> #include <asm/sun3xprom.h> diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index c90bfc6bf648..5d6b4b407dda 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -82,4 +82,19 @@ static inline void fence(void) #define smp_read_barrier_depends() do { } while (0) #define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #endif /* _ASM_METAG_BARRIER_H */ diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h index e0373f81a117..1d7e770f7a54 100644 --- a/arch/metag/include/asm/smp.h +++ b/arch/metag/include/asm/smp.h @@ -7,13 +7,11 @@ enum ipi_msg_type { IPI_CALL_FUNC, - IPI_CALL_FUNC_SINGLE, IPI_RESCHEDULE, }; extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask asmlinkage void secondary_start_kernel(void); diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index db589ad5dbc4..c700d625067a 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c @@ -399,11 +399,6 @@ static int __init dma_alloc_init(void) pgd = pgd_offset(&init_mm, CONSISTENT_START); pud = pud_alloc(&init_mm, pgd, CONSISTENT_START); pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START); - if (!pmd) { - pr_err("%s: no pmd tables\n", __func__); - ret = -ENOMEM; - break; - } WARN_ON(!pmd_none(*pmd)); pte = pte_alloc_kernel(pmd, CONSISTENT_START); diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index 7c0113142981..f006d2276f40 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c @@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running); /* * "thread" is assumed to be a valid Meta hardware thread ID. 
*/ -int boot_secondary(unsigned int thread, struct task_struct *idle) +static int boot_secondary(unsigned int thread, struct task_struct *idle) { u32 val; @@ -491,7 +491,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) void arch_send_call_function_single_ipi(int cpu) { - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } void show_ipi_list(struct seq_file *p) @@ -517,11 +517,10 @@ static DEFINE_SPINLOCK(stop_lock); * * Bit 0 - Inter-processor function call */ -static int do_IPI(struct pt_regs *regs) +static int do_IPI(void) { unsigned int cpu = smp_processor_id(); struct ipi_data *ipi = &per_cpu(ipi_data, cpu); - struct pt_regs *old_regs = set_irq_regs(regs); unsigned long msgs, nextmsg; int handled = 0; @@ -546,10 +545,6 @@ static int do_IPI(struct pt_regs *regs) generic_smp_call_function_interrupt(); break; - case IPI_CALL_FUNC_SINGLE: - generic_smp_call_function_single_interrupt(); - break; - default: pr_crit("CPU%u: Unknown IPI message 0x%lx\n", cpu, nextmsg); @@ -557,8 +552,6 @@ static int do_IPI(struct pt_regs *regs) } } - set_irq_regs(old_regs); - return handled; } @@ -624,7 +617,7 @@ static void kick_raise_softirq(cpumask_t callmap, unsigned int irq) static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI, int *handled) { - *handled = do_IPI((struct pt_regs *)State.Sig.pCtx); + *handled = do_IPI(); return State; } diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c index bec3dec4922e..4ba595701f7d 100644 --- a/arch/metag/kernel/topology.c +++ b/arch/metag/kernel/topology.c @@ -19,6 +19,7 @@ DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data); cpumask_t cpu_core_map[NR_CPUS]; +EXPORT_SYMBOL(cpu_core_map); static cpumask_t cpu_coregroup_map(unsigned int cpu) { diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index ce0bbf8f5640..a82426589fff 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -1,4 +1,5 @@ +generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h generic-y += trace_clock.h diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h deleted file mode 100644 index df5be3e87044..000000000000 --- a/arch/microblaze/include/asm/barrier.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2006 Atmark Techno, Inc. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#ifndef _ASM_MICROBLAZE_BARRIER_H -#define _ASM_MICROBLAZE_BARRIER_H - -#define nop() asm volatile ("nop") - -#define smp_read_barrier_depends() do {} while (0) -#define read_barrier_depends() do {} while (0) - -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() - -#endif /* _ASM_MICROBLAZE_BARRIER_H */ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 650de3976e7a..c93d92beb3d6 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -47,6 +47,7 @@ config MIPS select MODULES_USE_ELF_RELA if MODULES && 64BIT select CLONE_BACKWARDS select HAVE_DEBUG_STACKOVERFLOW + select HAVE_CC_STACKPROTECTOR menu "Machine selection" @@ -2322,19 +2323,6 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. 
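A few hunks below, arch/mips/include/asm/barrier.h gains smp_store_release() and smp_load_acquire(), matching the additions made elsewhere in this diff for metag, powerpc and s390. A minimal usage sketch of the new helpers follows; the structure, field and function names are illustrative only and not taken from this patch:

#include <asm/barrier.h>

/* Illustrative producer/consumer pair built on the new primitives. */
struct pub_data {
	int a;
	int b;
};
static struct pub_data data;
static int ready;

/* Producer: the release barrier orders the data stores before the flag store. */
static void publish(void)
{
	data.a = 1;
	data.b = 2;
	smp_store_release(&ready, 1);
}

/* Consumer: the acquire barrier orders the flag load before the data loads. */
static int consume(void)
{
	if (!smp_load_acquire(&ready))
		return -1;		/* not yet published */
	return data.a + data.b;		/* sees the values written before the release */
}

As the corresponding hunks in this diff show, powerpc implements the pair with lwsync ordering while s390 needs only a compiler barrier.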
-config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - - This feature requires gcc version 4.2 or above. - config USE_OF bool select OF diff --git a/arch/mips/Makefile b/arch/mips/Makefile index de300b993607..efe50787cd89 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -232,10 +232,6 @@ bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ LDFLAGS += -m $(ld-emul) -ifdef CONFIG_CC_STACKPROTECTOR - KBUILD_CFLAGS += -fstack-protector -endif - ifdef CONFIG_MIPS CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c index 9a357fffcfbe..820b7a313d9b 100644 --- a/arch/mips/ar7/setup.c +++ b/arch/mips/ar7/setup.c @@ -92,7 +92,6 @@ void __init plat_mem_setup(void) _machine_restart = ar7_machine_restart; _machine_halt = ar7_machine_halt; pm_power_off = ar7_machine_power_off; - panic_timeout = 3; io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000); if (!io_base) diff --git a/arch/mips/emma/markeins/setup.c b/arch/mips/emma/markeins/setup.c index d71005835c00..9100122e5cef 100644 --- a/arch/mips/emma/markeins/setup.c +++ b/arch/mips/emma/markeins/setup.c @@ -111,9 +111,6 @@ void __init plat_mem_setup(void) iomem_resource.start = EMMA2RH_IO_BASE; iomem_resource.end = EMMA2RH_ROM_BASE - 1; - /* Reboot on panic */ - panic_timeout = 180; - markeins_sio_setup(); } diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index f26d8e1bf3c3..e1aa4e4c2984 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -180,4 +180,19 @@ #define nudge_writes() mb() #endif +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #endif /* __ASM_BARRIER_H */ diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h index c75025f27c20..06b9bc7ea14b 100644 --- a/arch/mips/include/asm/cacheops.h +++ b/arch/mips/include/asm/cacheops.h @@ -83,6 +83,6 @@ /* * Loongson2-specific cacheops */ -#define Hit_Invalidate_I_Loongson23 0x00 +#define Hit_Invalidate_I_Loongson2 0x00 #endif /* __ASM_CACHEOPS_H */ diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 34d1a1917125..c84caddb8bde 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr) __iflush_prologue switch (boot_cpu_type()) { case CPU_LOONGSON2: - cache_op(Hit_Invalidate_I_Loongson23, addr); + cache_op(Hit_Invalidate_I_Loongson2, addr); break; default: @@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr) { switch (boot_cpu_type()) { case CPU_LOONGSON2: - protected_cache_op(Hit_Invalidate_I_Loongson23, addr); + protected_cache_op(Hit_Invalidate_I_Loongson2, addr); break; default: @@ -357,8 +357,8 @@ static inline 
void invalidate_tcache_page(unsigned long addr) "i" (op)); /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ -#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \ -static inline void blast_##pfx##cache##lsize(void) \ +#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \ +static inline void extra##blast_##pfx##cache##lsize(void) \ { \ unsigned long start = INDEX_BASE; \ unsigned long end = start + current_cpu_data.desc.waysize; \ @@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void) \ __##pfx##flush_epilogue \ } \ \ -static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ +static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \ { \ unsigned long start = page; \ unsigned long end = page + PAGE_SIZE; \ @@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ __##pfx##flush_epilogue \ } \ \ -static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ +static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ { \ unsigned long indexmask = current_cpu_data.desc.waysize - 1; \ unsigned long start = INDEX_BASE + (page & indexmask); \ @@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) __##pfx##flush_epilogue \ } -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) - -__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16) -__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, ) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, ) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 
128, ) + +__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, ) +__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, ) /* build blast_xxx_range, protected_blast_xxx_range */ #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ @@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) -__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \ - protected_, loongson23_) +__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \ + protected_, loongson2_) __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) /* blast_inv_dcache_range */ diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 62ffd20ea869..49e572d879e1 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void) r4k_blast_icache_page = (void *)cache_noop; else if (ic_lsize == 16) r4k_blast_icache_page = blast_icache16_page; + else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2) + r4k_blast_icache_page = loongson2_blast_icache32_page; else if (ic_lsize == 32) r4k_blast_icache_page = blast_icache32_page; else if (ic_lsize == 64) @@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void) else if (TX49XX_ICACHE_INDEX_INV_WAR) r4k_blast_icache_page_indexed = tx49_blast_icache32_page_indexed; + else if (current_cpu_type() == CPU_LOONGSON2) + r4k_blast_icache_page_indexed = + loongson2_blast_icache32_page_indexed; else r4k_blast_icache_page_indexed = blast_icache32_page_indexed; @@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void) r4k_blast_icache = blast_r4600_v1_icache32; else if (TX49XX_ICACHE_INDEX_INV_WAR) r4k_blast_icache = tx49_blast_icache32; + else if (current_cpu_type() == CPU_LOONGSON2) + r4k_blast_icache = loongson2_blast_icache32; else r4k_blast_icache = blast_icache32; } else if (ic_lsize == 64) @@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo else { switch (boot_cpu_type()) { case CPU_LOONGSON2: - protected_blast_icache_range(start, end); + protected_loongson2_blast_icache_range(start, end); break; default: - protected_loongson23_blast_icache_range(start, end); + protected_blast_icache_range(start, end); break; } } diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index 6d981bb337ec..54e75c77184b 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -92,7 +92,6 @@ static void __init xlp_init_mem_from_bars(void) void __init plat_mem_setup(void) { - panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 214d123b79fa..921be5f77797 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ 
b/arch/mips/netlogic/xlr/setup.c @@ -92,7 +92,6 @@ static void nlm_linux_exit(void) void __init plat_mem_setup(void) { - panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 41707a245dea..3462c831d0ea 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -134,8 +134,6 @@ void __init plat_mem_setup(void) #error invalid SiByte board configuration #endif - panic_timeout = 5; /* For debug. */ - board_be_handler = swarm_be_handler; if (xicor_probe()) diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 74742dc6a3da..032143ec2324 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -1,4 +1,5 @@ +generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h generic-y += trace_clock.h diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h deleted file mode 100644 index 2bd97a5c8af7..000000000000 --- a/arch/mn10300/include/asm/barrier.h +++ /dev/null @@ -1,37 +0,0 @@ -/* MN10300 memory barrier definitions - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ -#ifndef _ASM_BARRIER_H -#define _ASM_BARRIER_H - -#define nop() asm volatile ("nop") - -#define mb() asm volatile ("": : :"memory") -#define rmb() mb() -#define wmb() asm volatile ("": : :"memory") - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define set_mb(var, value) do { xchg(&var, value); } while (0) -#else /* CONFIG_SMP */ -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#endif /* CONFIG_SMP */ - -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) - -#endif /* _ASM_BARRIER_H */ diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index a603b9ebe54c..34b0be4ca52d 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,4 +1,5 @@ +generic-y += barrier.h generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \ segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \ div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \ diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h deleted file mode 100644 index e77d834aa803..000000000000 --- a/arch/parisc/include/asm/barrier.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __PARISC_BARRIER_H -#define __PARISC_BARRIER_H - -/* -** This is simply the barrier() macro from linux/kernel.h but when serial.c -** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h -** hasn't yet been included yet so it fails, thus repeating the macro here. -** -** PA-RISC architecture allows for weakly ordered memory accesses although -** none of the processors use it. There is a strong ordered bit that is -** set in the O-bit of the page directory entry. 
Operating systems that -** can not tolerate out of order accesses should set this bit when mapping -** pages. The O-bit of the PSW should also be set to 1 (I don't believe any -** of the processor implemented the PSW O-bit). The PCX-W ERS states that -** the TLB O-bit is not implemented so the page directory does not need to -** have the O-bit set when mapping pages (section 3.1). This section also -** states that the PSW Y, Z, G, and O bits are not implemented. -** So it looks like nothing needs to be done for parisc-linux (yet). -** (thanks to chada for the above comment -ggg) -** -** The __asm__ op below simple prevents gcc/ld from reordering -** instructions across the mb() "call". -*/ -#define mb() __asm__ __volatile__("":::"memory") /* barrier() */ -#define rmb() mb() -#define wmb() mb() -#define smp_mb() mb() -#define smp_rmb() mb() -#define smp_wmb() mb() -#define smp_read_barrier_depends() do { } while(0) -#define read_barrier_depends() do { } while(0) - -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#endif /* __PARISC_BARRIER_H */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index f33113a6141e..70b3674dac4e 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -75,6 +75,6 @@ #define SO_BUSY_POLL 0x4027 -#define SO_MAX_PACING_RATE 0x4048 +#define SO_MAX_PACING_RATE 0x4028 #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b44b52c0a8f0..b2be8e8cb5c7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -147,6 +147,10 @@ config EARLY_PRINTK bool default y +config PANIC_TIMEOUT + int + default 180 + config COMPAT bool default y if PPC64 diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ae782254e731..f89da808ce31 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -45,11 +45,15 @@ # define SMPWMB eieio #endif +#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") + #define smp_mb() mb() -#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define smp_rmb() __lwsync() #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") #define smp_read_barrier_depends() read_barrier_depends() #else +#define __lwsync() barrier() + #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() @@ -65,4 +69,19 @@ #define data_barrier(x) \ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + __lwsync(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + __lwsync(); \ + ___p1; \ +}) + #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 703a8412dac2..11ba86e17631 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -26,6 +26,7 @@ extern void reloc_got2(unsigned long); void check_for_initrd(void); void do_init_bootmem(void); void setup_panic(void); +#define ARCH_PANIC_TIMEOUT 180 #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 5f54a744dcc5..f6e78d63fb6a 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,6 +28,8 @@ #include <asm/synch.h> #include 
<asm/ppc-opcode.h> +#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ + #define arch_spin_is_locked(x) ((x)->slock != 0) #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 75c6ecdb8f37..7422a999a39a 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t; struct arch_uprobe { union { - u8 insn[MAX_UINSN_BYTES]; - u8 ixol[MAX_UINSN_BYTES]; - u32 ainsn; + u32 insn; + u32 ixol; }; }; diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index cb64a6e1dc51..078145acf7fb 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void) /* Get the full OF pathname of the stdout device */ memset(path, 0, 256); call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); - stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); - val = cpu_to_be32(stdout_node); - prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", - &val, sizeof(val)); prom_printf("OF stdout device is: %s\n", of_stdout_device); prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", path, strlen(path) + 1); - /* If it's a display, note it */ - memset(type, 0, sizeof(type)); - prom_getprop(stdout_node, "device_type", type, sizeof(type)); - if (strcmp(type, "display") == 0) - prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); + /* instance-to-package fails on PA-Semi */ + stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); + if (stdout_node != PROM_ERROR) { + val = cpu_to_be32(stdout_node); + prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", + &val, sizeof(val)); + + /* If it's a display, note it */ + memset(type, 0, sizeof(type)); + prom_getprop(stdout_node, "device_type", type, sizeof(type)); + if (strcmp(type, "display") == 0) + prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); + } } static int __init prom_find_machine_type(void) diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index b903dc5cf944..2b0da27eaee4 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -296,9 +296,6 @@ void __init setup_arch(char **cmdline_p) if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE)) ucache_bsize = icache_bsize = dcache_bsize; - /* reboot on panic */ - panic_timeout = 180; - if (ppc_md.panic) setup_panic(); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4085aaa9478f..856dd4e99bfe 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -588,9 +588,6 @@ void __init setup_arch(char **cmdline_p) dcache_bsize = ppc64_caches.dline_size; icache_bsize = ppc64_caches.iline_size; - /* reboot on panic */ - panic_timeout = 180; - if (ppc_md.panic) setup_panic(); diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 59f419b935f2..003b20964ea0 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) * emulate_step() returns 1 if the insn was successfully emulated. * For all other cases, we need to single-step in hardware. 
*/ - ret = emulate_step(regs, auprobe->ainsn); + ret = emulate_step(regs, auprobe->insn); if (ret > 0) return true; diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index ac3c2a10dafd..555034f8505e 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -223,10 +223,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, } PPC_DIVWU(r_A, r_A, r_X); break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ + case BPF_S_ALU_DIV_K: /* A /= K */ + if (K == 1) + break; PPC_LI32(r_scratch1, K); - /* Top 32 bits of 64bit result -> A */ - PPC_MULHWU(r_A, r_A, r_scratch1); + PPC_DIVWU(r_A, r_A, r_scratch1); break; case BPF_S_ALU_AND_X: ctx->seen |= SEEN_XREG; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index c1f190858701..6f76ae417f47 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -470,7 +470,7 @@ static long pseries_little_endian_exceptions(void) static void __init pSeries_setup_arch(void) { - panic_timeout = 10; + set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); /* Discover PIC type and setup ppc_md accordingly */ pseries_discover_pic(); diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 16760eeb79b0..578680f6207a 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -32,4 +32,19 @@ #define set_mb(var, value) do { var = value; mb(); } while (0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + #endif /* __ASM_BARRIER_H */ diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 4bf9da03591e..5d7e8cf83bd6 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -38,7 +38,8 @@ #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ - PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME) + PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ + PSW32_ASC_PRIMARY) #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "s390\0\0\0\0" diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index c879fad404c8..cb700d54bd83 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -56,6 +56,96 @@ struct cpumf_ctr_info { u32 reserved2[12]; } __packed; +/* QUERY SAMPLING INFORMATION block */ +struct hws_qsi_info_block { /* Bit(s) */ + unsigned int b0_13:14; /* 0-13: zeros */ + unsigned int as:1; /* 14: basic-sampling authorization */ + unsigned int ad:1; /* 15: diag-sampling authorization */ + unsigned int b16_21:6; /* 16-21: zeros */ + unsigned int es:1; /* 22: basic-sampling enable control */ + unsigned int ed:1; /* 23: diag-sampling enable control */ + unsigned int b24_29:6; /* 24-29: zeros */ + unsigned int cs:1; /* 30: basic-sampling activation control */ + unsigned int cd:1; /* 31: diag-sampling activation control */ + unsigned int bsdes:16; /* 4-5: size of basic sampling entry */ + unsigned int dsdes:16; /* 6-7: size of diagnostic sampling entry */ + unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ + unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ + unsigned long tear; /* 24-31: TEAR contents */ + unsigned long dear; /* 32-39: DEAR 
contents */ + unsigned int rsvrd0; /* 40-43: reserved */ + unsigned int cpu_speed; /* 44-47: CPU speed */ + unsigned long long rsvrd1; /* 48-55: reserved */ + unsigned long long rsvrd2; /* 56-63: reserved */ +} __packed; + +/* SET SAMPLING CONTROLS request block */ +struct hws_lsctl_request_block { + unsigned int s:1; /* 0: maximum buffer indicator */ + unsigned int h:1; /* 1: part. level reserved for VM use*/ + unsigned long long b2_53:52;/* 2-53: zeros */ + unsigned int es:1; /* 54: basic-sampling enable control */ + unsigned int ed:1; /* 55: diag-sampling enable control */ + unsigned int b56_61:6; /* 56-61: - zeros */ + unsigned int cs:1; /* 62: basic-sampling activation control */ + unsigned int cd:1; /* 63: diag-sampling activation control */ + unsigned long interval; /* 8-15: sampling interval */ + unsigned long tear; /* 16-23: TEAR contents */ + unsigned long dear; /* 24-31: DEAR contents */ + /* 32-63: */ + unsigned long rsvrd1; /* reserved */ + unsigned long rsvrd2; /* reserved */ + unsigned long rsvrd3; /* reserved */ + unsigned long rsvrd4; /* reserved */ +} __packed; + +struct hws_basic_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:4; /* 16-19 reserved */ + unsigned int U:4; /* 20-23 Number of unique instruct. */ + unsigned int z:2; /* zeros */ + unsigned int T:1; /* 26 PSW DAT mode */ + unsigned int W:1; /* 27 PSW wait state */ + unsigned int P:1; /* 28 PSW Problem state */ + unsigned int AS:2; /* 29-30 PSW address-space control */ + unsigned int I:1; /* 31 entry valid or invalid */ + unsigned int:16; + unsigned int prim_asn:16; /* primary ASN */ + unsigned long long ia; /* Instruction Address */ + unsigned long long gpp; /* Guest Program Parameter */ + unsigned long long hpp; /* Host Program Parameter */ +} __packed; + +struct hws_diag_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:14; /* 16-19 and 20-30 reserved */ + unsigned int I:1; /* 31 entry valid or invalid */ + u8 data[]; /* Machine-dependent sample data */ +} __packed; + +struct hws_combined_entry { + struct hws_basic_entry basic; /* Basic-sampling data entry */ + struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ +} __packed; + +struct hws_trailer_entry { + union { + struct { + unsigned int f:1; /* 0 - Block Full Indicator */ + unsigned int a:1; /* 1 - Alert request control */ + unsigned int t:1; /* 2 - Timestamp format */ + unsigned long long:61; /* 3 - 63: Reserved */ + }; + unsigned long long flags; /* 0 - 63: All indicators */ + }; + unsigned long long overflow; /* 64 - sample Overflow count */ + unsigned char timestamp[16]; /* 16 - 31 timestamp */ + unsigned long long reserved1; /* 32 -Reserved */ + unsigned long long reserved2; /* */ + unsigned long long progusage1; /* 48 - reserved for programming use */ + unsigned long long progusage2; /* */ +} __packed; + /* Query counter information */ static inline int qctri(struct cpumf_ctr_info *info) { @@ -99,4 +189,95 @@ static inline int ecctr(u64 ctr, u64 *val) return cc; } +/* Query sampling information */ +static inline int qsi(struct hws_qsi_info_block *info) +{ + int cc; + cc = 1; + + asm volatile( + "0: .insn s,0xb2860000,0(%1)\n" + "1: lhi %0,0\n" + "2:\n" + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : "=d" (cc), "+a" (info) + : "m" (*info) + : "cc", "memory"); + + return cc ? 
-EINVAL : 0; +} + +/* Load sampling controls */ +static inline int lsctl(struct hws_lsctl_request_block *req) +{ + int cc; + + cc = 1; + asm volatile( + "0: .insn s,0xb2870000,0(%1)\n" + "1: ipm %0\n" + " srl %0,28\n" + "2:\n" + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : "+d" (cc), "+a" (req) + : "m" (*req) + : "cc", "memory"); + + return cc ? -EINVAL : 0; +} + +/* Sampling control helper functions */ + +#include <linux/time.h> + +static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi, + unsigned long freq) +{ + return (USEC_PER_SEC / freq) * qsi->cpu_speed; +} + +static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi, + unsigned long rate) +{ + return USEC_PER_SEC * qsi->cpu_speed / rate; +} + +#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL +#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL + +/* Return TOD timestamp contained in an trailer entry */ +static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te) +{ + /* TOD in STCKE format */ + if (te->t) + return *((unsigned long long *) &te->timestamp[1]); + + /* TOD in STCK format */ + return *((unsigned long long *) &te->timestamp[0]); +} + +/* Return pointer to trailer entry of an sample data block */ +static inline unsigned long *trailer_entry_ptr(unsigned long v) +{ + void *ret; + + ret = (void *) v; + ret += PAGE_SIZE; + ret -= sizeof(struct hws_trailer_entry); + + return (unsigned long *) ret; +} + +/* Return if the entry in the sample data block table (sdbt) + * is a link to the next sdbt */ +static inline int is_link_entry(unsigned long *s) +{ + return *s & 0x1ul ? 1 : 0; +} + +/* Return pointer to the linked sdbt */ +static inline unsigned long *get_next_sdbt(unsigned long *s) +{ + return (unsigned long *) (*s & ~0x1ul); +} #endif /* _ASM_S390_CPU_MF_H */ diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h index 7e1c917bbba2..09d1dd46bd57 100644 --- a/arch/s390/include/asm/css_chars.h +++ b/arch/s390/include/asm/css_chars.h @@ -29,6 +29,8 @@ struct css_general_char { u32 fcx : 1; /* bit 88 */ u32 : 19; u32 alt_ssi : 1; /* bit 108 */ + u32:1; + u32 narf:1; /* bit 110 */ } __packed; extern struct css_general_char css_general_characteristics; diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index c129ab2ac731..2583466f576b 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -144,6 +144,7 @@ int clp_disable_fh(struct zpci_dev *); void zpci_event_error(void *); void zpci_event_availability(void *); void zpci_rescan(void); +bool zpci_is_enabled(void); #else /* CONFIG_PCI */ static inline void zpci_event_error(void *e) {} static inline void zpci_event_availability(void *e) {} diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 1141fb3e7b21..159a8ec6da9a 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -1,21 +1,40 @@ /* * Performance event support - s390 specific definitions. * - * Copyright IBM Corp. 2009, 2012 + * Copyright IBM Corp. 
2009, 2013 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Hendrik Brueckner <brueckner@linux.vnet.ibm.com> */ -#include <asm/cpu_mf.h> +#ifndef _ASM_S390_PERF_EVENT_H +#define _ASM_S390_PERF_EVENT_H -/* CPU-measurement counter facility */ -#define PERF_CPUM_CF_MAX_CTR 256 +#ifdef CONFIG_64BIT + +#include <linux/perf_event.h> +#include <linux/device.h> +#include <asm/cpu_mf.h> /* Per-CPU flags for PMU states */ #define PMU_F_RESERVED 0x1000 #define PMU_F_ENABLED 0x2000 +#define PMU_F_IN_USE 0x4000 +#define PMU_F_ERR_IBE 0x0100 +#define PMU_F_ERR_LSDA 0x0200 +#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA) + +/* Perf defintions for PMU event attributes in sysfs */ +extern __init const struct attribute_group **cpumf_cf_event_group(void); +extern ssize_t cpumf_events_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page); +#define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name +#define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr) + +#define CPUMF_EVENT_ATTR(cat, name, id) \ + PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show) +#define CPUMF_EVENT_PTR(cat, name) EVENT_PTR(cat, name) -#ifdef CONFIG_64BIT /* Perf callbacks */ struct pt_regs; @@ -23,4 +42,55 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_misc_flags(regs) perf_misc_flags(regs) +/* Perf pt_regs extension for sample-data-entry indicators */ +struct perf_sf_sde_regs { + unsigned char in_guest:1; /* guest sample */ + unsigned long reserved:63; /* reserved */ +}; + +/* Perf PMU definitions for the counter facility */ +#define PERF_CPUM_CF_MAX_CTR 256 + +/* Perf PMU definitions for the sampling facility */ +#define PERF_CPUM_SF_MAX_CTR 2 +#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ +#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ +#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ +#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ +#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ + PERF_CPUM_SF_DIAG_MODE) +#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */ + +#define REG_NONE 0 +#define REG_OVERFLOW 1 +#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) +#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) +#define RAWSAMPLE_REG(hwc) ((hwc)->config) +#define TEAR_REG(hwc) ((hwc)->last_tag) +#define SAMPL_RATE(hwc) ((hwc)->event_base) +#define SAMPL_FLAGS(hwc) ((hwc)->config_base) +#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) +#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS) + +/* Structure for sampling data entries to be passed as perf raw sample data + * to user space. Note that raw sample data must be aligned and, thus, might + * be padded with zeros. 
+ */ +struct sf_raw_sample { +#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE +#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE + u64 format; + u32 size; /* Size of sf_raw_sample */ + u16 bsdes; /* Basic-sampling data entry size */ + u16 dsdes; /* Diagnostic-sampling data entry size */ + struct hws_basic_entry basic; /* Basic-sampling data entry */ + struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ + u8 padding[]; /* Padding to next multiple of 8 */ +} __packed; + +/* Perf hardware reserve and release functions */ +int perf_reserve_sampling(void); +void perf_release_sampling(void); + #endif /* CONFIG_64BIT */ +#endif /* _ASM_S390_PERF_EVENT_H */ diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 57d0d7e794b1..d786c634e052 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -336,7 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, #define QDIO_FLAG_CLEANUP_USING_HALT 0x02 /** - * struct qdio_initialize - qdio initalization data + * struct qdio_initialize - qdio initialization data * @cdev: associated ccw device * @q_format: queue format * @adapter_name: name for the adapter @@ -378,6 +378,34 @@ struct qdio_initialize { struct qdio_outbuf_state *output_sbal_state_array; }; +/** + * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc() + * @l3_ipv6_addr: entry contains IPv6 address + * @l3_ipv4_addr: entry contains IPv4 address + * @l2_addr_lnid: entry contains MAC address and VLAN ID + */ +enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid}; + +/** + * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc() + * @nit: Network interface token + * @addr: Address of one of the three types + * + * The struct is passed to the callback function by qdio_brinfo_desc() + */ +struct qdio_brinfo_entry_l3_ipv6 { + u64 nit; + struct { unsigned char _s6_addr[16]; } addr; +} __packed; +struct qdio_brinfo_entry_l3_ipv4 { + u64 nit; + struct { uint32_t _s_addr; } addr; +} __packed; +struct qdio_brinfo_entry_l2 { + u64 nit; + struct { u8 mac[6]; u16 lnid; } addr_lnid; +} __packed; + #define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */ #define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */ #define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */ @@ -399,5 +427,10 @@ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *); extern int qdio_shutdown(struct ccw_device *, int); extern int qdio_free(struct ccw_device *); extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *); +extern int qdio_pnso_brinfo(struct subchannel_id schid, + int cnc, u16 *response, + void (*cb)(void *priv, enum qdio_brinfo_entry_type type, + void *entry), + void *priv); #endif /* __QDIO_H__ */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 2f390956c7c1..220e171413f8 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -52,8 +52,8 @@ int sclp_chp_configure(struct chp_id chpid); int sclp_chp_deconfigure(struct chp_id chpid); int sclp_chp_read_info(struct sclp_chp_info *info); void sclp_get_ipl_info(struct sclp_ipl_info *info); -bool sclp_has_linemode(void); -bool sclp_has_vt220(void); +bool __init sclp_has_linemode(void); +bool __init sclp_has_vt220(void); int sclp_pci_configure(u32 fid); int sclp_pci_deconfigure(u32 fid); int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); diff --git a/arch/s390/include/uapi/asm/zcrypt.h 
b/arch/s390/include/uapi/asm/zcrypt.h index e83fc116f5bf..f2b18eacaca8 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -154,6 +154,67 @@ struct ica_xcRB { unsigned short priority_window; unsigned int status; } __attribute__((packed)); + +/** + * struct ep11_cprb - EP11 connectivity programming request block + * @cprb_len: CPRB header length [0x0020] + * @cprb_ver_id: CPRB version id. [0x04] + * @pad_000: Alignment pad bytes + * @flags: Admin cmd [0x80] or functional cmd [0x00] + * @func_id: Function id / subtype [0x5434] + * @source_id: Source id [originator id] + * @target_id: Target id [usage/ctrl domain id] + * @ret_code: Return code + * @reserved1: Reserved + * @reserved2: Reserved + * @payload_len: Payload length + */ +struct ep11_cprb { + uint16_t cprb_len; + unsigned char cprb_ver_id; + unsigned char pad_000[2]; + unsigned char flags; + unsigned char func_id[2]; + uint32_t source_id; + uint32_t target_id; + uint32_t ret_code; + uint32_t reserved1; + uint32_t reserved2; + uint32_t payload_len; +} __attribute__((packed)); + +/** + * struct ep11_target_dev - EP11 target device list + * @ap_id: AP device id + * @dom_id: Usage domain id + */ +struct ep11_target_dev { + uint16_t ap_id; + uint16_t dom_id; +}; + +/** + * struct ep11_urb - EP11 user request block + * @targets_num: Number of target adapters + * @targets: Addr to target adapter list + * @weight: Level of request priority + * @req_no: Request id/number + * @req_len: Request length + * @req: Addr to request block + * @resp_len: Response length + * @resp: Addr to response block + */ +struct ep11_urb { + uint16_t targets_num; + uint64_t targets; + uint64_t weight; + uint64_t req_no; + uint64_t req_len; + uint64_t req; + uint64_t resp_len; + uint64_t resp; +} __attribute__((packed)); + #define AUTOSELECT ((unsigned int)0xFFFFFFFF) #define ZCRYPT_IOCTL_MAGIC 'z' @@ -183,6 +244,9 @@ struct ica_xcRB { * ZSECSENDCPRB * Send an arbitrary CPRB to a crypto card. * + * ZSENDEP11CPRB + * Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card. + * * Z90STAT_STATUS_MASK * Return an 64 element array of unsigned chars for the status of * all devices. 
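The new EP11 request blocks above are driven through the ZSENDEP11CPRB ioctl added in the following hunk. A rough user-space sketch of issuing such a request is shown below; the device node name, the example adapter/domain ids and the opaque request/response payload handling are assumptions and not part of this patch, and a real caller must place a valid EP11 CPRB in the request buffer:

#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>		/* struct ep11_urb, struct ep11_target_dev, ZSENDEP11CPRB */

int send_ep11_cprb(const void *req, size_t req_len, void *resp, size_t resp_len)
{
	struct ep11_target_dev target = { .ap_id = 4, .dom_id = 5 };	/* example adapter/domain */
	struct ep11_urb urb;
	int fd, rc;

	fd = open("/dev/z90crypt", O_RDWR);	/* assumed zcrypt device node */
	if (fd < 0)
		return -1;

	memset(&urb, 0, sizeof(urb));
	urb.targets_num = 1;
	urb.targets  = (uint64_t)(unsigned long)&target;
	urb.req_len  = req_len;
	urb.req      = (uint64_t)(unsigned long)req;	/* pre-built EP11 CPRB */
	urb.resp_len = resp_len;
	urb.resp     = (uint64_t)(unsigned long)resp;

	rc = ioctl(fd, ZSENDEP11CPRB, &urb);	/* ioctl number defined in the next hunk */
	close(fd);
	return rc;
}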
@@ -256,6 +320,7 @@ struct ica_xcRB { #define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) +#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) /* New status calls */ #define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int) diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 2403303cfed7..1b3ac09c11b6 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -60,7 +60,8 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o ifdef CONFIG_64BIT -obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ + perf_cpum_cf_events.o obj-y += runtime_instr.o cache.o endif diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 95e7ba0fbb7e..8b84bc373e94 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -412,8 +412,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE; } else { regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; - err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, - (u16 __force __user *)(frame->retcode)); + if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, + (u16 __force __user *)(frame->retcode))) + goto give_sigsegv; } /* Set up backchain. */ diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index e5b43c97a834..384e609b4711 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -74,7 +74,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ .endm .macro LPP newpp -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP jz .+8 .insn s,0xb2800000,\newpp @@ -82,7 +82,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ .endm .macro HANDLE_SIE_INTERCEPT scratch,reason -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) tmhh %r8,0x0001 # interrupting from user ? jnz .+62 lgr \scratch,%r9 @@ -946,7 +946,7 @@ cleanup_idle_insn: .quad __critical_end - __critical_start -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) /* * sie64a calling convention: * %r2 pointer to sie control block @@ -975,7 +975,7 @@ sie_done: lctlg %c1,%c1,__LC_USER_ASCE # load primary asce # some program checks are suppressing. C code (e.g. do_protection_exception) # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other -# instructions beween sie64a and sie_done should not cause program +# instructions between sie64a and sie_done should not cause program # interrupts. So lets use a nop (47 00 00 00) as a landing pad. 
# See also HANDLE_SIE_INTERCEPT rewind_pad: diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 1105502bf6e9..f51214c04858 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -680,6 +680,7 @@ static int __init cpumf_pmu_init(void) goto out; } + cpumf_pmu.attr_groups = cpumf_cf_event_group(); rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); if (rc) { pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c new file mode 100644 index 000000000000..4554a4bae39e --- /dev/null +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -0,0 +1,322 @@ +/* + * Perf PMU sysfs events attributes for available CPU-measurement counters + * + */ + +#include <linux/slab.h> +#include <linux/perf_event.h> + + +/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */ + +CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000); +CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001); +CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002); +CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025); +CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004); +CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005); +CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040); +CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041); +CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042); +CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043); +CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044); +CPUMF_EVENT_ATTR(cf, SHA_CYCLES, 0x0045); +CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046); +CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047); +CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048); +CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049); +CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a); +CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b); +CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c); +CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d); +CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e); +CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f); +CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080); +CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081); +CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082); +CPUMF_EVENT_ATTR(cf_z10, L1D_L3_LOCAL_WRITES, 0x0083); +CPUMF_EVENT_ATTR(cf_z10, L1I_L3_REMOTE_WRITES, 0x0084); +CPUMF_EVENT_ATTR(cf_z10, L1D_L3_REMOTE_WRITES, 0x0085); +CPUMF_EVENT_ATTR(cf_z10, L1D_LMEM_SOURCED_WRITES, 0x0086); +CPUMF_EVENT_ATTR(cf_z10, L1I_LMEM_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_z10, L1D_RO_EXCL_WRITES, 0x0088); +CPUMF_EVENT_ATTR(cf_z10, L1I_CACHELINE_INVALIDATES, 0x0089); +CPUMF_EVENT_ATTR(cf_z10, ITLB1_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_z10, DTLB1_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_z10, TLB2_PTE_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_z10, ITLB1_MISSES, 0x0091); +CPUMF_EVENT_ATTR(cf_z10, DTLB1_MISSES, 0x0092); +CPUMF_EVENT_ATTR(cf_z10, L2C_STORES_SENT, 0x0093); +CPUMF_EVENT_ATTR(cf_z196, L1D_L2_SOURCED_WRITES, 0x0080); +CPUMF_EVENT_ATTR(cf_z196, L1I_L2_SOURCED_WRITES, 0x0081); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_MISSES, 0x0082); 
+CPUMF_EVENT_ATTR(cf_z196, ITLB1_MISSES, 0x0083); +CPUMF_EVENT_ATTR(cf_z196, L2C_STORES_SENT, 0x0085); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0086); +CPUMF_EVENT_ATTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES, 0x0088); +CPUMF_EVENT_ATTR(cf_z196, L1D_RO_EXCL_WRITES, 0x0089); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_HPAGE_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_z196, L1D_LMEM_SOURCED_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_z196, L1I_LMEM_SOURCED_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x008f); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_WRITES, 0x0090); +CPUMF_EVENT_ATTR(cf_z196, ITLB1_WRITES, 0x0091); +CPUMF_EVENT_ATTR(cf_z196, TLB2_PTE_WRITES, 0x0092); +CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES, 0x0093); +CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_WRITES, 0x0094); +CPUMF_EVENT_ATTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0096); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0098); +CPUMF_EVENT_ATTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009b); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_MISSES, 0x0080); +CPUMF_EVENT_ATTR(cf_zec12, ITLB1_MISSES, 0x0081); +CPUMF_EVENT_ATTR(cf_zec12, L1D_L2I_SOURCED_WRITES, 0x0082); +CPUMF_EVENT_ATTR(cf_zec12, L1I_L2I_SOURCED_WRITES, 0x0083); +CPUMF_EVENT_ATTR(cf_zec12, L1D_L2D_SOURCED_WRITES, 0x0084); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_WRITES, 0x0085); +CPUMF_EVENT_ATTR(cf_zec12, L1D_LMEM_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_zec12, L1I_LMEM_SOURCED_WRITES, 0x0089); +CPUMF_EVENT_ATTR(cf_zec12, L1D_RO_EXCL_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_HPAGE_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_zec12, ITLB1_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_PTE_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_WRITES, 0x008f); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0091); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0092); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0093); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x0094); +CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TEND, 0x0095); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0096); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV, 0x0097); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV, 0x0098); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009a); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x009b); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES, 0x009c); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x009d); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TEND, 0x009e); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x009f); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV, 0x00a0); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1); +CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); + +static struct attribute *cpumcf_pmu_event_attr[] = { + CPUMF_EVENT_PTR(cf, CPU_CYCLES), + CPUMF_EVENT_PTR(cf, 
INSTRUCTIONS), + CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES), + CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES), + CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS), + CPUMF_EVENT_PTR(cf, PRNG_CYCLES), + CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS), + CPUMF_EVENT_PTR(cf, SHA_CYCLES), + CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS), + CPUMF_EVENT_PTR(cf, DEA_CYCLES), + CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, AES_FUNCTIONS), + CPUMF_EVENT_PTR(cf, AES_CYCLES), + CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES), + NULL, +}; + +static struct attribute *cpumcf_z10_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_z10, L1I_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_L3_LOCAL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L3_LOCAL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_L3_REMOTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L3_REMOTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_CACHELINE_INVALIDATES), + CPUMF_EVENT_PTR(cf_z10, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_z10, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z10, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_z10, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_z10, L2C_STORES_SENT), + NULL, +}; + +static struct attribute *cpumcf_z196_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_z196, L1D_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_z196, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_z196, L2C_STORES_SENT), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_z196, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES), + NULL, +}; + +static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_zec12, DTLB1_MISSES), + 
CPUMF_EVENT_PTR(cf_zec12, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_zec12, L1D_L2I_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_L2I_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_L2D_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_zec12, DTLB1_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TX_NC_TEND), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TEND), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, TX_NC_TABORT), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_NO_SPECIAL), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_SPECIAL), + NULL, +}; + +/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */ + +static struct attribute_group cpumsf_pmu_events_group = { + .name = "events", + .attrs = cpumcf_pmu_event_attr, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *cpumsf_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cpumsf_pmu_format_group = { + .name = "format", + .attrs = cpumsf_pmu_format_attr, +}; + +static const struct attribute_group *cpumsf_pmu_attr_groups[] = { + &cpumsf_pmu_events_group, + &cpumsf_pmu_format_group, + NULL, +}; + + +static __init struct attribute **merge_attr(struct attribute **a, + struct attribute **b) +{ + struct attribute **new; + int j, i; + + for (j = 0; a[j]; j++) + ; + for (i = 0; b[i]; i++) + j++; + j++; + + new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL); + if (!new) + return NULL; + j = 0; + for (i = 0; a[i]; i++) + new[j++] = a[i]; + for (i = 0; b[i]; i++) + new[j++] = b[i]; + new[j] = NULL; + + return new; +} + +__init const struct attribute_group **cpumf_cf_event_group(void) +{ + struct attribute **combined, **model; + struct cpuid cpu_id; + + get_cpu_id(&cpu_id); + switch (cpu_id.machine) { + case 0x2097: + case 0x2098: + model = cpumcf_z10_pmu_event_attr; + break; + case 0x2817: + case 0x2818: + model = cpumcf_z196_pmu_event_attr; + break; + case 0x2827: + case 0x2828: + model = cpumcf_zec12_pmu_event_attr; + break; + default: + model = NULL; + break; + }; + + if (!model) + goto out; + + combined = merge_attr(cpumcf_pmu_event_attr, model); + if (combined) + cpumsf_pmu_events_group.attrs = combined; +out: + return cpumsf_pmu_attr_groups; +} diff --git 
a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c new file mode 100644 index 000000000000..6c0d29827cb6 --- /dev/null +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -0,0 +1,1641 @@ +/* + * Performance event support for the System z CPU-measurement Sampling Facility + * + * Copyright IBM Corp. 2013 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + */ +#define KMSG_COMPONENT "cpum_sf" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/kernel_stat.h> +#include <linux/perf_event.h> +#include <linux/percpu.h> +#include <linux/notifier.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/moduleparam.h> +#include <asm/cpu_mf.h> +#include <asm/irq.h> +#include <asm/debug.h> +#include <asm/timex.h> + +/* Minimum number of sample-data-block-tables: + * At least one table is required for the sampling buffer structure. + * A single table contains up to 511 pointers to sample-data-blocks. + */ +#define CPUM_SF_MIN_SDBT 1 + +/* Number of sample-data-blocks per sample-data-block-table (SDBT): + * A table contains SDB pointers (8 bytes) and one table-link entry + * that points to the origin of the next SDBT. + */ +#define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8) + +/* Maximum page offset for an SDBT table-link entry: + * If this page offset is reached, a table-link entry to the next SDBT + * must be added. + */ +#define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8) +static inline int require_table_link(const void *sdbt) +{ + return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; +} + +/* Minimum and maximum sampling buffer sizes: + * + * This number represents the maximum size of the sampling buffer taking + * the number of sample-data-block-tables into account. Note that these + * numbers apply to the basic-sampling function only. + * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if + * the diagnostic-sampling function is active. 
+ * + * Sampling buffer size Buffer characteristics + * --------------------------------------------------- + * 64KB == 16 pages (4KB per page) + * 1 page for SDB-tables + * 15 pages for SDBs + * + * 32MB == 8192 pages (4KB per page) + * 16 pages for SDB-tables + * 8176 pages for SDBs + */ +static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15; +static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176; +static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1; + +struct sf_buffer { + unsigned long *sdbt; /* Sample-data-block-table origin */ + /* buffer characteristics (required for buffer increments) */ + unsigned long num_sdb; /* Number of sample-data-blocks */ + unsigned long num_sdbt; /* Number of sample-data-block-tables */ + unsigned long *tail; /* last sample-data-block-table */ +}; + +struct cpu_hw_sf { + /* CPU-measurement sampling information block */ + struct hws_qsi_info_block qsi; + /* CPU-measurement sampling control block */ + struct hws_lsctl_request_block lsctl; + struct sf_buffer sfb; /* Sampling buffer */ + unsigned int flags; /* Status flags */ + struct perf_event *event; /* Scheduled perf event */ +}; +static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf); + +/* Debug feature */ +static debug_info_t *sfdbg; + +/* + * sf_disable() - Switch off sampling facility + */ +static int sf_disable(void) +{ + struct hws_lsctl_request_block sreq; + + memset(&sreq, 0, sizeof(sreq)); + return lsctl(&sreq); +} + +/* + * sf_buffer_available() - Check for an allocated sampling buffer + */ +static int sf_buffer_available(struct cpu_hw_sf *cpuhw) +{ + return !!cpuhw->sfb.sdbt; +} + +/* + * deallocate sampling facility buffer + */ +static void free_sampling_buffer(struct sf_buffer *sfb) +{ + unsigned long *sdbt, *curr; + + if (!sfb->sdbt) + return; + + sdbt = sfb->sdbt; + curr = sdbt; + + /* Free the SDBT after all SDBs are processed... */ + while (1) { + if (!*curr || !sdbt) + break; + + /* Process table-link entries */ + if (is_link_entry(curr)) { + curr = get_next_sdbt(curr); + if (sdbt) + free_page((unsigned long) sdbt); + + /* If the origin is reached, sampling buffer is freed */ + if (curr == sfb->sdbt) + break; + else + sdbt = curr; + } else { + /* Process SDB pointer */ + if (*curr) { + free_page(*curr); + curr++; + } + } + } + + debug_sprintf_event(sfdbg, 5, + "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt); + memset(sfb, 0, sizeof(*sfb)); +} + +static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) +{ + unsigned long sdb, *trailer; + + /* Allocate and initialize sample-data-block */ + sdb = get_zeroed_page(gfp_flags); + if (!sdb) + return -ENOMEM; + trailer = trailer_entry_ptr(sdb); + *trailer = SDB_TE_ALERT_REQ_MASK; + + /* Link SDB into the sample-data-block-table */ + *sdbt = sdb; + + return 0; +} + +/* + * realloc_sampling_buffer() - extend sampler memory + * + * Allocates new sample-data-blocks and adds them to the specified sampling + * buffer memory. + * + * Important: This modifies the sampling buffer and must be called when the + * sampling facility is disabled. + * + * Returns zero on success, non-zero otherwise. + */ +static int realloc_sampling_buffer(struct sf_buffer *sfb, + unsigned long num_sdb, gfp_t gfp_flags) +{ + int i, rc; + unsigned long *new, *tail; + + if (!sfb->sdbt || !sfb->tail) + return -EINVAL; + + if (!is_link_entry(sfb->tail)) + return -EINVAL; + + /* Append to the existing sampling buffer, overwriting the table-link + * register. 
+ * The tail variable always points to the "tail" (last and table-link) + * entry in an SDB-table. + */ + tail = sfb->tail; + + /* Do a sanity check whether the table-link entry points to + * the sampling buffer origin. + */ + if (sfb->sdbt != get_next_sdbt(tail)) { + debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: " + "sampling buffer is not linked: origin=%p" + "tail=%p\n", + (void *) sfb->sdbt, (void *) tail); + return -EINVAL; + } + + /* Allocate remaining SDBs */ + rc = 0; + for (i = 0; i < num_sdb; i++) { + /* Allocate a new SDB-table if it is full. */ + if (require_table_link(tail)) { + new = (unsigned long *) get_zeroed_page(gfp_flags); + if (!new) { + rc = -ENOMEM; + break; + } + sfb->num_sdbt++; + /* Link current page to tail of chain */ + *tail = (unsigned long)(void *) new + 1; + tail = new; + } + + /* Allocate a new sample-data-block. + * If there is not enough memory, stop the realloc process + * and simply use what was allocated. If this is a temporary + * issue, a new realloc call (if required) might succeed. + */ + rc = alloc_sample_data_block(tail, gfp_flags); + if (rc) + break; + sfb->num_sdb++; + tail++; + } + + /* Link sampling buffer to its origin */ + *tail = (unsigned long) sfb->sdbt + 1; + sfb->tail = tail; + + debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer" + " settings: sdbt=%lu sdb=%lu\n", + sfb->num_sdbt, sfb->num_sdb); + return rc; +} + +/* + * allocate_sampling_buffer() - allocate sampler memory + * + * Allocates and initializes a sampling buffer structure using the + * specified number of sample-data-blocks (SDB). For each allocation, + * a 4K page is used. The number of sample-data-block-tables (SDBT) + * is calculated from SDBs. + * Also set the ALERT_REQ mask in each SDB's trailer. + * + * Returns zero on success, non-zero otherwise. + */ +static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb) +{ + int rc; + + if (sfb->sdbt) + return -EINVAL; + + /* Allocate the sample-data-block-table origin */ + sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); + if (!sfb->sdbt) + return -ENOMEM; + sfb->num_sdb = 0; + sfb->num_sdbt = 1; + + /* Link the table origin to point to itself to prepare for + * realloc_sampling_buffer() invocation. + */ + sfb->tail = sfb->sdbt; + *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1; + + /* Allocate requested number of sample-data-blocks */ + rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); + if (rc) { + free_sampling_buffer(sfb); + debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: " + "realloc_sampling_buffer failed with rc=%i\n", rc); + } else + debug_sprintf_event(sfdbg, 4, + "alloc_sampling_buffer: tear=%p dear=%p\n", + sfb->sdbt, (void *) *sfb->sdbt); + return rc; +} + +static void sfb_set_limits(unsigned long min, unsigned long max) +{ + struct hws_qsi_info_block si; + + CPUM_SF_MIN_SDB = min; + CPUM_SF_MAX_SDB = max; + + memset(&si, 0, sizeof(si)); + if (!qsi(&si)) + CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); +} + +static unsigned long sfb_max_limit(struct hw_perf_event *hwc) +{ + return SAMPL_DIAG_MODE(hwc) ? 
CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR + : CPUM_SF_MAX_SDB; +} + +static unsigned long sfb_pending_allocs(struct sf_buffer *sfb, + struct hw_perf_event *hwc) +{ + if (!sfb->sdbt) + return SFB_ALLOC_REG(hwc); + if (SFB_ALLOC_REG(hwc) > sfb->num_sdb) + return SFB_ALLOC_REG(hwc) - sfb->num_sdb; + return 0; +} + +static int sfb_has_pending_allocs(struct sf_buffer *sfb, + struct hw_perf_event *hwc) +{ + return sfb_pending_allocs(sfb, hwc) > 0; +} + +static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc) +{ + /* Limit the number of SDBs to not exceed the maximum */ + num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc)); + if (num) + SFB_ALLOC_REG(hwc) += num; +} + +static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc) +{ + SFB_ALLOC_REG(hwc) = 0; + sfb_account_allocs(num, hwc); +} + +static size_t event_sample_size(struct hw_perf_event *hwc) +{ + struct sf_raw_sample *sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc); + size_t sample_size; + + /* The sample size depends on the sampling function: The basic-sampling + * function must be always enabled, diagnostic-sampling function is + * optional. + */ + sample_size = sfr->bsdes; + if (SAMPL_DIAG_MODE(hwc)) + sample_size += sfr->dsdes; + + return sample_size; +} + +static void deallocate_buffers(struct cpu_hw_sf *cpuhw) +{ + if (cpuhw->sfb.sdbt) + free_sampling_buffer(&cpuhw->sfb); +} + +static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) +{ + unsigned long n_sdb, freq, factor; + size_t sfr_size, sample_size; + struct sf_raw_sample *sfr; + + /* Allocate raw sample buffer + * + * The raw sample buffer is used to temporarily store sampling data + * entries for perf raw sample processing. The buffer size mainly + * depends on the size of diagnostic-sampling data entries which is + * machine-specific. The exact size calculation includes: + * 1. The first 4 bytes of diagnostic-sampling data entries are + * already reflected in the sf_raw_sample structure. Subtract + * these bytes. + * 2. The perf raw sample data must be 8-byte aligned (u64) and + * perf's internal data size must be considered too. So add + * an additional u32 for correct alignment and subtract before + * allocating the buffer. + * 3. Store the raw sample buffer pointer in the perf event + * hardware structure. + */ + sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) + + sizeof(u32), sizeof(u64)); + sfr_size -= sizeof(u32); + sfr = kzalloc(sfr_size, GFP_KERNEL); + if (!sfr) + return -ENOMEM; + sfr->size = sfr_size; + sfr->bsdes = cpuhw->qsi.bsdes; + sfr->dsdes = cpuhw->qsi.dsdes; + RAWSAMPLE_REG(hwc) = (unsigned long) sfr; + + /* Calculate sampling buffers using 4K pages + * + * 1. Determine the sample data size which depends on the used + * sampling functions, for example, basic-sampling or + * basic-sampling with diagnostic-sampling. + * + * 2. Use the sampling frequency as input. The sampling buffer is + * designed for almost one second. This can be adjusted through + * the "factor" variable. + * In any case, alloc_sampling_buffer() sets the Alert Request + * Control indicator to trigger a measurement-alert to harvest + * sample-data-blocks (sdb). + * + * 3. Compute the number of sample-data-blocks and ensure a minimum + * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not + * exceed a "calculated" maximum. The symbolic maximum is + * designed for basic-sampling only and needs to be increased if + * diagnostic-sampling is active. 
+ * See also the remarks for these symbolic constants. + * + * 4. Compute the number of sample-data-block-tables (SDBT) and + * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up + * to 511 SDBs). + */ + sample_size = event_sample_size(hwc); + freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); + factor = 1; + n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size)); + if (n_sdb < CPUM_SF_MIN_SDB) + n_sdb = CPUM_SF_MIN_SDB; + + /* If there is already a sampling buffer allocated, it is very likely + * that the sampling facility is enabled too. If the event to be + * initialized requires a greater sampling buffer, the allocation must + * be postponed. Changing the sampling buffer requires the sampling + * facility to be in the disabled state. So, account the number of + * required SDBs and let cpumsf_pmu_enable() resize the buffer just + * before the event is started. + */ + sfb_init_allocs(n_sdb, hwc); + if (sf_buffer_available(cpuhw)) + return 0; + + debug_sprintf_event(sfdbg, 3, + "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu" + " sample_size=%lu cpuhw=%p\n", + SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), + sample_size, cpuhw); + + return alloc_sampling_buffer(&cpuhw->sfb, + sfb_pending_allocs(&cpuhw->sfb, hwc)); +} + +static unsigned long min_percent(unsigned int percent, unsigned long base, + unsigned long min) +{ + return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100)); +} + +static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base) +{ + /* Use a percentage-based approach to extend the sampling facility + * buffer. Accept up to 5% sample data loss. + * Vary the extents between 1% to 5% of the current number of + * sample-data-blocks. + */ + if (ratio <= 5) + return 0; + if (ratio <= 25) + return min_percent(1, base, 1); + if (ratio <= 50) + return min_percent(1, base, 1); + if (ratio <= 75) + return min_percent(2, base, 2); + if (ratio <= 100) + return min_percent(3, base, 3); + if (ratio <= 250) + return min_percent(4, base, 4); + + return min_percent(5, base, 8); +} + +static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, + struct hw_perf_event *hwc) +{ + unsigned long ratio, num; + + if (!OVERFLOW_REG(hwc)) + return; + + /* The sample_overflow contains the average number of sample data + * that has been lost because sample-data-blocks were full. + * + * Calculate the total number of sample data entries that has been + * discarded. Then calculate the ratio of lost samples to total samples + * per second in percent. + */ + ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb, + sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc))); + + /* Compute number of sample-data-blocks */ + num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb); + if (num) + sfb_account_allocs(num, hwc); + + debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu" + " num=%lu\n", OVERFLOW_REG(hwc), ratio, num); + OVERFLOW_REG(hwc) = 0; +} + +/* extend_sampling_buffer() - Extend sampling buffer + * @sfb: Sampling buffer structure (for local CPU) + * @hwc: Perf event hardware structure + * + * Use this function to extend the sampling buffer based on the overflow counter + * and postponed allocation extents stored in the specified Perf event hardware. + * + * Important: This function disables the sampling facility in order to safely + * change the sampling buffer structure. Do not call this function + * when the PMU is active. 
+ */ +static void extend_sampling_buffer(struct sf_buffer *sfb, + struct hw_perf_event *hwc) +{ + unsigned long num, num_old; + int rc; + + num = sfb_pending_allocs(sfb, hwc); + if (!num) + return; + num_old = sfb->num_sdb; + + /* Disable the sampling facility to reset any states and also + * clear pending measurement alerts. + */ + sf_disable(); + + /* Extend the sampling buffer. + * This memory allocation typically happens in an atomic context when + * called by perf. Because this is a reallocation, it is fine if the + * new SDB-request cannot be satisfied immediately. + */ + rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); + if (rc) + debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc " + "failed with rc=%i\n", rc); + + if (sfb_has_pending_allocs(sfb, hwc)) + debug_sprintf_event(sfdbg, 5, "sfb: extend: " + "req=%lu alloc=%lu remaining=%lu\n", + num, sfb->num_sdb - num_old, + sfb_pending_allocs(sfb, hwc)); +} + + +/* Number of perf events counting hardware events */ +static atomic_t num_events; +/* Used to avoid races in calling reserve/release_cpumf_hardware */ +static DEFINE_MUTEX(pmc_reserve_mutex); + +#define PMC_INIT 0 +#define PMC_RELEASE 1 +#define PMC_FAILURE 2 +static void setup_pmc_cpu(void *flags) +{ + int err; + struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf); + + err = 0; + switch (*((int *) flags)) { + case PMC_INIT: + memset(cpusf, 0, sizeof(*cpusf)); + err = qsi(&cpusf->qsi); + if (err) + break; + cpusf->flags |= PMU_F_RESERVED; + err = sf_disable(); + if (err) + pr_err("Switching off the sampling facility failed " + "with rc=%i\n", err); + debug_sprintf_event(sfdbg, 5, + "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf); + break; + case PMC_RELEASE: + cpusf->flags &= ~PMU_F_RESERVED; + err = sf_disable(); + if (err) { + pr_err("Switching off the sampling facility failed " + "with rc=%i\n", err); + } else + deallocate_buffers(cpusf); + debug_sprintf_event(sfdbg, 5, + "setup_pmc_cpu: released: cpuhw=%p\n", cpusf); + break; + } + if (err) + *((int *) flags) |= PMC_FAILURE; +} + +static void release_pmc_hardware(void) +{ + int flags = PMC_RELEASE; + + irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + on_each_cpu(setup_pmc_cpu, &flags, 1); + perf_release_sampling(); +} + +static int reserve_pmc_hardware(void) +{ + int flags = PMC_INIT; + int err; + + err = perf_reserve_sampling(); + if (err) + return err; + on_each_cpu(setup_pmc_cpu, &flags, 1); + if (flags & PMC_FAILURE) { + release_pmc_hardware(); + return -ENODEV; + } + irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); + + return 0; +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + /* Free raw sample buffer */ + if (RAWSAMPLE_REG(&event->hw)) + kfree((void *) RAWSAMPLE_REG(&event->hw)); + + /* Release PMC if this is the last perf event */ + if (!atomic_add_unless(&num_events, -1, 1)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_dec_return(&num_events) == 0) + release_pmc_hardware(); + mutex_unlock(&pmc_reserve_mutex); + } +} + +static void hw_init_period(struct hw_perf_event *hwc, u64 period) +{ + hwc->sample_period = period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); +} + +static void hw_reset_registers(struct hw_perf_event *hwc, + unsigned long *sdbt_origin) +{ + struct sf_raw_sample *sfr; + + /* (Re)set to first sample-data-block-table */ + TEAR_REG(hwc) = (unsigned long) sdbt_origin; + + /* (Re)set raw sampling buffer register */ + sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc); + memset(&sfr->basic, 0, 
sizeof(sfr->basic)); + memset(&sfr->diag, 0, sfr->dsdes); +} + +static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si, + unsigned long rate) +{ + return clamp_t(unsigned long, rate, + si->min_sampl_rate, si->max_sampl_rate); +} + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct cpu_hw_sf *cpuhw; + struct hws_qsi_info_block si; + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + unsigned long rate; + int cpu, err; + + /* Reserve CPU-measurement sampling facility */ + err = 0; + if (!atomic_inc_not_zero(&num_events)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) + err = -EBUSY; + else + atomic_inc(&num_events); + mutex_unlock(&pmc_reserve_mutex); + } + event->destroy = hw_perf_event_destroy; + + if (err) + goto out; + + /* Access per-CPU sampling information (query sampling info) */ + /* + * The event->cpu value can be -1 to count on every CPU, for example, + * when attaching to a task. If this is specified, use the query + * sampling info from the current CPU, otherwise use event->cpu to + * retrieve the per-CPU information. + * Later, cpuhw indicates whether to allocate sampling buffers for a + * particular CPU (cpuhw!=NULL) or each online CPU (cpuw==NULL). + */ + memset(&si, 0, sizeof(si)); + cpuhw = NULL; + if (event->cpu == -1) + qsi(&si); + else { + /* Event is pinned to a particular CPU, retrieve the per-CPU + * sampling structure for accessing the CPU-specific QSI. + */ + cpuhw = &per_cpu(cpu_hw_sf, event->cpu); + si = cpuhw->qsi; + } + + /* Check sampling facility authorization and, if not authorized, + * fall back to other PMUs. It is safe to check any CPU because + * the authorization is identical for all configured CPUs. + */ + if (!si.as) { + err = -ENOENT; + goto out; + } + + /* Always enable basic sampling */ + SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE; + + /* Check if diagnostic sampling is requested. Deny if the required + * sampling authorization is missing. + */ + if (attr->config == PERF_EVENT_CPUM_SF_DIAG) { + if (!si.ad) { + err = -EPERM; + goto out; + } + SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE; + } + + /* Check and set other sampling flags */ + if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS) + SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS; + + /* The sampling information (si) contains information about the + * min/max sampling intervals and the CPU speed. So calculate the + * correct sampling interval and avoid the whole period adjust + * feedback loop. + */ + rate = 0; + if (attr->freq) { + rate = freq_to_sample_rate(&si, attr->sample_freq); + rate = hw_limit_rate(&si, rate); + attr->freq = 0; + attr->sample_period = rate; + } else { + /* The min/max sampling rates specifies the valid range + * of sample periods. If the specified sample period is + * out of range, limit the period to the range boundary. + */ + rate = hw_limit_rate(&si, hwc->sample_period); + + /* The perf core maintains a maximum sample rate that is + * configurable through the sysctl interface. Ensure the + * sampling rate does not exceed this value. This also helps + * to avoid throttling when pushing samples with + * perf_event_overflow(). 
+ */ + if (sample_rate_to_freq(&si, rate) > + sysctl_perf_event_sample_rate) { + err = -EINVAL; + debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n"); + goto out; + } + } + SAMPL_RATE(hwc) = rate; + hw_init_period(hwc, SAMPL_RATE(hwc)); + + /* Initialize sample data overflow accounting */ + hwc->extra_reg.reg = REG_OVERFLOW; + OVERFLOW_REG(hwc) = 0; + + /* Allocate the per-CPU sampling buffer using the CPU information + * from the event. If the event is not pinned to a particular + * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling + * buffers for each online CPU. + */ + if (cpuhw) + /* Event is pinned to a particular CPU */ + err = allocate_buffers(cpuhw, hwc); + else { + /* Event is not pinned, allocate sampling buffer on + * each online CPU + */ + for_each_online_cpu(cpu) { + cpuhw = &per_cpu(cpu_hw_sf, cpu); + err = allocate_buffers(cpuhw, hwc); + if (err) + break; + } + } +out: + return err; +} + +static int cpumsf_pmu_event_init(struct perf_event *event) +{ + int err; + + /* No support for taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + if ((event->attr.config != PERF_EVENT_CPUM_SF) && + (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) + return -ENOENT; + break; + case PERF_TYPE_HARDWARE: + /* Support sampling of CPU cycles in addition to the + * counter facility. However, the counter facility + * is more precise and, hence, restrict this PMU to + * sampling events only. + */ + if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) + return -ENOENT; + if (!is_sampling_event(event)) + return -ENOENT; + break; + default: + return -ENOENT; + } + + /* Check online status of the CPU to which the event is pinned */ + if (event->cpu >= nr_cpumask_bits || + (event->cpu >= 0 && !cpu_online(event->cpu))) + return -ENODEV; + + /* Force reset of idle/hv excludes regardless of what the + * user requested. + */ + if (event->attr.exclude_hv) + event->attr.exclude_hv = 0; + if (event->attr.exclude_idle) + event->attr.exclude_idle = 0; + + err = __hw_perf_event_init(event); + if (unlikely(err)) + if (event->destroy) + event->destroy(event); + return err; +} + +static void cpumsf_pmu_enable(struct pmu *pmu) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + struct hw_perf_event *hwc; + int err; + + if (cpuhw->flags & PMU_F_ENABLED) + return; + + if (cpuhw->flags & PMU_F_ERR_MASK) + return; + + /* Check whether to extent the sampling buffer. + * + * Two conditions trigger an increase of the sampling buffer for a + * perf event: + * 1. Postponed buffer allocations from the event initialization. + * 2. Sampling overflows that contribute to pending allocations. + * + * Note that the extend_sampling_buffer() function disables the sampling + * facility, but it can be fully re-enabled using sampling controls that + * have been saved in cpumsf_pmu_disable(). 
+ */ + if (cpuhw->event) { + hwc = &cpuhw->event->hw; + /* Account number of overflow-designated buffer extents */ + sfb_account_overflows(cpuhw, hwc); + if (sfb_has_pending_allocs(&cpuhw->sfb, hwc)) + extend_sampling_buffer(&cpuhw->sfb, hwc); + } + + /* (Re)enable the PMU and sampling facility */ + cpuhw->flags |= PMU_F_ENABLED; + barrier(); + + err = lsctl(&cpuhw->lsctl); + if (err) { + cpuhw->flags &= ~PMU_F_ENABLED; + pr_err("Loading sampling controls failed: op=%i err=%i\n", + 1, err); + return; + } + + debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i " + "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs, + cpuhw->lsctl.ed, cpuhw->lsctl.cd, + (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear); +} + +static void cpumsf_pmu_disable(struct pmu *pmu) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + struct hws_lsctl_request_block inactive; + struct hws_qsi_info_block si; + int err; + + if (!(cpuhw->flags & PMU_F_ENABLED)) + return; + + if (cpuhw->flags & PMU_F_ERR_MASK) + return; + + /* Switch off sampling activation control */ + inactive = cpuhw->lsctl; + inactive.cs = 0; + inactive.cd = 0; + + err = lsctl(&inactive); + if (err) { + pr_err("Loading sampling controls failed: op=%i err=%i\n", + 2, err); + return; + } + + /* Save state of TEAR and DEAR register contents */ + if (!qsi(&si)) { + /* TEAR/DEAR values are valid only if the sampling facility is + * enabled. Note that cpumsf_pmu_disable() might be called even + * for a disabled sampling facility because cpumsf_pmu_enable() + * controls the enable/disable state. + */ + if (si.es) { + cpuhw->lsctl.tear = si.tear; + cpuhw->lsctl.dear = si.dear; + } + } else + debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: " + "qsi() failed with err=%i\n", err); + + cpuhw->flags &= ~PMU_F_ENABLED; +} + +/* perf_exclude_event() - Filter event + * @event: The perf event + * @regs: pt_regs structure + * @sde_regs: Sample-data-entry (sde) regs structure + * + * Filter perf events according to their exclude specification. + * + * Return non-zero if the event shall be excluded. + */ +static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, + struct perf_sf_sde_regs *sde_regs) +{ + if (event->attr.exclude_user && user_mode(regs)) + return 1; + if (event->attr.exclude_kernel && !user_mode(regs)) + return 1; + if (event->attr.exclude_guest && sde_regs->in_guest) + return 1; + if (event->attr.exclude_host && !sde_regs->in_guest) + return 1; + return 0; +} + +/* perf_push_sample() - Push samples to perf + * @event: The perf event + * @sample: Hardware sample data + * + * Use the hardware sample data to create a perf event sample. The sample + * is then pushed to the event subsystem and the function checks for + * possible event overflows. If an event overflow occurs, the PMU is + * stopped. + * + * Return non-zero if an event overflow occurred. + */ +static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) +{ + int overflow; + struct pt_regs regs; + struct perf_sf_sde_regs *sde_regs; + struct perf_sample_data data; + struct perf_raw_record raw; + + /* Setup perf sample */ + perf_sample_data_init(&data, 0, event->hw.last_period); + raw.size = sfr->size; + raw.data = sfr; + data.raw = &raw; + + /* Setup pt_regs to look like a CPU-measurement external interrupt + * using the Program Request Alert code. The regs.int_parm_long + * field which is unused contains additional sample-data-entry related + * indicators. 
+ */ + memset(&regs, 0, sizeof(regs)); + regs.int_code = 0x1407; + regs.int_parm = CPU_MF_INT_SF_PRA; + sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long; + + regs.psw.addr = sfr->basic.ia; + if (sfr->basic.T) + regs.psw.mask |= PSW_MASK_DAT; + if (sfr->basic.W) + regs.psw.mask |= PSW_MASK_WAIT; + if (sfr->basic.P) + regs.psw.mask |= PSW_MASK_PSTATE; + switch (sfr->basic.AS) { + case 0x0: + regs.psw.mask |= PSW_ASC_PRIMARY; + break; + case 0x1: + regs.psw.mask |= PSW_ASC_ACCREG; + break; + case 0x2: + regs.psw.mask |= PSW_ASC_SECONDARY; + break; + case 0x3: + regs.psw.mask |= PSW_ASC_HOME; + break; + } + + /* The host-program-parameter (hpp) contains the sie control + * block that is set by sie64a() in entry64.S. Check if hpp + * refers to a valid control block and set sde_regs flags + * accordingly. This would allow using hpp values for other + * purposes too. + * For now, simply use a non-zero value as guest indicator. + */ + if (sfr->basic.hpp) + sde_regs->in_guest = 1; + + overflow = 0; + if (perf_exclude_event(event, &regs, sde_regs)) + goto out; + if (perf_event_overflow(event, &data, &regs)) { + overflow = 1; + event->pmu->stop(event, 0); + } + perf_event_update_userpage(event); +out: + return overflow; +} + +static void perf_event_count_update(struct perf_event *event, u64 count) +{ + local64_add(count, &event->count); +} + +static int sample_format_is_valid(struct hws_combined_entry *sample, + unsigned int flags) +{ + if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) + /* Only basic-sampling data entries with data-entry-format + * version of 0x0001 can be processed. + */ + if (sample->basic.def != 0x0001) + return 0; + if (flags & PERF_CPUM_SF_DIAG_MODE) + /* The data-entry-format number of diagnostic-sampling data + * entries can vary. Because diagnostic data is just passed + * through, do only a sanity check on the DEF. + */ + if (sample->diag.def < 0x8001) + return 0; + return 1; +} + +static int sample_is_consistent(struct hws_combined_entry *sample, + unsigned long flags) +{ + /* This check applies only to basic-sampling data entries of potentially + * combined-sampling data entries. Invalid entries cannot be processed + * by the PMU and, thus, do not deliver an associated + * diagnostic-sampling data entry. + */ + if (unlikely(!(flags & PERF_CPUM_SF_BASIC_MODE))) + return 0; + /* + * Samples are skipped if they are invalid or if the instruction + * address is not predictable, i.e., the wait-state bit is + * set. + */ + if (sample->basic.I || sample->basic.W) + return 0; + return 1; +} + +static void reset_sample_slot(struct hws_combined_entry *sample, + unsigned long flags) +{ + if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) + sample->basic.def = 0; + if (flags & PERF_CPUM_SF_DIAG_MODE) + sample->diag.def = 0; +} + +static void sfr_store_sample(struct sf_raw_sample *sfr, + struct hws_combined_entry *sample) +{ + if (likely(sfr->format & PERF_CPUM_SF_BASIC_MODE)) + sfr->basic = sample->basic; + if (sfr->format & PERF_CPUM_SF_DIAG_MODE) + memcpy(&sfr->diag, &sample->diag, sfr->dsdes); +} + +static void debug_sample_entry(struct hws_combined_entry *sample, + struct hws_trailer_entry *te, + unsigned long flags) +{ + debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown " + "sampling data entry: te->f=%i basic.def=%04x (%p)" + " diag.def=%04x (%p)\n", te->f, + sample->basic.def, &sample->basic, + (flags & PERF_CPUM_SF_DIAG_MODE) + ? sample->diag.def : 0xFFFF, + (flags & PERF_CPUM_SF_DIAG_MODE) + ? 
&sample->diag : NULL); +} + +/* hw_collect_samples() - Walk through a sample-data-block and collect samples + * @event: The perf event + * @sdbt: Sample-data-block table + * @overflow: Event overflow counter + * + * Walks through a sample-data-block and collects sampling data entries that are + * then pushed to the perf event subsystem. Depending on the sampling function, + * there can be either basic-sampling or combined-sampling data entries. A + * combined-sampling data entry consists of a basic- and a diagnostic-sampling + * data entry. The sampling function is determined by the flags in the perf + * event hardware structure. The function always works with a combined-sampling + * data entry but ignores the diagnostic portion if it is not available. + * + * Note that the implementation focuses on basic-sampling data entries and, if + * such an entry is not valid, the entire combined-sampling data entry is + * ignored. + * + * The overflow variable counts the number of samples that have been discarded + * due to a perf event overflow. + */ +static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, + unsigned long long *overflow) +{ + unsigned long flags = SAMPL_FLAGS(&event->hw); + struct hws_combined_entry *sample; + struct hws_trailer_entry *te; + struct sf_raw_sample *sfr; + size_t sample_size; + + /* Prepare and initialize raw sample data */ + sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw); + sfr->format = flags & PERF_CPUM_SF_MODE_MASK; + + sample_size = event_sample_size(&event->hw); + te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); + sample = (struct hws_combined_entry *) *sdbt; + while ((unsigned long *) sample < (unsigned long *) te) { + /* Check for an empty sample */ + if (!sample->basic.def) + break; + + /* Update perf event period */ + perf_event_count_update(event, SAMPL_RATE(&event->hw)); + + /* Check sampling data entry */ + if (sample_format_is_valid(sample, flags)) { + /* If an event overflow occurred, the PMU is stopped to + * throttle event delivery. Remaining sample data is + * discarded. + */ + if (!*overflow) { + if (sample_is_consistent(sample, flags)) { + /* Deliver sample data to perf */ + sfr_store_sample(sfr, sample); + *overflow = perf_push_sample(event, sfr); + } + } else + /* Count discarded samples */ + *overflow += 1; + } else { + debug_sample_entry(sample, te, flags); + /* Sample slot is not yet written or other record. + * + * This condition can occur if the buffer was reused + * from a combined basic- and diagnostic-sampling. + * If only basic-sampling is then active, entries are + * written into the larger diagnostic entries. + * This is typically the case for sample-data-blocks + * that are not full. Stop processing if the first + * invalid format was detected. + */ + if (!te->f) + break; + } + + /* Reset sample slot and advance to next sample */ + reset_sample_slot(sample, flags); + sample += sample_size; + } +} + +/* hw_perf_event_update() - Process sampling buffer + * @event: The perf event + * @flush_all: Flag to also flush partially filled sample-data-blocks + * + * Processes the sampling buffer and creates perf event samples. + * The sampling buffer position is retrieved and saved in the TEAR_REG + * register of the specified perf event. + * + * Only full sample-data-blocks are processed. Specify the flush_all flag + * to also walk through partially filled sample-data-blocks. It is ignored + * if PERF_CPUM_SF_FULL_BLOCKS is set. 
The PERF_CPUM_SF_FULL_BLOCKS flag + * enforces the processing of full sample-data-blocks only (trailer entries + * with the block-full-indicator bit set). + */ +static void hw_perf_event_update(struct perf_event *event, int flush_all) +{ + struct hw_perf_event *hwc = &event->hw; + struct hws_trailer_entry *te; + unsigned long *sdbt; + unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; + int done; + + if (flush_all && SDB_FULL_BLOCKS(hwc)) + flush_all = 0; + + sdbt = (unsigned long *) TEAR_REG(hwc); + done = event_overflow = sampl_overflow = num_sdb = 0; + while (!done) { + /* Get the trailer entry of the sample-data-block */ + te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); + + /* Leave loop if no more work to do (block full indicator) */ + if (!te->f) { + done = 1; + if (!flush_all) + break; + } + + /* Check the sample overflow count */ + if (te->overflow) + /* Account sample overflows and, if a particular limit + * is reached, extend the sampling buffer. + * For details, see sfb_account_overflows(). + */ + sampl_overflow += te->overflow; + + /* Timestamps are valid for full sample-data-blocks only */ + debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p " + "overflow=%llu timestamp=0x%llx\n", + sdbt, te->overflow, + (te->f) ? trailer_timestamp(te) : 0ULL); + + /* Collect all samples from a single sample-data-block and + * flag if an (perf) event overflow happened. If so, the PMU + * is stopped and remaining samples will be discarded. + */ + hw_collect_samples(event, sdbt, &event_overflow); + num_sdb++; + + /* Reset trailer (using compare-double-and-swap) */ + do { + te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; + te_flags |= SDB_TE_ALERT_REQ_MASK; + } while (!cmpxchg_double(&te->flags, &te->overflow, + te->flags, te->overflow, + te_flags, 0ULL)); + + /* Advance to next sample-data-block */ + sdbt++; + if (is_link_entry(sdbt)) + sdbt = get_next_sdbt(sdbt); + + /* Update event hardware registers */ + TEAR_REG(hwc) = (unsigned long) sdbt; + + /* Stop processing sample-data if all samples of the current + * sample-data-block were flushed even if it was not full. + */ + if (flush_all && done) + break; + + /* If an event overflow happened, discard samples by + * processing any remaining sample-data-blocks. + */ + if (event_overflow) + flush_all = 1; + } + + /* Account sample overflows in the event hardware structure */ + if (sampl_overflow) + OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + + sampl_overflow, 1 + num_sdb); + if (sampl_overflow || event_overflow) + debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: " + "overflow stats: sample=%llu event=%llu\n", + sampl_overflow, event_overflow); +} + +static void cpumsf_pmu_read(struct perf_event *event) +{ + /* Nothing to do ... updates are interrupt-driven */ +} + +/* Activate sampling control. + * Next call of pmu_enable() starts sampling. + */ +static void cpumsf_pmu_start(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + perf_pmu_disable(event->pmu); + event->hw.state = 0; + cpuhw->lsctl.cs = 1; + if (SAMPL_DIAG_MODE(&event->hw)) + cpuhw->lsctl.cd = 1; + perf_pmu_enable(event->pmu); +} + +/* Deactivate sampling control. + * Next call of pmu_enable() stops sampling. 
+ */ +static void cpumsf_pmu_stop(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + if (event->hw.state & PERF_HES_STOPPED) + return; + + perf_pmu_disable(event->pmu); + cpuhw->lsctl.cs = 0; + cpuhw->lsctl.cd = 0; + event->hw.state |= PERF_HES_STOPPED; + + if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { + hw_perf_event_update(event, 1); + event->hw.state |= PERF_HES_UPTODATE; + } + perf_pmu_enable(event->pmu); +} + +static int cpumsf_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + int err; + + if (cpuhw->flags & PMU_F_IN_USE) + return -EAGAIN; + + if (!cpuhw->sfb.sdbt) + return -EINVAL; + + err = 0; + perf_pmu_disable(event->pmu); + + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + /* Set up sampling controls. Always program the sampling register + * using the SDB-table start. Reset TEAR_REG event hardware register + * that is used by hw_perf_event_update() to store the sampling buffer + * position after samples have been flushed. + */ + cpuhw->lsctl.s = 0; + cpuhw->lsctl.h = 1; + cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; + cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; + cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); + hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); + + /* Ensure sampling functions are in the disabled state. If disabled, + * switch on sampling enable control. */ + if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) { + err = -EAGAIN; + goto out; + } + cpuhw->lsctl.es = 1; + if (SAMPL_DIAG_MODE(&event->hw)) + cpuhw->lsctl.ed = 1; + + /* Set in_use flag and store event */ + event->hw.idx = 0; /* only one sampling event per CPU supported */ + cpuhw->event = event; + cpuhw->flags |= PMU_F_IN_USE; + + if (flags & PERF_EF_START) + cpumsf_pmu_start(event, PERF_EF_RELOAD); +out: + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); + return err; +} + +static void cpumsf_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + perf_pmu_disable(event->pmu); + cpumsf_pmu_stop(event, PERF_EF_UPDATE); + + cpuhw->lsctl.es = 0; + cpuhw->lsctl.ed = 0; + cpuhw->flags &= ~PMU_F_IN_USE; + cpuhw->event = NULL; + + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); +} + +static int cpumsf_pmu_event_idx(struct perf_event *event) +{ + return event->hw.idx; +} + +CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); +CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); + +static struct attribute *cpumsf_pmu_events_attr[] = { + CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), + CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), + NULL, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *cpumsf_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cpumsf_pmu_events_group = { + .name = "events", + .attrs = cpumsf_pmu_events_attr, +}; +static struct attribute_group cpumsf_pmu_format_group = { + .name = "format", + .attrs = cpumsf_pmu_format_attr, +}; +static const struct attribute_group *cpumsf_pmu_attr_groups[] = { + &cpumsf_pmu_events_group, + &cpumsf_pmu_format_group, + NULL, +}; + +static struct pmu cpumf_sampling = { + .pmu_enable = cpumsf_pmu_enable, + .pmu_disable = cpumsf_pmu_disable, + + .event_init = cpumsf_pmu_event_init, + .add = cpumsf_pmu_add, + .del = cpumsf_pmu_del, + + .start = cpumsf_pmu_start, + .stop = cpumsf_pmu_stop, + .read = cpumsf_pmu_read, + + .event_idx = 
cpumsf_pmu_event_idx, + .attr_groups = cpumsf_pmu_attr_groups, +}; + +static void cpumf_measurement_alert(struct ext_code ext_code, + unsigned int alert, unsigned long unused) +{ + struct cpu_hw_sf *cpuhw; + + if (!(alert & CPU_MF_INT_SF_MASK)) + return; + inc_irq_stat(IRQEXT_CMS); + cpuhw = &__get_cpu_var(cpu_hw_sf); + + /* Measurement alerts are shared and might happen when the PMU + * is not reserved. Ignore these alerts in this case. */ + if (!(cpuhw->flags & PMU_F_RESERVED)) + return; + + /* The processing below must take care of multiple alert events that + * might be indicated concurrently. */ + + /* Program alert request */ + if (alert & CPU_MF_INT_SF_PRA) { + if (cpuhw->flags & PMU_F_IN_USE) + hw_perf_event_update(cpuhw->event, 0); + else + WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE)); + } + + /* Report measurement alerts only for non-PRA codes */ + if (alert != CPU_MF_INT_SF_PRA) + debug_sprintf_event(sfdbg, 6, "measurement alert: 0x%x\n", alert); + + /* Sampling authorization change request */ + if (alert & CPU_MF_INT_SF_SACA) + qsi(&cpuhw->qsi); + + /* Loss of sample data due to high-priority machine activities */ + if (alert & CPU_MF_INT_SF_LSDA) { + pr_err("Sample data was lost\n"); + cpuhw->flags |= PMU_F_ERR_LSDA; + sf_disable(); + } + + /* Invalid sampling buffer entry */ + if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) { + pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n", + alert); + cpuhw->flags |= PMU_F_ERR_IBE; + sf_disable(); + } +} + +static int cpumf_pmu_notifier(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (long) hcpu; + int flags; + + /* Ignore the notification if no events are scheduled on the PMU. + * This might be racy... + */ + if (!atomic_read(&num_events)) + return NOTIFY_OK; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + flags = PMC_INIT; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + case CPU_DOWN_PREPARE: + flags = PMC_RELEASE; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) +{ + if (!cpum_sf_avail()) + return -ENODEV; + return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); +} + +static int param_set_sfb_size(const char *val, const struct kernel_param *kp) +{ + int rc; + unsigned long min, max; + + if (!cpum_sf_avail()) + return -ENODEV; + if (!val || !strlen(val)) + return -EINVAL; + + /* Valid parameter values: "min,max" or "max" */ + min = CPUM_SF_MIN_SDB; + max = CPUM_SF_MAX_SDB; + if (strchr(val, ',')) + rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 
0 : -EINVAL; + else + rc = kstrtoul(val, 10, &max); + + if (min < 2 || min >= max || max > get_num_physpages()) + rc = -EINVAL; + if (rc) + return rc; + + sfb_set_limits(min, max); + pr_info("The sampling buffer limits have changed to: " + "min=%lu max=%lu (diag=x%lu)\n", + CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR); + return 0; +} + +#define param_check_sfb_size(name, p) __param_check(name, p, void) +static struct kernel_param_ops param_ops_sfb_size = { + .set = param_set_sfb_size, + .get = param_get_sfb_size, +}; + +#define RS_INIT_FAILURE_QSI 0x0001 +#define RS_INIT_FAILURE_BSDES 0x0002 +#define RS_INIT_FAILURE_ALRT 0x0003 +#define RS_INIT_FAILURE_PERF 0x0004 +static void __init pr_cpumsf_err(unsigned int reason) +{ + pr_err("Sampling facility support for perf is not available: " + "reason=%04x\n", reason); +} + +static int __init init_cpum_sampling_pmu(void) +{ + struct hws_qsi_info_block si; + int err; + + if (!cpum_sf_avail()) + return -ENODEV; + + memset(&si, 0, sizeof(si)); + if (qsi(&si)) { + pr_cpumsf_err(RS_INIT_FAILURE_QSI); + return -ENODEV; + } + + if (si.bsdes != sizeof(struct hws_basic_entry)) { + pr_cpumsf_err(RS_INIT_FAILURE_BSDES); + return -EINVAL; + } + + if (si.ad) + sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); + + sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); + if (!sfdbg) + pr_err("Registering for s390dbf failed\n"); + debug_register_view(sfdbg, &debug_sprintf_view); + + err = register_external_interrupt(0x1407, cpumf_measurement_alert); + if (err) { + pr_cpumsf_err(RS_INIT_FAILURE_ALRT); + goto out; + } + + err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); + if (err) { + pr_cpumsf_err(RS_INIT_FAILURE_PERF); + unregister_external_interrupt(0x1407, cpumf_measurement_alert); + goto out; + } + perf_cpu_notifier(cpumf_pmu_notifier); +out: + return err; +} +arch_initcall(init_cpum_sampling_pmu); +core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640); diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 2343c218b8f9..5d2dfa31c4ef 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -1,7 +1,7 @@ /* * Performance event support for s390x * - * Copyright IBM Corp. 2012 + * Copyright IBM Corp. 
2012, 2013 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify @@ -16,15 +16,19 @@ #include <linux/kvm_host.h> #include <linux/percpu.h> #include <linux/export.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h> #include <asm/irq.h> #include <asm/cpu_mf.h> #include <asm/lowcore.h> #include <asm/processor.h> +#include <asm/sysinfo.h> const char *perf_pmu_name(void) { if (cpum_cf_avail() || cpum_sf_avail()) - return "CPU-measurement facilities (CPUMF)"; + return "CPU-Measurement Facilities (CPU-MF)"; return "pmu"; } EXPORT_SYMBOL(perf_pmu_name); @@ -35,6 +39,8 @@ int perf_num_counters(void) if (cpum_cf_avail()) num += PERF_CPUM_CF_MAX_CTR; + if (cpum_sf_avail()) + num += PERF_CPUM_SF_MAX_CTR; return num; } @@ -54,7 +60,7 @@ static bool is_in_guest(struct pt_regs *regs) { if (user_mode(regs)) return false; -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) return instruction_pointer(regs) == (unsigned long) &sie_exit; #else return false; @@ -83,8 +89,31 @@ static unsigned long perf_misc_guest_flags(struct pt_regs *regs) : PERF_RECORD_MISC_GUEST_KERNEL; } +static unsigned long perf_misc_flags_sf(struct pt_regs *regs) +{ + struct perf_sf_sde_regs *sde_regs; + unsigned long flags; + + sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long; + if (sde_regs->in_guest) + flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER + : PERF_RECORD_MISC_GUEST_KERNEL; + else + flags = user_mode(regs) ? PERF_RECORD_MISC_USER + : PERF_RECORD_MISC_KERNEL; + return flags; +} + unsigned long perf_misc_flags(struct pt_regs *regs) { + /* Check if the cpum_sf PMU has created the pt_regs structure. + * In this case, perf misc flags can be easily extracted. Otherwise, + * do regular checks on the pt_regs content. 
+ */ + if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA) + if (!regs->gprs[15]) + return perf_misc_flags_sf(regs); + if (is_in_guest(regs)) return perf_misc_guest_flags(regs); @@ -92,27 +121,107 @@ unsigned long perf_misc_flags(struct pt_regs *regs) : PERF_RECORD_MISC_KERNEL; } -void perf_event_print_debug(void) +void print_debug_cf(void) { struct cpumf_ctr_info cf_info; - unsigned long flags; - int cpu; - - if (!cpum_cf_avail()) - return; - - local_irq_save(flags); + int cpu = smp_processor_id(); - cpu = smp_processor_id(); memset(&cf_info, 0, sizeof(cf_info)); if (!qctri(&cf_info)) pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n", cpu, cf_info.cfvn, cf_info.csvn, cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl); +} + +static void print_debug_sf(void) +{ + struct hws_qsi_info_block si; + int cpu = smp_processor_id(); + memset(&si, 0, sizeof(si)); + if (qsi(&si)) + return; + + pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n", + cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate, + si.cpu_speed); + + if (si.as) + pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i" + " bsdes=%i tear=%016lx dear=%016lx\n", cpu, + si.as, si.es, si.cs, si.bsdes, si.tear, si.dear); + if (si.ad) + pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i" + " dsdes=%i tear=%016lx dear=%016lx\n", cpu, + si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear); +} + +void perf_event_print_debug(void) +{ + unsigned long flags; + + local_irq_save(flags); + if (cpum_cf_avail()) + print_debug_cf(); + if (cpum_sf_avail()) + print_debug_sf(); local_irq_restore(flags); } +/* Service level infrastructure */ +static void sl_print_counter(struct seq_file *m) +{ + struct cpumf_ctr_info ci; + + memset(&ci, 0, sizeof(ci)); + if (qctri(&ci)) + return; + + seq_printf(m, "CPU-MF: Counter facility: version=%u.%u " + "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl); +} + +static void sl_print_sampling(struct seq_file *m) +{ + struct hws_qsi_info_block si; + + memset(&si, 0, sizeof(si)); + if (qsi(&si)) + return; + + if (!si.as && !si.ad) + return; + + seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu" + " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate, + si.cpu_speed); + if (si.as) + seq_printf(m, "CPU-MF: Sampling facility: mode=basic" + " sample_size=%u\n", si.bsdes); + if (si.ad) + seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic" + " sample_size=%u\n", si.dsdes); +} + +static void service_level_perf_print(struct seq_file *m, + struct service_level *sl) +{ + if (cpum_cf_avail()) + sl_print_counter(m); + if (cpum_sf_avail()) + sl_print_sampling(m); +} + +static struct service_level service_level_perf = { + .seq_print = service_level_perf_print, +}; + +static int __init service_level_perf_register(void) +{ + return register_service_level(&service_level_perf); +} +arch_initcall(service_level_perf_register); + /* See also arch/s390/kernel/traps.c */ static unsigned long __store_trace(struct perf_callchain_entry *entry, unsigned long sp, @@ -172,3 +281,44 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, __store_trace(entry, head, S390_lowcore.thread_info, S390_lowcore.thread_info + THREAD_SIZE); } + +/* Perf definitions for PMU event attributes in sysfs */ +ssize_t cpumf_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, 
"event=0x%04llx,name=%s\n", + pmu_attr->id, attr->attr.name); +} + +/* Reserve/release functions for sharing perf hardware */ +static DEFINE_SPINLOCK(perf_hw_owner_lock); +static void *perf_sampling_owner; + +int perf_reserve_sampling(void) +{ + int err; + + err = 0; + spin_lock(&perf_hw_owner_lock); + if (perf_sampling_owner) { + pr_warn("The sampling facility is already reserved by %p\n", + perf_sampling_owner); + err = -EBUSY; + } else + perf_sampling_owner = __builtin_return_address(0); + spin_unlock(&perf_hw_owner_lock); + return err; +} +EXPORT_SYMBOL(perf_reserve_sampling); + +void perf_release_sampling(void) +{ + spin_lock(&perf_hw_owner_lock); + WARN_ON(!perf_sampling_owner); + perf_sampling_owner = NULL; + spin_unlock(&perf_hw_owner_lock); +} +EXPORT_SYMBOL(perf_release_sampling); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 7ed0d4e2a435..dd145321d215 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -261,20 +261,18 @@ static inline unsigned long brk_rnd(void) unsigned long arch_randomize_brk(struct mm_struct *mm) { - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); + unsigned long ret; - if (ret < mm->brk) - return mm->brk; - return ret; + ret = PAGE_ALIGN(mm->brk + brk_rnd()); + return (ret > mm->brk) ? ret : mm->brk; } unsigned long randomize_et_dyn(unsigned long base) { - unsigned long ret = PAGE_ALIGN(base + brk_rnd()); + unsigned long ret; if (!(current->flags & PF_RANDOMIZE)) return base; - if (ret < base) - return base; - return ret; + ret = PAGE_ALIGN(base + brk_rnd()); + return (ret > base) ? ret : base; } diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index e65c91c591e8..f6be6087a0e9 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -56,25 +56,26 @@ void update_cr_regs(struct task_struct *task) #ifdef CONFIG_64BIT /* Take care of the enable/disable of transactional execution. */ if (MACHINE_HAS_TE) { - unsigned long cr[3], cr_new[3]; + unsigned long cr, cr_new; - __ctl_store(cr, 0, 2); - cr_new[1] = cr[1]; + __ctl_store(cr, 0, 0); /* Set or clear transaction execution TXC bit 8. */ + cr_new = cr | (1UL << 55); if (task->thread.per_flags & PER_FLAG_NO_TE) - cr_new[0] = cr[0] & ~(1UL << 55); - else - cr_new[0] = cr[0] | (1UL << 55); + cr_new &= ~(1UL << 55); + if (cr_new != cr) + __ctl_load(cr_new, 0, 0); /* Set or clear transaction execution TDC bits 62 and 63. 
*/ - cr_new[2] = cr[2] & ~3UL; + __ctl_store(cr, 2, 2); + cr_new = cr & ~3UL; if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) - cr_new[2] |= 1UL; + cr_new |= 1UL; else - cr_new[2] |= 2UL; + cr_new |= 2UL; } - if (memcmp(&cr_new, &cr, sizeof(cr))) - __ctl_load(cr_new, 0, 2); + if (cr_new != cr) + __ctl_load(cr_new, 2, 2); } #endif /* Copy user specified PER registers */ @@ -107,15 +108,11 @@ void update_cr_regs(struct task_struct *task) void user_enable_single_step(struct task_struct *task) { set_tsk_thread_flag(task, TIF_SINGLE_STEP); - if (task == current) - update_cr_regs(task); } void user_disable_single_step(struct task_struct *task) { clear_tsk_thread_flag(task, TIF_SINGLE_STEP); - if (task == current) - update_cr_regs(task); } /* diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 3bac589844a7..9f60467938d1 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -5,7 +5,7 @@ #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL(sie64a); EXPORT_SYMBOL(sie_exit); #endif diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 0f3d44ecbfc6..09e2f468f48b 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -373,7 +373,7 @@ static void __init setup_lowcore(void) /* * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant - * restart data to the absolute zero lowcore. This is necesary if + * restart data to the absolute zero lowcore. This is necessary if * PSW restart is done on an offline CPU that has lowcore zero. */ lc->restart_stack = (unsigned long) restart_stack; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 958704798f4a..a7125b62a9a6 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -59,7 +59,7 @@ enum { }; struct pcpu { - struct cpu cpu; + struct cpu *cpu; struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ unsigned long async_stack; /* async stack for the cpu */ unsigned long panic_stack; /* panic stack for the cpu */ @@ -159,9 +159,9 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) { int order; - set_bit(ec_bit, &pcpu->ec_mask); - order = pcpu_running(pcpu) ? - SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; + if (test_and_set_bit(ec_bit, &pcpu->ec_mask)) + return; + order = pcpu_running(pcpu) ? 
SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; pcpu_sigp_retry(pcpu, order, 0); } @@ -965,7 +965,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; - struct cpu *c = &pcpu_devices[cpu].cpu; + struct cpu *c = pcpu_devices[cpu].cpu; struct device *s = &c->dev; int err = 0; @@ -982,10 +982,15 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, static int smp_add_present_cpu(int cpu) { - struct cpu *c = &pcpu_devices[cpu].cpu; - struct device *s = &c->dev; + struct device *s; + struct cpu *c; int rc; + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + pcpu_devices[cpu].cpu = c; + s = &c->dev; c->hotpluggable = 1; rc = register_cpu(c, cpu); if (rc) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 2440602e6df1..d101dae62771 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -275,7 +275,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } else { /* - * Set condition code 3 to stop the guest from issueing channel + * Set condition code 3 to stop the guest from issuing channel * I/O instructions. */ kvm_s390_set_psw_cc(vcpu, 3); diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dbdab3e7a1a6..0632dc50da78 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -74,8 +74,8 @@ static size_t copy_in_kernel(size_t count, void __user *to, /* * Returns kernel address for user virtual address. If the returned address is - * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address - * contains the (negative) exception code. + * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the + * address contains the (negative) exception code. */ #ifdef CONFIG_64BIT diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index e794c88f699a..3584ed9b20a1 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -293,7 +293,7 @@ static int gmap_alloc_table(struct gmap *gmap, * @addr: address in the guest address space * @len: length of the memory area to unmap * - * Returns 0 if the unmap succeded, -EINVAL if not. + * Returns 0 if the unmap succeeded, -EINVAL if not. */ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) { @@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment); * @from: source address in the parent address space * @to: target address in the guest address space * - * Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not. + * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not. 
*/ int gmap_map_segment(struct gmap *gmap, unsigned long from, unsigned long to, unsigned long len) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 16871da37371..708d60e40066 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); /* lhi %r4,0 */ EMIT4(0xa7480000); - /* dr %r4,%r12 */ - EMIT2(0x1d4c); + /* dlr %r4,%r12 */ + EMIT4(0xb997004c); break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */ - /* m %r4,<d(K)>(%r13) */ - EMIT4_DISP(0x5c40d000, EMIT_CONST(K)); - /* lr %r5,%r4 */ - EMIT2(0x1854); + case BPF_S_ALU_DIV_K: /* A /= K */ + if (K == 1) + break; + /* lhi %r4,0 */ + EMIT4(0xa7480000); + /* dl %r4,<d(K)>(%r13) */ + EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); break; case BPF_S_ALU_MOD_X: /* A %= X */ jit->seen |= SEEN_XREG | SEEN_RET0; @@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); /* lhi %r4,0 */ EMIT4(0xa7480000); - /* dr %r4,%r12 */ - EMIT2(0x1d4c); + /* dlr %r4,%r12 */ + EMIT4(0xb997004c); /* lr %r5,%r4 */ EMIT2(0x1854); break; case BPF_S_ALU_MOD_K: /* A %= K */ + if (K == 1) { + /* lhi %r5,0 */ + EMIT4(0xa7580000); + break; + } /* lhi %r4,0 */ EMIT4(0xa7480000); - /* d %r4,<d(K)>(%r13) */ - EMIT4_DISP(0x5d40d000, EMIT_CONST(K)); + /* dl %r4,<d(K)>(%r13) */ + EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); /* lr %r5,%r4 */ EMIT2(0x1854); break; diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 231cecafc2f1..a32c96761eab 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c @@ -26,9 +26,6 @@ #define MAX_NUM_SDB 511 #define MIN_NUM_SDB 1 -#define ALERT_REQ_MASK 0x4000000000000000ul -#define BUFFER_FULL_MASK 0x8000000000000000ul - DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); struct hws_execute_parms { @@ -44,6 +41,7 @@ static DEFINE_MUTEX(hws_sem_oom); static unsigned char hws_flush_all; static unsigned int hws_oom; +static unsigned int hws_alert; static struct workqueue_struct *hws_wq; static unsigned int hws_state; @@ -65,43 +63,6 @@ static unsigned long interval; static unsigned long min_sampler_rate; static unsigned long max_sampler_rate; -static int ssctl(void *buffer) -{ - int cc; - - /* set in order to detect a program check */ - cc = 1; - - asm volatile( - "0: .insn s,0xB2870000,0(%1)\n" - "1: ipm %0\n" - " srl %0,28\n" - "2:\n" - EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) - : "+d" (cc), "+a" (buffer) - : "m" (*((struct hws_ssctl_request_block *)buffer)) - : "cc", "memory"); - - return cc ? -EINVAL : 0 ; -} - -static int qsi(void *buffer) -{ - int cc; - cc = 1; - - asm volatile( - "0: .insn s,0xB2860000,0(%1)\n" - "1: lhi %0,0\n" - "2:\n" - EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) - : "=d" (cc), "+a" (buffer) - : "m" (*((struct hws_qsi_info_block *)buffer)) - : "cc", "memory"); - - return cc ? 
-EINVAL : 0; -} - static void execute_qsi(void *parms) { struct hws_execute_parms *ep = parms; @@ -113,7 +74,7 @@ static void execute_ssctl(void *parms) { struct hws_execute_parms *ep = parms; - ep->rc = ssctl(ep->buffer); + ep->rc = lsctl(ep->buffer); } static int smp_ctl_ssctl_stop(int cpu) @@ -214,17 +175,6 @@ static int smp_ctl_qsi(int cpu) return ep.rc; } -static inline unsigned long *trailer_entry_ptr(unsigned long v) -{ - void *ret; - - ret = (void *)v; - ret += PAGE_SIZE; - ret -= sizeof(struct hws_trailer_entry); - - return (unsigned long *) ret; -} - static void hws_ext_handler(struct ext_code ext_code, unsigned int param32, unsigned long param64) { @@ -233,6 +183,9 @@ static void hws_ext_handler(struct ext_code ext_code, if (!(param32 & CPU_MF_INT_SF_MASK)) return; + if (!hws_alert) + return; + inc_irq_stat(IRQEXT_CMS); atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); @@ -256,16 +209,6 @@ static void init_all_cpu_buffers(void) } } -static int is_link_entry(unsigned long *s) -{ - return *s & 0x1ul ? 1 : 0; -} - -static unsigned long *get_next_sdbt(unsigned long *s) -{ - return (unsigned long *) (*s & ~0x1ul); -} - static int prepare_cpu_buffers(void) { int cpu; @@ -353,7 +296,7 @@ static int allocate_sdbt(int cpu) } *sdbt = sdb; trailer = trailer_entry_ptr(*sdbt); - *trailer = ALERT_REQ_MASK; + *trailer = SDB_TE_ALERT_REQ_MASK; sdbt++; mutex_unlock(&hws_sem_oom); } @@ -829,7 +772,7 @@ static void worker_on_interrupt(unsigned int cpu) trailer = trailer_entry_ptr(*sdbt); /* leave loop if no more work to do */ - if (!(*trailer & BUFFER_FULL_MASK)) { + if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) { done = 1; if (!hws_flush_all) continue; @@ -856,7 +799,7 @@ static void worker_on_interrupt(unsigned int cpu) static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, unsigned long *dear) { - struct hws_data_entry *sample_data_ptr; + struct hws_basic_entry *sample_data_ptr; unsigned long *trailer; trailer = trailer_entry_ptr(*sdbt); @@ -866,7 +809,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, trailer = dear; } - sample_data_ptr = (struct hws_data_entry *)(*sdbt); + sample_data_ptr = (struct hws_basic_entry *)(*sdbt); while ((unsigned long *)sample_data_ptr < trailer) { struct pt_regs *regs = NULL; @@ -1002,6 +945,7 @@ int hwsampler_deallocate(void) goto deallocate_exit; irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + hws_alert = 0; deallocate_sdbt(); hws_state = HWS_DEALLOCATED; @@ -1116,6 +1060,7 @@ int hwsampler_shutdown(void) if (hws_state == HWS_STOPPED) { irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + hws_alert = 0; deallocate_sdbt(); } if (hws_wq) { @@ -1190,6 +1135,7 @@ start_all_exit: hws_oom = 1; hws_flush_all = 0; /* now let them in, 1407 CPUMF external interrupts */ + hws_alert = 1; irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); return 0; diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h index 0022e1ebfbde..a483d06f2fa7 100644 --- a/arch/s390/oprofile/hwsampler.h +++ b/arch/s390/oprofile/hwsampler.h @@ -9,27 +9,7 @@ #define HWSAMPLER_H_ #include <linux/workqueue.h> - -struct hws_qsi_info_block /* QUERY SAMPLING information block */ -{ /* Bit(s) */ - unsigned int b0_13:14; /* 0-13: zeros */ - unsigned int as:1; /* 14: sampling authorisation control*/ - unsigned int b15_21:7; /* 15-21: zeros */ - unsigned int es:1; /* 22: sampling enable control */ - unsigned int b23_29:7; /* 23-29: zeros */ - unsigned int cs:1; /* 30: sampling activation control */ - 
unsigned int:1; /* 31: reserved */ - unsigned int bsdes:16; /* 4-5: size of sampling entry */ - unsigned int:16; /* 6-7: reserved */ - unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ - unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ - unsigned long tear; /* 24-31: TEAR contents */ - unsigned long dear; /* 32-39: DEAR contents */ - unsigned int rsvrd0; /* 40-43: reserved */ - unsigned int cpu_speed; /* 44-47: CPU speed */ - unsigned long long rsvrd1; /* 48-55: reserved */ - unsigned long long rsvrd2; /* 56-63: reserved */ -}; +#include <asm/cpu_mf.h> struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */ { /* bytes 0 - 7 Bit(s) */ @@ -68,36 +48,6 @@ struct hws_cpu_buffer { unsigned int stop_mode:1; }; -struct hws_data_entry { - unsigned int def:16; /* 0-15 Data Entry Format */ - unsigned int R:4; /* 16-19 reserved */ - unsigned int U:4; /* 20-23 Number of unique instruct. */ - unsigned int z:2; /* zeros */ - unsigned int T:1; /* 26 PSW DAT mode */ - unsigned int W:1; /* 27 PSW wait state */ - unsigned int P:1; /* 28 PSW Problem state */ - unsigned int AS:2; /* 29-30 PSW address-space control */ - unsigned int I:1; /* 31 entry valid or invalid */ - unsigned int:16; - unsigned int prim_asn:16; /* primary ASN */ - unsigned long long ia; /* Instruction Address */ - unsigned long long gpp; /* Guest Program Parameter */ - unsigned long long hpp; /* Host Program Parameter */ -}; - -struct hws_trailer_entry { - unsigned int f:1; /* 0 - Block Full Indicator */ - unsigned int a:1; /* 1 - Alert request control */ - unsigned long:62; /* 2 - 63: Reserved */ - unsigned long overflow; /* 64 - sample Overflow count */ - unsigned long timestamp; /* 16 - time-stamp */ - unsigned long timestamp1; /* */ - unsigned long reserved1; /* 32 -Reserved */ - unsigned long reserved2; /* */ - unsigned long progusage1; /* 48 - reserved for programming use */ - unsigned long progusage2; /* */ -}; - int hwsampler_setup(void); int hwsampler_shutdown(void); int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 04e1b6a85362..9ffe645d5989 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -10,6 +10,7 @@ */ #include <linux/oprofile.h> +#include <linux/perf_event.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/fs.h> @@ -67,6 +68,21 @@ module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling" "(report cpu_type \"timer\""); +static int __oprofile_hwsampler_start(void) +{ + int retval; + + retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks); + if (retval) + return retval; + + retval = hwsampler_start_all(oprofile_hw_interval); + if (retval) + hwsampler_deallocate(); + + return retval; +} + static int oprofile_hwsampler_start(void) { int retval; @@ -76,13 +92,13 @@ static int oprofile_hwsampler_start(void) if (!hwsampler_running) return timer_ops.start(); - retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks); + retval = perf_reserve_sampling(); if (retval) return retval; - retval = hwsampler_start_all(oprofile_hw_interval); + retval = __oprofile_hwsampler_start(); if (retval) - hwsampler_deallocate(); + perf_release_sampling(); return retval; } @@ -96,6 +112,7 @@ static void oprofile_hwsampler_stop(void) hwsampler_stop_all(); hwsampler_deallocate(); + perf_release_sampling(); return; } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 
bf7c73d71eef..0820362c7b0f 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -919,17 +919,23 @@ static void zpci_mem_exit(void) kmem_cache_destroy(zdev_fmb_cache); } -static unsigned int s390_pci_probe; +static unsigned int s390_pci_probe = 1; +static unsigned int s390_pci_initialized; char * __init pcibios_setup(char *str) { - if (!strcmp(str, "on")) { - s390_pci_probe = 1; + if (!strcmp(str, "off")) { + s390_pci_probe = 0; return NULL; } return str; } +bool zpci_is_enabled(void) +{ + return s390_pci_initialized; +} + static int __init pci_base_init(void) { int rc; @@ -961,6 +967,7 @@ static int __init pci_base_init(void) if (rc) goto out_find; + s390_pci_initialized = 1; return 0; out_find: @@ -978,5 +985,6 @@ subsys_initcall_sync(pci_base_init); void zpci_rescan(void) { - clp_rescan_pci_devices_simple(); + if (zpci_is_enabled()) + clp_rescan_pci_devices_simple(); } diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 9b83d080902d..60c11a629d96 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -285,7 +285,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, flags |= ZPCI_TABLE_PROTECTED; if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { - atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); + atomic64_add(nr_pages, &zdev->fmb->mapped_pages); return dma_addr + (offset & ~PAGE_MASK); } @@ -313,7 +313,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, zpci_err_hex(&dma_addr, sizeof(dma_addr)); } - atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages); + atomic64_add(npages, &zdev->fmb->unmapped_pages); iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; dma_free_iommu(zdev, iommu_page_index, npages); } @@ -332,7 +332,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size, if (!page) return NULL; - atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages); pa = page_to_phys(page); memset((void *) pa, 0, size); @@ -343,6 +342,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, return NULL; } + atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages); if (dma_handle) *dma_handle = map; return (void *) pa; @@ -352,8 +352,11 @@ static void s390_dma_free(struct device *dev, size_t size, void *pa, dma_addr_t dma_handle, struct dma_attrs *attrs) { - s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size), - DMA_BIDIRECTIONAL, NULL); + struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + + size = PAGE_ALIGN(size); + atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages); + s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); free_pages((unsigned long) pa, get_order(size)); } diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 069607209a30..01e251b1da0c 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -43,9 +43,8 @@ struct zpci_ccdf_avail { u16 pec; /* PCI event code */ } __packed; -void zpci_event_error(void *data) +static void __zpci_event_error(struct zpci_ccdf_err *ccdf) { - struct zpci_ccdf_err *ccdf = data; struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); zpci_err("error CCDF:\n"); @@ -58,9 +57,14 @@ void zpci_event_error(void *data) pci_name(zdev->pdev), ccdf->pec, ccdf->fid); } -void zpci_event_availability(void *data) +void zpci_event_error(void *data) +{ + if (zpci_is_enabled()) + __zpci_event_error(data); +} + +static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) { - struct zpci_ccdf_avail *ccdf = data; struct zpci_dev 
*zdev = get_zdev_by_fid(ccdf->fid); struct pci_dev *pdev = zdev ? zdev->pdev : NULL; int ret; @@ -99,8 +103,12 @@ void zpci_event_availability(void *data) break; case 0x0304: /* Configured -> Standby */ - if (pdev) + if (pdev) { + /* Give the driver a hint that the function is + * already unusable. */ + pdev->error_state = pci_channel_io_perm_failure; pci_stop_and_remove_bus_device(pdev); + } zdev->fh = ccdf->fh; zpci_disable_device(zdev); @@ -110,6 +118,8 @@ void zpci_event_availability(void *data) clp_rescan_pci_devices(); break; case 0x0308: /* Standby -> Reserved */ + if (!zdev) + break; pci_stop_root_bus(zdev->bus); pci_remove_root_bus(zdev->bus); break; @@ -117,3 +127,9 @@ void zpci_event_availability(void *data) break; } } + +void zpci_event_availability(void *data) +{ + if (zpci_is_enabled()) + __zpci_event_availability(data); +} diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index f3414ade77a3..fe7471eb0167 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -1,6 +1,7 @@ header-y += +generic-y += barrier.h generic-y += clkdev.h generic-y += trace_clock.h generic-y += xor.h diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h deleted file mode 100644 index 0eacb6471e6d..000000000000 --- a/arch/score/include/asm/barrier.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _ASM_SCORE_BARRIER_H -#define _ASM_SCORE_BARRIER_H - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() - -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) - -#define set_mb(var, value) do {var = value; wmb(); } while (0) - -#endif /* _ASM_SCORE_BARRIER_H */ diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 9b0979f4df7a..ce298317a73e 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -66,6 +66,7 @@ config SUPERH32 select PERF_EVENTS select ARCH_HIBERNATION_POSSIBLE if MMU select SPARSE_IRQ + select HAVE_CC_STACKPROTECTOR config SUPERH64 def_bool ARCH = "sh64" @@ -695,20 +696,6 @@ config SECCOMP If unsure, say N. -config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - depends on SUPERH32 - help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - - This feature requires gcc version 4.2 or above. 
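The per-arch CC_STACKPROTECTOR prompts removed in this series are replaced by HAVE_CC_STACKPROTECTOR selects (arm and sh above, x86 further below). An architecture selecting that symbol is expected to provide the canary and the failure hook that the compiler-emitted checks reference. A minimal sketch of those two pieces, assuming the conventional __stack_chk_guard/__stack_chk_fail names rather than anything added by this patch:

    #include <linux/kernel.h>
    #include <linux/export.h>
    #include <linux/cache.h>

    /* Canary value compared by the compiler-emitted function epilogues. */
    unsigned long __stack_chk_guard __read_mostly;
    EXPORT_SYMBOL(__stack_chk_guard);

    /* Called by the instrumented epilogue when the canary was clobbered. */
    void __stack_chk_fail(void)
    {
            panic("stack-protector: kernel stack is corrupted in: %pB\n",
                  __builtin_return_address(0));
    }

The generic kernel already ships a __stack_chk_fail much like this one; the sketch is only meant to show what the new Kconfig symbol asks of an architecture.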
- config SMP bool "Symmetric multi-processing support" depends on SYS_SUPPORTS_SMP diff --git a/arch/sh/Makefile b/arch/sh/Makefile index aed701c7b11b..d4d16e4be07c 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -199,10 +199,6 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y) KBUILD_CFLAGS += -fasynchronous-unwind-tables endif -ifeq ($(CONFIG_CC_STACKPROTECTOR),y) - KBUILD_CFLAGS += -fstack-protector -endif - libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h index 72c103dae300..43715308b068 100644 --- a/arch/sh/include/asm/barrier.h +++ b/arch/sh/include/asm/barrier.h @@ -26,29 +26,14 @@ #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) #define mb() __asm__ __volatile__ ("synco": : :"memory") #define rmb() mb() -#define wmb() __asm__ __volatile__ ("synco": : :"memory") +#define wmb() mb() #define ctrl_barrier() __icbi(PAGE_OFFSET) -#define read_barrier_depends() do { } while(0) #else -#define mb() __asm__ __volatile__ ("": : :"memory") -#define rmb() mb() -#define wmb() __asm__ __volatile__ ("": : :"memory") #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") -#define read_barrier_depends() do { } while(0) -#endif - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) #endif #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) +#include <asm-generic/barrier.h> + #endif /* __ASM_SH_BARRIER_H */ diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h index c1b76654ee76..ae69eda288f4 100644 --- a/arch/sparc/include/asm/barrier_32.h +++ b/arch/sparc/include/asm/barrier_32.h @@ -1,15 +1,7 @@ #ifndef __SPARC_BARRIER_H #define __SPARC_BARRIER_H -/* XXX Change this if we ever use a PSO mode kernel. 
*/ -#define mb() __asm__ __volatile__ ("" : : : "memory") -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) -#define set_mb(__var, __value) do { __var = __value; mb(); } while(0) -#define smp_mb() __asm__ __volatile__("":::"memory") -#define smp_rmb() __asm__ __volatile__("":::"memory") -#define smp_wmb() __asm__ __volatile__("":::"memory") -#define smp_read_barrier_depends() do { } while(0) +#include <asm/processor.h> /* for nop() */ +#include <asm-generic/barrier.h> #endif /* !(__SPARC_BARRIER_H) */ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 95d45986f908..b5aad964558e 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define smp_read_barrier_depends() do { } while(0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + #endif /* !(__SPARC64_BARRIER_H) */ diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 218b6b23c378..01fe9946d388 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp) case BPF_S_ALU_MUL_K: /* A *= K */ emit_alu_K(MUL, K); break; - case BPF_S_ALU_DIV_K: /* A /= K */ - emit_alu_K(MUL, K); - emit_read_y(r_A); + case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/ + if (K == 1) + break; + emit_write_y(G0); +#ifdef CONFIG_SPARC32 + /* The Sparc v8 architecture requires + * three instructions between a %y + * register write and the first use. + */ + emit_nop(); + emit_nop(); + emit_nop(); +#endif + emit_alu_K(DIV, K); break; case BPF_S_ALU_DIV_X: /* A /= X; */ emit_cmpi(r_X, 0); diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h index a9a73da5865d..b5a05d050a8f 100644 --- a/arch/tile/include/asm/barrier.h +++ b/arch/tile/include/asm/barrier.h @@ -22,59 +22,6 @@ #include <arch/spr_def.h> #include <asm/timex.h> -/* - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). 
However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. - */ -#define read_barrier_depends() do { } while (0) - #define __sync() __insn_mf() #include <hv/syscall_public.h> @@ -125,20 +72,7 @@ mb_incoherent(void) #define mb() fast_mb() #define iob() fast_iob() -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#endif - -#define set_mb(var, value) \ - do { var = value; mb(); } while (0) +#include <asm-generic/barrier.h> #endif /* !__ASSEMBLY__ */ #endif /* _ASM_TILE_BARRIER_H */ diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h index a6620e5336b6..83d6a520f4bd 100644 --- a/arch/unicore32/include/asm/barrier.h +++ b/arch/unicore32/include/asm/barrier.h @@ -14,15 +14,6 @@ #define dsb() __asm__ __volatile__ ("" : : : "memory") #define dmb() __asm__ __volatile__ ("" : : : "memory") -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - -#define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#include <asm-generic/barrier.h> #endif /* __UNICORE_BARRIER_H__ */ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0952ecd60eca..838e7c34dd60 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -125,6 +125,7 @@ config X86 select RTC_LIB select HAVE_DEBUG_STACKOVERFLOW select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 + select HAVE_CC_STACKPROTECTOR config INSTRUCTION_DECODER def_bool y @@ -1617,22 +1618,6 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. -config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection" - ---help--- - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - - This feature requires gcc version 4.2 or above, or a distribution - gcc with the feature backported. Older versions are automatically - detected and for those versions, this configuration option is - ignored. (and a warning is printed during bootup) - source kernel/Kconfig.hz config KEXEC diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 57d021507120..13b22e0f681d 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -89,13 +89,11 @@ else KBUILD_CFLAGS += -maccumulate-outgoing-args endif +# Make sure compiler does not have buggy stack-protector support. 
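The sparc64 hunk above and the x86 barrier.h hunk below introduce smp_store_release() and smp_load_acquire(). As a rough illustration of the pairing they are meant for (a sketch, not code from this patch), a writer publishes a fully initialised object and a reader then observes it consistently:

    #include <asm/barrier.h>

    struct msg {
            int payload;
    };

    static struct msg *shared_msg;

    /* Writer: initialise the object first, then publish it with release semantics. */
    static void publish(struct msg *m)
    {
            m->payload = 42;
            smp_store_release(&shared_msg, m);
    }

    /* Reader: the acquire pairs with the release above, so payload is seen as 42. */
    static int consume(void)
    {
            struct msg *m = smp_load_acquire(&shared_msg);

            return m ? m->payload : -1;
    }

On x86 both primitives reduce to a compiler barrier around the access (TSO), while weaker orderings fall back to the smp_mb() based definitions from asm-generic/barrier.h, as the hunks here show.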
ifdef CONFIG_CC_STACKPROTECTOR cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh - ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) - stackp-y := -fstack-protector - KBUILD_CFLAGS += $(stackp-y) - else - $(warning stack protector enabled but no compiler support) + ifneq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) + $(warning stack-protector enabled but compiler support broken) endif endif diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index c6cd358a1eec..04a48903b2eb 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -92,12 +92,53 @@ #endif #define smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) -#else +#else /* !SMP */ #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define smp_read_barrier_depends() do { } while (0) #define set_mb(var, value) do { var = value; barrier(); } while (0) +#endif /* SMP */ + +#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) + +/* + * For either of these options x86 doesn't have a strong TSO memory + * model and we should fall back to full barriers. + */ + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + +#else /* regular x86 TSO memory ordering */ + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + #endif /* diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index c49a613c6452..cea1c76d49bf 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk) /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. Clear the x87 state here by setting it to fixed values. 
"m" is a random variable that should be in L1 */ - alternative_input( - ASM_NOP8 ASM_NOP2, - "emms\n\t" /* clear stack tags */ - "fildl %P[addr]", /* set F?P to defined value */ - X86_FEATURE_FXSAVE_LEAK, - [addr] "m" (tsk->thread.fpu.has_fpu)); + if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) { + asm volatile( + "fnclex\n\t" + "emms\n\t" + "fildl %P[addr]" /* set F?P to defined value */ + : : [addr] "m" (tsk->thread.fpu.has_fpu)); + } return fpu_restore_checking(&tsk->thread.fpu); } diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index cba45d99ac1a..67d69b8e2d20 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -191,6 +191,9 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); #define trace_interrupt interrupt #endif +#define VECTOR_UNDEFINED -1 +#define VECTOR_RETRIGGERED -2 + typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); extern void setup_vector_irq(int cpu); diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 2f366d0ac6b4..1da25a5f96f9 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_MWAIT_H #define _ASM_X86_MWAIT_H +#include <linux/sched.h> + #define MWAIT_SUBSTATE_MASK 0xf #define MWAIT_CSTATE_MASK 0xf #define MWAIT_SUBSTATE_SIZE 4 @@ -13,4 +15,45 @@ #define MWAIT_ECX_INTERRUPT_BREAK 0x1 +static inline void __monitor(const void *eax, unsigned long ecx, + unsigned long edx) +{ + /* "monitor %eax, %ecx, %edx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc8;" + :: "a" (eax), "c" (ecx), "d"(edx)); +} + +static inline void __mwait(unsigned long eax, unsigned long ecx) +{ + /* "mwait %eax, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +} + +/* + * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, + * which can obviate IPI to trigger checking of need_resched. + * We execute MONITOR against need_resched and enter optimized wait state + * through MWAIT. Whenever someone changes need_resched, we would be woken + * up from MWAIT (without an IPI). + * + * New with Core Duo processors, MWAIT can take some hints based on CPU + * capability. 
+ */ +static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) +{ + if (!current_set_polling_and_test()) { + if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) { + mb(); + clflush((void *)&current_thread_info()->flags); + mb(); + } + + __monitor((void *)&current_thread_info()->flags, 0, 0); + if (!need_resched()) + __mwait(eax, ecx); + } + current_clr_polling(); +} + #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7b034a4057f9..24821f5768bc 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -700,29 +700,6 @@ static inline void sync_core(void) #endif } -static inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) -{ - /* "monitor %eax, %ecx, %edx;" */ - asm volatile(".byte 0x0f, 0x01, 0xc8;" - :: "a" (eax), "c" (ecx), "d"(edx)); -} - -static inline void __mwait(unsigned long eax, unsigned long ecx) -{ - /* "mwait %eax, %ecx;" */ - asm volatile(".byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); -} - -static inline void __sti_mwait(unsigned long eax, unsigned long ecx) -{ - trace_hardirqs_on(); - /* "mwait %eax, %ecx;" */ - asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); -} - extern void select_idle_routine(const struct cpuinfo_x86 *c); extern void init_amd_e400_c1e_mask(void); diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 34baa0eb5d0c..3de54ef0aea5 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -4,6 +4,7 @@ #include <linux/pm.h> #include <linux/percpu.h> #include <linux/interrupt.h> +#include <linux/math64.h> #define TICK_SIZE (tick_nsec / 1000) @@ -12,68 +13,26 @@ extern int recalibrate_cpu_khz(void); extern int no_timer_check; -/* Accelerators for sched_clock() - * convert from cycles(64bits) => nanoseconds (64bits) - * basic equation: - * ns = cycles / (freq / ns_per_sec) - * ns = cycles * (ns_per_sec / freq) - * ns = cycles * (10^9 / (cpu_khz * 10^3)) - * ns = cycles * (10^6 / cpu_khz) +/* + * We use the full linear equation: f(x) = a + b*x, in order to allow + * a continuous function in the face of dynamic freq changes. * - * Then we use scaling math (suggested by george@mvista.com) to get: - * ns = cycles * (10^6 * SC / cpu_khz) / SC - * ns = cycles * cyc2ns_scale / SC + * Continuity means that when our frequency changes our slope (b); we want to + * ensure that: f(t) == f'(t), which gives: a + b*t == a' + b'*t. * - * And since SC is a constant power of two, we can convert the div - * into a shift. + * Without an offset (a) the above would not be possible. * - * We can use khz divisor instead of mhz to keep a better precision, since - * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. - * (mathieu.desnoyers@polymtl.ca) - * - * -johnstul@us.ibm.com "math is hard, lets go shopping!" - * - * In: - * - * ns = cycles * cyc2ns_scale / SC - * - * Although we may still have enough bits to store the value of ns, - * in some cases, we may not have enough bits to store cycles * cyc2ns_scale, - * leading to an incorrect result. - * - * To avoid this, we can decompose 'cycles' into quotient and remainder - * of division by SC. Then, - * - * ns = (quot * SC + rem) * cyc2ns_scale / SC - * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC - * - * - sqazi@google.com + * See the comment near cycles_2_ns() for details on how we compute (b).
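The comment above reduces to: given a new slope b' at time t, pick a new offset a' so that a + b*t == a' + b'*t. In the mult/shift form used by cyc2ns_data this comes out to something like the following (a simplified sketch; the helper name is made up and overflow-safe multiplication such as mul_u64_u32_shr() is omitted):

    #include <linux/types.h>

    /* Hypothetical helper: keep ns(t) continuous when the slope (mul) changes. */
    static u64 cyc2ns_new_offset(u64 now_cycles, u64 old_offset,
                                 u32 old_mul, u32 new_mul, u32 shift)
    {
            u64 ns_now = old_offset + ((now_cycles * old_mul) >> shift);

            return ns_now - ((now_cycles * new_mul) >> shift);
    }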
*/ - -DECLARE_PER_CPU(unsigned long, cyc2ns); -DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); - -#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ - -static inline unsigned long long __cycles_2_ns(unsigned long long cyc) -{ - int cpu = smp_processor_id(); - unsigned long long ns = per_cpu(cyc2ns_offset, cpu); - ns += mult_frac(cyc, per_cpu(cyc2ns, cpu), - (1UL << CYC2NS_SCALE_FACTOR)); - return ns; -} - -static inline unsigned long long cycles_2_ns(unsigned long long cyc) -{ - unsigned long long ns; - unsigned long flags; - - local_irq_save(flags); - ns = __cycles_2_ns(cyc); - local_irq_restore(flags); - - return ns; -} +struct cyc2ns_data { + u32 cyc2ns_mul; + u32 cyc2ns_shift; + u64 cyc2ns_offset; + u32 __count; + /* u32 hole */ +}; /* 24 bytes -- do not grow */ + +extern struct cyc2ns_data *cyc2ns_read_begin(void); +extern void cyc2ns_read_end(struct cyc2ns_data *); #endif /* _ASM_X86_TIMER_H */ diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index d2b7f27781bc..e69182fd01cf 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); -/* - * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, - * which can obviate IPI to trigger checking of need_resched. - * We execute MONITOR against need_resched and enter optimized wait state - * through MWAIT. Whenever someone changes need_resched, we would be woken - * up from MWAIT (without an IPI). - * - * New with Core Duo processors, MWAIT can take some hints based on CPU - * capability. - */ -void mwait_idle_with_hints(unsigned long ax, unsigned long cx) -{ - if (!need_resched()) { - if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) - clflush((void *)&current_thread_info()->flags); - - __monitor((void *)&current_thread_info()->flags, 0, 0); - smp_mb(); - if (!need_resched()) - __mwait(ax, cx); - } -} - void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) { unsigned int cpu = smp_processor_id(); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index e63a5bd2a78f..a43f068ebec1 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1142,9 +1142,10 @@ next: if (test_bit(vector, used_vectors)) goto next; - for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) - if (per_cpu(vector_irq, new_cpu)[vector] != -1) + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) { + if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED) goto next; + } /* Found one!
*/ current_vector = vector; current_offset = offset; @@ -1183,7 +1184,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) vector = cfg->vector; for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) - per_cpu(vector_irq, cpu)[vector] = -1; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; cfg->vector = 0; cpumask_clear(cfg->domain); @@ -1191,11 +1192,10 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) if (likely(!cfg->move_in_progress)) return; for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { - for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; - vector++) { + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) continue; - per_cpu(vector_irq, cpu)[vector] = -1; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; break; } } @@ -1228,12 +1228,12 @@ void __setup_vector_irq(int cpu) /* Mark the free vectors */ for (vector = 0; vector < NR_VECTORS; ++vector) { irq = per_cpu(vector_irq, cpu)[vector]; - if (irq < 0) + if (irq <= VECTOR_UNDEFINED) continue; cfg = irq_cfg(irq); if (!cpumask_test_cpu(cpu, cfg->domain)) - per_cpu(vector_irq, cpu)[vector] = -1; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; } raw_spin_unlock(&vector_lock); } @@ -2202,13 +2202,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) me = smp_processor_id(); for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { - unsigned int irq; + int irq; unsigned int irr; struct irq_desc *desc; struct irq_cfg *cfg; irq = __this_cpu_read(vector_irq[vector]); - if (irq == -1) + if (irq <= VECTOR_UNDEFINED) continue; desc = irq_to_desc(irq); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 47b56a7e99cb..6359506a19ee 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o endif obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o +obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o endif diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index bca023bdd6b2..8bc79cddd9a2 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -487,7 +487,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); if (!check_tsc_unstable()) - sched_clock_stable = 1; + set_sched_clock_stable(); } #ifdef CONFIG_X86_64 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index ea04b342c026..1a439c047ff3 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -93,7 +93,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); if (!check_tsc_unstable()) - sched_clock_stable = 1; + set_sched_clock_stable(); } /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 8e132931614d..b88645191fe5 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1883,21 +1883,27 @@ static struct pmu pmu = { void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) { + struct cyc2ns_data *data; + userpg->cap_user_time = 0; 
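The cap_user_time fields filled in by the surrounding arch_perf_update_userpage() hunk (time_mult, time_shift, time_offset and time_zero, now sourced from cyc2ns_read_begin()) are combined by user space with a raw TSC read. Per the perf mmap page ABI the conversion is roughly the following, shown as a user-space sketch rather than anything added by this patch:

    /* User-space sketch: convert a raw TSC value with the exported parameters. */
    static unsigned long long perf_user_time(unsigned long long tsc,
                                             unsigned short time_shift,
                                             unsigned int time_mult,
                                             unsigned long long time_offset)
    {
            unsigned long long quot = tsc >> time_shift;
            unsigned long long rem  = tsc & (((unsigned long long)1 << time_shift) - 1);

            return time_offset + quot * time_mult + ((rem * time_mult) >> time_shift);
    }

The quotient/remainder split avoids overflowing the 64-bit intermediate product, which is why the mult/shift pair is exported rather than a single scale factor.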
userpg->cap_user_time_zero = 0; userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; userpg->pmc_width = x86_pmu.cntval_bits; - if (!sched_clock_stable) + if (!sched_clock_stable()) return; + data = cyc2ns_read_begin(); + userpg->cap_user_time = 1; - userpg->time_mult = this_cpu_read(cyc2ns); - userpg->time_shift = CYC2NS_SCALE_FACTOR; - userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; + userpg->time_mult = data->cyc2ns_mul; + userpg->time_shift = data->cyc2ns_shift; + userpg->time_offset = data->cyc2ns_offset - now; userpg->cap_user_time_zero = 1; - userpg->time_zero = this_cpu_read(cyc2ns_offset); + userpg->time_zero = data->cyc2ns_offset; + + cyc2ns_read_end(data); } /* diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index e09f0bfb7b8f..4b8e4d3cd6ea 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/ptrace.h> +#include <linux/syscore_ops.h> #include <asm/apic.h> @@ -816,6 +817,18 @@ out: return ret; } +static void ibs_eilvt_setup(void) +{ + /* + * Force LVT offset assignment for family 10h: The offsets are + * not assigned by the BIOS for this family, so the OS is + * responsible for doing it. If the OS assignment fails, fall + * back to BIOS settings and try to setup this. + */ + if (boot_cpu_data.x86 == 0x10) + force_ibs_eilvt_setup(); +} + static inline int get_ibs_lvt_offset(void) { u64 val; @@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy) setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); } +#ifdef CONFIG_PM + +static int perf_ibs_suspend(void) +{ + clear_APIC_ibs(NULL); + return 0; +} + +static void perf_ibs_resume(void) +{ + ibs_eilvt_setup(); + setup_APIC_ibs(NULL); +} + +static struct syscore_ops perf_ibs_syscore_ops = { + .resume = perf_ibs_resume, + .suspend = perf_ibs_suspend, +}; + +static void perf_ibs_pm_init(void) +{ + register_syscore_ops(&perf_ibs_syscore_ops); +} + +#else + +static inline void perf_ibs_pm_init(void) { } + +#endif + static int perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { @@ -877,18 +920,12 @@ static __init int amd_ibs_init(void) if (!caps) return -ENODEV; /* ibs not supported by the cpu */ - /* - * Force LVT offset assignment for family 10h: The offsets are - * not assigned by the BIOS for this family, so the OS is - * responsible for doing it. If the OS assignment fails, fall - * back to BIOS settings and try to setup this. - */ - if (boot_cpu_data.x86 == 0x10) - force_ibs_eilvt_setup(); + ibs_eilvt_setup(); if (!ibs_eilvt_valid()) goto out; + perf_ibs_pm_init(); get_online_cpus(); ibs_caps = caps; /* make ibs_caps visible to other cpus: */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c new file mode 100644 index 000000000000..5ad35ad94d0f --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -0,0 +1,679 @@ +/* + * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters + * Copyright (C) 2013 Google, Inc., Stephane Eranian + * + * Intel RAPL interface is specified in the IA-32 Manual Vol3b + * section 14.7.1 (September 2013) + * + * RAPL provides more controls than just reporting energy consumption + * however here we only expose the 3 energy consumption free running + * counters (pp0, pkg, dram). + * + * Each of those counters increments in a power unit defined by the + * RAPL_POWER_UNIT MSR. 
On SandyBridge, this unit is 1/(2^16) Joules + * but it can vary. + * + * Counter to rapl events mappings: + * + * pp0 counter: consumption of all physical cores (power plane 0) + * event: rapl_energy_cores + * perf code: 0x1 + * + * pkg counter: consumption of the whole processor package + * event: rapl_energy_pkg + * perf code: 0x2 + * + * dram counter: consumption of the dram domain (servers only) + * event: rapl_energy_dram + * perf code: 0x3 + * + * dram counter: consumption of the builtin-gpu domain (client only) + * event: rapl_energy_gpu + * perf code: 0x4 + * + * We manage those counters as free running (read-only). They may be + * use simultaneously by other tools, such as turbostat. + * + * The events only support system-wide mode counting. There is no + * sampling support because it does not make sense and is not + * supported by the RAPL hardware. + * + * Because we want to avoid floating-point operations in the kernel, + * the events are all reported in fixed point arithmetic (32.32). + * Tools must adjust the counts to convert them to Watts using + * the duration of the measurement. Tools may use a function such as + * ldexp(raw_count, -32); + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/perf_event.h> +#include <asm/cpu_device_id.h> +#include "perf_event.h" + +/* + * RAPL energy status counters + */ +#define RAPL_IDX_PP0_NRG_STAT 0 /* all cores */ +#define INTEL_RAPL_PP0 0x1 /* pseudo-encoding */ +#define RAPL_IDX_PKG_NRG_STAT 1 /* entire package */ +#define INTEL_RAPL_PKG 0x2 /* pseudo-encoding */ +#define RAPL_IDX_RAM_NRG_STAT 2 /* DRAM */ +#define INTEL_RAPL_RAM 0x3 /* pseudo-encoding */ +#define RAPL_IDX_PP1_NRG_STAT 3 /* DRAM */ +#define INTEL_RAPL_PP1 0x4 /* pseudo-encoding */ + +/* Clients have PP0, PKG */ +#define RAPL_IDX_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\ + 1<<RAPL_IDX_PKG_NRG_STAT|\ + 1<<RAPL_IDX_PP1_NRG_STAT) + +/* Servers have PP0, PKG, RAM */ +#define RAPL_IDX_SRV (1<<RAPL_IDX_PP0_NRG_STAT|\ + 1<<RAPL_IDX_PKG_NRG_STAT|\ + 1<<RAPL_IDX_RAM_NRG_STAT) + +/* + * event code: LSB 8 bits, passed in attr->config + * any other bit is reserved + */ +#define RAPL_EVENT_MASK 0xFFULL + +#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __rapl_##_var##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct kobj_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __rapl_##_var##_show, NULL) + +#define RAPL_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, rapl_event_show, NULL), \ + .config = _config, \ +} + +#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ + +struct rapl_pmu { + spinlock_t lock; + int hw_unit; /* 1/2^hw_unit Joule */ + int n_active; /* number of active events */ + struct list_head active_list; + struct pmu *pmu; /* pointer to rapl_pmu_class */ + ktime_t timer_interval; /* in ktime_t unit */ + struct hrtimer hrtimer; +}; + +static struct pmu rapl_pmu_class; +static cpumask_t rapl_cpu_mask; +static int rapl_cntr_mask; + +static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu); +static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free); + +static inline u64 rapl_read_counter(struct perf_event *event) +{ + u64 raw; + rdmsrl(event->hw.event_base, raw); + return raw; +} + +static inline u64 rapl_scale(u64 v) +{ + /* + * scale delta to smallest unit (1/2^32) + * users must then scale back: count * 1/(1e9*2^32) to get Joules + * or use ldexp(count, -32). 
+ * Watts = Joules/Time delta + */ + return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit); +} + +static u64 rapl_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 prev_raw_count, new_raw_count; + s64 delta, sdelta; + int shift = RAPL_CNTR_WIDTH; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + rdmsrl(event->hw.event_base, new_raw_count); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) { + cpu_relax(); + goto again; + } + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (event-)time and add that to the generic event. + * + * Careful, not all hw sign-extends above the physical width + * of the count. + */ + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + sdelta = rapl_scale(delta); + + local64_add(sdelta, &event->count); + + return new_raw_count; +} + +static void rapl_start_hrtimer(struct rapl_pmu *pmu) +{ + __hrtimer_start_range_ns(&pmu->hrtimer, + pmu->timer_interval, 0, + HRTIMER_MODE_REL_PINNED, 0); +} + +static void rapl_stop_hrtimer(struct rapl_pmu *pmu) +{ + hrtimer_cancel(&pmu->hrtimer); +} + +static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct perf_event *event; + unsigned long flags; + + if (!pmu->n_active) + return HRTIMER_NORESTART; + + spin_lock_irqsave(&pmu->lock, flags); + + list_for_each_entry(event, &pmu->active_list, active_entry) { + rapl_event_update(event); + } + + spin_unlock_irqrestore(&pmu->lock, flags); + + hrtimer_forward_now(hrtimer, pmu->timer_interval); + + return HRTIMER_RESTART; +} + +static void rapl_hrtimer_init(struct rapl_pmu *pmu) +{ + struct hrtimer *hr = &pmu->hrtimer; + + hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hr->function = rapl_hrtimer_handle; +} + +static void __rapl_pmu_event_start(struct rapl_pmu *pmu, + struct perf_event *event) +{ + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + + list_add_tail(&event->active_entry, &pmu->active_list); + + local64_set(&event->hw.prev_count, rapl_read_counter(event)); + + pmu->n_active++; + if (pmu->n_active == 1) + rapl_start_hrtimer(pmu); +} + +static void rapl_pmu_event_start(struct perf_event *event, int mode) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + __rapl_pmu_event_start(pmu, event); + spin_unlock_irqrestore(&pmu->lock, flags); +} + +static void rapl_pmu_event_stop(struct perf_event *event, int mode) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + + /* mark event as deactivated and stopped */ + if (!(hwc->state & PERF_HES_STOPPED)) { + WARN_ON_ONCE(pmu->n_active <= 0); + pmu->n_active--; + if (pmu->n_active == 0) + rapl_stop_hrtimer(pmu); + + list_del(&event->active_entry); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + } + + /* check if update of sw counter is necessary */ + if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of a event + * that we are disabling: + */ + rapl_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } + + spin_unlock_irqrestore(&pmu->lock, flags); +} + +static int rapl_pmu_event_add(struct perf_event *event, int mode) +{ + struct 
rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (mode & PERF_EF_START) + __rapl_pmu_event_start(pmu, event); + + spin_unlock_irqrestore(&pmu->lock, flags); + + return 0; +} + +static void rapl_pmu_event_del(struct perf_event *event, int flags) +{ + rapl_pmu_event_stop(event, PERF_EF_UPDATE); +} + +static int rapl_pmu_event_init(struct perf_event *event) +{ + u64 cfg = event->attr.config & RAPL_EVENT_MASK; + int bit, msr, ret = 0; + + /* only look at RAPL events */ + if (event->attr.type != rapl_pmu_class.type) + return -ENOENT; + + /* check only supported bits are set */ + if (event->attr.config & ~RAPL_EVENT_MASK) + return -EINVAL; + + /* + * check event is known (determines counter) + */ + switch (cfg) { + case INTEL_RAPL_PP0: + bit = RAPL_IDX_PP0_NRG_STAT; + msr = MSR_PP0_ENERGY_STATUS; + break; + case INTEL_RAPL_PKG: + bit = RAPL_IDX_PKG_NRG_STAT; + msr = MSR_PKG_ENERGY_STATUS; + break; + case INTEL_RAPL_RAM: + bit = RAPL_IDX_RAM_NRG_STAT; + msr = MSR_DRAM_ENERGY_STATUS; + break; + case INTEL_RAPL_PP1: + bit = RAPL_IDX_PP1_NRG_STAT; + msr = MSR_PP1_ENERGY_STATUS; + break; + default: + return -EINVAL; + } + /* check event supported */ + if (!(rapl_cntr_mask & (1 << bit))) + return -EINVAL; + + /* unsupported modes and filters */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest || + event->attr.sample_period) /* no sampling */ + return -EINVAL; + + /* must be done before validate_group */ + event->hw.event_base = msr; + event->hw.config = cfg; + event->hw.idx = bit; + + return ret; +} + +static void rapl_pmu_event_read(struct perf_event *event) +{ + rapl_event_update(event); +} + +static ssize_t rapl_get_attr_cpumask(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &rapl_cpu_mask); + + buf[n++] = '\n'; + buf[n] = '\0'; + return n; +} + +static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL); + +static struct attribute *rapl_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group rapl_pmu_attr_group = { + .attrs = rapl_pmu_attrs, +}; + +EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); +EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); +EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); +EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); + +EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); +EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); +EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); +EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); + +/* + * we compute in 0.23 nJ increments regardless of MSR + */ +EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10"); +EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10"); +EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10"); +EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); + +static struct attribute *rapl_events_srv_attr[] = { + EVENT_PTR(rapl_cores), + EVENT_PTR(rapl_pkg), + EVENT_PTR(rapl_ram), + + EVENT_PTR(rapl_cores_unit), + EVENT_PTR(rapl_pkg_unit), + EVENT_PTR(rapl_ram_unit), + + EVENT_PTR(rapl_cores_scale), + EVENT_PTR(rapl_pkg_scale), + EVENT_PTR(rapl_ram_scale), + NULL, 
+}; + +static struct attribute *rapl_events_cln_attr[] = { + EVENT_PTR(rapl_cores), + EVENT_PTR(rapl_pkg), + EVENT_PTR(rapl_gpu), + + EVENT_PTR(rapl_cores_unit), + EVENT_PTR(rapl_pkg_unit), + EVENT_PTR(rapl_gpu_unit), + + EVENT_PTR(rapl_cores_scale), + EVENT_PTR(rapl_pkg_scale), + EVENT_PTR(rapl_gpu_scale), + NULL, +}; + +static struct attribute_group rapl_pmu_events_group = { + .name = "events", + .attrs = NULL, /* patched at runtime */ +}; + +DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7"); +static struct attribute *rapl_formats_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group rapl_pmu_format_group = { + .name = "format", + .attrs = rapl_formats_attr, +}; + +const struct attribute_group *rapl_attr_groups[] = { + &rapl_pmu_attr_group, + &rapl_pmu_format_group, + &rapl_pmu_events_group, + NULL, +}; + +static struct pmu rapl_pmu_class = { + .attr_groups = rapl_attr_groups, + .task_ctx_nr = perf_invalid_context, /* system-wide only */ + .event_init = rapl_pmu_event_init, + .add = rapl_pmu_event_add, /* must have */ + .del = rapl_pmu_event_del, /* must have */ + .start = rapl_pmu_event_start, + .stop = rapl_pmu_event_stop, + .read = rapl_pmu_event_read, +}; + +static void rapl_cpu_exit(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + int i, phys_id = topology_physical_package_id(cpu); + int target = -1; + + /* find a new cpu on same package */ + for_each_online_cpu(i) { + if (i == cpu) + continue; + if (phys_id == topology_physical_package_id(i)) { + target = i; + break; + } + } + /* + * clear cpu from cpumask + * if was set in cpumask and still some cpu on package, + * then move to new cpu + */ + if (cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask) && target >= 0) + cpumask_set_cpu(target, &rapl_cpu_mask); + + WARN_ON(cpumask_empty(&rapl_cpu_mask)); + /* + * migrate events and context to new cpu + */ + if (target >= 0) + perf_pmu_migrate_context(pmu->pmu, cpu, target); + + /* cancel overflow polling timer for CPU */ + rapl_stop_hrtimer(pmu); +} + +static void rapl_cpu_init(int cpu) +{ + int i, phys_id = topology_physical_package_id(cpu); + + /* check if phys_is is already covered */ + for_each_cpu(i, &rapl_cpu_mask) { + if (phys_id == topology_physical_package_id(i)) + return; + } + /* was not found, so add it */ + cpumask_set_cpu(cpu, &rapl_cpu_mask); +} + +static int rapl_cpu_prepare(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + int phys_id = topology_physical_package_id(cpu); + u64 ms; + + if (pmu) + return 0; + + if (phys_id < 0) + return -1; + + pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); + if (!pmu) + return -1; + + spin_lock_init(&pmu->lock); + + INIT_LIST_HEAD(&pmu->active_list); + + /* + * grab power unit as: 1/2^unit Joules + * + * we cache in local PMU instance + */ + rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit); + pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL; + pmu->pmu = &rapl_pmu_class; + + /* + * use reference of 200W for scaling the timeout + * to avoid missing counter overflows. 
+ * 200W = 200 Joules/sec + * divide interval by 2 to avoid lockstep (2 * 100) + * if hw unit is 32, then we use 2 ms 1/200/2 + */ + if (pmu->hw_unit < 32) + ms = (1000 / (2 * 100)) * (1ULL << (32 - pmu->hw_unit - 1)); + else + ms = 2; + + pmu->timer_interval = ms_to_ktime(ms); + + rapl_hrtimer_init(pmu); + + /* set RAPL pmu for this cpu for now */ + per_cpu(rapl_pmu, cpu) = pmu; + per_cpu(rapl_pmu_to_free, cpu) = NULL; + + return 0; +} + +static void rapl_cpu_kfree(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu); + + kfree(pmu); + + per_cpu(rapl_pmu_to_free, cpu) = NULL; +} + +static int rapl_cpu_dying(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + + if (!pmu) + return 0; + + per_cpu(rapl_pmu, cpu) = NULL; + + per_cpu(rapl_pmu_to_free, cpu) = pmu; + + return 0; +} + +static int rapl_cpu_notifier(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + rapl_cpu_prepare(cpu); + break; + case CPU_STARTING: + rapl_cpu_init(cpu); + break; + case CPU_UP_CANCELED: + case CPU_DYING: + rapl_cpu_dying(cpu); + break; + case CPU_ONLINE: + case CPU_DEAD: + rapl_cpu_kfree(cpu); + break; + case CPU_DOWN_PREPARE: + rapl_cpu_exit(cpu); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static const struct x86_cpu_id rapl_cpu_match[] = { + [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 }, + [1] = {}, +}; + +static int __init rapl_pmu_init(void) +{ + struct rapl_pmu *pmu; + int cpu, ret; + + /* + * check for Intel processor family 6 + */ + if (!x86_match_cpu(rapl_cpu_match)) + return 0; + + /* check supported CPU */ + switch (boot_cpu_data.x86_model) { + case 42: /* Sandy Bridge */ + case 58: /* Ivy Bridge */ + case 60: /* Haswell */ + case 69: /* Haswell-Celeron */ + rapl_cntr_mask = RAPL_IDX_CLN; + rapl_pmu_events_group.attrs = rapl_events_cln_attr; + break; + case 45: /* Sandy Bridge-EP */ + case 62: /* IvyTown */ + rapl_cntr_mask = RAPL_IDX_SRV; + rapl_pmu_events_group.attrs = rapl_events_srv_attr; + break; + + default: + /* unsupported */ + return 0; + } + get_online_cpus(); + + for_each_online_cpu(cpu) { + rapl_cpu_prepare(cpu); + rapl_cpu_init(cpu); + } + + perf_cpu_notifier(rapl_cpu_notifier); + + ret = perf_pmu_register(&rapl_pmu_class, "power", -1); + if (WARN_ON(ret)) { + pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret); + put_online_cpus(); + return -1; + } + + pmu = __get_cpu_var(rapl_pmu); + + pr_info("RAPL PMU detected, hw unit 2^-%d Joules," + " API unit is 2^-32 Joules," + " %d fixed counters" + " %llu ms ovfl timer\n", + pmu->hw_unit, + hweight32(rapl_cntr_mask), + ktime_to_ms(pmu->timer_interval)); + + put_online_cpus(); + + return 0; +} +device_initcall(rapl_pmu_init); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 51e2988c5728..a2a4f4697889 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller) pushl $0 /* Pass NULL as regs pointer */ movl 4*4(%esp), %eax movl 0x4(%ebp), %edx - leal function_trace_op, %ecx + movl function_trace_op, %ecx subl $MCOUNT_INSN_SIZE, %eax .globl ftrace_call @@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller) movl 12*4(%esp), %eax /* Load ip (1st parameter) */ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ - leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + movl function_trace_op, %ecx /* Save ftrace_pos in 3rd 
parameter */ pushl %esp /* Save pt_regs as 4th parameter */ GLOBAL(ftrace_regs_call) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e21b0785a85b..1e96c3628bf2 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -88,7 +88,7 @@ END(function_hook) MCOUNT_SAVE_FRAME \skip /* Load the ftrace_ops into the 3rd parameter */ - leaq function_trace_op, %rdx + movq function_trace_op(%rip), %rdx /* Load ip into the first parameter */ movq RIP(%rsp), %rdi diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 22d0687e7fda..884d875c1434 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -193,9 +193,13 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) if (!handle_irq(irq, regs)) { ack_APIC_irq(); - if (printk_ratelimit()) - pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n", - __func__, smp_processor_id(), vector, irq); + if (irq != VECTOR_RETRIGGERED) { + pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n", + __func__, smp_processor_id(), + vector, irq); + } else { + __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); + } } irq_exit(); @@ -344,7 +348,7 @@ void fixup_irqs(void) for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { unsigned int irr; - if (__this_cpu_read(vector_irq[vector]) < 0) + if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED) continue; irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); @@ -355,11 +359,14 @@ void fixup_irqs(void) data = irq_desc_get_irq_data(desc); chip = irq_data_get_irq_chip(data); raw_spin_lock(&desc->lock); - if (chip->irq_retrigger) + if (chip->irq_retrigger) { chip->irq_retrigger(data); + __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED); + } raw_spin_unlock(&desc->lock); } - __this_cpu_write(vector_irq[vector], -1); + if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED) + __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); } } #endif diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index a2a1fbc594ff..7f50156542fb 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -52,7 +52,7 @@ static struct irqaction irq2 = { }; DEFINE_PER_CPU(vector_irq_t, vector_irq) = { - [0 ... NR_VECTORS - 1] = -1, + [0 ... NR_VECTORS - 1] = VECTOR_UNDEFINED, }; int vector_used_by_percpu_irq(unsigned int vector) @@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector) int cpu; for_each_online_cpu(cpu) { - if (per_cpu(vector_irq, cpu)[vector] != -1) + if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED) return 1; } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 85dc05a3aa02..f5252c4eec8c 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1417,7 +1417,9 @@ static inline void mwait_play_dead(void) * The WBINVD is insufficient due to the spurious-wakeup * case where we return around the loop. 
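
In the entry_32.S and entry_64.S hunks above, the third argument handed to the ftrace callback changes from leal/leaq (which yields the address of the function_trace_op variable) to movl/movq (which yields the ftrace_ops pointer stored in that variable). A minimal userspace sketch of that distinction, with illustrative names only (not the kernel's actual types or callback signature):

    #include <stdio.h>

    struct ftrace_ops { int dummy; };

    static struct ftrace_ops my_ops;
    static struct ftrace_ops *function_trace_op = &my_ops;  /* variable holding the pointer */

    static void callback(struct ftrace_ops *op)
    {
        printf("op = %p (expected %p)\n", (void *)op, (void *)&my_ops);
    }

    int main(void)
    {
        /* old form, roughly "leal function_trace_op, %ecx": the address of the
         * variable itself is passed, which is not a struct ftrace_ops */
        callback((struct ftrace_ops *)&function_trace_op);

        /* new form, roughly "movl function_trace_op, %ecx": the pointer stored
         * in the variable is passed, which is what the callback expects */
        callback(function_trace_op);
        return 0;
    }
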
*/ + mb(); clflush(mwait_ptr); + mb(); __monitor(mwait_ptr, 0, 0); mb(); __mwait(eax, 0); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 930e5d48f560..6377fb28b958 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -11,6 +11,7 @@ #include <linux/clocksource.h> #include <linux/percpu.h> #include <linux/timex.h> +#include <linux/static_key.h> #include <asm/hpet.h> #include <asm/timer.h> @@ -37,13 +38,244 @@ static int __read_mostly tsc_unstable; erroneous rdtsc usage on !cpu_has_tsc processors */ static int __read_mostly tsc_disabled = -1; +static struct static_key __use_tsc = STATIC_KEY_INIT; + int tsc_clocksource_reliable; + +/* + * Use a ring-buffer like data structure, where a writer advances the head by + * writing a new data entry and a reader advances the tail when it observes a + * new entry. + * + * Writers are made to wait on readers until there's space to write a new + * entry. + * + * This means that we can always use an {offset, mul} pair to compute a ns + * value that is 'roughly' in the right direction, even if we're writing a new + * {offset, mul} pair during the clock read. + * + * The down-side is that we can no longer guarantee strict monotonicity anymore + * (assuming the TSC was that to begin with), because while we compute the + * intersection point of the two clock slopes and make sure the time is + * continuous at the point of switching; we can no longer guarantee a reader is + * strictly before or after the switch point. + * + * It does mean a reader no longer needs to disable IRQs in order to avoid + * CPU-Freq updates messing with his times, and similarly an NMI reader will + * no longer run the risk of hitting half-written state. + */ + +struct cyc2ns { + struct cyc2ns_data data[2]; /* 0 + 2*24 = 48 */ + struct cyc2ns_data *head; /* 48 + 8 = 56 */ + struct cyc2ns_data *tail; /* 56 + 8 = 64 */ +}; /* exactly fits one cacheline */ + +static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns); + +struct cyc2ns_data *cyc2ns_read_begin(void) +{ + struct cyc2ns_data *head; + + preempt_disable(); + + head = this_cpu_read(cyc2ns.head); + /* + * Ensure we observe the entry when we observe the pointer to it. + * matches the wmb from cyc2ns_write_end(). + */ + smp_read_barrier_depends(); + head->__count++; + barrier(); + + return head; +} + +void cyc2ns_read_end(struct cyc2ns_data *head) +{ + barrier(); + /* + * If we're the outer most nested read; update the tail pointer + * when we're done. This notifies possible pending writers + * that we've observed the head pointer and that the other + * entry is now free. + */ + if (!--head->__count) { + /* + * x86-TSO does not reorder writes with older reads; + * therefore once this write becomes visible to another + * cpu, we must be finished reading the cyc2ns_data. + * + * matches with cyc2ns_write_begin(). + */ + this_cpu_write(cyc2ns.tail, head); + } + preempt_enable(); +} + +/* + * Begin writing a new @data entry for @cpu. + * + * Assumes some sort of write side lock; currently 'provided' by the assumption + * that cpufreq will call its notifiers sequentially. + */ +static struct cyc2ns_data *cyc2ns_write_begin(int cpu) +{ + struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); + struct cyc2ns_data *data = c2n->data; + + if (data == c2n->head) + data++; + + /* XXX send an IPI to @cpu in order to guarantee a read? */ + + /* + * When we observe the tail write from cyc2ns_read_end(), + * the cpu must be done with that entry and its safe + * to start writing to it. 
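
The cyc2ns comments above describe a two-slot scheme: readers always use the slot `head` points at and retire it through `tail`; the writer fills the slot readers are not looking at and waits until `tail` has moved off it. A single-threaded toy model of just that slot hand-off (hypothetical names; the barriers, preemption control and per-CPU placement of the real code are deliberately omitted):

    #include <stdio.h>

    struct slot { unsigned int mul, shift; long long offset; int count; };

    static struct slot slots[2];
    static struct slot *head = &slots[0];   /* slot readers currently use */
    static struct slot *tail = &slots[0];   /* last slot a reader finished with */

    static struct slot *read_begin(void)
    {
        struct slot *s = head;
        s->count++;                     /* nested readers just bump the count */
        return s;
    }

    static void read_end(struct slot *s)
    {
        if (!--s->count)
            tail = s;                   /* outermost reader: tell the writer */
    }

    static struct slot *write_begin(void)
    {
        struct slot *s = &slots[0];

        if (s == head)                  /* never write the slot readers see */
            s = &slots[1];
        while (tail == s)               /* wait for the last reader to move on */
            ;                           /* cpu_relax() in the real code */
        return s;
    }

    static void write_end(struct slot *s)
    {
        head = s;                       /* publish; the real code fences first */
    }

    int main(void)
    {
        struct slot *r = read_begin();  /* reader pinned to slots[0] */
        struct slot *w = write_begin(); /* writer gets slots[1] */

        w->mul = 1234;
        w->shift = 10;
        w->offset = 0;
        write_end(w);                   /* new readers now see slots[1] */
        read_end(r);                    /* old reader retires slots[0] */

        printf("head=slots[%d], tail=slots[%d]\n",
               (int)(head - slots), (int)(tail - slots));
        return 0;
    }
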
+ */ + while (c2n->tail == data) + cpu_relax(); + + return data; +} + +static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data) +{ + struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); + + /* + * Ensure the @data writes are visible before we publish the + * entry. Matches the data-depencency in cyc2ns_read_begin(). + */ + smp_wmb(); + + ACCESS_ONCE(c2n->head) = data; +} + +/* + * Accelerators for sched_clock() + * convert from cycles(64bits) => nanoseconds (64bits) + * basic equation: + * ns = cycles / (freq / ns_per_sec) + * ns = cycles * (ns_per_sec / freq) + * ns = cycles * (10^9 / (cpu_khz * 10^3)) + * ns = cycles * (10^6 / cpu_khz) + * + * Then we use scaling math (suggested by george@mvista.com) to get: + * ns = cycles * (10^6 * SC / cpu_khz) / SC + * ns = cycles * cyc2ns_scale / SC + * + * And since SC is a constant power of two, we can convert the div + * into a shift. + * + * We can use khz divisor instead of mhz to keep a better precision, since + * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. + * (mathieu.desnoyers@polymtl.ca) + * + * -johnstul@us.ibm.com "math is hard, lets go shopping!" + */ + +#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ + +static void cyc2ns_data_init(struct cyc2ns_data *data) +{ + data->cyc2ns_mul = 1U << CYC2NS_SCALE_FACTOR; + data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; + data->cyc2ns_offset = 0; + data->__count = 0; +} + +static void cyc2ns_init(int cpu) +{ + struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); + + cyc2ns_data_init(&c2n->data[0]); + cyc2ns_data_init(&c2n->data[1]); + + c2n->head = c2n->data; + c2n->tail = c2n->data; +} + +static inline unsigned long long cycles_2_ns(unsigned long long cyc) +{ + struct cyc2ns_data *data, *tail; + unsigned long long ns; + + /* + * See cyc2ns_read_*() for details; replicated in order to avoid + * an extra few instructions that came with the abstraction. + * Notable, it allows us to only do the __count and tail update + * dance when its actually needed. + */ + + preempt_disable(); + data = this_cpu_read(cyc2ns.head); + tail = this_cpu_read(cyc2ns.tail); + + if (likely(data == tail)) { + ns = data->cyc2ns_offset; + ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); + } else { + data->__count++; + + barrier(); + + ns = data->cyc2ns_offset; + ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); + + barrier(); + + if (!--data->__count) + this_cpu_write(cyc2ns.tail, data); + } + preempt_enable(); + + return ns; +} + +/* XXX surely we already have this someplace in the kernel?! */ +#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d)) + +static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) +{ + unsigned long long tsc_now, ns_now; + struct cyc2ns_data *data; + unsigned long flags; + + local_irq_save(flags); + sched_clock_idle_sleep_event(); + + if (!cpu_khz) + goto done; + + data = cyc2ns_write_begin(cpu); + + rdtscll(tsc_now); + ns_now = cycles_2_ns(tsc_now); + + /* + * Compute a new multiplier as per the above comment and ensure our + * time function is continuous; see the comment near struct + * cyc2ns_data. + */ + data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz); + data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; + data->cyc2ns_offset = ns_now - + mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); + + cyc2ns_write_end(cpu, data); + +done: + sched_clock_idle_wakeup_event(0); + local_irq_restore(flags); +} /* * Scheduler clock - returns current time in nanosec units. 
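
The scaling comment above reduces the cycles-to-ns conversion to a multiply and a shift: mul = (10^6 << 10) / cpu_khz, then ns = (cyc * mul) >> 10 plus an offset. A standalone check of that arithmetic, assuming a hypothetical 2.9 GHz part and using a GCC/Clang 128-bit intermediate as a stand-in for the kernel's mul_u64_u32_shr():

    #include <stdio.h>
    #include <stdint.h>

    #define CYC2NS_SCALE_FACTOR 10
    #define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))

    /* Stand-in for mul_u64_u32_shr(): full 64x32 multiply, then shift. */
    static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
    {
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
    }

    int main(void)
    {
        uint64_t cpu_khz = 2900000;        /* hypothetical 2.9 GHz CPU */
        uint32_t mul = DIV_ROUND(1000000ULL << CYC2NS_SCALE_FACTOR, cpu_khz);
        uint64_t cycles = 29000000000ULL;  /* ten seconds worth of cycles */

        uint64_t ns_scaled = mul_u64_u32_shr(cycles, mul, CYC2NS_SCALE_FACTOR);
        uint64_t ns_exact  = cycles * 1000000ULL / cpu_khz;

        printf("cyc2ns_mul = %u\n", mul);
        printf("scaled: %llu ns, exact: %llu ns\n",
               (unsigned long long)ns_scaled, (unsigned long long)ns_exact);
        return 0;
    }

For this cpu_khz the rounded multiplier leaves the scaled result about 0.03% short of exact division, which is the precision trade-off of the 2^10 scale factor the comment alludes to.
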
*/ u64 native_sched_clock(void) { - u64 this_offset; + u64 tsc_now; /* * Fall back to jiffies if there's no TSC available: @@ -53,16 +285,16 @@ u64 native_sched_clock(void) * very important for it to be as fast as the platform * can achieve it. ) */ - if (unlikely(tsc_disabled)) { + if (!static_key_false(&__use_tsc)) { /* No locking but a rare wrong value is not a big deal: */ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); } /* read the Time Stamp Counter: */ - rdtscll(this_offset); + rdtscll(tsc_now); /* return the value in ns */ - return __cycles_2_ns(this_offset); + return cycles_2_ns(tsc_now); } /* We need to define a real function for sched_clock, to override the @@ -589,61 +821,11 @@ int recalibrate_cpu_khz(void) EXPORT_SYMBOL(recalibrate_cpu_khz); -/* Accelerators for sched_clock() - * convert from cycles(64bits) => nanoseconds (64bits) - * basic equation: - * ns = cycles / (freq / ns_per_sec) - * ns = cycles * (ns_per_sec / freq) - * ns = cycles * (10^9 / (cpu_khz * 10^3)) - * ns = cycles * (10^6 / cpu_khz) - * - * Then we use scaling math (suggested by george@mvista.com) to get: - * ns = cycles * (10^6 * SC / cpu_khz) / SC - * ns = cycles * cyc2ns_scale / SC - * - * And since SC is a constant power of two, we can convert the div - * into a shift. - * - * We can use khz divisor instead of mhz to keep a better precision, since - * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. - * (mathieu.desnoyers@polymtl.ca) - * - * -johnstul@us.ibm.com "math is hard, lets go shopping!" - */ - -DEFINE_PER_CPU(unsigned long, cyc2ns); -DEFINE_PER_CPU(unsigned long long, cyc2ns_offset); - -static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) -{ - unsigned long long tsc_now, ns_now, *offset; - unsigned long flags, *scale; - - local_irq_save(flags); - sched_clock_idle_sleep_event(); - - scale = &per_cpu(cyc2ns, cpu); - offset = &per_cpu(cyc2ns_offset, cpu); - - rdtscll(tsc_now); - ns_now = __cycles_2_ns(tsc_now); - - if (cpu_khz) { - *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) + - cpu_khz / 2) / cpu_khz; - *offset = ns_now - mult_frac(tsc_now, *scale, - (1UL << CYC2NS_SCALE_FACTOR)); - } - - sched_clock_idle_wakeup_event(0); - local_irq_restore(flags); -} - static unsigned long long cyc2ns_suspend; void tsc_save_sched_clock_state(void) { - if (!sched_clock_stable) + if (!sched_clock_stable()) return; cyc2ns_suspend = sched_clock(); @@ -663,16 +845,26 @@ void tsc_restore_sched_clock_state(void) unsigned long flags; int cpu; - if (!sched_clock_stable) + if (!sched_clock_stable()) return; local_irq_save(flags); - __this_cpu_write(cyc2ns_offset, 0); + /* + * We're comming out of suspend, there's no concurrency yet; don't + * bother being nice about the RCU stuff, just write to both + * data fields. + */ + + this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0); + this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0); + offset = cyc2ns_suspend - sched_clock(); - for_each_possible_cpu(cpu) - per_cpu(cyc2ns_offset, cpu) = offset; + for_each_possible_cpu(cpu) { + per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset; + per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset; + } local_irq_restore(flags); } @@ -795,7 +987,7 @@ void mark_tsc_unstable(char *reason) { if (!tsc_unstable) { tsc_unstable = 1; - sched_clock_stable = 0; + clear_sched_clock_stable(); disable_sched_clock_irqtime(); pr_info("Marking TSC unstable due to %s\n", reason); /* Change only the rating, when not registered */ @@ -995,14 +1187,18 @@ void __init tsc_init(void) * speed as the bootup CPU. 
(cpufreq notifiers will fix this * up if their speed diverges) */ - for_each_possible_cpu(cpu) + for_each_possible_cpu(cpu) { + cyc2ns_init(cpu); set_cyc2ns_scale(cpu_khz, cpu); + } if (tsc_disabled > 0) return; /* now allow native_sched_clock() to use rdtsc */ + tsc_disabled = 0; + static_key_slow_inc(&__use_tsc); if (!no_sched_irq_time) enable_sched_clock_irqtime(); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 1673940cf9c3..775702f649ca 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1355,7 +1355,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) vcpu->arch.apic_base = value; /* update jump label if enable bit changes */ - if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { + if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) { if (value & MSR_IA32_APICBASE_ENABLE) static_key_slow_dec_deferred(&apic_hw_disabled); else diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9ff85bb8dd69..9d591c895803 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -641,6 +641,20 @@ no_context(struct pt_regs *regs, unsigned long error_code, /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) { + /* + * Any interrupt that takes a fault gets the fixup. This makes + * the below recursive fault logic only apply to a faults from + * task context. + */ + if (in_interrupt()) + return; + + /* + * Per the above we're !in_interrupt(), aka. task context. + * + * In this case we need to make sure we're not recursively + * faulting through the emulate_vsyscall() logic. + */ if (current_thread_info()->sig_on_uaccess_error && signal) { tsk->thread.trap_nr = X86_TRAP_PF; tsk->thread.error_code = error_code | PF_USER; @@ -649,6 +663,10 @@ no_context(struct pt_regs *regs, unsigned long error_code, /* XXX: hwpoison faults will set the wrong code. */ force_sig_info_fault(signal, si_code, address, tsk, 0); } + + /* + * Barring that, we can do the fixup and be happy. + */ return; } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 26328e800869..4ed75dd81d05 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp) EMIT2(0x89, 0xd0); /* mov %edx,%eax */ break; case BPF_S_ALU_MOD_K: /* A %= K; */ + if (K == 1) { + CLEAR_A(); + break; + } EMIT2(0x31, 0xd2); /* xor %edx,%edx */ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ EMIT2(0xf7, 0xf1); /* div %ecx */ EMIT2(0x89, 0xd0); /* mov %edx,%eax */ break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ - EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */ - EMIT(K, 4); - EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */ + case BPF_S_ALU_DIV_K: /* A /= K */ + if (K == 1) + break; + EMIT2(0x31, 0xd2); /* xor %edx,%edx */ + EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ + EMIT2(0xf7, 0xf1); /* div %ecx */ break; case BPF_S_ALU_AND_X: seen |= SEEN_XREG; diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index efe4d7220397..dfe605ac1bcd 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -433,15 +433,49 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) return; } -static inline unsigned long cycles_2_us(unsigned long long cyc) +/* + * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative + * number, not an absolute. It converts a duration in cycles to a duration in + * ns. 
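
The bpf_jit_comp.c hunk above replaces the reciprocal-multiply sequence for A /= K with a real div instruction and short-circuits K == 1 for both the divide and the modulo. The sketch below uses hypothetical helper names, with the old reciprocal computed roughly the way reciprocal_value() did at the time; it shows one dividend/divisor pair where the multiply-and-shift approximation is off by one while the new code is exact:

    #include <stdio.h>
    #include <stdint.h>

    /* Old JIT output for A /= K: multiply by a precomputed ~2^32/K and shift. */
    static uint32_t div_by_reciprocal(uint32_t A, uint32_t K)
    {
        uint32_t R = (uint32_t)(((1ULL << 32) + K - 1) / K);

        return (uint32_t)(((uint64_t)A * R) >> 32);
    }

    /* New JIT behaviour: a real unsigned divide, K == 1 short-circuited. */
    static uint32_t div_k(uint32_t A, uint32_t K)
    {
        return (K == 1) ? A : A / K;
    }

    static uint32_t mod_k(uint32_t A, uint32_t K)
    {
        return (K == 1) ? 0 : A % K;   /* K == 1: CLEAR_A() in the hunk */
    }

    int main(void)
    {
        uint32_t A = 65536, K = 65537;

        printf("A/K exact      = %u\n", A / K);                   /* 0 */
        printf("A/K reciprocal = %u\n", div_by_reciprocal(A, K)); /* 1 */
        printf("A/K new JIT    = %u\n", div_k(A, K));             /* 0 */
        printf("A%%K new JIT    = %u\n", mod_k(A, K));            /* 65536 */
        return 0;
    }
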
+ */ +static inline unsigned long long cycles_2_ns(unsigned long long cyc) { + struct cyc2ns_data *data = cyc2ns_read_begin(); unsigned long long ns; - unsigned long us; - int cpu = smp_processor_id(); - ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR; - us = ns / 1000; - return us; + ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift); + + cyc2ns_read_end(data); + return ns; +} + +/* + * The reverse of the above; converts a duration in ns to a duration in cycles. + */ +static inline unsigned long long ns_2_cycles(unsigned long long ns) +{ + struct cyc2ns_data *data = cyc2ns_read_begin(); + unsigned long long cyc; + + cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul; + + cyc2ns_read_end(data); + return cyc; +} + +static inline unsigned long cycles_2_us(unsigned long long cyc) +{ + return cycles_2_ns(cyc) / NSEC_PER_USEC; +} + +static inline cycles_t sec_2_cycles(unsigned long sec) +{ + return ns_2_cycles(sec * NSEC_PER_SEC); +} + +static inline unsigned long long usec_2_cycles(unsigned long usec) +{ + return ns_2_cycles(usec * NSEC_PER_USEC); } /* @@ -668,16 +702,6 @@ static int wait_completion(struct bau_desc *bau_desc, bcp, try); } -static inline cycles_t sec_2_cycles(unsigned long sec) -{ - unsigned long ns; - cycles_t cyc; - - ns = sec * 1000000000; - cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); - return cyc; -} - /* * Our retries are blocked by all destination sw ack resources being * in use, and a timeout is pending. In that case hardware immediately @@ -1327,16 +1351,6 @@ static void ptc_seq_stop(struct seq_file *file, void *data) { } -static inline unsigned long long usec_2_cycles(unsigned long microsec) -{ - unsigned long ns; - unsigned long long cyc; - - ns = microsec * 1000; - cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); - return cyc; -} - /* * Display the statistics thru /proc/sgi_uv/ptc_statistics * 'data' points to the cpu number
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index aabfb8380a1c..96bc506ac6de 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -357,3 +357,5 @@ 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev 349 i386 kcmp sys_kcmp 350 i386 finit_module sys_finit_module +351 i386 sched_setattr sys_sched_setattr +352 i386 sched_getattr sys_sched_getattr
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index 38ae65dfd14f..a12bddc7ccea 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -320,6 +320,8 @@ 311 64 process_vm_writev sys_process_vm_writev 312 common kcmp sys_kcmp 313 common finit_module sys_finit_module +314 common sched_setattr sys_sched_setattr +315 common sched_getattr sys_sched_getattr # # x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 2ada505067cc..eb5d7a56f8d4 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts) ts->tv_nsec = 0; do { - seq = read_seqcount_begin_no_lockdep(&gtod->seq); + seq = raw_read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->wall_time_sec; ns = gtod->wall_time_snsec; @@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts) ts->tv_nsec = 0; do { - seq = read_seqcount_begin_no_lockdep(&gtod->seq); + seq = raw_read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->monotonic_time_sec; ns = gtod->monotonic_time_snsec; @@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts) { unsigned long seq; do { - seq = read_seqcount_begin_no_lockdep(&gtod->seq); + seq = raw_read_seqcount_begin(&gtod->seq); ts->tv_sec = gtod->wall_time_coarse.tv_sec; ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); @@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts) { unsigned long seq; do { - seq = read_seqcount_begin_no_lockdep(&gtod->seq); + seq = raw_read_seqcount_begin(&gtod->seq); ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index ef021677d536..e1ee6b51dfc5 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h @@ -9,21 +9,14 @@ #ifndef _XTENSA_SYSTEM_H #define _XTENSA_SYSTEM_H -#define smp_read_barrier_depends() do { } while(0) -#define read_barrier_depends() do { } while(0) - #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) #define rmb() barrier() #define wmb() mb() #ifdef CONFIG_SMP #error smp_* not defined -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() #endif -#define set_mb(var, value) do { var = value; mb(); } while (0) +#include <asm-generic/barrier.h> #endif /* _XTENSA_SYSTEM_H */
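
For the perf RAPL hunk earlier in this diff: raw energy counts arrive in units of 1/2^hw_unit Joules (hw_unit taken from bits 12:8 of MSR_RAPL_POWER_UNIT), rapl_scale() shifts them up to the fixed 2^-32 J API unit advertised by the .scale attributes, and the hrtimer period is sized so a 32-bit counter cannot wrap unnoticed at an assumed 200 W. A standalone sketch of that arithmetic, using a hypothetical MSR value and raw count:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical MSR_RAPL_POWER_UNIT contents; bits 12:8 hold the
         * energy status unit, here 14, i.e. counts of 1/2^14 J (~61 uJ). */
        uint64_t rapl_power_unit = 0x000a0e03;
        unsigned int hw_unit = (rapl_power_unit >> 8) & 0x1f;

        /* As in rapl_scale(): lift a raw count to the 2^-32 J API unit. */
        uint64_t raw = 123456;
        uint64_t scaled = raw << (32 - hw_unit);
        double joules = (double)scaled * 2.3283064365386962890625e-10;

        /* Same overflow-timer sizing as rapl_cpu_prepare(): assume at most
         * 200 W and halve the interval to stay clear of the wrap. */
        uint64_t ms;
        if (hw_unit < 32)
            ms = (1000 / (2 * 100)) * (1ULL << (32 - hw_unit - 1));
        else
            ms = 2;

        printf("hw_unit=%u raw=%llu -> %.6f J, poll every %llu ms\n",
               hw_unit, (unsigned long long)raw, joules, (unsigned long long)ms);
        return 0;
    }

For example, a part reporting hw_unit = 16 would land on 5 * 2^15 = 163840 ms with the same formula, which is the kind of "ovfl timer" value the driver prints at init time.
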