Diffstat (limited to 'arch/powerpc')
37 files changed, 1263 insertions, 90 deletions
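The bulk of this diff adds Ultravisor (PPC_UV) support: the H_SVM_* hcalls, the UV_* ucall wrappers, the book3s_hv_uvmem.c device-memory driver for migrating guest pages between normal and secure memory, and a KVM_PPC_SVM_OFF VM ioctl routed to kvmhv_svm_off(). As a rough, editor-added sketch (not part of the patch) of how userspace would drive that new ioctl, assuming KVM_PPC_SVM_OFF is exported through <linux/kvm.h> by the companion uapi change, which sits outside arch/powerpc and therefore outside this diffstat:

/*
 * Editor-added illustration, not part of the patch: turn off secure mode
 * of a guest from userspace via the new VM ioctl. KVM_PPC_SVM_OFF is
 * assumed to come from <linux/kvm.h> via the matching uapi change.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int reset_secure_guest(int vm_fd)
{
	int ret = ioctl(vm_fd, KVM_PPC_SVM_OFF, 0);

	/* kvmhv_svm_off() returns -EBUSY while vCPUs are still running */
	if (ret < 0)
		perror("KVM_PPC_SVM_OFF");
	return ret;
}

A VMM would issue this on reset of a secure guest; per the kvmhv_svm_off() comment below, the handler then drops the device pages, terminates the SVM on the UV side via UV_SVM_TERMINATE, unpins the VPA pages, and reinitializes the partition-scoped page tables.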
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index c781170e9923..6987b0832e5f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -452,6 +452,23 @@ config PPC_TRANSACTIONAL_MEM help Support user-mode Transactional Memory on POWERPC. +config PPC_UV + bool "Ultravisor support" + depends on KVM_BOOK3S_HV_POSSIBLE + select ZONE_DEVICE + select DEV_PAGEMAP_OPS + select DEVICE_PRIVATE + select MEMORY_HOTPLUG + select MEMORY_HOTREMOVE + default n + help + This option paravirtualizes the kernel to run in POWER platforms that + supports the Protected Execution Facility (PEF). On such platforms, + the ultravisor firmware runs at a privilege level above the + hypervisor. + + If unsure, say "N". + config LD_HEAD_STUB_CATCH bool "Reserve 256 bytes to cope with linker stubs in HEAD text" if EXPERT depends on PPC64 diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h index 9c63b596e6ce..a09595f00cab 100644 --- a/arch/powerpc/include/asm/archrandom.h +++ b/arch/powerpc/include/asm/archrandom.h @@ -28,7 +28,7 @@ static inline int arch_get_random_seed_int(unsigned int *v) unsigned long val; int rc; - rc = arch_get_random_long(&val); + rc = arch_get_random_seed_long(&val); if (rc) *v = val; diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 603aed229af7..28dcf8222943 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -64,7 +64,7 @@ /* Macro for generating the ***_bits() functions */ #define DEFINE_BITOP(fn, op, prefix) \ -static __inline__ void fn(unsigned long mask, \ +static inline void fn(unsigned long mask, \ volatile unsigned long *_p) \ { \ unsigned long old; \ @@ -86,22 +86,22 @@ DEFINE_BITOP(clear_bits, andc, "") DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER) DEFINE_BITOP(change_bits, xor, "") -static __inline__ void set_bit(int nr, volatile unsigned long *addr) +static inline void arch_set_bit(int nr, volatile unsigned long *addr) { set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)); } -static __inline__ void clear_bit(int nr, volatile unsigned long *addr) +static inline void arch_clear_bit(int nr, volatile unsigned long *addr) { clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)); } -static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr) +static inline void arch_clear_bit_unlock(int nr, volatile unsigned long *addr) { clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr)); } -static __inline__ void change_bit(int nr, volatile unsigned long *addr) +static inline void arch_change_bit(int nr, volatile unsigned long *addr) { change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)); } @@ -109,7 +109,7 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr) /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output * operands. 
*/ #define DEFINE_TESTOP(fn, op, prefix, postfix, eh) \ -static __inline__ unsigned long fn( \ +static inline unsigned long fn( \ unsigned long mask, \ volatile unsigned long *_p) \ { \ @@ -138,34 +138,34 @@ DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER, DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, 0) -static __inline__ int test_and_set_bit(unsigned long nr, - volatile unsigned long *addr) +static inline int arch_test_and_set_bit(unsigned long nr, + volatile unsigned long *addr) { return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; } -static __inline__ int test_and_set_bit_lock(unsigned long nr, - volatile unsigned long *addr) +static inline int arch_test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *addr) { return test_and_set_bits_lock(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; } -static __inline__ int test_and_clear_bit(unsigned long nr, - volatile unsigned long *addr) +static inline int arch_test_and_clear_bit(unsigned long nr, + volatile unsigned long *addr) { return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; } -static __inline__ int test_and_change_bit(unsigned long nr, - volatile unsigned long *addr) +static inline int arch_test_and_change_bit(unsigned long nr, + volatile unsigned long *addr) { return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; } #ifdef CONFIG_PPC64 -static __inline__ unsigned long clear_bit_unlock_return_word(int nr, - volatile unsigned long *addr) +static inline unsigned long +clear_bit_unlock_return_word(int nr, volatile unsigned long *addr) { unsigned long old, t; unsigned long *p = (unsigned long *)addr + BIT_WORD(nr); @@ -185,15 +185,18 @@ static __inline__ unsigned long clear_bit_unlock_return_word(int nr, return old; } -/* This is a special function for mm/filemap.c */ -#define clear_bit_unlock_is_negative_byte(nr, addr) \ - (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(PG_waiters)) +/* + * This is a special function for mm/filemap.c + * Bit 7 corresponds to PG_waiters. + */ +#define arch_clear_bit_unlock_is_negative_byte(nr, addr) \ + (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7)) #endif /* CONFIG_PPC64 */ #include <asm-generic/bitops/non-atomic.h> -static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) +static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr) { __asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory"); __clear_bit(nr, addr); @@ -215,14 +218,14 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) * fls: find last (most-significant) bit set. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
*/ -static __inline__ int fls(unsigned int x) +static inline int fls(unsigned int x) { return 32 - __builtin_clz(x); } #include <asm-generic/bitops/builtin-__fls.h> -static __inline__ int fls64(__u64 x) +static inline int fls64(__u64 x) { return 64 - __builtin_clzll(x); } @@ -239,6 +242,10 @@ unsigned long __arch_hweight64(__u64 w); #include <asm-generic/bitops/find.h> +/* wrappers that deal with KASAN instrumentation */ +#include <asm-generic/bitops/instrumented-atomic.h> +#include <asm-generic/bitops/instrumented-lock.h> + /* Little-endian versions */ #include <asm-generic/bitops/le.h> diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 11112023e327..13bd870609c3 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -342,6 +342,15 @@ #define H_TLB_INVALIDATE 0xF808 #define H_COPY_TOFROM_GUEST 0xF80C +/* Flags for H_SVM_PAGE_IN */ +#define H_PAGE_IN_SHARED 0x1 + +/* Platform-specific hcalls used by the Ultravisor */ +#define H_SVM_PAGE_IN 0xEF00 +#define H_SVM_PAGE_OUT 0xEF04 +#define H_SVM_INIT_START 0xEF08 +#define H_SVM_INIT_DONE 0xEF0C + /* Values for 2nd argument to H_SET_MODE */ #define H_SET_MODE_RESOURCE_SET_CIABR 1 #define H_SET_MODE_RESOURCE_SET_DAWR 2 diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h new file mode 100644 index 000000000000..50204e228f16 --- /dev/null +++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_KVM_BOOK3S_UVMEM_H__ +#define __ASM_KVM_BOOK3S_UVMEM_H__ + +#ifdef CONFIG_PPC_UV +int kvmppc_uvmem_init(void); +void kvmppc_uvmem_free(void); +int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot); +void kvmppc_uvmem_slot_free(struct kvm *kvm, + const struct kvm_memory_slot *slot); +unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, + unsigned long gra, + unsigned long flags, + unsigned long page_shift); +unsigned long kvmppc_h_svm_page_out(struct kvm *kvm, + unsigned long gra, + unsigned long flags, + unsigned long page_shift); +unsigned long kvmppc_h_svm_init_start(struct kvm *kvm); +unsigned long kvmppc_h_svm_init_done(struct kvm *kvm); +int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn); +void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free, + struct kvm *kvm); +#else +static inline int kvmppc_uvmem_init(void) +{ + return 0; +} + +static inline void kvmppc_uvmem_free(void) { } + +static inline int +kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) +{ + return 0; +} + +static inline void +kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) { } + +static inline unsigned long +kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra, + unsigned long flags, unsigned long page_shift) +{ + return H_UNSUPPORTED; +} + +static inline unsigned long +kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra, + unsigned long flags, unsigned long page_shift) +{ + return H_UNSUPPORTED; +} + +static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) +{ + return H_UNSUPPORTED; +} + +static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) +{ + return H_UNSUPPORTED; +} + +static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) +{ + return -EFAULT; +} + +static inline void +kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free, + struct kvm *kvm) { } +#endif /* CONFIG_PPC_UV */ +#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */ diff --git 
a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 4273e799203d..0a398f2321c2 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -275,6 +275,10 @@ struct kvm_hpt_info { struct kvm_resize_hpt; +/* Flag values for kvm_arch.secure_guest */ +#define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */ +#define KVMPPC_SECURE_INIT_DONE 0x2 /* H_SVM_INIT_DONE completed */ + struct kvm_arch { unsigned int lpid; unsigned int smt_mode; /* # vcpus per virtual core */ @@ -330,6 +334,8 @@ struct kvm_arch { #endif struct kvmppc_ops *kvm_ops; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + struct mutex uvmem_lock; + struct list_head uvmem_pfns; struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */ u64 l1_ptcr; int max_nested_lpid; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index d63f649fe713..3d2f871241a8 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -322,6 +322,7 @@ struct kvmppc_ops { int size); int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, int size); + int (*svm_off)(struct kvm *kvm); }; extern struct kvmppc_ops *kvmppc_hv_ops; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index e9a960e28f3c..1b55fc08f853 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -36,10 +36,12 @@ #endif #ifdef CONFIG_PPC_PSERIES +DECLARE_STATIC_KEY_FALSE(shared_processor); + #define vcpu_is_preempted vcpu_is_preempted static inline bool vcpu_is_preempted(int cpu) { - if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + if (!static_branch_unlikely(&shared_processor)) return false; return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1); } @@ -110,13 +112,8 @@ static inline void splpar_rw_yield(arch_rwlock_t *lock) {}; static inline bool is_shared_processor(void) { -/* - * LPPACA is only available on Pseries so guard anything LPPACA related to - * allow other platforms (which include this common header) to compile. 
- */ -#ifdef CONFIG_PPC_PSERIES - return (IS_ENABLED(CONFIG_PPC_SPLPAR) && - lppaca_shared_proc(local_paca->lppaca_ptr)); +#ifdef CONFIG_PPC_SPLPAR + return static_branch_unlikely(&shared_processor); #else return false; #endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 15002b51ff18..c92fe7fe9692 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n) return n; } -extern unsigned long __clear_user(void __user *addr, unsigned long size); +unsigned long __arch_clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) { @@ -409,12 +409,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size) might_fault(); if (likely(access_ok(addr, size))) { allow_write_to_user(addr, size); - ret = __clear_user(addr, size); + ret = __arch_clear_user(addr, size); prevent_write_to_user(addr, size); } return ret; } +static inline unsigned long __clear_user(void __user *addr, unsigned long size) +{ + return clear_user(addr, size); +} + extern long strncpy_from_user(char *dst, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h index 4fcda1d5793d..b66f6db7be6c 100644 --- a/arch/powerpc/include/asm/ultravisor-api.h +++ b/arch/powerpc/include/asm/ultravisor-api.h @@ -26,8 +26,14 @@ #define UV_WRITE_PATE 0xF104 #define UV_RETURN 0xF11C #define UV_ESM 0xF110 +#define UV_REGISTER_MEM_SLOT 0xF120 +#define UV_UNREGISTER_MEM_SLOT 0xF124 +#define UV_PAGE_IN 0xF128 +#define UV_PAGE_OUT 0xF12C #define UV_SHARE_PAGE 0xF130 #define UV_UNSHARE_PAGE 0xF134 #define UV_UNSHARE_ALL_PAGES 0xF140 +#define UV_PAGE_INVAL 0xF138 +#define UV_SVM_TERMINATE 0xF13C #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */ diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h index b1bc2e043ed4..790b0e63681f 100644 --- a/arch/powerpc/include/asm/ultravisor.h +++ b/arch/powerpc/include/asm/ultravisor.h @@ -46,4 +46,40 @@ static inline int uv_unshare_all_pages(void) return ucall_norets(UV_UNSHARE_ALL_PAGES); } +static inline int uv_page_in(u64 lpid, u64 src_ra, u64 dst_gpa, u64 flags, + u64 page_shift) +{ + return ucall_norets(UV_PAGE_IN, lpid, src_ra, dst_gpa, flags, + page_shift); +} + +static inline int uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags, + u64 page_shift) +{ + return ucall_norets(UV_PAGE_OUT, lpid, dst_ra, src_gpa, flags, + page_shift); +} + +static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size, + u64 flags, u64 slotid) +{ + return ucall_norets(UV_REGISTER_MEM_SLOT, lpid, start_gpa, + size, flags, slotid); +} + +static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid) +{ + return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid); +} + +static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift) +{ + return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift); +} + +static inline int uv_svm_terminate(u64 lpid) +{ + return ucall_norets(UV_SVM_TERMINATE, lpid); +} + #endif /* _ASM_POWERPC_ULTRAVISOR_H */ diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index a115970a6809..40f13f3626d3 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -83,6 +83,7 @@ 
struct vdso_data { __s64 wtom_clock_sec; /* Wall to monotonic clock sec */ __s64 stamp_xtime_sec; /* xtime secs as at tb_orig_stamp */ __s64 stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */ + __u32 hrtimer_res; /* hrtimer resolution */ __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ }; @@ -105,6 +106,7 @@ struct vdso_data { __s32 stamp_xtime_sec; /* xtime seconds as at tb_orig_stamp */ __s32 stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */ __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */ + __u32 hrtimer_res; /* hrtimer resolution */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 dcache_block_size; /* L1 d-cache block size */ __u32 icache_block_size; /* L1 i-cache block size */ diff --git a/arch/powerpc/include/uapi/asm/msgbuf.h b/arch/powerpc/include/uapi/asm/msgbuf.h index 969bd83e4d3d..7919b2ba41b5 100644 --- a/arch/powerpc/include/uapi/asm/msgbuf.h +++ b/arch/powerpc/include/uapi/asm/msgbuf.h @@ -2,6 +2,8 @@ #ifndef _ASM_POWERPC_MSGBUF_H #define _ASM_POWERPC_MSGBUF_H +#include <asm/ipcbuf.h> + /* * The msqid64_ds structure for the PowerPC architecture. * Note extra padding because this structure is passed back and forth diff --git a/arch/powerpc/include/uapi/asm/sembuf.h b/arch/powerpc/include/uapi/asm/sembuf.h index 008ae77c6746..85e96ccb5f0f 100644 --- a/arch/powerpc/include/uapi/asm/sembuf.h +++ b/arch/powerpc/include/uapi/asm/sembuf.h @@ -2,6 +2,8 @@ #ifndef _ASM_POWERPC_SEMBUF_H #define _ASM_POWERPC_SEMBUF_H +#include <asm/ipcbuf.h> + /* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index f22bd6d1fe93..3d47aec7becf 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -388,6 +388,7 @@ int main(void) OFFSET(STAMP_XTIME_SEC, vdso_data, stamp_xtime_sec); OFFSET(STAMP_XTIME_NSEC, vdso_data, stamp_xtime_nsec); OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction); + OFFSET(CLOCK_HRTIMER_RES, vdso_data, hrtimer_res); OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size); OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size); OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size); @@ -413,7 +414,6 @@ int main(void) DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE); DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); - DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); #ifdef CONFIG_BUG DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 838d9d4650c7..6f7a3a7162c5 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -240,6 +240,9 @@ set_ivor: bl early_init +#ifdef CONFIG_KASAN + bl kasan_early_init +#endif #ifdef CONFIG_RELOCATABLE mr r3,r30 mr r4,r31 @@ -266,9 +269,6 @@ set_ivor: /* * Decide what sort of machine this is and initialize the MMU. */ -#ifdef CONFIG_KASAN - bl kasan_early_init -#endif mr r3,r30 mr r4,r31 bl machine_init diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5645bc9cbc09..add67498c126 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -619,8 +619,6 @@ void __do_irq(struct pt_regs *regs) trace_irq_entry(regs); - check_stack_overflow(); - /* * Query the platform PIC for the interrupt & ack it. 
* @@ -652,6 +650,8 @@ void do_IRQ(struct pt_regs *regs) irqsp = hardirq_ctx[raw_smp_processor_id()]; sirqsp = softirq_ctx[raw_smp_processor_id()]; + check_stack_overflow(); + /* Already there ? */ if (unlikely(cursp == irqsp || cursp == sirqsp)) { __do_irq(regs); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 2d13cea13954..1168e8b37e30 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -960,6 +960,7 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->stamp_xtime_sec = xt.tv_sec; vdso_data->stamp_xtime_nsec = xt.tv_nsec; vdso_data->stamp_sec_fraction = frac_sec; + vdso_data->hrtimer_res = hrtimer_resolution; smp_wmb(); ++(vdso_data->tb_update_count); } diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index c8e6902cb01b..3306672f57a9 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -154,12 +154,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) cror cr0*4+eq,cr0*4+eq,cr1*4+eq bne cr0,99f + mflr r12 + .cfi_register lr,r12 + bl __get_datapage@local /* get data page */ + lwz r5, CLOCK_HRTIMER_RES(r3) + mtlr r12 li r3,0 cmpli cr0,r4,0 crclr cr0*4+so beqlr - lis r5,CLOCK_REALTIME_RES@h - ori r5,r5,CLOCK_REALTIME_RES@l stw r3,TSPC32_TV_SEC(r4) stw r5,TSPC32_TV_NSEC(r4) blr diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index 1f24e411af80..1c9a04703250 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -186,12 +186,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) cror cr0*4+eq,cr0*4+eq,cr1*4+eq bne cr0,99f + mflr r12 + .cfi_register lr,r12 + bl V_LOCAL_FUNC(__get_datapage) + lwz r5, CLOCK_HRTIMER_RES(r3) + mtlr r12 li r3,0 cmpldi cr0,r4,0 crclr cr0*4+so beqlr - lis r5,CLOCK_REALTIME_RES@h - ori r5,r5,CLOCK_REALTIME_RES@l std r3,TSPC64_TV_SEC(r4) std r5,TSPC64_TV_NSEC(r4) blr diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 4c67cc79de7c..2bfeaa13befb 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -71,6 +71,9 @@ kvm-hv-y += \ book3s_64_mmu_radix.o \ book3s_hv_nested.o +kvm-hv-$(CONFIG_PPC_UV) += \ + book3s_hv_uvmem.o + kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ book3s_hv_tm.o diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 2d415c36a61d..da857c8ba6e4 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -19,6 +19,8 @@ #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/pte-walk.h> +#include <asm/ultravisor.h> +#include <asm/kvm_book3s_uvmem.h> /* * Supported radix tree geometry. 
@@ -915,6 +917,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, if (!(dsisr & DSISR_PRTABLE_FAULT)) gpa |= ea & 0xfff; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return kvmppc_send_page_to_uv(kvm, gfn); + /* Get the corresponding memslot */ memslot = gfn_to_memslot(kvm, gfn); @@ -972,6 +977,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) { + uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT); + return 0; + } + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep)) kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, @@ -989,6 +999,9 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, int ref = 0; unsigned long old, *rmapp; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return ref; + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) { old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0, @@ -1013,6 +1026,9 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned int shift; int ref = 0; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return ref; + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) ref = 1; @@ -1030,6 +1046,9 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm, int ret = 0; unsigned long old, *rmapp; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return ret; + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) { ret = 1; @@ -1082,6 +1101,12 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm, unsigned long gpa; unsigned int shift; + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START) + kvmppc_uvmem_drop_pages(memslot, kvm); + + if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) + return; + gpa = memslot->base_gfn << PAGE_SHIFT; spin_lock(&kvm->mmu_lock); for (n = memslot->npages; n; --n) { diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index ec5c0379296a..6ff3f896d908 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -72,6 +72,9 @@ #include <asm/xics.h> #include <asm/xive.h> #include <asm/hw_breakpoint.h> +#include <asm/kvm_host.h> +#include <asm/kvm_book3s_uvmem.h> +#include <asm/ultravisor.h> #include "book3s.h" @@ -1070,6 +1073,25 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); break; + case H_SVM_PAGE_IN: + ret = kvmppc_h_svm_page_in(vcpu->kvm, + kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + break; + case H_SVM_PAGE_OUT: + ret = kvmppc_h_svm_page_out(vcpu->kvm, + kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + break; + case H_SVM_INIT_START: + ret = kvmppc_h_svm_init_start(vcpu->kvm); + break; + case H_SVM_INIT_DONE: + ret = kvmppc_h_svm_init_done(vcpu->kvm); + break; + default: return RESUME_HOST; } @@ -4494,6 +4516,29 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) && ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) kvmppc_radix_flush_memslot(kvm, old); + /* + * If UV hasn't yet called H_SVM_INIT_START, don't register memslots. 
+ */ + if (!kvm->arch.secure_guest) + return; + + switch (change) { + case KVM_MR_CREATE: + if (kvmppc_uvmem_slot_init(kvm, new)) + return; + uv_register_mem_slot(kvm->arch.lpid, + new->base_gfn << PAGE_SHIFT, + new->npages * PAGE_SIZE, + 0, new->id); + break; + case KVM_MR_DELETE: + uv_unregister_mem_slot(kvm->arch.lpid, old->id); + kvmppc_uvmem_slot_free(kvm, old); + break; + default: + /* TODO: Handle KVM_MR_MOVE */ + break; + } } /* @@ -4767,6 +4812,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) char buf[32]; int ret; + mutex_init(&kvm->arch.uvmem_lock); + INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); mutex_init(&kvm->arch.mmu_setup_lock); /* Allocate the guest's logical partition ID */ @@ -4936,8 +4983,11 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); kvm->arch.process_table = 0; + if (kvm->arch.secure_guest) + uv_svm_terminate(kvm->arch.lpid); kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); } + kvmppc_free_lpid(kvm->arch.lpid); kvmppc_free_pimap(kvm); @@ -5377,6 +5427,94 @@ static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, return rc; } +static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa) +{ + unpin_vpa(kvm, vpa); + vpa->gpa = 0; + vpa->pinned_addr = NULL; + vpa->dirty = false; + vpa->update_pending = 0; +} + +/* + * IOCTL handler to turn off secure mode of guest + * + * - Release all device pages + * - Issue ucall to terminate the guest on the UV side + * - Unpin the VPA pages. + * - Reinit the partition scoped page tables + */ +static int kvmhv_svm_off(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + int mmu_was_ready; + int srcu_idx; + int ret = 0; + int i; + + if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) + return ret; + + mutex_lock(&kvm->arch.mmu_setup_lock); + mmu_was_ready = kvm->arch.mmu_ready; + if (kvm->arch.mmu_ready) { + kvm->arch.mmu_ready = 0; + /* order mmu_ready vs. vcpus_running */ + smp_mb(); + if (atomic_read(&kvm->arch.vcpus_running)) { + kvm->arch.mmu_ready = 1; + ret = -EBUSY; + goto out; + } + } + + srcu_idx = srcu_read_lock(&kvm->srcu); + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + struct kvm_memory_slot *memslot; + struct kvm_memslots *slots = __kvm_memslots(kvm, i); + + if (!slots) + continue; + + kvm_for_each_memslot(memslot, slots) { + kvmppc_uvmem_drop_pages(memslot, kvm); + uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); + } + } + srcu_read_unlock(&kvm->srcu, srcu_idx); + + ret = uv_svm_terminate(kvm->arch.lpid); + if (ret != U_SUCCESS) { + ret = -EINVAL; + goto out; + } + + /* + * When secure guest is reset, all the guest pages are sent + * to UV via UV_PAGE_IN before the non-boot vcpus get a + * chance to run and unpin their VPA pages. Unpinning of all + * VPA pages is done here explicitly so that VPA pages + * can be migrated to the secure side. + * + * This is required to for the secure SMP guest to reboot + * correctly. 
+ */ + kvm_for_each_vcpu(i, vcpu, kvm) { + spin_lock(&vcpu->arch.vpa_update_lock); + unpin_vpa_reset(kvm, &vcpu->arch.dtl); + unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); + unpin_vpa_reset(kvm, &vcpu->arch.vpa); + spin_unlock(&vcpu->arch.vpa_update_lock); + } + + kvmppc_setup_partition_table(kvm); + kvm->arch.secure_guest = 0; + kvm->arch.mmu_ready = mmu_was_ready; +out: + mutex_unlock(&kvm->arch.mmu_setup_lock); + return ret; +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -5420,6 +5558,7 @@ static struct kvmppc_ops kvm_ops_hv = { .enable_nested = kvmhv_enable_nested, .load_from_eaddr = kvmhv_load_from_eaddr, .store_to_eaddr = kvmhv_store_to_eaddr, + .svm_off = kvmhv_svm_off, }; static int kvm_init_subcore_bitmap(void) @@ -5528,11 +5667,16 @@ static int kvmppc_book3s_init_hv(void) no_mixing_hpt_and_radix = true; } + r = kvmppc_uvmem_init(); + if (r < 0) + pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r); + return r; } static void kvmppc_book3s_exit_hv(void) { + kvmppc_uvmem_free(); kvmppc_free_host_rm_ops(); if (kvmppc_radix_possible()) kvmppc_radix_exit(); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 0496e66aaa56..c6fbbd29bd87 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1117,7 +1117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r7, VCPU_GPR(R7)(r4) bne ret_to_ultra - lwz r0, VCPU_CR(r4) + ld r0, VCPU_CR(r4) mtcr r0 ld r0, VCPU_GPR(R0)(r4) @@ -1137,7 +1137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) * R3 = UV_RETURN */ ret_to_ultra: - lwz r0, VCPU_CR(r4) + ld r0, VCPU_CR(r4) mtcr r0 ld r0, VCPU_GPR(R3)(r4) diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c new file mode 100644 index 000000000000..2de264fc3156 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -0,0 +1,785 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Secure pages management: Migration of pages between normal and secure + * memory of KVM guests. + * + * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com> + */ + +/* + * A pseries guest can be run as secure guest on Ultravisor-enabled + * POWER platforms. On such platforms, this driver will be used to manage + * the movement of guest pages between the normal memory managed by + * hypervisor (HV) and secure memory managed by Ultravisor (UV). + * + * The page-in or page-out requests from UV will come to HV as hcalls and + * HV will call back into UV via ultracalls to satisfy these page requests. + * + * Private ZONE_DEVICE memory equal to the amount of secure memory + * available in the platform for running secure guests is hotplugged. + * Whenever a page belonging to the guest becomes secure, a page from this + * private device memory is used to represent and track that secure page + * on the HV side. Some pages (like virtio buffers, VPA pages etc) are + * shared between UV and HV. However such pages aren't represented by + * device private memory and mappings to shared memory exist in both + * UV and HV page tables. + */ + +/* + * Notes on locking + * + * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent + * page-in and page-out requests for the same GPA. Concurrent accesses + * can either come via UV (guest vCPUs requesting for same page) + * or when HV and guest simultaneously access the same page. 
+ * This mutex serializes the migration of page from HV(normal) to + * UV(secure) and vice versa. So the serialization points are around + * migrate_vma routines and page-in/out routines. + * + * Per-guest mutex comes with a cost though. Mainly it serializes the + * fault path as page-out can occur when HV faults on accessing secure + * guest pages. Currently UV issues page-in requests for all the guest + * PFNs one at a time during early boot (UV_ESM uvcall), so this is + * not a cause for concern. Also currently the number of page-outs caused + * by HV touching secure pages is very very low. If an when UV supports + * overcommitting, then we might see concurrent guest driven page-outs. + * + * Locking order + * + * 1. kvm->srcu - Protects KVM memslots + * 2. kvm->mm->mmap_sem - find_vma, migrate_vma_pages and helpers, ksm_madvise + * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting + * as sync-points for page-in/out + */ + +/* + * Notes on page size + * + * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN + * and H_SVM_PAGE_OUT hcalls in PAGE_SIZE(64K) granularity. HV tracks + * secure GPAs at 64K page size and maintains one device PFN for each + * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued + * for 64K page at a time. + * + * HV faulting on secure pages: When HV touches any secure page, it + * faults and issues a UV_PAGE_OUT request with 64K page size. Currently + * UV splits and remaps the 2MB page if necessary and copies out the + * required 64K page contents. + * + * Shared pages: Whenever guest shares a secure page, UV will split and + * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size. + * + * HV invalidating a page: When a regular page belonging to secure + * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K + * page size. Using 64K page size is correct here because any non-secure + * page will essentially be of 64K page size. Splitting by UV during sharing + * and page-out ensures this. + * + * Page fault handling: When HV handles page fault of a page belonging + * to secure guest, it sends that to UV with a 64K UV_PAGE_IN request. + * Using 64K size is correct here too as UV would have split the 2MB page + * into 64k mappings and would have done page-outs earlier. + * + * In summary, the current secure pages handling code in HV assumes + * 64K page size and in fact fails any page-in/page-out requests of + * non-64K size upfront. If and when UV starts supporting multiple + * page-sizes, we need to break this assumption. 
+ */ + +#include <linux/pagemap.h> +#include <linux/migrate.h> +#include <linux/kvm_host.h> +#include <linux/ksm.h> +#include <asm/ultravisor.h> +#include <asm/mman.h> +#include <asm/kvm_ppc.h> + +static struct dev_pagemap kvmppc_uvmem_pgmap; +static unsigned long *kvmppc_uvmem_bitmap; +static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock); + +#define KVMPPC_UVMEM_PFN (1UL << 63) + +struct kvmppc_uvmem_slot { + struct list_head list; + unsigned long nr_pfns; + unsigned long base_pfn; + unsigned long *pfns; +}; + +struct kvmppc_uvmem_page_pvt { + struct kvm *kvm; + unsigned long gpa; + bool skip_page_out; +}; + +int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) +{ + struct kvmppc_uvmem_slot *p; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns))); + if (!p->pfns) { + kfree(p); + return -ENOMEM; + } + p->nr_pfns = slot->npages; + p->base_pfn = slot->base_gfn; + + mutex_lock(&kvm->arch.uvmem_lock); + list_add(&p->list, &kvm->arch.uvmem_pfns); + mutex_unlock(&kvm->arch.uvmem_lock); + + return 0; +} + +/* + * All device PFNs are already released by the time we come here. + */ +void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) +{ + struct kvmppc_uvmem_slot *p, *next; + + mutex_lock(&kvm->arch.uvmem_lock); + list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) { + if (p->base_pfn == slot->base_gfn) { + vfree(p->pfns); + list_del(&p->list); + kfree(p); + break; + } + } + mutex_unlock(&kvm->arch.uvmem_lock); +} + +static void kvmppc_uvmem_pfn_insert(unsigned long gfn, unsigned long uvmem_pfn, + struct kvm *kvm) +{ + struct kvmppc_uvmem_slot *p; + + list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { + if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { + unsigned long index = gfn - p->base_pfn; + + p->pfns[index] = uvmem_pfn | KVMPPC_UVMEM_PFN; + return; + } + } +} + +static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm) +{ + struct kvmppc_uvmem_slot *p; + + list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { + if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { + p->pfns[gfn - p->base_pfn] = 0; + return; + } + } +} + +static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, + unsigned long *uvmem_pfn) +{ + struct kvmppc_uvmem_slot *p; + + list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { + if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { + unsigned long index = gfn - p->base_pfn; + + if (p->pfns[index] & KVMPPC_UVMEM_PFN) { + if (uvmem_pfn) + *uvmem_pfn = p->pfns[index] & + ~KVMPPC_UVMEM_PFN; + return true; + } else + return false; + } + } + return false; +} + +unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int ret = H_SUCCESS; + int srcu_idx; + + if (!kvmppc_uvmem_bitmap) + return H_UNSUPPORTED; + + /* Only radix guests can be secure guests */ + if (!kvm_is_radix(kvm)) + return H_UNSUPPORTED; + + srcu_idx = srcu_read_lock(&kvm->srcu); + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) { + if (kvmppc_uvmem_slot_init(kvm, memslot)) { + ret = H_PARAMETER; + goto out; + } + ret = uv_register_mem_slot(kvm->arch.lpid, + memslot->base_gfn << PAGE_SHIFT, + memslot->npages * PAGE_SIZE, + 0, memslot->id); + if (ret < 0) { + kvmppc_uvmem_slot_free(kvm, memslot); + ret = H_PARAMETER; + goto out; + } + } + kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START; +out: + srcu_read_unlock(&kvm->srcu, srcu_idx); + 
return ret; +} + +unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) +{ + if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) + return H_UNSUPPORTED; + + kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; + pr_info("LPID %d went secure\n", kvm->arch.lpid); + return H_SUCCESS; +} + +/* + * Drop device pages that we maintain for the secure guest + * + * We first mark the pages to be skipped from UV_PAGE_OUT when there + * is HV side fault on these pages. Next we *get* these pages, forcing + * fault on them, do fault time migration to replace the device PTEs in + * QEMU page table with normal PTEs from newly allocated pages. + */ +void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free, + struct kvm *kvm) +{ + int i; + struct kvmppc_uvmem_page_pvt *pvt; + unsigned long pfn, uvmem_pfn; + unsigned long gfn = free->base_gfn; + + for (i = free->npages; i; --i, ++gfn) { + struct page *uvmem_page; + + mutex_lock(&kvm->arch.uvmem_lock); + if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { + mutex_unlock(&kvm->arch.uvmem_lock); + continue; + } + + uvmem_page = pfn_to_page(uvmem_pfn); + pvt = uvmem_page->zone_device_data; + pvt->skip_page_out = true; + mutex_unlock(&kvm->arch.uvmem_lock); + + pfn = gfn_to_pfn(kvm, gfn); + if (is_error_noslot_pfn(pfn)) + continue; + kvm_release_pfn_clean(pfn); + } +} + +/* + * Get a free device PFN from the pool + * + * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device + * PFN will be used to keep track of the secure page on HV side. + * + * Called with kvm->arch.uvmem_lock held + */ +static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) +{ + struct page *dpage = NULL; + unsigned long bit, uvmem_pfn; + struct kvmppc_uvmem_page_pvt *pvt; + unsigned long pfn_last, pfn_first; + + pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT; + pfn_last = pfn_first + + (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT); + + spin_lock(&kvmppc_uvmem_bitmap_lock); + bit = find_first_zero_bit(kvmppc_uvmem_bitmap, + pfn_last - pfn_first); + if (bit >= (pfn_last - pfn_first)) + goto out; + bitmap_set(kvmppc_uvmem_bitmap, bit, 1); + spin_unlock(&kvmppc_uvmem_bitmap_lock); + + pvt = kzalloc(sizeof(*pvt), GFP_KERNEL); + if (!pvt) + goto out_clear; + + uvmem_pfn = bit + pfn_first; + kvmppc_uvmem_pfn_insert(gpa >> PAGE_SHIFT, uvmem_pfn, kvm); + + pvt->gpa = gpa; + pvt->kvm = kvm; + + dpage = pfn_to_page(uvmem_pfn); + dpage->zone_device_data = pvt; + get_page(dpage); + lock_page(dpage); + return dpage; +out_clear: + spin_lock(&kvmppc_uvmem_bitmap_lock); + bitmap_clear(kvmppc_uvmem_bitmap, bit, 1); +out: + spin_unlock(&kvmppc_uvmem_bitmap_lock); + return NULL; +} + +/* + * Alloc a PFN from private device memory pool and copy page from normal + * memory to secure memory using UV_PAGE_IN uvcall. + */ +static int +kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long gpa, struct kvm *kvm, + unsigned long page_shift, bool *downgrade) +{ + unsigned long src_pfn, dst_pfn = 0; + struct migrate_vma mig; + struct page *spage; + unsigned long pfn; + struct page *dpage; + int ret = 0; + + memset(&mig, 0, sizeof(mig)); + mig.vma = vma; + mig.start = start; + mig.end = end; + mig.src = &src_pfn; + mig.dst = &dst_pfn; + + /* + * We come here with mmap_sem write lock held just for + * ksm_madvise(), otherwise we only need read mmap_sem. + * Hence downgrade to read lock once ksm_madvise() is done. 
+ */ + ret = ksm_madvise(vma, vma->vm_start, vma->vm_end, + MADV_UNMERGEABLE, &vma->vm_flags); + downgrade_write(&kvm->mm->mmap_sem); + *downgrade = true; + if (ret) + return ret; + + ret = migrate_vma_setup(&mig); + if (ret) + return ret; + + if (!(*mig.src & MIGRATE_PFN_MIGRATE)) { + ret = -1; + goto out_finalize; + } + + dpage = kvmppc_uvmem_get_page(gpa, kvm); + if (!dpage) { + ret = -1; + goto out_finalize; + } + + pfn = *mig.src >> MIGRATE_PFN_SHIFT; + spage = migrate_pfn_to_page(*mig.src); + if (spage) + uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, + page_shift); + + *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + migrate_vma_pages(&mig); +out_finalize: + migrate_vma_finalize(&mig); + return ret; +} + +/* + * Shares the page with HV, thus making it a normal page. + * + * - If the page is already secure, then provision a new page and share + * - If the page is a normal page, share the existing page + * + * In the former case, uses dev_pagemap_ops.migrate_to_ram handler + * to unmap the device page from QEMU's page tables. + */ +static unsigned long +kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift) +{ + + int ret = H_PARAMETER; + struct page *uvmem_page; + struct kvmppc_uvmem_page_pvt *pvt; + unsigned long pfn; + unsigned long gfn = gpa >> page_shift; + int srcu_idx; + unsigned long uvmem_pfn; + + srcu_idx = srcu_read_lock(&kvm->srcu); + mutex_lock(&kvm->arch.uvmem_lock); + if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { + uvmem_page = pfn_to_page(uvmem_pfn); + pvt = uvmem_page->zone_device_data; + pvt->skip_page_out = true; + } + +retry: + mutex_unlock(&kvm->arch.uvmem_lock); + pfn = gfn_to_pfn(kvm, gfn); + if (is_error_noslot_pfn(pfn)) + goto out; + + mutex_lock(&kvm->arch.uvmem_lock); + if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { + uvmem_page = pfn_to_page(uvmem_pfn); + pvt = uvmem_page->zone_device_data; + pvt->skip_page_out = true; + kvm_release_pfn_clean(pfn); + goto retry; + } + + if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift)) + ret = H_SUCCESS; + kvm_release_pfn_clean(pfn); + mutex_unlock(&kvm->arch.uvmem_lock); +out: + srcu_read_unlock(&kvm->srcu, srcu_idx); + return ret; +} + +/* + * H_SVM_PAGE_IN: Move page from normal memory to secure memory. + * + * H_PAGE_IN_SHARED flag makes the page shared which means that the same + * memory in is visible from both UV and HV. 
+ */ +unsigned long +kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, + unsigned long flags, unsigned long page_shift) +{ + bool downgrade = false; + unsigned long start, end; + struct vm_area_struct *vma; + int srcu_idx; + unsigned long gfn = gpa >> page_shift; + int ret; + + if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) + return H_UNSUPPORTED; + + if (page_shift != PAGE_SHIFT) + return H_P3; + + if (flags & ~H_PAGE_IN_SHARED) + return H_P2; + + if (flags & H_PAGE_IN_SHARED) + return kvmppc_share_page(kvm, gpa, page_shift); + + ret = H_PARAMETER; + srcu_idx = srcu_read_lock(&kvm->srcu); + down_write(&kvm->mm->mmap_sem); + + start = gfn_to_hva(kvm, gfn); + if (kvm_is_error_hva(start)) + goto out; + + mutex_lock(&kvm->arch.uvmem_lock); + /* Fail the page-in request of an already paged-in page */ + if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) + goto out_unlock; + + end = start + (1UL << page_shift); + vma = find_vma_intersection(kvm->mm, start, end); + if (!vma || vma->vm_start > start || vma->vm_end < end) + goto out_unlock; + + if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift, + &downgrade)) + ret = H_SUCCESS; +out_unlock: + mutex_unlock(&kvm->arch.uvmem_lock); +out: + if (downgrade) + up_read(&kvm->mm->mmap_sem); + else + up_write(&kvm->mm->mmap_sem); + srcu_read_unlock(&kvm->srcu, srcu_idx); + return ret; +} + +/* + * Provision a new page on HV side and copy over the contents + * from secure memory using UV_PAGE_OUT uvcall. + */ +static int +kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long page_shift, + struct kvm *kvm, unsigned long gpa) +{ + unsigned long src_pfn, dst_pfn = 0; + struct migrate_vma mig; + struct page *dpage, *spage; + struct kvmppc_uvmem_page_pvt *pvt; + unsigned long pfn; + int ret = U_SUCCESS; + + memset(&mig, 0, sizeof(mig)); + mig.vma = vma; + mig.start = start; + mig.end = end; + mig.src = &src_pfn; + mig.dst = &dst_pfn; + + mutex_lock(&kvm->arch.uvmem_lock); + /* The requested page is already paged-out, nothing to do */ + if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL)) + goto out; + + ret = migrate_vma_setup(&mig); + if (ret) + return ret; + + spage = migrate_pfn_to_page(*mig.src); + if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE)) + goto out_finalize; + + if (!is_zone_device_page(spage)) + goto out_finalize; + + dpage = alloc_page_vma(GFP_HIGHUSER, vma, start); + if (!dpage) { + ret = -1; + goto out_finalize; + } + + lock_page(dpage); + pvt = spage->zone_device_data; + pfn = page_to_pfn(dpage); + + /* + * This function is used in two cases: + * - When HV touches a secure page, for which we do UV_PAGE_OUT + * - When a secure page is converted to shared page, we *get* + * the page to essentially unmap the device page. In this + * case we skip page-out. + */ + if (!pvt->skip_page_out) + ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, + gpa, 0, page_shift); + + if (ret == U_SUCCESS) + *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED; + else { + unlock_page(dpage); + __free_page(dpage); + goto out_finalize; + } + + migrate_vma_pages(&mig); +out_finalize: + migrate_vma_finalize(&mig); +out: + mutex_unlock(&kvm->arch.uvmem_lock); + return ret; +} + +/* + * Fault handler callback that gets called when HV touches any page that + * has been moved to secure memory, we ask UV to give back the page by + * issuing UV_PAGE_OUT uvcall. + * + * This eventually results in dropping of device PFN and the newly + * provisioned page/PFN gets populated in QEMU page tables. 
+ */ +static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf) +{ + struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data; + + if (kvmppc_svm_page_out(vmf->vma, vmf->address, + vmf->address + PAGE_SIZE, PAGE_SHIFT, + pvt->kvm, pvt->gpa)) + return VM_FAULT_SIGBUS; + else + return 0; +} + +/* + * Release the device PFN back to the pool + * + * Gets called when secure page becomes a normal page during H_SVM_PAGE_OUT. + * Gets called with kvm->arch.uvmem_lock held. + */ +static void kvmppc_uvmem_page_free(struct page *page) +{ + unsigned long pfn = page_to_pfn(page) - + (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT); + struct kvmppc_uvmem_page_pvt *pvt; + + spin_lock(&kvmppc_uvmem_bitmap_lock); + bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1); + spin_unlock(&kvmppc_uvmem_bitmap_lock); + + pvt = page->zone_device_data; + page->zone_device_data = NULL; + kvmppc_uvmem_pfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm); + kfree(pvt); +} + +static const struct dev_pagemap_ops kvmppc_uvmem_ops = { + .page_free = kvmppc_uvmem_page_free, + .migrate_to_ram = kvmppc_uvmem_migrate_to_ram, +}; + +/* + * H_SVM_PAGE_OUT: Move page from secure memory to normal memory. + */ +unsigned long +kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, + unsigned long flags, unsigned long page_shift) +{ + unsigned long gfn = gpa >> page_shift; + unsigned long start, end; + struct vm_area_struct *vma; + int srcu_idx; + int ret; + + if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) + return H_UNSUPPORTED; + + if (page_shift != PAGE_SHIFT) + return H_P3; + + if (flags) + return H_P2; + + ret = H_PARAMETER; + srcu_idx = srcu_read_lock(&kvm->srcu); + down_read(&kvm->mm->mmap_sem); + start = gfn_to_hva(kvm, gfn); + if (kvm_is_error_hva(start)) + goto out; + + end = start + (1UL << page_shift); + vma = find_vma_intersection(kvm->mm, start, end); + if (!vma || vma->vm_start > start || vma->vm_end < end) + goto out; + + if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa)) + ret = H_SUCCESS; +out: + up_read(&kvm->mm->mmap_sem); + srcu_read_unlock(&kvm->srcu, srcu_idx); + return ret; +} + +int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) +{ + unsigned long pfn; + int ret = U_SUCCESS; + + pfn = gfn_to_pfn(kvm, gfn); + if (is_error_noslot_pfn(pfn)) + return -EFAULT; + + mutex_lock(&kvm->arch.uvmem_lock); + if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) + goto out; + + ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, + 0, PAGE_SHIFT); +out: + kvm_release_pfn_clean(pfn); + mutex_unlock(&kvm->arch.uvmem_lock); + return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT; +} + +static u64 kvmppc_get_secmem_size(void) +{ + struct device_node *np; + int i, len; + const __be32 *prop; + u64 size = 0; + + np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware"); + if (!np) + goto out; + + prop = of_get_property(np, "secure-memory-ranges", &len); + if (!prop) + goto out_put; + + for (i = 0; i < len / (sizeof(*prop) * 4); i++) + size += of_read_number(prop + (i * 4) + 2, 2); + +out_put: + of_node_put(np); +out: + return size; +} + +int kvmppc_uvmem_init(void) +{ + int ret = 0; + unsigned long size; + struct resource *res; + void *addr; + unsigned long pfn_last, pfn_first; + + size = kvmppc_get_secmem_size(); + if (!size) { + /* + * Don't fail the initialization of kvm-hv module if + * the platform doesn't export ibm,uv-firmware node. + * Let normal guests run on such PEF-disabled platform. 
+ */ + pr_info("KVMPPC-UVMEM: No support for secure guests\n"); + goto out; + } + + res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem"); + if (IS_ERR(res)) { + ret = PTR_ERR(res); + goto out; + } + + kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE; + kvmppc_uvmem_pgmap.res = *res; + kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops; + addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto out_free_region; + } + + pfn_first = res->start >> PAGE_SHIFT; + pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT); + kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first), + sizeof(unsigned long), GFP_KERNEL); + if (!kvmppc_uvmem_bitmap) { + ret = -ENOMEM; + goto out_unmap; + } + + pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size); + return ret; +out_unmap: + memunmap_pages(&kvmppc_uvmem_pgmap); +out_free_region: + release_mem_region(res->start, size); +out: + return ret; +} + +void kvmppc_uvmem_free(void) +{ + memunmap_pages(&kvmppc_uvmem_pgmap); + release_mem_region(kvmppc_uvmem_pgmap.res.start, + resource_size(&kvmppc_uvmem_pgmap.res)); + kfree(kvmppc_uvmem_bitmap); +} diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 9e085e931d74..416fb3d2a1d0 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -31,6 +31,8 @@ #include <asm/hvcall.h> #include <asm/plpar_wrappers.h> #endif +#include <asm/ultravisor.h> +#include <asm/kvm_host.h> #include "timing.h" #include "irq.h" @@ -2413,6 +2415,16 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; break; } + case KVM_PPC_SVM_OFF: { + struct kvm *kvm = filp->private_data; + + r = 0; + if (!kvm->arch.kvm_ops->svm_off) + goto out; + + r = kvm->arch.kvm_ops->svm_off(kvm); + break; + } default: { struct kvm *kvm = filp->private_data; r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c index 377712e85605..0666a8d29596 100644 --- a/arch/powerpc/lib/pmem.c +++ b/arch/powerpc/lib/pmem.c @@ -17,14 +17,14 @@ void arch_wb_cache_pmem(void *addr, size_t size) unsigned long start = (unsigned long) addr; flush_dcache_range(start, start + size); } -EXPORT_SYMBOL(arch_wb_cache_pmem); +EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); void arch_invalidate_pmem(void *addr, size_t size) { unsigned long start = (unsigned long) addr; flush_dcache_range(start, start + size); } -EXPORT_SYMBOL(arch_invalidate_pmem); +EXPORT_SYMBOL_GPL(arch_invalidate_pmem); /* * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE symbols diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S index f69a6aab7bfb..1ddb26394e8a 100644 --- a/arch/powerpc/lib/string_32.S +++ b/arch/powerpc/lib/string_32.S @@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) -_GLOBAL(__clear_user) +_GLOBAL(__arch_clear_user) /* * Use dcbz on the complete cache lines in the destination * to set them to zero. This requires that the destination @@ -87,4 +87,4 @@ _GLOBAL(__clear_user) EX_TABLE(8b, 91b) EX_TABLE(9b, 91b) -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S index 507b18b1660e..169872bc0892 100644 --- a/arch/powerpc/lib/string_64.S +++ b/arch/powerpc/lib/string_64.S @@ -17,7 +17,7 @@ PPC64_CACHES: .section ".text" /** - * __clear_user: - Zero a block of memory in user space, with less checking. 
+ * __arch_clear_user: - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. * @n: Number of bytes to zero. * @@ -58,7 +58,7 @@ err3; stb r0,0(r3) mr r3,r4 blr -_GLOBAL_TOC(__clear_user) +_GLOBAL_TOC(__arch_clear_user) cmpdi r4,32 neg r6,r3 li r0,0 @@ -181,4 +181,4 @@ err1; dcbz 0,r3 cmpdi r4,32 blt .Lshort_clear b .Lmedium_clear -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index ad299e72ec30..617c2777926f 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -121,7 +121,7 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, unsigned long i; for (i = start; i < stop; i += chunk) { - flush_dcache_range(i, min(stop, start + chunk)); + flush_dcache_range(i, min(stop, i + chunk)); cond_resched(); } } @@ -289,6 +289,14 @@ void __init mem_init(void) BUILD_BUG_ON(MMU_PAGE_COUNT > 16); #ifdef CONFIG_SWIOTLB + /* + * Some platforms (e.g. 85xx) limit DMA-able memory way below + * 4G. We force memblock to bottom-up mode to ensure that the + * memory allocated in swiotlb_init() is DMA-able. + * As it's the last memblock allocation, no need to reset it + * back to to-down. + */ + memblock_set_bottom_up(true); swiotlb_init(0); #endif diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 090af2d2d3e4..96eb8e43f39b 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -103,7 +103,7 @@ static void mmu_patch_addis(s32 *site, long simm) patch_instruction_site(site, instr); } -void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot) +static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot) { unsigned long s = offset; unsigned long v = PAGE_OFFSET + s; diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h index 6e5a2a4faeab..4ec2a9f14f84 100644 --- a/arch/powerpc/net/bpf_jit32.h +++ b/arch/powerpc/net/bpf_jit32.h @@ -97,12 +97,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #ifdef CONFIG_SMP #ifdef CONFIG_PPC64 #define PPC_BPF_LOAD_CPU(r) \ - do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \ + do { BUILD_BUG_ON(sizeof_field(struct paca_struct, paca_index) != 2); \ PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \ } while (0) #else #define PPC_BPF_LOAD_CPU(r) \ - do { BUILD_BUG_ON(FIELD_SIZEOF(struct task_struct, cpu) != 4); \ + do { BUILD_BUG_ON(sizeof_field(struct task_struct, cpu) != 4); \ PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu)); \ } while(0) #endif diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index d57b46e0dd60..0acc9d5fb19e 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -321,7 +321,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); break; case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); + BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4); PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); break; case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */ @@ -333,16 +333,16 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, /*** Ancillary info loads ***/ case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, + BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2); 
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 090af2d2d3e4..96eb8e43f39b 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -103,7 +103,7 @@ static void mmu_patch_addis(s32 *site, long simm)
     patch_instruction_site(site, instr);
 }
 
-void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
+static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
 {
     unsigned long s = offset;
     unsigned long v = PAGE_OFFSET + s;
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
index 6e5a2a4faeab..4ec2a9f14f84 100644
--- a/arch/powerpc/net/bpf_jit32.h
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -97,12 +97,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC64
 #define PPC_BPF_LOAD_CPU(r)     \
-    do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2);    \
+    do { BUILD_BUG_ON(sizeof_field(struct paca_struct, paca_index) != 2);    \
         PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index));    \
     } while (0)
 #else
 #define PPC_BPF_LOAD_CPU(r)     \
-    do { BUILD_BUG_ON(FIELD_SIZEOF(struct task_struct, cpu) != 4);    \
+    do { BUILD_BUG_ON(sizeof_field(struct task_struct, cpu) != 4);    \
         PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu));    \
     } while(0)
 #endif
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index d57b46e0dd60..0acc9d5fb19e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -321,7 +321,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
             ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
             break;
         case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
-            BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+            BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
             PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
             break;
         case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
@@ -333,16 +333,16 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 
         /*** Ancillary info loads ***/
 
         case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
-            BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+            BUILD_BUG_ON(sizeof_field(struct sk_buff,
                           protocol) != 2);
             PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff, protocol));
             break;
         case BPF_ANC | SKF_AD_IFINDEX:
         case BPF_ANC | SKF_AD_HATYPE:
-            BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+            BUILD_BUG_ON(sizeof_field(struct net_device,
                         ifindex) != 4);
-            BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+            BUILD_BUG_ON(sizeof_field(struct net_device,
                         type) != 2);
             PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                 dev));
@@ -365,17 +365,17 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
             break;
 
         case BPF_ANC | SKF_AD_MARK:
-            BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+            BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
             PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                               mark));
             break;
         case BPF_ANC | SKF_AD_RXHASH:
-            BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+            BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
             PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                               hash));
             break;
         case BPF_ANC | SKF_AD_VLAN_TAG:
-            BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+            BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);
 
             PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                               vlan_tci));
@@ -388,7 +388,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
             PPC_ANDI(r_A, r_A, 1);
             break;
         case BPF_ANC | SKF_AD_QUEUE:
-            BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+            BUILD_BUG_ON(sizeof_field(struct sk_buff,
                           queue_mapping) != 2);
             PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                               queue_mapping));
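The BPF JIT hunks above are part of the tree-wide FIELD_SIZEOF() to sizeof_field() rename; both take a struct type and a member name and evaluate to the member's size at compile time, so the JIT's layout assertions are unchanged. A stand-alone sketch of the idiom (the local my_sizeof_field macro mirrors what the kernel defines in <linux/stddef.h>; the struct is a toy, not sk_buff):

/*
 * Stand-alone illustration of the sizeof_field() idiom: the size of a
 * struct member is obtained without needing an instance of the struct.
 */
#include <stdio.h>

#define my_sizeof_field(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))

struct toy_buff {
    unsigned short protocol; /* 2 bytes, as the JIT asserts for sk_buff */
    unsigned int mark;       /* 4 bytes */
};

int main(void)
{
    /* The kernel checks these at build time with BUILD_BUG_ON(). */
    printf("protocol: %zu bytes, mark: %zu bytes\n",
           my_sizeof_field(struct toy_buff, protocol),
           my_sizeof_field(struct toy_buff, mark));
    return 0;
}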
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index e04b20625cb9..000b350d4060 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -59,10 +59,6 @@ static void export_imc_mode_and_cmd(struct device_node *node,
 
     imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
 
-    /*
-     * Return here, either because 'imc' directory already exists,
-     * Or failed to create a new one.
-     */
     if (!imc_debugfs_parent)
         return;
 
@@ -135,7 +131,6 @@ static int imc_get_mem_addr_nest(struct device_node *node,
     }
 
     pmu_ptr->imc_counter_mmaped = true;
-    export_imc_mode_and_cmd(node, pmu_ptr);
     kfree(base_addr_arr);
     kfree(chipid_arr);
     return 0;
@@ -151,7 +146,7 @@ error:
  * and domain as the inputs.
  * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
  */
-static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 {
     int ret = 0;
     struct imc_pmu *pmu_ptr;
@@ -159,27 +154,23 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 
     /* Return for unknown domain */
     if (domain < 0)
-        return -EINVAL;
+        return NULL;
 
     /* memory for pmu */
     pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
     if (!pmu_ptr)
-        return -ENOMEM;
+        return NULL;
 
     /* Set the domain */
     pmu_ptr->domain = domain;
 
     ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
-    if (ret) {
-        ret = -EINVAL;
+    if (ret)
         goto free_pmu;
-    }
 
     if (!of_property_read_u32(parent, "offset", &offset)) {
-        if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) {
-            ret = -EINVAL;
+        if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
             goto free_pmu;
-        }
     }
 
     /* Function to register IMC pmu */
@@ -190,14 +181,14 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
         if (pmu_ptr->domain == IMC_DOMAIN_NEST)
             kfree(pmu_ptr->mem_info);
         kfree(pmu_ptr);
-        return ret;
+        return NULL;
     }
 
-    return 0;
+    return pmu_ptr;
 
 free_pmu:
     kfree(pmu_ptr);
-    return ret;
+    return NULL;
 }
 
 static void disable_nest_pmu_counters(void)
@@ -254,6 +245,7 @@ int get_max_nest_dev(void)
 static int opal_imc_counters_probe(struct platform_device *pdev)
 {
     struct device_node *imc_dev = pdev->dev.of_node;
+    struct imc_pmu *pmu;
     int pmu_count = 0, domain;
     bool core_imc_reg = false, thread_imc_reg = false;
     u32 type;
@@ -269,6 +261,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
     }
 
     for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
+        pmu = NULL;
         if (of_property_read_u32(imc_dev, "type", &type)) {
             pr_warn("IMC Device without type property\n");
             continue;
@@ -285,7 +278,14 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
             domain = IMC_DOMAIN_THREAD;
             break;
         case IMC_TYPE_TRACE:
-            domain = IMC_DOMAIN_TRACE;
+            /*
+             * FIXME. Using trace_imc events to monitor application
+             * or KVM thread performance can cause a checkstop
+             * (system crash).
+             * Disable it for now.
+             */
+            pr_info_once("IMC: disabling trace_imc PMU\n");
+            domain = -1;
             break;
         default:
             pr_warn("IMC Unknown Device type \n");
@@ -293,9 +293,13 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
             domain = -1;
             break;
         }
-        if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
-            if (domain == IMC_DOMAIN_NEST)
+        pmu = imc_pmu_create(imc_dev, pmu_count, domain);
+        if (pmu != NULL) {
+            if (domain == IMC_DOMAIN_NEST) {
+                if (!imc_debugfs_parent)
+                    export_imc_mode_and_cmd(imc_dev, pmu);
                 pmu_count++;
+            }
             if (domain == IMC_DOMAIN_CORE)
                 core_imc_reg = true;
             if (domain == IMC_DOMAIN_THREAD)
@@ -303,10 +307,6 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
         }
     }
 
-    /* If none of the nest units are registered, remove debugfs interface */
-    if (pmu_count == 0)
-        debugfs_remove_recursive(imc_debugfs_parent);
-
     /* If core imc is not registered, unregister thread-imc */
     if (!core_imc_reg && thread_imc_reg)
         unregister_thread_imc();
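imc_pmu_create() now hands back the struct imc_pmu pointer (or NULL) instead of an errno-style int, which is what lets the probe loop pass the object on to export_imc_mode_and_cmd() for nest units. A minimal sketch of that create-or-NULL pattern outside the kernel (all names below are illustrative, not the kernel's):

/*
 * Illustrative create-or-NULL pattern, mirroring the imc_pmu_create()
 * change: the caller either gets a fully initialised object or NULL,
 * and only acts on a non-NULL result.
 */
#include <stdio.h>
#include <stdlib.h>

struct counter_unit {
    int domain;
    unsigned int mem_size;
};

static struct counter_unit *counter_unit_create(int domain, unsigned int mem_size)
{
    struct counter_unit *cu;

    if (domain < 0)          /* unknown domain: nothing to create */
        return NULL;

    cu = calloc(1, sizeof(*cu));
    if (!cu)
        return NULL;

    cu->domain = domain;
    cu->mem_size = mem_size;
    return cu;
}

int main(void)
{
    struct counter_unit *cu = counter_unit_create(1, 4096);

    if (cu != NULL) {        /* only a successful create is used further */
        printf("domain %d, size %u\n", cu->domain, cu->mem_size);
        free(cu);
    }
    return 0;
}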
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 91571841df8a..9dba7e880885 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -539,6 +539,16 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
     /* balloon page list reference */
     get_page(newpage);
 
+    /*
+     * When we migrate a page to a different zone, we have to fixup the
+     * count of both involved zones as we adjusted the managed page count
+     * when inflating.
+     */
+    if (page_zone(page) != page_zone(newpage)) {
+        adjust_managed_page_count(page, 1);
+        adjust_managed_page_count(newpage, -1);
+    }
+
     spin_lock_irqsave(&b_dev_info->pages_lock, flags);
     balloon_page_insert(b_dev_info, newpage);
     balloon_page_delete(page);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 0a40201f315f..0c8421dd01ab 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -74,6 +74,9 @@
 #include "pseries.h"
 #include "../../../../drivers/pci/pci.h"
 
+DEFINE_STATIC_KEY_FALSE(shared_processor);
+EXPORT_SYMBOL_GPL(shared_processor);
+
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
 unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
@@ -758,6 +761,10 @@ static void __init pSeries_setup_arch(void)
 
     if (firmware_has_feature(FW_FEATURE_LPAR)) {
         vpa_init(boot_cpuid);
+
+        if (lppaca_shared_proc(get_lppaca()))
+            static_branch_enable(&shared_processor);
+
         ppc_md.power_save = pseries_lpar_idle;
         ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
 #ifdef CONFIG_PCI_IOV
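shared_processor above is a static key: it defaults to false, is flipped once at boot when the LPAR runs on shared processors, and callers can then test it with a patched branch instead of re-reading the lppaca on every call. A hedged sketch of a consumer follows (is_shared_processor() is an illustrative helper name; the real users live in headers not touched by this diff):

/*
 * Hedged sketch of a consumer of the shared_processor static key.
 * static_branch_unlikely() compiles to a patched branch/no-op, so
 * dedicated-processor LPARs pay essentially nothing for the check.
 */
#include <linux/types.h>
#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
    return static_branch_unlikely(&shared_processor);
}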
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 33c10749edec..55dc61cb4867 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -392,20 +392,28 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
     data->esb_shift = esb_shift;
     data->trig_page = trig_page;
 
+    data->hw_irq = hw_irq;
+
     /*
      * No chip-id for the sPAPR backend. This has an impact how we
      * pick a target. See xive_pick_irq_target().
      */
     data->src_chip = XIVE_INVALID_CHIP_ID;
 
+    /*
+     * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
+     * be used for interrupt management. Skip the remapping of the
+     * ESB pages which are not available.
+     */
+    if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
+        return 0;
+
     data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
     if (!data->eoi_mmio) {
         pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
         return -ENOMEM;
     }
 
-    data->hw_irq = hw_irq;
-
     /* Full function page supports trigger */
     if (flags & XIVE_SRC_TRIGGER) {
         data->trig_mmio = data->eoi_mmio;
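The early return above implements the comment: when XIVE_IRQ_FLAG_H_INT_ESB is set, ESB management goes through the H_INT_ESB hcall and there are no ESB pages to ioremap(), so the MMIO setup is skipped rather than treated as a failure. A small user-space sketch of that guard pattern (names and the fake mapping are illustrative only):

/*
 * Illustrative guard pattern matching the hunk above: if the source is
 * managed through hcalls, skip the MMIO mapping instead of treating the
 * missing ESB page as an error.
 */
#include <stdio.h>
#include <stddef.h>
#include <errno.h>

#define FLAG_HCALL_ESB 0x1   /* stands in for XIVE_IRQ_FLAG_H_INT_ESB */

static int populate_irq_data(unsigned int flags, void **eoi_mmio)
{
    if (flags & FLAG_HCALL_ESB) {
        *eoi_mmio = NULL;    /* hcall path: no ESB page to map */
        return 0;
    }

    *eoi_mmio = (void *)0x1; /* ioremap(eoi_page, ...) in the kernel */
    return *eoi_mmio ? 0 : -ENOMEM;
}

int main(void)
{
    void *p;

    printf("hcall-managed: %d\n", populate_irq_data(FLAG_HCALL_ESB, &p));
    printf("mmio-managed:  %d\n", populate_irq_data(0, &p));
    return 0;
}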