Diffstat (limited to 'arch/powerpc/include')
25 files changed, 345 insertions, 73 deletions
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e3b1d41c89be..28992d012926 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -8,6 +8,7 @@
 #ifdef __KERNEL__
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #define ATOMIC_INIT(i)		{ (i) }
 
@@ -270,11 +271,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 }
 #define atomic_dec_if_positive atomic_dec_if_positive
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
 #ifdef __powerpc64__
 
 #define ATOMIC64_INIT(i)	{ (i) }
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index f89da808ce31..bab79a110c7b 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -84,4 +84,7 @@ do {									\
 	___p1;								\
 })
 
+#define smp_mb__before_atomic()	smp_mb()
+#define smp_mb__after_atomic()	smp_mb()
+
 #endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index a5e9a7d494d8..bd3bd573d0ae 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -51,11 +51,7 @@
 #define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
 #define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
 
-/*
- * clear_bit doesn't imply a memory barrier
- */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
+#include <asm/barrier.h>
 
 /* Macro for generating the ***_bits() functions */
 #define DEFINE_BITOP(fn, op, prefix)	\
diff --git a/arch/powerpc/include/asm/dcr-mmio.h b/arch/powerpc/include/asm/dcr-mmio.h
index acd491dbd45a..93a68b28e695 100644
--- a/arch/powerpc/include/asm/dcr-mmio.h
+++ b/arch/powerpc/include/asm/dcr-mmio.h
@@ -51,10 +51,6 @@ static inline void dcr_write_mmio(dcr_host_mmio_t host,
 	out_be32(host.token + ((host.base + dcr_n) * host.stride), value);
 }
 
-extern u64 of_translate_dcr_address(struct device_node *dev,
-				    unsigned int dcr_n,
-				    unsigned int *stride);
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_DCR_MMIO_H */
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 856f8deb557a..6330a61b875a 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -81,4 +81,38 @@ static inline unsigned int get_oc(u32 inst)
 {
 	return (inst >> 11) & 0x7fff;
 }
+
+#define IS_XFORM(inst)	(get_op(inst) == 31)
+#define IS_DSFORM(inst)	(get_op(inst) >= 56)
+
+/*
+ * Create a DSISR value from the instruction
+ */
+static inline unsigned make_dsisr(unsigned instr)
+{
+	unsigned dsisr;
+
+
+	/* bits  6:15 --> 22:31 */
+	dsisr = (instr & 0x03ff0000) >> 16;
+
+	if (IS_XFORM(instr)) {
+		/* bits 29:30 --> 15:16 */
+		dsisr |= (instr & 0x00000006) << 14;
+		/* bit     25 -->    17 */
+		dsisr |= (instr & 0x00000040) << 8;
+		/* bits 21:24 --> 18:21 */
+		dsisr |= (instr & 0x00000780) << 3;
+	} else {
+		/* bit      5 -->    17 */
+		dsisr |= (instr & 0x04000000) >> 12;
+		/* bits  1: 4 --> 18:21 */
+		dsisr |= (instr & 0x78000000) >> 17;
+		/* bits 30:31 --> 12:13 */
+		if (IS_DSFORM(instr))
+			dsisr |= (instr & 0x00000003) << 18;
+	}
+
+	return dsisr;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */
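A quick worked illustration of make_dsisr() above (a sketch for illustration only, not part of the commit): take the D-form store stw r5,8(r3), which encodes as 0x90a30008 with primary opcode 36, so neither IS_XFORM nor IS_DSFORM matches. The RS/RA fields (instruction bits 6:15) land in DSISR bits 22:31 and opcode bits 1:4 land in bits 18:21:

	u32 inst = 0x90a30008;			/* stw r5,8(r3): primary opcode 36 */
	unsigned int dsisr = make_dsisr(inst);
	/* (0x00a30000 >> 16) | (0x10000000 >> 17) = 0x0a3 | 0x800 = 0x8a3 */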
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 19eb74a95b59..9601741080e5 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -102,6 +102,7 @@
 #define BOOK3S_INTERRUPT_PERFMON	0xf00
 #define BOOK3S_INTERRUPT_ALTIVEC	0xf20
 #define BOOK3S_INTERRUPT_VSX		0xf40
+#define BOOK3S_INTERRUPT_FAC_UNAVAIL	0xf60
 #define BOOK3S_INTERRUPT_H_FAC_UNAVAIL	0xf80
 
 #define BOOK3S_IRQPRIO_SYSTEM_RESET		0
@@ -114,14 +115,15 @@
 #define BOOK3S_IRQPRIO_FP_UNAVAIL		7
 #define BOOK3S_IRQPRIO_ALTIVEC			8
 #define BOOK3S_IRQPRIO_VSX			9
-#define BOOK3S_IRQPRIO_SYSCALL			10
-#define BOOK3S_IRQPRIO_MACHINE_CHECK		11
-#define BOOK3S_IRQPRIO_DEBUG			12
-#define BOOK3S_IRQPRIO_EXTERNAL			13
-#define BOOK3S_IRQPRIO_DECREMENTER		14
-#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR	15
-#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL		16
-#define BOOK3S_IRQPRIO_MAX			17
+#define BOOK3S_IRQPRIO_FAC_UNAVAIL		10
+#define BOOK3S_IRQPRIO_SYSCALL			11
+#define BOOK3S_IRQPRIO_MACHINE_CHECK		12
+#define BOOK3S_IRQPRIO_DEBUG			13
+#define BOOK3S_IRQPRIO_EXTERNAL			14
+#define BOOK3S_IRQPRIO_DECREMENTER		15
+#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR	16
+#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL		17
+#define BOOK3S_IRQPRIO_MAX			18
 
 #define BOOK3S_HFLAG_DCBZ32			0x1
 #define BOOK3S_HFLAG_SLB			0x2
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bb1e38a23ac7..f52f65694527 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -268,9 +268,10 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 	return vcpu->arch.pc;
 }
 
+static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 {
-	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
 }
 
 static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 51388befeddb..fddb72b48ce9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -77,34 +77,122 @@ static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
 	return old == 0;
 }
 
+static inline int __hpte_actual_psize(unsigned int lp, int psize)
+{
+	int i, shift;
+	unsigned int mask;
+
+	/* start from 1 ignoring MMU_PAGE_4K */
+	for (i = 1; i < MMU_PAGE_COUNT; i++) {
+
+		/* invalid penc */
+		if (mmu_psize_defs[psize].penc[i] == -1)
+			continue;
+		/*
+		 * encoding bits per actual page size
+		 *        PTE LP     actual page size
+		 * rrrr rrrz		>=8KB
+		 * rrrr rrzz		>=16KB
+		 * rrrr rzzz		>=32KB
+		 * rrrr zzzz		>=64KB
+		 * .......
+		 */
+		shift = mmu_psize_defs[i].shift - LP_SHIFT;
+		if (shift > LP_BITS)
+			shift = LP_BITS;
+		mask = (1 << shift) - 1;
+		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
+			return i;
+	}
+	return -1;
+}
+
 static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 					     unsigned long pte_index)
 {
-	unsigned long rb, va_low;
+	int b_psize, a_psize;
+	unsigned int penc;
+	unsigned long rb = 0, va_low, sllp;
+	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
+	if (!(v & HPTE_V_LARGE)) {
+		/* both base and actual psize is 4k */
+		b_psize = MMU_PAGE_4K;
+		a_psize = MMU_PAGE_4K;
+	} else {
+		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
+
+			/* valid entries have a shift value */
+			if (!mmu_psize_defs[b_psize].shift)
+				continue;
+			a_psize = __hpte_actual_psize(lp, b_psize);
+			if (a_psize != -1)
+				break;
+		}
+	}
+	/*
+	 * Ignore the top 14 bits of va
+	 * v have top two bits covering segment size, hence move
+	 * by 16 bits, Also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
+	 * AVA field in v also have the lower 23 bits ignored.
+	 * For base page size 4K we need 14 .. 65 bits (so need to
+	 * collect extra 11 bits)
+	 * For others we need 14..14+i
+	 */
+	/* This covers 14..54 bits of va*/
 	rb = (v & ~0x7fUL) << 16;		/* AVA field */
+	/*
+	 * AVA in v had cleared lower 23 bits. We need to derive
+	 * that from pteg index
+	 */
 	va_low = pte_index >> 3;
 	if (v & HPTE_V_SECONDARY)
 		va_low = ~va_low;
-	/* xor vsid from AVA */
+	/*
+	 * get the vpn bits from va_low using reverse of hashing.
+	 * In v we have va with 23 bits dropped and then left shifted
+	 * HPTE_V_AVPN_SHIFT (7) bits. Now to find vsid we need
+	 * right shift it with (SID_SHIFT - (23 - 7))
+	 */
 	if (!(v & HPTE_V_1TB_SEG))
-		va_low ^= v >> 12;
+		va_low ^= v >> (SID_SHIFT - 16);
 	else
-		va_low ^= v >> 24;
+		va_low ^= v >> (SID_SHIFT_1T - 16);
 	va_low &= 0x7ff;
-	if (v & HPTE_V_LARGE) {
-		rb |= 1;			/* L field */
-		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
-		    (r & 0xff000)) {
-			/* non-16MB large page, must be 64k */
-			/* (masks depend on page size) */
-			rb |= 0x1000;		/* page encoding in LP field */
-			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-			rb |= ((va_low << 4) & 0xf0);	/* AVAL field (P7 doesn't seem to care) */
-		}
-	} else {
-		/* 4kB page */
-		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
+
+	switch (b_psize) {
+	case MMU_PAGE_4K:
+		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
+			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
+		rb |= sllp << 5;	/*  AP field */
+		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
+		break;
+	default:
+	{
+		int aval_shift;
+		/*
+		 * remaining 7bits of AVA/LP fields
+		 * Also contain the rr bits of LP
+		 */
+		rb |= (va_low & 0x7f) << 16;
+		/*
+		 * Now clear not needed LP bits based on actual psize
+		 */
+		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
+		/*
+		 * AVAL field 58..77 - base_page_shift bits of va
+		 * we have space for 58..64 bits, Missing bits should
+		 * be zero filled. +1 is to take care of L bit shift
+		 */
+		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
+		rb |= ((va_low << aval_shift) & 0xfe);
+
+		rb |= 1;		/* L field */
+		penc = mmu_psize_defs[b_psize].penc[a_psize];
+		rb |= penc << 12;	/* LP field */
+		break;
+	}
 	}
 	rb |= (v >> 54) & 0x300;		/* B field */
 	return rb;
@@ -112,14 +200,26 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 
 static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 {
+	int size, a_psize;
+	/* Look at the 8 bit LP value */
+	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
 	/* only handle 4k, 64k and 16M pages for now */
 	if (!(h & HPTE_V_LARGE))
-		return 1ul << 12;		/* 4k page */
-	if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
-		return 1ul << 16;		/* 64k page */
-	if ((l & 0xff000) == 0)
-		return 1ul << 24;		/* 16M page */
-	return 0;				/* error */
+		return 1ul << 12;
+	else {
+		for (size = 0; size < MMU_PAGE_COUNT; size++) {
+			/* valid entries have a shift value */
+			if (!mmu_psize_defs[size].shift)
+				continue;
+
+			a_psize = __hpte_actual_psize(lp, size);
+			if (a_psize != -1)
+				return 1ul << mmu_psize_defs[a_psize].shift;
+		}
+
+	}
+	return 0;
 }
 
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 821725c1bf46..5bdfb5dd3400 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -104,6 +104,7 @@ struct kvmppc_host_state {
 #ifdef CONFIG_PPC_BOOK3S_64
 	u64 cfar;
 	u64 ppr;
+	u64 host_fscr;
 #endif
 };
 
@@ -133,6 +134,7 @@ struct kvmppc_book3s_shadow_vcpu {
 		u64 esid;
 		u64 vsid;
 	} slb[64];			/* guest SLB */
+	u64 shadow_fscr;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 80d46b5a7efb..c7aed6105ff9 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -108,9 +108,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault_dear;
 }
-
-static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.shared->msr;
-}
 #endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1eaea2dea174..bb66d8b8efdf 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -449,7 +449,9 @@ struct kvm_vcpu_arch {
 	ulong pc;
 	ulong ctr;
 	ulong lr;
+#ifdef CONFIG_PPC_BOOK3S
 	ulong tar;
+#endif
 
 	ulong xer;
 	u32 cr;
@@ -475,6 +477,7 @@ struct kvm_vcpu_arch {
 	ulong ppr;
 	ulong pspb;
 	ulong fscr;
+	ulong shadow_fscr;
 	ulong ebbhr;
 	ulong ebbrr;
 	ulong bescr;
@@ -562,6 +565,7 @@ struct kvm_vcpu_arch {
 #ifdef CONFIG_PPC_BOOK3S
 	ulong fault_dar;
 	u32 fault_dsisr;
+	unsigned long intr_msr;
 #endif
 
 #ifdef CONFIG_BOOKE
@@ -622,8 +626,12 @@ struct kvm_vcpu_arch {
 	wait_queue_head_t cpu_run;
 
 	struct kvm_vcpu_arch_shared *shared;
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	bool shared_big_endian;
+#endif
 	unsigned long magic_page_pa; /* phys addr to map the magic page to */
 	unsigned long magic_page_ea; /* effect. addr to map the magic page to */
+	bool disable_kernel_nx;
 
 	int irq_type;		/* one of KVM_IRQ_* */
 	int irq_cpu_id;
@@ -654,7 +662,6 @@ struct kvm_vcpu_arch {
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
-	unsigned long intr_msr;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 4096f16502a9..4a7cc453be0b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -449,6 +449,84 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
 }
 
 /*
+ * Shared struct helpers. The shared struct can be little or big endian,
+ * depending on the guest endianness. So expose helpers to all of them.
+ */
+static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	/* Only Book3S_64 PR supports bi-endian for now */
+	return vcpu->arch.shared_big_endian;
+#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
+	/* Book3s_64 HV on little endian is always little endian */
+	return false;
+#else
+	return true;
+#endif
+}
+
+#define SHARED_WRAPPER_GET(reg, size)					\
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
+{									\
+	if (kvmppc_shared_big_endian(vcpu))				\
+		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
+	else								\
+		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
+}									\
+
+#define SHARED_WRAPPER_SET(reg, size)					\
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
+{									\
+	if (kvmppc_shared_big_endian(vcpu))				\
+		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
+	else								\
+		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
+}									\
+
+#define SHARED_WRAPPER(reg, size)					\
+	SHARED_WRAPPER_GET(reg, size)					\
+	SHARED_WRAPPER_SET(reg, size)					\
+
+SHARED_WRAPPER(critical, 64)
+SHARED_WRAPPER(sprg0, 64)
+SHARED_WRAPPER(sprg1, 64)
+SHARED_WRAPPER(sprg2, 64)
+SHARED_WRAPPER(sprg3, 64)
+SHARED_WRAPPER(srr0, 64)
+SHARED_WRAPPER(srr1, 64)
+SHARED_WRAPPER(dar, 64)
+SHARED_WRAPPER_GET(msr, 64)
+static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+		vcpu->arch.shared->msr = cpu_to_be64(val);
+	else
+		vcpu->arch.shared->msr = cpu_to_le64(val);
+}
+SHARED_WRAPPER(dsisr, 32)
+SHARED_WRAPPER(int_pending, 32)
+SHARED_WRAPPER(sprg4, 64)
+SHARED_WRAPPER(sprg5, 64)
+SHARED_WRAPPER(sprg6, 64)
+SHARED_WRAPPER(sprg7, 64)
+
+static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
+	else
+		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
+}
+
+static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
+	else
+		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
+}
+
+/*
  * Please call after prepare_to_enter. This function puts the lazy ee and irq
  * disabled tracking state back to normal mode, without actually enabling
  * interrupts.
@@ -485,7 +563,7 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
 	msr_64bit = MSR_SF;
 #endif
 
-	if (!(vcpu->arch.shared->msr & msr_64bit))
+	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
 		ea = (uint32_t)ea;
 
 	return ea;
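To make the wrapper machinery above concrete, this is what a single SHARED_WRAPPER(srr0, 64) invocation expands to (expanded by hand here purely as an illustration): an endian-aware getter/setter pair for one field of the shared (magic) page, switching on kvmppc_shared_big_endian():

	static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
	{
		if (kvmppc_shared_big_endian(vcpu))
			return be64_to_cpu(vcpu->arch.shared->srr0);
		else
			return le64_to_cpu(vcpu->arch.shared->srr0);
	}

	static inline void kvmppc_set_srr0(struct kvm_vcpu *vcpu, u64 val)
	{
		if (kvmppc_shared_big_endian(vcpu))
			vcpu->arch.shared->srr0 = cpu_to_be64(val);
		else
			vcpu->arch.shared->srr0 = cpu_to_le64(val);
	}

Note that msr only gets the getter via SHARED_WRAPPER_GET plus the separate kvmppc_set_msr_fast() above, presumably because a full MSR write needs more than a byte swap.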
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 95145a15c708..1b0739bc14b5 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -46,11 +46,6 @@ struct pci_dev;
 #define pcibios_assign_all_busses() \
 	(pci_has_flag(PCI_REASSIGN_ALL_BUS))
 
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
-	/* We don't do dynamic PCI IRQ allocation */
-}
-
 #define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 {
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 3ebb188c3ff5..d98c1ecc3266 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -44,6 +44,12 @@ static inline int pte_present(pte_t pte)
 	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
 }
 
+#define pte_present_nonuma pte_present_nonuma
+static inline int pte_present_nonuma(pte_t pte)
+{
+	return pte_val(pte) & (_PAGE_PRESENT);
+}
+
 #define pte_numa pte_numa
 static inline int pte_numa(pte_t pte)
 {
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 6586a40a46ce..cded7c1278ef 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -318,11 +318,16 @@ n:
 	addi	reg,reg,(name - 0b)@l;
 
 #ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH	high
+#else
+#define __AS_ATHIGH	h
+#endif
 #define LOAD_REG_IMMEDIATE(reg,expr)		\
 	lis     reg,(expr)@highest;		\
 	ori     reg,reg,(expr)@higher;		\
 	rldicr  reg,reg,32,31;			\
-	oris    reg,reg,(expr)@h;		\
+	oris    reg,reg,(expr)@__AS_ATHIGH;	\
 	ori     reg,reg,(expr)@l;
 
 #define LOAD_REG_ADDR(reg,name)			\
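For reference (an illustrative sketch, not from the patch): LOAD_REG_IMMEDIATE builds a 64-bit constant from four 16-bit slices, and the new __AS_ATHIGH macro simply picks the @high operator when the assembler advertises HAVE_AS_ATHIGH, falling back to the old @h spelling; as used here, both select bits 16..31. In C terms, with an arbitrary example constant:

	unsigned long expr = 0x123456789abcdef0UL;	/* example value only */
	unsigned int highest = (expr >> 48) & 0xffff;	/* 0x1234: lis  reg,(expr)@highest */
	unsigned int higher  = (expr >> 32) & 0xffff;	/* 0x5678: ori  reg,reg,(expr)@higher */
	/* rldicr reg,reg,32,31 then moves the two slices above into the upper word */
	unsigned int high    = (expr >> 16) & 0xffff;	/* 0x9abc: oris reg,reg,(expr)@__AS_ATHIGH */
	unsigned int low     = expr & 0xffff;		/* 0xdef0: ori  reg,reg,(expr)@l */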
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index d977b9b78696..74b79f07f041 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -26,6 +26,45 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 
+#define OF_DT_BEGIN_NODE	0x1		/* Start of node, full name */
+#define OF_DT_END_NODE		0x2		/* End node */
+#define OF_DT_PROP		0x3		/* Property: name off, size,
+						 * content */
+#define OF_DT_NOP		0x4		/* nop */
+#define OF_DT_END		0x9
+
+#define OF_DT_VERSION		0x10
+
+/*
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full pathes and
+ * property contents. The dt strings contain a separate block with just
+ * the strings for the property names, and is fully page aligned and
+ * self contained in a page, so that it can be kept around by the kernel,
+ * each property name appears only once in this page (cheap compression)
+ *
+ * the mem_rsvmap contains a map of reserved ranges of physical memory,
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * the job of everybody. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0
+ */
+struct boot_param_header {
+	__be32	magic;			/* magic word OF_DT_HEADER */
+	__be32	totalsize;		/* total size of DT block */
+	__be32	off_dt_struct;		/* offset to structure */
+	__be32	off_dt_strings;		/* offset to strings */
+	__be32	off_mem_rsvmap;		/* offset to memory reserve map */
+	__be32	version;		/* format version */
+	__be32	last_comp_version;	/* last compatible version */
+	/* version 2 fields below */
+	__be32	boot_cpuid_phys;	/* Physical CPU id we're booting on */
+	/* version 3 fields below */
+	__be32	dt_strings_size;	/* size of the DT strings block */
+	/* version 17 fields below */
+	__be32	dt_struct_size;		/* size of the DT structure block */
+};
+
 /*
  * OF address retreival & translation
  */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e5d2e0bc7e03..4852bcf270f3 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -670,18 +670,20 @@
 #define   MMCR0_PROBLEM_DISABLE MMCR0_FCP
 #define   MMCR0_FCM1	0x10000000UL /* freeze counters while MSR mark = 1 */
 #define   MMCR0_FCM0	0x08000000UL /* freeze counters while MSR mark = 0 */
-#define   MMCR0_PMXE	0x04000000UL /* performance monitor exception enable */
-#define   MMCR0_FCECE	0x02000000UL /* freeze ctrs on enabled cond or event */
+#define   MMCR0_PMXE	ASM_CONST(0x04000000) /* perf mon exception enable */
+#define   MMCR0_FCECE	ASM_CONST(0x02000000) /* freeze ctrs on enabled cond or event */
 #define   MMCR0_TBEE	0x00400000UL /* time base exception enable */
 #define   MMCR0_BHRBA	0x00200000UL /* BHRB Access allowed in userspace */
 #define   MMCR0_EBE	0x00100000UL /* Event based branch enable */
 #define   MMCR0_PMCC	0x000c0000UL /* PMC control */
 #define   MMCR0_PMCC_U6	0x00080000UL /* PMC1-6 are R/W by user (PR) */
 #define   MMCR0_PMC1CE	0x00008000UL /* PMC1 count enable*/
-#define   MMCR0_PMCjCE	0x00004000UL /* PMCj count enable*/
+#define   MMCR0_PMCjCE	ASM_CONST(0x00004000) /* PMCj count enable*/
 #define   MMCR0_TRIGGER	0x00002000UL /* TRIGGER enable */
-#define   MMCR0_PMAO_SYNC 0x00000800UL /* PMU interrupt is synchronous */
-#define   MMCR0_PMAO	0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
+#define   MMCR0_PMAO_SYNC ASM_CONST(0x00000800) /* PMU intr is synchronous */
+#define   MMCR0_C56RUN	ASM_CONST(0x00000100) /* PMC5/6 count when RUN=0 */
+/* performance monitor alert has occurred, set to 0 after handling exception */
+#define   MMCR0_PMAO	ASM_CONST(0x00000080)
 #define   MMCR0_SHRFC	0x00000040UL /* SHRre freeze conditions between threads */
 #define   MMCR0_FC56	0x00000010UL /* freeze counters 5 and 6 */
 #define   MMCR0_FCTI	0x00000008UL /* freeze counters in tags inactive mode */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 163c3b05a76e..464f1089b532 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -583,6 +583,7 @@
 
 /* Bit definitions for L1CSR0. */
 #define L1CSR0_CPE	0x00010000	/* Data Cache Parity Enable */
+#define L1CSR0_CUL	0x00000400	/* Data Cache Unable to Lock */
 #define L1CSR0_CLFC	0x00000100	/* Cache Lock Bits Flash Clear */
 #define L1CSR0_DCFI	0x00000002	/* Data Cache Flash Invalidate */
 #define L1CSR0_CFI	0x00000002	/* Cache Flash Invalidate */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index d0e784e0ff48..521790330672 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 		(unsigned long)_stext < end;
 }
 
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+	extern char kvm_tmp[];
+	return start < (unsigned long)kvm_tmp &&
+		(unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+	return 0;
+#endif
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3ddf70276706..ea4dc3a89c1f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -361,3 +361,4 @@ SYSCALL(finit_module)
 SYSCALL(ni_syscall) /* sys_kcmp */
 SYSCALL_SPU(sched_setattr)
 SYSCALL_SPU(sched_getattr)
+SYSCALL_SPU(renameat2)
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index c9202151079f..6c8a8c5a37a1 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -9,12 +9,8 @@ struct device_node;
 #ifdef CONFIG_NUMA
 
 /*
- * Before going off node we want the VM to try and reclaim from the local
- * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
- * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
- * 20, we never reclaim and go off node straight away.
- *
- * To fix this we choose a smaller value of RECLAIM_DISTANCE.
+ * If zone_reclaim_mode is enabled, a RECLAIM_DISTANCE of 10 will mean that
+ * all zones on all nodes will be eligible for zone_reclaim().
  */
 #define RECLAIM_DISTANCE 10
 
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4494f029b632..5ce5552ab9f5 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls		357
+#define __NR_syscalls		358
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
@@ -29,7 +29,6 @@
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_IPC
 #define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
 #define __ARCH_WANT_SYS_SIGNAL
 #define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_SYS_UTIME
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index a6665be4f3ab..2bc4a9409a93 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -545,7 +545,6 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
 #define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
 #define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
-#define KVM_REG_PPC_WORT	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
 
 #define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
 #define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
@@ -555,6 +554,7 @@ struct kvm_get_htab_header {
 
 #define KVM_REG_PPC_ARCH_COMPAT	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
 #define KVM_REG_PPC_DABRX	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
+#define KVM_REG_PPC_WORT	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index e3af3286a068..91e42f09b323 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -82,10 +82,16 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_FEATURE_MAGIC_PAGE	1
 
+/* Magic page flags from host to guest */
+
 #define KVM_MAGIC_FEAT_SR		(1 << 0)
 
 /* MASn, ESR, PIR, and high SPRGs */
 #define KVM_MAGIC_FEAT_MAS0_TO_SPRG7	(1 << 1)
 
+/* Magic page flags from guest to host */
+
+#define MAGIC_PAGE_FLAG_NOT_MAPPED_NX	(1 << 0)
+
 
 #endif /* _UAPI__POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 881bf2e2560d..2d526f7b48da 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -379,5 +379,6 @@
 #define __NR_kcmp		354
 #define __NR_sched_setattr	355
 #define __NR_sched_getattr	356
+#define __NR_renameat2		357
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
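Since this series wires renameat2 up as powerpc syscall 357 (systbl.h plus the two unistd.h changes above), userspace without a libc wrapper can reach it through syscall(2). A hedged sketch only; RENAME_NOREPLACE comes from the generic linux/fs.h ABI rather than from this diff:

	#define _GNU_SOURCE
	#include <unistd.h>
	#include <sys/syscall.h>	/* __NR_renameat2, if the installed headers have it */
	#include <fcntl.h>		/* AT_FDCWD */
	#include <linux/fs.h>		/* RENAME_NOREPLACE */

	#ifndef __NR_renameat2
	#define __NR_renameat2	357	/* powerpc number, per the diff above */
	#endif

	int main(void)
	{
		/* rename "old" to "new", failing instead of replacing an existing "new" */
		long ret = syscall(__NR_renameat2, AT_FDCWD, "old", AT_FDCWD, "new",
				   RENAME_NOREPLACE);
		return ret == 0 ? 0 : 1;
	}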