From 32fad281c0680ed0ccade7dda85a2121cf9b1d06 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 4 May 2012 02:32:53 +0000 Subject: KVM: PPC: Book3S HV: Make the guest hash table size configurable This adds a new ioctl to enable userspace to control the size of the guest hashed page table (HPT) and to clear it out when resetting the guest. The KVM_PPC_ALLOCATE_HTAB ioctl is a VM ioctl and takes as its parameter a pointer to a u32 containing the desired order of the HPT (log base 2 of the size in bytes), which is updated on successful return to the actual order of the HPT which was allocated. There must be no vcpus running at the time of this ioctl. To enforce this, we now keep a count of the number of vcpus running in kvm->arch.vcpus_running. If the ioctl is called when a HPT has already been allocated, we don't reallocate the HPT but just clear it out. We first clear the kvm->arch.rma_setup_done flag, which has two effects: (a) since we hold the kvm->lock mutex, it will prevent any vcpus from starting to run until we're done, and (b) it means that the first vcpu to run after we're done will re-establish the VRMA if necessary. If userspace doesn't call this ioctl before running the first vcpu, the kernel will allocate a default-sized HPT at that point. We do it then rather than when creating the VM, as the code did previously, so that userspace has a chance to do the ioctl if it wants. When allocating the HPT, we can allocate either from the kernel page allocator, or from the preallocated pool. If userspace is asking for a different size from the preallocated HPTs, we first try to allocate using the kernel page allocator. Then we try to allocate from the preallocated pool, and then if that fails, we try allocating decreasing sizes from the kernel page allocator, down to the minimum size allowed (256kB). Note that the kernel page allocator limits allocations to 1 << CONFIG_FORCE_MAX_ZONEORDER pages, which by default corresponds to 16MB (on 64-bit powerpc, at least). Signed-off-by: Paul Mackerras [agraf: fix module compilation] Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 123 +++++++++++++++++++++++++++-------- arch/powerpc/kvm/book3s_hv.c | 40 ++++++++---- arch/powerpc/kvm/book3s_hv_builtin.c | 5 +- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 15 +++-- arch/powerpc/kvm/powerpc.c | 18 +++++ 5 files changed, 153 insertions(+), 48 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 80a577517584..d03eb6f7b058 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -37,56 +37,121 @@ /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ #define MAX_LPID_970 63 -long kvmppc_alloc_hpt(struct kvm *kvm) +/* Power architecture requires HPT is at least 256kB */ +#define PPC_MIN_HPT_ORDER 18 + +long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) { unsigned long hpt; - long lpid; struct revmap_entry *rev; struct kvmppc_linear_info *li; + long order = kvm_hpt_order; - /* Allocate guest's hashed page table */ - li = kvm_alloc_hpt(); - if (li) { - /* using preallocated memory */ - hpt = (ulong)li->base_virt; - kvm->arch.hpt_li = li; - } else { - /* using dynamic memory */ + if (htab_orderp) { + order = *htab_orderp; + if (order < PPC_MIN_HPT_ORDER) + order = PPC_MIN_HPT_ORDER; + } + + /* + * If the user wants a different size from default, + * try first to allocate it from the kernel page allocator. 
+ */ + hpt = 0; + if (order != kvm_hpt_order) { hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT| - __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT); + __GFP_NOWARN, order - PAGE_SHIFT); + if (!hpt) + --order; } + /* Next try to allocate from the preallocated pool */ if (!hpt) { - pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n"); - return -ENOMEM; + li = kvm_alloc_hpt(); + if (li) { + hpt = (ulong)li->base_virt; + kvm->arch.hpt_li = li; + order = kvm_hpt_order; + } } + + /* Lastly try successively smaller sizes from the page allocator */ + while (!hpt && order > PPC_MIN_HPT_ORDER) { + hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT| + __GFP_NOWARN, order - PAGE_SHIFT); + if (!hpt) + --order; + } + + if (!hpt) + return -ENOMEM; + kvm->arch.hpt_virt = hpt; + kvm->arch.hpt_order = order; + /* HPTEs are 2**4 bytes long */ + kvm->arch.hpt_npte = 1ul << (order - 4); + /* 128 (2**7) bytes in each HPTEG */ + kvm->arch.hpt_mask = (1ul << (order - 7)) - 1; /* Allocate reverse map array */ - rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE); + rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte); if (!rev) { pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n"); goto out_freehpt; } kvm->arch.revmap = rev; + kvm->arch.sdr1 = __pa(hpt) | (order - 18); - lpid = kvmppc_alloc_lpid(); - if (lpid < 0) - goto out_freeboth; + pr_info("KVM guest htab at %lx (order %ld), LPID %x\n", + hpt, order, kvm->arch.lpid); - kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18); - kvm->arch.lpid = lpid; - - pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid); + if (htab_orderp) + *htab_orderp = order; return 0; - out_freeboth: - vfree(rev); out_freehpt: - free_pages(hpt, HPT_ORDER - PAGE_SHIFT); + if (kvm->arch.hpt_li) + kvm_release_hpt(kvm->arch.hpt_li); + else + free_pages(hpt, order - PAGE_SHIFT); return -ENOMEM; } +long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp) +{ + long err = -EBUSY; + long order; + + mutex_lock(&kvm->lock); + if (kvm->arch.rma_setup_done) { + kvm->arch.rma_setup_done = 0; + /* order rma_setup_done vs. vcpus_running */ + smp_mb(); + if (atomic_read(&kvm->arch.vcpus_running)) { + kvm->arch.rma_setup_done = 1; + goto out; + } + } + if (kvm->arch.hpt_virt) { + order = kvm->arch.hpt_order; + /* Set the entire HPT to 0, i.e. invalid HPTEs */ + memset((void *)kvm->arch.hpt_virt, 0, 1ul << order); + /* + * Set the whole last_vcpu array to an invalid vcpu number. + * This ensures that each vcpu will flush its TLB on next entry. 
+ */ + memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu)); + *htab_orderp = order; + err = 0; + } else { + err = kvmppc_alloc_hpt(kvm, htab_orderp); + order = *htab_orderp; + } + out: + mutex_unlock(&kvm->lock); + return err; +} + void kvmppc_free_hpt(struct kvm *kvm) { kvmppc_free_lpid(kvm->arch.lpid); @@ -94,7 +159,8 @@ void kvmppc_free_hpt(struct kvm *kvm) if (kvm->arch.hpt_li) kvm_release_hpt(kvm->arch.hpt_li); else - free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT); + free_pages(kvm->arch.hpt_virt, + kvm->arch.hpt_order - PAGE_SHIFT); } /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */ @@ -119,6 +185,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, unsigned long psize; unsigned long hp0, hp1; long ret; + struct kvm *kvm = vcpu->kvm; psize = 1ul << porder; npages = memslot->npages >> (porder - PAGE_SHIFT); @@ -127,8 +194,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, if (npages > 1ul << (40 - porder)) npages = 1ul << (40 - porder); /* Can't use more than 1 HPTE per HPTEG */ - if (npages > HPT_NPTEG) - npages = HPT_NPTEG; + if (npages > kvm->arch.hpt_mask + 1) + npages = kvm->arch.hpt_mask + 1; hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | HPTE_V_BOLTED | hpte0_pgsize_encoding(psize); @@ -138,7 +205,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, for (i = 0; i < npages; ++i) { addr = i << porder; /* can't use hpt_hash since va > 64 bits */ - hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK; + hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask; /* * We assume that the hash table is empty and no * vcpus are using it at this stage. Since we create diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index c6af1d623839..d084e412b3c5 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -56,7 +56,7 @@ /* #define EXIT_DEBUG_INT */ static void kvmppc_end_cede(struct kvm_vcpu *vcpu); -static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu); +static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { @@ -1068,11 +1068,15 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) return -EINTR; } - /* On the first time here, set up VRMA or RMA */ + atomic_inc(&vcpu->kvm->arch.vcpus_running); + /* Order vcpus_running vs. 
rma_setup_done, see kvmppc_alloc_reset_hpt */ + smp_mb(); + + /* On the first time here, set up HTAB and VRMA or RMA */ if (!vcpu->kvm->arch.rma_setup_done) { - r = kvmppc_hv_setup_rma(vcpu); + r = kvmppc_hv_setup_htab_rma(vcpu); if (r) - return r; + goto out; } flush_fp_to_thread(current); @@ -1090,6 +1094,9 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) kvmppc_core_prepare_to_enter(vcpu); } } while (r == RESUME_GUEST); + + out: + atomic_dec(&vcpu->kvm->arch.vcpus_running); return r; } @@ -1305,7 +1312,7 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, { } -static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu) +static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) { int err = 0; struct kvm *kvm = vcpu->kvm; @@ -1324,6 +1331,15 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu) if (kvm->arch.rma_setup_done) goto out; /* another vcpu beat us to it */ + /* Allocate hashed page table (if not done already) and reset it */ + if (!kvm->arch.hpt_virt) { + err = kvmppc_alloc_hpt(kvm, NULL); + if (err) { + pr_err("KVM: Couldn't alloc HPT\n"); + goto out; + } + } + /* Look up the memslot for guest physical address 0 */ memslot = gfn_to_memslot(kvm, 0); @@ -1435,13 +1451,14 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu) int kvmppc_core_init_vm(struct kvm *kvm) { - long r; - unsigned long lpcr; + unsigned long lpcr, lpid; - /* Allocate hashed page table */ - r = kvmppc_alloc_hpt(kvm); - if (r) - return r; + /* Allocate the guest's logical partition ID */ + + lpid = kvmppc_alloc_lpid(); + if (lpid < 0) + return -ENOMEM; + kvm->arch.lpid = lpid; INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); @@ -1451,7 +1468,6 @@ int kvmppc_core_init_vm(struct kvm *kvm) if (cpu_has_feature(CPU_FTR_ARCH_201)) { /* PPC970; HID4 is effectively the LPCR */ - unsigned long lpid = kvm->arch.lpid; kvm->arch.host_lpid = 0; kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4); lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH)); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index e1b60f56f2a1..fb4eac290fef 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -25,6 +25,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type); static struct kvmppc_linear_info *kvm_alloc_linear(int type); static void kvm_release_linear(struct kvmppc_linear_info *ri); +int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER; +EXPORT_SYMBOL_GPL(kvm_hpt_order); + /*************** RMA *************/ /* @@ -209,7 +212,7 @@ static void kvm_release_linear(struct kvmppc_linear_info *ri) void __init kvm_linear_init(void) { /* HPT */ - kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT); + kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT); /* RMA */ /* Only do this on PPC970 in HV mode */ diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index cec4daddbf31..5c70d19494f9 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -237,7 +237,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, /* Find and lock the HPTEG slot to use */ do_insert: - if (pte_index >= HPT_NPTE) + if (pte_index >= kvm->arch.hpt_npte) return H_PARAMETER; if (likely((flags & H_EXACT) == 0)) { pte_index &= ~7UL; @@ -352,7 +352,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long v, r, rb; struct revmap_entry *rev; - if (pte_index >= HPT_NPTE) + if (pte_index >= kvm->arch.hpt_npte) return 
H_PARAMETER; hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) @@ -419,7 +419,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) i = 4; break; } - if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) { + if (req != 1 || flags == 3 || + pte_index >= kvm->arch.hpt_npte) { /* parameter error */ args[j] = ((0xa0 | flags) << 56) + pte_index; ret = H_PARAMETER; @@ -521,7 +522,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, struct revmap_entry *rev; unsigned long v, r, rb, mask, bits; - if (pte_index >= HPT_NPTE) + if (pte_index >= kvm->arch.hpt_npte) return H_PARAMETER; hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); @@ -583,7 +584,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, int i, n = 1; struct revmap_entry *rev = NULL; - if (pte_index >= HPT_NPTE) + if (pte_index >= kvm->arch.hpt_npte) return H_PARAMETER; if (flags & H_READ_4) { pte_index &= ~3; @@ -678,7 +679,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, somask = (1UL << 28) - 1; vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT; } - hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK; + hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask; avpn = slb_v & ~(somask >> 16); /* also includes B */ avpn |= (eaddr & somask) >> 16; @@ -723,7 +724,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, if (val & HPTE_V_SECONDARY) break; val |= HPTE_V_SECONDARY; - hash = hash ^ HPT_HASH_MASK; + hash = hash ^ kvm->arch.hpt_mask; } return -1; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 1493c8de947b..87f4dc886076 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -246,6 +246,7 @@ int kvm_dev_ioctl_check_extension(long ext) #endif #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: + case KVM_CAP_PPC_ALLOC_HTAB: r = 1; break; #endif /* CONFIG_PPC_BOOK3S_64 */ @@ -802,6 +803,23 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; break; } + + case KVM_PPC_ALLOCATE_HTAB: { + struct kvm *kvm = filp->private_data; + u32 htab_order; + + r = -EFAULT; + if (get_user(htab_order, (u32 __user *)argp)) + break; + r = kvmppc_alloc_reset_hpt(kvm, &htab_order); + if (r) + break; + r = -EFAULT; + if (put_user(htab_order, (u32 __user *)argp)) + break; + r = 0; + break; + } #endif /* CONFIG_KVM_BOOK3S_64_HV */ #ifdef CONFIG_PPC_BOOK3S_64 -- cgit v1.2.3 From 21bd000abff7d587229dbbee6f8c17f3aad9f9d8 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Sun, 20 May 2012 23:21:23 +0000 Subject: KVM: PPC: booke: Added DECAR support Added the decrementer auto-reload support. DECAR is readable on e500v2/e500mc and later cpus. 
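As a rough illustration (not part of this patch), a guest that wants a
periodic decrementer tick programs DECAR once and sets TCR[ARE]; with this
change the emulation reloads DEC from the stored decar value on each
expiry.  A hypothetical guest-side sketch, using the SPR names from
asm/reg_booke.h (reload_ticks is a made-up value):

	mtspr(SPRN_DECAR, reload_ticks);	/* value reloaded into DEC on expiry */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE | TCR_DIE);
						/* auto-reload + DEC interrupt enable */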
Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_host.h | 2 ++ arch/powerpc/kvm/booke.c | 5 +++++ arch/powerpc/kvm/booke_emulate.c | 3 +++ arch/powerpc/kvm/e500_emulate.c | 3 +++ 4 files changed, 13 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index dd783beb88b3..50ea12fd7bf5 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -418,7 +418,9 @@ struct kvm_vcpu_arch { ulong mcsrr1; ulong mcsr; u32 dec; +#ifdef CONFIG_BOOKE u32 decar; +#endif u32 tbl; u32 tbu; u32 tcr; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 72f13f4a06e0..86681eec60b1 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1267,6 +1267,11 @@ void kvmppc_decrementer_func(unsigned long data) { struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; + if (vcpu->arch.tcr & TCR_ARE) { + vcpu->arch.dec = vcpu->arch.decar; + kvmppc_emulate_dec(vcpu); + } + kvmppc_set_tsr_bits(vcpu, TSR_DIS); } diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 6c76397f2af4..9eb9809eb13e 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -129,6 +129,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) kvmppc_set_tcr(vcpu, spr_val); break; + case SPRN_DECAR: + vcpu->arch.decar = spr_val; + break; /* * Note: SPRG4-7 are user-readable. * These values are loaded into the real SPRGs when resuming the diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 8b99e076dc81..e04b0ef55ce0 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -269,6 +269,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) *spr_val = vcpu->arch.shared->mas7_3 >> 32; break; #endif + case SPRN_DECAR: + *spr_val = vcpu->arch.decar; + break; case SPRN_TLB0CFG: *spr_val = vcpu->arch.tlbcfg[0]; break; -- cgit v1.2.3 From c75df6f96c59beed8632e3aced5fb4faabaa6c5b Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Mon, 25 Jun 2012 13:33:10 +0000 Subject: powerpc: Fix usage of register macros getting ready for %r0 change Anything that uses a constructed instruction (ie. from ppc-opcode.h), need to use the new R0 macro, as %r0 is not going to work. Also convert usages of macros where we are just determining an offset (usually for a load/store), like: std r14,STK_REG(r14)(r1) Can't use STK_REG(r14) as %r14 doesn't work in the STK_REG macro since it's just calculating an offset. 
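To make the distinction concrete, here is a hedged sketch of the two macro
flavours (simplified from asm/ppc_asm.h and asm/ppc-opcode.h, not the exact
definitions).  Both do arithmetic on their argument, so they need a bare
number, which "%r14" is not:

	#define R14		14			/* bare register number */
	#define STK_REG(i)	(112 + ((i) - 14) * 8)	/* computes a stack slot offset */
	#define ___PPC_RT(t)	(((t) & 0x1f) << 21)	/* places a register field in an opcode */

	std	r14,STK_REG(R14)(r1)	/* fine: STK_REG(R14) folds to a constant */

With the assembler mapping "r14" to "%r14", STK_REG(r14) would no longer
evaluate, hence the mechanical conversion below.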
Signed-off-by: Michael Neuling Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/cpu_setup_a2.S | 6 +- arch/powerpc/kernel/fpu.S | 4 +- arch/powerpc/kernel/kvm.c | 2 +- arch/powerpc/kernel/misc_64.S | 4 +- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 220 ++++++++++---------- arch/powerpc/kvm/book3s_interrupts.S | 72 +++---- arch/powerpc/kvm/booke_interrupts.S | 272 ++++++++++++------------- arch/powerpc/kvm/bookehv_interrupts.S | 222 ++++++++++---------- arch/powerpc/lib/checksum_64.S | 24 +-- arch/powerpc/lib/copypage_power7.S | 28 +-- arch/powerpc/lib/copyuser_64.S | 6 +- arch/powerpc/lib/copyuser_power7.S | 84 ++++---- arch/powerpc/lib/hweight_64.S | 14 +- arch/powerpc/lib/ldstfp.S | 12 +- arch/powerpc/lib/mem_64.S | 6 +- arch/powerpc/lib/memcpy_64.S | 6 +- arch/powerpc/lib/memcpy_power7.S | 60 +++--- arch/powerpc/mm/hash_low_64.S | 148 +++++++------- arch/powerpc/mm/tlb_low_64e.S | 10 +- arch/powerpc/mm/tlb_nohash_low.S | 6 +- arch/powerpc/net/bpf_jit_comp.c | 4 +- arch/powerpc/platforms/cell/beat_hvCall.S | 26 +-- arch/powerpc/platforms/powernv/opal-takeover.S | 8 +- arch/powerpc/platforms/powernv/opal-wrappers.S | 2 +- arch/powerpc/platforms/pseries/hvCall.S | 72 +++---- 25 files changed, 659 insertions(+), 659 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S index ebc62f42a237..95675a7181dc 100644 --- a/arch/powerpc/kernel/cpu_setup_a2.S +++ b/arch/powerpc/kernel/cpu_setup_a2.S @@ -100,19 +100,19 @@ _icswx_skip_guest: lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h mtspr SPRN_MMUCR0, r4 li r4,A2_IERAT_SIZE-1 - PPC_ERATWE(r4,r4,3) + PPC_ERATWE(R4,R4,3) /* Now set the D-ERAT watermark to 31 */ lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h mtspr SPRN_MMUCR0, r4 li r4,A2_DERAT_SIZE-1 - PPC_ERATWE(r4,r4,3) + PPC_ERATWE(R4,R4,3) /* And invalidate the beast just in case. 
That won't get rid of * a bolted entry though it will be in LRU and so will go away eventually * but let's not bother for now */ - PPC_ERATILX(0,0,0) + PPC_ERATILX(0,R0,R0) 1: blr diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index de369558bf0a..71c1c73bc65f 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -106,7 +106,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif lfd fr0,THREAD_FPSCR(r5) MTFSF_L(fr0) - REST_32FPVSRS(0, r4, r5) + REST_32FPVSRS(0, R4, R5) #ifndef CONFIG_SMP subi r4,r5,THREAD fromreal(r4) @@ -140,7 +140,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) addi r3,r3,THREAD /* want THREAD of task */ PPC_LL r5,PT_REGS(r3) PPC_LCMPI 0,r5,0 - SAVE_32FPVSRS(0, r4 ,r3) + SAVE_32FPVSRS(0, R4 ,R3) mffs fr0 stfd fr0,THREAD_FPSCR(r3) beq 1f diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 62bdf2389669..02c167db6ba0 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -302,7 +302,7 @@ static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one) if (imm_one) { p[kvm_emulate_wrtee_reg_offs] = - KVM_INST_LI | __PPC_RT(30) | MSR_EE; + KVM_INST_LI | __PPC_RT(R30) | MSR_EE; } else { /* Make clobbered registers work too */ switch (get_rt(rt)) { diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 616921ef1439..6ba08bc91b21 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -314,7 +314,7 @@ _GLOBAL(real_205_readb) mtmsrd r0 sync isync - LBZCIX(r3,0,r3) + LBZCIX(R3,0,R3) isync mtmsrd r7 sync @@ -329,7 +329,7 @@ _GLOBAL(real_205_writeb) mtmsrd r0 sync isync - STBCIX(r3,0,r4) + STBCIX(R3,0,R4) isync mtmsrd r7 sync diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index a1044f43becd..bc99015030c3 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -206,24 +206,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) /* Load up FP, VMX and VSX registers */ bl kvmppc_load_fp - ld r14, VCPU_GPR(r14)(r4) - ld r15, VCPU_GPR(r15)(r4) - ld r16, VCPU_GPR(r16)(r4) - ld r17, VCPU_GPR(r17)(r4) - ld r18, VCPU_GPR(r18)(r4) - ld r19, VCPU_GPR(r19)(r4) - ld r20, VCPU_GPR(r20)(r4) - ld r21, VCPU_GPR(r21)(r4) - ld r22, VCPU_GPR(r22)(r4) - ld r23, VCPU_GPR(r23)(r4) - ld r24, VCPU_GPR(r24)(r4) - ld r25, VCPU_GPR(r25)(r4) - ld r26, VCPU_GPR(r26)(r4) - ld r27, VCPU_GPR(r27)(r4) - ld r28, VCPU_GPR(r28)(r4) - ld r29, VCPU_GPR(r29)(r4) - ld r30, VCPU_GPR(r30)(r4) - ld r31, VCPU_GPR(r31)(r4) + ld r14, VCPU_GPR(R14)(r4) + ld r15, VCPU_GPR(R15)(r4) + ld r16, VCPU_GPR(R16)(r4) + ld r17, VCPU_GPR(R17)(r4) + ld r18, VCPU_GPR(R18)(r4) + ld r19, VCPU_GPR(R19)(r4) + ld r20, VCPU_GPR(R20)(r4) + ld r21, VCPU_GPR(R21)(r4) + ld r22, VCPU_GPR(R22)(r4) + ld r23, VCPU_GPR(R23)(r4) + ld r24, VCPU_GPR(R24)(r4) + ld r25, VCPU_GPR(R25)(r4) + ld r26, VCPU_GPR(R26)(r4) + ld r27, VCPU_GPR(R27)(r4) + ld r28, VCPU_GPR(R28)(r4) + ld r29, VCPU_GPR(R29)(r4) + ld r30, VCPU_GPR(R30)(r4) + ld r31, VCPU_GPR(R31)(r4) BEGIN_FTR_SECTION /* Switch DSCR to guest value */ @@ -547,21 +547,21 @@ fast_guest_return: mtlr r5 mtcr r6 - ld r0, VCPU_GPR(r0)(r4) - ld r1, VCPU_GPR(r1)(r4) - ld r2, VCPU_GPR(r2)(r4) - ld r3, VCPU_GPR(r3)(r4) - ld r5, VCPU_GPR(r5)(r4) - ld r6, VCPU_GPR(r6)(r4) - ld r7, VCPU_GPR(r7)(r4) - ld r8, VCPU_GPR(r8)(r4) - ld r9, VCPU_GPR(r9)(r4) - ld r10, VCPU_GPR(r10)(r4) - ld r11, VCPU_GPR(r11)(r4) - ld r12, VCPU_GPR(r12)(r4) - ld r13, VCPU_GPR(r13)(r4) - - ld r4, VCPU_GPR(r4)(r4) + ld r0, VCPU_GPR(R0)(r4) + ld r1, 
VCPU_GPR(R1)(r4) + ld r2, VCPU_GPR(R2)(r4) + ld r3, VCPU_GPR(R3)(r4) + ld r5, VCPU_GPR(R5)(r4) + ld r6, VCPU_GPR(R6)(r4) + ld r7, VCPU_GPR(R7)(r4) + ld r8, VCPU_GPR(R8)(r4) + ld r9, VCPU_GPR(R9)(r4) + ld r10, VCPU_GPR(R10)(r4) + ld r11, VCPU_GPR(R11)(r4) + ld r12, VCPU_GPR(R12)(r4) + ld r13, VCPU_GPR(R13)(r4) + + ld r4, VCPU_GPR(R4)(r4) hrfid b . @@ -590,22 +590,22 @@ kvmppc_interrupt: /* Save registers */ - std r0, VCPU_GPR(r0)(r9) - std r1, VCPU_GPR(r1)(r9) - std r2, VCPU_GPR(r2)(r9) - std r3, VCPU_GPR(r3)(r9) - std r4, VCPU_GPR(r4)(r9) - std r5, VCPU_GPR(r5)(r9) - std r6, VCPU_GPR(r6)(r9) - std r7, VCPU_GPR(r7)(r9) - std r8, VCPU_GPR(r8)(r9) + std r0, VCPU_GPR(R0)(r9) + std r1, VCPU_GPR(R1)(r9) + std r2, VCPU_GPR(R2)(r9) + std r3, VCPU_GPR(R3)(r9) + std r4, VCPU_GPR(R4)(r9) + std r5, VCPU_GPR(R5)(r9) + std r6, VCPU_GPR(R6)(r9) + std r7, VCPU_GPR(R7)(r9) + std r8, VCPU_GPR(R8)(r9) ld r0, HSTATE_HOST_R2(r13) - std r0, VCPU_GPR(r9)(r9) - std r10, VCPU_GPR(r10)(r9) - std r11, VCPU_GPR(r11)(r9) + std r0, VCPU_GPR(R9)(r9) + std r10, VCPU_GPR(R10)(r9) + std r11, VCPU_GPR(R11)(r9) ld r3, HSTATE_SCRATCH0(r13) lwz r4, HSTATE_SCRATCH1(r13) - std r3, VCPU_GPR(r12)(r9) + std r3, VCPU_GPR(R12)(r9) stw r4, VCPU_CR(r9) /* Restore R1/R2 so we can handle faults */ @@ -626,7 +626,7 @@ kvmppc_interrupt: GET_SCRATCH0(r3) mflr r4 - std r3, VCPU_GPR(r13)(r9) + std r3, VCPU_GPR(R13)(r9) std r4, VCPU_LR(r9) /* Unset guest mode */ @@ -968,24 +968,24 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) /* Save non-volatile GPRs */ - std r14, VCPU_GPR(r14)(r9) - std r15, VCPU_GPR(r15)(r9) - std r16, VCPU_GPR(r16)(r9) - std r17, VCPU_GPR(r17)(r9) - std r18, VCPU_GPR(r18)(r9) - std r19, VCPU_GPR(r19)(r9) - std r20, VCPU_GPR(r20)(r9) - std r21, VCPU_GPR(r21)(r9) - std r22, VCPU_GPR(r22)(r9) - std r23, VCPU_GPR(r23)(r9) - std r24, VCPU_GPR(r24)(r9) - std r25, VCPU_GPR(r25)(r9) - std r26, VCPU_GPR(r26)(r9) - std r27, VCPU_GPR(r27)(r9) - std r28, VCPU_GPR(r28)(r9) - std r29, VCPU_GPR(r29)(r9) - std r30, VCPU_GPR(r30)(r9) - std r31, VCPU_GPR(r31)(r9) + std r14, VCPU_GPR(R14)(r9) + std r15, VCPU_GPR(R15)(r9) + std r16, VCPU_GPR(R16)(r9) + std r17, VCPU_GPR(R17)(r9) + std r18, VCPU_GPR(R18)(r9) + std r19, VCPU_GPR(R19)(r9) + std r20, VCPU_GPR(R20)(r9) + std r21, VCPU_GPR(R21)(r9) + std r22, VCPU_GPR(R22)(r9) + std r23, VCPU_GPR(R23)(r9) + std r24, VCPU_GPR(R24)(r9) + std r25, VCPU_GPR(R25)(r9) + std r26, VCPU_GPR(R26)(r9) + std r27, VCPU_GPR(R27)(r9) + std r28, VCPU_GPR(R28)(r9) + std r29, VCPU_GPR(R29)(r9) + std r30, VCPU_GPR(R30)(r9) + std r31, VCPU_GPR(R31)(r9) /* Save SPRGs */ mfspr r3, SPRN_SPRG0 @@ -1160,7 +1160,7 @@ kvmppc_hdsi: andi. r0, r11, MSR_DR /* data relocation enabled? */ beq 3f clrrdi r0, r4, 28 - PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ + PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ bne 1f /* if no SLB entry found */ 4: std r4, VCPU_FAULT_DAR(r9) stw r6, VCPU_FAULT_DSISR(r9) @@ -1234,7 +1234,7 @@ kvmppc_hisi: andi. r0, r11, MSR_IR /* instruction relocation enabled? */ beq 3f clrrdi r0, r10, 28 - PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ + PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ bne 1f /* if no SLB entry found */ 4: /* Search the hash table. */ @@ -1278,7 +1278,7 @@ kvmppc_hisi: */ .globl hcall_try_real_mode hcall_try_real_mode: - ld r3,VCPU_GPR(r3)(r9) + ld r3,VCPU_GPR(R3)(r9) andi. 
r0,r11,MSR_PR bne hcall_real_cont clrrdi r3,r3,2 @@ -1291,12 +1291,12 @@ hcall_try_real_mode: add r3,r3,r4 mtctr r3 mr r3,r9 /* get vcpu pointer */ - ld r4,VCPU_GPR(r4)(r9) + ld r4,VCPU_GPR(R4)(r9) bctrl cmpdi r3,H_TOO_HARD beq hcall_real_fallback ld r4,HSTATE_KVM_VCPU(r13) - std r3,VCPU_GPR(r3)(r4) + std r3,VCPU_GPR(R3)(r4) ld r10,VCPU_PC(r4) ld r11,VCPU_MSR(r4) b fast_guest_return @@ -1424,7 +1424,7 @@ _GLOBAL(kvmppc_h_cede) li r0,0 /* set trap to 0 to say hcall is handled */ stw r0,VCPU_TRAP(r3) li r0,H_SUCCESS - std r0,VCPU_GPR(r3)(r3) + std r0,VCPU_GPR(R3)(r3) BEGIN_FTR_SECTION b 2f /* just send it up to host on 970 */ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) @@ -1443,7 +1443,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) addi r6,r5,VCORE_NAPPING_THREADS 31: lwarx r4,0,r6 or r4,r4,r0 - PPC_POPCNTW(r7,r4) + PPC_POPCNTW(R7,R4) cmpw r7,r8 bge 2f stwcx. r4,0,r6 @@ -1464,24 +1464,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. */ /* Save non-volatile GPRs */ - std r14, VCPU_GPR(r14)(r3) - std r15, VCPU_GPR(r15)(r3) - std r16, VCPU_GPR(r16)(r3) - std r17, VCPU_GPR(r17)(r3) - std r18, VCPU_GPR(r18)(r3) - std r19, VCPU_GPR(r19)(r3) - std r20, VCPU_GPR(r20)(r3) - std r21, VCPU_GPR(r21)(r3) - std r22, VCPU_GPR(r22)(r3) - std r23, VCPU_GPR(r23)(r3) - std r24, VCPU_GPR(r24)(r3) - std r25, VCPU_GPR(r25)(r3) - std r26, VCPU_GPR(r26)(r3) - std r27, VCPU_GPR(r27)(r3) - std r28, VCPU_GPR(r28)(r3) - std r29, VCPU_GPR(r29)(r3) - std r30, VCPU_GPR(r30)(r3) - std r31, VCPU_GPR(r31)(r3) + std r14, VCPU_GPR(R14)(r3) + std r15, VCPU_GPR(R15)(r3) + std r16, VCPU_GPR(R16)(r3) + std r17, VCPU_GPR(R17)(r3) + std r18, VCPU_GPR(R18)(r3) + std r19, VCPU_GPR(R19)(r3) + std r20, VCPU_GPR(R20)(r3) + std r21, VCPU_GPR(R21)(r3) + std r22, VCPU_GPR(R22)(r3) + std r23, VCPU_GPR(R23)(r3) + std r24, VCPU_GPR(R24)(r3) + std r25, VCPU_GPR(R25)(r3) + std r26, VCPU_GPR(R26)(r3) + std r27, VCPU_GPR(R27)(r3) + std r28, VCPU_GPR(R28)(r3) + std r29, VCPU_GPR(R29)(r3) + std r30, VCPU_GPR(R30)(r3) + std r31, VCPU_GPR(R31)(r3) /* save FP state */ bl .kvmppc_save_fp @@ -1513,24 +1513,24 @@ kvm_end_cede: bl kvmppc_load_fp /* Load NV GPRS */ - ld r14, VCPU_GPR(r14)(r4) - ld r15, VCPU_GPR(r15)(r4) - ld r16, VCPU_GPR(r16)(r4) - ld r17, VCPU_GPR(r17)(r4) - ld r18, VCPU_GPR(r18)(r4) - ld r19, VCPU_GPR(r19)(r4) - ld r20, VCPU_GPR(r20)(r4) - ld r21, VCPU_GPR(r21)(r4) - ld r22, VCPU_GPR(r22)(r4) - ld r23, VCPU_GPR(r23)(r4) - ld r24, VCPU_GPR(r24)(r4) - ld r25, VCPU_GPR(r25)(r4) - ld r26, VCPU_GPR(r26)(r4) - ld r27, VCPU_GPR(r27)(r4) - ld r28, VCPU_GPR(r28)(r4) - ld r29, VCPU_GPR(r29)(r4) - ld r30, VCPU_GPR(r30)(r4) - ld r31, VCPU_GPR(r31)(r4) + ld r14, VCPU_GPR(R14)(r4) + ld r15, VCPU_GPR(R15)(r4) + ld r16, VCPU_GPR(R16)(r4) + ld r17, VCPU_GPR(R17)(r4) + ld r18, VCPU_GPR(R18)(r4) + ld r19, VCPU_GPR(R19)(r4) + ld r20, VCPU_GPR(R20)(r4) + ld r21, VCPU_GPR(R21)(r4) + ld r22, VCPU_GPR(R22)(r4) + ld r23, VCPU_GPR(R23)(r4) + ld r24, VCPU_GPR(R24)(r4) + ld r25, VCPU_GPR(R25)(r4) + ld r26, VCPU_GPR(R26)(r4) + ld r27, VCPU_GPR(R27)(r4) + ld r28, VCPU_GPR(R28)(r4) + ld r29, VCPU_GPR(R29)(r4) + ld r30, VCPU_GPR(R30)(r4) + ld r31, VCPU_GPR(R31)(r4) /* clear our bit in vcore->napping_threads */ 33: ld r5,HSTATE_KVM_VCORE(r13) @@ -1649,7 +1649,7 @@ BEGIN_FTR_SECTION reg = 0 .rept 32 li r6,reg*16+VCPU_VSRS - STXVD2X(reg,r6,r3) + STXVD2X(reg,R6,R3) reg = reg + 1 .endr FTR_SECTION_ELSE @@ -1711,7 +1711,7 @@ BEGIN_FTR_SECTION reg = 0 .rept 32 li r7,reg*16+VCPU_VSRS - LXVD2X(reg,r7,r4) + LXVD2X(reg,R7,R4) reg = 
reg + 1 .endr FTR_SECTION_ELSE diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 3e35383bdb21..2ddab0f90a81 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -39,24 +39,24 @@ #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) #define VCPU_LOAD_NVGPRS(vcpu) \ - PPC_LL r14, VCPU_GPR(r14)(vcpu); \ - PPC_LL r15, VCPU_GPR(r15)(vcpu); \ - PPC_LL r16, VCPU_GPR(r16)(vcpu); \ - PPC_LL r17, VCPU_GPR(r17)(vcpu); \ - PPC_LL r18, VCPU_GPR(r18)(vcpu); \ - PPC_LL r19, VCPU_GPR(r19)(vcpu); \ - PPC_LL r20, VCPU_GPR(r20)(vcpu); \ - PPC_LL r21, VCPU_GPR(r21)(vcpu); \ - PPC_LL r22, VCPU_GPR(r22)(vcpu); \ - PPC_LL r23, VCPU_GPR(r23)(vcpu); \ - PPC_LL r24, VCPU_GPR(r24)(vcpu); \ - PPC_LL r25, VCPU_GPR(r25)(vcpu); \ - PPC_LL r26, VCPU_GPR(r26)(vcpu); \ - PPC_LL r27, VCPU_GPR(r27)(vcpu); \ - PPC_LL r28, VCPU_GPR(r28)(vcpu); \ - PPC_LL r29, VCPU_GPR(r29)(vcpu); \ - PPC_LL r30, VCPU_GPR(r30)(vcpu); \ - PPC_LL r31, VCPU_GPR(r31)(vcpu); \ + PPC_LL r14, VCPU_GPR(R14)(vcpu); \ + PPC_LL r15, VCPU_GPR(R15)(vcpu); \ + PPC_LL r16, VCPU_GPR(R16)(vcpu); \ + PPC_LL r17, VCPU_GPR(R17)(vcpu); \ + PPC_LL r18, VCPU_GPR(R18)(vcpu); \ + PPC_LL r19, VCPU_GPR(R19)(vcpu); \ + PPC_LL r20, VCPU_GPR(R20)(vcpu); \ + PPC_LL r21, VCPU_GPR(R21)(vcpu); \ + PPC_LL r22, VCPU_GPR(R22)(vcpu); \ + PPC_LL r23, VCPU_GPR(R23)(vcpu); \ + PPC_LL r24, VCPU_GPR(R24)(vcpu); \ + PPC_LL r25, VCPU_GPR(R25)(vcpu); \ + PPC_LL r26, VCPU_GPR(R26)(vcpu); \ + PPC_LL r27, VCPU_GPR(R27)(vcpu); \ + PPC_LL r28, VCPU_GPR(R28)(vcpu); \ + PPC_LL r29, VCPU_GPR(R29)(vcpu); \ + PPC_LL r30, VCPU_GPR(R30)(vcpu); \ + PPC_LL r31, VCPU_GPR(R31)(vcpu); \ /***************************************************************************** * * @@ -131,24 +131,24 @@ kvmppc_handler_highmem: /* R7 = vcpu */ PPC_LL r7, GPR4(r1) - PPC_STL r14, VCPU_GPR(r14)(r7) - PPC_STL r15, VCPU_GPR(r15)(r7) - PPC_STL r16, VCPU_GPR(r16)(r7) - PPC_STL r17, VCPU_GPR(r17)(r7) - PPC_STL r18, VCPU_GPR(r18)(r7) - PPC_STL r19, VCPU_GPR(r19)(r7) - PPC_STL r20, VCPU_GPR(r20)(r7) - PPC_STL r21, VCPU_GPR(r21)(r7) - PPC_STL r22, VCPU_GPR(r22)(r7) - PPC_STL r23, VCPU_GPR(r23)(r7) - PPC_STL r24, VCPU_GPR(r24)(r7) - PPC_STL r25, VCPU_GPR(r25)(r7) - PPC_STL r26, VCPU_GPR(r26)(r7) - PPC_STL r27, VCPU_GPR(r27)(r7) - PPC_STL r28, VCPU_GPR(r28)(r7) - PPC_STL r29, VCPU_GPR(r29)(r7) - PPC_STL r30, VCPU_GPR(r30)(r7) - PPC_STL r31, VCPU_GPR(r31)(r7) + PPC_STL r14, VCPU_GPR(R14)(r7) + PPC_STL r15, VCPU_GPR(R15)(r7) + PPC_STL r16, VCPU_GPR(R16)(r7) + PPC_STL r17, VCPU_GPR(R17)(r7) + PPC_STL r18, VCPU_GPR(R18)(r7) + PPC_STL r19, VCPU_GPR(R19)(r7) + PPC_STL r20, VCPU_GPR(R20)(r7) + PPC_STL r21, VCPU_GPR(R21)(r7) + PPC_STL r22, VCPU_GPR(R22)(r7) + PPC_STL r23, VCPU_GPR(R23)(r7) + PPC_STL r24, VCPU_GPR(R24)(r7) + PPC_STL r25, VCPU_GPR(R25)(r7) + PPC_STL r26, VCPU_GPR(R26)(r7) + PPC_STL r27, VCPU_GPR(R27)(r7) + PPC_STL r28, VCPU_GPR(R28)(r7) + PPC_STL r29, VCPU_GPR(R29)(r7) + PPC_STL r30, VCPU_GPR(R30)(r7) + PPC_STL r31, VCPU_GPR(R31)(r7) /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ mr r5, r12 diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 8feec2ff3928..e598a5a0d5e4 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -37,7 +37,7 @@ #define HOST_CR 16 #define HOST_NV_GPRS 20 #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) -#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4) +#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4) #define 
HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */ @@ -58,8 +58,8 @@ _GLOBAL(kvmppc_handler_\ivor_nr) /* Get pointer to vcpu and record exit number. */ mtspr SPRN_SPRG_WSCRATCH0, r4 mfspr r4, SPRN_SPRG_RVCPU - stw r5, VCPU_GPR(r5)(r4) - stw r6, VCPU_GPR(r6)(r4) + stw r5, VCPU_GPR(R5)(r4) + stw r6, VCPU_GPR(R6)(r4) mfctr r5 lis r6, kvmppc_resume_host@h stw r5, VCPU_CTR(r4) @@ -100,12 +100,12 @@ _GLOBAL(kvmppc_handler_len) * r5: KVM exit number */ _GLOBAL(kvmppc_resume_host) - stw r3, VCPU_GPR(r3)(r4) + stw r3, VCPU_GPR(R3)(r4) mfcr r3 stw r3, VCPU_CR(r4) - stw r7, VCPU_GPR(r7)(r4) - stw r8, VCPU_GPR(r8)(r4) - stw r9, VCPU_GPR(r9)(r4) + stw r7, VCPU_GPR(R7)(r4) + stw r8, VCPU_GPR(R8)(r4) + stw r9, VCPU_GPR(R9)(r4) li r6, 1 slw r6, r6, r5 @@ -135,23 +135,23 @@ _GLOBAL(kvmppc_resume_host) isync stw r9, VCPU_LAST_INST(r4) - stw r15, VCPU_GPR(r15)(r4) - stw r16, VCPU_GPR(r16)(r4) - stw r17, VCPU_GPR(r17)(r4) - stw r18, VCPU_GPR(r18)(r4) - stw r19, VCPU_GPR(r19)(r4) - stw r20, VCPU_GPR(r20)(r4) - stw r21, VCPU_GPR(r21)(r4) - stw r22, VCPU_GPR(r22)(r4) - stw r23, VCPU_GPR(r23)(r4) - stw r24, VCPU_GPR(r24)(r4) - stw r25, VCPU_GPR(r25)(r4) - stw r26, VCPU_GPR(r26)(r4) - stw r27, VCPU_GPR(r27)(r4) - stw r28, VCPU_GPR(r28)(r4) - stw r29, VCPU_GPR(r29)(r4) - stw r30, VCPU_GPR(r30)(r4) - stw r31, VCPU_GPR(r31)(r4) + stw r15, VCPU_GPR(R15)(r4) + stw r16, VCPU_GPR(R16)(r4) + stw r17, VCPU_GPR(R17)(r4) + stw r18, VCPU_GPR(R18)(r4) + stw r19, VCPU_GPR(R19)(r4) + stw r20, VCPU_GPR(R20)(r4) + stw r21, VCPU_GPR(R21)(r4) + stw r22, VCPU_GPR(R22)(r4) + stw r23, VCPU_GPR(R23)(r4) + stw r24, VCPU_GPR(R24)(r4) + stw r25, VCPU_GPR(R25)(r4) + stw r26, VCPU_GPR(R26)(r4) + stw r27, VCPU_GPR(R27)(r4) + stw r28, VCPU_GPR(R28)(r4) + stw r29, VCPU_GPR(R29)(r4) + stw r30, VCPU_GPR(R30)(r4) + stw r31, VCPU_GPR(R31)(r4) ..skip_inst_copy: /* Also grab DEAR and ESR before the host can clobber them. */ @@ -169,20 +169,20 @@ _GLOBAL(kvmppc_resume_host) ..skip_esr: /* Save remaining volatile guest register state to vcpu. */ - stw r0, VCPU_GPR(r0)(r4) - stw r1, VCPU_GPR(r1)(r4) - stw r2, VCPU_GPR(r2)(r4) - stw r10, VCPU_GPR(r10)(r4) - stw r11, VCPU_GPR(r11)(r4) - stw r12, VCPU_GPR(r12)(r4) - stw r13, VCPU_GPR(r13)(r4) - stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */ + stw r0, VCPU_GPR(R0)(r4) + stw r1, VCPU_GPR(R1)(r4) + stw r2, VCPU_GPR(R2)(r4) + stw r10, VCPU_GPR(R10)(r4) + stw r11, VCPU_GPR(R11)(r4) + stw r12, VCPU_GPR(R12)(r4) + stw r13, VCPU_GPR(R13)(r4) + stw r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */ mflr r3 stw r3, VCPU_LR(r4) mfxer r3 stw r3, VCPU_XER(r4) mfspr r3, SPRN_SPRG_RSCRATCH0 - stw r3, VCPU_GPR(r4)(r4) + stw r3, VCPU_GPR(R4)(r4) mfspr r3, SPRN_SRR0 stw r3, VCPU_PC(r4) @@ -214,28 +214,28 @@ _GLOBAL(kvmppc_resume_host) /* Restore vcpu pointer and the nonvolatiles we used. */ mr r4, r14 - lwz r14, VCPU_GPR(r14)(r4) + lwz r14, VCPU_GPR(R14)(r4) /* Sometimes instruction emulation must restore complete GPR state. */ andi. 
r5, r3, RESUME_FLAG_NV beq ..skip_nv_load - lwz r15, VCPU_GPR(r15)(r4) - lwz r16, VCPU_GPR(r16)(r4) - lwz r17, VCPU_GPR(r17)(r4) - lwz r18, VCPU_GPR(r18)(r4) - lwz r19, VCPU_GPR(r19)(r4) - lwz r20, VCPU_GPR(r20)(r4) - lwz r21, VCPU_GPR(r21)(r4) - lwz r22, VCPU_GPR(r22)(r4) - lwz r23, VCPU_GPR(r23)(r4) - lwz r24, VCPU_GPR(r24)(r4) - lwz r25, VCPU_GPR(r25)(r4) - lwz r26, VCPU_GPR(r26)(r4) - lwz r27, VCPU_GPR(r27)(r4) - lwz r28, VCPU_GPR(r28)(r4) - lwz r29, VCPU_GPR(r29)(r4) - lwz r30, VCPU_GPR(r30)(r4) - lwz r31, VCPU_GPR(r31)(r4) + lwz r15, VCPU_GPR(R15)(r4) + lwz r16, VCPU_GPR(R16)(r4) + lwz r17, VCPU_GPR(R17)(r4) + lwz r18, VCPU_GPR(R18)(r4) + lwz r19, VCPU_GPR(R19)(r4) + lwz r20, VCPU_GPR(R20)(r4) + lwz r21, VCPU_GPR(R21)(r4) + lwz r22, VCPU_GPR(R22)(r4) + lwz r23, VCPU_GPR(R23)(r4) + lwz r24, VCPU_GPR(R24)(r4) + lwz r25, VCPU_GPR(R25)(r4) + lwz r26, VCPU_GPR(R26)(r4) + lwz r27, VCPU_GPR(R27)(r4) + lwz r28, VCPU_GPR(R28)(r4) + lwz r29, VCPU_GPR(R29)(r4) + lwz r30, VCPU_GPR(R30)(r4) + lwz r31, VCPU_GPR(R31)(r4) ..skip_nv_load: /* Should we return to the guest? */ @@ -257,43 +257,43 @@ heavyweight_exit: /* We already saved guest volatile register state; now save the * non-volatiles. */ - stw r15, VCPU_GPR(r15)(r4) - stw r16, VCPU_GPR(r16)(r4) - stw r17, VCPU_GPR(r17)(r4) - stw r18, VCPU_GPR(r18)(r4) - stw r19, VCPU_GPR(r19)(r4) - stw r20, VCPU_GPR(r20)(r4) - stw r21, VCPU_GPR(r21)(r4) - stw r22, VCPU_GPR(r22)(r4) - stw r23, VCPU_GPR(r23)(r4) - stw r24, VCPU_GPR(r24)(r4) - stw r25, VCPU_GPR(r25)(r4) - stw r26, VCPU_GPR(r26)(r4) - stw r27, VCPU_GPR(r27)(r4) - stw r28, VCPU_GPR(r28)(r4) - stw r29, VCPU_GPR(r29)(r4) - stw r30, VCPU_GPR(r30)(r4) - stw r31, VCPU_GPR(r31)(r4) + stw r15, VCPU_GPR(R15)(r4) + stw r16, VCPU_GPR(R16)(r4) + stw r17, VCPU_GPR(R17)(r4) + stw r18, VCPU_GPR(R18)(r4) + stw r19, VCPU_GPR(R19)(r4) + stw r20, VCPU_GPR(R20)(r4) + stw r21, VCPU_GPR(R21)(r4) + stw r22, VCPU_GPR(R22)(r4) + stw r23, VCPU_GPR(R23)(r4) + stw r24, VCPU_GPR(R24)(r4) + stw r25, VCPU_GPR(R25)(r4) + stw r26, VCPU_GPR(R26)(r4) + stw r27, VCPU_GPR(R27)(r4) + stw r28, VCPU_GPR(R28)(r4) + stw r29, VCPU_GPR(R29)(r4) + stw r30, VCPU_GPR(R30)(r4) + stw r31, VCPU_GPR(R31)(r4) /* Load host non-volatile register state from host stack. */ - lwz r14, HOST_NV_GPR(r14)(r1) - lwz r15, HOST_NV_GPR(r15)(r1) - lwz r16, HOST_NV_GPR(r16)(r1) - lwz r17, HOST_NV_GPR(r17)(r1) - lwz r18, HOST_NV_GPR(r18)(r1) - lwz r19, HOST_NV_GPR(r19)(r1) - lwz r20, HOST_NV_GPR(r20)(r1) - lwz r21, HOST_NV_GPR(r21)(r1) - lwz r22, HOST_NV_GPR(r22)(r1) - lwz r23, HOST_NV_GPR(r23)(r1) - lwz r24, HOST_NV_GPR(r24)(r1) - lwz r25, HOST_NV_GPR(r25)(r1) - lwz r26, HOST_NV_GPR(r26)(r1) - lwz r27, HOST_NV_GPR(r27)(r1) - lwz r28, HOST_NV_GPR(r28)(r1) - lwz r29, HOST_NV_GPR(r29)(r1) - lwz r30, HOST_NV_GPR(r30)(r1) - lwz r31, HOST_NV_GPR(r31)(r1) + lwz r14, HOST_NV_GPR(R14)(r1) + lwz r15, HOST_NV_GPR(R15)(r1) + lwz r16, HOST_NV_GPR(R16)(r1) + lwz r17, HOST_NV_GPR(R17)(r1) + lwz r18, HOST_NV_GPR(R18)(r1) + lwz r19, HOST_NV_GPR(R19)(r1) + lwz r20, HOST_NV_GPR(R20)(r1) + lwz r21, HOST_NV_GPR(R21)(r1) + lwz r22, HOST_NV_GPR(R22)(r1) + lwz r23, HOST_NV_GPR(R23)(r1) + lwz r24, HOST_NV_GPR(R24)(r1) + lwz r25, HOST_NV_GPR(R25)(r1) + lwz r26, HOST_NV_GPR(R26)(r1) + lwz r27, HOST_NV_GPR(R27)(r1) + lwz r28, HOST_NV_GPR(R28)(r1) + lwz r29, HOST_NV_GPR(R29)(r1) + lwz r30, HOST_NV_GPR(R30)(r1) + lwz r31, HOST_NV_GPR(R31)(r1) /* Return to kvm_vcpu_run(). 
*/ lwz r4, HOST_STACK_LR(r1) @@ -321,44 +321,44 @@ _GLOBAL(__kvmppc_vcpu_run) stw r5, HOST_CR(r1) /* Save host non-volatile register state to stack. */ - stw r14, HOST_NV_GPR(r14)(r1) - stw r15, HOST_NV_GPR(r15)(r1) - stw r16, HOST_NV_GPR(r16)(r1) - stw r17, HOST_NV_GPR(r17)(r1) - stw r18, HOST_NV_GPR(r18)(r1) - stw r19, HOST_NV_GPR(r19)(r1) - stw r20, HOST_NV_GPR(r20)(r1) - stw r21, HOST_NV_GPR(r21)(r1) - stw r22, HOST_NV_GPR(r22)(r1) - stw r23, HOST_NV_GPR(r23)(r1) - stw r24, HOST_NV_GPR(r24)(r1) - stw r25, HOST_NV_GPR(r25)(r1) - stw r26, HOST_NV_GPR(r26)(r1) - stw r27, HOST_NV_GPR(r27)(r1) - stw r28, HOST_NV_GPR(r28)(r1) - stw r29, HOST_NV_GPR(r29)(r1) - stw r30, HOST_NV_GPR(r30)(r1) - stw r31, HOST_NV_GPR(r31)(r1) + stw r14, HOST_NV_GPR(R14)(r1) + stw r15, HOST_NV_GPR(R15)(r1) + stw r16, HOST_NV_GPR(R16)(r1) + stw r17, HOST_NV_GPR(R17)(r1) + stw r18, HOST_NV_GPR(R18)(r1) + stw r19, HOST_NV_GPR(R19)(r1) + stw r20, HOST_NV_GPR(R20)(r1) + stw r21, HOST_NV_GPR(R21)(r1) + stw r22, HOST_NV_GPR(R22)(r1) + stw r23, HOST_NV_GPR(R23)(r1) + stw r24, HOST_NV_GPR(R24)(r1) + stw r25, HOST_NV_GPR(R25)(r1) + stw r26, HOST_NV_GPR(R26)(r1) + stw r27, HOST_NV_GPR(R27)(r1) + stw r28, HOST_NV_GPR(R28)(r1) + stw r29, HOST_NV_GPR(R29)(r1) + stw r30, HOST_NV_GPR(R30)(r1) + stw r31, HOST_NV_GPR(R31)(r1) /* Load guest non-volatiles. */ - lwz r14, VCPU_GPR(r14)(r4) - lwz r15, VCPU_GPR(r15)(r4) - lwz r16, VCPU_GPR(r16)(r4) - lwz r17, VCPU_GPR(r17)(r4) - lwz r18, VCPU_GPR(r18)(r4) - lwz r19, VCPU_GPR(r19)(r4) - lwz r20, VCPU_GPR(r20)(r4) - lwz r21, VCPU_GPR(r21)(r4) - lwz r22, VCPU_GPR(r22)(r4) - lwz r23, VCPU_GPR(r23)(r4) - lwz r24, VCPU_GPR(r24)(r4) - lwz r25, VCPU_GPR(r25)(r4) - lwz r26, VCPU_GPR(r26)(r4) - lwz r27, VCPU_GPR(r27)(r4) - lwz r28, VCPU_GPR(r28)(r4) - lwz r29, VCPU_GPR(r29)(r4) - lwz r30, VCPU_GPR(r30)(r4) - lwz r31, VCPU_GPR(r31)(r4) + lwz r14, VCPU_GPR(R14)(r4) + lwz r15, VCPU_GPR(R15)(r4) + lwz r16, VCPU_GPR(R16)(r4) + lwz r17, VCPU_GPR(R17)(r4) + lwz r18, VCPU_GPR(R18)(r4) + lwz r19, VCPU_GPR(R19)(r4) + lwz r20, VCPU_GPR(R20)(r4) + lwz r21, VCPU_GPR(R21)(r4) + lwz r22, VCPU_GPR(R22)(r4) + lwz r23, VCPU_GPR(R23)(r4) + lwz r24, VCPU_GPR(R24)(r4) + lwz r25, VCPU_GPR(R25)(r4) + lwz r26, VCPU_GPR(R26)(r4) + lwz r27, VCPU_GPR(R27)(r4) + lwz r28, VCPU_GPR(R28)(r4) + lwz r29, VCPU_GPR(R29)(r4) + lwz r30, VCPU_GPR(R30)(r4) + lwz r31, VCPU_GPR(R31)(r4) #ifdef CONFIG_SPE /* save host SPEFSCR and load guest SPEFSCR */ @@ -386,13 +386,13 @@ lightweight_exit: #endif /* Load some guest volatiles. */ - lwz r0, VCPU_GPR(r0)(r4) - lwz r2, VCPU_GPR(r2)(r4) - lwz r9, VCPU_GPR(r9)(r4) - lwz r10, VCPU_GPR(r10)(r4) - lwz r11, VCPU_GPR(r11)(r4) - lwz r12, VCPU_GPR(r12)(r4) - lwz r13, VCPU_GPR(r13)(r4) + lwz r0, VCPU_GPR(R0)(r4) + lwz r2, VCPU_GPR(R2)(r4) + lwz r9, VCPU_GPR(R9)(r4) + lwz r10, VCPU_GPR(R10)(r4) + lwz r11, VCPU_GPR(R11)(r4) + lwz r12, VCPU_GPR(R12)(r4) + lwz r13, VCPU_GPR(R13)(r4) lwz r3, VCPU_LR(r4) mtlr r3 lwz r3, VCPU_XER(r4) @@ -411,7 +411,7 @@ lightweight_exit: /* Can't switch the stack pointer until after IVPR is switched, * because host interrupt handlers would get confused. 
*/ - lwz r1, VCPU_GPR(r1)(r4) + lwz r1, VCPU_GPR(R1)(r4) /* * Host interrupt handlers may have clobbered these @@ -449,10 +449,10 @@ lightweight_exit: mtcr r5 mtsrr0 r6 mtsrr1 r7 - lwz r5, VCPU_GPR(r5)(r4) - lwz r6, VCPU_GPR(r6)(r4) - lwz r7, VCPU_GPR(r7)(r4) - lwz r8, VCPU_GPR(r8)(r4) + lwz r5, VCPU_GPR(R5)(r4) + lwz r6, VCPU_GPR(R6)(r4) + lwz r7, VCPU_GPR(R7)(r4) + lwz r8, VCPU_GPR(R8)(r4) /* Clear any debug events which occurred since we disabled MSR[DE]. * XXX This gives us a 3-instruction window in which a breakpoint @@ -461,8 +461,8 @@ lightweight_exit: ori r3, r3, 0xffff mtspr SPRN_DBSR, r3 - lwz r3, VCPU_GPR(r3)(r4) - lwz r4, VCPU_GPR(r4)(r4) + lwz r3, VCPU_GPR(R3)(r4) + lwz r4, VCPU_GPR(R4)(r4) rfi #ifdef CONFIG_SPE diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 6048a00515d7..a623b1d32d3e 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -67,15 +67,15 @@ */ .macro kvm_handler_common intno, srr0, flags /* Restore host stack pointer */ - PPC_STL r1, VCPU_GPR(r1)(r4) - PPC_STL r2, VCPU_GPR(r2)(r4) + PPC_STL r1, VCPU_GPR(R1)(r4) + PPC_STL r2, VCPU_GPR(R2)(r4) PPC_LL r1, VCPU_HOST_STACK(r4) PPC_LL r2, HOST_R2(r1) mfspr r10, SPRN_PID lwz r8, VCPU_HOST_PID(r4) PPC_LL r11, VCPU_SHARED(r4) - PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */ + PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */ li r14, \intno stw r10, VCPU_GUEST_PID(r4) @@ -137,27 +137,27 @@ */ mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ - PPC_STL r15, VCPU_GPR(r15)(r4) - PPC_STL r16, VCPU_GPR(r16)(r4) - PPC_STL r17, VCPU_GPR(r17)(r4) - PPC_STL r18, VCPU_GPR(r18)(r4) - PPC_STL r19, VCPU_GPR(r19)(r4) + PPC_STL r15, VCPU_GPR(R15)(r4) + PPC_STL r16, VCPU_GPR(R16)(r4) + PPC_STL r17, VCPU_GPR(R17)(r4) + PPC_STL r18, VCPU_GPR(R18)(r4) + PPC_STL r19, VCPU_GPR(R19)(r4) mr r8, r3 - PPC_STL r20, VCPU_GPR(r20)(r4) + PPC_STL r20, VCPU_GPR(R20)(r4) rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS - PPC_STL r21, VCPU_GPR(r21)(r4) + PPC_STL r21, VCPU_GPR(R21)(r4) rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR - PPC_STL r22, VCPU_GPR(r22)(r4) + PPC_STL r22, VCPU_GPR(R22)(r4) rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID - PPC_STL r23, VCPU_GPR(r23)(r4) - PPC_STL r24, VCPU_GPR(r24)(r4) - PPC_STL r25, VCPU_GPR(r25)(r4) - PPC_STL r26, VCPU_GPR(r26)(r4) - PPC_STL r27, VCPU_GPR(r27)(r4) - PPC_STL r28, VCPU_GPR(r28)(r4) - PPC_STL r29, VCPU_GPR(r29)(r4) - PPC_STL r30, VCPU_GPR(r30)(r4) - PPC_STL r31, VCPU_GPR(r31)(r4) + PPC_STL r23, VCPU_GPR(R23)(r4) + PPC_STL r24, VCPU_GPR(R24)(r4) + PPC_STL r25, VCPU_GPR(R25)(r4) + PPC_STL r26, VCPU_GPR(R26)(r4) + PPC_STL r27, VCPU_GPR(R27)(r4) + PPC_STL r28, VCPU_GPR(R28)(r4) + PPC_STL r29, VCPU_GPR(R29)(r4) + PPC_STL r30, VCPU_GPR(R30)(r4) + PPC_STL r31, VCPU_GPR(R31)(r4) mtspr SPRN_EPLC, r8 /* disable preemption, so we are sure we hit the fixup handler */ @@ -211,24 +211,24 @@ .macro kvm_handler intno srr0, srr1, flags _GLOBAL(kvmppc_handler_\intno\()_\srr1) GET_VCPU(r11, r10) - PPC_STL r3, VCPU_GPR(r3)(r11) + PPC_STL r3, VCPU_GPR(R3)(r11) mfspr r3, SPRN_SPRG_RSCRATCH0 - PPC_STL r4, VCPU_GPR(r4)(r11) + PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, THREAD_NORMSAVE(0)(r10) - PPC_STL r5, VCPU_GPR(r5)(r11) + PPC_STL r5, VCPU_GPR(R5)(r11) stw r13, VCPU_CR(r11) mfspr r5, \srr0 - PPC_STL r3, VCPU_GPR(r10)(r11) + PPC_STL r3, VCPU_GPR(R10)(r11) PPC_LL r3, THREAD_NORMSAVE(2)(r10) - PPC_STL r6, VCPU_GPR(r6)(r11) - PPC_STL r4, VCPU_GPR(r11)(r11) + PPC_STL r6, VCPU_GPR(R6)(r11) + 
PPC_STL r4, VCPU_GPR(R11)(r11) mfspr r6, \srr1 - PPC_STL r7, VCPU_GPR(r7)(r11) - PPC_STL r8, VCPU_GPR(r8)(r11) - PPC_STL r9, VCPU_GPR(r9)(r11) - PPC_STL r3, VCPU_GPR(r13)(r11) + PPC_STL r7, VCPU_GPR(R7)(r11) + PPC_STL r8, VCPU_GPR(R8)(r11) + PPC_STL r9, VCPU_GPR(R9)(r11) + PPC_STL r3, VCPU_GPR(R13)(r11) mfctr r7 - PPC_STL r12, VCPU_GPR(r12)(r11) + PPC_STL r12, VCPU_GPR(R12)(r11) PPC_STL r7, VCPU_CTR(r11) mr r4, r11 kvm_handler_common \intno, \srr0, \flags @@ -238,25 +238,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) _GLOBAL(kvmppc_handler_\intno\()_\srr1) mfspr r10, SPRN_SPRG_THREAD GET_VCPU(r11, r10) - PPC_STL r3, VCPU_GPR(r3)(r11) + PPC_STL r3, VCPU_GPR(R3)(r11) mfspr r3, \scratch - PPC_STL r4, VCPU_GPR(r4)(r11) + PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, GPR9(r8) - PPC_STL r5, VCPU_GPR(r5)(r11) + PPC_STL r5, VCPU_GPR(R5)(r11) stw r9, VCPU_CR(r11) mfspr r5, \srr0 - PPC_STL r3, VCPU_GPR(r8)(r11) + PPC_STL r3, VCPU_GPR(R8)(r11) PPC_LL r3, GPR10(r8) - PPC_STL r6, VCPU_GPR(r6)(r11) - PPC_STL r4, VCPU_GPR(r9)(r11) + PPC_STL r6, VCPU_GPR(R6)(r11) + PPC_STL r4, VCPU_GPR(R9)(r11) mfspr r6, \srr1 PPC_LL r4, GPR11(r8) - PPC_STL r7, VCPU_GPR(r7)(r11) - PPC_STL r3, VCPU_GPR(r10)(r11) + PPC_STL r7, VCPU_GPR(R7)(r11) + PPC_STL r3, VCPU_GPR(R10)(r11) mfctr r7 - PPC_STL r12, VCPU_GPR(r12)(r11) - PPC_STL r13, VCPU_GPR(r13)(r11) - PPC_STL r4, VCPU_GPR(r11)(r11) + PPC_STL r12, VCPU_GPR(R12)(r11) + PPC_STL r13, VCPU_GPR(R13)(r11) + PPC_STL r4, VCPU_GPR(R11)(r11) PPC_STL r7, VCPU_CTR(r11) mr r4, r11 kvm_handler_common \intno, \srr0, \flags @@ -310,7 +310,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ _GLOBAL(kvmppc_resume_host) /* Save remaining volatile guest register state to vcpu. */ mfspr r3, SPRN_VRSAVE - PPC_STL r0, VCPU_GPR(r0)(r4) + PPC_STL r0, VCPU_GPR(R0)(r4) mflr r5 mfspr r6, SPRN_SPRG4 PPC_STL r5, VCPU_LR(r4) @@ -358,27 +358,27 @@ _GLOBAL(kvmppc_resume_host) /* Restore vcpu pointer and the nonvolatiles we used. */ mr r4, r14 - PPC_LL r14, VCPU_GPR(r14)(r4) + PPC_LL r14, VCPU_GPR(R14)(r4) andi. r5, r3, RESUME_FLAG_NV beq skip_nv_load - PPC_LL r15, VCPU_GPR(r15)(r4) - PPC_LL r16, VCPU_GPR(r16)(r4) - PPC_LL r17, VCPU_GPR(r17)(r4) - PPC_LL r18, VCPU_GPR(r18)(r4) - PPC_LL r19, VCPU_GPR(r19)(r4) - PPC_LL r20, VCPU_GPR(r20)(r4) - PPC_LL r21, VCPU_GPR(r21)(r4) - PPC_LL r22, VCPU_GPR(r22)(r4) - PPC_LL r23, VCPU_GPR(r23)(r4) - PPC_LL r24, VCPU_GPR(r24)(r4) - PPC_LL r25, VCPU_GPR(r25)(r4) - PPC_LL r26, VCPU_GPR(r26)(r4) - PPC_LL r27, VCPU_GPR(r27)(r4) - PPC_LL r28, VCPU_GPR(r28)(r4) - PPC_LL r29, VCPU_GPR(r29)(r4) - PPC_LL r30, VCPU_GPR(r30)(r4) - PPC_LL r31, VCPU_GPR(r31)(r4) + PPC_LL r15, VCPU_GPR(R15)(r4) + PPC_LL r16, VCPU_GPR(R16)(r4) + PPC_LL r17, VCPU_GPR(R17)(r4) + PPC_LL r18, VCPU_GPR(R18)(r4) + PPC_LL r19, VCPU_GPR(R19)(r4) + PPC_LL r20, VCPU_GPR(R20)(r4) + PPC_LL r21, VCPU_GPR(R21)(r4) + PPC_LL r22, VCPU_GPR(R22)(r4) + PPC_LL r23, VCPU_GPR(R23)(r4) + PPC_LL r24, VCPU_GPR(R24)(r4) + PPC_LL r25, VCPU_GPR(R25)(r4) + PPC_LL r26, VCPU_GPR(R26)(r4) + PPC_LL r27, VCPU_GPR(R27)(r4) + PPC_LL r28, VCPU_GPR(R28)(r4) + PPC_LL r29, VCPU_GPR(R29)(r4) + PPC_LL r30, VCPU_GPR(R30)(r4) + PPC_LL r31, VCPU_GPR(R31)(r4) skip_nv_load: /* Should we return to the guest? */ andi. r5, r3, RESUME_FLAG_HOST @@ -396,23 +396,23 @@ heavyweight_exit: * non-volatiles. 
*/ - PPC_STL r15, VCPU_GPR(r15)(r4) - PPC_STL r16, VCPU_GPR(r16)(r4) - PPC_STL r17, VCPU_GPR(r17)(r4) - PPC_STL r18, VCPU_GPR(r18)(r4) - PPC_STL r19, VCPU_GPR(r19)(r4) - PPC_STL r20, VCPU_GPR(r20)(r4) - PPC_STL r21, VCPU_GPR(r21)(r4) - PPC_STL r22, VCPU_GPR(r22)(r4) - PPC_STL r23, VCPU_GPR(r23)(r4) - PPC_STL r24, VCPU_GPR(r24)(r4) - PPC_STL r25, VCPU_GPR(r25)(r4) - PPC_STL r26, VCPU_GPR(r26)(r4) - PPC_STL r27, VCPU_GPR(r27)(r4) - PPC_STL r28, VCPU_GPR(r28)(r4) - PPC_STL r29, VCPU_GPR(r29)(r4) - PPC_STL r30, VCPU_GPR(r30)(r4) - PPC_STL r31, VCPU_GPR(r31)(r4) + PPC_STL r15, VCPU_GPR(R15)(r4) + PPC_STL r16, VCPU_GPR(R16)(r4) + PPC_STL r17, VCPU_GPR(R17)(r4) + PPC_STL r18, VCPU_GPR(R18)(r4) + PPC_STL r19, VCPU_GPR(R19)(r4) + PPC_STL r20, VCPU_GPR(R20)(r4) + PPC_STL r21, VCPU_GPR(R21)(r4) + PPC_STL r22, VCPU_GPR(R22)(r4) + PPC_STL r23, VCPU_GPR(R23)(r4) + PPC_STL r24, VCPU_GPR(R24)(r4) + PPC_STL r25, VCPU_GPR(R25)(r4) + PPC_STL r26, VCPU_GPR(R26)(r4) + PPC_STL r27, VCPU_GPR(R27)(r4) + PPC_STL r28, VCPU_GPR(R28)(r4) + PPC_STL r29, VCPU_GPR(R29)(r4) + PPC_STL r30, VCPU_GPR(R30)(r4) + PPC_STL r31, VCPU_GPR(R31)(r4) /* Load host non-volatile register state from host stack. */ PPC_LL r14, HOST_NV_GPR(r14)(r1) @@ -478,24 +478,24 @@ _GLOBAL(__kvmppc_vcpu_run) PPC_STL r31, HOST_NV_GPR(r31)(r1) /* Load guest non-volatiles. */ - PPC_LL r14, VCPU_GPR(r14)(r4) - PPC_LL r15, VCPU_GPR(r15)(r4) - PPC_LL r16, VCPU_GPR(r16)(r4) - PPC_LL r17, VCPU_GPR(r17)(r4) - PPC_LL r18, VCPU_GPR(r18)(r4) - PPC_LL r19, VCPU_GPR(r19)(r4) - PPC_LL r20, VCPU_GPR(r20)(r4) - PPC_LL r21, VCPU_GPR(r21)(r4) - PPC_LL r22, VCPU_GPR(r22)(r4) - PPC_LL r23, VCPU_GPR(r23)(r4) - PPC_LL r24, VCPU_GPR(r24)(r4) - PPC_LL r25, VCPU_GPR(r25)(r4) - PPC_LL r26, VCPU_GPR(r26)(r4) - PPC_LL r27, VCPU_GPR(r27)(r4) - PPC_LL r28, VCPU_GPR(r28)(r4) - PPC_LL r29, VCPU_GPR(r29)(r4) - PPC_LL r30, VCPU_GPR(r30)(r4) - PPC_LL r31, VCPU_GPR(r31)(r4) + PPC_LL r14, VCPU_GPR(R14)(r4) + PPC_LL r15, VCPU_GPR(R15)(r4) + PPC_LL r16, VCPU_GPR(R16)(r4) + PPC_LL r17, VCPU_GPR(R17)(r4) + PPC_LL r18, VCPU_GPR(R18)(r4) + PPC_LL r19, VCPU_GPR(R19)(r4) + PPC_LL r20, VCPU_GPR(R20)(r4) + PPC_LL r21, VCPU_GPR(R21)(r4) + PPC_LL r22, VCPU_GPR(R22)(r4) + PPC_LL r23, VCPU_GPR(R23)(r4) + PPC_LL r24, VCPU_GPR(R24)(r4) + PPC_LL r25, VCPU_GPR(R25)(r4) + PPC_LL r26, VCPU_GPR(R26)(r4) + PPC_LL r27, VCPU_GPR(R27)(r4) + PPC_LL r28, VCPU_GPR(R28)(r4) + PPC_LL r29, VCPU_GPR(R29)(r4) + PPC_LL r30, VCPU_GPR(R30)(r4) + PPC_LL r31, VCPU_GPR(R31)(r4) lightweight_exit: @@ -554,13 +554,13 @@ lightweight_exit: lwz r7, VCPU_CR(r4) PPC_LL r8, VCPU_PC(r4) PPC_LD(r9, VCPU_SHARED_MSR, r11) - PPC_LL r0, VCPU_GPR(r0)(r4) - PPC_LL r1, VCPU_GPR(r1)(r4) - PPC_LL r2, VCPU_GPR(r2)(r4) - PPC_LL r10, VCPU_GPR(r10)(r4) - PPC_LL r11, VCPU_GPR(r11)(r4) - PPC_LL r12, VCPU_GPR(r12)(r4) - PPC_LL r13, VCPU_GPR(r13)(r4) + PPC_LL r0, VCPU_GPR(R0)(r4) + PPC_LL r1, VCPU_GPR(R1)(r4) + PPC_LL r2, VCPU_GPR(R2)(r4) + PPC_LL r10, VCPU_GPR(R10)(r4) + PPC_LL r11, VCPU_GPR(R11)(r4) + PPC_LL r12, VCPU_GPR(R12)(r4) + PPC_LL r13, VCPU_GPR(R13)(r4) mtlr r3 mtxer r5 mtctr r6 @@ -586,12 +586,12 @@ lightweight_exit: mtcr r7 /* Finish loading guest volatiles and jump to guest. 
*/ - PPC_LL r5, VCPU_GPR(r5)(r4) - PPC_LL r6, VCPU_GPR(r6)(r4) - PPC_LL r7, VCPU_GPR(r7)(r4) - PPC_LL r8, VCPU_GPR(r8)(r4) - PPC_LL r9, VCPU_GPR(r9)(r4) - - PPC_LL r3, VCPU_GPR(r3)(r4) - PPC_LL r4, VCPU_GPR(r4)(r4) + PPC_LL r5, VCPU_GPR(R5)(r4) + PPC_LL r6, VCPU_GPR(R6)(r4) + PPC_LL r7, VCPU_GPR(R7)(r4) + PPC_LL r8, VCPU_GPR(R8)(r4) + PPC_LL r9, VCPU_GPR(R9)(r4) + + PPC_LL r3, VCPU_GPR(R3)(r4) + PPC_LL r4, VCPU_GPR(R4)(r4) rfi diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S index 18245af38aea..7ca6871c11ee 100644 --- a/arch/powerpc/lib/checksum_64.S +++ b/arch/powerpc/lib/checksum_64.S @@ -114,9 +114,9 @@ _GLOBAL(csum_partial) mtctr r6 stdu r1,-STACKFRAMESIZE(r1) - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) + std r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) ld r6,0(r3) ld r9,8(r3) @@ -175,9 +175,9 @@ _GLOBAL(csum_partial) adde r0,r0,r15 adde r0,r0,r16 - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) + ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) addi r1,r1,STACKFRAMESIZE andi. r4,r4,63 @@ -299,9 +299,9 @@ dest; sth r6,0(r4) mtctr r6 stdu r1,-STACKFRAMESIZE(r1) - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) + std r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) source; ld r6,0(r3) source; ld r9,8(r3) @@ -382,9 +382,9 @@ dest; std r16,56(r4) adde r0,r0,r15 adde r0,r0,r16 - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) + ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) addi r1,r1,STACKFRAMESIZE andi. r5,r5,63 diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index 01e2b5db325f..a2126cebb957 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ b/arch/powerpc/lib/copypage_power7.S @@ -113,13 +113,13 @@ _GLOBAL(copypage_power7) #endif .Lnonvmx_copy: - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) - std r17,STK_REG(r17)(r1) - std r18,STK_REG(r18)(r1) - std r19,STK_REG(r19)(r1) - std r20,STK_REG(r20)(r1) + std r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) + std r17,STK_REG(R17)(r1) + std r18,STK_REG(R18)(r1) + std r19,STK_REG(R19)(r1) + std r20,STK_REG(R20)(r1) 1: ld r0,0(r4) ld r5,8(r4) @@ -157,12 +157,12 @@ _GLOBAL(copypage_power7) addi r3,r3,128 bdnz 1b - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) - ld r17,STK_REG(r17)(r1) - ld r18,STK_REG(r18)(r1) - ld r19,STK_REG(r19)(r1) - ld r20,STK_REG(r20)(r1) + ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) + ld r17,STK_REG(R17)(r1) + ld r18,STK_REG(R18)(r1) + ld r19,STK_REG(R19)(r1) + ld r20,STK_REG(R20)(r1) addi r1,r1,STACKFRAMESIZE blr diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index d73a59014900..f47d05a51eb8 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -30,7 +30,7 @@ _GLOBAL(__copy_tofrom_user_base) dcbt 0,r4 beq .Lcopy_page_4K andi. 
r6,r6,7 - PPC_MTOCRF(0x01,r5) + PPC_MTOCRF(0x01,R5) blt cr1,.Lshort_copy /* Below we want to nop out the bne if we're on a CPU that has the * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit @@ -186,7 +186,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) blr .Ldst_unaligned: - PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */ + PPC_MTOCRF(0x01,R6) /* put #bytes to 8B bdry into cr7 */ subf r5,r6,r5 li r7,0 cmpldi cr1,r5,16 @@ -201,7 +201,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 2: bf cr7*4+1,3f 37: lwzx r0,r7,r4 83: stwx r0,r7,r3 -3: PPC_MTOCRF(0x01,r5) +3: PPC_MTOCRF(0x01,R5) add r4,r6,r4 add r3,r6,r3 b .Ldst_aligned diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index 48e3f8c5768c..c8680c0cc3e4 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -57,9 +57,9 @@ .Ldo_err4: - ld r16,STK_REG(r16)(r1) - ld r15,STK_REG(r15)(r1) - ld r14,STK_REG(r14)(r1) + ld r16,STK_REG(R16)(r1) + ld r15,STK_REG(R15)(r1) + ld r14,STK_REG(R14)(r1) .Ldo_err3: bl .exit_vmx_usercopy ld r0,STACKFRAMESIZE+16(r1) @@ -68,15 +68,15 @@ #endif /* CONFIG_ALTIVEC */ .Ldo_err2: - ld r22,STK_REG(r22)(r1) - ld r21,STK_REG(r21)(r1) - ld r20,STK_REG(r20)(r1) - ld r19,STK_REG(r19)(r1) - ld r18,STK_REG(r18)(r1) - ld r17,STK_REG(r17)(r1) - ld r16,STK_REG(r16)(r1) - ld r15,STK_REG(r15)(r1) - ld r14,STK_REG(r14)(r1) + ld r22,STK_REG(R22)(r1) + ld r21,STK_REG(R21)(r1) + ld r20,STK_REG(R20)(r1) + ld r19,STK_REG(R19)(r1) + ld r18,STK_REG(R18)(r1) + ld r17,STK_REG(R17)(r1) + ld r16,STK_REG(R16)(r1) + ld r15,STK_REG(R15)(r1) + ld r14,STK_REG(R14)(r1) .Lexit: addi r1,r1,STACKFRAMESIZE .Ldo_err1: @@ -137,15 +137,15 @@ err1; stw r0,0(r3) mflr r0 stdu r1,-STACKFRAMESIZE(r1) - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) - std r17,STK_REG(r17)(r1) - std r18,STK_REG(r18)(r1) - std r19,STK_REG(r19)(r1) - std r20,STK_REG(r20)(r1) - std r21,STK_REG(r21)(r1) - std r22,STK_REG(r22)(r1) + std r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) + std r17,STK_REG(R17)(r1) + std r18,STK_REG(R18)(r1) + std r19,STK_REG(R19)(r1) + std r20,STK_REG(R20)(r1) + std r21,STK_REG(R21)(r1) + std r22,STK_REG(R22)(r1) std r0,STACKFRAMESIZE+16(r1) srdi r6,r5,7 @@ -192,15 +192,15 @@ err2; std r21,120(r3) clrldi r5,r5,(64-7) - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) - ld r17,STK_REG(r17)(r1) - ld r18,STK_REG(r18)(r1) - ld r19,STK_REG(r19)(r1) - ld r20,STK_REG(r20)(r1) - ld r21,STK_REG(r21)(r1) - ld r22,STK_REG(r22)(r1) + ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) + ld r17,STK_REG(R17)(r1) + ld r18,STK_REG(R18)(r1) + ld r19,STK_REG(R19)(r1) + ld r20,STK_REG(R20)(r1) + ld r21,STK_REG(R21)(r1) + ld r22,STK_REG(R22)(r1) addi r1,r1,STACKFRAMESIZE /* Up to 127B to go */ @@ -440,9 +440,9 @@ err3; stvx vr0,r3,r11 7: sub r5,r5,r6 srdi r6,r5,7 - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) + std r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) li r12,64 li r14,80 @@ -477,9 +477,9 @@ err4; stvx vr0,r3,r16 addi r3,r3,128 bdnz 8b - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) + ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) /* Up to 127B to go */ clrldi r5,r5,(64-7) @@ -625,9 +625,9 @@ err3; stvx vr11,r3,r11 7: sub r5,r5,r6 srdi r6,r5,7 - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) + std 
r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) li r12,64 li r14,80 @@ -670,9 +670,9 @@ err4; stvx vr15,r3,r16 addi r3,r3,128 bdnz 8b - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) + ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) /* Up to 127B to go */ clrldi r5,r5,(64-7) diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S index fda27868cf8c..9b96ff2ecd4d 100644 --- a/arch/powerpc/lib/hweight_64.S +++ b/arch/powerpc/lib/hweight_64.S @@ -28,7 +28,7 @@ BEGIN_FTR_SECTION nop nop FTR_SECTION_ELSE - PPC_POPCNTB(r3,r3) + PPC_POPCNTB(R3,R3) clrldi r3,r3,64-8 blr ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) @@ -42,14 +42,14 @@ BEGIN_FTR_SECTION nop FTR_SECTION_ELSE BEGIN_FTR_SECTION_NESTED(50) - PPC_POPCNTB(r3,r3) + PPC_POPCNTB(R3,R3) srdi r4,r3,8 add r3,r4,r3 clrldi r3,r3,64-8 blr FTR_SECTION_ELSE_NESTED(50) clrlwi r3,r3,16 - PPC_POPCNTW(r3,r3) + PPC_POPCNTW(R3,R3) clrldi r3,r3,64-8 blr ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50) @@ -66,7 +66,7 @@ BEGIN_FTR_SECTION nop FTR_SECTION_ELSE BEGIN_FTR_SECTION_NESTED(51) - PPC_POPCNTB(r3,r3) + PPC_POPCNTB(R3,R3) srdi r4,r3,16 add r3,r4,r3 srdi r4,r3,8 @@ -74,7 +74,7 @@ FTR_SECTION_ELSE clrldi r3,r3,64-8 blr FTR_SECTION_ELSE_NESTED(51) - PPC_POPCNTW(r3,r3) + PPC_POPCNTW(R3,R3) clrldi r3,r3,64-8 blr ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51) @@ -93,7 +93,7 @@ BEGIN_FTR_SECTION nop FTR_SECTION_ELSE BEGIN_FTR_SECTION_NESTED(52) - PPC_POPCNTB(r3,r3) + PPC_POPCNTB(R3,R3) srdi r4,r3,32 add r3,r4,r3 srdi r4,r3,16 @@ -103,7 +103,7 @@ FTR_SECTION_ELSE clrldi r3,r3,64-8 blr FTR_SECTION_ELSE_NESTED(52) - PPC_POPCNTD(r3,r3) + PPC_POPCNTD(R3,R3) clrldi r3,r3,64-8 blr ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52) diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S index 6a85380520b6..3abae6bc7b4b 100644 --- a/arch/powerpc/lib/ldstfp.S +++ b/arch/powerpc/lib/ldstfp.S @@ -330,13 +330,13 @@ _GLOBAL(do_lxvd2x) MTMSRD(r7) isync beq cr7,1f - STXVD2X(0,r1,r8) + STXVD2X(0,R1,R8) 1: li r9,-EFAULT -2: LXVD2X(0,0,r4) +2: LXVD2X(0,0,R4) li r9,0 3: beq cr7,4f bl put_vsr - LXVD2X(0,r1,r8) + LXVD2X(0,R1,R8) 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) mtlr r0 MTMSRD(r6) @@ -358,13 +358,13 @@ _GLOBAL(do_stxvd2x) MTMSRD(r7) isync beq cr7,1f - STXVD2X(0,r1,r8) + STXVD2X(0,R1,R8) bl get_vsr 1: li r9,-EFAULT -2: STXVD2X(0,0,r4) +2: STXVD2X(0,0,R4) li r9,0 3: beq cr7,4f - LXVD2X(0,r1,r8) + LXVD2X(0,R1,R8) 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) mtlr r0 MTMSRD(r6) diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index f4fcb0bc6563..886bfc780681 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S @@ -19,7 +19,7 @@ _GLOBAL(memset) rlwimi r4,r4,16,0,15 cmplw cr1,r5,r0 /* do we get that far? */ rldimi r4,r4,32,0 - PPC_MTOCRF(1,r0) + PPC_MTOCRF(1,R0) mr r6,r3 blt cr1,8f beq+ 3f /* if already 8-byte aligned */ @@ -49,7 +49,7 @@ _GLOBAL(memset) bdnz 4b 5: srwi. 
r0,r5,3 clrlwi r5,r5,29 - PPC_MTOCRF(1,r0) + PPC_MTOCRF(1,R0) beq 8f bf 29,6f std r4,0(r6) @@ -65,7 +65,7 @@ _GLOBAL(memset) std r4,0(r6) addi r6,r6,8 8: cmpwi r5,0 - PPC_MTOCRF(1,r5) + PPC_MTOCRF(1,R5) beqlr+ bf 29,9f stw r4,0(r6) diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index d2bbbc8d7dc0..0a87b37e16fe 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S @@ -16,7 +16,7 @@ BEGIN_FTR_SECTION FTR_SECTION_ELSE b memcpy_power7 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) - PPC_MTOCRF(0x01,r5) + PPC_MTOCRF(0x01,R5) cmpldi cr1,r5,16 neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry andi. r6,r6,7 @@ -158,7 +158,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) blr .Ldst_unaligned: - PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7 + PPC_MTOCRF(0x01,R6) # put #bytes to 8B bdry into cr7 subf r5,r6,r5 li r7,0 cmpldi cr1,r5,16 @@ -173,7 +173,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 2: bf cr7*4+1,3f lwzx r0,r7,r4 stwx r0,r7,r3 -3: PPC_MTOCRF(0x01,r5) +3: PPC_MTOCRF(0x01,R5) add r4,r6,r4 add r3,r6,r3 b .Ldst_aligned diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index 84674d897937..04524a2a0b88 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -69,15 +69,15 @@ _GLOBAL(memcpy_power7) mflr r0 stdu r1,-STACKFRAMESIZE(r1) - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) - std r17,STK_REG(r17)(r1) - std r18,STK_REG(r18)(r1) - std r19,STK_REG(r19)(r1) - std r20,STK_REG(r20)(r1) - std r21,STK_REG(r21)(r1) - std r22,STK_REG(r22)(r1) + std r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) + std r17,STK_REG(R17)(r1) + std r18,STK_REG(R18)(r1) + std r19,STK_REG(R19)(r1) + std r20,STK_REG(R20)(r1) + std r21,STK_REG(R21)(r1) + std r22,STK_REG(R22)(r1) std r0,STACKFRAMESIZE+16(r1) srdi r6,r5,7 @@ -124,15 +124,15 @@ _GLOBAL(memcpy_power7) clrldi r5,r5,(64-7) - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) - ld r17,STK_REG(r17)(r1) - ld r18,STK_REG(r18)(r1) - ld r19,STK_REG(r19)(r1) - ld r20,STK_REG(r20)(r1) - ld r21,STK_REG(r21)(r1) - ld r22,STK_REG(r22)(r1) + ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) + ld r17,STK_REG(R17)(r1) + ld r18,STK_REG(R18)(r1) + ld r19,STK_REG(R19)(r1) + ld r20,STK_REG(R20)(r1) + ld r21,STK_REG(R21)(r1) + ld r22,STK_REG(R22)(r1) addi r1,r1,STACKFRAMESIZE /* Up to 127B to go */ @@ -343,9 +343,9 @@ _GLOBAL(memcpy_power7) 7: sub r5,r5,r6 srdi r6,r5,7 - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) + std r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) li r12,64 li r14,80 @@ -380,9 +380,9 @@ _GLOBAL(memcpy_power7) addi r3,r3,128 bdnz 8b - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) + ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) /* Up to 127B to go */ clrldi r5,r5,(64-7) @@ -529,9 +529,9 @@ _GLOBAL(memcpy_power7) 7: sub r5,r5,r6 srdi r6,r5,7 - std r14,STK_REG(r14)(r1) - std r15,STK_REG(r15)(r1) - std r16,STK_REG(r16)(r1) + std r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) li r12,64 li r14,80 @@ -574,9 +574,9 @@ _GLOBAL(memcpy_power7) addi r3,r3,128 bdnz 8b - ld r14,STK_REG(r14)(r1) - ld r15,STK_REG(r15)(r1) - ld r16,STK_REG(r16)(r1) + ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) /* Up to 127B to go */ clrldi r5,r5,(64-7) diff --git 
a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index a242b5d7cbe4..113dcb0f61df 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -64,9 +64,9 @@ _GLOBAL(__hash_page_4K) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) /* Save all params that we need after a function call */ - std r6,STK_PARM(r6)(r1) - std r8,STK_PARM(r8)(r1) - std r9,STK_PARM(r9)(r1) + std r6,STK_PARM(R6)(r1) + std r8,STK_PARM(R8)(r1) + std r9,STK_PARM(R9)(r1) /* Save non-volatile registers. * r31 will hold "old PTE" @@ -75,11 +75,11 @@ _GLOBAL(__hash_page_4K) * r28 is a hash value * r27 is hashtab mask (maybe dynamic patched instead ?) */ - std r27,STK_REG(r27)(r1) - std r28,STK_REG(r28)(r1) - std r29,STK_REG(r29)(r1) - std r30,STK_REG(r30)(r1) - std r31,STK_REG(r31)(r1) + std r27,STK_REG(R27)(r1) + std r28,STK_REG(R28)(r1) + std r29,STK_REG(R29)(r1) + std r30,STK_REG(R30)(r1) + std r31,STK_REG(R31)(r1) /* Step 1: * @@ -162,7 +162,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) /* At this point, r3 contains new PP bits, save them in * place of "access" in the param area (sic) */ - std r3,STK_PARM(r4)(r1) + std r3,STK_PARM(R4)(r1) /* Get htab_hash_mask */ ld r4,htab_hash_mask@got(2) @@ -192,11 +192,11 @@ htab_insert_pte: rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ mr r4,r29 /* Retrieve va */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_4K /* page size */ - ld r9,STK_PARM(r9)(r1) /* segment size */ + ld r9,STK_PARM(R9)(r1) /* segment size */ _GLOBAL(htab_call_hpte_insert1) bl . /* Patched by htab_finish_init() */ cmpdi 0,r3,0 @@ -215,11 +215,11 @@ _GLOBAL(htab_call_hpte_insert1) rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ mr r4,r29 /* Retrieve va */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_4K /* page size */ - ld r9,STK_PARM(r9)(r1) /* segment size */ + ld r9,STK_PARM(R9)(r1) /* segment size */ _GLOBAL(htab_call_hpte_insert2) bl . /* Patched by htab_finish_init() */ cmpdi 0,r3,0 @@ -255,15 +255,15 @@ htab_pte_insert_ok: * (maybe add eieio may be good still ?) */ htab_write_out_pte: - ld r6,STK_PARM(r6)(r1) + ld r6,STK_PARM(R6)(r1) std r30,0(r6) li r3, 0 htab_bail: - ld r27,STK_REG(r27)(r1) - ld r28,STK_REG(r28)(r1) - ld r29,STK_REG(r29)(r1) - ld r30,STK_REG(r30)(r1) - ld r31,STK_REG(r31)(r1) + ld r27,STK_REG(R27)(r1) + ld r28,STK_REG(R28)(r1) + ld r29,STK_REG(R29)(r1) + ld r30,STK_REG(R30)(r1) + ld r31,STK_REG(R31)(r1) addi r1,r1,STACKFRAMESIZE ld r0,16(r1) mtlr r0 @@ -288,8 +288,8 @@ htab_modify_pte: /* Call ppc_md.hpte_updatepp */ mr r5,r29 /* va */ li r6,MMU_PAGE_4K /* page size */ - ld r7,STK_PARM(r9)(r1) /* segment size */ - ld r8,STK_PARM(r8)(r1) /* get "local" param */ + ld r7,STK_PARM(R9)(r1) /* segment size */ + ld r8,STK_PARM(R8)(r1) /* get "local" param */ _GLOBAL(htab_call_hpte_updatepp) bl . 
/* Patched by htab_finish_init() */ @@ -312,7 +312,7 @@ htab_wrong_access: htab_pte_insert_failure: /* Bail out restoring old PTE */ - ld r6,STK_PARM(r6)(r1) + ld r6,STK_PARM(R6)(r1) std r31,0(r6) li r3,-1 b htab_bail @@ -340,9 +340,9 @@ _GLOBAL(__hash_page_4K) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) /* Save all params that we need after a function call */ - std r6,STK_PARM(r6)(r1) - std r8,STK_PARM(r8)(r1) - std r9,STK_PARM(r9)(r1) + std r6,STK_PARM(R6)(r1) + std r8,STK_PARM(R8)(r1) + std r9,STK_PARM(R9)(r1) /* Save non-volatile registers. * r31 will hold "old PTE" @@ -353,13 +353,13 @@ _GLOBAL(__hash_page_4K) * r26 is the hidx mask * r25 is the index in combo page */ - std r25,STK_REG(r25)(r1) - std r26,STK_REG(r26)(r1) - std r27,STK_REG(r27)(r1) - std r28,STK_REG(r28)(r1) - std r29,STK_REG(r29)(r1) - std r30,STK_REG(r30)(r1) - std r31,STK_REG(r31)(r1) + std r25,STK_REG(R25)(r1) + std r26,STK_REG(R26)(r1) + std r27,STK_REG(R27)(r1) + std r28,STK_REG(R28)(r1) + std r29,STK_REG(R29)(r1) + std r30,STK_REG(R30)(r1) + std r31,STK_REG(R31)(r1) /* Step 1: * @@ -452,7 +452,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) /* At this point, r3 contains new PP bits, save them in * place of "access" in the param area (sic) */ - std r3,STK_PARM(r4)(r1) + std r3,STK_PARM(R4)(r1) /* Get htab_hash_mask */ ld r4,htab_hash_mask@got(2) @@ -473,7 +473,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) andis. r0,r31,_PAGE_COMBO@h beq htab_inval_old_hpte - ld r6,STK_PARM(r6)(r1) + ld r6,STK_PARM(R6)(r1) ori r26,r6,0x8000 /* Load the hidx mask */ ld r26,0(r26) addi r5,r25,36 /* Check actual HPTE_SUB bit, this */ @@ -495,11 +495,11 @@ htab_special_pfn: rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ mr r4,r29 /* Retrieve va */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_4K /* page size */ - ld r9,STK_PARM(r9)(r1) /* segment size */ + ld r9,STK_PARM(R9)(r1) /* segment size */ _GLOBAL(htab_call_hpte_insert1) bl . /* patched by htab_finish_init() */ cmpdi 0,r3,0 @@ -522,11 +522,11 @@ _GLOBAL(htab_call_hpte_insert1) rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ mr r4,r29 /* Retrieve va */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_4K /* page size */ - ld r9,STK_PARM(r9)(r1) /* segment size */ + ld r9,STK_PARM(R9)(r1) /* segment size */ _GLOBAL(htab_call_hpte_insert2) bl . 
/* patched by htab_finish_init() */ cmpdi 0,r3,0 @@ -559,8 +559,8 @@ htab_inval_old_hpte: mr r4,r31 /* PTE.pte */ li r5,0 /* PTE.hidx */ li r6,MMU_PAGE_64K /* psize */ - ld r7,STK_PARM(r9)(r1) /* ssize */ - ld r8,STK_PARM(r8)(r1) /* local */ + ld r7,STK_PARM(R9)(r1) /* ssize */ + ld r8,STK_PARM(R8)(r1) /* local */ bl .flush_hash_page /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ lis r0,_PAGE_HPTE_SUB@h @@ -576,7 +576,7 @@ htab_pte_insert_ok: /* Insert slot number & secondary bit in PTE second half, * clear _PAGE_BUSY and set approriate HPTE slot bit */ - ld r6,STK_PARM(r6)(r1) + ld r6,STK_PARM(R6)(r1) li r0,_PAGE_BUSY andc r30,r30,r0 /* HPTE SUB bit */ @@ -597,13 +597,13 @@ htab_pte_insert_ok: std r30,0(r6) li r3, 0 htab_bail: - ld r25,STK_REG(r25)(r1) - ld r26,STK_REG(r26)(r1) - ld r27,STK_REG(r27)(r1) - ld r28,STK_REG(r28)(r1) - ld r29,STK_REG(r29)(r1) - ld r30,STK_REG(r30)(r1) - ld r31,STK_REG(r31)(r1) + ld r25,STK_REG(R25)(r1) + ld r26,STK_REG(R26)(r1) + ld r27,STK_REG(R27)(r1) + ld r28,STK_REG(R28)(r1) + ld r29,STK_REG(R29)(r1) + ld r30,STK_REG(R30)(r1) + ld r31,STK_REG(R31)(r1) addi r1,r1,STACKFRAMESIZE ld r0,16(r1) mtlr r0 @@ -630,8 +630,8 @@ htab_modify_pte: /* Call ppc_md.hpte_updatepp */ mr r5,r29 /* va */ li r6,MMU_PAGE_4K /* page size */ - ld r7,STK_PARM(r9)(r1) /* segment size */ - ld r8,STK_PARM(r8)(r1) /* get "local" param */ + ld r7,STK_PARM(R9)(r1) /* segment size */ + ld r8,STK_PARM(R8)(r1) /* get "local" param */ _GLOBAL(htab_call_hpte_updatepp) bl . /* patched by htab_finish_init() */ @@ -644,7 +644,7 @@ _GLOBAL(htab_call_hpte_updatepp) /* Clear the BUSY bit and Write out the PTE */ li r0,_PAGE_BUSY andc r30,r30,r0 - ld r6,STK_PARM(r6)(r1) + ld r6,STK_PARM(R6)(r1) std r30,0(r6) li r3,0 b htab_bail @@ -657,7 +657,7 @@ htab_wrong_access: htab_pte_insert_failure: /* Bail out restoring old PTE */ - ld r6,STK_PARM(r6)(r1) + ld r6,STK_PARM(R6)(r1) std r31,0(r6) li r3,-1 b htab_bail @@ -677,9 +677,9 @@ _GLOBAL(__hash_page_64K) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) /* Save all params that we need after a function call */ - std r6,STK_PARM(r6)(r1) - std r8,STK_PARM(r8)(r1) - std r9,STK_PARM(r9)(r1) + std r6,STK_PARM(R6)(r1) + std r8,STK_PARM(R8)(r1) + std r9,STK_PARM(R9)(r1) /* Save non-volatile registers. * r31 will hold "old PTE" @@ -688,11 +688,11 @@ _GLOBAL(__hash_page_64K) * r28 is a hash value * r27 is hashtab mask (maybe dynamic patched instead ?) */ - std r27,STK_REG(r27)(r1) - std r28,STK_REG(r28)(r1) - std r29,STK_REG(r29)(r1) - std r30,STK_REG(r30)(r1) - std r31,STK_REG(r31)(r1) + std r27,STK_REG(R27)(r1) + std r28,STK_REG(R28)(r1) + std r29,STK_REG(R29)(r1) + std r30,STK_REG(R30)(r1) + std r31,STK_REG(R31)(r1) /* Step 1: * @@ -780,7 +780,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) /* At this point, r3 contains new PP bits, save them in * place of "access" in the param area (sic) */ - std r3,STK_PARM(r4)(r1) + std r3,STK_PARM(R4)(r1) /* Get htab_hash_mask */ ld r4,htab_hash_mask@got(2) @@ -813,11 +813,11 @@ ht64_insert_pte: rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ mr r4,r29 /* Retrieve va */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_64K - ld r9,STK_PARM(r9)(r1) /* segment size */ + ld r9,STK_PARM(R9)(r1) /* segment size */ _GLOBAL(ht64_call_hpte_insert1) bl . 
/* patched by htab_finish_init() */ cmpdi 0,r3,0 @@ -836,11 +836,11 @@ _GLOBAL(ht64_call_hpte_insert1) rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ mr r4,r29 /* Retrieve va */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_64K - ld r9,STK_PARM(r9)(r1) /* segment size */ + ld r9,STK_PARM(R9)(r1) /* segment size */ _GLOBAL(ht64_call_hpte_insert2) bl . /* patched by htab_finish_init() */ cmpdi 0,r3,0 @@ -876,15 +876,15 @@ ht64_pte_insert_ok: * (maybe add eieio may be good still ?) */ ht64_write_out_pte: - ld r6,STK_PARM(r6)(r1) + ld r6,STK_PARM(R6)(r1) std r30,0(r6) li r3, 0 ht64_bail: - ld r27,STK_REG(r27)(r1) - ld r28,STK_REG(r28)(r1) - ld r29,STK_REG(r29)(r1) - ld r30,STK_REG(r30)(r1) - ld r31,STK_REG(r31)(r1) + ld r27,STK_REG(R27)(r1) + ld r28,STK_REG(R28)(r1) + ld r29,STK_REG(R29)(r1) + ld r30,STK_REG(R30)(r1) + ld r31,STK_REG(R31)(r1) addi r1,r1,STACKFRAMESIZE ld r0,16(r1) mtlr r0 @@ -909,8 +909,8 @@ ht64_modify_pte: /* Call ppc_md.hpte_updatepp */ mr r5,r29 /* va */ li r6,MMU_PAGE_64K - ld r7,STK_PARM(r9)(r1) /* segment size */ - ld r8,STK_PARM(r8)(r1) /* get "local" param */ + ld r7,STK_PARM(R9)(r1) /* segment size */ + ld r8,STK_PARM(R8)(r1) /* get "local" param */ _GLOBAL(ht64_call_hpte_updatepp) bl . /* patched by htab_finish_init() */ @@ -933,7 +933,7 @@ ht64_wrong_access: ht64_pte_insert_failure: /* Bail out restoring old PTE */ - ld r6,STK_PARM(r6)(r1) + ld r6,STK_PARM(R6)(r1) std r31,0(r6) li r3,-1 b ht64_bail diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index ff672bd8fea9..4b9e2643d21b 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@ -126,7 +126,7 @@ BEGIN_MMU_FTR_SECTION /* Set the TLB reservation and search for existing entry. Then load * the entry. */ - PPC_TLBSRX_DOT(0,r16) + PPC_TLBSRX_DOT(R0,R16) ldx r14,r14,r15 /* grab pgd entry */ beq normal_tlb_miss_done /* tlb exists already, bail */ MMU_FTR_SECTION_ELSE @@ -395,7 +395,7 @@ BEGIN_MMU_FTR_SECTION /* Set the TLB reservation and search for existing entry. Then load * the entry. */ - PPC_TLBSRX_DOT(0,r16) + PPC_TLBSRX_DOT(R0,R16) ld r14,0(r10) beq normal_tlb_miss_done MMU_FTR_SECTION_ELSE @@ -528,7 +528,7 @@ BEGIN_MMU_FTR_SECTION /* Search if we already have a TLB entry for that virtual address, and * if we do, bail out. */ - PPC_TLBSRX_DOT(0,r16) + PPC_TLBSRX_DOT(R0,R16) beq virt_page_table_tlb_miss_done END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV) @@ -779,7 +779,7 @@ htw_tlb_miss: * * MAS1:IND should be already set based on MAS4 */ - PPC_TLBSRX_DOT(0,r16) + PPC_TLBSRX_DOT(R0,R16) beq htw_tlb_miss_done /* Now, we need to walk the page tables. First check if we are in @@ -919,7 +919,7 @@ tlb_load_linear: mtspr SPRN_MAS1,r15 /* Already somebody there ? */ - PPC_TLBSRX_DOT(0,r16) + PPC_TLBSRX_DOT(R0,R16) beq tlb_load_linear_done /* Now we build the remaining MAS. 
MAS0 and 2 should be fine diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S index 7c63c0ed4f1b..5a1285a9109f 100644 --- a/arch/powerpc/mm/tlb_nohash_low.S +++ b/arch/powerpc/mm/tlb_nohash_low.S @@ -313,7 +313,7 @@ BEGIN_MMU_FTR_SECTION mtspr SPRN_MAS1,r4 tlbwe MMU_FTR_SECTION_ELSE - PPC_TLBILX_VA(0,r3) + PPC_TLBILX_VA(R0,R3) ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) msync isync @@ -364,7 +364,7 @@ _GLOBAL(_tlbil_va) beq 1f rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ - PPC_TLBILX_VA(0,r3) + PPC_TLBILX_VA(R0,R3) msync isync wrtee r10 @@ -379,7 +379,7 @@ _GLOBAL(_tlbivax_bcast) beq 1f rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ - PPC_TLBIVAX(0,r3) + PPC_TLBIVAX(R0,R3) eieio tlbsync sync diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 2dc8b1484845..dd1130642d07 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -39,7 +39,7 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, /* Make stackframe */ if (ctx->seen & SEEN_DATAREF) { /* If we call any helpers (for loads), save LR */ - EMIT(PPC_INST_MFLR | __PPC_RT(0)); + EMIT(PPC_INST_MFLR | __PPC_RT(R0)); PPC_STD(0, 1, 16); /* Back up non-volatile regs. */ @@ -56,7 +56,7 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, PPC_STD(i, 1, -(8*(32-i))); } } - EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) | + EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) | (-BPF_PPC_STACKFRAME & 0xfffc)); } diff --git a/arch/powerpc/platforms/cell/beat_hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S index 74c817448948..079165eff3a2 100644 --- a/arch/powerpc/platforms/cell/beat_hvCall.S +++ b/arch/powerpc/platforms/cell/beat_hvCall.S @@ -74,7 +74,7 @@ _GLOBAL(beat_hcall_norets8) mr r6,r7 mr r7,r8 mr r8,r9 - ld r10,STK_PARM(r10)(r1) + ld r10,STK_PARM(R10)(r1) HVSC /* invoke the hypervisor */ @@ -94,7 +94,7 @@ _GLOBAL(beat_hcall1) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -108,7 +108,7 @@ _GLOBAL(beat_hcall1) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARM(R4)(r1) std r4, 0(r12) lwz r0,8(r1) @@ -125,7 +125,7 @@ _GLOBAL(beat_hcall2) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -139,7 +139,7 @@ _GLOBAL(beat_hcall2) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARM(R4)(r1) std r4, 0(r12) std r5, 8(r12) @@ -157,7 +157,7 @@ _GLOBAL(beat_hcall3) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -171,7 +171,7 @@ _GLOBAL(beat_hcall3) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) @@ -190,7 +190,7 @@ _GLOBAL(beat_hcall4) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -204,7 +204,7 @@ _GLOBAL(beat_hcall4) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) @@ -224,7 +224,7 @@ _GLOBAL(beat_hcall5) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -238,7 +238,7 @@ _GLOBAL(beat_hcall5) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld 
r12,STK_PARM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) @@ -259,7 +259,7 @@ _GLOBAL(beat_hcall6) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -273,7 +273,7 @@ _GLOBAL(beat_hcall6) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S index 77b48b2b9309..1bb7768abe4c 100644 --- a/arch/powerpc/platforms/powernv/opal-takeover.S +++ b/arch/powerpc/platforms/powernv/opal-takeover.S @@ -23,14 +23,14 @@ _GLOBAL(opal_query_takeover) mfcr r0 stw r0,8(r1) - std r3,STK_PARAM(r3)(r1) - std r4,STK_PARAM(r4)(r1) + std r3,STK_PARAM(R3)(r1) + std r4,STK_PARAM(R4)(r1) li r3,H_HAL_TAKEOVER li r4,H_HAL_TAKEOVER_QUERY_MAGIC HVSC - ld r10,STK_PARAM(r3)(r1) + ld r10,STK_PARAM(R3)(r1) std r4,0(r10) - ld r10,STK_PARAM(r4)(r1) + ld r10,STK_PARAM(R4)(r1) std r5,0(r10) lwz r0,8(r1) mtcrf 0xff,r0 diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 3bb07e5e43cd..afcddec5d3a1 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -32,7 +32,7 @@ std r12,PACASAVEDMSR(r13); \ andc r12,r12,r0; \ mtmsrd r12,1; \ - LOAD_REG_ADDR(r0,.opal_return); \ + LOAD_REG_ADDR(R0,.opal_return); \ mtlr r0; \ li r0,MSR_DR|MSR_IR; \ andc r12,r12,r0; \ diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 3ce73d0052b1..04b8efdee5b7 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -40,28 +40,28 @@ END_FTR_SECTION(0, 1); \ cmpdi r12,0; \ beq+ 1f; \ mflr r0; \ - std r3,STK_PARM(r3)(r1); \ - std r4,STK_PARM(r4)(r1); \ - std r5,STK_PARM(r5)(r1); \ - std r6,STK_PARM(r6)(r1); \ - std r7,STK_PARM(r7)(r1); \ - std r8,STK_PARM(r8)(r1); \ - std r9,STK_PARM(r9)(r1); \ - std r10,STK_PARM(r10)(r1); \ + std r3,STK_PARM(R3)(r1); \ + std r4,STK_PARM(R4)(r1); \ + std r5,STK_PARM(R5)(r1); \ + std r6,STK_PARM(R6)(r1); \ + std r7,STK_PARM(R7)(r1); \ + std r8,STK_PARM(R8)(r1); \ + std r9,STK_PARM(R9)(r1); \ + std r10,STK_PARM(R10)(r1); \ std r0,16(r1); \ addi r4,r1,STK_PARM(FIRST_REG); \ stdu r1,-STACK_FRAME_OVERHEAD(r1); \ bl .__trace_hcall_entry; \ addi r1,r1,STACK_FRAME_OVERHEAD; \ ld r0,16(r1); \ - ld r3,STK_PARM(r3)(r1); \ - ld r4,STK_PARM(r4)(r1); \ - ld r5,STK_PARM(r5)(r1); \ - ld r6,STK_PARM(r6)(r1); \ - ld r7,STK_PARM(r7)(r1); \ - ld r8,STK_PARM(r8)(r1); \ - ld r9,STK_PARM(r9)(r1); \ - ld r10,STK_PARM(r10)(r1); \ + ld r3,STK_PARM(R3)(r1); \ + ld r4,STK_PARM(R4)(r1); \ + ld r5,STK_PARM(R5)(r1); \ + ld r6,STK_PARM(R6)(r1); \ + ld r7,STK_PARM(R7)(r1); \ + ld r8,STK_PARM(R8)(r1); \ + ld r9,STK_PARM(R9)(r1); \ + ld r10,STK_PARM(R10)(r1); \ mtlr r0; \ 1: @@ -79,8 +79,8 @@ END_FTR_SECTION(0, 1); \ cmpdi r12,0; \ beq+ 1f; \ mflr r0; \ - ld r6,STK_PARM(r3)(r1); \ - std r3,STK_PARM(r3)(r1); \ + ld r6,STK_PARM(R3)(r1); \ + std r3,STK_PARM(R3)(r1); \ mr r4,r3; \ mr r3,r6; \ std r0,16(r1); \ @@ -88,7 +88,7 @@ END_FTR_SECTION(0, 1); \ bl .__trace_hcall_exit; \ addi r1,r1,STACK_FRAME_OVERHEAD; \ ld r0,16(r1); \ - ld r3,STK_PARM(r3)(r1); \ + ld r3,STK_PARM(R3)(r1); \ mtlr r0; \ 1: @@ -114,7 +114,7 @@ _GLOBAL(plpar_hcall_norets) mfcr r0 stw r0,8(r1) - HCALL_INST_PRECALL(r4) + HCALL_INST_PRECALL(R4) HVSC /* invoke the hypervisor */ @@ -130,9 +130,9 @@ _GLOBAL(plpar_hcall) mfcr r0 
stw r0,8(r1) - HCALL_INST_PRECALL(r5) + HCALL_INST_PRECALL(R5) - std r4,STK_PARM(r4)(r1) /* Save ret buffer */ + std r4,STK_PARM(R4)(r1) /* Save ret buffer */ mr r4,r5 mr r5,r6 @@ -143,7 +143,7 @@ _GLOBAL(plpar_hcall) HVSC /* invoke the hypervisor */ - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) @@ -168,7 +168,7 @@ _GLOBAL(plpar_hcall_raw) mfcr r0 stw r0,8(r1) - std r4,STK_PARM(r4)(r1) /* Save ret buffer */ + std r4,STK_PARM(R4)(r1) /* Save ret buffer */ mr r4,r5 mr r5,r6 @@ -179,7 +179,7 @@ _GLOBAL(plpar_hcall_raw) HVSC /* invoke the hypervisor */ - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) @@ -196,9 +196,9 @@ _GLOBAL(plpar_hcall9) mfcr r0 stw r0,8(r1) - HCALL_INST_PRECALL(r5) + HCALL_INST_PRECALL(R5) - std r4,STK_PARM(r4)(r1) /* Save ret buffer */ + std r4,STK_PARM(R4)(r1) /* Save ret buffer */ mr r4,r5 mr r5,r6 @@ -206,14 +206,14 @@ _GLOBAL(plpar_hcall9) mr r7,r8 mr r8,r9 mr r9,r10 - ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ - ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ - ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ + ld r10,STK_PARM(R11)(r1) /* put arg7 in R10 */ + ld r11,STK_PARM(R12)(r1) /* put arg8 in R11 */ + ld r12,STK_PARM(R13)(r1) /* put arg9 in R12 */ HVSC /* invoke the hypervisor */ mr r0,r12 - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) @@ -238,7 +238,7 @@ _GLOBAL(plpar_hcall9_raw) mfcr r0 stw r0,8(r1) - std r4,STK_PARM(r4)(r1) /* Save ret buffer */ + std r4,STK_PARM(R4)(r1) /* Save ret buffer */ mr r4,r5 mr r5,r6 @@ -246,14 +246,14 @@ _GLOBAL(plpar_hcall9_raw) mr r7,r8 mr r8,r9 mr r9,r10 - ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ - ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ - ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ + ld r10,STK_PARM(R11)(r1) /* put arg7 in R10 */ + ld r11,STK_PARM(R12)(r1) /* put arg8 in R11 */ + ld r12,STK_PARM(R13)(r1) /* put arg9 in R12 */ HVSC /* invoke the hypervisor */ mr r0,r12 - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) -- cgit v1.2.3 From d72be892c810cb8269ed8a625fd925c30727639e Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Mon, 25 Jun 2012 13:33:15 +0000 Subject: powerpc: Merge VCPU_GPR Merge the defines of VCPU_GPR from different places. 
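As an illustrative sketch (not a hunk of this patch; VCPU_GPRS comes from asm-offsets, and the Rn register macros are plain numbers at this point), the single merged definition in ppc_asm.h resolves as:

	#ifdef CONFIG_PPC64
	#define ULONG_SIZE	8	/* sizeof(unsigned long) on ppc64 */
	#else
	#define ULONG_SIZE	4
	#endif
	/* byte offset of guest GPR n within the vcpu GPR array */
	#define VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))

so a use like VCPU_GPR(R4) expands to (VCPU_GPRS + (4 * 8)) on 64-bit, and the four per-file copies removed below (which variously used ULONG_SIZE, LONGBYTES or a hard-coded 4) collapse into this one.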
Signed-off-by: Michael Neuling Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ppc_asm.h | 7 +++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 3 --- arch/powerpc/kvm/book3s_interrupts.S | 8 -------- arch/powerpc/kvm/booke_interrupts.S | 2 -- arch/powerpc/kvm/bookehv_interrupts.S | 1 - 5 files changed, 7 insertions(+), 14 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index c0c6318e7c24..2c7edd567738 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -178,6 +178,13 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #define HMT_HIGH or 3,3,3 #define HMT_EXTRA_HIGH or 7,7,7 # power7 only +#ifdef CONFIG_PPC64 +#define ULONG_SIZE 8 +#else +#define ULONG_SIZE 4 +#endif +#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) + #ifdef __KERNEL__ #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bc99015030c3..e764e2361d47 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -72,9 +72,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline) mtsrr1 r6 RFI -#define ULONG_SIZE 8 -#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) - /****************************************************************************** * * * Entry code * diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 2ddab0f90a81..48cbbf862958 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -25,19 +25,11 @@ #include #if defined(CONFIG_PPC_BOOK3S_64) - -#define ULONG_SIZE 8 #define FUNC(name) GLUE(.,name) - #elif defined(CONFIG_PPC_BOOK3S_32) - -#define ULONG_SIZE 4 #define FUNC(name) name - #endif /* CONFIG_PPC_BOOK3S_XX */ - -#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) #define VCPU_LOAD_NVGPRS(vcpu) \ PPC_LL r14, VCPU_GPR(R14)(vcpu); \ PPC_LL r15, VCPU_GPR(R15)(vcpu); \ diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index e598a5a0d5e4..91c971bcddd0 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -25,8 +25,6 @@ #include #include -#define VCPU_GPR(n) (VCPU_GPRS + (n * 4)) - /* The host stack layout: */ #define HOST_R1 0 /* Implied by stwu. 
*/ #define HOST_CALLEE_LR 4 diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index a623b1d32d3e..18cee4b0098a 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -37,7 +37,6 @@ #define LONGBYTES (BITS_PER_LONG / 8) -#define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES)) #define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES)) /* The host stack layout: */ -- cgit v1.2.3 From b38c77d82e42f4e72c162f49ea75f0f0ad76f1b6 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Jul 2012 14:49:12 +1000 Subject: powerpc: Move and fix MTMSR_EERI definition Move this duplicated definition to ppc_asm.h and remove the braces which prevent the use of %rN register names. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ppc_asm.h | 2 ++ arch/powerpc/kvm/book3s_rmhandlers.S | 1 - arch/powerpc/kvm/book3s_segment.S | 2 -- 3 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 2c7edd567738..d4c589b4a2b8 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -475,6 +475,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) #ifdef CONFIG_PPC_BOOK3S_64 #define RFI rfid #define MTMSRD(r) mtmsrd r +#define MTMSR_EERI(reg) mtmsrd reg,1 #else #define FIX_SRR1(ra, rb) #ifndef CONFIG_40x @@ -483,6 +484,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) #define RFI rfi; b . /* Prevent prefetch past rfi */ #endif #define MTMSRD(r) mtmsr r +#define MTMSR_EERI(reg) mtmsr reg #define CLR_TOP32(r) #endif diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 34187585c507..ab523f3c1731 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -37,7 +37,6 @@ #if defined(CONFIG_PPC_BOOK3S_64) #define FUNC(name) GLUE(.,name) -#define MTMSR_EERI(reg) mtmsrd (reg),1 .globl kvmppc_skip_interrupt kvmppc_skip_interrupt: diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 798491a268b3..1abe4788191a 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -23,7 +23,6 @@ #define GET_SHADOW_VCPU(reg) \ mr reg, r13 -#define MTMSR_EERI(reg) mtmsrd (reg),1 #elif defined(CONFIG_PPC_BOOK3S_32) @@ -31,7 +30,6 @@ tophys(reg, r2); \ lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \ tophys(reg, reg) -#define MTMSR_EERI(reg) mtmsr (reg) #endif -- cgit v1.2.3 From 0b7673c35e9240a364594ac4f2c2dd2c111c0aba Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Mon, 25 Jun 2012 13:33:23 +0000 Subject: powerpc: Enforce usage of R0-R31 where possible Enforce the use of R0-R31 in macros where possible now that we have all the fixes in. R0-R31 macros are removed here so they can't be used anymore. They should not be defined anywhere.
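To see how the enforcement works, here is a sketch of the expansion (illustrative only, not a hunk of this patch) using the __PPC_RA encoder changed below:

	#define ___PPC_RA(a)	(((a) & 0x1f) << 16)	/* raw field encoder */
	#define __PPC_RA(a)	___PPC_RA(__REG_##a)	/* symbolic-only wrapper */

	__PPC_RA(R5)	/* -> ___PPC_RA(__REG_R5) -> ((5 & 0x1f) << 16) */
	__PPC_RA(5)	/* -> ___PPC_RA(__REG_5): fails to build, since
			 * __REG_5 is never defined; bare register
			 * numbers are rejected at compile time */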
Signed-off-by: Michael Neuling Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ppc-opcode.h | 41 ++++------------------------------- arch/powerpc/include/asm/ppc_asm.h | 17 +++++++++------ arch/powerpc/kernel/fpu.S | 12 +++++----- arch/powerpc/kvm/booke_interrupts.S | 3 ++- 4 files changed, 23 insertions(+), 50 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index c74e00778f72..d14508f82247 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -15,39 +15,6 @@ #include #include -#define R0 0 -#define R1 1 -#define R2 2 -#define R3 3 -#define R4 4 -#define R5 5 -#define R6 6 -#define R7 7 -#define R8 8 -#define R9 9 -#define R10 10 -#define R11 11 -#define R12 12 -#define R13 13 -#define R14 14 -#define R15 15 -#define R16 16 -#define R17 17 -#define R18 18 -#define R19 19 -#define R20 20 -#define R21 21 -#define R22 22 -#define R23 23 -#define R24 24 -#define R25 25 -#define R26 26 -#define R27 27 -#define R28 28 -#define R29 29 -#define R30 30 -#define R31 31 - #define __REG_R0 0 #define __REG_R1 1 #define __REG_R2 2 @@ -181,10 +148,10 @@ #define ___PPC_RB(b) (((b) & 0x1f) << 11) #define ___PPC_RS(s) (((s) & 0x1f) << 21) #define ___PPC_RT(t) ___PPC_RS(t) -#define __PPC_RA(a) (((a) & 0x1f) << 16) -#define __PPC_RB(b) (((b) & 0x1f) << 11) -#define __PPC_RS(s) (((s) & 0x1f) << 21) -#define __PPC_RT(s) __PPC_RS(s) +#define __PPC_RA(a) ___PPC_RA(__REG_##a) +#define __PPC_RB(b) ___PPC_RB(__REG_##b) +#define __PPC_RS(s) ___PPC_RS(__REG_##s) +#define __PPC_RT(t) ___PPC_RT(__REG_##t) #define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3)) #define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4)) #define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index dbc768358ac1..ea2a86e8ff95 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -126,26 +126,26 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) /* Save the lower 32 VSRs in the thread VSR region */ -#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,base,b) +#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b) #define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) #define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base) #define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base) #define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base) #define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) -#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,base,b) +#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b) #define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base) #define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base) #define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base) #define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base) #define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base) /* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */ -#define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,base,b) +#define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,R##base,R##b) #define SAVE_2VSRSU(n,b,base) 
SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base) #define SAVE_4VSRSU(n,b,base) SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base) #define SAVE_8VSRSU(n,b,base) SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base) #define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base) #define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base) -#define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,base,b) +#define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,R##base,R##b) #define REST_2VSRSU(n,b,base) REST_VSRU(n,b,base); REST_VSRU(n+1,b,base) #define REST_4VSRSU(n,b,base) REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base) #define REST_8VSRSU(n,b,base) REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base) @@ -183,15 +183,18 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #else #define ULONG_SIZE 4 #endif -#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) +#define __VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) +#define VCPU_GPR(n) __VCPU_GPR(__REG_##n) #ifdef __KERNEL__ #ifdef CONFIG_PPC64 #define STACKFRAMESIZE 256 -#define STK_REG(i) (112 + ((i)-14)*8) +#define __STK_REG(i) (112 + ((i)-14)*8) +#define STK_REG(i) __STK_REG(__REG_##i) -#define STK_PARAM(i) (48 + ((i)-3)*8) +#define __STK_PARAM(i) (48 + ((i)-3)*8) +#define STK_PARAM(i) __STK_PARAM(__REG_##i) #define XGLUE(a,b) a##b #define GLUE(a,b) XGLUE(a,b) diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index 71c1c73bc65f..e0ada05f2df3 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -26,7 +26,7 @@ #include #ifdef CONFIG_VSX -#define REST_32FPVSRS(n,c,base) \ +#define __REST_32FPVSRS(n,c,base) \ BEGIN_FTR_SECTION \ b 2f; \ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ @@ -35,7 +35,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 2: REST_32VSRS(n,c,base); \ 3: -#define SAVE_32FPVSRS(n,c,base) \ +#define __SAVE_32FPVSRS(n,c,base) \ BEGIN_FTR_SECTION \ b 2f; \ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ @@ -44,9 +44,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 2: SAVE_32VSRS(n,c,base); \ 3: #else -#define REST_32FPVSRS(n,b,base) REST_32FPRS(n, base) -#define SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base) +#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base) +#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base) #endif +#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base) +#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base) /* * This task wants to use the FPU now. @@ -79,7 +81,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) beq 1f toreal(r4) addi r4,r4,THREAD /* want last_task_used_math->thread */ - SAVE_32FPVSRS(0, r5, r4) + SAVE_32FPVSRS(0, R5, R4) mffs fr0 stfd fr0,THREAD_FPSCR(r4) PPC_LL r5,PT_REGS(r4) diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 91c971bcddd0..8fd4b2a0911b 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -34,7 +34,8 @@ #define HOST_R2 12 #define HOST_CR 16 #define HOST_NV_GPRS 20 -#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) +#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) +#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n) #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4) #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. 
*/ -- cgit v1.2.3 From 9778b696a0188ad3b3524b383953ee73b31b7b68 Mon Sep 17 00:00:00 2001 From: Stuart Yoder Date: Thu, 5 Jul 2012 04:41:35 +0000 Subject: powerpc: Use CURRENT_THREAD_INFO instead of open coded assembly Signed-off-by: Stuart Yoder Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/exception-64s.h | 4 ++-- arch/powerpc/include/asm/thread_info.h | 6 ++++++ arch/powerpc/kernel/entry_32.S | 24 ++++++++++++------------ arch/powerpc/kernel/entry_64.S | 12 ++++++------ arch/powerpc/kernel/exceptions-64e.S | 2 +- arch/powerpc/kernel/exceptions-64s.S | 2 +- arch/powerpc/kernel/head_fsl_booke.S | 2 +- arch/powerpc/kernel/idle_6xx.S | 4 ++-- arch/powerpc/kernel/idle_book3e.S | 2 +- arch/powerpc/kernel/idle_e500.S | 4 ++-- arch/powerpc/kernel/idle_power4.S | 2 +- arch/powerpc/kernel/iommu.c.rej | 22 ++++++++++++++++++++++ arch/powerpc/kernel/misc_32.S | 4 ++-- arch/powerpc/kvm/bookehv_interrupts.S | 6 +----- arch/powerpc/mm/hash_low_32.S | 8 ++++---- arch/powerpc/sysdev/6xx-suspend.S | 2 +- 16 files changed, 65 insertions(+), 41 deletions(-) create mode 100644 arch/powerpc/kernel/iommu.c.rej (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index d58fc4e4149c..a43c1473915f 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -293,7 +293,7 @@ label##_hv: \ #define RUNLATCH_ON \ BEGIN_FTR_SECTION \ - clrrdi r3,r1,THREAD_SHIFT; \ + CURRENT_THREAD_INFO(r3, r1); \ ld r4,TI_LOCAL_FLAGS(r3); \ andi. r0,r4,_TLF_RUNLATCH; \ beql ppc64_runlatch_on_trampoline; \ @@ -332,7 +332,7 @@ label##_common: \ #ifdef CONFIG_PPC_970_NAP #define FINISH_NAP \ BEGIN_FTR_SECTION \ - clrrdi r11,r1,THREAD_SHIFT; \ + CURRENT_THREAD_INFO(r11, r1); \ ld r9,TI_LOCAL_FLAGS(r11); \ andi. 
r10,r9,_TLF_NAPPING; \ bnel power4_fixup_nap; \ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 68831e9cf82f..faf93529cbf0 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -22,6 +22,12 @@ #define THREAD_SIZE (1 << THREAD_SHIFT) +#ifdef CONFIG_PPC64 +#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT +#else +#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT +#endif + #ifndef __ASSEMBLY__ #include #include diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index ba3aeb4bc06a..bad42e3d27a9 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -92,7 +92,7 @@ crit_transfer_to_handler: mfspr r8,SPRN_SPRG_THREAD lwz r0,KSP_LIMIT(r8) stw r0,SAVED_KSP_LIMIT(r11) - rlwimi r0,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r0, r1) stw r0,KSP_LIMIT(r8) /* fall through */ #endif @@ -112,7 +112,7 @@ crit_transfer_to_handler: mfspr r8,SPRN_SPRG_THREAD lwz r0,KSP_LIMIT(r8) stw r0,saved_ksp_limit@l(0) - rlwimi r0,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r0, r1) stw r0,KSP_LIMIT(r8) /* fall through */ #endif @@ -158,7 +158,7 @@ transfer_to_handler: tophys(r11,r11) addi r11,r11,global_dbcr0@l #ifdef CONFIG_SMP - rlwinm r9,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r9, r1) lwz r9,TI_CPU(r9) slwi r9,r9,3 add r11,r11,r9 @@ -179,7 +179,7 @@ transfer_to_handler: ble- stack_ovf /* then the kernel stack overflowed */ 5: #if defined(CONFIG_6xx) || defined(CONFIG_E500) - rlwinm r9,r1,0,0,31-THREAD_SHIFT + CURRENT_THREAD_INFO(r9, r1) tophys(r9,r9) /* check local flags */ lwz r12,TI_LOCAL_FLAGS(r9) mtcrf 0x01,r12 @@ -333,7 +333,7 @@ _GLOBAL(DoSyscall) mtmsr r11 1: #endif /* CONFIG_TRACE_IRQFLAGS */ - rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ + CURRENT_THREAD_INFO(r10, r1) lwz r11,TI_FLAGS(r10) andi. r11,r11,_TIF_SYSCALL_T_OR_A bne- syscall_dotrace @@ -354,7 +354,7 @@ ret_from_syscall: bl do_show_syscall_exit #endif mr r6,r3 - rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ + CURRENT_THREAD_INFO(r12, r1) /* disable interrupts so current_thread_info()->flags can't change */ LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ /* Note: We don't bother telling lockdep about it */ @@ -815,7 +815,7 @@ ret_from_except: user_exc_return: /* r10 contains MSR_KERNEL here */ /* Check current_thread_info()->flags */ - rlwinm r9,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r9, r1) lwz r9,TI_FLAGS(r9) andi. r0,r9,_TIF_USER_WORK_MASK bne do_work @@ -835,7 +835,7 @@ restore_user: /* N.B. the only way to get here is from the beq following ret_from_except. */ resume_kernel: /* check current_thread_info->preempt_count */ - rlwinm r9,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r9, r1) lwz r0,TI_PREEMPT(r9) cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ bne restore @@ -852,7 +852,7 @@ resume_kernel: bl trace_hardirqs_off #endif 1: bl preempt_schedule_irq - rlwinm r9,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r9, r1) lwz r3,TI_FLAGS(r9) andi. 
r0,r3,_TIF_NEED_RESCHED bne- 1b @@ -1122,7 +1122,7 @@ ret_from_debug_exc: lwz r10,SAVED_KSP_LIMIT(r1) stw r10,KSP_LIMIT(r9) lwz r9,THREAD_INFO-THREAD(r9) - rlwinm r10,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r10, r1) lwz r10,TI_PREEMPT(r10) stw r10,TI_PREEMPT(r9) RESTORE_xSRR(SRR0,SRR1); @@ -1156,7 +1156,7 @@ load_dbcr0: lis r11,global_dbcr0@ha addi r11,r11,global_dbcr0@l #ifdef CONFIG_SMP - rlwinm r9,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r9, r1) lwz r9,TI_CPU(r9) slwi r9,r9,3 add r11,r11,r9 @@ -1197,7 +1197,7 @@ recheck: LOAD_MSR_KERNEL(r10,MSR_KERNEL) SYNC MTMSRD(r10) /* disable interrupts */ - rlwinm r9,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r9, r1) lwz r9,TI_FLAGS(r9) andi. r0,r9,_TIF_NEED_RESCHED bne- do_resched diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index cf38a17ab28a..4b01a25e29ef 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -146,7 +146,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) REST_2GPRS(7,r1) addi r9,r1,STACK_FRAME_OVERHEAD #endif - clrrdi r11,r1,THREAD_SHIFT + CURRENT_THREAD_INFO(r11, r1) ld r10,TI_FLAGS(r11) andi. r11,r10,_TIF_SYSCALL_T_OR_A bne- syscall_dotrace @@ -181,7 +181,7 @@ syscall_exit: bl .do_show_syscall_exit ld r3,RESULT(r1) #endif - clrrdi r12,r1,THREAD_SHIFT + CURRENT_THREAD_INFO(r12, r1) ld r8,_MSR(r1) #ifdef CONFIG_PPC_BOOK3S @@ -260,7 +260,7 @@ syscall_dotrace: ld r7,GPR7(r1) ld r8,GPR8(r1) addi r9,r1,STACK_FRAME_OVERHEAD - clrrdi r10,r1,THREAD_SHIFT + CURRENT_THREAD_INFO(r10, r1) ld r10,TI_FLAGS(r10) b .Lsyscall_dotrace_cont @@ -500,7 +500,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 2: #endif /* !CONFIG_PPC_BOOK3S */ - clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ + CURRENT_THREAD_INFO(r7, r8) /* base of new stack */ /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE because we don't need to leave the 288-byte ABI gap at the top of the kernel stack. */ @@ -559,7 +559,7 @@ _GLOBAL(ret_from_except_lite) mtmsrd r10,1 /* Update machine state */ #endif /* CONFIG_PPC_BOOK3E */ - clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ + CURRENT_THREAD_INFO(r9, r1) ld r3,_MSR(r1) ld r4,TI_FLAGS(r9) andi. r3,r3,MSR_PR @@ -602,7 +602,7 @@ resume_kernel: 1: bl .preempt_schedule_irq /* Re-test flags and eventually loop */ - clrrdi r9,r1,THREAD_SHIFT + CURRENT_THREAD_INFO(r9, r1) ld r4,TI_FLAGS(r9) andi. r0,r4,_TIF_NEED_RESCHED bne 1b diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index ecba705bd628..98be7f0cd227 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -222,7 +222,7 @@ exc_##n##_bad_stack: \ * interrupts happen before the wait instruction. */ #define CHECK_NAPPING() \ - clrrdi r11,r1,THREAD_SHIFT; \ + CURRENT_THREAD_INFO(r11, r1); \ ld r10,TI_LOCAL_FLAGS(r11); \ andi. r9,r10,_TLF_NAPPING; \ beq+ 1f; \ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 1c06d2971545..8ad346882684 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -851,7 +851,7 @@ BEGIN_FTR_SECTION bne- do_ste_alloc /* If so handle it */ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) - clrrdi r11,r1,THREAD_SHIFT + CURRENT_THREAD_INFO(r11, r1) lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ andis. r0,r0,NMI_MASK@h /* (i.e. 
an irq when soft-disabled) */ bne 77f /* then don't call hash_page now */ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 1f4434a38608..7e7bd88674db 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -192,7 +192,7 @@ _ENTRY(__early_start) li r0,0 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) - rlwinm r22,r1,0,0,31-THREAD_SHIFT /* current thread_info */ + CURRENT_THREAD_INFO(r22, r1) stw r24, TI_CPU(r22) bl early_init diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S index 15c611de1ee2..1686916cc7f0 100644 --- a/arch/powerpc/kernel/idle_6xx.S +++ b/arch/powerpc/kernel/idle_6xx.S @@ -135,7 +135,7 @@ BEGIN_FTR_SECTION DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - rlwinm r9,r1,0,0,31-THREAD_SHIFT /* current thread_info */ + CURRENT_THREAD_INFO(r9, r1) lwz r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ ori r8,r8,_TLF_NAPPING /* so when we take an exception */ stw r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ @@ -158,7 +158,7 @@ _GLOBAL(power_save_ppc32_restore) stw r9,_NIP(r11) /* make it do a blr */ #ifdef CONFIG_SMP - rlwinm r12,r11,0,0,31-THREAD_SHIFT + CURRENT_THREAD_INFO(r12, r11) lwz r11,TI_CPU(r12) /* get cpu number * 4 */ slwi r11,r11,2 #else diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index ff007b59448d..4c7cb4008585 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S @@ -60,7 +60,7 @@ _GLOBAL(book3e_idle) 1: /* Let's set the _TLF_NAPPING flag so interrupts make us return * to the right spot */ - clrrdi r11,r1,THREAD_SHIFT + CURRENT_THREAD_INFO(r11, r1) ld r10,TI_LOCAL_FLAGS(r11) ori r10,r10,_TLF_NAPPING std r10,TI_LOCAL_FLAGS(r11) diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S index 4f0ab85f3788..15448668988d 100644 --- a/arch/powerpc/kernel/idle_e500.S +++ b/arch/powerpc/kernel/idle_e500.S @@ -21,7 +21,7 @@ .text _GLOBAL(e500_idle) - rlwinm r3,r1,0,0,31-THREAD_SHIFT /* current thread_info */ + CURRENT_THREAD_INFO(r3, r1) lwz r4,TI_LOCAL_FLAGS(r3) /* set napping bit */ ori r4,r4,_TLF_NAPPING /* so when we take an exception */ stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */ @@ -96,7 +96,7 @@ _GLOBAL(power_save_ppc32_restore) stw r9,_NIP(r11) /* make it do a blr */ #ifdef CONFIG_SMP - rlwinm r12,r1,0,0,31-THREAD_SHIFT + CURRENT_THREAD_INFO(r12, r1) lwz r11,TI_CPU(r12) /* get cpu number * 4 */ slwi r11,r11,2 #else diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index 2c71b0fc9f91..e3edaa189911 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -59,7 +59,7 @@ BEGIN_FTR_SECTION DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - clrrdi r9,r1,THREAD_SHIFT /* current thread_info */ + CURRENT_THREAD_INFO(r9, r1) ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ ori r8,r8,_TLF_NAPPING /* so when we take an exception */ std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ diff --git a/arch/powerpc/kernel/iommu.c.rej b/arch/powerpc/kernel/iommu.c.rej new file mode 100644 index 000000000000..9d10d341cf86 --- /dev/null +++ b/arch/powerpc/kernel/iommu.c.rej @@ -0,0 +1,22 @@ +--- arch/powerpc/kernel/iommu.c 2012-06-08 09:01:02.785709100 +1000 ++++ arch/powerpc/kernel/iommu.c 2012-06-08 09:01:07.489784856 +1000 +@@ -33,7 +33,9 @@ + #include + #include + #include ++#include + #include ++#include + #include + #include + #include +@@ -171,6 +261,9 @@ + return DMA_ERROR_CODE; + 
} + ++ if (should_fail_iommu(dev)) ++ return DMA_ERROR_CODE; ++ + if (handle && *handle) + start = *handle; + else diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 386d57f66f28..407e293aad2f 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -179,7 +179,7 @@ _GLOBAL(low_choose_750fx_pll) mtspr SPRN_HID1,r4 /* Store new HID1 image */ - rlwinm r6,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r6, r1) lwz r6,TI_CPU(r6) slwi r6,r6,2 addis r6,r6,nap_save_hid1@ha @@ -699,7 +699,7 @@ _GLOBAL(kernel_thread) #ifdef CONFIG_SMP _GLOBAL(start_secondary_resume) /* Reset stack */ - rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ + CURRENT_THREAD_INFO(r1, r1) addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD li r3,0 stw r3,0(r1) /* Zero the stack frame pointer */ diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 18cee4b0098a..1685dc43bcf2 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -160,11 +160,7 @@ mtspr SPRN_EPLC, r8 /* disable preemption, so we are sure we hit the fixup handler */ -#ifdef CONFIG_PPC64 - clrrdi r8,r1,THREAD_SHIFT -#else - rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */ -#endif + CURRENT_THREAD_INFO(r8, r1) li r7, 1 stw r7, TI_PREEMPT(r8) diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S index b13d58932bf6..115347f74ce5 100644 --- a/arch/powerpc/mm/hash_low_32.S +++ b/arch/powerpc/mm/hash_low_32.S @@ -184,7 +184,7 @@ _GLOBAL(add_hash_page) add r3,r3,r0 /* note create_hpte trims to 24 bits */ #ifdef CONFIG_SMP - rlwinm r8,r1,0,0,(31-THREAD_SHIFT) /* use cpu number to make tag */ + CURRENT_THREAD_INFO(r8, r1) /* use cpu number to make tag */ lwz r8,TI_CPU(r8) /* to go in mmu_hash_lock */ oris r8,r8,12 #endif /* CONFIG_SMP */ @@ -545,7 +545,7 @@ _GLOBAL(flush_hash_pages) #ifdef CONFIG_SMP addis r9,r7,mmu_hash_lock@ha addi r9,r9,mmu_hash_lock@l - rlwinm r8,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r8, r1) add r8,r8,r7 lwz r8,TI_CPU(r8) oris r8,r8,9 @@ -639,7 +639,7 @@ _GLOBAL(flush_hash_patch_B) */ _GLOBAL(_tlbie) #ifdef CONFIG_SMP - rlwinm r8,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r8, r1) lwz r8,TI_CPU(r8) oris r8,r8,11 mfmsr r10 @@ -677,7 +677,7 @@ _GLOBAL(_tlbie) */ _GLOBAL(_tlbia) #if defined(CONFIG_SMP) - rlwinm r8,r1,0,0,(31-THREAD_SHIFT) + CURRENT_THREAD_INFO(r8, r1) lwz r8,TI_CPU(r8) oris r8,r8,10 mfmsr r10 diff --git a/arch/powerpc/sysdev/6xx-suspend.S b/arch/powerpc/sysdev/6xx-suspend.S index 21cda085d926..cf48e9cb2575 100644 --- a/arch/powerpc/sysdev/6xx-suspend.S +++ b/arch/powerpc/sysdev/6xx-suspend.S @@ -29,7 +29,7 @@ _GLOBAL(mpc6xx_enter_standby) ori r5, r5, ret_from_standby@l mtlr r5 - rlwinm r5, r1, 0, 0, 31-THREAD_SHIFT + CURRENT_THREAD_INFO(r5, r1) lwz r6, TI_LOCAL_FLAGS(r5) ori r6, r6, _TLF_SLEEPING stw r6, TI_LOCAL_FLAGS(r5) -- cgit v1.2.3 From 18ad51dd342a7eb09dbcd059d0b451b616d4dafc Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Wed, 4 Jul 2012 20:37:11 +0000 Subject: powerpc: Add VDSO version of getcpu We have a request for a fast method of getting CPU and NUMA node IDs from userspace. This patch implements a getcpu VDSO function, similar to x86. Ben suggested we use SPRG3 which is userspace readable. SPRG3 can be modified by a KVM guest, so we save the SPRG3 value in the paca and restore it when transitioning from the guest to the host. I have a glibc patch that implements sched_getcpu on top of this. 
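As a hypothetical userspace sketch (for illustration only; the real consumer is the __kernel_getcpu VDSO entry added below, which the glibc patch wires into sched_getcpu()), the layout can be decoded on 64-bit by reading USPRG3, the user-visible alias of SPRG3:

	unsigned long val;
	unsigned int cpu, node;

	/* SPR 0x103 (USPRG3) mirrors SPRG3 for userspace reads */
	asm volatile("mfspr %0,0x103" : "=r" (val));
	cpu  = val & 0xffff;		/* CPU in the bottom 16 bits */
	node = (val >> 16) & 0xffff;	/* NUMA node in the next 16 bits */

which matches the clrlwi/rlwinm decode done in getcpu.S.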
Testing on a POWER7: baseline: 538 cycles vdso: 30 cycles Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/kvm_book3s_asm.h | 1 + arch/powerpc/include/asm/reg.h | 5 ++-- arch/powerpc/include/asm/vdso.h | 2 ++ arch/powerpc/kernel/asm-offsets.c | 1 + arch/powerpc/kernel/smp.c | 3 +++ arch/powerpc/kernel/vdso.c | 28 +++++++++++++++++++ arch/powerpc/kernel/vdso32/Makefile | 4 ++- arch/powerpc/kernel/vdso32/getcpu.S | 45 +++++++++++++++++++++++++++++++ arch/powerpc/kernel/vdso32/vdso32.lds.S | 3 +++ arch/powerpc/kernel/vdso64/Makefile | 2 +- arch/powerpc/kernel/vdso64/getcpu.S | 45 +++++++++++++++++++++++++++++++ arch/powerpc/kernel/vdso64/vdso64.lds.S | 1 + arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 +++ 13 files changed, 140 insertions(+), 4 deletions(-) create mode 100644 arch/powerpc/kernel/vdso32/getcpu.S create mode 100644 arch/powerpc/kernel/vdso64/getcpu.S (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 88609b23b775..bfcd00c1485d 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -74,6 +74,7 @@ struct kvmppc_host_state { ulong vmhandler; ulong scratch0; ulong scratch1; + ulong sprg3; u8 in_guest; u8 restore_hid5; u8 napping; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index f0cb7f461b9d..2baeb7c8764f 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -491,6 +491,7 @@ #define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */ #define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */ #define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */ +#define SPRN_USPRG3 0x103 /* SPRG3 userspace read */ #define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */ #define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */ #define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */ @@ -753,14 +754,14 @@ * 64-bit server: * - SPRG0 unused (reserved for HV on Power4) * - SPRG2 scratch for exception vectors - * - SPRG3 unused (user visible) + * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible) * - HSPRG0 stores PACA in HV mode * - HSPRG1 scratch for "HV" exceptions * * 64-bit embedded * - SPRG0 generic exception scratch * - SPRG2 TLB exception stack - * - SPRG3 unused (user visible) + * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible) * - SPRG4 unused (user visible) * - SPRG6 TLB miss scratch (user visible, sorry !) 
* - SPRG7 critical exception scratch diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index dc0419b66f17..50f261bc3e95 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -22,6 +22,8 @@ extern unsigned long vdso64_rt_sigtramp; extern unsigned long vdso32_sigtramp; extern unsigned long vdso32_rt_sigtramp; +int __cpuinit vdso_getcpu_init(void); + #else /* __ASSEMBLY__ */ #ifdef __VDSO64__ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 52c7ad78242e..85b05c463fae 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -533,6 +533,7 @@ int main(void) HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); + HSTATE_FIELD(HSTATE_SPRG3, sprg3); HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); HSTATE_FIELD(HSTATE_NAPPING, napping); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index e1417c42155c..0321007086f7 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -48,6 +48,7 @@ #ifdef CONFIG_PPC64 #include #endif +#include #include #ifdef DEBUG @@ -570,6 +571,8 @@ void __devinit start_secondary(void *unused) #ifdef CONFIG_PPC64 if (system_state == SYSTEM_RUNNING) vdso_data->processorCount++; + + vdso_getcpu_init(); #endif notify_cpu_starting(cpu); set_cpu_online(cpu, true); diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 9eb5b9b536a7..b67db22e102d 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -706,6 +706,34 @@ static void __init vdso_setup_syscall_map(void) } } +#ifdef CONFIG_PPC64 +int __cpuinit vdso_getcpu_init(void) +{ + unsigned long cpu, node, val; + + /* + * SPRG3 contains the CPU in the bottom 16 bits and the NUMA node in + * the next 16 bits. The VDSO uses this to implement getcpu(). + */ + cpu = get_cpu(); + WARN_ON_ONCE(cpu > 0xffff); + + node = cpu_to_node(cpu); + WARN_ON_ONCE(node > 0xffff); + + val = (cpu & 0xfff) | ((node & 0xffff) << 16); + mtspr(SPRN_SPRG3, val); +#ifdef CONFIG_KVM_BOOK3S_HANDLER + get_paca()->kvm_hstate.sprg3 = val; +#endif + + put_cpu(); + + return 0; +} +/* We need to call this before SMP init */ +early_initcall(vdso_getcpu_init); +#endif static int __init vdso_init(void) { diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 9a7946c41738..53e6c9b979ec 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile @@ -1,7 +1,9 @@ # List of files in the vdso, has to be asm only for now -obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o +obj-vdso32-$(CONFIG_PPC64) = getcpu.o +obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o \ + $(obj-vdso32-y) # Build rules diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S new file mode 100644 index 000000000000..47afd08c90f7 --- /dev/null +++ b/arch/powerpc/kernel/vdso32/getcpu.S @@ -0,0 +1,45 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2012 + * + * Author: Anton Blanchard + */ +#include +#include + + .text +/* + * Exact prototype of getcpu + * + * int __kernel_getcpu(unsigned *cpu, unsigned *node); + * + */ +V_FUNCTION_BEGIN(__kernel_getcpu) + .cfi_startproc + mfspr r5,SPRN_USPRG3 + cmpdi cr0,r3,0 + cmpdi cr1,r4,0 + clrlwi r6,r5,16 + rlwinm r7,r5,16,31-15,31-0 + beq cr0,1f + stw r6,0(r3) +1: beq cr1,2f + stw r7,0(r4) +2: crclr cr0*4+so + li r3,0 /* always success */ + blr + .cfi_endproc +V_FUNCTION_END(__kernel_getcpu) diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 0546bcd49cd0..43200ba2e570 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -147,6 +147,9 @@ VERSION __kernel_sync_dicache_p5; __kernel_sigtramp32; __kernel_sigtramp_rt32; +#ifdef CONFIG_PPC64 + __kernel_getcpu; +#endif local: *; }; diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 8c500d8622e4..effca9404b17 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile @@ -1,6 +1,6 @@ # List of files in the vdso, has to be asm only for now -obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o +obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o getcpu.o # Build rules diff --git a/arch/powerpc/kernel/vdso64/getcpu.S b/arch/powerpc/kernel/vdso64/getcpu.S new file mode 100644 index 000000000000..47afd08c90f7 --- /dev/null +++ b/arch/powerpc/kernel/vdso64/getcpu.S @@ -0,0 +1,45 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard
+ */
+#include
+#include
+
+	.text
+/*
+ * Exact prototype of getcpu
+ *
+ * int __kernel_getcpu(unsigned *cpu, unsigned *node);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_getcpu)
+  .cfi_startproc
+	mfspr	r5,SPRN_USPRG3
+	cmpdi	cr0,r3,0
+	cmpdi	cr1,r4,0
+	clrlwi	r6,r5,16
+	rlwinm	r7,r5,16,31-15,31-0
+	beq	cr0,1f
+	stw	r6,0(r3)
+1:	beq	cr1,2f
+	stw	r7,0(r4)
+2:	crclr	cr0*4+so
+	li	r3,0		/* always success */
+	blr
+  .cfi_endproc
+V_FUNCTION_END(__kernel_getcpu)
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 0e615404e247..e6c1758f3588 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -146,6 +146,7 @@ VERSION
 		__kernel_sync_dicache;
 		__kernel_sync_dicache_p5;
 		__kernel_sigtramp_rt64;
+		__kernel_getcpu;
 
 	local: *;
 	};
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index e764e2361d47..5a84c8d3d040 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1064,6 +1064,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mtspr	SPRN_DABR,r5
 	mtspr	SPRN_DABRX,r6
 
+	/* Restore SPRG3 */
+	ld	r3,HSTATE_SPRG3(r13)
+	mtspr	SPRN_SPRG3,r3
+
 	/*
 	 * Reload DEC. HDEC interrupts were disabled when
 	 * we reloaded the host's LPCR value.
--
cgit v1.2.3

From 6328e593c3df5e81ad7d18a2752ef2235391e9cc Mon Sep 17 00:00:00 2001
From: Bharat Bhushan
Date: Wed, 20 Jun 2012 05:56:53 +0000
Subject: booke/bookehv: Add host crit-watchdog exception support

Signed-off-by: Bharat Bhushan
Signed-off-by: Alexander Graf
---
 arch/powerpc/include/asm/hw_irq.h |  2 ++
 arch/powerpc/kvm/booke.c          | 21 +++++++++++++++++++++
 2 files changed, 23 insertions(+)

(limited to 'arch/powerpc/kvm')

diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c9aac24b02e2..2aadb47efaec 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -34,6 +34,8 @@ extern void __replay_interrupt(unsigned int vector);
 
 extern void timer_interrupt(struct pt_regs *);
 extern void performance_monitor_exception(struct pt_regs *regs);
+extern void WatchdogException(struct pt_regs *regs);
+extern void unknown_exception(struct pt_regs *regs);
 
 #ifdef CONFIG_PPC64
 #include
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 86681eec60b1..d25a097c852b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -612,6 +612,12 @@ static void kvmppc_fill_pt_regs(struct pt_regs *regs)
 	regs->link = lr;
 }
 
+/*
+ * Interrupts that need to be handled by host interrupt handlers have
+ * their corresponding host handlers called from here, in much the same
+ * way (though not exactly) as they are called from the low-level
+ * handlers (such as from arch/powerpc/kernel/head_fsl_booke.S).
+ */
 static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
 				     unsigned int exit_nr)
 {
@@ -639,6 +645,17 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
 		kvmppc_fill_pt_regs(&regs);
 		performance_monitor_exception(&regs);
 		break;
+	case BOOKE_INTERRUPT_WATCHDOG:
+		kvmppc_fill_pt_regs(&regs);
+#ifdef CONFIG_BOOKE_WDT
+		WatchdogException(&regs);
+#else
+		unknown_exception(&regs);
+#endif
+		break;
+	case BOOKE_INTERRUPT_CRITICAL:
+		unknown_exception(&regs);
+		break;
 	}
 }
 
@@ -683,6 +700,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
+	case BOOKE_INTERRUPT_WATCHDOG:
+		r = RESUME_GUEST;
+		break;
+
 	case BOOKE_INTERRUPT_DOORBELL:
 		kvmppc_account_exit(vcpu, DBELL_EXITS);
 		r = RESUME_GUEST;
--
cgit v1.2.3

From 75c44bbb20807b5148eae19642a0fdb8db20c344 Mon Sep 17 00:00:00 2001
From: Bharat Bhushan
Date: Wed, 20 Jun 2012 05:56:54 +0000
Subject: booke: Added crit/mc exception handler for e500v2

The watchdog interrupt is taken at the critical exception level, so this
patch was tested with a host watchdog exception occurring while a guest
was running.

Signed-off-by: Bharat Bhushan
Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/booke_interrupts.S | 55 ++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 28 deletions(-)

(limited to 'arch/powerpc/kvm')

diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8feec2ff3928..09456c4719f3 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -53,16 +53,21 @@
 (1<
Date: Mon, 18 Jun 2012 12:14:55 +0000
Subject: KVM: PPC: bookehv64: Add support for std/ld emulation.

Add support for std/ld emulation.

Signed-off-by: Varun Sethi
Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/emulate.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'arch/powerpc/kvm')

diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index f90e86dea7a2..ee04abaefe23 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -59,11 +59,13 @@
 #define OP_31_XOP_STHBRX 918
 
 #define OP_LWZ  32
+#define OP_LD   58
 #define OP_LWZU 33
 #define OP_LBZ  34
 #define OP_LBZU 35
 #define OP_STW  36
 #define OP_STWU 37
+#define OP_STD  62
 #define OP_STB  38
 #define OP_STBU 39
 #define OP_LHZ  40
@@ -392,6 +394,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		break;
 
+	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
+	case OP_LD:
+		rt = get_rt(inst);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+		break;
+
 	case OP_LWZU:
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
@@ -412,6 +420,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 					       4, 1);
 		break;
 
+	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
+	case OP_STD:
+		rs = get_rs(inst);
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
+					       8, 1);
+		break;
+
 	case OP_STWU:
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
--
cgit v1.2.3

From 9997782ed5c3d831d23b3252db3af2d83a5aded9 Mon Sep 17 00:00:00 2001
From: Mihai Caraman
Date: Fri, 22 Jun 2012 13:33:12 +0000
Subject: KVM: PPC: bookehv: Add ESR flag to Data Storage Interrupt

The ESR register is required by the Data Storage Interrupt handling code.
Add the corresponding flag to the interrupt handler.
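Concretely, capturing ESR lets the emulation path tell the two kinds of data storage faults apart. A minimal sketch, not code from this patch (it assumes the architected ESR_ST "store" bit from reg_booke.h):

    /* With NEED_ESR set, vcpu->arch.shared->esr is valid on DSI exits,
     * so the handler can distinguish a faulting store from a load. */
    static bool dsi_was_store(struct kvm_vcpu *vcpu)
    {
            return (vcpu->arch.shared->esr & ESR_ST) != 0;
    }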
Signed-off-by: Mihai Caraman
Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/bookehv_interrupts.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kvm')

diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 6048a00515d7..0fa2ef7df036 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -267,7 +267,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
 kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
 	SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
 kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
-	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
+	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
 kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
 kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
 kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
--
cgit v1.2.3

From c7ba7771c3dd01183d3155640129ec7c9707f082 Mon Sep 17 00:00:00 2001
From: Mihai Caraman
Date: Mon, 25 Jun 2012 02:26:19 +0000
Subject: KVM: PPC64: booke: Set interrupt computation mode for 64-bit host

A 64-bit host needs to remain in 64-bit mode when an exception takes
place. Set the interrupt computation mode in the EPCR register.

Signed-off-by: Mihai Caraman
Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/e500mc.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/kvm')

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index fe6c1de6b701..db97ee3a0827 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Varun Sethi,
  *
@@ -183,6 +183,9 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
 				 SPRN_EPCR_DUVD;
+#ifdef CONFIG_64BIT
+	vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
+#endif
 	vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
 	vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
 	vcpu->arch.epsc = vcpu->arch.eplc;
--
cgit v1.2.3

From 66c9897d9d7675bfb8f4cc4d57ceb00b6a12a2e8 Mon Sep 17 00:00:00 2001
From: Mihai Caraman
Date: Mon, 25 Jun 2012 02:26:26 +0000
Subject: KVM: PPC: e500mc: Fix tlbilx emulation for 64-bit guests

The tlbilxva emulation was using a u32 variable for the guest effective
address. Replace it with the gva_t type to handle 64-bit guests.

Signed-off-by: Mihai Caraman
Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/e500mc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/kvm')

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index db97ee3a0827..1f89d26e65fb 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -57,7 +57,8 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
 		       struct kvm_book3e_206_tlb_entry *gtlbe)
 {
 	unsigned int tid, ts;
-	u32 val, eaddr, lpid;
+	gva_t eaddr;
+	u32 val, lpid;
 	unsigned long flags;
 
 	ts = get_tlb_ts(gtlbe);
--
cgit v1.2.3

From 0c1fc3c3c496f8719fbea7fd3668491413ad420e Mon Sep 17 00:00:00 2001
From: Bharat Bhushan
Date: Wed, 27 Jun 2012 19:37:31 +0000
Subject: KVM: PPC: Critical interrupt emulation support

The rfci instruction and the CSRR0/1 registers are emulated.
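For context, delivering a critical interrupt is the mirror image of rfci: entry saves state into CSRR0/1, and the emulated rfci below restores it. A rough sketch of the delivery half (the field names follow the booke code, but this is simplified and not a literal copy of kvmppc_booke_irqprio_deliver()):

    static void deliver_critical(struct kvm_vcpu *vcpu)
    {
            vcpu->arch.csrr0 = vcpu->arch.pc;            /* where to resume */
            vcpu->arch.csrr1 = vcpu->arch.shared->msr;   /* MSR image to restore */
            vcpu->arch.pc = vcpu->arch.ivpr |
                            vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
    }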
Signed-off-by: Scott Wood Signed-off-by: Stuart Yoder Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf --- arch/powerpc/kvm/booke_emulate.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 9eb9809eb13e..12834bb608ab 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -24,6 +24,7 @@ #include "booke.h" #define OP_19_XOP_RFI 50 +#define OP_19_XOP_RFCI 51 #define OP_31_XOP_MFMSR 83 #define OP_31_XOP_WRTEE 131 @@ -36,6 +37,12 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); } +static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) +{ + vcpu->arch.pc = vcpu->arch.csrr0; + kvmppc_set_msr(vcpu, vcpu->arch.csrr1); +} + int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { @@ -52,6 +59,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, *advance = 0; break; + case OP_19_XOP_RFCI: + kvmppc_emul_rfci(vcpu); + kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS); + *advance = 0; + break; + default: emulated = EMULATE_FAIL; break; @@ -113,6 +126,12 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_ESR: vcpu->arch.shared->esr = spr_val; break; + case SPRN_CSRR0: + vcpu->arch.csrr0 = spr_val; + break; + case SPRN_CSRR1: + vcpu->arch.csrr1 = spr_val; + break; case SPRN_DBCR0: vcpu->arch.dbcr0 = spr_val; break; @@ -232,6 +251,12 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) case SPRN_ESR: *spr_val = vcpu->arch.shared->esr; break; + case SPRN_CSRR0: + *spr_val = vcpu->arch.csrr0; + break; + case SPRN_CSRR1: + *spr_val = vcpu->arch.csrr1; + break; case SPRN_DBCR0: *spr_val = vcpu->arch.dbcr0; break; -- cgit v1.2.3 From 38df850172d3e09a4581419c516aa77bdcab431a Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 24 Jul 2012 13:02:34 +0000 Subject: powerpc/kvm/bookehv: Fix build regression After merging the register type check patches from Ben's tree, the hv enabled booke implementation ceased to compile. This patch fixes things up so everyone's happy again. Signed-off-by: Alexander Graf Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kvm/bookehv_interrupts.S | 77 ++++++++++++++++++----------------- 1 file changed, 39 insertions(+), 38 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index d28c2d43ac1b..099fe8272b57 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -50,8 +50,9 @@ #define HOST_R2 (3 * LONGBYTES) #define HOST_CR (4 * LONGBYTES) #define HOST_NV_GPRS (5 * LONGBYTES) -#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES)) -#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES) +#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES)) +#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n) +#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + LONGBYTES) #define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */ #define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */ @@ -410,24 +411,24 @@ heavyweight_exit: PPC_STL r31, VCPU_GPR(R31)(r4) /* Load host non-volatile register state from host stack. 
 */
-	PPC_LL	r14, HOST_NV_GPR(r14)(r1)
-	PPC_LL	r15, HOST_NV_GPR(r15)(r1)
-	PPC_LL	r16, HOST_NV_GPR(r16)(r1)
-	PPC_LL	r17, HOST_NV_GPR(r17)(r1)
-	PPC_LL	r18, HOST_NV_GPR(r18)(r1)
-	PPC_LL	r19, HOST_NV_GPR(r19)(r1)
-	PPC_LL	r20, HOST_NV_GPR(r20)(r1)
-	PPC_LL	r21, HOST_NV_GPR(r21)(r1)
-	PPC_LL	r22, HOST_NV_GPR(r22)(r1)
-	PPC_LL	r23, HOST_NV_GPR(r23)(r1)
-	PPC_LL	r24, HOST_NV_GPR(r24)(r1)
-	PPC_LL	r25, HOST_NV_GPR(r25)(r1)
-	PPC_LL	r26, HOST_NV_GPR(r26)(r1)
-	PPC_LL	r27, HOST_NV_GPR(r27)(r1)
-	PPC_LL	r28, HOST_NV_GPR(r28)(r1)
-	PPC_LL	r29, HOST_NV_GPR(r29)(r1)
-	PPC_LL	r30, HOST_NV_GPR(r30)(r1)
-	PPC_LL	r31, HOST_NV_GPR(r31)(r1)
+	PPC_LL	r14, HOST_NV_GPR(R14)(r1)
+	PPC_LL	r15, HOST_NV_GPR(R15)(r1)
+	PPC_LL	r16, HOST_NV_GPR(R16)(r1)
+	PPC_LL	r17, HOST_NV_GPR(R17)(r1)
+	PPC_LL	r18, HOST_NV_GPR(R18)(r1)
+	PPC_LL	r19, HOST_NV_GPR(R19)(r1)
+	PPC_LL	r20, HOST_NV_GPR(R20)(r1)
+	PPC_LL	r21, HOST_NV_GPR(R21)(r1)
+	PPC_LL	r22, HOST_NV_GPR(R22)(r1)
+	PPC_LL	r23, HOST_NV_GPR(R23)(r1)
+	PPC_LL	r24, HOST_NV_GPR(R24)(r1)
+	PPC_LL	r25, HOST_NV_GPR(R25)(r1)
+	PPC_LL	r26, HOST_NV_GPR(R26)(r1)
+	PPC_LL	r27, HOST_NV_GPR(R27)(r1)
+	PPC_LL	r28, HOST_NV_GPR(R28)(r1)
+	PPC_LL	r29, HOST_NV_GPR(R29)(r1)
+	PPC_LL	r30, HOST_NV_GPR(R30)(r1)
+	PPC_LL	r31, HOST_NV_GPR(R31)(r1)
 
 	/* Return to kvm_vcpu_run(). */
 	mtlr	r5
@@ -453,24 +454,24 @@ _GLOBAL(__kvmppc_vcpu_run)
 	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
-	PPC_STL	r14, HOST_NV_GPR(r14)(r1)
-	PPC_STL	r15, HOST_NV_GPR(r15)(r1)
-	PPC_STL	r16, HOST_NV_GPR(r16)(r1)
-	PPC_STL	r17, HOST_NV_GPR(r17)(r1)
-	PPC_STL	r18, HOST_NV_GPR(r18)(r1)
-	PPC_STL	r19, HOST_NV_GPR(r19)(r1)
-	PPC_STL	r20, HOST_NV_GPR(r20)(r1)
-	PPC_STL	r21, HOST_NV_GPR(r21)(r1)
-	PPC_STL	r22, HOST_NV_GPR(r22)(r1)
-	PPC_STL	r23, HOST_NV_GPR(r23)(r1)
-	PPC_STL	r24, HOST_NV_GPR(r24)(r1)
-	PPC_STL	r25, HOST_NV_GPR(r25)(r1)
-	PPC_STL	r26, HOST_NV_GPR(r26)(r1)
-	PPC_STL	r27, HOST_NV_GPR(r27)(r1)
-	PPC_STL	r28, HOST_NV_GPR(r28)(r1)
-	PPC_STL	r29, HOST_NV_GPR(r29)(r1)
-	PPC_STL	r30, HOST_NV_GPR(r30)(r1)
-	PPC_STL	r31, HOST_NV_GPR(r31)(r1)
+	PPC_STL	r14, HOST_NV_GPR(R14)(r1)
+	PPC_STL	r15, HOST_NV_GPR(R15)(r1)
+	PPC_STL	r16, HOST_NV_GPR(R16)(r1)
+	PPC_STL	r17, HOST_NV_GPR(R17)(r1)
+	PPC_STL	r18, HOST_NV_GPR(R18)(r1)
+	PPC_STL	r19, HOST_NV_GPR(R19)(r1)
+	PPC_STL	r20, HOST_NV_GPR(R20)(r1)
+	PPC_STL	r21, HOST_NV_GPR(R21)(r1)
+	PPC_STL	r22, HOST_NV_GPR(R22)(r1)
+	PPC_STL	r23, HOST_NV_GPR(R23)(r1)
+	PPC_STL	r24, HOST_NV_GPR(R24)(r1)
+	PPC_STL	r25, HOST_NV_GPR(R25)(r1)
+	PPC_STL	r26, HOST_NV_GPR(R26)(r1)
+	PPC_STL	r27, HOST_NV_GPR(R27)(r1)
+	PPC_STL	r28, HOST_NV_GPR(R28)(r1)
+	PPC_STL	r29, HOST_NV_GPR(R29)(r1)
+	PPC_STL	r30, HOST_NV_GPR(R30)(r1)
+	PPC_STL	r31, HOST_NV_GPR(R31)(r1)
 
 	/* Load guest non-volatiles. */
 	PPC_LL	r14, VCPU_GPR(R14)(r4)
--
cgit v1.2.3

From ad36cb0d1d3e2b7f161cd33932433f9349cade1e Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Mon, 30 Jul 2012 12:32:59 +0000
Subject: powerpc/kvm/book3s_32: Fix MTMSR_EERI macro

Commit b38c77d82e4 moved the MTMSR_EERI macro from the KVM code to the
generic ppc_asm.h code. However, while adding it to the headers for the
ppc32 case, it failed to remove the former definition from the KVM code.

This patch fixes compilation on server-type PPC32 targets with
CONFIG_KVM enabled.
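Reduced to a sketch, the breakage is a plain macro redefinition; the ppc_asm.h body shown here is an assumption for illustration, not quoted from commit b38c77d82e4:

    /* Both definitions visible when preprocessing book3s_rmhandlers.S: */
    #define MTMSR_EERI(reg)	mtmsr	reg	/* from ppc_asm.h (assumed body) */
    #define MTMSR_EERI(reg)	mtmsr	(reg)	/* stale KVM copy, removed below */
    /* -> preprocessor complains: "MTMSR_EERI" redefined */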
Signed-off-by: Alexander Graf Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kvm/book3s_rmhandlers.S | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index ab523f3c1731..9ecf6e35cd8d 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -67,7 +67,6 @@ kvmppc_skip_Hinterrupt: #elif defined(CONFIG_PPC_BOOK3S_32) #define FUNC(name) name -#define MTMSR_EERI(reg) mtmsr (reg) .macro INTERRUPT_TRAMPOLINE intno -- cgit v1.2.3 From 04f995a544d1289ffb8108849cd71b1325c5af6a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 6 Aug 2012 00:03:28 +0000 Subject: KVM: PPC: Book3S HV: Fix incorrect branch in H_CEDE code In handling the H_CEDE hypercall, if this vcpu has already been prodded (with the H_PROD hypercall, which Linux guests don't in fact use), we branch to a numeric label '1f'. Unfortunately there is another '1:' label before the one that we want to jump to. This fixes the problem by using a textual label, 'kvm_cede_prodded'. It also changes the label for another longish branch from '2:' to 'kvm_cede_exit' to avoid a possible future problem if code modifications add another numeric '2:' label in between. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 5a84c8d3d040..44b72feaff7d 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede) sync /* order setting ceded vs. testing prodded */ lbz r5,VCPU_PRODDED(r3) cmpwi r5,0 - bne 1f + bne kvm_cede_prodded li r0,0 /* set trap to 0 to say hcall is handled */ stw r0,VCPU_TRAP(r3) li r0,H_SUCCESS std r0,VCPU_GPR(R3)(r3) BEGIN_FTR_SECTION - b 2f /* just send it up to host on 970 */ + b kvm_cede_exit /* just send it up to host on 970 */ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) /* @@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) or r4,r4,r0 PPC_POPCNTW(R7,R4) cmpw r7,r8 - bge 2f + bge kvm_cede_exit stwcx. r4,0,r6 bne 31b li r0,1 @@ -1555,7 +1555,8 @@ kvm_end_cede: b hcall_real_fallback /* cede when already previously prodded case */ -1: li r0,0 +kvm_cede_prodded: + li r0,0 stb r0,VCPU_PRODDED(r3) sync /* order testing prodded vs. clearing ceded */ stb r0,VCPU_CEDED(r3) @@ -1563,7 +1564,8 @@ kvm_end_cede: blr /* we've ceded but we want to give control to the host */ -2: li r3,H_TOO_HARD +kvm_cede_exit: + li r3,H_TOO_HARD blr secondary_too_late: -- cgit v1.2.3 From 249ba1ee0f8fcb4e40caa5fbea11dafde201cc46 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 3 Aug 2012 13:56:33 +0200 Subject: KVM: PPC: Add cache flush on page map When we map a page that wasn't icache cleared before, do so when first mapping it in KVM using the same information bits as the Linux mapping logic. That way we are 100% sure that any page we map does not have stale entries in the icache. 
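For comparison, the information bit referred to is the PG_arch_1 page flag, which powerpc's own mapping path uses as an "icache is clean" marker. A sketch of that pattern (simplified; the surrounding context in the kernel's fault path differs):

    static void flush_icache_if_needed(unsigned long pfn)
    {
            struct page *page = pfn_to_page(pfn);

            if (!test_bit(PG_arch_1, &page->flags)) {
                    flush_dcache_icache_page(page);   /* write back d-cache, invalidate i-cache */
                    set_bit(PG_arch_1, &page->flags); /* remember: no need to flush again */
            }
    }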
Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/asm/kvm_ppc.h | 12 ++++++++++++ arch/powerpc/kvm/book3s_32_mmu_host.c | 3 +++ arch/powerpc/kvm/book3s_64_mmu_host.c | 2 ++ arch/powerpc/kvm/e500_tlb.c | 3 +++ arch/powerpc/mm/mem.c | 1 + 6 files changed, 22 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 50ea12fd7bf5..a8bf5c673a3c 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -33,6 +33,7 @@ #include #include #include +#include #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 0124937a23b9..e006f0bdea95 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid); void kvmppc_free_lpid(long lpid); void kvmppc_init_lpid(unsigned long nr_lpids); +static inline void kvmppc_mmu_flush_icache(pfn_t pfn) +{ + /* Clear i-cache for new pages */ + struct page *page; + page = pfn_to_page(pfn); + if (!test_bit(PG_arch_1, &page->flags)) { + flush_dcache_icache_page(page); + set_bit(PG_arch_1, &page->flags); + } +} + + #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index f922c29bb234..837f13e7b6bf 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -211,6 +211,9 @@ next_pteg: pteg1 |= PP_RWRX; } + if (orig_pte->may_execute) + kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); + local_irq_disable(); if (pteg[rr]) { diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 10fc8ec9d2a8..0688b6b39585 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) if (!orig_pte->may_execute) rflags |= HPTE_R_N; + else + kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index c510fc961302..fb3bb3ad8b3c 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); + + /* Clear i-cache for new pages */ + kvmppc_mmu_flush_icache(pfn); } /* XXX only map the one-one case, for now use TLB0 */ diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index baaafde7d135..fbdad0e3929a 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page) __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); #endif } +EXPORT_SYMBOL(flush_dcache_icache_page); void clear_user_page(void *page, unsigned long vaddr, struct page *pg) { -- cgit v1.2.3 From e8143ccb6b501f78bb95d9c5ee100d18423008cf Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 14 Aug 2012 12:10:09 +0000 Subject: ppc: e500_tlb memset clears nothing Put the parameters the right way around Addresses https://bugzilla.kernel.org/show_bug.cgi?id=44031 Reported-by: David Binderman Signed-off-by: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500_tlb.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 
deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index fb3bb3ad8b3c..a2b66717813d 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) { if (vcpu_e500->g2h_tlb1_map) - memset(vcpu_e500->g2h_tlb1_map, - sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0); + memset(vcpu_e500->g2h_tlb1_map, 0, + sizeof(u64) * vcpu_e500->gtlb_params[1].entries); if (vcpu_e500->h2g_tlb1_rmap) - memset(vcpu_e500->h2g_tlb1_rmap, - sizeof(unsigned int) * host_tlb_params[1].entries, 0); + memset(vcpu_e500->h2g_tlb1_rmap, 0, + sizeof(unsigned int) * host_tlb_params[1].entries); } static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) -- cgit v1.2.3
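The gotcha behind this fix, spelled out as a generic illustration (not code from the tree): memset() takes the fill byte second and the length third, so swapping them requests a fill of zero bytes:

    u64 map[16];

    memset(map, sizeof(map), 0);   /* bug: length 0, clears nothing */
    memset(map, 0, sizeof(map));   /* fix: zeroes all 128 bytes */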