Diffstat (limited to 'arch/powerpc')
25 files changed, 262 insertions, 180 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index feab3bad6d0f..73ec03945717 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -141,7 +141,7 @@ config PPC
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_BPF_JIT if (PPC64 && NET)
+	select HAVE_BPF_JIT if PPC64
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 548da3aa0a30..d58fc4e4149c 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -288,13 +288,6 @@ label##_hv:						\
 /* Exception addition: Hard disable interrupts */
 #define DISABLE_INTS	SOFT_DISABLE_INTS(r10,r11)
 
-/* Exception addition: Keep interrupt state */
-#define ENABLE_INTS				\
-	ld	r11,PACAKMSR(r13);		\
-	ld	r12,_MSR(r1);			\
-	rlwimi	r11,r12,0,MSR_EE;		\
-	mtmsrd	r11,1
-
 #define ADD_NVGPRS				\
 	bl	.save_nvgprs
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index e648af92ced1..0e40843a1c6e 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -18,10 +18,6 @@
 
 #include <linux/atomic.h>
 
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
-	for ((i) = 0; (i) < NR_IRQS; ++(i))
-
 extern atomic_t ppc_n_lost_interrupts;
 
 /* This number is used when no interrupt has been assigned */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index aa795ccef294..fd07f43d6622 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s {
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
-	u64 vsid_next;
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 vsid_pool[VSID_POOL_SIZE];
+	u32 vsid_next;
 #else
-	u64 vsid_first;
-	u64 vsid_max;
+	u64 proto_vsid_first;
+	u64 proto_vsid_max;
+	u64 proto_vsid_next;
 #endif
 	int context_id[SID_CONTEXTS];
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f8a7a1a1a9f4..ef2074c3e906 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -588,23 +588,19 @@ _GLOBAL(ret_from_except_lite)
 fast_exc_return_irq:
 restore:
 	/*
-	 * This is the main kernel exit path, we first check if we
-	 * have to change our interrupt state.
+	 * This is the main kernel exit path. First we check if we
+	 * are about to re-enable interrupts
 	 */
 	ld	r5,SOFTE(r1)
 	lbz	r6,PACASOFTIRQEN(r13)
-	cmpwi	cr1,r5,0
-	cmpw	cr0,r5,r6
-	beq	cr0,4f
+	cmpwi	cr0,r5,0
+	beq	restore_irq_off
 
-	/* We do, handle disable first, which is easy */
-	bne	cr1,3f;
-	li	r0,0
-	stb	r0,PACASOFTIRQEN(r13);
-	TRACE_DISABLE_INTS
-	b	4f
+	/* We are enabling, were we already enabled ? Yes, just return */
+	cmpwi	cr0,r6,1
+	beq	cr0,do_restore
 
-3: /*
+	/*
 	 * We are about to soft-enable interrupts (we are hard disabled
 	 * at this point). We check if there's anything that needs to
 	 * be replayed first.
@@ -626,7 +622,7 @@ restore_no_replay:
 	/*
 	 * Final return path. BookE is handled in a different file
 	 */
-4:
+do_restore:
 #ifdef CONFIG_PPC_BOOK3E
 	b	.exception_return_book3e
 #else
@@ -700,6 +696,25 @@ fast_exception_return:
 #endif /* CONFIG_PPC_BOOK3E */
 
 	/*
+	 * We are returning to a context with interrupts soft disabled.
+	 *
+	 * However, we may also about to hard enable, so we need to
+	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+	 * or that bit can get out of sync and bad things will happen
+	 */
+restore_irq_off:
+	ld	r3,_MSR(r1)
+	lbz	r7,PACAIRQHAPPENED(r13)
+	andi.	r0,r3,MSR_EE
+	beq	1f
+	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
+	stb	r7,PACAIRQHAPPENED(r13)
+1:	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13);
+	TRACE_DISABLE_INTS
+	b	do_restore
+
+	/*
 	 * Something did happen, check if a re-emit is needed
 	 * (this also clears paca->irq_happened)
 	 */
@@ -748,6 +763,9 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
 
+
+
+3:
 do_work:
 #ifdef CONFIG_PREEMPT
 	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
 	bne	user_work
@@ -767,16 +785,6 @@ do_work:
 	SOFT_DISABLE_INTS(r3,r4)
 1:	bl	.preempt_schedule_irq
 
-	/* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
-#else
-	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-	li	r0,PACA_IRQ_HARD_DIS
-	stb	r0,PACAIRQHAPPENED(r13)
-
 	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
@@ -787,14 +795,6 @@ do_work:
 user_work:
 #endif /* CONFIG_PREEMPT */
 
-	/* Enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	1f
 	bl	.restore_interrupts
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cb705fdbb458..8f880bc77c56 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -768,8 +768,8 @@ alignment_common:
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	bl	.save_nvgprs
+	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
 	bl	.alignment_exception
 	b	.ret_from_except
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5ec1b2354ca6..641da9e868ce 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en)
 	 */
 	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
 		__hard_irq_disable();
+#ifdef CONFIG_TRACE_IRQFLAG
+	else {
+		/*
+		 * We should already be hard disabled here. We had bugs
+		 * where that wasn't the case so let's dbl check it and
+		 * warn if we are wrong. Only do that when IRQ tracing
+		 * is enabled as mfmsr() can be costly.
+		 */
+		if (WARN_ON(mfmsr() & MSR_EE))
+			__hard_irq_disable();
+	}
+#endif /* CONFIG_TRACE_IRQFLAG */
+
 	set_soft_enabled(0);
 
 	/*
@@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * if they are currently disabled. This is typically called before
  * schedule() or do_signal() when returning to userspace. We do it
  * in C to avoid the burden of dealing with lockdep etc...
+ *
+ * NOTE: This is called with interrupts hard disabled but not marked
+ * as such in paca->irq_happened, so we need to resync this.
  */
 void restore_interrupts(void)
 {
-	if (irqs_disabled())
+	if (irqs_disabled()) {
+		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 		local_irq_enable();
+	} else
+		__hard_irq_enable();
 }
 
 #endif /* CONFIG_PPC64 */
@@ -330,14 +349,10 @@ void migrate_irqs(void)
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
-	for_each_irq(irq) {
+	for_each_irq_desc(irq, desc) {
 		struct irq_data *data;
 		struct irq_chip *chip;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
 		data = irq_desc_get_irq_data(desc);
 		if (irqd_is_per_cpu(data))
 			continue;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index c957b1202bdc..5df777794403 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -23,14 +23,11 @@
 void machine_kexec_mask_interrupts(void) {
 	unsigned int i;
+	struct irq_desc *desc;
 
-	for_each_irq(i) {
-		struct irq_desc *desc = irq_to_desc(i);
+	for_each_irq_desc(i, desc) {
 		struct irq_chip *chip;
 
-		if (!desc)
-			continue;
-
 		chip = irq_desc_get_chip(desc);
 		if (!chip)
 			continue;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6aa0c663e247..158972341a2d 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 		       addr, regs->nip, regs->link, code);
 	}
 
-	if (!arch_irq_disabled_regs(regs))
+	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
 	memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		return;
 	}
 
-	local_irq_enable();
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
 
 #ifdef CONFIG_MATH_EMULATION
 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs)
 {
 	int sig, code, fixed = 0;
 
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
+
 	/* we don't implement logging of alignment exceptions */
 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
 		fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 6f87f39a1ac2..10fc8ec9d2a8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	backwards_map = !backwards_map;
 
 	/* Uh-oh ... out of mappings. Let's flush! */
-	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
-		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
+		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
 		memset(vcpu_book3s->sid_map, 0,
 		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 		kvmppc_mmu_flush_segments(vcpu);
 	}
-	map->host_vsid = vcpu_book3s->vsid_next++;
+	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);
 
 	map->guest_vsid = gvsid;
 	map->valid = true;
@@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 		return -1;
 	vcpu3s->context_id[0] = err;
 
-	vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
-	vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
-	vcpu3s->vsid_next = vcpu3s->vsid_first;
+	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
+				  << USER_ESID_BITS) - 1;
+	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
 	kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ddc485a529f2..c3beaeef3f60 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 			    !(memslot->userspace_addr & (s - 1))) {
 				start &= ~(s - 1);
 				pgsize = s;
+				get_page(hpage);
+				put_page(page);
 				page = hpage;
 			}
 		}
@@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	err = 0;
 
  out:
-	if (got) {
-		if (PageHuge(page))
-			page = compound_head(page);
+	if (got)
 		put_page(page);
-	}
 	return err;
 
  up_err:
@@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		SetPageDirty(page);
 
  out_put:
-	if (page)
-		put_page(page);
+	if (page) {
+		/*
+		 * We drop pages[0] here, not page because page might
+		 * have been set to the head page of a compound, but
+		 * we have to drop the reference on the correct tail
+		 * page to match the get inside gup()
+		 */
+		put_page(pages[0]);
+	}
 	return ret;
 
  out_unlock:
@@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 			pa = *physp;
 		}
 		page = pfn_to_page(pa >> PAGE_SHIFT);
+		get_page(page);
 	} else {
 		hva = gfn_to_hva_memslot(memslot, gfn);
 		npages = get_user_pages_fast(hva, 1, 1, pages);
@@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 		page = compound_head(page);
 		psize <<= compound_order(page);
 	}
-	if (!kvm->arch.using_mmu_notifiers)
-		get_page(page);
 	offset = gpa & (psize - 1);
 	if (nb_ret)
 		*nb_ret = psize - offset;
@@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
 {
 	struct page *page = virt_to_page(va);
 
-	page = compound_head(page);
 	put_page(page);
 }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 01294a5099dd..108d1f580177 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
 				continue;
 			pfn = physp[j] >> PAGE_SHIFT;
 			page = pfn_to_page(pfn);
-			if (PageHuge(page))
-				page = compound_head(page);
 			SetPageDirty(page);
 			put_page(page);
 		}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index def880aea63a..cec4daddbf31 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			/* insert R and C bits from PTE */
 			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
 			args[j] |= rcbits << (56 - 5);
+			hp[0] = 0;
 			continue;
 		}
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 0676ae249b9f..6e6e9cef34a8 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -197,7 +197,8 @@ kvmppc_interrupt:
 	/* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
-	andi.	r0,r12,0x2
+	andi.	r0, r12, 0x2
+	cmpwi	cr1, r0, 0
 	beq	1f
 	mfspr	r3,SPRN_HSRR0
 	mfspr	r4,SPRN_HSRR1
@@ -250,6 +251,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	beq	ld_last_prev_inst
 	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
 	beq-	ld_last_inst
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
+	beq-	ld_last_inst
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
 
 	b	no_ld_last_inst
 
@@ -316,23 +323,17 @@ no_dcbz32_off:
 	 * Having set up SRR0/1 with the address where we want
 	 * to continue with relocation on (potentially in module
 	 * space), we either just go straight there with rfi[d],
-	 * or we jump to an interrupt handler with bctr if there
-	 * is an interrupt to be handled first.  In the latter
-	 * case, the rfi[d] at the end of the interrupt handler
-	 * will get us back to where we want to continue.
+	 * or we jump to an interrupt handler if there is an
+	 * interrupt to be handled first.  In the latter case,
+	 * the rfi[d] at the end of the interrupt handler will
+	 * get us back to where we want to continue.
 	 */
 
-	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
-1:	mtctr	r12
-
 	/* Register usage at this point:
 	 *
 	 * R1       = host R1
 	 * R2       = host R2
+	 * R10      = raw exit handler id
 	 * R12      = exit handler id
 	 * R13      = shadow vcpu (32-bit) or PACA (64-bit)
 	 * SVCPU.*  = guest *
@@ -342,12 +343,25 @@ no_dcbz32_off:
 	PPC_LL	r6, HSTATE_HOST_MSR(r13)
 	PPC_LL	r8, HSTATE_VMHANDLER(r13)
 
-	/* Restore host msr -> SRR1 */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	beq	cr1, 1f
+	mtspr	SPRN_HSRR1, r6
+	mtspr	SPRN_HSRR0, r8
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
+1:	/* Restore host msr -> SRR1 */
 	mtsrr1	r6
 	/* Load highmem handler address */
 	mtsrr0	r8
 
 	/* RFI into the highmem handler, or jump to interrupt handler */
-	beqctr
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beqa	BOOK3S_INTERRUPT_EXTERNAL
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
+	beqa	BOOK3S_INTERRUPT_DECREMENTER
+	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
+	beqa	BOOK3S_INTERRUPT_PERFMON
+
 	RFI
 kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index af1ab5e9a691..5c3cf2d04e41 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -48,7 +48,13 @@
 /*
  * Assembly helpers from arch/powerpc/net/bpf_jit.S:
  */
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+#define DECLARE_LOAD_FUNC(func)	\
+	extern u8 func[], func##_negative_offset[], func##_positive_offset[]
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+DECLARE_LOAD_FUNC(sk_load_byte_msh);
 
 #define FUNCTION_DESCR_SIZE	24
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index ff4506e85cce..55ba3855a97f 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -31,14 +31,13 @@
 * then branch directly to slow_path_XXX if required.  (In fact, could
 * load a spare GPR with the address of slow_path_generic and pass size
 * as an argument, making the call site a mtlr, li and bllr.)
- *
- * Technically, the "is addr < 0" check is unnecessary & slowing down
- * the ABS path, as it's statically checked on generation.
 */
 	.globl	sk_load_word
 sk_load_word:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_word_neg
+	.globl	sk_load_word_positive_offset
+sk_load_word_positive_offset:
 	/* Are we accessing past headlen? */
 	subi	r_scratch1, r_HL, 4
 	cmpd	r_scratch1, r_addr
@@ -51,7 +50,9 @@ sk_load_word:
 	.globl	sk_load_half
 sk_load_half:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_half_neg
+	.globl	sk_load_half_positive_offset
+sk_load_half_positive_offset:
 	subi	r_scratch1, r_HL, 2
 	cmpd	r_scratch1, r_addr
 	blt	bpf_slow_path_half
@@ -61,7 +62,9 @@ sk_load_half:
 	.globl	sk_load_byte
 sk_load_byte:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_byte_neg
+	.globl	sk_load_byte_positive_offset
+sk_load_byte_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte
 	lbzx	r_A, r_D, r_addr
@@ -69,22 +72,20 @@ sk_load_byte:
 
 /*
 * BPF_S_LDX_B_MSH: ldxb  4*([offset]&0xf)
- * r_addr is the offset value, already known positive
+ * r_addr is the offset value
 */
 	.globl sk_load_byte_msh
 sk_load_byte_msh:
+	cmpdi	r_addr, 0
+	blt	bpf_slow_path_byte_msh_neg
+	.globl sk_load_byte_msh_positive_offset
+sk_load_byte_msh_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte_msh
 	lbzx	r_X, r_D, r_addr
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
 
-bpf_error:
-	/* Entered with cr0 = lt */
-	li	r3, 0
-	/* Generated code will 'blt epilogue', returning 0. */
-	blr
-
 /* Call out to skb_copy_bits:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
@@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
 	lbz	r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
+
+/* Call out to bpf_internal_load_pointer_neg_helper:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define sk_negative_common(SIZE)				\
+	mflr	r0;						\
+	std	r0, 16(r1);					\
+	/* R3 goes in parameter space of caller's frame */	\
+	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1);		\
+	/* R3 = r_skb, as passed */				\
+	mr	r4, r_addr;					\
+	li	r5, SIZE;					\
+	bl	bpf_internal_load_pointer_neg_helper;		\
+	/* R3 != 0 on success */				\
+	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
+	ld	r0, 16(r1);					\
+	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	mtlr	r0;						\
+	cmpldi	r3, 0;						\
+	beq	bpf_error_slow;	/* cr0 = EQ */			\
+	mr	r_addr, r3;					\
+	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	/* Great success! */
+
+bpf_slow_path_word_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_word_negative_offset
+sk_load_word_negative_offset:
+	sk_negative_common(4)
+	lwz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_half_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_half_negative_offset
+sk_load_half_negative_offset:
+	sk_negative_common(2)
+	lhz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_negative_offset
+sk_load_byte_negative_offset:
+	sk_negative_common(1)
+	lbz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_msh_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_msh_negative_offset
+sk_load_byte_msh_negative_offset:
+	sk_negative_common(1)
+	lbz	r_X, 0(r_addr)
+	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
+	blr
+
+bpf_error_slow:
+	/* fabricate a cr0 = lt */
+	li	r_scratch1, -1
+	cmpdi	r_scratch1, 0
+bpf_error:
+	/* Entered with cr0 = lt */
+	li	r3, 0
+	/* Generated code will 'blt epilogue', returning 0. */
+	blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 73619d3aeb6c..2dc8b1484845 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	PPC_BLR();
 }
 
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
 /* Assemble the body code between the prologue & epilogue. */
 static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			      struct codegen_context *ctx,
@@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 
 		/*** Absolute loads from packet header/data ***/
 		case BPF_S_LD_W_ABS:
-			func = sk_load_word;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 			goto common_load;
 		case BPF_S_LD_H_ABS:
-			func = sk_load_half;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
 		case BPF_S_LD_B_ABS:
-			func = sk_load_byte;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 		common_load:
-			/*
-			 * Load from [K]. Reference with the (negative)
-			 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
-			 */
+			/* Load from [K]. */
 			ctx->seen |= SEEN_DATAREF;
-			if ((int)K < 0)
-				return -ENOTSUPP;
 			PPC_LI64(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_LI32(r_addr, K);
@@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 		common_load_ind:
 			/*
 			 * Load from [X + K].  Negative offsets are tested for
-			 * in the helper functions, and result in a 'ret 0'.
+			 * in the helper functions.
 			 */
 			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
 			PPC_LI64(r_scratch1, func);
@@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;
 
 		case BPF_S_LDX_B_MSH:
-			/*
-			 * x86 version drops packet (RET 0) when K<0, whereas
-			 * interpreter does allow K<0 (__load_pointer, special
-			 * ancillary data). common_load returns ENOTSUPP if K<0,
-			 * so we fall back to interpreter & filter works.
-			 */
-			func = sk_load_byte_msh;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			goto common_load;
 			break;
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index d09f3e8e6867..85825b5401e5 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_devel("axon_msi: woff %x roff %x msi %x\n",
 			  write_offset, msic->read_offset, msi);
 
-		if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
+		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
 			generic_handle_irq(msi);
 			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
 		} else {
@@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	if (rc)
 		return rc;
 
-	/* We rely on being able to stash a virq in a u16 */
-	BUILD_BUG_ON(NR_IRQS > 65536);
-
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		virq = irq_create_direct_mapping(msic->irq_domain);
 		if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device)
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
+	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index f9a48af335cb..8c6dc42ecf65 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void)
 {
 	int	i;
 
-	for (i = 1; i < NR_IRQS; i++)
+	for (i = 1; i < nr_irqs; i++)
 		beat_destruct_irq_plug(i);
 }
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 66ad93de1d55..c4e630576ff2 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -57,9 +57,9 @@ static int max_real_irqs;
 
 static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
 
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
 static int pmac_irq_cascade = -1;
 static struct irq_domain *pmac_pic_host;
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index aadbe4f6d537..178a5f300bc9 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -30,9 +30,9 @@ config PPC_SPLPAR
 	  two or more partitions.
 
 config EEH
-	bool "PCI Extended Error Handling (EEH)" if EXPERT
+	bool
 	depends on PPC_PSERIES && PCI
-	default y if !EXPERT
+	default y
 
 config PSERIES_MSI
 	bool
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index d3be961e2ae7..10386b676d87 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -51,8 +51,7 @@
 static intctl_cpm2_t __iomem *cpm2_intctl;
 
 static struct irq_domain *cpm2_pic_host;
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long	ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
 
 static const u_char irq_to_siureg[] = {
 	1, 1, 1, 1, 1, 1, 1, 1,
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index d5f5416be310..b724622c3a0b 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -18,69 +18,45 @@
 extern int cpm_get_irq(struct pt_regs *regs);
 
 static struct irq_domain *mpc8xx_pic_host;
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long	ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long mpc8xx_cached_irq_mask;
 static sysconf8xx_t __iomem *siu_reg;
 
-int cpm_get_irq(struct pt_regs *regs);
+static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
+{
+	return 0x80000000 >> irqd_to_hwirq(d);
+}
 
 static void mpc8xx_unmask_irq(struct irq_data *d)
 {
-	int	bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_mask_irq(struct irq_data *d)
 {
-	int	bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_ack(struct irq_data *d)
 {
-	int	bit;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
+	out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
 }
 
 static void mpc8xx_end_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	if (flow_type & IRQ_TYPE_EDGE_FALLING) {
-		irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
+	/* only external IRQ senses are programmable */
+	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
 		unsigned int siel = in_be32(&siu_reg->sc_siel);
-
-		/* only external IRQ senses are programmable */
-		if ((hw & 1) == 0) {
-			siel |= (0x80000000 >> hw);
-			out_be32(&siu_reg->sc_siel, siel);
-			__irq_set_handler_locked(d->irq, handle_edge_irq);
-		}
+		siel |= mpc8xx_irqd_to_bit(d);
+		out_be32(&siu_reg->sc_siel, siel);
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
 	}
 	return 0;
 }
@@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
 		IRQ_TYPE_EDGE_FALLING,
 	};
 
+	if (intspec[0] > 0x1f)
+		return 0;
+
 	*out_hwirq = intspec[0];
 	if (intsize > 1 && intspec[1] < 4)
 		*out_flags = map_pic_senses[intspec[1]];
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
index 49a3ece1c6b3..702256a1ca11 100644
--- a/arch/powerpc/sysdev/scom.c
+++ b/arch/powerpc/sysdev/scom.c
@@ -22,6 +22,7 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <asm/debug.h>
 #include <asm/prom.h>
 #include <asm/scom.h>
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index ea5e204e3450..cd1d18db92c6 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void)
 {
 	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
 	unsigned int irq, virq;
+	struct irq_desc *desc;
 
 	/* If we used to be the default server, move to the new "boot_cpuid" */
 	if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void)
 	/* Allow IPIs again... */
 	icp_ops->set_priority(DEFAULT_PRIORITY);
 
-	for_each_irq(virq) {
-		struct irq_desc *desc;
+	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
 		unsigned long flags;
@@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void)
 		/* We can't set affinity on ISA interrupts */
 		if (virq < NUM_ISA_INTERRUPTS)
 			continue;
-		desc = irq_to_desc(virq);
 		/* We only need to migrate enabled IRQS */
-		if (!desc || !desc->action)
+		if (!desc->action)
 			continue;
 		if (desc->irq_data.domain != xics_host)
 			continue;
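
Note on the BPF JIT change above: the new CHOOSE_LOAD_FUNC macro in bpf_jit_comp.c picks one of three entry points per packet load, based on the sign and range of the instruction's constant offset K. The stand-alone C sketch below mirrors that selection for sk_load_word; it is illustrative only, not part of the patch — the choose_load_word() helper and the test driver are hypothetical, while the SKF_LL_OFF value (-0x200000, the same constant the "lis r_scratch1,-32" in bpf_jit_64.S encodes) comes from include/linux/filter.h of this era.

#include <stdio.h>

#define SKF_LL_OFF (-0x200000)	/* value from include/linux/filter.h */

/* Mirrors CHOOSE_LOAD_FUNC(K, sk_load_word) from the patch above */
static const char *choose_load_word(int k)
{
	if (k < 0)
		/* In-range negative offsets get the negative-offset helper;
		 * anything below SKF_LL_OFF keeps the generic entry point,
		 * whose runtime check branches to bpf_error and returns 0. */
		return k >= SKF_LL_OFF ? "sk_load_word_negative_offset"
				       : "sk_load_word";
	/* Non-negative offsets skip the sign check entirely */
	return "sk_load_word_positive_offset";
}

int main(void)
{
	printf("K=14        -> %s\n", choose_load_word(14));
	printf("K=-0x200000 -> %s\n", choose_load_word(-0x200000));
	printf("K=-0x300000 -> %s\n", choose_load_word(-0x300000));
	return 0;
}

Because K is a compile-time constant of the filter program, this dispatch happens once at JIT time, so the positive-offset fast path no longer pays for a sign check it can never fail.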