Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/include/asm/exception-64s.h    |   7
-rw-r--r-- | arch/powerpc/include/asm/irq.h               |   4
-rw-r--r-- | arch/powerpc/kernel/entry_64.S               |  62
-rw-r--r-- | arch/powerpc/kernel/exceptions-64s.S         |   2
-rw-r--r-- | arch/powerpc/kernel/irq.c                    |  27
-rw-r--r-- | arch/powerpc/kernel/machine_kexec.c          |   7
-rw-r--r-- | arch/powerpc/kernel/traps.c                  |  10
-rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_hv.c          |  22
-rw-r--r-- | arch/powerpc/kvm/book3s_hv.c                 |   2
-rw-r--r-- | arch/powerpc/net/bpf_jit.h                   |   8
-rw-r--r-- | arch/powerpc/net/bpf_jit_64.S                | 108
-rw-r--r-- | arch/powerpc/net/bpf_jit_comp.c              |  26
-rw-r--r-- | arch/powerpc/platforms/cell/axon_msi.c       |   8
-rw-r--r-- | arch/powerpc/platforms/cell/beat_interrupt.c |   2
-rw-r--r-- | arch/powerpc/platforms/powermac/pic.c        |   6
-rw-r--r-- | arch/powerpc/platforms/pseries/Kconfig       |   4
-rw-r--r-- | arch/powerpc/sysdev/cpm2_pic.c               |   3
-rw-r--r-- | arch/powerpc/sysdev/mpc8xx_pic.c             |  61
-rw-r--r-- | arch/powerpc/sysdev/scom.c                   |   1
-rw-r--r-- | arch/powerpc/sysdev/xics/xics-common.c       |   7
20 files changed, 221 insertions, 156 deletions
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 548da3aa0a30..d58fc4e4149c 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -288,13 +288,6 @@ label##_hv:	\
 /* Exception addition: Hard disable interrupts */
 #define DISABLE_INTS	SOFT_DISABLE_INTS(r10,r11)
 
-/* Exception addition: Keep interrupt state */
-#define ENABLE_INTS				\
-	ld	r11,PACAKMSR(r13);		\
-	ld	r12,_MSR(r1);			\
-	rlwimi	r11,r12,0,MSR_EE;		\
-	mtmsrd	r11,1
-
 #define ADD_NVGPRS				\
 	bl	.save_nvgprs
 
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index e648af92ced1..0e40843a1c6e 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -18,10 +18,6 @@
 
 #include <linux/atomic.h>
 
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
-	for ((i) = 0; (i) < NR_IRQS; ++(i))
-
 extern atomic_t ppc_n_lost_interrupts;
 
 /* This number is used when no interrupt has been assigned */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f8a7a1a1a9f4..ef2074c3e906 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -588,23 +588,19 @@ _GLOBAL(ret_from_except_lite)
 fast_exc_return_irq:
 restore:
 	/*
-	 * This is the main kernel exit path, we first check if we
-	 * have to change our interrupt state.
+	 * This is the main kernel exit path. First we check if we
+	 * are about to re-enable interrupts
 	 */
 	ld	r5,SOFTE(r1)
 	lbz	r6,PACASOFTIRQEN(r13)
-	cmpwi	cr1,r5,0
-	cmpw	cr0,r5,r6
-	beq	cr0,4f
+	cmpwi	cr0,r5,0
+	beq	restore_irq_off
 
-	/* We do, handle disable first, which is easy */
-	bne	cr1,3f;
-	li	r0,0
-	stb	r0,PACASOFTIRQEN(r13);
-	TRACE_DISABLE_INTS
-	b	4f
+	/* We are enabling, were we already enabled ? Yes, just return */
+	cmpwi	cr0,r6,1
+	beq	cr0,do_restore
 
-3:	/*
+	/*
 	 * We are about to soft-enable interrupts (we are hard disabled
 	 * at this point). We check if there's anything that needs to
 	 * be replayed first.
@@ -626,7 +622,7 @@ restore_no_replay:
 	/*
 	 * Final return path. BookE is handled in a different file
 	 */
-4:
+do_restore:
 #ifdef CONFIG_PPC_BOOK3E
 	b	.exception_return_book3e
 #else
@@ -700,6 +696,25 @@ fast_exception_return:
 #endif /* CONFIG_PPC_BOOK3E */
 
 	/*
+	 * We are returning to a context with interrupts soft disabled.
+	 *
+	 * However, we may also about to hard enable, so we need to
+	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+	 * or that bit can get out of sync and bad things will happen
+	 */
+restore_irq_off:
+	ld	r3,_MSR(r1)
+	lbz	r7,PACAIRQHAPPENED(r13)
+	andi.	r0,r3,MSR_EE
+	beq	1f
+	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
+	stb	r7,PACAIRQHAPPENED(r13)
+1:	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13);
+	TRACE_DISABLE_INTS
+	b	do_restore
+
+	/*
 	 * Something did happen, check if a re-emit is needed
 	 * (this also clears paca->irq_happened)
 	 */
@@ -748,6 +763,9 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
 
+
+
+3:
 do_work:
 #ifdef CONFIG_PREEMPT
 	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
@@ -767,16 +785,6 @@ do_work:
 	SOFT_DISABLE_INTS(r3,r4)
 1:	bl	.preempt_schedule_irq
 
-	/* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
-#else
-	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-	li	r0,PACA_IRQ_HARD_DIS
-	stb	r0,PACAIRQHAPPENED(r13)
-
 	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
@@ -787,14 +795,6 @@ do_work:
 
 user_work:
 #endif /* CONFIG_PREEMPT */
 
-	/* Enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	1f
 	bl	.restore_interrupts
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cb705fdbb458..8f880bc77c56 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -768,8 +768,8 @@ alignment_common:
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	bl	.save_nvgprs
+	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
 	bl	.alignment_exception
 	b	.ret_from_except
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5ec1b2354ca6..641da9e868ce 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en)
 	 */
 	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
 		__hard_irq_disable();
+#ifdef CONFIG_TRACE_IRQFLAG
+	else {
+		/*
+		 * We should already be hard disabled here. We had bugs
+		 * where that wasn't the case so let's dbl check it and
+		 * warn if we are wrong. Only do that when IRQ tracing
+		 * is enabled as mfmsr() can be costly.
+		 */
+		if (WARN_ON(mfmsr() & MSR_EE))
+			__hard_irq_disable();
+	}
+#endif /* CONFIG_TRACE_IRQFLAG */
+
 	set_soft_enabled(0);
 
 	/*
@@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * if they are currently disabled. This is typically called before
  * schedule() or do_signal() when returning to userspace. We do it
  * in C to avoid the burden of dealing with lockdep etc...
+ *
+ * NOTE: This is called with interrupts hard disabled but not marked
+ * as such in paca->irq_happened, so we need to resync this.
  */
 void restore_interrupts(void)
 {
-	if (irqs_disabled())
+	if (irqs_disabled()) {
+		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 		local_irq_enable();
+	} else
+		__hard_irq_enable();
 }
 
 #endif /* CONFIG_PPC64 */
@@ -330,14 +349,10 @@ void migrate_irqs(void)
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
-	for_each_irq(irq) {
+	for_each_irq_desc(irq, desc) {
 		struct irq_data *data;
 		struct irq_chip *chip;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
 		data = irq_desc_get_irq_data(desc);
 		if (irqd_is_per_cpu(data))
 			continue;
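The entry_64.S and irq.c hunks above are the heart of the 64-bit lazy-interrupt bookkeeping: PACASOFTIRQEN tracks whether Linux considers interrupts enabled, while PACAIRQHAPPENED latches interrupts that arrived while soft-disabled so they can be replayed later, and restore_interrupts() now resyncs PACA_IRQ_HARD_DIS before re-enabling. The following is a minimal user-space model of that state machine, not kernel code; the field and helper names are borrowed from the patch, everything else is illustrative.

/* Simplified, stand-alone model of the lazy-IRQ bookkeeping above (not kernel code). */
#include <stdio.h>
#include <stdbool.h>

#define PACA_IRQ_HARD_DIS 0x01          /* mirrors the flag used in the patch */

struct paca {
	unsigned char soft_enabled;     /* PACASOFTIRQEN */
	unsigned char irq_happened;     /* PACAIRQHAPPENED */
} paca;

static bool hard_enabled;               /* stand-in for MSR_EE */

/* Models arch_local_irq_restore(): replay anything latched, then soft-enable. */
static void irq_restore(unsigned char en)
{
	if (!en) {
		paca.soft_enabled = 0;
		return;
	}
	if (paca.irq_happened) {
		printf("replaying irq_happened=%#x\n", paca.irq_happened);
		paca.irq_happened = 0;
	}
	paca.soft_enabled = 1;
	hard_enabled = true;            /* __hard_irq_enable() */
}

/* Models restore_interrupts(): called hard-disabled, so resync the flag first. */
static void restore_interrupts(void)
{
	if (!paca.soft_enabled) {
		paca.irq_happened |= PACA_IRQ_HARD_DIS;
		irq_restore(1);
	} else {
		hard_enabled = true;    /* just __hard_irq_enable() */
	}
}

int main(void)
{
	paca.soft_enabled = 0;
	paca.irq_happened = PACA_IRQ_HARD_DIS; /* an IRQ came in while soft-disabled */
	restore_interrupts();
	printf("soft=%d hard=%d happened=%#x\n",
	       paca.soft_enabled, hard_enabled, paca.irq_happened);
	return 0;
}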
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index c957b1202bdc..5df777794403 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -23,14 +23,11 @@ void machine_kexec_mask_interrupts(void)
 {
 	unsigned int i;
+	struct irq_desc *desc;
 
-	for_each_irq(i) {
-		struct irq_desc *desc = irq_to_desc(i);
+	for_each_irq_desc(i, desc) {
 		struct irq_chip *chip;
 
-		if (!desc)
-			continue;
-
 		chip = irq_desc_get_chip(desc);
 		if (!chip)
 			continue;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6aa0c663e247..158972341a2d 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 			addr, regs->nip, regs->link, code);
 	}
 
-	if (!arch_irq_disabled_regs(regs))
+	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
 	memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		return;
 	}
 
-	local_irq_enable();
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
 
 #ifdef CONFIG_MATH_EMULATION
 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs)
 {
 	int sig, code, fixed = 0;
 
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
+
 	/* we don't implement logging of alignment exceptions */
 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
 		fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ddc485a529f2..c3beaeef3f60 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 		    !(memslot->userspace_addr & (s - 1))) {
 			start &= ~(s - 1);
 			pgsize = s;
+			get_page(hpage);
+			put_page(page);
 			page = hpage;
 		}
 	}
@@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	err = 0;
 
  out:
-	if (got) {
-		if (PageHuge(page))
-			page = compound_head(page);
+	if (got)
 		put_page(page);
-	}
 	return err;
 
  up_err:
@@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		SetPageDirty(page);
 
  out_put:
-	if (page)
-		put_page(page);
+	if (page) {
+		/*
+		 * We drop pages[0] here, not page because page might
+		 * have been set to the head page of a compound, but
+		 * we have to drop the reference on the correct tail
+		 * page to match the get inside gup()
+		 */
+		put_page(pages[0]);
+	}
 	return ret;
 
  out_unlock:
@@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 			pa = *physp;
 		}
 		page = pfn_to_page(pa >> PAGE_SHIFT);
+		get_page(page);
 	} else {
 		hva = gfn_to_hva_memslot(memslot, gfn);
 		npages = get_user_pages_fast(hva, 1, 1, pages);
@@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 		page = compound_head(page);
 		psize <<= compound_order(page);
 	}
-	if (!kvm->arch.using_mmu_notifiers)
-		get_page(page);
 	offset = gpa & (psize - 1);
 	if (nb_ret)
 		*nb_ret = psize - offset;
@@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
 {
 	struct page *page = virt_to_page(va);
 
-	page = compound_head(page);
 	put_page(page);
 }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 01294a5099dd..108d1f580177 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
 				continue;
 			pfn = physp[j] >> PAGE_SHIFT;
 			page = pfn_to_page(pfn);
-			if (PageHuge(page))
-				page = compound_head(page);
 			SetPageDirty(page);
 			put_page(page);
 		}
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index af1ab5e9a691..5c3cf2d04e41 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -48,7 +48,13 @@
 /*
  * Assembly helpers from arch/powerpc/net/bpf_jit.S:
  */
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+#define DECLARE_LOAD_FUNC(func)	\
+	extern u8 func[], func##_negative_offset[], func##_positive_offset[]
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+DECLARE_LOAD_FUNC(sk_load_byte_msh);
 
 #define FUNCTION_DESCR_SIZE	24
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index ff4506e85cce..55ba3855a97f 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -31,14 +31,13 @@
  * then branch directly to slow_path_XXX if required.  (In fact, could
  * load a spare GPR with the address of slow_path_generic and pass size
  * as an argument, making the call site a mtlr, li and bllr.)
- *
- * Technically, the "is addr < 0" check is unnecessary & slowing down
- * the ABS path, as it's statically checked on generation.
  */
 	.globl	sk_load_word
 sk_load_word:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_word_neg
+	.globl	sk_load_word_positive_offset
+sk_load_word_positive_offset:
 	/* Are we accessing past headlen? */
 	subi	r_scratch1, r_HL, 4
 	cmpd	r_scratch1, r_addr
@@ -51,7 +50,9 @@ sk_load_word:
 	.globl	sk_load_half
 sk_load_half:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_half_neg
+	.globl	sk_load_half_positive_offset
+sk_load_half_positive_offset:
 	subi	r_scratch1, r_HL, 2
 	cmpd	r_scratch1, r_addr
 	blt	bpf_slow_path_half
@@ -61,7 +62,9 @@ sk_load_half:
 	.globl	sk_load_byte
 sk_load_byte:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_byte_neg
+	.globl	sk_load_byte_positive_offset
+sk_load_byte_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte
 	lbzx	r_A, r_D, r_addr
@@ -69,22 +72,20 @@ sk_load_byte:
 
 /*
  * BPF_S_LDX_B_MSH: ldxb  4*([offset]&0xf)
- * r_addr is the offset value, already known positive
+ * r_addr is the offset value
  */
 	.globl sk_load_byte_msh
 sk_load_byte_msh:
+	cmpdi	r_addr, 0
+	blt	bpf_slow_path_byte_msh_neg
+	.globl sk_load_byte_msh_positive_offset
+sk_load_byte_msh_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte_msh
 	lbzx	r_X, r_D, r_addr
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
 
-bpf_error:
-	/* Entered with cr0 = lt */
-	li	r3, 0
-	/* Generated code will 'blt epilogue', returning 0. */
-	blr
-
 /* Call out to skb_copy_bits:
  * We'll need to back up our volatile regs first; we have
  * local variable space at r1+(BPF_PPC_STACK_BASIC).
@@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
 	lbz	r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
+
+/* Call out to bpf_internal_load_pointer_neg_helper:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define sk_negative_common(SIZE)				\
+	mflr	r0;						\
+	std	r0, 16(r1);					\
+	/* R3 goes in parameter space of caller's frame */	\
+	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1);		\
+	/* R3 = r_skb, as passed */				\
+	mr	r4, r_addr;					\
+	li	r5, SIZE;					\
+	bl	bpf_internal_load_pointer_neg_helper;		\
+	/* R3 != 0 on success */				\
+	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
+	ld	r0, 16(r1);					\
+	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	mtlr	r0;						\
+	cmpldi	r3, 0;						\
+	beq	bpf_error_slow;	/* cr0 = EQ */			\
+	mr	r_addr, r3;					\
+	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	/* Great success! */
+
+bpf_slow_path_word_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_word_negative_offset
+sk_load_word_negative_offset:
+	sk_negative_common(4)
+	lwz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_half_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_half_negative_offset
+sk_load_half_negative_offset:
+	sk_negative_common(2)
+	lhz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_negative_offset
+sk_load_byte_negative_offset:
+	sk_negative_common(1)
+	lbz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_msh_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_msh_negative_offset
+sk_load_byte_msh_negative_offset:
+	sk_negative_common(1)
+	lbz	r_X, 0(r_addr)
+	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
+	blr
+
+bpf_error_slow:
+	/* fabricate a cr0 = lt */
+	li	r_scratch1, -1
+	cmpdi	r_scratch1, 0
+bpf_error:
+	/* Entered with cr0 = lt */
+	li	r3, 0
+	/* Generated code will 'blt epilogue', returning 0. */
+	blr
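With this change each packet-load helper gains three entry points: the original symbol (used when the offset is only known at run time), a _positive_offset fast path, and a _negative_offset path that calls bpf_internal_load_pointer_neg_helper. The DECLARE_LOAD_FUNC() macro added to bpf_jit.h above simply declares all three; for example, DECLARE_LOAD_FUNC(sk_load_word) expands to the declarations below (the u8 typedef is the kernel's, repeated here only to make the fragment self-contained).

typedef unsigned char u8;       /* kernel typedef, shown for completeness */

/* Expansion of DECLARE_LOAD_FUNC(sk_load_word): */
extern u8 sk_load_word[],
          sk_load_word_negative_offset[],
          sk_load_word_positive_offset[];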
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 73619d3aeb6c..2dc8b1484845 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	PPC_BLR();
 }
 
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
 /* Assemble the body code between the prologue & epilogue. */
 static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			      struct codegen_context *ctx,
@@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 
 		/*** Absolute loads from packet header/data ***/
 		case BPF_S_LD_W_ABS:
-			func = sk_load_word;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 			goto common_load;
 		case BPF_S_LD_H_ABS:
-			func = sk_load_half;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
 		case BPF_S_LD_B_ABS:
-			func = sk_load_byte;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 		common_load:
-			/*
-			 * Load from [K].  Reference with the (negative)
-			 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
-			 */
+			/* Load from [K]. */
 			ctx->seen |= SEEN_DATAREF;
-			if ((int)K < 0)
-				return -ENOTSUPP;
 			PPC_LI64(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_LI32(r_addr, K);
@@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 		common_load_ind:
 			/*
 			 * Load from [X + K].  Negative offsets are tested for
-			 * in the helper functions, and result in a 'ret 0'.
+			 * in the helper functions.
 			 */
 			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
 			PPC_LI64(r_scratch1, func);
@@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;
 
 		case BPF_S_LDX_B_MSH:
-			/*
-			 * x86 version drops packet (RET 0) when K<0, whereas
-			 * interpreter does allow K<0 (__load_pointer, special
-			 * ancillary data). common_load returns ENOTSUPP if K<0,
-			 * so we fall back to interpreter & filter works.
-			 */
-			func = sk_load_byte_msh;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			goto common_load;
 			break;
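At code-generation time, CHOOSE_LOAD_FUNC() selects one of those entry points from the sign of the constant offset K: non-negative offsets use the _positive_offset fast path, negative offsets at or above SKF_LL_OFF use the _negative_offset helper, and anything lower keeps the generic entry point, which takes the bpf_error path and returns 0 at run time. A small stand-alone sketch of that classification follows; the SKF_LL_OFF value comes from include/linux/filter.h, while the function and the returned strings are purely illustrative.

/* Illustration of the CHOOSE_LOAD_FUNC() dispatch above (not kernel code). */
#include <stdio.h>

#define SKF_LL_OFF (-0x200000)          /* from include/linux/filter.h */

/* Mirrors CHOOSE_LOAD_FUNC(K, sk_load_word): which entry point the JIT emits. */
static const char *choose_load_func(int K)
{
	if (K < 0)
		return K >= SKF_LL_OFF ? "sk_load_word_negative_offset"
				       : "sk_load_word";  /* run-time error path */
	return "sk_load_word_positive_offset";
}

int main(void)
{
	printf("K=14           -> %s\n", choose_load_func(14));
	printf("K=SKF_LL_OFF+4 -> %s\n", choose_load_func(SKF_LL_OFF + 4));
	printf("K=-0x300000    -> %s\n", choose_load_func(-0x300000));
	return 0;
}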
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index d09f3e8e6867..85825b5401e5 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_devel("axon_msi: woff %x roff %x msi %x\n",
 			  write_offset, msic->read_offset, msi);
 
-		if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
+		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
 			generic_handle_irq(msi);
 			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
 		} else {
@@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	if (rc)
 		return rc;
 
-	/* We rely on being able to stash a virq in a u16 */
-	BUILD_BUG_ON(NR_IRQS > 65536);
-
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		virq = irq_create_direct_mapping(msic->irq_domain);
 		if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device)
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
+	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index f9a48af335cb..8c6dc42ecf65 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void)
 {
 	int	i;
 
-	for (i = 1; i < NR_IRQS; i++)
+	for (i = 1; i < nr_irqs; i++)
 		beat_destruct_irq_plug(i);
 }
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 66ad93de1d55..c4e630576ff2 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -57,9 +57,9 @@ static int max_real_irqs;
 
 static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
 
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
 static int pmac_irq_cascade = -1;
 static struct irq_domain *pmac_pic_host;
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index aadbe4f6d537..178a5f300bc9 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -30,9 +30,9 @@ config PPC_SPLPAR
 	  two or more partitions.
 
 config EEH
-	bool "PCI Extended Error Handling (EEH)" if EXPERT
+	bool
 	depends on PPC_PSERIES && PCI
-	default y if !EXPERT
+	default y
 
 config PSERIES_MSI
 	bool
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index d3be961e2ae7..10386b676d87 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -51,8 +51,7 @@
 static intctl_cpm2_t __iomem *cpm2_intctl;
 
 static struct irq_domain *cpm2_pic_host;
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
 
 static const u_char irq_to_siureg[] = {
 	1, 1, 1, 1, 1, 1, 1, 1,
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index d5f5416be310..b724622c3a0b 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -18,69 +18,45 @@ extern int cpm_get_irq(struct pt_regs *regs);
 
 static struct irq_domain *mpc8xx_pic_host;
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long mpc8xx_cached_irq_mask;
 static sysconf8xx_t __iomem *siu_reg;
 
-int cpm_get_irq(struct pt_regs *regs);
+static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
+{
+	return 0x80000000 >> irqd_to_hwirq(d);
+}
 
 static void mpc8xx_unmask_irq(struct irq_data *d)
 {
-	int	bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_mask_irq(struct irq_data *d)
 {
-	int	bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_ack(struct irq_data *d)
 {
-	int	bit;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
+	out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
 }
 
 static void mpc8xx_end_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	if (flow_type & IRQ_TYPE_EDGE_FALLING) {
-		irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
+	/* only external IRQ senses are programmable */
+	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
 		unsigned int siel = in_be32(&siu_reg->sc_siel);
-
-		/* only external IRQ senses are programmable */
-		if ((hw & 1) == 0) {
-			siel |= (0x80000000 >> hw);
-			out_be32(&siu_reg->sc_siel, siel);
-			__irq_set_handler_locked(d->irq, handle_edge_irq);
-		}
+		siel |= mpc8xx_irqd_to_bit(d);
+		out_be32(&siu_reg->sc_siel, siel);
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
 	}
 	return 0;
 }
@@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
 		IRQ_TYPE_EDGE_FALLING,
 	};
 
+	if (intspec[0] > 0x1f)
+		return 0;
+
 	*out_hwirq = intspec[0];
 	if (intsize > 1 && intspec[1] < 4)
 		*out_flags = map_pic_senses[intspec[1]];
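The mpc8xx_pic rewrite above collapses the old word/bit arithmetic into a single 32-bit mask image plus the mpc8xx_irqd_to_bit() helper, which maps hwirq 0 to the most-significant bit of the SIMASK register. Below is a stand-alone illustration of that mapping and of how the cached mask image is updated; the values and names are made up for the example and this is not kernel code.

/* Illustration of the hwirq-to-SIMASK-bit mapping used above (not kernel code). */
#include <stdio.h>
#include <stdint.h>

/* Same mapping as mpc8xx_irqd_to_bit(): hwirq 0 is the MSB of the register. */
static uint32_t irq_bit(unsigned int hwirq)
{
	return 0x80000000u >> hwirq;
}

int main(void)
{
	uint32_t cached_mask = 0;

	cached_mask |= irq_bit(3);      /* unmask hwirq 3  */
	cached_mask |= irq_bit(16);     /* unmask hwirq 16 */
	cached_mask &= ~irq_bit(3);     /* mask hwirq 3 again */

	printf("SIMASK image: %08x\n", cached_mask);   /* prints 00008000 */
	return 0;
}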
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
index 49a3ece1c6b3..702256a1ca11 100644
--- a/arch/powerpc/sysdev/scom.c
+++ b/arch/powerpc/sysdev/scom.c
@@ -22,6 +22,7 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <asm/debug.h>
 #include <asm/prom.h>
 #include <asm/scom.h>
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index ea5e204e3450..cd1d18db92c6 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void)
 {
 	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
 	unsigned int irq, virq;
+	struct irq_desc *desc;
 
 	/* If we used to be the default server, move to the new "boot_cpuid" */
 	if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void)
 	/* Allow IPIs again... */
 	icp_ops->set_priority(DEFAULT_PRIORITY);
 
-	for_each_irq(virq) {
-		struct irq_desc *desc;
+	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
 		unsigned long flags;
@@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void)
 		/* We can't set affinity on ISA interrupts */
 		if (virq < NUM_ISA_INTERRUPTS)
 			continue;
-		desc = irq_to_desc(virq);
 		/* We only need to migrate enabled IRQS */
-		if (!desc || !desc->action)
+		if (!desc->action)
 			continue;
 		if (desc->irq_data.domain != xics_host)
 			continue;