author     David S. Miller <davem@sunset.davemloft.net>  2007-10-14 08:42:46 +0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-10-14 08:53:15 +0400
commit     eb2d8d60327bec172ec80efbda94d0c492088204 (patch)
tree       5c01deb8c251f8aa64cc3db2b95fd26f8ac285a6
parent     a650d3839e7a68321e5b76264398a63019b0928b (diff)
download   linux-eb2d8d60327bec172ec80efbda94d0c492088204.tar.xz
[SPARC64]: Access ivector_table[] using physical addresses.
Signed-off-by: David S. Miller <davem@davemloft.net>
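
Context for the diff below: the old code chained ino_bucket entries through kernel virtual addresses, which is why the comment removed from irq.c insisted that ivector_table[] stay in the TLB-locked kernel image (a TLB miss taken from the vectored-interrupt trap handler could overflow the sparc v9 trap stack). After this patch the per-cpu work list links buckets by physical address, and the trap handlers touch them with ASI_PHYS_USE_EC loads and stores, which bypass the TLB. Since ivector_table[] lives in the kernel image, its physical address is a fixed offset from the image's physical load address. A minimal C sketch of that translation, the same computation as the kimage_addr_to_ra() helper the patch adds to irq.c (kern_base and KERNBASE are existing sparc64 symbols):

    /* Sketch: translate a kernel-image virtual address to a physical
     * (real) address.  kern_base is the physical base the image was
     * loaded at; KERNBASE is its virtual base.
     */
    static unsigned long kimage_addr_to_ra(void *p)
    {
            unsigned long val = (unsigned long) p;

            return kern_base + (val - KERNBASE);
    }

    /* Used once at boot, e.g.:
     *     ivector_table_pa = kimage_addr_to_ra(&ivector_table[0]);
     */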
-rw-r--r--  arch/sparc64/kernel/entry.S      | 12
-rw-r--r--  arch/sparc64/kernel/irq.c        | 56
-rw-r--r--  arch/sparc64/kernel/sun4v_ivec.S | 16
-rw-r--r--  arch/sparc64/kernel/traps.c      |  4
-rw-r--r--  include/asm-sparc64/cpudata.h    | 12
5 files changed, 55 insertions, 45 deletions
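
Before reading the hunks, it may help to see the renamed fields in one place. Below is a sketch of the two structures as they look after the patch, assembled from the irq.c and cpudata.h hunks that follow (the hunks are authoritative; the size and offset comments are inferred from the "sllx %g3, 4" scaling in the handlers and the 0xe0 define):

    /* Per-INO bucket (irq.c): now chained by physical address. */
    struct ino_bucket {
    /*0x00*/unsigned long irq_chain_pa;   /* __pa() of next bucket on the
                                           * per-cpu work list, 0 if last */

            /* Virtual interrupt number assigned to this INO.  */
    /*0x08*/unsigned int virt_irq;
    };      /* 16 bytes per entry: the trap handlers index the table
             * with "sllx %g3, 4, %g3" */

    /* Per-cpu trap block (cpudata.h): the list head is now a physical
     * address, at offset 0xe0 (TRAP_PER_CPU_IRQ_WORKLIST_PA).
     */
    struct trap_per_cpu {
            /* ... */
            unsigned long irq_worklist_pa;
            unsigned int cpu_mondo_qmask;
            /* ... */
    };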
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 9a785293203f..c9b0d7af64ae 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -429,16 +429,16 @@ do_ivec:
 	stxa	%g0, [%g0] ASI_INTR_RECEIVE
 	membar	#Sync
 
-	sethi	%hi(ivector_table), %g2
+	sethi	%hi(ivector_table_pa), %g2
+	ldx	[%g2 + %lo(ivector_table_pa)], %g2
 	sllx	%g3, 4, %g3
-	or	%g2, %lo(ivector_table), %g2
 	add	%g2, %g3, %g3
 
-	TRAP_LOAD_IRQ_WORK(%g6, %g1)
+	TRAP_LOAD_IRQ_WORK_PA(%g6, %g1)
 
-	ldx	[%g6], %g5		/* g5 = irq_work(cpu) */
-	stx	%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
-	stx	%g3, [%g6]		/* irq_work(cpu) = bucket */
+	ldx	[%g6], %g5
+	stxa	%g5, [%g3] ASI_PHYS_USE_EC
+	stx	%g3, [%g6]
 	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
 	retry
 do_ivec_xcall:
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 4db4dd576210..26cdf47981c3 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -51,15 +51,12 @@
  * To make processing these packets efficient and race free we use
  * an array of irq buckets below.  The interrupt vector handler in
  * entry.S feeds incoming packets into per-cpu pil-indexed lists.
- * The IVEC handler does not need to act atomically, the PIL dispatch
- * code uses CAS to get an atomic snapshot of the list and clear it
- * at the same time.
  *
  * If you make changes to ino_bucket, please update hand coded assembler
  * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
  */
 struct ino_bucket {
-/*0x00*/unsigned long irq_chain;
+/*0x00*/unsigned long irq_chain_pa;
 
 	/* Virtual interrupt number assigned to this INO.  */
 /*0x08*/unsigned int virt_irq;
@@ -68,20 +65,14 @@ struct ino_bucket {
 
 #define NUM_IVECS	(IMAP_INR + 1)
 struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
+unsigned long ivector_table_pa;
 
 #define __irq_ino(irq) \
         (((struct ino_bucket *)(irq)) - &ivector_table[0])
 #define __bucket(irq) ((struct ino_bucket *)(irq))
 #define __irq(bucket) ((unsigned long)(bucket))
 
-/* This has to be in the main kernel image, it cannot be
- * turned into per-cpu data.  The reason is that the main
- * kernel image is locked into the TLB and this structure
- * is accessed from the vectored interrupt trap handler.  If
- * access to this structure takes a TLB miss it could cause
- * the 5-level sparc v9 trap stack to overflow.
- */
-#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
+#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
 
 static struct {
 	unsigned long irq;
@@ -689,9 +680,8 @@ void ack_bad_irq(unsigned int virt_irq)
 
 void handler_irq(int irq, struct pt_regs *regs)
 {
-	struct ino_bucket *bucket;
+	unsigned long pstate, bucket_pa;
 	struct pt_regs *old_regs;
-	unsigned long pstate;
 
 	clear_softint(1 << irq);
 
@@ -704,18 +694,30 @@ void handler_irq(int irq, struct pt_regs *regs)
 			     "ldx	[%2], %1\n\t"
 			     "stx	%%g0, [%2]\n\t"
 			     "wrpr	%0, 0x0, %%pstate\n\t"
-			     : "=&r" (pstate), "=&r" (bucket)
-			     : "r" (irq_work(smp_processor_id())),
+			     : "=&r" (pstate), "=&r" (bucket_pa)
+			     : "r" (irq_work_pa(smp_processor_id())),
 			       "i" (PSTATE_IE)
 			     : "memory");
 
-	while (bucket) {
-		struct ino_bucket *next = __bucket(bucket->irq_chain);
+	while (bucket_pa) {
+		unsigned long next_pa;
+		unsigned int virt_irq;
 
-		bucket->irq_chain = 0UL;
-		__do_IRQ(bucket->virt_irq);
+		__asm__ __volatile__("ldxa	[%2] %4, %0\n\t"
+				     "lduwa	[%3] %4, %1\n\t"
+				     "stxa	%%g0, [%2] %4"
+				     : "=&r" (next_pa), "=&r" (virt_irq)
+				     : "r" (bucket_pa +
+					    offsetof(struct ino_bucket,
+						     irq_chain_pa)),
+				       "r" (bucket_pa +
+					    offsetof(struct ino_bucket,
+						     virt_irq)),
+				       "i" (ASI_PHYS_USE_EC));
 
-		bucket = next;
+		__do_IRQ(virt_irq);
+
+		bucket_pa = next_pa;
 	}
 
 	irq_exit();
@@ -815,7 +817,7 @@ void init_irqwork_curcpu(void)
 {
 	int cpu = hard_smp_processor_id();
 
-	trap_block[cpu].irq_worklist = 0UL;
+	trap_block[cpu].irq_worklist_pa = 0UL;
 }
 
 /* Please be very careful with register_one_mondo() and
@@ -926,6 +928,14 @@ static struct irqaction timer_irq_action = {
 	.name = "timer",
 };
 
+/* XXX Belongs in a common location. XXX */
+static unsigned long kimage_addr_to_ra(void *p)
+{
+	unsigned long val = (unsigned long) p;
+
+	return kern_base + (val - KERNBASE);
+}
+
 /* Only invoked on boot processor. */
 void __init init_IRQ(void)
 {
@@ -933,6 +943,8 @@ void __init init_IRQ(void)
 	kill_prom_timer();
 	memset(&ivector_table[0], 0, sizeof(ivector_table));
 
+	ivector_table_pa = kimage_addr_to_ra(&ivector_table[0]);
+
 	if (tlb_type == hypervisor)
 		sun4v_init_mondo_queues();
 
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index e3e9d4c1574b..16d306445912 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -96,19 +96,17 @@ sun4v_dev_mondo:
 	stxa	%g2, [%g4] ASI_QUEUE
 	membar	#Sync
 
-	/* Get &__irq_work[smp_processor_id()] into %g1.  */
-	TRAP_LOAD_IRQ_WORK(%g1, %g4)
+	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
 
-	/* Get &ivector_table[IVEC] into %g4.  */
-	sethi	%hi(ivector_table), %g4
+	/* Get __pa(&ivector_table[IVEC]) into %g4.  */
+	sethi	%hi(ivector_table_pa), %g4
+	ldx	[%g4 + %lo(ivector_table_pa)], %g4
 	sllx	%g3, 4, %g3
-	or	%g4, %lo(ivector_table), %g4
 	add	%g4, %g3, %g4
 
-	/* Insert ivector_table[] entry into __irq_work[] queue.  */
-	ldx	[%g1], %g2		/* g2 = irq_work(cpu) */
-	stx	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
-	stx	%g4, [%g1]		/* irq_work(cpu) = bucket */
+	ldx	[%g1], %g2
+	stxa	%g2, [%g4] ASI_PHYS_USE_EC
+	stx	%g4, [%g1]
 
 	/* Signal the interrupt by setting (1 << pil) in %softint.  */
 	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 6ef42b8e53d8..34573a55b6e5 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2569,8 +2569,8 @@ void __init trap_init(void)
 	     offsetof(struct trap_per_cpu, tsb_huge)) ||
 	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
 	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
-	    (TRAP_PER_CPU_IRQ_WORKLIST !=
-	     offsetof(struct trap_per_cpu, irq_worklist)) ||
+	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
+	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
 	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
 	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
 	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 379c219c3b7a..542421460a12 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -75,7 +75,7 @@ struct trap_per_cpu {
 	unsigned long		tsb_huge_temp;
 
 /* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size.  */
-	unsigned long		irq_worklist;
+	unsigned long		irq_worklist_pa;
 	unsigned int		cpu_mondo_qmask;
 	unsigned int		dev_mondo_qmask;
 	unsigned int		resum_qmask;
@@ -127,7 +127,7 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 #define TRAP_PER_CPU_CPU_LIST_PA	0xc8
 #define TRAP_PER_CPU_TSB_HUGE		0xd0
 #define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
-#define TRAP_PER_CPU_IRQ_WORKLIST	0xe0
+#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
 #define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
 #define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
 #define TRAP_PER_CPU_RESUM_QMASK	0xf0
@@ -183,9 +183,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 
 /* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
-#define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
 	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
 
 /* Clobbers TMP, loads DEST with current thread info pointer.  */
 #define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
@@ -222,9 +222,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 
 /* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
-#define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
 	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
 
 #define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
 	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
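
As a reading aid for the irq.c hunk above: after the change, handler_irq() walks the per-cpu chain using only physical addresses. Roughly, in C, with hypothetical phys_load_u64()/phys_load_u32()/phys_store_u64() helpers standing in for the ldxa/lduwa/stxa ASI_PHYS_USE_EC inline assembly (these helpers are not real kernel APIs; they only make the control flow visible):

    /* Hypothetical stand-ins for the ASI_PHYS_USE_EC accesses. */
    static unsigned long phys_load_u64(unsigned long pa);
    static unsigned int  phys_load_u32(unsigned long pa);
    static void phys_store_u64(unsigned long pa, unsigned long val);

    /* Sketch of the new loop in handler_irq().  bucket_pa is the snapshot
     * of trap_block[cpu].irq_worklist_pa that the preceding inline
     * assembly fetched and zeroed with interrupts disabled.
     */
    static void run_bucket_chain(unsigned long bucket_pa)
    {
            while (bucket_pa) {
                    unsigned long next_pa;
                    unsigned int virt_irq;

                    /* Read bucket->irq_chain_pa and bucket->virt_irq
                     * through physical addresses, then clear the link.
                     */
                    next_pa  = phys_load_u64(bucket_pa +
                                             offsetof(struct ino_bucket, irq_chain_pa));
                    virt_irq = phys_load_u32(bucket_pa +
                                             offsetof(struct ino_bucket, virt_irq));
                    phys_store_u64(bucket_pa +
                                   offsetof(struct ino_bucket, irq_chain_pa), 0UL);

                    __do_IRQ(virt_irq);

                    bucket_pa = next_pa;
            }
    }

The producer side (do_ivec in entry.S and sun4v_dev_mondo in sun4v_ivec.S) mirrors this: it stores the current list head into the bucket's irq_chain_pa with "stxa ... ASI_PHYS_USE_EC" and then makes the bucket the new head in trap_block[cpu].irq_worklist_pa.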