Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--   arch/parisc/kernel/drivers.c    |  40
-rw-r--r--   arch/parisc/kernel/hpmc.S       |   8
-rw-r--r--   arch/parisc/kernel/init_task.c  |   1
-rw-r--r--   arch/parisc/kernel/irq.c        |  23
-rw-r--r--   arch/parisc/kernel/module.c     | 216
-rw-r--r--   arch/parisc/kernel/pdc_cons.c   |   2
-rw-r--r--   arch/parisc/kernel/perf.c       |   4
-rw-r--r--   arch/parisc/kernel/processor.c  |  68
-rw-r--r--   arch/parisc/kernel/setup.c      |  11
-rw-r--r--   arch/parisc/kernel/smp.c        |  47
-rw-r--r--   arch/parisc/kernel/time.c       |   4
-rw-r--r--   arch/parisc/kernel/topology.c   |   4
-rw-r--r--   arch/parisc/kernel/traps.c      |   9
-rw-r--r--   arch/parisc/kernel/unwind.c     |   2
14 files changed, 255 insertions(+), 184 deletions(-)
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 884b7ce16a3b..994bcd980909 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -549,6 +549,38 @@ static int parisc_generic_match(struct device *dev, struct device_driver *drv)
         return match_device(to_parisc_driver(drv), to_parisc_device(dev));
 }
 
+static ssize_t make_modalias(struct device *dev, char *buf)
+{
+        const struct parisc_device *padev = to_parisc_device(dev);
+        const struct parisc_device_id *id = &padev->id;
+
+        return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
+                (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
+                (u32)id->sversion);
+}
+
+static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+        const struct parisc_device *padev;
+        char modalias[40];
+
+        if (!dev)
+                return -ENODEV;
+
+        padev = to_parisc_device(dev);
+        if (!padev)
+                return -ENODEV;
+
+        if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
+                return -ENOMEM;
+
+        make_modalias(dev, modalias);
+        if (add_uevent_var(env, "MODALIAS=%s", modalias))
+                return -ENOMEM;
+
+        return 0;
+}
+
 #define pa_dev_attr(name, field, format_string)                                \
 static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf)   \
 {                                                                       \
@@ -566,12 +598,7 @@ pa_dev_attr_id(sversion, "0x%05x\n");
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-        struct parisc_device *padev = to_parisc_device(dev);
-        struct parisc_device_id *id = &padev->id;
-
-        return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
-                (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
-                (u32)id->sversion);
+        return make_modalias(dev, buf);
 }
 
 static struct device_attribute parisc_device_attrs[] = {
@@ -587,6 +614,7 @@ static struct device_attribute parisc_device_attrs[] = {
 struct bus_type parisc_bus_type = {
         .name = "parisc",
         .match = parisc_generic_match,
+        .uevent = parisc_uevent,
         .dev_attrs = parisc_device_attrs,
         .probe = parisc_driver_probe,
         .remove = parisc_driver_remove,
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 2cbf13b3ef11..5595a2f31181 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -80,6 +80,7 @@ END(hpmc_pim_data)
 
         .import intr_save, code
 ENTRY(os_hpmc)
+.os_hpmc:
 
         /*
          * registers modified:
@@ -295,5 +296,10 @@ os_hpmc_6:
         b .
         nop
 ENDPROC(os_hpmc)
-ENTRY(os_hpmc_end)      /* this label used to compute os_hpmc checksum */
+.os_hpmc_end:
         nop
+.data
+.align 4
+        .export os_hpmc_size
+os_hpmc_size:
+        .word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index f5941c086551..1e25a45d64c1 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -34,7 +34,6 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 
-static struct fs_struct init_fs = INIT_FS;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 23ef950df008..49482806863f 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
         if (CHECK_IRQ_PER_CPU(irq)) {
                 /* Bad linux design decision.  The mask has already
                  * been set; we must reset it */
-                irq_desc[irq].affinity = CPU_MASK_ALL;
+                cpumask_setall(irq_desc[irq].affinity);
                 return -EINVAL;
         }
 
@@ -131,12 +131,12 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
         return 0;
 }
 
-static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 {
-        if (cpu_check_affinity(irq, &dest))
+        if (cpu_check_affinity(irq, dest))
                 return;
 
-        irq_desc[irq].affinity = dest;
+        cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -295,10 +295,10 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-        irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+        cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
-        return cpu_data[cpu].txn_addr;
+        return per_cpu(cpu_data, cpu).txn_addr;
 }
 
 
@@ -309,8 +309,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
         next_cpu++; /* assign to "next" CPU we want this bugger on */
 
         /* validate entry */
-        while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
-                !cpu_online(next_cpu)))
+        while ((next_cpu < NR_CPUS) &&
+                (!per_cpu(cpu_data, next_cpu).txn_addr ||
+                 !cpu_online(next_cpu)))
                 next_cpu++;
 
         if (next_cpu >= NR_CPUS)
@@ -351,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
         irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-        dest = irq_desc[irq].affinity;
+        cpumask_copy(&dest, irq_desc[irq].affinity);
         if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
             !cpu_isset(smp_processor_id(), dest)) {
                 int cpu = first_cpu(dest);
@@ -359,7 +360,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
                 printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
                        irq, smp_processor_id(), cpu);
                 gsc_writel(irq + CPU_IRQ_BASE,
-                        cpu_data[cpu].hpa);
+                        per_cpu(cpu_data, cpu).hpa);
                 goto set_out;
         }
 #endif
@@ -421,5 +422,5 @@ void __init init_IRQ(void)
 
 void ack_bad_irq(unsigned int irq)
 {
-        printk("unexpected IRQ %d\n", irq);
+        printk(KERN_WARNING "unexpected IRQ %d\n", irq);
 }
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 44138c3e6ea7..9013243cecca 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -6,6 +6,7 @@
  *
  *    Linux/PA-RISC Project (http://www.parisc-linux.org/)
  *    Copyright (C) 2003 Randolph Chung <tausq at debian . org>
+ *    Copyright (C) 2008 Helge Deller <deller@gmx.de>
  *
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -24,6 +25,19 @@
  *
  *
  * Notes:
+ *  - PLT stub handling
+ *    On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
+ *    ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
+ *    fail to reach their PLT stub if we only create one big stub array for
+ *    all sections at the beginning of the core or init section.
+ *    Instead we now insert individual PLT stub entries directly in front of
+ *    of the code sections where the stubs are actually called.
+ *    This reduces the distance between the PCREL location and the stub entry
+ *    so that the relocations can be fulfilled.
+ *    While calculating the final layout of the kernel module in memory, the
+ *    kernel module loader calls arch_mod_section_prepend() to request the
+ *    to be reserved amount of memory in front of each individual section.
+ *
  *  - SEGREL32 handling
  *    We are not doing SEGREL32 handling correctly. According to the ABI, we
  *    should do a value offset, like this:
@@ -58,9 +72,13 @@
 #define DEBUGP(fmt...)
 #endif
 
+#define RELOC_REACHABLE(val, bits) \
+        (( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 )  ||  \
+             ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \
+        0 : 1)
+
 #define CHECK_RELOC(val, bits) \
-        if ( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 )  || \
-             ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) { \
+        if (!RELOC_REACHABLE(val, bits)) { \
                 printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
                 me->name, strtab + sym->st_name, (unsigned long)val, bits); \
                 return -ENOEXEC;                        \
@@ -92,13 +110,6 @@ static inline int in_local(struct module *me, void *loc)
         return in_init(me, loc) || in_core(me, loc);
 }
 
-static inline int in_local_section(struct module *me, void *loc, void *dot)
-{
-        return (in_init(me, loc) && in_init(me, dot)) ||
-                (in_core(me, loc) && in_core(me, dot));
-}
-
-
 #ifndef CONFIG_64BIT
 struct got_entry {
         Elf32_Addr addr;
@@ -258,23 +269,42 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
 /* Free memory returned from module_alloc */
 void module_free(struct module *mod, void *module_region)
 {
+        kfree(mod->arch.section);
+        mod->arch.section = NULL;
+
         vfree(module_region);
         /* FIXME: If module_region == mod->init_region, trim exception
            table entries. */
 }
 
+/* Additional bytes needed in front of individual sections */
+unsigned int arch_mod_section_prepend(struct module *mod,
+                                      unsigned int section)
+{
+        /* size needed for all stubs of this section (including
+         * one additional for correct alignment of the stubs) */
+        return (mod->arch.section[section].stub_entries + 1)
+                * sizeof(struct stub_entry);
+}
+
 #define CONST
 int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
                               CONST Elf_Shdr *sechdrs,
                               CONST char *secstrings,
                               struct module *me)
 {
-        unsigned long gots = 0, fdescs = 0, stubs = 0, init_stubs = 0;
+        unsigned long gots = 0, fdescs = 0, len;
         unsigned int i;
 
+        len = hdr->e_shnum * sizeof(me->arch.section[0]);
+        me->arch.section = kzalloc(len, GFP_KERNEL);
+        if (!me->arch.section)
+                return -ENOMEM;
+
         for (i = 1; i < hdr->e_shnum; i++) {
-                const Elf_Rela *rels = (void *)hdr + sechdrs[i].sh_offset;
+                const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
                 unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
+                unsigned int count, s;
 
                 if (strncmp(secstrings + sechdrs[i].sh_name,
                             ".PARISC.unwind", 14) == 0)
@@ -290,11 +320,23 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
                  */
                 gots += count_gots(rels, nrels);
                 fdescs += count_fdescs(rels, nrels);
-                if(strncmp(secstrings + sechdrs[i].sh_name,
-                           ".rela.init", 10) == 0)
-                        init_stubs += count_stubs(rels, nrels);
-                else
-                        stubs += count_stubs(rels, nrels);
+
+                /* XXX: By sorting the relocs and finding duplicate entries
+                 * we could reduce the number of necessary stubs and save
+                 * some memory. */
+                count = count_stubs(rels, nrels);
+                if (!count)
+                        continue;
+
+                /* so we need relocation stubs. reserve necessary memory. */
+                /* sh_info gives the section for which we need to add stubs. */
+                s = sechdrs[i].sh_info;
+
+                /* each code section should only have one relocation section */
+                WARN_ON(me->arch.section[s].stub_entries);
+
+                /* store number of stubs we need for this section */
+                me->arch.section[s].stub_entries += count;
         }
 
         /* align things a bit */
@@ -306,18 +348,8 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
         me->arch.fdesc_offset = me->core_size;
         me->core_size += fdescs * sizeof(Elf_Fdesc);
 
-        me->core_size = ALIGN(me->core_size, 16);
-        me->arch.stub_offset = me->core_size;
-        me->core_size += stubs * sizeof(struct stub_entry);
-
-        me->init_size = ALIGN(me->init_size, 16);
-        me->arch.init_stub_offset = me->init_size;
-        me->init_size += init_stubs * sizeof(struct stub_entry);
-
         me->arch.got_max = gots;
         me->arch.fdesc_max = fdescs;
-        me->arch.stub_max = stubs;
-        me->arch.init_stub_max = init_stubs;
 
         return 0;
 }
@@ -380,23 +412,27 @@ enum elf_stub_type {
 };
 
 static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
-        enum elf_stub_type stub_type, int init_section)
+        enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
 {
-        unsigned long i;
         struct stub_entry *stub;
 
-        if(init_section) {
-                i = me->arch.init_stub_count++;
-                BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max);
-                stub = me->module_init + me->arch.init_stub_offset +
-                        i * sizeof(struct stub_entry);
-        } else {
-                i = me->arch.stub_count++;
-                BUG_ON(me->arch.stub_count > me->arch.stub_max);
-                stub = me->module_core + me->arch.stub_offset +
-                        i * sizeof(struct stub_entry);
+        /* initialize stub_offset to point in front of the section */
+        if (!me->arch.section[targetsec].stub_offset) {
+                loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
+                                sizeof(struct stub_entry);
+                /* get correct alignment for the stubs */
+                loc0 = ALIGN(loc0, sizeof(struct stub_entry));
+                me->arch.section[targetsec].stub_offset = loc0;
         }
 
+        /* get address of stub entry */
+        stub = (void *) me->arch.section[targetsec].stub_offset;
+        me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
+
+        /* do not write outside available stub area */
+        BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
+
+
 #ifndef CONFIG_64BIT
         /* for 32-bit the stub looks like this:
          *      ldil L'XXX,%r1
@@ -489,15 +525,19 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
         Elf32_Addr val;
         Elf32_Sword addend;
         Elf32_Addr dot;
+        Elf_Addr loc0;
+        unsigned int targetsec = sechdrs[relsec].sh_info;
         //unsigned long dp = (unsigned long)$global$;
         register unsigned long dp asm ("r27");
 
         DEBUGP("Applying relocate section %u to %u\n", relsec,
-               sechdrs[relsec].sh_info);
+               targetsec);
         for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                 /* This is where to make the change */
-                loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+                loc = (void *)sechdrs[targetsec].sh_addr
                       + rel[i].r_offset;
+                /* This is the start of the target section */
+                loc0 = sechdrs[targetsec].sh_addr;
                 /* This is the symbol it is referring to */
                 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
                         + ELF32_R_SYM(rel[i].r_info);
@@ -569,19 +609,32 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
                         break;
                 case R_PARISC_PCREL17F:
                         /* 17-bit PC relative address */
-                        val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
+                        /* calculate direct call offset */
+                        val += addend;
                         val = (val - dot - 8)/4;
-                        CHECK_RELOC(val, 17)
+                        if (!RELOC_REACHABLE(val, 17)) {
+                                /* direct distance too far, create
+                                 * stub entry instead */
+                                val = get_stub(me, sym->st_value, addend,
+                                        ELF_STUB_DIRECT, loc0, targetsec);
+                                val = (val - dot - 8)/4;
+                                CHECK_RELOC(val, 17);
+                        }
                         *loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
                         break;
                 case R_PARISC_PCREL22F:
                         /* 22-bit PC relative address; only defined for pa20 */
-                        val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
-                        DEBUGP("STUB FOR %s loc %lx+%lx at %lx\n",
-                                strtab + sym->st_name, (unsigned long)loc, addend,
-                                val)
+                        /* calculate direct call offset */
+                        val += addend;
                         val = (val - dot - 8)/4;
-                        CHECK_RELOC(val, 22);
+                        if (!RELOC_REACHABLE(val, 22)) {
+                                /* direct distance too far, create
+                                 * stub entry instead */
+                                val = get_stub(me, sym->st_value, addend,
+                                        ELF_STUB_DIRECT, loc0, targetsec);
+                                val = (val - dot - 8)/4;
+                                CHECK_RELOC(val, 22);
+                        }
                         *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
                         break;
 
@@ -610,13 +663,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
         Elf64_Addr val;
         Elf64_Sxword addend;
         Elf64_Addr dot;
+        Elf_Addr loc0;
+        unsigned int targetsec = sechdrs[relsec].sh_info;
 
         DEBUGP("Applying relocate section %u to %u\n", relsec,
-               sechdrs[relsec].sh_info);
+               targetsec);
         for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                 /* This is where to make the change */
-                loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+                loc = (void *)sechdrs[targetsec].sh_addr
                       + rel[i].r_offset;
+                /* This is the start of the target section */
+                loc0 = sechdrs[targetsec].sh_addr;
                 /* This is the symbol it is referring to */
                 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                         + ELF64_R_SYM(rel[i].r_info);
@@ -672,42 +729,40 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 
                         DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
                                strtab + sym->st_name, loc, val);
+                        val += addend;
                         /* can we reach it locally? */
-                        if(!in_local_section(me, (void *)val, (void *)dot)) {
-
-                                if (in_local(me, (void *)val))
-                                        /* this is the case where the
-                                         * symbol is local to the
-                                         * module, but in a different
-                                         * section, so stub the jump
-                                         * in case it's more than 22
-                                         * bits away */
-                                        val = get_stub(me, val, addend, ELF_STUB_DIRECT,
-                                                       in_init(me, loc));
-                                else if (strncmp(strtab + sym->st_name, "$$", 2)
+                        if (in_local(me, (void *)val)) {
+                                /* this is the case where the symbol is local
+                                 * to the module, but in a different section,
+                                 * so stub the jump in case it's more than 22
+                                 * bits away */
+                                val = (val - dot - 8)/4;
+                                if (!RELOC_REACHABLE(val, 22)) {
+                                        /* direct distance too far, create
+                                         * stub entry instead */
+                                        val = get_stub(me, sym->st_value,
+                                                addend, ELF_STUB_DIRECT,
+                                                loc0, targetsec);
+                                } else {
+                                        /* Ok, we can reach it directly. */
+                                        val = sym->st_value;
+                                        val += addend;
+                                }
+                        } else {
+                                val = sym->st_value;
+                                if (strncmp(strtab + sym->st_name, "$$", 2)
                                     == 0)
                                         val = get_stub(me, val, addend, ELF_STUB_MILLI,
-                                                       in_init(me, loc));
+                                                       loc0, targetsec);
                                 else
                                         val = get_stub(me, val, addend, ELF_STUB_GOT,
-                                                       in_init(me, loc));
+                                                       loc0, targetsec);
                         }
 
                         DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
                                strtab + sym->st_name, loc, sym->st_value, addend, val);
-                        /* FIXME: local symbols work as long as the
-                         * core and init pieces aren't separated too
-                         * far.  If this is ever broken, you will trip
-                         * the check below.  The way to fix it would
-                         * be to generate local stubs to go between init
-                         * and core */
-                        if((Elf64_Sxword)(val - dot - 8) > 0x800000 -1 ||
-                           (Elf64_Sxword)(val - dot - 8) < -0x800000) {
-                                printk(KERN_ERR "Module %s, symbol %s is out of range for PCREL22F relocation\n",
-                                       me->name, strtab + sym->st_name);
-                                return -ENOEXEC;
-                        }
                         val = (val - dot - 8)/4;
+                        CHECK_RELOC(val, 22);
                         *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
                         break;
                 case R_PARISC_DIR64:
@@ -794,12 +849,8 @@ int module_finalize(const Elf_Ehdr *hdr,
                 addr = (u32 *)entry->addr;
                 printk("INSNS: %x %x %x %x\n",
                         addr[0], addr[1], addr[2], addr[3]);
-                printk("stubs used %ld, stubs max %ld\n"
-                       "init_stubs used %ld, init stubs max %ld\n"
-                       "got entries used %ld, gots max %ld\n"
+                printk("got entries used %ld, gots max %ld\n"
                        "fdescs used %ld, fdescs max %ld\n",
-                       me->arch.stub_count, me->arch.stub_max,
-                       me->arch.init_stub_count, me->arch.init_stub_max,
                        me->arch.got_count, me->arch.got_max,
                        me->arch.fdesc_count, me->arch.fdesc_max);
 #endif
@@ -829,7 +880,10 @@ int module_finalize(const Elf_Ehdr *hdr,
                         me->name, me->arch.got_count, MAX_GOTS);
                 return -EINVAL;
         }
-        
+
+        kfree(me->arch.section);
+        me->arch.section = NULL;
+
         /* no symbol table */
         if(symhdr == NULL)
                 return 0;
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index ccb68090781e..1ff366cb9685 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -52,7 +52,7 @@
 #include <linux/tty.h>
 #include <asm/pdc.h>            /* for iodc_call() proto and friends */
 
-static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pdc_console_lock);
 
 static void pdc_console_write(struct console *co, const char *s, unsigned count)
 {
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index f696f57faa15..75099efb3bf3 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -541,9 +541,9 @@ static int __init perf_init(void)
         spin_lock_init(&perf_lock);
 
         /* TODO: this only lets us access the first cpu.. what to do for SMP? */
-        cpu_device = cpu_data[0].dev;
+        cpu_device = per_cpu(cpu_data, 0).dev;
         printk("Performance monitoring counters enabled for %s\n",
-                cpu_data[0].dev->name);
+                per_cpu(cpu_data, 0).dev->name);
 
         return 0;
 }
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 370086fb8333..ecb609342feb 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -3,7 +3,7 @@
  *    Initial setup-routines for HP 9000 based hardware.
  *
  *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
- *    Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ *    Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
  *    Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
  *    Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
  *    Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
@@ -46,7 +46,7 @@
 struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
 EXPORT_SYMBOL(boot_cpu_data);
 
-struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly;
+DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
 extern int update_cr16_clocksource(void);       /* from time.c */
 
@@ -69,6 +69,23 @@ extern int update_cr16_clocksource(void);      /* from time.c */
  */
 
 /**
+ * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * @cpunum: The processor instance.
+ *
+ * FIXME: doesn't do much yet...
+ */
+static void __cpuinit
+init_percpu_prof(unsigned long cpunum)
+{
+        struct cpuinfo_parisc *p;
+
+        p = &per_cpu(cpu_data, cpunum);
+        p->prof_counter = 1;
+        p->prof_multiplier = 1;
+}
+
+
+/**
  * processor_probe - Determine if processor driver should claim this device.
  * @dev: The device which has been found.
  *
@@ -147,7 +164,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
         }
 #endif
 
-        p = &cpu_data[cpuid];
+        p = &per_cpu(cpu_data, cpuid);
         boot_cpu_data.cpu_count++;
 
         /* initialize counters - CPU 0 gets it_value set in time_init() */
@@ -162,12 +179,9 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
 #ifdef CONFIG_SMP
         /*
         ** FIXME: review if any other initialization is clobbered
-        ** for boot_cpu by the above memset().
+        ** for boot_cpu by the above memset().
         */
-
-        /* stolen from init_percpu_prof() */
-        cpu_data[cpuid].prof_counter = 1;
-        cpu_data[cpuid].prof_multiplier = 1;
+        init_percpu_prof(cpuid);
 #endif
 
         /*
@@ -261,19 +275,6 @@ void __init collect_boot_cpu_data(void)
 }
 
 
-/**
- * init_cpu_profiler - enable/setup per cpu profiling hooks.
- * @cpunum: The processor instance.
- *
- * FIXME: doesn't do much yet...
- */
-static inline void __init
-init_percpu_prof(int cpunum)
-{
-        cpu_data[cpunum].prof_counter = 1;
-        cpu_data[cpunum].prof_multiplier = 1;
-}
-
 
 /**
  * init_per_cpu - Handle individual processor initializations.
@@ -293,7 +294,7 @@ init_percpu_prof(int cpunum)
  *
  * o Enable CPU profiling hooks.
  */
-int __init init_per_cpu(int cpunum)
+int __cpuinit init_per_cpu(int cpunum)
 {
         int ret;
         struct pdc_coproc_cfg coproc_cfg;
@@ -307,8 +308,8 @@ init_percpu_prof(int cpunum)
         /* FWIW, FP rev/model is a more accurate way to determine
         ** CPU type. CPU rev/model has some ambiguous cases.
         */
-        cpu_data[cpunum].fp_rev = coproc_cfg.revision;
-        cpu_data[cpunum].fp_model = coproc_cfg.model;
+        per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+        per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
         printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
                cpunum, coproc_cfg.revision, coproc_cfg.model);
@@ -344,16 +345,17 @@
 int
 show_cpuinfo (struct seq_file *m, void *v)
 {
-        int n;
+        unsigned long cpu;
 
-        for(n=0; n<boot_cpu_data.cpu_count; n++) {
+        for_each_online_cpu(cpu) {
+                const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 #ifdef CONFIG_SMP
-                if (0 == cpu_data[n].hpa)
+                if (0 == cpuinfo->hpa)
                         continue;
 #endif
-                seq_printf(m, "processor\t: %d\n"
+                seq_printf(m, "processor\t: %lu\n"
                                 "cpu family\t: PA-RISC %s\n",
-                         n, boot_cpu_data.family_name);
+                         cpu, boot_cpu_data.family_name);
 
                 seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
 
@@ -365,8 +367,8 @@ show_cpuinfo (struct seq_file *m, void *v)
                 seq_printf(m, "model\t\t: %s\n"
                                 "model name\t: %s\n",
                          boot_cpu_data.pdc.sys_model_name,
-                         cpu_data[n].dev ?
-                         cpu_data[n].dev->name : "Unknown" );
+                         cpuinfo->dev ?
+                         cpuinfo->dev->name : "Unknown");
 
                 seq_printf(m, "hversion\t: 0x%08x\n"
                                 "sversion\t: 0x%08x\n",
@@ -377,8 +379,8 @@ show_cpuinfo (struct seq_file *m, void *v)
                 show_cache_info(m);
 
                 seq_printf(m, "bogomips\t: %lu.%02lu\n",
-                             cpu_data[n].loops_per_jiffy / (500000 / HZ),
-                             (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100);
+                             cpuinfo->loops_per_jiffy / (500000 / HZ),
+                             (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
 
                 seq_printf(m, "software id\t: %ld\n\n",
                         boot_cpu_data.pdc.model.sw_id);
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 7d27853ff8c8..82131ca8e05c 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -58,11 +58,6 @@ int parisc_bus_is_phys __read_mostly = 1;  /* Assume no IOMMU is present */
 EXPORT_SYMBOL(parisc_bus_is_phys);
 #endif
 
-/* This sets the vmerge boundary and size, it's here because it has to
- * be available on all platforms (zero means no-virtual merging) */
-unsigned long parisc_vmerge_boundary = 0;
-unsigned long parisc_vmerge_max_size = 0;
-
 void __init setup_cmdline(char **cmdline_p)
 {
         extern unsigned int boot_args[];
@@ -321,7 +316,7 @@ static int __init parisc_init(void)
         processor_init();
         printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
-                        boot_cpu_data.cpu_count,
+                        num_present_cpus(),
                         boot_cpu_data.cpu_name,
                         boot_cpu_data.cpu_hz / 1000000,
                         boot_cpu_data.cpu_hz % 1000000 );
 
@@ -387,8 +382,8 @@ void start_parisc(void)
         if (ret >= 0 && coproc_cfg.ccr_functional) {
                 mtctl(coproc_cfg.ccr_functional, 10);
 
-                cpu_data[cpunum].fp_rev = coproc_cfg.revision;
-                cpu_data[cpunum].fp_model = coproc_cfg.model;
+                per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+                per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
                 asm volatile ("fstd     %fr0,8(%sp)");
         } else {
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index d47f3975c9c6..9995d7ed5819 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -56,31 +56,17 @@ static int smp_debug_lvl = 0;
                         if (lvl >= smp_debug_lvl)       \
                                 printk(printargs);
 #else
-#define smp_debug(lvl, ...)
+#define smp_debug(lvl, ...)     do { } while(0)
 #endif /* DEBUG_SMP */
 
 DEFINE_SPINLOCK(smp_lock);
 
 volatile struct task_struct *smp_init_current_idle_task;
 
-static volatile int cpu_now_booting __read_mostly = 0;  /* track which CPU is booting */
+/* track which CPU is booting */
+static volatile int cpu_now_booting __cpuinitdata;
 
-static int parisc_max_cpus __read_mostly = 1;
-
-/* online cpus are ones that we've managed to bring up completely
- * possible cpus are all valid cpu
- * present cpus are all detected cpu
- *
- * On startup we bring up the "possible" cpus. Since we discover
- * CPUs later, we add them as hotplug, so the possible cpu mask is
- * empty in the beginning.
- */
-
-cpumask_t cpu_online_map   __read_mostly = CPU_MASK_NONE;       /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;        /* Bitmap of Present CPUs */
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
+static int parisc_max_cpus __cpuinitdata = 1;
 
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
@@ -138,7 +124,7 @@ irqreturn_t
 ipi_interrupt(int irq, void *dev_id)
 {
         int this_cpu = smp_processor_id();
-        struct cpuinfo_parisc *p = &cpu_data[this_cpu];
+        struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
         unsigned long ops;
         unsigned long flags;
 
@@ -217,13 +203,13 @@ ipi_interrupt(int irq, void *dev_id)
 static inline void
 ipi_send(int cpu, enum ipi_message_type op)
 {
-        struct cpuinfo_parisc *p = &cpu_data[cpu];
+        struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
         spinlock_t *lock = &per_cpu(ipi_lock, cpu);
         unsigned long flags;
 
         spin_lock_irqsave(lock, flags);
         p->pending_ipi |= 1 << op;
-        gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
+        gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
         spin_unlock_irqrestore(lock, flags);
 }
 
@@ -239,10 +225,7 @@ send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
 static inline void
 send_IPI_single(int dest_cpu, enum ipi_message_type op)
 {
-        if (dest_cpu == NO_PROC_ID) {
-                BUG();
-                return;
-        }
+        BUG_ON(dest_cpu == NO_PROC_ID);
 
         ipi_send(dest_cpu, op);
 }
@@ -324,8 +307,7 @@ smp_cpu_init(int cpunum)
         /* Initialise the idle task for this CPU */
         atomic_inc(&init_mm.mm_count);
         current->active_mm = &init_mm;
-        if(current->mm)
-                BUG();
+        BUG_ON(current->mm);
         enter_lazy_tlb(&init_mm, current);
 
         init_IRQ();   /* make sure no IRQs are enabled or pending */
@@ -360,6 +342,7 @@ void __init smp_callin(void)
  */
 int __cpuinit smp_boot_one_cpu(int cpuid)
 {
+        const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
         struct task_struct *idle;
         long timeout;
 
@@ -391,7 +374,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
         smp_init_current_idle_task = idle ;
         mb();
 
-        printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
+        printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
 
         /*
         ** This gets PDC to release the CPU from a very tight loop.
@@ -402,7 +385,7 @@
         ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
         ** contents of memory are valid."
         */
-        gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
+        gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
         mb();
 
         /*
@@ -434,12 +417,12 @@ alive:
         return 0;
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
 {
-        int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */
+        int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
 
         /* Setup BSP mappings */
-        printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+        printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
 
         cpu_set(bootstrap_processor, cpu_online_map);
         cpu_set(bootstrap_processor, cpu_present_map);
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 4d09203bc693..9d46c43a4152 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -60,7 +60,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
         unsigned long cycles_elapsed, ticks_elapsed;
         unsigned long cycles_remainder;
         unsigned int cpu = smp_processor_id();
-        struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];
+        struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 
         /* gcc can optimize for "read-only" case with a local clocktick */
         unsigned long cpt = clocktick;
@@ -213,7 +213,7 @@ void __init start_cpu_itimer(void)
 
         mtctl(next_tick, 16);           /* kick off Interval Timer (CR16) */
 
-        cpu_data[cpu].it_value = next_tick;
+        per_cpu(cpu_data, cpu).it_value = next_tick;
 }
 
 struct platform_device rtc_parisc_dev = {
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index d71cb018a21e..f5159381fdd6 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -22,14 +22,14 @@
 #include <linux/cpu.h>
 #include <linux/cache.h>
 
-static struct cpu cpu_devices[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 static int __init topology_init(void)
 {
         int num;
 
         for_each_present_cpu(num) {
-                register_cpu(&cpu_devices[num], num);
+                register_cpu(&per_cpu(cpu_devices, num), num);
         }
         return 0;
 }
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 4c771cd580ec..ba658d2086f7 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -745,6 +745,10 @@ void handle_interruption(int code, struct pt_regs *regs)
                 /* Fall Through */
         case 27:
                 /* Data memory protection ID trap */
+                if (code == 27 && !user_mode(regs) &&
+                        fixup_exception(regs))
+                        return;
+
                 die_if_kernel("Protection id trap", regs, code);
                 si.si_code = SEGV_MAPERR;
                 si.si_signo = SIGSEGV;
@@ -821,8 +825,8 @@
 
 int __init check_ivt(void *iva)
 {
+        extern u32 os_hpmc_size;
         extern const u32 os_hpmc[];
-        extern const u32 os_hpmc_end[];
         int i;
         u32 check = 0;
 
@@ -839,8 +843,7 @@
         *ivap++ = 0;
 
         /* Compute Checksum for HPMC handler */
-
-        length = os_hpmc_end - os_hpmc;
+        length = os_hpmc_size;
         ivap[7] = length;
 
         hpmcp = (u32 *)os_hpmc;
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 6773c582e457..69dad5a850a8 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -372,7 +372,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
         struct pt_regs *r = &t->thread.regs;
         struct pt_regs *r2;
 
-        r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
+        r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
         if (!r2)
                 return;
         *r2 = *r;
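Note (not part of the patch): the R_PARISC_PCREL17F/PCREL22F handling in module.c above only falls back to a PLT stub when the direct branch offset no longer fits the instruction's signed immediate field, which is the decision RELOC_REACHABLE() makes. The following standalone C sketch illustrates that signed-range idea in simplified form; the helper name fits_signed() and the sample offsets are invented for illustration and are not taken from the kernel source.

/* Illustrative sketch only -- a simplified signed-range test, not the
 * kernel macro itself.  A PA-RISC PCREL17F/PCREL22F branch encodes a
 * signed word offset; when the offset is out of range, module.c now
 * redirects the call through a PLT stub placed in front of the section.
 */
#include <assert.h>
#include <stdio.h>

/* hypothetical helper: does a word offset fit a signed `bits`-bit field? */
static int fits_signed(long val, unsigned int bits)
{
        long max = (1L << (bits - 1)) - 1;
        long min = -(1L << (bits - 1));

        return val >= min && val <= max;
}

int main(void)
{
        assert(fits_signed(1000, 17));        /* nearby target: direct branch is fine */
        assert(!fits_signed(1L << 20, 17));   /* too far for 17 bits: a stub is needed */
        assert(fits_signed(1L << 20, 22));    /* still reachable with a 22-bit field */
        printf("range checks behave as expected\n");
        return 0;
}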