Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                      |   2
-rw-r--r--  arch/powerpc/Makefile                     |  16
-rw-r--r--  arch/powerpc/configs/cell_defconfig       |   7
-rw-r--r--  arch/powerpc/configs/g5_defconfig         |   9
-rw-r--r--  arch/powerpc/configs/iseries_defconfig    |   7
-rw-r--r--  arch/powerpc/configs/maple_defconfig      |  10
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig      |   7
-rw-r--r--  arch/powerpc/configs/pseries_defconfig    |   7
-rw-r--r--  arch/powerpc/kernel/entry_64.S            |   4
-rw-r--r--  arch/powerpc/kernel/kprobes.c             |   2
-rw-r--r--  arch/powerpc/kernel/pci_64.c              |  18
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c           |   9
-rw-r--r--  arch/powerpc/kernel/process.c             |  63
-rw-r--r--  arch/powerpc/kernel/prom_init.c           |   2
-rw-r--r--  arch/powerpc/kernel/ptrace-common.h       | 164
-rw-r--r--  arch/powerpc/kernel/ptrace.c              |   3
-rw-r--r--  arch/powerpc/kernel/ptrace32.c            |   3
-rw-r--r--  arch/powerpc/kernel/setup_64.c            |  10
-rw-r--r--  arch/powerpc/kernel/syscalls.c            |   2
-rw-r--r--  arch/powerpc/kernel/vdso.c                |   9
-rw-r--r--  arch/powerpc/mm/4xx_mmu.c                 |   4
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c           |   4
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c             | 105
-rw-r--r--  arch/powerpc/mm/imalloc.c                 |   3
-rw-r--r--  arch/powerpc/mm/init_64.c                 |   3
-rw-r--r--  arch/powerpc/mm/mem.c                     |   2
-rw-r--r--  arch/powerpc/mm/mmu_decl.h                |  14
-rw-r--r--  arch/powerpc/mm/numa.c                    |   2
-rw-r--r--  arch/powerpc/mm/pgtable_64.c              |   3
-rw-r--r--  arch/powerpc/mm/stab.c                    |   7
-rw-r--r--  arch/powerpc/mm/tlb_32.c                  |   6
-rw-r--r--  arch/powerpc/mm/tlb_64.c                  |   4
-rw-r--r--  arch/powerpc/platforms/iseries/iommu.c    |   2
-rw-r--r--  arch/powerpc/platforms/powermac/feature.c |  21
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c     |   6
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c    |  13
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c     |  12
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c     |  21
-rw-r--r--  arch/powerpc/sysdev/dart.h                |   2
-rw-r--r--  arch/powerpc/sysdev/mpic.c                |  13
-rw-r--r--  arch/powerpc/sysdev/u3_iommu.c            |   4
41 files changed, 406 insertions(+), 199 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bb2efdd566a9..db93dbc0e21a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -227,7 +227,7 @@ config SMP
If you don't know what to do here, say N.
config NR_CPUS
- int "Maximum number of CPUs (2-32)"
+ int "Maximum number of CPUs (2-128)"
range 2 128
depends on SMP
default "32" if PPC64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 98f67c78d1bd..a13eb575f834 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -61,15 +61,17 @@ endif
LDFLAGS_vmlinux := -Bstatic
# The -Iarch/$(ARCH)/include is temporary while we are merging
-CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
-AFLAGS += -Iarch/$(ARCH)
-CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe
+CPPFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH) -Iarch/$(ARCH)/include
+AFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH)
CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc
-CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
-CFLAGS += $(CFLAGS-y)
+CFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH) -ffixed-r2 -mmultiple
+CPPFLAGS += $(CPPFLAGS-y)
+AFLAGS += $(AFLAGS-y)
+CFLAGS += -msoft-float -pipe $(CFLAGS-y)
CPP = $(CC) -E $(CFLAGS)
# Temporary hack until we have migrated to asm-powerpc
-LINUXINCLUDE += -Iarch/$(ARCH)/include
+LINUXINCLUDE-$(CONFIG_PPC32) := -Iarch/$(ARCH)/include
+LINUXINCLUDE += $(LINUXINCLUDE-y)
CHECKFLAGS += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
@@ -173,11 +175,13 @@ archclean:
archprepare: checkbin
+ifeq ($(CONFIG_PPC32),y)
# Temporary hack until we have migrated to asm-powerpc
include/asm: arch/$(ARCH)/include/asm
arch/$(ARCH)/include/asm: FORCE
$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
$(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
+endif
# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
# to stdout and these checks are run even on install targets.
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 4b433411b9e3..b657f7e44762 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-rc1
-# Tue Nov 15 14:36:20 2005
+# Linux kernel version: 2.6.15-rc5
+# Tue Dec 20 15:59:26 2005
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -53,6 +53,7 @@ CONFIG_KOBJECT_UEVENT=y
# CONFIG_IKCONFIG is not set
# CONFIG_CPUSETS is not set
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
@@ -151,7 +152,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PPC_64K_PAGES is not set
CONFIG_SCHED_SMT=y
CONFIG_PROC_DEVICETREE=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index e7c23e3902b8..3c22ccb18519 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-rc1
-# Tue Nov 15 14:39:20 2005
+# Linux kernel version: 2.6.15-rc5
+# Tue Dec 20 15:59:30 2005
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -53,6 +53,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
@@ -162,7 +163,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PPC_64K_PAGES is not set
# CONFIG_SCHED_SMT is not set
CONFIG_PROC_DEVICETREE=y
@@ -1203,6 +1204,7 @@ CONFIG_USB_MON=y
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ANYDATA is not set
CONFIG_USB_SERIAL_BELKIN=m
CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
# CONFIG_USB_SERIAL_CP2101 is not set
@@ -1233,7 +1235,6 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_KLSI=m
CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
-# CONFIG_USB_SERIAL_NOKIA_DKU2 is not set
CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_SERIAL_HP4X is not set
CONFIG_USB_SERIAL_SAFE=m
diff --git a/arch/powerpc/configs/iseries_defconfig b/arch/powerpc/configs/iseries_defconfig
index 5d0866707a75..751a622fb7a7 100644
--- a/arch/powerpc/configs/iseries_defconfig
+++ b/arch/powerpc/configs/iseries_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-rc1
-# Tue Nov 15 14:38:09 2005
+# Linux kernel version: 2.6.15-rc5
+# Tue Dec 20 15:59:32 2005
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -55,6 +55,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
@@ -144,7 +145,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PPC_64K_PAGES is not set
# CONFIG_SCHED_SMT is not set
CONFIG_PROC_DEVICETREE=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 92e42613ef06..07b6d3d23360 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-rc1
-# Tue Nov 15 14:38:58 2005
+# Linux kernel version: 2.6.15-rc5
+# Tue Dec 20 15:59:36 2005
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -53,6 +53,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
@@ -149,7 +150,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PPC_64K_PAGES is not set
# CONFIG_SCHED_SMT is not set
CONFIG_PROC_DEVICETREE=y
@@ -242,7 +243,6 @@ CONFIG_TCP_CONG_BIC=y
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
@@ -794,6 +794,7 @@ CONFIG_USB_SERIAL=y
# CONFIG_USB_SERIAL_CONSOLE is not set
CONFIG_USB_SERIAL_GENERIC=y
# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ANYDATA is not set
# CONFIG_USB_SERIAL_BELKIN is not set
# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
# CONFIG_USB_SERIAL_CP2101 is not set
@@ -824,7 +825,6 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
# CONFIG_USB_SERIAL_KLSI is not set
# CONFIG_USB_SERIAL_KOBIL_SCT is not set
# CONFIG_USB_SERIAL_MCT_U232 is not set
-# CONFIG_USB_SERIAL_NOKIA_DKU2 is not set
# CONFIG_USB_SERIAL_PL2303 is not set
# CONFIG_USB_SERIAL_HP4X is not set
# CONFIG_USB_SERIAL_SAFE is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index b5ba3bbd96fb..509399eab6f5 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-rc1
-# Fri Nov 18 16:23:24 2005
+# Linux kernel version: 2.6.15-rc5
+# Tue Dec 20 15:59:38 2005
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -54,6 +54,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CPUSETS=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
@@ -176,7 +177,7 @@ CONFIG_HAVE_MEMORY_PRESENT=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_EXTREME=y
# CONFIG_MEMORY_HOTPLUG is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PPC_64K_PAGES is not set
# CONFIG_SCHED_SMT is not set
CONFIG_PROC_DEVICETREE=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index b589b196eb3f..a50ce0fa9243 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-rc1
-# Tue Nov 15 14:36:55 2005
+# Linux kernel version: 2.6.15-rc5
+# Tue Dec 20 15:59:40 2005
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -55,6 +55,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CPUSETS=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
@@ -163,7 +164,7 @@ CONFIG_HAVE_MEMORY_PRESENT=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_EXTREME=y
# CONFIG_MEMORY_HOTPLUG is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
# CONFIG_PPC_64K_PAGES is not set
CONFIG_SCHED_SMT=y
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2d22bf03484e..bce33a38399f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -183,8 +183,8 @@ syscall_exit_trace_cont:
ld r13,GPR13(r1) /* returning to usermode */
1: ld r2,GPR2(r1)
li r12,MSR_RI
- andc r10,r10,r12
- mtmsrd r10,1 /* clear MSR.RI */
+ andc r11,r10,r12
+ mtmsrd r11,1 /* clear MSR.RI */
ld r1,GPR1(r1)
mtlr r4
mtcr r5
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 511af54e6230..5368f9c2e6bf 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -177,7 +177,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kcb->kprobe_saved_msr = regs->msr;
- p->nmissed++;
+ kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 5a5b24685081..8b6008ab217d 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -40,7 +40,7 @@
#endif
unsigned long pci_probe_only = 1;
-unsigned long pci_assign_all_buses = 0;
+int pci_assign_all_buses = 0;
/*
* legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch
@@ -55,11 +55,6 @@ static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
#endif
-unsigned int pcibios_assign_all_busses(void)
-{
- return pci_assign_all_buses;
-}
-
/* pci_io_base -- the base address from which io bars are offsets.
* This is the lowest I/O base address (so bar values are always positive),
* and it *must* be the start of ISA space if an ISA bus exists because
@@ -1186,17 +1181,6 @@ void phbs_remap_io(void)
remap_bus_range(hose->bus);
}
-/*
- * ppc64 can have multifunction devices that do not respond to function 0.
- * In this case we must scan all functions.
- * XXX this can go now, we use the OF device tree in all the
- * cases that caused problems. -- paulus
- */
-int pcibios_scan_all_fns(struct pci_bus *bus, int devfn)
-{
- return 0;
-}
-
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 59846b40d521..94db25708456 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -146,9 +146,6 @@ EXPORT_SYMBOL(pci_bus_io_base);
EXPORT_SYMBOL(pci_bus_io_base_phys);
EXPORT_SYMBOL(pci_bus_mem_base_phys);
EXPORT_SYMBOL(pci_bus_to_hose);
-EXPORT_SYMBOL(pci_resource_to_bus);
-EXPORT_SYMBOL(pci_phys_to_bus);
-EXPORT_SYMBOL(pci_bus_to_phys);
#endif /* CONFIG_PCI */
#ifdef CONFIG_NOT_COHERENT_CACHE
@@ -166,15 +163,13 @@ EXPORT_SYMBOL(giveup_altivec);
EXPORT_SYMBOL(giveup_spe);
#endif /* CONFIG_SPE */
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(__flush_icache_range);
-#else
+#ifndef CONFIG_PPC64
EXPORT_SYMBOL(flush_instruction_cache);
-EXPORT_SYMBOL(flush_icache_range);
EXPORT_SYMBOL(flush_tlb_kernel_range);
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(_tlbie);
#endif
+EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index de69fb37c731..105d5609ff57 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -201,6 +201,28 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
}
#endif /* CONFIG_SPE */
+/*
+ * If we are doing lazy switching of CPU state (FP, altivec or SPE),
+ * and the current task has some state, discard it.
+ */
+static inline void discard_lazy_cpu_state(void)
+{
+#ifndef CONFIG_SMP
+ preempt_disable();
+ if (last_task_used_math == current)
+ last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+ if (last_task_used_altivec == current)
+ last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+ if (last_task_used_spe == current)
+ last_task_used_spe = NULL;
+#endif
+ preempt_enable();
+#endif /* CONFIG_SMP */
+}
+
int set_dabr(unsigned long dabr)
{
if (ppc_md.set_dabr)
@@ -434,19 +456,7 @@ void show_regs(struct pt_regs * regs)
void exit_thread(void)
{
kprobe_flush_task(current);
-
-#ifndef CONFIG_SMP
- if (last_task_used_math == current)
- last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+ discard_lazy_cpu_state();
}
void flush_thread(void)
@@ -457,20 +467,8 @@ void flush_thread(void)
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
#endif
- kprobe_flush_task(current);
-#ifndef CONFIG_SMP
- if (last_task_used_math == current)
- last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+ discard_lazy_cpu_state();
#ifdef CONFIG_PPC64 /* for now */
if (current->thread.dabr) {
@@ -636,18 +634,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
}
#endif
-#ifndef CONFIG_SMP
- if (last_task_used_math == current)
- last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#endif
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+ discard_lazy_cpu_state();
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 4ce0105c308e..bcdc209dca85 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -265,7 +265,7 @@ static int __init call_prom_ret(const char *service, int nargs, int nret,
va_end(list);
for (i = 0; i < nret; i++)
- rets[nargs+i] = 0;
+ args.args[nargs+i] = 0;
if (enter_prom(&args, RELOC(prom_entry)) < 0)
return PROM_ERROR;
diff --git a/arch/powerpc/kernel/ptrace-common.h b/arch/powerpc/kernel/ptrace-common.h
new file mode 100644
index 000000000000..b1babb729673
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace-common.h
@@ -0,0 +1,164 @@
+/*
+ * linux/arch/ppc64/kernel/ptrace-common.h
+ *
+ * Copyright (c) 2002 Stephen Rothwell, IBM Coproration
+ * Extracted from ptrace.c and ptrace32.c
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file README.legal in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _PPC64_PTRACE_COMMON_H
+#define _PPC64_PTRACE_COMMON_H
+
+#include <linux/config.h>
+#include <asm/system.h>
+
+/*
+ * Set of msr bits that gdb can change on behalf of a process.
+ */
+#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
+
+/*
+ * Get contents of register REGNO in task TASK.
+ */
+static inline unsigned long get_reg(struct task_struct *task, int regno)
+{
+ unsigned long tmp = 0;
+
+ /*
+ * Put the correct FP bits in, they might be wrong as a result
+ * of our lazy FP restore.
+ */
+ if (regno == PT_MSR) {
+ tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
+ tmp |= task->thread.fpexc_mode;
+ } else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
+ tmp = ((unsigned long *)task->thread.regs)[regno];
+ }
+
+ return tmp;
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+static inline int put_reg(struct task_struct *task, int regno,
+ unsigned long data)
+{
+ if (regno < PT_SOFTE) {
+ if (regno == PT_MSR)
+ data = (data & MSR_DEBUGCHANGE)
+ | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
+ ((unsigned long *)task->thread.regs)[regno] = data;
+ return 0;
+ }
+ return -EIO;
+}
+
+static inline void set_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+ if (regs != NULL)
+ regs->msr |= MSR_SE;
+ set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
+}
+
+static inline void clear_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+ if (regs != NULL)
+ regs->msr &= ~MSR_SE;
+ clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
+}
+
+#ifdef CONFIG_ALTIVEC
+/*
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadword. Quadwords 0-31 contain the
+ * corresponding vector registers. Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword. Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the
+ * same structures. This also simplifies the implementation of a bi-arch
+ * (combined (32- and 64-bit) gdb.
+ */
+
+/*
+ * Get contents of AltiVec register state in task TASK
+ */
+static inline int get_vrregs(unsigned long __user *data,
+ struct task_struct *task)
+{
+ unsigned long regsize;
+
+ /* copy AltiVec registers VR[0] .. VR[31] */
+ regsize = 32 * sizeof(vector128);
+ if (copy_to_user(data, task->thread.vr, regsize))
+ return -EFAULT;
+ data += (regsize / sizeof(unsigned long));
+
+ /* copy VSCR */
+ regsize = 1 * sizeof(vector128);
+ if (copy_to_user(data, &task->thread.vscr, regsize))
+ return -EFAULT;
+ data += (regsize / sizeof(unsigned long));
+
+ /* copy VRSAVE */
+ if (put_user(task->thread.vrsave, (u32 __user *)data))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Write contents of AltiVec register state into task TASK.
+ */
+static inline int set_vrregs(struct task_struct *task,
+ unsigned long __user *data)
+{
+ unsigned long regsize;
+
+ /* copy AltiVec registers VR[0] .. VR[31] */
+ regsize = 32 * sizeof(vector128);
+ if (copy_from_user(task->thread.vr, data, regsize))
+ return -EFAULT;
+ data += (regsize / sizeof(unsigned long));
+
+ /* copy VSCR */
+ regsize = 1 * sizeof(vector128);
+ if (copy_from_user(&task->thread.vscr, data, regsize))
+ return -EFAULT;
+ data += (regsize / sizeof(unsigned long));
+
+ /* copy VRSAVE */
+ if (get_user(task->thread.vrsave, (u32 __user *)data))
+ return -EFAULT;
+
+ return 0;
+}
+#endif
+
+static inline int ptrace_set_debugreg(struct task_struct *task,
+ unsigned long addr, unsigned long data)
+{
+ /* We only support one DABR and no IABRS at the moment */
+ if (addr > 0)
+ return -EINVAL;
+
+ /* The bottom 3 bits are flags */
+ if ((data & ~0x7UL) >= TASK_SIZE)
+ return -EIO;
+
+ /* Ensure translation is on */
+ if (data && !(data & DABR_TRANSLATION))
+ return -EIO;
+
+ task->thread.dabr = data;
+ return 0;
+}
+
+#endif /* _PPC64_PTRACE_COMMON_H */
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 3d2abd95c7ae..400793c71304 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -36,8 +36,9 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
+
#ifdef CONFIG_PPC64
-#include <asm/ptrace-common.h>
+#include "ptrace-common.h"
#endif
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 91eb952e0293..61762640b877 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -33,7 +33,8 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
-#include <asm/ptrace-common.h>
+
+#include "ptrace-common.h"
/*
* does not yet catch signals sent when the child dies.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 608fee7c7e20..e3fb78397dc6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -102,7 +102,15 @@ int boot_cpuid_phys = 0;
dev_t boot_dev;
u64 ppc64_pft_size;
-struct ppc64_caches ppc64_caches;
+/* Pick defaults since we might want to patch instructions
+ * before we've read this from the device tree.
+ */
+struct ppc64_caches ppc64_caches = {
+ .dline_size = 0x80,
+ .log_dline_size = 7,
+ .iline_size = 0x80,
+ .log_iline_size = 7
+};
EXPORT_SYMBOL_GPL(ppc64_caches);
/*
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index f72ced11212d..91b93d917b64 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -247,7 +247,7 @@ long ppc64_personality(unsigned long personality)
#define OVERRIDE_MACHINE 0
#endif
-static inline int override_machine(char *mach)
+static inline int override_machine(char __user *mach)
{
if (OVERRIDE_MACHINE) {
/* change ppc64 to ppc */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 0d4d8bec0df4..f0c47dab0903 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -145,8 +145,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
struct page *pg = virt_to_page(vdso32_kbase +
i*PAGE_SIZE);
struct page *upg = (vma && vma->vm_mm) ?
- follow_page(vma->vm_mm, vma->vm_start +
- i*PAGE_SIZE, 0)
+ follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
: NULL;
dump_one_vdso_page(pg, upg);
}
@@ -157,8 +156,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
struct page *pg = virt_to_page(vdso64_kbase +
i*PAGE_SIZE);
struct page *upg = (vma && vma->vm_mm) ?
- follow_page(vma->vm_mm, vma->vm_start +
- i*PAGE_SIZE, 0)
+ follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
: NULL;
dump_one_vdso_page(pg, upg);
}
@@ -285,8 +283,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
* It's fine to use that for setting breakpoints in the vDSO code
* pages though
*/
- vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE |
- VM_MAYEXEC | VM_RESERVED;
+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
vma->vm_flags |= mm->def_flags;
vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
vma->vm_ops = &vdso_vmops;
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
index b7bcbc232f39..4d006aa1a0d1 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -110,13 +110,11 @@ unsigned long __init mmu_mapin_ram(void)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
- spin_lock(&init_mm.page_table_lock);
pmdp = pmd_offset(pgd_offset_k(v), v);
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
- spin_unlock(&init_mm.page_table_lock);
v += LARGE_PAGE_SIZE_16M;
p += LARGE_PAGE_SIZE_16M;
@@ -127,10 +125,8 @@ unsigned long __init mmu_mapin_ram(void)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
- spin_lock(&init_mm.page_table_lock);
pmdp = pmd_offset(pgd_offset_k(v), v);
pmd_val(*pmdp) = val;
- spin_unlock(&init_mm.page_table_lock);
v += LARGE_PAGE_SIZE_4M;
p += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 706e8a63ced9..a606504678bd 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -514,7 +514,7 @@ void __init htab_initialize(void)
#undef KB
#undef MB
-void __init htab_initialize_secondary(void)
+void htab_initialize_secondary(void)
{
if (!platform_is_lpar())
mtspr(SPRN_SDR1, _SDR1);
@@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
/* Handle hugepage regions */
if (unlikely(in_hugepage_area(mm->context, ea))) {
DBG_LOW(" -> huge page !\n");
- return hash_huge_page(mm, access, ea, vsid, local);
+ return hash_huge_page(mm, access, ea, vsid, local, trap);
}
/* Get PTE and page size from page tables */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 426c269e552e..54131b877da3 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
return 0;
}
+struct slb_flush_info {
+ struct mm_struct *mm;
+ u16 newareas;
+};
+
static void flush_low_segments(void *parm)
{
- u16 areas = (unsigned long) parm;
+ struct slb_flush_info *fi = parm;
unsigned long i;
- asm volatile("isync" : : : "memory");
+ BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
+
+ if (current->active_mm != fi->mm)
+ return;
+
+ /* Only need to do anything if this CPU is working in the same
+ * mm as the one which has changed */
- BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
+ /* update the paca copy of the context struct */
+ get_paca()->context = current->active_mm->context;
+ asm volatile("isync" : : : "memory");
for (i = 0; i < NUM_LOW_AREAS; i++) {
- if (! (areas & (1U << i)))
+ if (! (fi->newareas & (1U << i)))
continue;
asm volatile("slbie %0"
: : "r" ((i << SID_SHIFT) | SLBIE_C));
}
-
asm volatile("isync" : : : "memory");
}
static void flush_high_segments(void *parm)
{
- u16 areas = (unsigned long) parm;
+ struct slb_flush_info *fi = parm;
unsigned long i, j;
- asm volatile("isync" : : : "memory");
- BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
+ BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
+ if (current->active_mm != fi->mm)
+ return;
+
+ /* Only need to do anything if this CPU is working in the same
+ * mm as the one which has changed */
+
+ /* update the paca copy of the context struct */
+ get_paca()->context = current->active_mm->context;
+
+ asm volatile("isync" : : : "memory");
for (i = 0; i < NUM_HIGH_AREAS; i++) {
- if (! (areas & (1U << i)))
+ if (! (fi->newareas & (1U << i)))
continue;
for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
asm volatile("slbie %0"
:: "r" (((i << HTLB_AREA_SHIFT)
- + (j << SID_SHIFT)) | SLBIE_C));
+ + (j << SID_SHIFT)) | SLBIE_C));
}
-
asm volatile("isync" : : : "memory");
}
@@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
unsigned long i;
+ struct slb_flush_info fi;
BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
@@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
mm->context.low_htlb_areas |= newareas;
- /* update the paca copy of the context struct */
- get_paca()->context = mm->context;
-
/* the context change must make it to memory before the flush,
* so that further SLB misses do the right thing. */
mb();
- on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
+
+ fi.mm = mm;
+ fi.newareas = newareas;
+ on_each_cpu(flush_low_segments, &fi, 0, 1);
return 0;
}
static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
+ struct slb_flush_info fi;
unsigned long i;
BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
@@ -280,22 +302,25 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
/* the context change must make it to memory before the flush,
* so that further SLB misses do the right thing. */
mb();
- on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
+
+ fi.mm = mm;
+ fi.newareas = newareas;
+ on_each_cpu(flush_high_segments, &fi, 0, 1);
return 0;
}
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
- int err;
+ int err = 0;
if ( (addr+len) < addr )
return -EINVAL;
- if ((addr + len) < 0x100000000UL)
+ if (addr < 0x100000000UL)
err = open_low_hpage_areas(current->mm,
LOW_ESID_MASK(addr, len));
- else
+ if ((addr + len) > 0x100000000UL)
err = open_high_hpage_areas(current->mm,
HTLB_AREA_MASK(addr, len));
if (err) {
@@ -639,8 +664,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return -ENOMEM;
}
+/*
+ * Called by asm hashtable.S for doing lazy icache flush
+ */
+static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
+ pte_t pte, int trap)
+{
+ struct page *page;
+ int i;
+
+ if (!pfn_valid(pte_pfn(pte)))
+ return rflags;
+
+ page = pte_page(pte);
+
+ /* page is dirty */
+ if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+ if (trap == 0x400) {
+ for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
+ __flush_dcache_icache(page_address(page+i));
+ set_bit(PG_arch_1, &page->flags);
+ } else {
+ rflags |= HPTE_R_N;
+ }
+ }
+ return rflags;
+}
+
int hash_huge_page(struct mm_struct *mm, unsigned long access,
- unsigned long ea, unsigned long vsid, int local)
+ unsigned long ea, unsigned long vsid, int local,
+ unsigned long trap)
{
pte_t *ptep;
unsigned long old_pte, new_pte;
@@ -691,6 +744,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
rflags = 0x2 | (!(new_pte & _PAGE_RW));
/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+ if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+ /* No CPU has hugepages but lacks no execute, so we
+ * don't need to worry about that case */
+ rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
+ trap);
/* Check if pte already has an hpte (case 2) */
if (unlikely(old_pte & _PAGE_HASHPTE)) {
@@ -703,7 +761,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & _PAGE_F_GIX) >> 12;
- if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
+ if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
+ local) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
@@ -754,9 +813,7 @@ repeat:
}
/*
- * No need to use ldarx/stdcx here because all who
- * might be updating the pte will hold the
- * page_table_lock
+ * No need to use ldarx/stdcx here
*/
*ptep = __pte(new_pte & ~_PAGE_BUSY);
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index f4ca29cf5364..f9587bcc6a48 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -14,9 +14,10 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/semaphore.h>
-#include <asm/imalloc.h>
#include <asm/cacheflush.h>
+#include "mmu_decl.h"
+
static DECLARE_MUTEX(imlist_sem);
struct vm_struct * imlist = NULL;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 1134f70f231d..81cfb0c2ec58 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -64,7 +64,8 @@
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
-#include <asm/imalloc.h>
+
+#include "mmu_decl.h"
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 4bd7b0a70996..ed6ed2e30dac 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -495,7 +495,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
- * This must always be called with the mm->page_table_lock held
+ * This must always be called with the pte lock held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index a4d7a327c0e5..bea2d21ac6f7 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -33,7 +33,6 @@ extern void invalidate_tlbcam_entry(int index);
extern int __map_without_bats;
extern unsigned long ioremap_base;
-extern unsigned long ioremap_bot;
extern unsigned int rtas_data, rtas_size;
extern PTE *Hash, *Hash_end;
@@ -42,6 +41,7 @@ extern unsigned long Hash_size, Hash_mask;
extern unsigned int num_tlbcam_entries;
#endif
+extern unsigned long ioremap_bot;
extern unsigned long __max_low_memory;
extern unsigned long __initial_memory_limit;
extern unsigned long total_memory;
@@ -84,4 +84,16 @@ static inline void flush_HPTE(unsigned context, unsigned long va,
else
_tlbie(va);
}
+#else /* CONFIG_PPC64 */
+/* imalloc region types */
+#define IM_REGION_UNUSED 0x1
+#define IM_REGION_SUBSET 0x2
+#define IM_REGION_EXISTS 0x4
+#define IM_REGION_OVERLAP 0x8
+#define IM_REGION_SUPERSET 0x10
+
+extern struct vm_struct * im_get_free_area(unsigned long size);
+extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
+ int region_type);
+extern void im_free(void *addr);
#endif
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f72cf87364cb..ba7a3055a9fc 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -125,7 +125,7 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,
/* We didnt find a matching region, return start/end as 0 */
if (*start_pfn == -1UL)
- start_pfn = 0;
+ *start_pfn = 0;
}
static inline void map_cpu_to_node(int cpu, int node)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index c7f7bb6f30b3..2ffca63602c5 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -64,7 +64,8 @@
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
-#include <asm/imalloc.h>
+
+#include "mmu_decl.h"
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index cfbb4e1f966b..51e7951414e5 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -288,11 +288,6 @@ void stab_initialize(unsigned long stab)
return;
}
#endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PPC_PSERIES
- if (platform_is_lpar()) {
- plpar_hcall_norets(H_SET_ASR, stabreal);
- return;
- }
-#endif
+
mtspr(SPRN_ASR, stabreal);
}
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 6c3dc3c44c86..ad580f3742e5 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -149,6 +149,12 @@ void flush_tlb_mm(struct mm_struct *mm)
return;
}
+ /*
+ * It is safe to go down the mm's list of vmas when called
+ * from dup_mmap, holding mmap_sem. It would also be safe from
+ * unmap_region or exit_mmap, but not from vmtruncate on SMP -
+ * but it seems dup_mmap is the only SMP case which gets here.
+ */
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
FINISH_FLUSH;
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 53e31b834ace..859d29a0cac5 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -95,7 +95,7 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
- /* This is safe as we are holding page_table_lock */
+ /* This is safe since tlb_gather_mmu has disabled preemption */
cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
@@ -206,7 +206,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
void pte_free_finish(void)
{
- /* This is safe as we are holding page_table_lock */
+ /* This is safe since tlb_gather_mmu has disabled preemption */
struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
if (*batchp == NULL)
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index bf081b345820..2b54eeb2c899 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -3,7 +3,7 @@
*
* Rewrite, cleanup:
*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* Dynamic DMA mapping support, iSeries-specific parts.
*
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 0d7fa00fcb00..f6e22da2a5da 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -1650,11 +1650,19 @@ void pmac_tweak_clock_spreading(int enable)
*/
if (macio->type == macio_intrepid) {
- if (enable)
- UN_OUT(UNI_N_CLOCK_SPREADING, 2);
- else
- UN_OUT(UNI_N_CLOCK_SPREADING, 0);
- mdelay(40);
+ struct device_node *clock =
+ of_find_node_by_path("/uni-n@f8000000/hw-clock");
+ if (clock && get_property(clock, "platform-do-clockspreading",
+ NULL)) {
+ printk(KERN_INFO "%sabling clock spreading on Intrepid"
+ " ASIC\n", enable ? "En" : "Dis");
+ if (enable)
+ UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+ else
+ UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+ mdelay(40);
+ }
+ of_node_put(clock);
}
while (machine_is_compatible("PowerBook5,2") ||
@@ -1724,6 +1732,9 @@ void pmac_tweak_clock_spreading(int enable)
pmac_low_i2c_close(ui2c);
break;
}
+ printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+ enable ? "En" : "Dis");
+
pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
DBG("write result: %d,", rc);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 957b09103422..fb2a7c798e82 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -34,6 +34,7 @@
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/cpu.h>
+#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
@@ -631,8 +632,9 @@ void smp_core99_give_timebase(void)
mb();
/* wait for the secondary to have taken it */
- for (t = 100000; t > 0 && sec_tb_reset; --t)
- udelay(10);
+ /* note: can't use udelay here, since it needs the timebase running */
+ for (t = 10000000; t > 0 && sec_tb_reset; --t)
+ barrier();
if (sec_tb_reset)
/* XXX BUG_ON here? */
printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 97ba5214417f..2043659ea7b1 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -5,7 +5,7 @@
*
* Rewrite, cleanup:
*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
*
@@ -109,6 +109,9 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
u64 rc;
union tce_entry tce;
+ tcenum <<= TCE_PAGE_FACTOR;
+ npages <<= TCE_PAGE_FACTOR;
+
tce.te_word = 0;
tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
tce.te_rdwr = 1;
@@ -143,10 +146,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
union tce_entry tce, *tcep;
long l, limit;
- tcenum <<= TCE_PAGE_FACTOR;
- npages <<= TCE_PAGE_FACTOR;
-
- if (npages == 1)
+ if (TCE_PAGE_FACTOR == 0 && npages == 1)
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction);
@@ -164,6 +164,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
__get_cpu_var(tce_page) = tcep;
}
+ tcenum <<= TCE_PAGE_FACTOR;
+ npages <<= TCE_PAGE_FACTOR;
+
tce.te_word = 0;
tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
tce.te_rdwr = 1;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index a50e5f3f396d..cf1bc11b3346 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -298,18 +298,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
-#if 1
- {
- int i;
- for (i=0;i<8;i++) {
- unsigned long w0, w1;
- plpar_pte_read(0, hpte_group, &w0, &w1);
- BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
- && (w0 & HPTE_V_VALID));
- }
- }
-#endif
-
/* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */
/* Zero page = 0 */
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 72ac18067ece..0377decc0719 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -48,11 +48,6 @@ static struct hw_interrupt_type xics_pic = {
.set_affinity = xics_set_affinity
};
-static struct hw_interrupt_type xics_8259_pic = {
- .typename = " XICS/8259",
- .ack = xics_mask_and_ack_irq,
-};
-
/* This is used to map real irq numbers to virtual */
static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
@@ -367,12 +362,7 @@ int xics_get_irq(struct pt_regs *regs)
/* for sanity, this had better be < NR_IRQS - 16 */
if (vec == xics_irq_8259_cascade_real) {
irq = i8259_irq(regs);
- if (irq == -1) {
- /* Spurious cascaded interrupt. Still must ack xics */
- xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
-
- irq = -1;
- }
+ xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
} else if (vec == XICS_IRQ_SPURIOUS) {
irq = -1;
} else {
@@ -542,6 +532,7 @@ nextnode:
xics_irq_8259_cascade_real = *ireg;
xics_irq_8259_cascade
= virt_irq_create_mapping(xics_irq_8259_cascade_real);
+ i8259_init(0, 0);
of_node_put(np);
}
@@ -565,12 +556,7 @@ nextnode:
#endif /* CONFIG_SMP */
}
- xics_8259_pic.enable = i8259_pic.enable;
- xics_8259_pic.disable = i8259_pic.disable;
- xics_8259_pic.end = i8259_pic.end;
- for (i = 0; i < 16; ++i)
- get_irq_desc(i)->handler = &xics_8259_pic;
- for (; i < NR_IRQS; ++i)
+ for (i = irq_offset_value(); i < NR_IRQS; ++i)
get_irq_desc(i)->handler = &xics_pic;
xics_setup_cpu();
@@ -590,7 +576,6 @@ static int __init xics_setup_i8259(void)
no_action, 0, "8259 cascade", NULL))
printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
"cascade\n");
- i8259_init(0, 0);
}
return 0;
}
diff --git a/arch/powerpc/sysdev/dart.h b/arch/powerpc/sysdev/dart.h
index ea8f0d9eed8a..33ed9ed7fc1e 100644
--- a/arch/powerpc/sysdev/dart.h
+++ b/arch/powerpc/sysdev/dart.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 105f05341a41..58d1cc2023c8 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -361,7 +361,8 @@ static void mpic_enable_irq(unsigned int irq)
DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
- mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK);
+ mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
+ ~MPIC_VECPRI_MASK);
/* make sure mask gets to controller before we return to user */
do {
@@ -381,7 +382,8 @@ static void mpic_disable_irq(unsigned int irq)
DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
- mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | MPIC_VECPRI_MASK);
+ mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
+ MPIC_VECPRI_MASK);
/* make sure mask gets to controller before we return to user */
do {
@@ -735,12 +737,13 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
spin_lock_irqsave(&mpic_lock, flags);
if (is_ipi) {
- reg = mpic_ipi_read(irq - mpic->ipi_offset) & MPIC_VECPRI_PRIORITY_MASK;
+ reg = mpic_ipi_read(irq - mpic->ipi_offset) &
+ ~MPIC_VECPRI_PRIORITY_MASK;
mpic_ipi_write(irq - mpic->ipi_offset,
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
} else {
- reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI)
- & MPIC_VECPRI_PRIORITY_MASK;
+ reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI)
+ & ~MPIC_VECPRI_PRIORITY_MASK;
mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
}
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index f32baf7f4693..5c1a26a6d00c 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -1,11 +1,11 @@
/*
* arch/powerpc/sysdev/u3_iommu.c
*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* Based on pSeries_iommu.c:
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
* Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu.
*