Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/Makefile          |   4
 arch/arm/kernel/apm.c             | 189
 arch/arm/kernel/asm-offsets.c     |   1
 arch/arm/kernel/calls.S           |  37
 arch/arm/kernel/ecard.c           |  12
 arch/arm/kernel/entry-armv.S      |  21
 arch/arm/kernel/head-nommu.S      |   1
 arch/arm/kernel/head.S            |  20
 arch/arm/kernel/irq.c             |   8
 arch/arm/kernel/iwmmxt-notifier.c |  63
 arch/arm/kernel/process.c         |  61
 arch/arm/kernel/setup.c           |  18
 arch/arm/kernel/signal.c          |   2
 arch/arm/kernel/sys_arm.c         |  13
 arch/arm/kernel/time.c            |  13
 arch/arm/kernel/traps.c           |   8
 arch/arm/kernel/xscale-cp0.c      | 179
 17 files changed, 413 insertions(+), 237 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 1320a0efca73..ab06a86e85d5 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -24,7 +24,9 @@ obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
-obj-$(CONFIG_IWMMXT) += iwmmxt.o iwmmxt-notifier.o
+obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
+obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
+obj-$(CONFIG_IWMMXT) += iwmmxt.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
ifneq ($(CONFIG_ARCH_EBSA110),y)
diff --git a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c
index ecf4f9472d94..2c37b70b17ab 100644
--- a/arch/arm/kernel/apm.c
+++ b/arch/arm/kernel/apm.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
#include <linux/poll.h>
-#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
@@ -26,6 +25,7 @@
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kthread.h>
+#include <linux/delay.h>
#include <asm/apm.h> /* apm_power_info */
#include <asm/system.h>
@@ -71,7 +71,8 @@ struct apm_user {
#define SUSPEND_PENDING 1 /* suspend pending read */
#define SUSPEND_READ 2 /* suspend read, pending ack */
#define SUSPEND_ACKED 3 /* suspend acked */
-#define SUSPEND_DONE 4 /* suspend completed */
+#define SUSPEND_WAIT 4 /* waiting for suspend */
+#define SUSPEND_DONE 5 /* suspend completed */
struct apm_queue queue;
};
@@ -101,6 +102,7 @@ static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
static DEFINE_SPINLOCK(kapmd_queue_lock);
static struct apm_queue kapmd_queue;
+static DEFINE_MUTEX(state_lock);
static const char driver_version[] = "1.13"; /* no spaces */
@@ -148,38 +150,60 @@ static void queue_add_event(struct apm_queue *q, apm_event_t event)
q->events[q->event_head] = event;
}
-static void queue_event_one_user(struct apm_user *as, apm_event_t event)
+static void queue_event(apm_event_t event)
{
- if (as->suser && as->writer) {
- switch (event) {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- /*
- * If this user already has a suspend pending,
- * don't queue another one.
- */
- if (as->suspend_state != SUSPEND_NONE)
- return;
+ struct apm_user *as;
- as->suspend_state = SUSPEND_PENDING;
- suspends_pending++;
- break;
- }
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as->reader)
+ queue_add_event(&as->queue, event);
}
- queue_add_event(&as->queue, event);
+ up_read(&user_list_lock);
+ wake_up_interruptible(&apm_waitqueue);
}
-static void queue_event(apm_event_t event, struct apm_user *sender)
+/*
+ * queue_suspend_event - queue an APM suspend event.
+ *
+ * Check that we're in a state where we can suspend. If not,
+ * return -EBUSY. Otherwise, queue an event to all "writer"
+ * users. If there are no "writer" users, return '1' to
+ * indicate that we can immediately suspend.
+ */
+static int queue_suspend_event(apm_event_t event, struct apm_user *sender)
{
struct apm_user *as;
+ int ret = 1;
+ mutex_lock(&state_lock);
down_read(&user_list_lock);
+
+ /*
+ * If a thread is still processing, we can't suspend, so reject
+ * the request.
+ */
list_for_each_entry(as, &apm_user_list, list) {
- if (as != sender && as->reader)
- queue_event_one_user(as, event);
+ if (as != sender && as->reader && as->writer && as->suser &&
+ as->suspend_state != SUSPEND_NONE) {
+ ret = -EBUSY;
+ goto out;
+ }
}
+
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as != sender && as->reader && as->writer && as->suser) {
+ as->suspend_state = SUSPEND_PENDING;
+ suspends_pending++;
+ queue_add_event(&as->queue, event);
+ ret = 0;
+ }
+ }
+ out:
up_read(&user_list_lock);
+ mutex_unlock(&state_lock);
wake_up_interruptible(&apm_waitqueue);
+ return ret;
}
static void apm_suspend(void)
@@ -191,17 +215,22 @@ static void apm_suspend(void)
* Anyone on the APM queues will think we're still suspended.
* Send a message so everyone knows we're now awake again.
*/
- queue_event(APM_NORMAL_RESUME, NULL);
+ queue_event(APM_NORMAL_RESUME);
/*
* Finally, wake up anyone who is sleeping on the suspend.
*/
+ mutex_lock(&state_lock);
down_read(&user_list_lock);
list_for_each_entry(as, &apm_user_list, list) {
- as->suspend_result = err;
- as->suspend_state = SUSPEND_DONE;
+ if (as->suspend_state == SUSPEND_WAIT ||
+ as->suspend_state == SUSPEND_ACKED) {
+ as->suspend_result = err;
+ as->suspend_state = SUSPEND_DONE;
+ }
}
up_read(&user_list_lock);
+ mutex_unlock(&state_lock);
wake_up(&apm_suspend_waitqueue);
}
@@ -227,8 +256,11 @@ static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t
if (copy_to_user(buf, &event, sizeof(event)))
break;
- if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
+ mutex_lock(&state_lock);
+ if (as->suspend_state == SUSPEND_PENDING &&
+ (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
as->suspend_state = SUSPEND_READ;
+ mutex_unlock(&state_lock);
buf += sizeof(event);
i -= sizeof(event);
@@ -270,9 +302,13 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
switch (cmd) {
case APM_IOC_SUSPEND:
+ mutex_lock(&state_lock);
+
as->suspend_result = -EINTR;
if (as->suspend_state == SUSPEND_READ) {
+ int pending;
+
/*
* If we read a suspend command from /dev/apm_bios,
* then the corresponding APM_IOC_SUSPEND ioctl is
@@ -280,47 +316,73 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
*/
as->suspend_state = SUSPEND_ACKED;
suspends_pending--;
+ pending = suspends_pending == 0;
+ mutex_unlock(&state_lock);
+
+ /*
+ * If there are no further acknowledges required,
+ * suspend the system.
+ */
+ if (pending)
+ apm_suspend();
+
+ /*
+ * Wait for the suspend/resume to complete. If there
+ * are pending acknowledges, we wait here for them.
+ *
+ * Note: we need to ensure that the PM subsystem does
+ * not kick us out of the wait when it suspends the
+ * threads.
+ */
+ flags = current->flags;
+ current->flags |= PF_NOFREEZE;
+
+ wait_event(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
} else {
+ as->suspend_state = SUSPEND_WAIT;
+ mutex_unlock(&state_lock);
+
/*
* Otherwise it is a request to suspend the system.
* Queue an event for all readers, and expect an
* acknowledge from all writers who haven't already
* acknowledged.
*/
- queue_event(APM_USER_SUSPEND, as);
- }
-
- /*
- * If there are no further acknowledges required, suspend
- * the system.
- */
- if (suspends_pending == 0)
- apm_suspend();
+ err = queue_suspend_event(APM_USER_SUSPEND, as);
+ if (err < 0) {
+ /*
+ * Avoid taking the lock here - this
+ * should be fine.
+ */
+ as->suspend_state = SUSPEND_NONE;
+ break;
+ }
+
+ if (err > 0)
+ apm_suspend();
- /*
- * Wait for the suspend/resume to complete. If there are
- * pending acknowledges, we wait here for them.
- *
- * Note that we need to ensure that the PM subsystem does
- * not kick us out of the wait when it suspends the threads.
- */
- flags = current->flags;
- current->flags |= PF_NOFREEZE;
+ /*
+ * Wait for the suspend/resume to complete. If there
+ * are pending acknowledges, we wait here for them.
+ *
+ * Note: we need to ensure that the PM subsystem does
+ * not kick us out of the wait when it suspends the
+ * threads.
+ */
+ flags = current->flags;
+ current->flags |= PF_NOFREEZE;
- /*
- * Note: do not allow a thread which is acking the suspend
- * to escape until the resume is complete.
- */
- if (as->suspend_state == SUSPEND_ACKED)
- wait_event(apm_suspend_waitqueue,
- as->suspend_state == SUSPEND_DONE);
- else
wait_event_interruptible(apm_suspend_waitqueue,
as->suspend_state == SUSPEND_DONE);
+ }
current->flags = flags;
+
+ mutex_lock(&state_lock);
err = as->suspend_result;
as->suspend_state = SUSPEND_NONE;
+ mutex_unlock(&state_lock);
break;
}
@@ -330,6 +392,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
static int apm_release(struct inode * inode, struct file * filp)
{
struct apm_user *as = filp->private_data;
+ int pending = 0;
+
filp->private_data = NULL;
down_write(&user_list_lock);
@@ -342,11 +406,14 @@ static int apm_release(struct inode * inode, struct file * filp)
* need to balance suspends_pending, which means the
* possibility of sleeping.
*/
+ mutex_lock(&state_lock);
if (as->suspend_state != SUSPEND_NONE) {
suspends_pending -= 1;
- if (suspends_pending == 0)
- apm_suspend();
+ pending = suspends_pending == 0;
}
+ mutex_unlock(&state_lock);
+ if (pending)
+ apm_suspend();
kfree(as);
return 0;
@@ -356,7 +423,7 @@ static int apm_open(struct inode * inode, struct file * filp)
{
struct apm_user *as;
- as = (struct apm_user *)kzalloc(sizeof(*as), GFP_KERNEL);
+ as = kzalloc(sizeof(*as), GFP_KERNEL);
if (as) {
/*
* XXX - this is a tiny bit broken, when we consider BSD
@@ -470,6 +537,7 @@ static int kapmd(void *arg)
{
do {
apm_event_t event;
+ int ret;
wait_event_interruptible(kapmd_wait,
!queue_empty(&kapmd_queue) || kthread_should_stop());
@@ -489,13 +557,20 @@ static int kapmd(void *arg)
case APM_LOW_BATTERY:
case APM_POWER_STATUS_CHANGE:
- queue_event(event, NULL);
+ queue_event(event);
break;
case APM_USER_SUSPEND:
case APM_SYS_SUSPEND:
- queue_event(event, NULL);
- if (suspends_pending == 0)
+ ret = queue_suspend_event(event, NULL);
+ if (ret < 0) {
+ /*
+ * We were busy. Try again in 50ms.
+ */
+ queue_add_event(&kapmd_queue, event);
+ msleep(50);
+ }
+ if (ret > 0)
apm_suspend();
break;
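
Pieced together from the apm.c hunks above, each /dev/apm_bios user now moves through a small suspend state machine: SUSPEND_PENDING (set by queue_suspend_event() on acking users) -> SUSPEND_READ (apm_read() delivered the event) -> SUSPEND_ACKED (the user issued APM_IOC_SUSPEND) -> SUSPEND_DONE (set on resume) -> SUSPEND_NONE, with SUSPEND_WAIT marking the user that requested the suspend itself. A minimal sketch of the userspace side of that handshake, assuming <linux/apm_bios.h> exports apm_event_t and the APM_* constants to userspace and that the daemon runs as root (the "suser" check); error handling elided:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/apm_bios.h>

int main(void)
{
	apm_event_t ev;
	/* O_RDWR makes this user both "reader" and "writer": the kernel
	 * queues suspend events to it and waits for its acknowledge. */
	int fd = open("/dev/apm_bios", O_RDWR);

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev == APM_SYS_SUSPEND || ev == APM_USER_SUSPEND) {
			/* ... flush application state here ... */

			/* SUSPEND_READ -> SUSPEND_ACKED; once the last
			 * writer has acked, the kernel suspends, and
			 * this ioctl only returns after the resume. */
			ioctl(fd, APM_IOC_SUSPEND);
		}
	}
	close(fd);
	return 0;
}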
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index cc2d58d028e1..3c078e346753 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -15,6 +15,7 @@
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
+#include <asm/procinfo.h>
/*
* Make sure that the compiler and target are compatible.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 3173924a9b60..f7598cbc7ec5 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -110,7 +110,7 @@
CALL(sys_ni_syscall) /* was sys_profil */
CALL(sys_statfs)
/* 100 */ CALL(sys_fstatfs)
- CALL(sys_ni_syscall)
+ CALL(sys_ni_syscall) /* sys_ioperm */
CALL(OBSOLETE(ABI(sys_socketcall, sys_oabi_socketcall)))
CALL(sys_syslog)
CALL(sys_setitimer)
@@ -132,7 +132,7 @@
/* 120 */ CALL(sys_clone_wrapper)
CALL(sys_setdomainname)
CALL(sys_newuname)
- CALL(sys_ni_syscall)
+ CALL(sys_ni_syscall) /* modify_ldt */
CALL(sys_adjtimex)
/* 125 */ CALL(sys_mprotect)
CALL(sys_sigprocmask)
@@ -146,7 +146,7 @@
CALL(sys_bdflush)
/* 135 */ CALL(sys_sysfs)
CALL(sys_personality)
- CALL(sys_ni_syscall) /* CALL(_sys_afs_syscall) */
+ CALL(sys_ni_syscall) /* reserved for afs_syscall */
CALL(sys_setfsuid16)
CALL(sys_setfsgid16)
/* 140 */ CALL(sys_llseek)
@@ -175,7 +175,7 @@
CALL(sys_arm_mremap)
CALL(sys_setresuid16)
/* 165 */ CALL(sys_getresuid16)
- CALL(sys_ni_syscall)
+ CALL(sys_ni_syscall) /* vm86 */
CALL(sys_ni_syscall) /* was sys_query_module */
CALL(sys_poll)
CALL(sys_nfsservctl)
@@ -197,8 +197,8 @@
/* 185 */ CALL(sys_capset)
CALL(sys_sigaltstack_wrapper)
CALL(sys_sendfile)
- CALL(sys_ni_syscall)
- CALL(sys_ni_syscall)
+ CALL(sys_ni_syscall) /* getpmsg */
+ CALL(sys_ni_syscall) /* putpmsg */
/* 190 */ CALL(sys_vfork_wrapper)
CALL(sys_getrlimit)
CALL(sys_mmap2)
@@ -331,6 +331,31 @@
CALL(sys_mbind)
/* 320 */ CALL(sys_get_mempolicy)
CALL(sys_set_mempolicy)
+ CALL(sys_openat)
+ CALL(sys_mkdirat)
+ CALL(sys_mknodat)
+/* 325 */ CALL(sys_fchownat)
+ CALL(sys_futimesat)
+ CALL(sys_fstatat64)
+ CALL(sys_unlinkat)
+ CALL(sys_renameat)
+/* 330 */ CALL(sys_linkat)
+ CALL(sys_symlinkat)
+ CALL(sys_readlinkat)
+ CALL(sys_fchmodat)
+ CALL(sys_faccessat)
+/* 335 */ CALL(sys_ni_syscall) /* eventually pselect6 */
+ CALL(sys_ni_syscall) /* eventually ppoll */
+ CALL(sys_unshare)
+ CALL(sys_set_robust_list)
+ CALL(sys_get_robust_list)
+/* 340 */ CALL(sys_splice)
+ CALL(sys_arm_sync_file_range)
+ CALL(sys_tee)
+ CALL(sys_vmsplice)
+ CALL(sys_move_pages)
+/* 345 */ CALL(sys_getcpu)
+ CALL(sys_ni_syscall) /* eventually epoll_pwait */
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index b27513a0f11e..71257e3d513f 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -353,7 +353,7 @@ int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
}
if (c_id(&excd) == 0x80) { /* loader */
if (!ec->loader) {
- ec->loader = (loader_t)kmalloc(c_len(&excd),
+ ec->loader = kmalloc(c_len(&excd),
GFP_KERNEL);
if (ec->loader)
ecard_readbytes(ec->loader, ec,
@@ -529,7 +529,7 @@ static void ecard_dump_irq_state(void)
}
}
-static void ecard_check_lockup(struct irqdesc *desc)
+static void ecard_check_lockup(struct irq_desc *desc)
{
static unsigned long last;
static int lockup;
@@ -567,7 +567,7 @@ static void ecard_check_lockup(struct irqdesc *desc)
}
static void
-ecard_irq_handler(unsigned int irq, struct irqdesc *desc)
+ecard_irq_handler(unsigned int irq, struct irq_desc *desc)
{
ecard_t *ec;
int called = 0;
@@ -585,7 +585,7 @@ ecard_irq_handler(unsigned int irq, struct irqdesc *desc)
pending = ecard_default_ops.irqpending(ec);
if (pending) {
- struct irqdesc *d = irq_desc + ec->irq;
+ struct irq_desc *d = irq_desc + ec->irq;
desc_handle_irq(ec->irq, d);
called ++;
}
@@ -609,7 +609,7 @@ static unsigned char first_set[] =
};
static void
-ecard_irqexp_handler(unsigned int irq, struct irqdesc *desc)
+ecard_irqexp_handler(unsigned int irq, struct irq_desc *desc)
{
const unsigned int statusmask = 15;
unsigned int status;
@@ -1022,7 +1022,7 @@ ecard_probe(int slot, card_type_t type)
if (slot < 8) {
ec->irq = 32 + slot;
set_irq_chip(ec->irq, &ecard_chip);
- set_irq_handler(ec->irq, do_level_IRQ);
+ set_irq_handler(ec->irq, handle_level_irq);
set_irq_flags(ec->irq, IRQF_VALID);
}
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bd623b73445f..8517c3c3eb33 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -436,7 +436,7 @@ __und_usr:
usr_entry
tst r3, #PSR_T_BIT @ Thumb mode?
- bne fpundefinstr @ ignore FP
+ bne __und_usr_unknown @ ignore FP
sub r4, r2, #4
@
@@ -448,7 +448,7 @@ __und_usr:
@
1: ldrt r0, [r4]
adr r9, ret_from_exception
- adr lr, fpundefinstr
+ adr lr, __und_usr_unknown
@
@ fallthrough to call_fpe
@
@@ -476,7 +476,9 @@ __und_usr:
* Emulators may wish to make use of the following registers:
* r0 = instruction opcode.
* r2 = PC+4
+ * r9 = normal "successful" return address
 * r10 = this thread's thread_info structure.
+ * lr = unrecognised instruction return address
*/
call_fpe:
tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
@@ -545,10 +547,12 @@ do_fpe:
.data
ENTRY(fp_enter)
- .word fpundefinstr
+ .word no_fp
.text
-fpundefinstr:
+no_fp: mov pc, lr
+
+__und_usr_unknown:
mov r0, sp
adr lr, ret_from_exception
b do_undefinstr
@@ -589,10 +593,6 @@ ENTRY(__switch_to)
strex r5, r4, [ip] @ Clear exclusive monitor
#endif
#endif
-#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
- mra r4, r5, acc0
- stmia ip, {r4, r5}
-#endif
#if defined(CONFIG_HAS_TLS_REG)
mcr p15, 0, r3, c13, c0, 3 @ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
@@ -602,11 +602,6 @@ ENTRY(__switch_to)
#ifdef CONFIG_MMU
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
-#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
- add r4, r2, #TI_CPU_DOMAIN + 40 @ cpu_context_save->extra
- ldmib r4, {r4, r5}
- mar acc0, r4, r5
-#endif
mov r5, r0
add r4, r2, #TI_CPU_SAVE
ldr r0, =thread_notify_head
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index f359a189dcf2..0119c0d5f978 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -16,7 +16,6 @@
#include <asm/assembler.h>
#include <asm/mach-types.h>
-#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index ebc3e74a7947..d994561816a1 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -16,37 +16,37 @@
#include <asm/assembler.h>
#include <asm/domain.h>
-#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>
-#define KERNEL_RAM_ADDR (PAGE_OFFSET + TEXT_OFFSET)
+#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
+#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET)
/*
* swapper_pg_dir is the virtual address of the initial page table.
- * We place the page tables 16K below KERNEL_RAM_ADDR. Therefore, we must
- * make sure that KERNEL_RAM_ADDR is correctly set. Currently, we expect
+ * We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
+ * make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect
* the least significant 16 bits to be 0x8000, but we could probably
- * relax this restriction to KERNEL_RAM_ADDR >= PAGE_OFFSET + 0x4000.
+ * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
*/
-#if (KERNEL_RAM_ADDR & 0xffff) != 0x8000
-#error KERNEL_RAM_ADDR must start at 0xXXXX8000
+#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
+#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
.globl swapper_pg_dir
- .equ swapper_pg_dir, KERNEL_RAM_ADDR - 0x4000
+ .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
.macro pgtbl, rd
- ldr \rd, =(__virt_to_phys(KERNEL_RAM_ADDR - 0x4000))
+ ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)
.endm
#ifdef CONFIG_XIP_KERNEL
#define TEXTADDR XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
#else
-#define TEXTADDR KERNEL_RAM_ADDR
+#define TEXTADDR KERNEL_RAM_VADDR
#endif
/*
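
Plugging in representative values makes the VADDR/PADDR split concrete (the numbers below are illustrative platform constants, e.g. a PXA2xx board, not taken from this patch):

PAGE_OFFSET = 0xc0000000, PHYS_OFFSET = 0xa0000000, TEXT_OFFSET = 0x8000

KERNEL_RAM_VADDR = 0xc0000000 + 0x8000 = 0xc0008000  @ low 16 bits 0x8000, check passes
KERNEL_RAM_PADDR = 0xa0000000 + 0x8000 = 0xa0008000
swapper_pg_dir   = 0xc0008000 - 0x4000 = 0xc0004000  @ virtual, for C code
pgtbl            = 0xa0008000 - 0x4000 = 0xa0004000  @ physical, usable pre-MMU

That is the point of splitting KERNEL_RAM_ADDR in two: head.S runs with the MMU off, so pgtbl now derives the page-table address directly from PHYS_OFFSET instead of going through __virt_to_phys().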
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2c4ff1cbe334..ec01f08f5642 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -112,7 +112,7 @@ static struct irq_desc bad_irq_desc = {
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct irqdesc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_desc + irq;
/*
* Some hardware gives randomly wrong interrupts. Rather
@@ -134,7 +134,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
- struct irqdesc *desc;
+ struct irq_desc *desc;
unsigned long flags;
if (irq >= NR_IRQS) {
@@ -171,7 +171,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
+static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
@@ -190,7 +190,7 @@ void migrate_irqs(void)
unsigned int i, cpu = smp_processor_id();
for (i = 0; i < NR_IRQS; i++) {
- struct irqdesc *desc = irq_desc + i;
+ struct irq_desc *desc = irq_desc + i;
if (desc->cpu == cpu) {
unsigned int newcpu = any_online_cpu(desc->affinity);
diff --git a/arch/arm/kernel/iwmmxt-notifier.c b/arch/arm/kernel/iwmmxt-notifier.c
deleted file mode 100644
index 0d1a1db40062..000000000000
--- a/arch/arm/kernel/iwmmxt-notifier.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * linux/arch/arm/kernel/iwmmxt-notifier.c
- *
- * XScale iWMMXt (Concan) context switching and handling
- *
- * Initial code:
- * Copyright (c) 2003, Intel Corporation
- *
- * Full lazy switching support, optimizations and more, by Nicolas Pitre
- * Copyright (c) 2003-2004, MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/thread_notify.h>
-#include <asm/io.h>
-
-static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
- struct thread_info *thread = t;
-
- switch (cmd) {
- case THREAD_NOTIFY_FLUSH:
- /*
- * flush_thread() zeroes thread->fpstate, so no need
- * to do anything here.
- *
- * FALLTHROUGH: Ensure we don't try to overwrite our newly
- * initialised state information on the first fault.
- */
-
- case THREAD_NOTIFY_RELEASE:
- iwmmxt_task_release(thread);
- break;
-
- case THREAD_NOTIFY_SWITCH:
- iwmmxt_task_switch(thread);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block iwmmxt_notifier_block = {
- .notifier_call = iwmmxt_do,
-};
-
-static int __init iwmmxt_init(void)
-{
- thread_register_notifier(&iwmmxt_notifier_block);
-
- return 0;
-}
-
-late_initcall(iwmmxt_init);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index bf35c178a877..a9e8f7e55fd6 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -281,67 +281,6 @@ void show_fpregs(struct user_fp *regs)
}
/*
- * Task structure and kernel stack allocation.
- */
-struct thread_info_list {
- unsigned long *head;
- unsigned int nr;
-};
-
-static DEFINE_PER_CPU(struct thread_info_list, thread_info_list) = { NULL, 0 };
-
-#define EXTRA_TASK_STRUCT 4
-
-struct thread_info *alloc_thread_info(struct task_struct *task)
-{
- struct thread_info *thread = NULL;
-
- if (EXTRA_TASK_STRUCT) {
- struct thread_info_list *th = &get_cpu_var(thread_info_list);
- unsigned long *p = th->head;
-
- if (p) {
- th->head = (unsigned long *)p[0];
- th->nr -= 1;
- }
- put_cpu_var(thread_info_list);
-
- thread = (struct thread_info *)p;
- }
-
- if (!thread)
- thread = (struct thread_info *)
- __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
-
-#ifdef CONFIG_DEBUG_STACK_USAGE
- /*
- * The stack must be cleared if you want SYSRQ-T to
- * give sensible stack usage information
- */
- if (thread)
- memzero(thread, THREAD_SIZE);
-#endif
- return thread;
-}
-
-void free_thread_info(struct thread_info *thread)
-{
- if (EXTRA_TASK_STRUCT) {
- struct thread_info_list *th = &get_cpu_var(thread_info_list);
- if (th->nr < EXTRA_TASK_STRUCT) {
- unsigned long *p = (unsigned long *)thread;
- p[0] = (unsigned long)th->head;
- th->head = p;
- th->nr += 1;
- put_cpu_var(thread_info_list);
- return;
- }
- put_cpu_var(thread_info_list);
- }
- free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
-}
-
-/*
* Free current thread data structures etc..
*/
void exit_thread(void)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 29efc9f82057..bbab134cd82d 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -354,12 +354,6 @@ static void __init setup_processor(void)
#ifndef CONFIG_ARM_THUMB
elf_hwcap &= ~HWCAP_THUMB;
#endif
-#ifndef CONFIG_VFP
- elf_hwcap &= ~HWCAP_VFP;
-#endif
-#ifndef CONFIG_IWMMXT
- elf_hwcap &= ~HWCAP_IWMMXT;
-#endif
cpu_proc_init();
}
@@ -441,16 +435,19 @@ __early_param("initrd=", early_initrd);
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
+ struct membank *bank;
+
/*
* Ensure that start/size are aligned to a page boundary.
* Size is appropriately rounded down, start is rounded up.
*/
size -= start & ~PAGE_MASK;
- meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
- meminfo.bank[meminfo.nr_banks].size = size & PAGE_MASK;
- meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
- meminfo.nr_banks += 1;
+ bank = &meminfo.bank[meminfo.nr_banks++];
+
+ bank->start = PAGE_ALIGN(start);
+ bank->size = size & PAGE_MASK;
+ bank->node = PHYS_TO_NID(start);
}
/*
@@ -858,6 +855,7 @@ static const char *hwcap_str[] = {
"edsp",
"java",
"iwmmxt",
+ "crunch",
NULL
};
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 48cf7fffddf2..3843d3bab2dd 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -11,7 +11,9 @@
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
+#include <linux/freezer.h>
+#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 00c18d35913c..3d4fcbc16276 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -328,3 +328,16 @@ asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
{
return sys_fadvise64_64(fd, offset, len, advice);
}
+
+/*
+ * Yet more syscall fsckage - we can't fit sys_sync_file_range's
+ * arguments into the available registers with EABI. So, let's
+ * create an ARM specific syscall for this which has _sane_
+ * arguments. (This incidentally also has an ABI-independent
+ * argument layout.)
+ */
+asmlinkage long sys_arm_sync_file_range(int fd, unsigned int flags,
+ loff_t offset, loff_t nbytes)
+{
+ return sys_sync_file_range(fd, offset, nbytes, flags);
+}
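
A hypothetical userspace wrapper illustrating the layout (the __NR_arm_sync_file_range name assumes the matching asm/unistd.h update for entry 341 in the table above; little-endian EABI shown):

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

static long arm_sync_file_range(int fd, unsigned int flags,
				unsigned long long offset,
				unsigned long long nbytes)
{
	/* fd and flags travel in r0/r1; each 64-bit value then starts
	 * on an even register (offset in r2/r3, nbytes in r4/r5), so
	 * all four arguments fit in r0-r5.  With the original
	 * (fd, offset, nbytes, flags) order, EABI pair alignment would
	 * have pushed flags into a seventh argument slot. */
	return syscall(__NR_arm_sync_file_range, fd, flags,
		       (unsigned long)(offset & 0xffffffffULL),
		       (unsigned long)(offset >> 32),
		       (unsigned long)(nbytes & 0xffffffffULL),
		       (unsigned long)(nbytes >> 32));
}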
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 6ff5e3ff6cb5..3c8cdcfe8d4a 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -29,6 +29,8 @@
#include <linux/timer.h>
#include <linux/irq.h>
+#include <linux/mc146818rtc.h>
+
#include <asm/leds.h>
#include <asm/thread_info.h>
#include <asm/mach/time.h>
@@ -85,6 +87,17 @@ unsigned long long __attribute__((weak)) sched_clock(void)
return (unsigned long long)jiffies * (1000000000 / HZ);
}
+/*
+ * An implementation of printk_clock() independent from
+ * sched_clock(). This avoids non-bootable kernels when
+ * printk timestamping (CONFIG_PRINTK_TIME) is enabled but
+ * sched_clock() is not yet safe to call.
+ */
+unsigned long long printk_clock(void)
+{
+ return (unsigned long long)(jiffies - INITIAL_JIFFIES) *
+ (1000000000 / HZ);
+}
+
static unsigned long next_rtc_update;
/*
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bede380c07a9..908915675edc 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -27,6 +27,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/traps.h>
+#include <asm/io.h>
#include "ptrace.h"
#include "signal.h"
@@ -631,12 +632,9 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
notify_die("unknown data abort code", regs, &info, instr, 0);
}
-void __attribute__((noreturn)) __bug(const char *file, int line, void *data)
+void __attribute__((noreturn)) __bug(const char *file, int line)
{
- printk(KERN_CRIT"kernel BUG at %s:%d!", file, line);
- if (data)
- printk(" - extra data = %p", data);
- printk("\n");
+ printk(KERN_CRIT"kernel BUG at %s:%d!\n", file, line);
*(int *)0 = 0;
/* Avoid "noreturn function does return" */
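
The caller side of this lives in include/asm-arm/bug.h, which is not part of this diff; presumably the BUG() macro now reduces to something like:

/* sketch of the matching bug.h change (assumed, not shown here) */
#define BUG() __bug(__FILE__, __LINE__)

dropping the third argument, which BUG() always passed as NULL.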
diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c
new file mode 100644
index 000000000000..180000bfdc8f
--- /dev/null
+++ b/arch/arm/kernel/xscale-cp0.c
@@ -0,0 +1,179 @@
+/*
+ * linux/arch/arm/kernel/xscale-cp0.c
+ *
+ * XScale DSP and iWMMXt coprocessor context switching and handling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/thread_notify.h>
+#include <asm/io.h>
+
+static inline void dsp_save_state(u32 *state)
+{
+ __asm__ __volatile__ (
+ "mrrc p0, 0, %0, %1, c0\n"
+ : "=r" (state[0]), "=r" (state[1]));
+}
+
+static inline void dsp_load_state(u32 *state)
+{
+ __asm__ __volatile__ (
+ "mcrr p0, 0, %0, %1, c0\n"
+ : : "r" (state[0]), "r" (state[1]));
+}
+
+static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+ struct thread_info *thread = t;
+
+ switch (cmd) {
+ case THREAD_NOTIFY_FLUSH:
+ thread->cpu_context.extra[0] = 0;
+ thread->cpu_context.extra[1] = 0;
+ break;
+
+ case THREAD_NOTIFY_SWITCH:
+ dsp_save_state(current_thread_info()->cpu_context.extra);
+ dsp_load_state(thread->cpu_context.extra);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block dsp_notifier_block = {
+ .notifier_call = dsp_do,
+};
+
+
+#ifdef CONFIG_IWMMXT
+static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+ struct thread_info *thread = t;
+
+ switch (cmd) {
+ case THREAD_NOTIFY_FLUSH:
+ /*
+ * flush_thread() zeroes thread->fpstate, so no need
+ * to do anything here.
+ *
+ * FALLTHROUGH: Ensure we don't try to overwrite our newly
+ * initialised state information on the first fault.
+ */
+
+ case THREAD_NOTIFY_RELEASE:
+ iwmmxt_task_release(thread);
+ break;
+
+ case THREAD_NOTIFY_SWITCH:
+ iwmmxt_task_switch(thread);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block iwmmxt_notifier_block = {
+ .notifier_call = iwmmxt_do,
+};
+#endif
+
+
+static u32 __init xscale_cp_access_read(void)
+{
+ u32 value;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c15, c1, 0\n\t"
+ : "=r" (value));
+
+ return value;
+}
+
+static void __init xscale_cp_access_write(u32 value)
+{
+ u32 temp;
+
+ __asm__ __volatile__ (
+ "mcr p15, 0, %1, c15, c1, 0\n\t"
+ "mrc p15, 0, %0, c15, c1, 0\n\t"
+ "mov %0, %0\n\t"
+ "sub pc, pc, #4\n\t"
+ : "=r" (temp) : "r" (value));
+}
+
+/*
+ * Detect whether we have a MAC coprocessor (40 bit register) or an
+ * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000
+ * into a coprocessor register and reading it back, and checking
+ * whether the upper word survived intact.
+ */
+static int __init cpu_has_iwmmxt(void)
+{
+ u32 lo;
+ u32 hi;
+
+ /*
+ * This sequence is interpreted by the DSP coprocessor as:
+ * mar acc0, %2, %3
+ * mra %0, %1, acc0
+ *
+ * And by the iWMMXt coprocessor as:
+ * tmcrr wR0, %2, %3
+ * tmrrc %0, %1, wR0
+ */
+ __asm__ __volatile__ (
+ "mcrr p0, 0, %2, %3, c0\n"
+ "mrrc p0, 0, %0, %1, c0\n"
+ : "=r" (lo), "=r" (hi)
+ : "r" (0), "r" (0x100));
+
+ return !!hi;
+}
+
+
+/*
+ * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we
+ * disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
+ * switch code handle iWMMXt context switching. If on the other
+ * hand the CPU has a DSP coprocessor, we keep access to CP0 enabled
+ * all the time, and save/restore acc0 on context switch in non-lazy
+ * fashion.
+ */
+static int __init xscale_cp0_init(void)
+{
+ u32 cp_access;
+
+ cp_access = xscale_cp_access_read() & ~3;
+ xscale_cp_access_write(cp_access | 1);
+
+ if (cpu_has_iwmmxt()) {
+#ifndef CONFIG_IWMMXT
+ printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor "
+ "detected, but kernel support is missing.\n");
+#else
+ printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n");
+ elf_hwcap |= HWCAP_IWMMXT;
+ thread_register_notifier(&iwmmxt_notifier_block);
+#endif
+ } else {
+ printk(KERN_INFO "XScale DSP coprocessor detected.\n");
+ thread_register_notifier(&dsp_notifier_block);
+ cp_access |= 1;
+ }
+
+ xscale_cp_access_write(cp_access);
+
+ return 0;
+}
+
+late_initcall(xscale_cp0_init);
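
The arithmetic behind the 00000100:00000000 probe value in cpu_has_iwmmxt() above, spelled out:

/*
 * hi:lo = 0x00000100:00000000 sets a single bit, number 32 + 8 = 40.
 * The DSP-only MAC accumulator acc0 holds bits 39:0, so bit 40 is
 * silently truncated and hi reads back as 0.  iWMMXt's wR0 is a full
 * 64-bit register, so hi reads back as 0x100 and !!hi yields 1.
 */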