Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile                |   5
-rw-r--r--  arch/ia64/kernel/acpi-ext.c              |  22
-rw-r--r--  arch/ia64/kernel/acpi-processor.c        |  67
-rw-r--r--  arch/ia64/kernel/acpi.c                  |   6
-rw-r--r--  arch/ia64/kernel/cpufreq/Makefile        |   1
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c  |  51
-rw-r--r--  arch/ia64/kernel/efi.c                   | 160
-rw-r--r--  arch/ia64/kernel/entry.S                 |   1
-rw-r--r--  arch/ia64/kernel/fsys.S                  |   1
-rw-r--r--  arch/ia64/kernel/head.S                  |   2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c            |   2
-rw-r--r--  arch/ia64/kernel/jprobes.S               |  27
-rw-r--r--  arch/ia64/kernel/kprobes.c               |  68
-rw-r--r--  arch/ia64/kernel/mca.c                   |   4
-rw-r--r--  arch/ia64/kernel/mca_asm.S               |   2
-rw-r--r--  arch/ia64/kernel/perfmon.c               |  46
-rw-r--r--  arch/ia64/kernel/perfmon_montecito.h     | 269
-rw-r--r--  arch/ia64/kernel/process.c               |  12
-rw-r--r--  arch/ia64/kernel/ptrace.c                |  33
-rw-r--r--  arch/ia64/kernel/salinfo.c               | 171
-rw-r--r--  arch/ia64/kernel/setup.c                 |  18
-rw-r--r--  arch/ia64/kernel/signal.c                |  10
-rw-r--r--  arch/ia64/kernel/sys_ia64.c              |   2
-rw-r--r--  arch/ia64/kernel/traps.c                 |  26
-rw-r--r--  arch/ia64/kernel/uncached.c              |   1
25 files changed, 772 insertions(+), 235 deletions(-)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 307514f7a282..09a0dbc17fb6 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -13,6 +13,11 @@ obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
+
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y += acpi-processor.o
+endif
+
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 13a5b3b49bf8..4a5574ff007b 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -33,33 +33,33 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
struct acpi_resource_vendor *vendor;
struct acpi_vendor_descriptor *descriptor;
- u32 length;
+ u32 byte_length;
- if (resource->id != ACPI_RSTYPE_VENDOR)
+ if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
return AE_OK;
vendor = (struct acpi_resource_vendor *)&resource->data;
- descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
- if (vendor->length <= sizeof(*info->descriptor) ||
+ descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
+ if (vendor->byte_length <= sizeof(*info->descriptor) ||
descriptor->guid_id != info->descriptor->guid_id ||
efi_guidcmp(descriptor->guid, info->descriptor->guid))
return AE_OK;
- length = vendor->length - sizeof(struct acpi_vendor_descriptor);
- info->data = acpi_os_allocate(length);
+ byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
+ info->data = acpi_os_allocate(byte_length);
if (!info->data)
return AE_NO_MEMORY;
memcpy(info->data,
- vendor->reserved + sizeof(struct acpi_vendor_descriptor),
- length);
- info->length = length;
+ vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
+ byte_length);
+ info->length = byte_length;
return AE_CTRL_TERMINATE;
}
acpi_status
acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
- u8 ** data, u32 * length)
+ u8 ** data, u32 * byte_length)
{
struct acpi_vendor_info info;
@@ -72,7 +72,7 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
return AE_NOT_FOUND;
*data = info.data;
- *length = info.length;
+ *byte_length = info.length;
return AE_OK;
}
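
The acpi-ext.c hunks track an ACPICA interface rename: vendor resources are now discriminated by resource->type rather than resource->id, and the payload is exposed as byte_length/byte_data instead of length/reserved. A sketch of the two structures as this code consumes them (the exact field widths are an assumption here, not the authoritative ACPICA layout):

struct acpi_vendor_descriptor {
	u8		guid_id;	/* compared against info->descriptor->guid_id */
	efi_guid_t	guid;		/* compared with efi_guidcmp() */
};

struct acpi_resource_vendor {
	u16	byte_length;		/* total bytes in byte_data[] */
	u8	byte_data[1];		/* vendor descriptor followed by payload */
};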
diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c
new file mode 100644
index 000000000000..e683630c8ce2
--- /dev/null
+++ b/arch/ia64/kernel/acpi-processor.c
@@ -0,0 +1,67 @@
+/*
+ * arch/ia64/kernel/acpi-processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr)
+{
+ struct acpi_object_list *obj_list;
+ union acpi_object *obj;
+ u32 *buf;
+
+ /* allocate and initialize pdc. It will be used later. */
+ obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+ if (!obj_list) {
+ printk(KERN_ERR "Memory allocation error\n");
+ return;
+ }
+
+ obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+ if (!obj) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj_list);
+ return;
+ }
+
+ buf = kmalloc(12, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj);
+ kfree(obj_list);
+ return;
+ }
+
+ buf[0] = ACPI_PDC_REVISION_ID;
+ buf[1] = 1;
+	buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;	/* plain assignment: buf is uninitialized kmalloc memory */
+
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = 12;
+ obj->buffer.pointer = (u8 *) buf;
+ obj_list->count = 1;
+ obj_list->pointer = obj;
+ pr->pdc = obj_list;
+
+ return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+ pr->pdc = NULL;
+ init_intel_pdc(pr);
+ return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
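
arch_acpi_processor_init_pdc() hands the ACPI processor driver a one-element _PDC argument list: an ACPI_TYPE_BUFFER of three u32 words -- revision, capability-word count, and the capability bits themselves. A condensed sketch of the same construction; kzalloc(), the helper name, and the single error label are illustrative choices here, not the patch's code:

static struct acpi_object_list *build_pdc_sketch(void)
{
	struct acpi_object_list *list;
	union acpi_object *obj;
	u32 *buf;

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	buf = kzalloc(3 * sizeof(u32), GFP_KERNEL);
	if (!list || !obj || !buf)
		goto fail;

	buf[0] = ACPI_PDC_REVISION_ID;		/* _PDC revision */
	buf[1] = 1;				/* one capability word follows */
	buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;	/* SMP-capable EST driver */

	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = 3 * sizeof(u32);
	obj->buffer.pointer = (u8 *)buf;
	list->count = 1;
	list->pointer = obj;
	return list;
fail:
	kfree(buf);
	kfree(obj);
	kfree(list);
	return NULL;
}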
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 9ad94ddf6687..d2702c419cf8 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -567,16 +567,16 @@ void __init acpi_numa_arch_fixup(void)
* success: return IRQ number (>=0)
* failure: return < 0
*/
-int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
if (has_8259 && gsi < 16)
return isa_irq_to_vector(gsi);
return iosapic_register_intr(gsi,
- (active_high_low ==
+ (polarity ==
ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
IOSAPIC_POL_LOW,
- (edge_level ==
+ (triggering ==
ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
IOSAPIC_LEVEL);
}
diff --git a/arch/ia64/kernel/cpufreq/Makefile b/arch/ia64/kernel/cpufreq/Makefile
index f748d34c02f0..4838f2a57c7a 100644
--- a/arch/ia64/kernel/cpufreq/Makefile
+++ b/arch/ia64/kernel/cpufreq/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o
+
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index da4d5cf80a48..5a1bf815282d 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -269,48 +269,6 @@ acpi_cpufreq_verify (
}
-/*
- * processor_init_pdc - let BIOS know about the SMP capabilities
- * of this driver
- * @perf: processor-specific acpi_io_data struct
- * @cpu: CPU being initialized
- *
- * To avoid issues with legacy OSes, some BIOSes require to be informed of
- * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
- * accordingly. Actual call to _PDC is done in driver/acpi/processor.c
- */
-static void
-processor_init_pdc (
- struct acpi_processor_performance *perf,
- unsigned int cpu,
- struct acpi_object_list *obj_list
- )
-{
- union acpi_object *obj;
- u32 *buf;
-
- dprintk("processor_init_pdc\n");
-
- perf->pdc = NULL;
- /* Initialize pdc. It will be used later. */
- if (!obj_list)
- return;
-
- if (!(obj_list->count && obj_list->pointer))
- return;
-
- obj = obj_list->pointer;
- if ((obj->buffer.length == 12) && obj->buffer.pointer) {
- buf = (u32 *)obj->buffer.pointer;
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
- perf->pdc = obj_list;
- }
- return;
-}
-
-
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@@ -320,14 +278,7 @@ acpi_cpufreq_cpu_init (
struct cpufreq_acpi_io *data;
unsigned int result = 0;
- union acpi_object arg0 = {ACPI_TYPE_BUFFER};
- u32 arg0_buf[3];
- struct acpi_object_list arg_list = {1, &arg0};
-
dprintk("acpi_cpufreq_cpu_init\n");
- /* setup arg_list for _PDC settings */
- arg0.buffer.length = 12;
- arg0.buffer.pointer = (u8 *) arg0_buf;
data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
@@ -337,9 +288,7 @@ acpi_cpufreq_cpu_init (
acpi_io_data[cpu] = data;
- processor_init_pdc(&data->acpi_data, cpu, &arg_list);
result = acpi_processor_register_performance(&data->acpi_data, cpu);
- data->acpi_data.pdc = NULL;
if (result)
goto err_free;
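
With _PDC evaluation moved into the ACPI processor driver (which calls arch_acpi_processor_init_pdc() above before registration), the cpufreq init path collapses to allocate-and-register. A sketch of the resulting flow; the condensed error handling is illustrative:

static int cpu_init_sketch(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cpufreq_acpi_io *data;
	int result;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	acpi_io_data[cpu] = data;

	/* _PDC has already been evaluated by the ACPI core at this point */
	result = acpi_processor_register_performance(&data->acpi_data, cpu);
	if (result) {
		acpi_io_data[cpu] = NULL;
		kfree(data);
	}
	return result;
}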
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index a3aa45cbcfa0..c485a3b32ba8 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -247,6 +247,32 @@ typedef struct kern_memdesc {
static kern_memdesc_t *kern_memmap;
+#define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
+
+static inline u64
+kmd_end(kern_memdesc_t *kmd)
+{
+ return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
+}
+
+static inline u64
+efi_md_end(efi_memory_desc_t *md)
+{
+ return (md->phys_addr + efi_md_size(md));
+}
+
+static inline int
+efi_wb(efi_memory_desc_t *md)
+{
+ return (md->attribute & EFI_MEMORY_WB);
+}
+
+static inline int
+efi_uc(efi_memory_desc_t *md)
+{
+ return (md->attribute & EFI_MEMORY_UC);
+}
+
static void
walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
@@ -595,8 +621,8 @@ efi_get_iobase (void)
return 0;
}
-u32
-efi_mem_type (unsigned long phys_addr)
+static efi_memory_desc_t *
+efi_memory_descriptor (unsigned long phys_addr)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
@@ -610,13 +636,13 @@ efi_mem_type (unsigned long phys_addr)
md = p;
if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
- return md->type;
+ return md;
}
return 0;
}
-u64
-efi_mem_attributes (unsigned long phys_addr)
+static int
+efi_memmap_has_mmio (void)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
@@ -629,36 +655,98 @@ efi_mem_attributes (unsigned long phys_addr)
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
- if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
- return md->attribute;
+ if (md->type == EFI_MEMORY_MAPPED_IO)
+ return 1;
}
return 0;
}
+
+u32
+efi_mem_type (unsigned long phys_addr)
+{
+ efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+
+ if (md)
+ return md->type;
+ return 0;
+}
+
+u64
+efi_mem_attributes (unsigned long phys_addr)
+{
+ efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+
+ if (md)
+ return md->attribute;
+ return 0;
+}
EXPORT_SYMBOL(efi_mem_attributes);
+/*
+ * Determines whether the memory at phys_addr supports the desired
+ * attribute (WB, UC, etc). If this returns 1, the caller can safely
+ * access *size bytes at phys_addr with the specified attribute.
+ */
+static int
+efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr)
+{
+ efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+ unsigned long md_end;
+
+ if (!md || (md->attribute & attr) != attr)
+ return 0;
+
+ do {
+ md_end = efi_md_end(md);
+ if (phys_addr + *size <= md_end)
+ return 1;
+
+ md = efi_memory_descriptor(md_end);
+ if (!md || (md->attribute & attr) != attr) {
+ *size = md_end - phys_addr;
+ return 1;
+ }
+ } while (md);
+ return 0;
+}
+
+/*
+ * For /dev/mem, we only allow read & write system calls to access
+ * write-back memory, because read & write don't allow the user to
+ * control access size.
+ */
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
+ return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB);
+}
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
+/*
+ * We allow mmap of anything in the EFI memory map that supports
+ * either write-back or uncacheable access. For uncacheable regions,
+ * the supported access sizes are system-dependent, and the user is
+ * responsible for using the correct size.
+ *
+ * Note that this doesn't currently allow access to hot-added memory,
+ * because that doesn't appear in the boot-time EFI memory map.
+ */
+int
+valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size)
+{
+ if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB))
+ return 1;
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
+ if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC))
+ return 1;
- if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
- if (!(md->attribute & EFI_MEMORY_WB))
- return 0;
+ /*
+ * Some firmware doesn't report MMIO regions in the EFI memory map.
+ * The Intel BigSur (a.k.a. HP i2000) has this problem. In this
+ * case, we can't use the EFI memory map to validate mmap requests.
+ */
+ if (!efi_memmap_has_mmio())
+ return 1;
- if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
- *size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
- return 1;
- }
- }
return 0;
}
@@ -707,32 +795,6 @@ efi_uart_console_only(void)
return 0;
}
-#define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
-
-static inline u64
-kmd_end(kern_memdesc_t *kmd)
-{
- return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
-}
-
-static inline u64
-efi_md_end(efi_memory_desc_t *md)
-{
- return (md->phys_addr + efi_md_size(md));
-}
-
-static inline int
-efi_wb(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_WB);
-}
-
-static inline int
-efi_uc(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_UC);
-}
-
/*
* Look for the first granule aligned memory descriptor memory
* that is big enough to hold EFI memory map. Make sure this
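
The heart of the new efi.c code is efi_mem_attribute_range(): starting from the descriptor containing phys_addr, it extends the range across physically contiguous descriptors that keep the requested attribute, trimming *size when the run ends short of the request. A standalone model of that walk over a flat, sorted descriptor array (test-harness code, not the kernel's):

struct desc { u64 start, end, attr; };

static int attr_range(struct desc *d, int n, u64 addr, u64 *size, u64 attr)
{
	int i;

	for (i = 0; i < n; i++) {
		if (addr < d[i].start || addr >= d[i].end)
			continue;
		if ((d[i].attr & attr) != attr)
			return 0;	/* first descriptor lacks the attribute */
		while (addr + *size > d[i].end) {	/* request spills past this descriptor? */
			if (i + 1 == n || d[i + 1].start != d[i].end ||
			    (d[i + 1].attr & attr) != attr) {
				*size = d[i].end - addr;	/* trim to the contiguous run */
				return 1;
			}
			i++;
		}
		return 1;
	}
	return 0;
}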
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0741b066b98f..7a6ffd613789 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1600,5 +1600,6 @@ sys_call_table:
data8 sys_inotify_init
data8 sys_inotify_add_watch
data8 sys_inotify_rm_watch
+ data8 sys_migrate_pages // 1280
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 2ddbac6f4999..ce423910ca97 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -903,5 +903,6 @@ fsyscall_table:
data8 0
data8 0
data8 0
+ data8 0 // 1280
.org fsyscall_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index bfe65b2e8621..fbc7ea35dd57 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1060,7 +1060,7 @@ SET_REG(b5);
* the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
*/
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
.prologue
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 5db9d3bcbbcb..e72de580ebbf 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -103,7 +103,7 @@ EXPORT_SYMBOL(unw_init_running);
#ifdef ASM_SUPPORTED
# ifdef CONFIG_SMP
-# if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+# if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
/*
* This is not a normal routine and we don't want a function descriptor for it, so we use
* a fake declaration here.
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index 2323377e3695..5cd6226f44f2 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -60,3 +60,30 @@ END(jprobe_break)
GLOBAL_ENTRY(jprobe_inst_return)
br.call.sptk.many b0=jprobe_break
END(jprobe_inst_return)
+
+GLOBAL_ENTRY(invalidate_stacked_regs)
+ movl r16=invalidate_restore_cfm
+ ;;
+ mov b6=r16
+ ;;
+ br.ret.sptk.many b6
+ ;;
+invalidate_restore_cfm:
+ mov r16=ar.rsc
+ ;;
+ mov ar.rsc=r0
+ ;;
+ loadrs
+ ;;
+ mov ar.rsc=r16
+ ;;
+ br.cond.sptk.many rp
+END(invalidate_stacked_regs)
+
+GLOBAL_ENTRY(flush_register_stack)
+ // flush dirty regs to backing store (must be first in insn group)
+ flushrs
+ ;;
+ br.ret.sptk.many rp
+END(flush_register_stack)
+
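These two entry points give kprobes.c a C-callable handle on the ia64 Register Stack Engine, used by the jprobe argument save/restore below. Their C-side view (the exact declaring header is an assumption):

/* flushrs: write all dirty stacked registers out to the RSE backing
 * store, so the memory at bsp is current before it is copied. */
extern void flush_register_stack(void);

/* loadrs with a zeroed RSC.loadrs field: invalidate the stacked
 * registers below the current frame, forcing them to be reloaded
 * from the (just-restored) backing store. */
extern void invalidate_stacked_regs(void);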
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 89a70400c4f6..50ae8c7d453d 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -467,10 +467,6 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
-}
-
/*
* We are resuming execution after a single step fault, so the pt_regs
* structure reflects the register state after we executed the instruction
@@ -642,6 +638,13 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
+ } else if (!is_ia64_break_inst(regs)) {
+		/*
+		 * The breakpoint instruction was removed by another cpu
+		 * right after we hit it; no further handling of this
+		 * interrupt is appropriate.
+		 */
+ ret = 1;
+ goto no_kprobe;
} else {
/* Not our break */
goto no_kprobe;
@@ -763,11 +766,56 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
+struct param_bsp_cfm {
+ unsigned long ip;
+ unsigned long *bsp;
+ unsigned long cfm;
+};
+
+static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
+{
+ unsigned long ip;
+ struct param_bsp_cfm *lp = arg;
+
+ do {
+ unw_get_ip(info, &ip);
+ if (ip == 0)
+ break;
+ if (ip == lp->ip) {
+ unw_get_bsp(info, (unsigned long*)&lp->bsp);
+ unw_get_cfm(info, (unsigned long*)&lp->cfm);
+ return;
+ }
+ } while (unw_unwind(info) >= 0);
+ lp->bsp = 0;
+ lp->cfm = 0;
+ return;
+}
+
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ struct param_bsp_cfm pa;
+ int bytes;
+
+	/*
+	 * The callee owns the argument space and could overwrite it,
+	 * e.g. via tail-call optimization. So, to be absolutely safe,
+	 * we save the argument space before transferring control to the
+	 * instrumented jprobe function, which runs in process context.
+	 */
+ pa.ip = regs->cr_iip;
+ unw_init_running(ia64_get_bsp_cfm, &pa);
+ bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
+ - (char *)pa.bsp;
+ memcpy( kcb->jprobes_saved_stacked_regs,
+ pa.bsp,
+ bytes );
+ kcb->bsp = pa.bsp;
+ kcb->cfm = pa.cfm;
/* save architectural state */
kcb->jprobe_saved_regs = *regs;
@@ -789,8 +837,20 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ int bytes;
+ /* restoring architectural state */
*regs = kcb->jprobe_saved_regs;
+
+ /* restoring the original argument space */
+ flush_register_stack();
+ bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
+ - (char *)kcb->bsp;
+ memcpy( kcb->bsp,
+ kcb->jprobes_saved_stacked_regs,
+ bytes );
+ invalidate_stacked_regs();
+
preempt_enable_no_resched();
return 1;
}
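
The byte counts in both hunks come from cfm's size-of-frame field plus RSE geometry: every 64th slot of the backing store is a NaT-collection slot, so ia64_rse_skip_regs() advances by slightly more than one doubleword per stacked register. A model of that helper, mirroring the asm/rse.h definition:

static unsigned long *rse_skip_regs(unsigned long *addr, long num_regs)
{
	/* slot number of addr within its 64-slot group, plus the move */
	long delta = ((((unsigned long)addr) >> 3) & 0x3f) + num_regs;

	if (num_regs < 0)
		delta -= 62;
	/* one extra slot for every NaT-collection boundary crossed */
	return addr + num_regs + delta / 63;
}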
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 355af15287c7..ee7eec9ee576 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -766,7 +766,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
l = strlen(previous_current->comm);
snprintf(comm, sizeof(comm), "%s %*s %d",
current->comm, l, previous_current->comm,
- previous_current->thread_info->cpu);
+ task_thread_info(previous_current)->cpu);
}
memcpy(current->comm, comm, sizeof(current->comm));
@@ -1423,7 +1423,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
struct thread_info *ti;
memset(p, 0, KERNEL_STACK_SIZE);
- ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE);
+ ti = task_thread_info(p);
ti->flags = _TIF_MCA_INIT;
ti->preempt_count = 1;
ti->task = p;
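
Both mca.c hunks replace open-coded pointer arithmetic with the new task_thread_info() accessor. On ia64 the two are equivalent because thread_info lives in the same allocation as the task_struct, IA64_TASK_SIZE bytes in; the accessor roughly expands to:

/* the same arithmetic the second hunk deletes, behind an accessor */
#define task_thread_info(tsk) \
	((struct thread_info *)((char *)(tsk) + IA64_TASK_SIZE))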
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index db32fc1d3935..403a80a58c13 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -847,7 +847,7 @@ ia64_state_restore:
;;
mov cr.iim=temp3
mov cr.iha=temp4
- dep r22=0,r22,62,2 // pal_min_state, physical, uncached
+ dep r22=0,r22,62,1 // pal_min_state, physical, uncached
mov IA64_KR(CURRENT)=r21
ld8 r8=[temp1] // os_status
ld8 r10=[temp2] // context
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 410d4804fa6e..9c5194b385da 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -38,7 +38,9 @@
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
+#include <linux/capability.h>
#include <linux/rcupdate.h>
+#include <linux/completion.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
@@ -285,7 +287,7 @@ typedef struct pfm_context {
unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
- struct semaphore ctx_restart_sem; /* use for blocking notification mode */
+ struct completion ctx_restart_done; /* use for blocking notification mode */
unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
@@ -627,9 +629,11 @@ static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count,
#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
+#include "perfmon_montecito.h"
#include "perfmon_generic.h"
static pmu_config_t *pmu_confs[]={
+ &pmu_conf_mont,
&pmu_conf_mck,
&pmu_conf_ita,
&pmu_conf_gen, /* must be last */
@@ -1709,7 +1713,7 @@ static void
pfm_syswide_force_stop(void *info)
{
pfm_context_t *ctx = (pfm_context_t *)info;
- struct pt_regs *regs = ia64_task_regs(current);
+ struct pt_regs *regs = task_pt_regs(current);
struct task_struct *owner;
unsigned long flags;
int ret;
@@ -1814,7 +1818,7 @@ pfm_flush(struct file *filp)
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
DPRINT(("ctx_state=%d is_current=%d\n",
state,
@@ -1944,7 +1948,7 @@ pfm_close(struct inode *inode, struct file *filp)
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
DPRINT(("ctx_state=%d is_current=%d\n",
state,
@@ -1988,7 +1992,7 @@ pfm_close(struct inode *inode, struct file *filp)
/*
* force task to wake up from MASKED state
*/
- up(&ctx->ctx_restart_sem);
+ complete(&ctx->ctx_restart_done);
DPRINT(("waking up ctx_state=%d\n", state));
@@ -2703,7 +2707,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
/*
* init restart semaphore to locked
*/
- sema_init(&ctx->ctx_restart_sem, 0);
+ init_completion(&ctx->ctx_restart_done);
/*
* activation is used in SMP only
@@ -3684,7 +3688,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*/
if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
DPRINT(("unblocking [%d] \n", task->pid));
- up(&ctx->ctx_restart_sem);
+ complete(&ctx->ctx_restart_done);
} else {
DPRINT(("[%d] armed exit trap\n", task->pid));
@@ -4051,7 +4055,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*/
ia64_psr(regs)->up = 0;
} else {
- tregs = ia64_task_regs(task);
+ tregs = task_pt_regs(task);
/*
* stop monitoring at the user level
@@ -4133,7 +4137,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
ia64_psr(regs)->up = 1;
} else {
- tregs = ia64_task_regs(ctx->ctx_task);
+ tregs = task_pt_regs(ctx->ctx_task);
/*
* start monitoring at the kernel level the next
@@ -4403,7 +4407,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
/*
* when not current, task MUST be stopped, so this is safe
*/
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
/* force a full reload */
ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4529,7 +4533,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
/*
* per-task mode
*/
- tregs = task == current ? regs : ia64_task_regs(task);
+ tregs = task == current ? regs : task_pt_regs(task);
if (task == current) {
/*
@@ -4592,7 +4596,7 @@ pfm_exit_thread(struct task_struct *task)
{
pfm_context_t *ctx;
unsigned long flags;
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
int ret, state;
int free_ok = 0;
@@ -4925,7 +4929,7 @@ restart_args:
if (unlikely(ret)) goto abort_locked;
skip_fd:
- ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+ ret = (*func)(ctx, args_k, count, task_pt_regs(current));
call_made = 1;
@@ -5049,7 +5053,7 @@ pfm_handle_work(void)
pfm_clear_task_notify();
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
/*
* extract reason for being here and clear
@@ -5086,7 +5090,7 @@ pfm_handle_work(void)
* may go through without blocking on SMP systems
* if restart has been received already by the time we call down()
*/
- ret = down_interruptible(&ctx->ctx_restart_sem);
+ ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
DPRINT(("after block sleeping ret=%d\n", ret));
@@ -5793,7 +5797,7 @@ pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_c
* on every CPU, so we can rely on the pid to identify the idle task.
*/
if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
return;
}
@@ -5876,7 +5880,7 @@ pfm_save_regs(struct task_struct *task)
flags = pfm_protect_ctx_ctxsw(ctx);
if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
pfm_clear_psr_up();
@@ -6076,7 +6080,7 @@ pfm_load_regs (struct task_struct *task)
BUG_ON(psr & IA64_PSR_I);
if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
BUG_ON(ctx->ctx_smpl_hdr);
@@ -6445,7 +6449,7 @@ pfm_alt_save_pmu_state(void *data)
{
struct pt_regs *regs;
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
DPRINT(("called\n"));
@@ -6471,7 +6475,7 @@ pfm_alt_restore_pmu_state(void *data)
{
struct pt_regs *regs;
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
DPRINT(("called\n"));
@@ -6753,7 +6757,7 @@ dump_pmu_state(const char *from)
local_irq_save(flags);
this_cpu = smp_processor_id();
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
info = PFM_CPUINFO_GET();
dcr = ia64_getreg(_IA64_REG_CR_DCR);
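
Two mechanical conversions run through all of the perfmon.c hunks: ia64_task_regs() becomes task_pt_regs(), and the zero-initialized semaphore used as a wakeup event becomes a completion. The semaphore-to-completion mapping in isolation (the names here are illustrative, not perfmon's):

#include <linux/completion.h>

struct waitpoint {
	struct completion done;
};

static void waitpoint_init(struct waitpoint *w)
{
	init_completion(&w->done);	/* was: sema_init(&sem, 0) */
}

static int waitpoint_block(struct waitpoint *w)
{
	/* was: down_interruptible(&sem); returns -ERESTARTSYS on signal */
	return wait_for_completion_interruptible(&w->done);
}

static void waitpoint_wake(struct waitpoint *w)
{
	complete(&w->done);		/* was: up(&sem) */
}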
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
new file mode 100644
index 000000000000..cd06ac6a686c
--- /dev/null
+++ b/arch/ia64/kernel/perfmon_montecito.h
@@ -0,0 +1,269 @@
+/*
+ * This file contains the Montecito PMU register description tables
+ * and pmc checker used by perfmon.c.
+ *
+ * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
+ * Contributed by Stephane Eranian <eranian@hpl.hp.com>
+ */
+static int pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
+
+#define RDEP_MONT_ETB (RDEP(38)|RDEP(39)|RDEP(48)|RDEP(49)|RDEP(50)|RDEP(51)|RDEP(52)|RDEP(53)|RDEP(54)|\
+ RDEP(55)|RDEP(56)|RDEP(57)|RDEP(58)|RDEP(59)|RDEP(60)|RDEP(61)|RDEP(62)|RDEP(63))
+#define RDEP_MONT_DEAR (RDEP(32)|RDEP(33)|RDEP(36))
+#define RDEP_MONT_IEAR (RDEP(34)|RDEP(35))
+
+static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={
+/* pmc0 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc4 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(4),0, 0, 0}, {0,0, 0, 0}},
+/* pmc5 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(5),0, 0, 0}, {0,0, 0, 0}},
+/* pmc6 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(6),0, 0, 0}, {0,0, 0, 0}},
+/* pmc7 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(7),0, 0, 0}, {0,0, 0, 0}},
+/* pmc8 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(8),0, 0, 0}, {0,0, 0, 0}},
+/* pmc9 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(9),0, 0, 0}, {0,0, 0, 0}},
+/* pmc10 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(10),0, 0, 0}, {0,0, 0, 0}},
+/* pmc11 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(11),0, 0, 0}, {0,0, 0, 0}},
+/* pmc12 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(12),0, 0, 0}, {0,0, 0, 0}},
+/* pmc13 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(13),0, 0, 0}, {0,0, 0, 0}},
+/* pmc14 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(14),0, 0, 0}, {0,0, 0, 0}},
+/* pmc15 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(15),0, 0, 0}, {0,0, 0, 0}},
+/* pmc16 */ { PFM_REG_NOTIMPL, },
+/* pmc17 */ { PFM_REG_NOTIMPL, },
+/* pmc18 */ { PFM_REG_NOTIMPL, },
+/* pmc19 */ { PFM_REG_NOTIMPL, },
+/* pmc20 */ { PFM_REG_NOTIMPL, },
+/* pmc21 */ { PFM_REG_NOTIMPL, },
+/* pmc22 */ { PFM_REG_NOTIMPL, },
+/* pmc23 */ { PFM_REG_NOTIMPL, },
+/* pmc24 */ { PFM_REG_NOTIMPL, },
+/* pmc25 */ { PFM_REG_NOTIMPL, },
+/* pmc26 */ { PFM_REG_NOTIMPL, },
+/* pmc27 */ { PFM_REG_NOTIMPL, },
+/* pmc28 */ { PFM_REG_NOTIMPL, },
+/* pmc29 */ { PFM_REG_NOTIMPL, },
+/* pmc30 */ { PFM_REG_NOTIMPL, },
+/* pmc31 */ { PFM_REG_NOTIMPL, },
+/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
+/* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
+/* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
+/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
+ { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
+};
+
+static pfm_reg_desc_t pfm_mont_pmd_desc[PMU_MAX_PMDS]={
+/* pmd0 */ { PFM_REG_NOTIMPL, },
+/* pmd1 */ { PFM_REG_NOTIMPL, },
+/* pmd2 */ { PFM_REG_NOTIMPL, },
+/* pmd3 */ { PFM_REG_NOTIMPL, },
+/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(4),0, 0, 0}},
+/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(5),0, 0, 0}},
+/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(6),0, 0, 0}},
+/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(7),0, 0, 0}},
+/* pmd8 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(8),0, 0, 0}},
+/* pmd9 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(9),0, 0, 0}},
+/* pmd10 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(10),0, 0, 0}},
+/* pmd11 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(11),0, 0, 0}},
+/* pmd12 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(12),0, 0, 0}},
+/* pmd13 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(13),0, 0, 0}},
+/* pmd14 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(14),0, 0, 0}},
+/* pmd15 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(15),0, 0, 0}},
+/* pmd16 */ { PFM_REG_NOTIMPL, },
+/* pmd17 */ { PFM_REG_NOTIMPL, },
+/* pmd18 */ { PFM_REG_NOTIMPL, },
+/* pmd19 */ { PFM_REG_NOTIMPL, },
+/* pmd20 */ { PFM_REG_NOTIMPL, },
+/* pmd21 */ { PFM_REG_NOTIMPL, },
+/* pmd22 */ { PFM_REG_NOTIMPL, },
+/* pmd23 */ { PFM_REG_NOTIMPL, },
+/* pmd24 */ { PFM_REG_NOTIMPL, },
+/* pmd25 */ { PFM_REG_NOTIMPL, },
+/* pmd26 */ { PFM_REG_NOTIMPL, },
+/* pmd27 */ { PFM_REG_NOTIMPL, },
+/* pmd28 */ { PFM_REG_NOTIMPL, },
+/* pmd29 */ { PFM_REG_NOTIMPL, },
+/* pmd30 */ { PFM_REG_NOTIMPL, },
+/* pmd31 */ { PFM_REG_NOTIMPL, },
+/* pmd32 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(33)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
+/* pmd33 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
+/* pmd34 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(35),0, 0, 0}, {RDEP(37),0, 0, 0}},
+/* pmd35 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(34),0, 0, 0}, {RDEP(37),0, 0, 0}},
+/* pmd36 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(33),0, 0, 0}, {RDEP(40),0, 0, 0}},
+/* pmd37 */ { PFM_REG_NOTIMPL, },
+/* pmd38 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd39 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd40 */ { PFM_REG_NOTIMPL, },
+/* pmd41 */ { PFM_REG_NOTIMPL, },
+/* pmd42 */ { PFM_REG_NOTIMPL, },
+/* pmd43 */ { PFM_REG_NOTIMPL, },
+/* pmd44 */ { PFM_REG_NOTIMPL, },
+/* pmd45 */ { PFM_REG_NOTIMPL, },
+/* pmd46 */ { PFM_REG_NOTIMPL, },
+/* pmd47 */ { PFM_REG_NOTIMPL, },
+/* pmd48 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd49 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd50 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd51 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd52 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd53 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd54 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd55 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd56 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd57 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd58 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd59 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd60 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd61 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd62 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd63 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+ { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
+};
+
+/*
+ * PMC reserved fields must have their power-up values preserved
+ */
+static int
+pfm_mont_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
+{
+ unsigned long tmp1, tmp2, ival = *val;
+
+ /* remove reserved areas from user value */
+ tmp1 = ival & PMC_RSVD_MASK(cnum);
+
+ /* get reserved fields values */
+ tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum);
+
+ *val = tmp1 | tmp2;
+
+ DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
+ cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
+ return 0;
+}
+
+/*
+ * task can be NULL if the context is unloaded
+ */
+static int
+pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
+{
+ int ret = 0;
+ unsigned long val32 = 0, val38 = 0, val41 = 0;
+ unsigned long tmpval;
+ int check_case1 = 0;
+ int is_loaded;
+
+ /* first preserve the reserved fields */
+ pfm_mont_reserved(cnum, val, regs);
+
+ tmpval = *val;
+
+ /* sanity check */
+ if (ctx == NULL) return -EINVAL;
+
+ is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
+
+	/*
+	 * We must clear the debug registers if pmc41 has a value which enables
+	 * memory pipeline event constraints. In this case we need to clear
+	 * the debug registers if they have not yet been accessed. This is
+	 * required to avoid picking up stale state.
+	 * PMC41 is "active" if:
+	 *	one of the pmc41.cfg_dtagXX fields is different from 0x3
+	 * AND
+	 *	the corresponding pmc41.en_dbrpXX bit is set
+	 * AND
+	 *	ctx_fl_using_dbreg == 0 (i.e., dbrs not yet used)
+	 */
+ DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
+
+ if (cnum == 41 && is_loaded
+ && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
+
+ DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
+
+ /* don't mix debug with perfmon */
+ if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
+
+	/*
+	 * a count of 0 will mark the debug registers as in use and also
+	 * ensure that they are properly cleared.
+	 */
+ ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
+ if (ret) return ret;
+ }
+ /*
+ * we must clear the (instruction) debug registers if:
+ * pmc38.ig_ibrpX is 0 (enabled)
+ * AND
+ * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
+ */
+ if (cnum == 38 && is_loaded && ((tmpval & 0x492UL) != 0x492UL) && ctx->ctx_fl_using_dbreg == 0) {
+
+ DPRINT(("pmc38=0x%lx has active pmc38 settings, clearing ibr\n", tmpval));
+
+ /* don't mix debug with perfmon */
+ if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
+
+ /*
+ * a count of 0 will mark the debug registers as in use and also
+ * ensure that they are properly cleared.
+ */
+ ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
+ if (ret) return ret;
+
+ }
+ switch(cnum) {
+ case 32: val32 = *val;
+ val38 = ctx->ctx_pmcs[38];
+ val41 = ctx->ctx_pmcs[41];
+ check_case1 = 1;
+ break;
+ case 38: val38 = *val;
+ val32 = ctx->ctx_pmcs[32];
+ val41 = ctx->ctx_pmcs[41];
+ check_case1 = 1;
+ break;
+ case 41: val41 = *val;
+ val32 = ctx->ctx_pmcs[32];
+ val38 = ctx->ctx_pmcs[38];
+ check_case1 = 1;
+ break;
+ }
+	/* check for an illegal configuration which can produce inconsistencies
+	 * in tagging i-side events in the L1D and L2 caches
+	 */
+ if (check_case1) {
+ ret = (((val41 >> 45) & 0xf) == 0 && ((val32>>57) & 0x1) == 0)
+ && ((((val38>>1) & 0x3) == 0x2 || ((val38>>1) & 0x3) == 0)
+ || (((val38>>4) & 0x3) == 0x2 || ((val38>>4) & 0x3) == 0));
+ if (ret) {
+ DPRINT(("invalid config pmc38=0x%lx pmc41=0x%lx pmc32=0x%lx\n", val38, val41, val32));
+ return -EINVAL;
+ }
+ }
+ *val = tmpval;
+ return 0;
+}
+
+/*
+ * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
+ */
+static pmu_config_t pmu_conf_mont={
+ .pmu_name = "Montecito",
+ .pmu_family = 0x20,
+ .flags = PFM_PMU_IRQ_RESEND,
+ .ovfl_val = (1UL << 47) - 1,
+ .pmd_desc = pfm_mont_pmd_desc,
+ .pmc_desc = pfm_mont_pmc_desc,
+ .num_ibrs = 8,
+ .num_dbrs = 8,
+	.use_rr_dbregs = 1 /* debug registers are used for range restrictions */
+};
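
pfm_mont_reserved() above implements a single merge rule: writable bits come from the user's value, reserved bits keep their power-up defaults. (Note that despite its name, PMC_RSVD_MASK() carries 1s for the writable fields, as its use shows.) The rule as a pure function:

/* rsvd_mask: 1 = writable bit (per the usage above, despite the name) */
static unsigned long merge_reserved(unsigned long user_val,
				    unsigned long rsvd_mask,
				    unsigned long dfl_val)
{
	return (user_val & rsvd_mask) | (dfl_val & ~rsvd_mask);
}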
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e9904c74d2ba..309d59658e5f 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -328,7 +328,7 @@ ia64_save_extra (struct task_struct *task)
#endif
#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(task)))
+ if (IS_IA32_PROCESS(task_pt_regs(task)))
ia32_save_state(task);
#endif
}
@@ -353,7 +353,7 @@ ia64_load_extra (struct task_struct *task)
#endif
#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(task)))
+ if (IS_IA32_PROCESS(task_pt_regs(task)))
ia32_load_state(task);
#endif
}
@@ -488,7 +488,7 @@ copy_thread (int nr, unsigned long clone_flags,
* If we're cloning an IA32 task then save the IA32 extra
* state from the current task to the new task
*/
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+ if (IS_IA32_PROCESS(task_pt_regs(current))) {
ia32_save_state(p);
if (clone_flags & CLONE_SETTLS)
retval = ia32_clone_tls(p, child_ptregs);
@@ -701,7 +701,7 @@ int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+ if (IS_IA32_PROCESS(task_pt_regs(current))) {
/* A kernel thread is always a 64-bit process. */
current->thread.map_base = DEFAULT_MAP_BASE;
current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -722,7 +722,7 @@ flush_thread (void)
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current);
#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+ if (IS_IA32_PROCESS(task_pt_regs(current))) {
ia32_drop_partial_page_list(current);
current->thread.task_size = IA32_PAGE_OFFSET;
set_fs(USER_DS);
@@ -755,7 +755,7 @@ exit_thread (void)
if (current->thread.flags & IA64_THREAD_DBG_VALID)
pfm_release_debug_registers(current);
#endif
- if (IS_IA32_PROCESS(ia64_task_regs(current)))
+ if (IS_IA32_PROCESS(task_pt_regs(current)))
ia32_drop_partial_page_list(current);
}
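
All of the process.c hunks are the mechanical ia64_task_regs() -> task_pt_regs() rename. On ia64 the accessor locates pt_regs just below the top of the task's kernel stack; it roughly expands to:

/* the kernel stack ends IA64_STK_OFFSET bytes into the task allocation,
 * and the pt_regs frame sits immediately below that boundary */
#define task_pt_regs(t) \
	(((struct pt_regs *)((char *)(t) + IA64_STK_OFFSET)) - 1)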
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 4b19d0410632..eaed14aac6aa 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -254,7 +254,7 @@ get_rnat (struct task_struct *task, struct switch_stack *sw,
long num_regs, nbits;
struct pt_regs *pt;
- pt = ia64_task_regs(task);
+ pt = task_pt_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
@@ -314,7 +314,7 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
struct pt_regs *pt;
unsigned long cfm, *urbs_kargs;
- pt = ia64_task_regs(task);
+ pt = task_pt_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
@@ -407,7 +407,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
urbs_end = (long *) user_rbs_end;
laddr = (unsigned long *) addr;
- child_regs = ia64_task_regs(child);
+ child_regs = task_pt_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -467,7 +467,7 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
struct pt_regs *child_regs;
laddr = (unsigned long *) addr;
- child_regs = ia64_task_regs(child);
+ child_regs = task_pt_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -567,7 +567,7 @@ thread_matches (struct task_struct *thread, unsigned long addr)
*/
return 0;
- thread_regs = ia64_task_regs(thread);
+ thread_regs = task_pt_regs(thread);
thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
return 0;
@@ -627,7 +627,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
inline void
ia64_flush_fph (struct task_struct *task)
{
- struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+ struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
/*
* Prevent migrating this task while
@@ -653,7 +653,7 @@ ia64_flush_fph (struct task_struct *task)
void
ia64_sync_fph (struct task_struct *task)
{
- struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+ struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
ia64_flush_fph(task);
if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
@@ -794,7 +794,7 @@ access_uarea (struct task_struct *child, unsigned long addr,
+ offsetof(struct pt_regs, reg)))
- pt = ia64_task_regs(child);
+ pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
if ((addr & 0x7) != 0) {
@@ -1120,7 +1120,7 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
return -EIO;
- pt = ia64_task_regs(child);
+ pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
unw_init_from_blocked_task(&info, child);
if (unw_unwind_to_user(&info) < 0) {
@@ -1265,7 +1265,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
return -EIO;
- pt = ia64_task_regs(child);
+ pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
unw_init_from_blocked_task(&info, child);
if (unw_unwind_to_user(&info) < 0) {
@@ -1403,7 +1403,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
void
ptrace_disable (struct task_struct *child)
{
- struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
+ struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
/* make sure the single step/taken-branch trap bits are not set: */
child_psr->ss = 0;
@@ -1422,14 +1422,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
lock_kernel();
ret = -EPERM;
if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- current->ptrace |= PT_PTRACED;
- ret = 0;
+ ret = ptrace_traceme();
goto out;
}
@@ -1463,7 +1456,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
if (ret < 0)
goto out_tsk;
- pt = ia64_task_regs(child);
+ pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
switch (request) {
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 1461dc660b43..9d5a823479a3 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -3,7 +3,7 @@
*
* Creates entries in /proc/sal for various system features.
*
- * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2003, 2006 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2003 Hewlett-Packard Co
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
@@ -27,8 +27,17 @@
* mca.c may not pass a buffer, a NULL buffer just indicates that a new
* record is available in SAL.
* Replace some NR_CPUS by cpus_online, for hotplug cpu.
+ *
+ * Jan 5 2006 kaos@sgi.com
+ * Handle hotplug cpus coming online.
+ * Handle hotplug cpus going offline while they still have outstanding records.
+ * Use the cpu_* macros consistently.
+ * Replace the counting semaphore with a mutex and a test if the cpumask is non-empty.
+ * Modify the locking to make the test for "work to do" an atomic operation.
*/
+#include <linux/capability.h>
+#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
@@ -131,8 +140,8 @@ enum salinfo_state {
};
struct salinfo_data {
- volatile cpumask_t cpu_event; /* which cpus have outstanding events */
- struct semaphore sem; /* count of cpus with outstanding events (bits set in cpu_event) */
+ cpumask_t cpu_event; /* which cpus have outstanding events */
+ struct semaphore mutex;
u8 *log_buffer;
u64 log_size;
u8 *oemdata; /* decoded oem data */
@@ -173,6 +182,21 @@ struct salinfo_platform_oemdata_parms {
int ret;
};
+/* Kick the mutex that tells user space that there is work to do. Instead of
+ * trying to track the state of the mutex across multiple cpus, in user
+ * context, interrupt context, non-maskable interrupt context and hotplug cpu,
+ * it is far easier just to grab the mutex if it is free then release it.
+ *
+ * This routine must be called with data_saved_lock held, to make the down/up
+ * operation atomic.
+ */
+static void
+salinfo_work_to_do(struct salinfo_data *data)
+{
+ down_trylock(&data->mutex);
+ up(&data->mutex);
+}
+
static void
salinfo_platform_oemdata_cpu(void *context)
{
@@ -211,9 +235,9 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
+ if (irqsafe)
+ spin_lock_irqsave(&data_saved_lock, flags);
if (buffer) {
- if (irqsafe)
- spin_lock_irqsave(&data_saved_lock, flags);
for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
if (!data_saved->buffer)
break;
@@ -231,13 +255,11 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
data_saved->size = size;
data_saved->buffer = buffer;
}
- if (irqsafe)
- spin_unlock_irqrestore(&data_saved_lock, flags);
}
-
- if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) {
- if (irqsafe)
- up(&data->sem);
+ cpu_set(smp_processor_id(), data->cpu_event);
+ if (irqsafe) {
+ salinfo_work_to_do(data);
+ spin_unlock_irqrestore(&data_saved_lock, flags);
}
}
@@ -248,20 +270,17 @@ static struct timer_list salinfo_timer;
static void
salinfo_timeout_check(struct salinfo_data *data)
{
- int i;
+ unsigned long flags;
if (!data->open)
return;
- for_each_online_cpu(i) {
- if (test_bit(i, &data->cpu_event)) {
- /* double up() is not a problem, user space will see no
- * records for the additional "events".
- */
- up(&data->sem);
- }
+ if (!cpus_empty(data->cpu_event)) {
+ spin_lock_irqsave(&data_saved_lock, flags);
+ salinfo_work_to_do(data);
+ spin_unlock_irqrestore(&data_saved_lock, flags);
}
}
-static void
+static void
salinfo_timeout (unsigned long arg)
{
salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
@@ -289,16 +308,20 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
int i, n, cpu = -1;
retry:
- if (down_trylock(&data->sem)) {
+ if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- if (down_interruptible(&data->sem))
+ if (down_interruptible(&data->mutex))
return -EINTR;
}
n = data->cpu_check;
for (i = 0; i < NR_CPUS; i++) {
- if (test_bit(n, &data->cpu_event) && cpu_online(n)) {
+ if (cpu_isset(n, data->cpu_event)) {
+ if (!cpu_online(n)) {
+ cpu_clear(n, data->cpu_event);
+ continue;
+ }
cpu = n;
break;
}
@@ -309,9 +332,6 @@ retry:
if (cpu == -1)
goto retry;
- /* events are sticky until the user says "clear" */
- up(&data->sem);
-
/* for next read, start checking at next CPU */
data->cpu_check = cpu;
if (++data->cpu_check == NR_CPUS)
@@ -380,10 +400,8 @@ salinfo_log_release(struct inode *inode, struct file *file)
static void
call_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
- cpumask_t save_cpus_allowed, new_cpus_allowed;
- memcpy(&save_cpus_allowed, &current->cpus_allowed, sizeof(save_cpus_allowed));
- memset(&new_cpus_allowed, 0, sizeof(new_cpus_allowed));
- set_bit(cpu, &new_cpus_allowed);
+ cpumask_t save_cpus_allowed = current->cpus_allowed;
+ cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
set_cpus_allowed(current, new_cpus_allowed);
(*fn)(arg);
set_cpus_allowed(current, save_cpus_allowed);
@@ -432,10 +450,10 @@ retry:
if (!data->saved_num)
call_on_cpu(cpu, salinfo_log_read_cpu, data);
if (!data->log_size) {
- data->state = STATE_NO_DATA;
- clear_bit(cpu, &data->cpu_event);
+ data->state = STATE_NO_DATA;
+ cpu_clear(cpu, data->cpu_event);
} else {
- data->state = STATE_LOG_RECORD;
+ data->state = STATE_LOG_RECORD;
}
}
@@ -472,27 +490,31 @@ static int
salinfo_log_clear(struct salinfo_data *data, int cpu)
{
sal_log_record_header_t *rh;
+ unsigned long flags;
+ spin_lock_irqsave(&data_saved_lock, flags);
data->state = STATE_NO_DATA;
- if (!test_bit(cpu, &data->cpu_event))
+ if (!cpu_isset(cpu, data->cpu_event)) {
+ spin_unlock_irqrestore(&data_saved_lock, flags);
return 0;
- down(&data->sem);
- clear_bit(cpu, &data->cpu_event);
+ }
+ cpu_clear(cpu, data->cpu_event);
if (data->saved_num) {
- unsigned long flags;
- spin_lock_irqsave(&data_saved_lock, flags);
- shift1_data_saved(data, data->saved_num - 1 );
+ shift1_data_saved(data, data->saved_num - 1);
data->saved_num = 0;
- spin_unlock_irqrestore(&data_saved_lock, flags);
}
+ spin_unlock_irqrestore(&data_saved_lock, flags);
rh = (sal_log_record_header_t *)(data->log_buffer);
/* Corrected errors have already been cleared from SAL */
if (rh->severity != sal_log_severity_corrected)
call_on_cpu(cpu, salinfo_log_clear_cpu, data);
/* clearing a record may make a new record visible */
salinfo_log_new_read(cpu, data);
- if (data->state == STATE_LOG_RECORD &&
- !test_and_set_bit(cpu, &data->cpu_event))
- up(&data->sem);
+ if (data->state == STATE_LOG_RECORD) {
+ spin_lock_irqsave(&data_saved_lock, flags);
+ cpu_set(cpu, data->cpu_event);
+ salinfo_work_to_do(data);
+ spin_unlock_irqrestore(&data_saved_lock, flags);
+ }
return 0;
}
@@ -549,6 +571,53 @@ static struct file_operations salinfo_data_fops = {
.write = salinfo_log_write,
};
+#ifdef CONFIG_HOTPLUG_CPU
+static int __devinit
+salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+{
+ unsigned int i, cpu = (unsigned long)hcpu;
+ unsigned long flags;
+ struct salinfo_data *data;
+ switch (action) {
+ case CPU_ONLINE:
+ spin_lock_irqsave(&data_saved_lock, flags);
+ for (i = 0, data = salinfo_data;
+ i < ARRAY_SIZE(salinfo_data);
+ ++i, ++data) {
+ cpu_set(cpu, data->cpu_event);
+ salinfo_work_to_do(data);
+ }
+ spin_unlock_irqrestore(&data_saved_lock, flags);
+ break;
+ case CPU_DEAD:
+ spin_lock_irqsave(&data_saved_lock, flags);
+ for (i = 0, data = salinfo_data;
+ i < ARRAY_SIZE(salinfo_data);
+ ++i, ++data) {
+ struct salinfo_data_saved *data_saved;
+ int j;
+ for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j;
+ j >= 0;
+ --j, --data_saved) {
+ if (data_saved->buffer && data_saved->cpu == cpu) {
+ shift1_data_saved(data, j);
+ }
+ }
+ cpu_clear(cpu, data->cpu_event);
+ }
+ spin_unlock_irqrestore(&data_saved_lock, flags);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block salinfo_cpu_notifier =
+{
+ .notifier_call = salinfo_cpu_callback,
+ .priority = 0,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
static int __init
salinfo_init(void)
{
@@ -556,7 +625,7 @@ salinfo_init(void)
struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */
struct proc_dir_entry *dir, *entry;
struct salinfo_data *data;
- int i, j, online;
+ int i, j;
salinfo_dir = proc_mkdir("sal", NULL);
if (!salinfo_dir)
@@ -571,7 +640,7 @@ salinfo_init(void)
for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
data = salinfo_data + i;
data->type = i;
- sema_init(&data->sem, 0);
+ init_MUTEX(&data->mutex);
dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
if (!dir)
continue;
@@ -591,12 +660,8 @@ salinfo_init(void)
*sdir++ = entry;
/* we missed any events before now */
- online = 0;
- for_each_online_cpu(j) {
- set_bit(j, &data->cpu_event);
- ++online;
- }
- sema_init(&data->sem, online);
+ for_each_online_cpu(j)
+ cpu_set(j, data->cpu_event);
*sdir++ = dir;
}
@@ -608,6 +673,10 @@ salinfo_init(void)
salinfo_timer.function = &salinfo_timeout;
add_timer(&salinfo_timer);
+#ifdef CONFIG_HOTPLUG_CPU
+ register_cpu_notifier(&salinfo_cpu_notifier);
+#endif
+
return 0;
}
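
The pivot of the salinfo rework is salinfo_work_to_do() above: with data_saved_lock held, the down_trylock()/up() pair leaves the mutex up whether or not it already was, so repeated kicks cannot overflow the way repeated up() calls on the old counting semaphore could. The idiom in isolation:

/* always called with the protecting spinlock held, making the
 * trylock/up pair atomic with respect to other kickers */
static void kick(struct semaphore *m)
{
	down_trylock(m);	/* consume a pending token, if any... */
	up(m);			/* ...then post exactly one */
}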
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c33305d8e5eb..c0766575a3a2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -60,6 +60,7 @@
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
+#include <asm/system.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
@@ -695,6 +696,7 @@ static void
get_max_cacheline_size (void)
{
unsigned long line_size, max = 1;
+ unsigned int cache_size = 0;
u64 l, levels, unique_caches;
pal_cache_config_info_t cci;
s64 status;
@@ -724,6 +726,8 @@ get_max_cacheline_size (void)
line_size = 1 << cci.pcci_line_size;
if (line_size > max)
max = line_size;
+ if (cache_size < cci.pcci_cache_size)
+ cache_size = cci.pcci_cache_size;
if (!cci.pcci_unified) {
status = ia64_pal_cache_config_info(l,
/* cache_type (instruction)= */ 1,
@@ -740,6 +744,9 @@ get_max_cacheline_size (void)
ia64_i_cache_stride_shift = cci.pcci_stride;
}
out:
+#ifdef CONFIG_SMP
+ max_cache_size = max(max_cache_size, cache_size);
+#endif
if (max > ia64_max_cacheline_size)
ia64_max_cacheline_size = max;
}
@@ -794,7 +801,7 @@ cpu_init (void)
#endif
/* Clear the stack memory reserved for pt_regs: */
- memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+ memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
ia64_set_kr(IA64_KR_FPU_OWNER, 0);
@@ -870,6 +877,15 @@ cpu_init (void)
pm_idle = default_idle;
}
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+void sched_cacheflush(void)
+{
+ ia64_sal_cache_flush(3);
+}
+
void
check_bugs (void)
{
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 58ce07efc56e..463f6bb44d07 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -655,11 +655,11 @@ set_sigdelayed(pid_t pid, int signo, int code, void __user *addr)
if (!t)
return;
- t->thread_info->sigdelayed.signo = signo;
- t->thread_info->sigdelayed.code = code;
- t->thread_info->sigdelayed.addr = addr;
- t->thread_info->sigdelayed.start_time = start_time;
- t->thread_info->sigdelayed.pid = pid;
+ task_thread_info(t)->sigdelayed.signo = signo;
+ task_thread_info(t)->sigdelayed.code = code;
+ task_thread_info(t)->sigdelayed.addr = addr;
+ task_thread_info(t)->sigdelayed.start_time = start_time;
+ task_thread_info(t)->sigdelayed.pid = pid;
wmb();
set_tsk_thread_flag(t, TIF_SIGDELAYED);
}
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index f2dbcd1db0d4..c7b943f10199 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -151,7 +151,7 @@ out:
asmlinkage long
sys_pipe (void)
{
- struct pt_regs *regs = ia64_task_regs(current);
+ struct pt_regs *regs = task_pt_regs(current);
int fd[2];
int retval;
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index d3e0ecb56d62..55391901b013 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -530,12 +530,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
if (fsys_mode(current, &regs)) {
extern char __kernel_syscall_via_break[];
/*
- * Got a trap in fsys-mode: Taken Branch Trap and Single Step trap
- * need special handling; Debug trap is not supposed to happen.
+ * Got a trap in fsys-mode: Taken Branch Trap
+ * and Single Step trap need special handling;
+ * Debug trap is ignored (we disable it here
+ * and re-enable it in the lower-privilege trap).
*/
if (unlikely(vector == 29)) {
- die("Got debug trap in fsys-mode---not supposed to happen!",
- &regs, 0);
+ set_thread_flag(TIF_DB_DISABLED);
+ ia64_psr(&regs)->db = 0;
+ ia64_psr(&regs)->lp = 1;
return;
}
/* re-do the system call via break 0x100000: */
@@ -589,10 +592,19 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 34:
if (isr & 0x2) {
/* Lower-Privilege Transfer Trap */
+
+ /* If we disabled debug traps during an fsyscall,
+ * re-enable them here.
+ */
+ if (test_thread_flag(TIF_DB_DISABLED)) {
+ clear_thread_flag(TIF_DB_DISABLED);
+ ia64_psr(&regs)->db = 1;
+ }
+
/*
- * Just clear PSR.lp and then return immediately: all the
- * interesting work (e.g., signal delivery is done in the kernel
- * exit path).
+ * Just clear PSR.lp and then return immediately:
+ * all the interesting work (e.g., signal delivery)
+ * is done in the kernel exit path.
*/
ia64_psr(&regs)->lp = 0;
return;
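
The two traps.c hunks form a handshake: a debug trap taken in fsys-mode cannot be handled there, so PSR.db is switched off and PSR.lp armed; the lower-privilege transfer trap then restores PSR.db once execution leaves fsys-mode. In outline (vector numbers as in the diff):

static void fsys_debug_trap(struct pt_regs *regs)	/* vector 29 */
{
	set_thread_flag(TIF_DB_DISABLED);
	ia64_psr(regs)->db = 0;	/* suppress further debug traps for now */
	ia64_psr(regs)->lp = 1;	/* arm the lower-privilege transfer trap */
}

static void lower_privilege_trap(struct pt_regs *regs)	/* vector 34 */
{
	if (test_thread_flag(TIF_DB_DISABLED)) {
		clear_thread_flag(TIF_DB_DISABLED);
		ia64_psr(regs)->db = 1;	/* re-enable debug traps */
	}
	ia64_psr(regs)->lp = 0;
}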
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index b631cf86ed44..fcd2bad0286f 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -210,6 +210,7 @@ uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
+ touch_softlockup_watchdog();
memset((char *)start, 0, length);
node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);