Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/mmu.h                    1
-rw-r--r--  arch/arm/mm/dma-mapping.c                    28
-rw-r--r--  arch/blackfin/include/asm/mmu.h               1
-rw-r--r--  arch/blackfin/kernel/ptrace.c                 6
-rw-r--r--  arch/blackfin/kernel/traps.c                 11
-rw-r--r--  arch/frv/kernel/ptrace.c                     11
-rw-r--r--  arch/h8300/include/asm/mmu.h                  1
-rw-r--r--  arch/m68knommu/include/asm/mmu.h              1
-rw-r--r--  arch/parisc/include/asm/Kbuild                1
-rw-r--r--  arch/parisc/include/asm/byteorder.h          66
-rw-r--r--  arch/parisc/include/asm/swab.h               66
-rw-r--r--  arch/powerpc/include/asm/cell-pmu.h           2
-rw-r--r--  arch/powerpc/include/asm/oprofile_impl.h      6
-rw-r--r--  arch/powerpc/oprofile/cell/pr_util.h         11
-rw-r--r--  arch/powerpc/oprofile/cell/spu_profiler.c    56
-rw-r--r--  arch/powerpc/oprofile/common.c               22
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c       748
-rw-r--r--  arch/s390/include/asm/chpid.h                 2
-rw-r--r--  arch/s390/include/asm/chsc.h                  1
-rw-r--r--  arch/s390/include/asm/cmb.h                   3
-rw-r--r--  arch/s390/include/asm/dasd.h                  2
-rw-r--r--  arch/s390/include/asm/kvm.h                   2
-rw-r--r--  arch/s390/include/asm/posix_types.h           4
-rw-r--r--  arch/s390/include/asm/ptrace.h                7
-rw-r--r--  arch/s390/include/asm/qeth.h                  1
-rw-r--r--  arch/s390/include/asm/schid.h                 2
-rw-r--r--  arch/s390/include/asm/swab.h                  2
-rw-r--r--  arch/s390/include/asm/types.h                 6
-rw-r--r--  arch/s390/kernel/entry.h                      2
-rw-r--r--  arch/s390/kernel/smp.c                        3
-rw-r--r--  arch/s390/kernel/sys_s390.c                  19
-rw-r--r--  arch/s390/kernel/vdso.c                       3
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S        4
-rw-r--r--  arch/s390/kvm/diag.c                          2
-rw-r--r--  arch/s390/kvm/interrupt.c                     6
-rw-r--r--  arch/s390/kvm/priv.c                          2
-rw-r--r--  arch/sh/include/asm/mmu.h                     1
-rw-r--r--  arch/x86/include/asm/bitops.h                 2
-rw-r--r--  arch/x86/kernel/early-quirks.c               22
-rw-r--r--  arch/x86/oprofile/op_model_amd.c            149
40 files changed, 897 insertions, 388 deletions
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 53099d4ee421..b561584d04a1 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -24,7 +24,6 @@ typedef struct {
* modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
*/
typedef struct {
- struct vm_list_struct *vmlist;
unsigned long end_brk;
} mm_context_t;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 67960017dc8f..310e479309ef 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -71,7 +71,7 @@ static DEFINE_SPINLOCK(consistent_lock);
* the amount of RAM found at boot time.) I would imagine that get_vm_area()
* would have to initialise this each time prior to calling vm_region_alloc().
*/
-struct vm_region {
+struct arm_vm_region {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
@@ -79,20 +79,20 @@ struct vm_region {
int vm_active;
};
-static struct vm_region consistent_head = {
+static struct arm_vm_region consistent_head = {
.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
.vm_start = CONSISTENT_BASE,
.vm_end = CONSISTENT_END,
};
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct arm_vm_region *
+arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
{
unsigned long addr = head->vm_start, end = head->vm_end - size;
unsigned long flags;
- struct vm_region *c, *new;
+ struct arm_vm_region *c, *new;
- new = kmalloc(sizeof(struct vm_region), gfp);
+ new = kmalloc(sizeof(struct arm_vm_region), gfp);
if (!new)
goto out;
@@ -127,9 +127,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
return NULL;
}
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
{
- struct vm_region *c;
+ struct arm_vm_region *c;
list_for_each_entry(c, &head->vm_list, vm_list) {
if (c->vm_active && c->vm_start == addr)
@@ -149,7 +149,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
pgprot_t prot)
{
struct page *page;
- struct vm_region *c;
+ struct arm_vm_region *c;
unsigned long order;
u64 mask = ISA_DMA_THRESHOLD, limit;
@@ -214,7 +214,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
/*
* Allocate a virtual address in the consistent mapping region.
*/
- c = vm_region_alloc(&consistent_head, size,
+ c = arm_vm_region_alloc(&consistent_head, size,
gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
if (c) {
pte_t *pte;
@@ -311,13 +311,13 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
unsigned long flags, user_size, kern_size;
- struct vm_region *c;
+ struct arm_vm_region *c;
int ret = -ENXIO;
user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
spin_lock_irqsave(&consistent_lock, flags);
- c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+ c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
spin_unlock_irqrestore(&consistent_lock, flags);
if (c) {
@@ -359,7 +359,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
*/
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
- struct vm_region *c;
+ struct arm_vm_region *c;
unsigned long flags, addr;
pte_t *ptep;
int idx;
@@ -378,7 +378,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
size = PAGE_ALIGN(size);
spin_lock_irqsave(&consistent_lock, flags);
- c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+ c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
if (!c)
goto no_area;
diff --git a/arch/blackfin/include/asm/mmu.h b/arch/blackfin/include/asm/mmu.h
index 757e43906ed4..dbfd686360e6 100644
--- a/arch/blackfin/include/asm/mmu.h
+++ b/arch/blackfin/include/asm/mmu.h
@@ -10,7 +10,6 @@ struct sram_list_struct {
};
typedef struct {
- struct vm_list_struct *vmlist;
unsigned long end_brk;
unsigned long stack_start;
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index d2d388536630..594e325b40e4 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -160,15 +160,15 @@ put_reg(struct task_struct *task, int regno, unsigned long data)
static inline int is_user_addr_valid(struct task_struct *child,
unsigned long start, unsigned long len)
{
- struct vm_list_struct *vml;
+ struct vm_area_struct *vma;
struct sram_list_struct *sraml;
/* overflow */
if (start + len < start)
return -EIO;
- for (vml = child->mm->context.vmlist; vml; vml = vml->next)
- if (start >= vml->vma->vm_start && start + len < vml->vma->vm_end)
+ vma = find_vma(child->mm, start);
+ if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
return 0;
for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next)
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 17d8e4172896..5b0667da8d05 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/fs.h>
+#include <linux/rbtree.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
@@ -83,6 +84,7 @@ static void decode_address(char *buf, unsigned long address)
struct mm_struct *mm;
unsigned long flags, offset;
unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+ struct rb_node *n;
#ifdef CONFIG_KALLSYMS
unsigned long symsize;
@@ -128,9 +130,10 @@ static void decode_address(char *buf, unsigned long address)
if (!mm)
continue;
- vml = mm->context.vmlist;
- while (vml) {
- struct vm_area_struct *vma = vml->vma;
+ for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+ struct vm_area_struct *vma;
+
+ vma = rb_entry(n, struct vm_area_struct, vm_rb);
if (address >= vma->vm_start && address < vma->vm_end) {
char _tmpbuf[256];
@@ -176,8 +179,6 @@ static void decode_address(char *buf, unsigned long address)
goto done;
}
-
- vml = vml->next;
}
if (!in_atomic)
mmput(mm);
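
The traps.c hunk above replaces the old vmlist walk with an iteration over the mm's VMA red-black tree. Pulled out of decode_address() for clarity, the walk pattern looks like the sketch below (same rb_first/rb_next/rb_entry calls as in the hunk; the surrounding locking and symbol lookup are omitted, and the helper name is made up):

#include <linux/mm_types.h>
#include <linux/rbtree.h>

/* Sketch only: locate the VMA covering 'address' by walking mm->mm_rb,
 * as the new decode_address() code does on this nommu kernel.
 */
static struct vm_area_struct *vma_for_address(struct mm_struct *mm,
					      unsigned long address)
{
	struct rb_node *n;

	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
		struct vm_area_struct *vma;

		vma = rb_entry(n, struct vm_area_struct, vm_rb);
		if (address >= vma->vm_start && address < vma->vm_end)
			return vma;	/* address falls inside this mapping */
	}
	return NULL;
}
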
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index 709e9bdc6126..5e7d401d21e7 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -69,7 +69,8 @@ static inline int put_reg(struct task_struct *task, int regno,
}
/*
- * check that an address falls within the bounds of the target process's memory mappings
+ * check that an address falls within the bounds of the target process's memory
+ * mappings
*/
static inline int is_user_addr_valid(struct task_struct *child,
unsigned long start, unsigned long len)
@@ -79,11 +80,11 @@ static inline int is_user_addr_valid(struct task_struct *child,
return -EIO;
return 0;
#else
- struct vm_list_struct *vml;
+ struct vm_area_struct *vma;
- for (vml = child->mm->context.vmlist; vml; vml = vml->next)
- if (start >= vml->vma->vm_start && start + len <= vml->vma->vm_end)
- return 0;
+ vma = find_vma(child->mm, start);
+ if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
+ return 0;
return -EIO;
#endif
diff --git a/arch/h8300/include/asm/mmu.h b/arch/h8300/include/asm/mmu.h
index 2ce06ea46104..31309969df70 100644
--- a/arch/h8300/include/asm/mmu.h
+++ b/arch/h8300/include/asm/mmu.h
@@ -4,7 +4,6 @@
/* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */
typedef struct {
- struct vm_list_struct *vmlist;
unsigned long end_brk;
} mm_context_t;
diff --git a/arch/m68knommu/include/asm/mmu.h b/arch/m68knommu/include/asm/mmu.h
index 5fa6b68353ba..e2da1e6f09fe 100644
--- a/arch/m68knommu/include/asm/mmu.h
+++ b/arch/m68knommu/include/asm/mmu.h
@@ -4,7 +4,6 @@
/* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */
typedef struct {
- struct vm_list_struct *vmlist;
unsigned long end_brk;
} mm_context_t;
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index f88b252e419c..2121d99f8364 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,3 +1,4 @@
include include/asm-generic/Kbuild.asm
unifdef-y += pdc.h
+unifdef-y += swab.h
diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h
index 83095c5bb379..da66029c4cb2 100644
--- a/arch/parisc/include/asm/byteorder.h
+++ b/arch/parisc/include/asm/byteorder.h
@@ -1,69 +1,7 @@
#ifndef _PARISC_BYTEORDER_H
#define _PARISC_BYTEORDER_H
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#define __BIG_ENDIAN
-#define __SWAB_64_THRU_32__
-
-static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
-{
- __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
- "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
- : "=r" (x)
- : "0" (x));
- return x;
-}
-#define __arch_swab16 __arch_swab16
-
-static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
-{
- __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
- "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
- "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
- : "=r" (x)
- : "0" (x));
- return x;
-}
-
-static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
-{
- unsigned int temp;
- __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
- "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
- "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
- : "=r" (x), "=&r" (temp)
- : "0" (x));
- return x;
-}
-#define __arch_swab32 __arch_swab32
-
-#if BITS_PER_LONG > 32
-/*
-** From "PA-RISC 2.0 Architecture", HP Professional Books.
-** See Appendix I page 8 , "Endian Byte Swapping".
-**
-** Pretty cool algorithm: (* == zero'd bits)
-** PERMH 01234567 -> 67452301 into %0
-** HSHL 67452301 -> 7*5*3*1* into %1
-** HSHR 67452301 -> *6*4*2*0 into %0
-** OR %0 | %1 -> 76543210 into %0 (all done!)
-*/
-static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
-{
- __u64 temp;
- __asm__("permh,3210 %0, %0\n\t"
- "hshl %0, 8, %1\n\t"
- "hshr,u %0, 8, %0\n\t"
- "or %1, %0, %0"
- : "=r" (x), "=&r" (temp)
- : "0" (x));
- return x;
-}
-#define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
-
-#include <linux/byteorder.h>
+#include <asm/swab.h>
+#include <linux/byteorder/big_endian.h>
#endif /* _PARISC_BYTEORDER_H */
diff --git a/arch/parisc/include/asm/swab.h b/arch/parisc/include/asm/swab.h
new file mode 100644
index 000000000000..3ff16c5a3358
--- /dev/null
+++ b/arch/parisc/include/asm/swab.h
@@ -0,0 +1,66 @@
+#ifndef _PARISC_SWAB_H
+#define _PARISC_SWAB_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
+ "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
+ : "=r" (x)
+ : "0" (x));
+ return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
+{
+ __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
+ "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
+ "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
+ : "=r" (x)
+ : "0" (x));
+ return x;
+}
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ unsigned int temp;
+ __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
+ "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
+ "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
+ : "=r" (x), "=&r" (temp)
+ : "0" (x));
+ return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#if BITS_PER_LONG > 32
+/*
+** From "PA-RISC 2.0 Architecture", HP Professional Books.
+** See Appendix I page 8 , "Endian Byte Swapping".
+**
+** Pretty cool algorithm: (* == zero'd bits)
+** PERMH 01234567 -> 67452301 into %0
+** HSHL 67452301 -> 7*5*3*1* into %1
+** HSHR 67452301 -> *6*4*2*0 into %0
+** OR %0 | %1 -> 76543210 into %0 (all done!)
+*/
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+ __u64 temp;
+ __asm__("permh,3210 %0, %0\n\t"
+ "hshl %0, 8, %1\n\t"
+ "hshr,u %0, 8, %0\n\t"
+ "or %1, %0, %0"
+ : "=r" (x), "=&r" (temp)
+ : "0" (x));
+ return x;
+}
+#define __arch_swab64 __arch_swab64
+#endif /* BITS_PER_LONG > 32 */
+
+#endif /* _PARISC_SWAB_H */
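
For readers who do not know the PA-RISC shd/dep/permh mnemonics, a plain-C sketch of what the moved helpers compute follows. This is illustration only, not proposed kernel code; the generic implementations in include/linux/swab.h produce the same results when no __arch_swab* override is provided.

#include <stdint.h>

/* Portable equivalents of the PA-RISC assembler above (illustration only). */
static inline uint16_t c_swab16(uint16_t x)
{
	return (x << 8) | (x >> 8);
}

static inline uint32_t c_swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

static inline uint64_t c_swab64(uint64_t x)
{
	/* Swap the 32-bit halves, then the 16-bit fields, then the bytes --
	 * the same effect the permh/hshl/hshr sequence achieves in hardware.
	 */
	x = (x << 32) | (x >> 32);
	x = ((x & 0x0000ffff0000ffffULL) << 16) |
	    ((x & 0xffff0000ffff0000ULL) >> 16);
	return ((x & 0x00ff00ff00ff00ffULL) << 8) |
	       ((x & 0xff00ff00ff00ff00ULL) >> 8);
}
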
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h
index 8066eede3a0c..b4b7338ad79e 100644
--- a/arch/powerpc/include/asm/cell-pmu.h
+++ b/arch/powerpc/include/asm/cell-pmu.h
@@ -37,9 +37,11 @@
#define CBE_PM_STOP_AT_MAX 0x40000000
#define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3)
#define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28)
+#define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17)
#define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18)
#define CBE_PM_FREEZE_ALL_CTRS 0x00100000
#define CBE_PM_ENABLE_EXT_TRACE 0x00008000
+#define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9)
/* Macros for the trace_address register. */
#define CBE_PM_TRACE_BUF_FULL 0x00000800
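
Taken together with the existing setters in this header, the two new macros pack their fields into the 32-bit pm_control value: CBE_PM_TRACE_BUF_OVFLW() places a single enable bit at bit 17 and CBE_PM_SPU_ADDR_TRACE_SET() places a two-bit mask at bits 9-10 (counting from the least-significant bit). A small sketch of how they compose, using the values cell_reg_setup_spu_events() sets later in this patch; this mirrors only the trace-related part of write_pm_cntrl():

#include <asm/cell-pmu.h>

/* Illustration only: occurrence trace mode 2, trace-buffer overflow enabled,
 * and SPU address tracing via debug bus events 2 & 3 (mask 0x1).
 */
static unsigned int example_spu_event_pm_control(void)
{
	unsigned int val = 0;

	val |= CBE_PM_TRACE_MODE_SET(2);
	val |= CBE_PM_TRACE_BUF_OVFLW(1);
	val |= CBE_PM_SPU_ADDR_TRACE_SET(0x1);

	return val;		/* 0x20020200 */
}
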
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
index 95035c602ba6..639dc96077ab 100644
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ b/arch/powerpc/include/asm/oprofile_impl.h
@@ -32,6 +32,12 @@ struct op_system_config {
unsigned long mmcr0;
unsigned long mmcr1;
unsigned long mmcra;
+#ifdef CONFIG_OPROFILE_CELL
+ /* Register for oprofile user tool to check cell kernel profiling
+	 * support.
+ */
+ unsigned long cell_support;
+#endif
#endif
unsigned long enable_kernel;
unsigned long enable_user;
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
index dfdbffa06818..964b93974d89 100644
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ b/arch/powerpc/oprofile/cell/pr_util.h
@@ -30,6 +30,10 @@
extern struct delayed_work spu_work;
extern int spu_prof_running;
+#define TRACE_ARRAY_SIZE 1024
+
+extern spinlock_t oprof_spu_smpl_arry_lck;
+
struct spu_overlay_info { /* map of sections within an SPU overlay */
unsigned int vma; /* SPU virtual memory address from elf */
unsigned int size; /* size of section from elf */
@@ -89,10 +93,11 @@ void vma_map_free(struct vma_to_fileoffset_map *map);
* Entry point for SPU profiling.
* cycles_reset is the SPU_CYCLES count value specified by the user.
*/
-int start_spu_profiling(unsigned int cycles_reset);
-
-void stop_spu_profiling(void);
+int start_spu_profiling_cycles(unsigned int cycles_reset);
+void start_spu_profiling_events(void);
+void stop_spu_profiling_cycles(void);
+void stop_spu_profiling_events(void);
/* add the necessary profiling hooks */
int spu_sync_start(void);
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index 83faa958b9d4..9305ddaac512 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -18,11 +18,21 @@
#include <asm/cell-pmu.h>
#include "pr_util.h"
-#define TRACE_ARRAY_SIZE 1024
#define SCALE_SHIFT 14
static u32 *samples;
+/* spu_prof_running is a flag used to indicate if spu profiling is enabled
+ * or not. It is set by the routines start_spu_profiling_cycles() and
+ * start_spu_profiling_events(). The flag is cleared by the routines
+ * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These
+ * routines are called via global_start() and global_stop() which are called in
+ * op_powerpc_start() and op_powerpc_stop(). These routines are called once
+ * per system as a result of the user starting/stopping oprofile. Hence, only
+ * one CPU per user at a time will be changing the value of spu_prof_running.
+ * In general, OProfile does not protect against multiple users trying to run
+ * OProfile at a time.
+ */
int spu_prof_running;
static unsigned int profiling_interval;
@@ -31,8 +41,8 @@ static unsigned int profiling_interval;
#define SPU_PC_MASK 0xFFFF
-static DEFINE_SPINLOCK(sample_array_lock);
-unsigned long sample_array_lock_flags;
+DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
+unsigned long oprof_spu_smpl_arry_lck_flags;
void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
{
@@ -145,13 +155,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
* sample array must be loaded and then processed for a given
* cpu. The sample array is not per cpu.
*/
- spin_lock_irqsave(&sample_array_lock,
- sample_array_lock_flags);
+ spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
num_samples = cell_spu_pc_collection(cpu);
if (num_samples == 0) {
- spin_unlock_irqrestore(&sample_array_lock,
- sample_array_lock_flags);
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
continue;
}
@@ -162,8 +172,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
num_samples);
}
- spin_unlock_irqrestore(&sample_array_lock,
- sample_array_lock_flags);
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
}
smp_wmb(); /* insure spu event buffer updates are written */
@@ -182,13 +192,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
static struct hrtimer timer;
/*
- * Entry point for SPU profiling.
+ * Entry point for SPU cycle profiling.
* NOTE: SPU profiling is done system-wide, not per-CPU.
*
* cycles_reset is the count value specified by the user when
* setting up OProfile to count SPU_CYCLES.
*/
-int start_spu_profiling(unsigned int cycles_reset)
+int start_spu_profiling_cycles(unsigned int cycles_reset)
{
ktime_t kt;
@@ -212,10 +222,30 @@ int start_spu_profiling(unsigned int cycles_reset)
return 0;
}
-void stop_spu_profiling(void)
+/*
+ * Entry point for SPU event profiling.
+ * NOTE: SPU profiling is done system-wide, not per-CPU.
+ *
+ * This routine simply marks SPU profiling as running and schedules the
+ * deferred SPU work; the event and reset count are configured elsewhere.
+ */
+void start_spu_profiling_events(void)
+{
+ spu_prof_running = 1;
+ schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+
+ return;
+}
+
+void stop_spu_profiling_cycles(void)
{
spu_prof_running = 0;
hrtimer_cancel(&timer);
kfree(samples);
- pr_debug("SPU_PROF: stop_spu_profiling issued\n");
+ pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
+}
+
+void stop_spu_profiling_events(void)
+{
+ spu_prof_running = 0;
}
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 17807acb05d9..21f16edf6c8d 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -132,6 +132,28 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+#ifdef CONFIG_OPROFILE_CELL
+ /* create a file the user tool can check to see what level of profiling
+	 * support exists with this kernel. Initialize bit mask to indicate
+ * what support the kernel has:
+ * bit 0 - Supports SPU event profiling in addition to PPU
+ * event and cycles; and SPU cycle profiling
+ * bits 1-31 - Currently unused.
+ *
+ * If the file does not exist, then the kernel only supports SPU
+ * cycle profiling, PPU event and cycle profiling.
+ */
+ oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
+ sys.cell_support = 0x1; /* Note, the user OProfile tool must check
+ * that this bit is set before attempting to
+					 * use SPU event profiling. Older kernels
+ * will not have this file, hence the user
+ * tool is not allowed to do SPU event
+ * profiling on older kernels. Older kernels
+ * will accept SPU events but collected data
+ * is garbage.
+ */
+#endif
#endif
for (i = 0; i < model->num_counters; ++i) {
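
The comment above describes a bit mask the user-space tool is expected to read before attempting SPU event profiling. A hedged sketch of such a check follows; the /dev/oprofile mount point and the plain-number file format are assumptions (the conventional oprofilefs setup), not something this patch defines:

#include <stdio.h>

/* Hypothetical user-space check of the new cell_support flag (sketch only).
 * Older kernels do not create the file at all, which also means SPU event
 * profiling is unavailable.
 */
static int spu_event_profiling_supported(void)
{
	unsigned long flags = 0;
	FILE *f = fopen("/dev/oprofile/cell_support", "r");

	if (!f)
		return 0;			/* file absent: cycles/PPU only */
	if (fscanf(f, "%lu", &flags) != 1)
		flags = 0;
	fclose(f);

	return flags & 0x1;			/* bit 0: SPU event profiling */
}
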
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 25a4ec2514a3..ae06c6236d9c 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -40,14 +40,15 @@
#include "../platforms/cell/interrupt.h"
#include "cell/pr_util.h"
-static void cell_global_stop_spu(void);
+#define PPU_PROFILING 0
+#define SPU_PROFILING_CYCLES 1
+#define SPU_PROFILING_EVENTS 2
-/*
- * spu_cycle_reset is the number of cycles between samples.
- * This variable is used for SPU profiling and should ONLY be set
- * at the beginning of cell_reg_setup; otherwise, it's read-only.
- */
-static unsigned int spu_cycle_reset;
+#define SPU_EVENT_NUM_START 4100
+#define SPU_EVENT_NUM_STOP 4399
+#define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */
+#define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */
+#define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */
#define NUM_SPUS_PER_NODE 8
#define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */
@@ -66,6 +67,21 @@ static unsigned int spu_cycle_reset;
#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
+/* Minimum HW interval timer setting to send a value to the trace buffer is 10 cycles.
+ * To configure counter to send value every N cycles set counter to
+ * 2^32 - 1 - N.
+ */
+#define NUM_INTERVAL_CYC 0xFFFFFFFF - 10
+
+/*
+ * spu_cycle_reset is the number of cycles between samples.
+ * This variable is used for SPU profiling and should ONLY be set
+ * at the beginning of cell_reg_setup; otherwise, it's read-only.
+ */
+static unsigned int spu_cycle_reset;
+static unsigned int profiling_mode;
+static int spu_evnt_phys_spu_indx;
+
struct pmc_cntrl_data {
unsigned long vcntr;
unsigned long evnts;
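
The NUM_INTERVAL_CYC comment gives the interval-timer formula: to push a value to the trace buffer every N cycles, the 32-bit timer is preloaded with 2^32 - 1 - N. A quick stand-alone check of the arithmetic for the N = 10 used here (not kernel code, just verifying the constant):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t n = 10;
	uint32_t preload = UINT32_MAX - n;	/* 2^32 - 1 - N */

	assert(preload == 0xFFFFFFF5u);		/* value of NUM_INTERVAL_CYC */
	return 0;
}
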
@@ -105,6 +121,8 @@ struct pm_cntrl {
u16 trace_mode;
u16 freeze;
u16 count_mode;
+ u16 spu_addr_trace;
+ u8 trace_buf_ovflw;
};
static struct {
@@ -122,7 +140,7 @@ static struct {
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
-
+static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
/*
@@ -152,6 +170,7 @@ static u32 hdw_thread;
static u32 virt_cntr_inter_mask;
static struct timer_list timer_virt_cntr;
+static struct timer_list timer_spu_event_swap;
/*
* pm_signal needs to be global since it is initialized in
@@ -165,7 +184,7 @@ static int spu_rtas_token; /* token for SPU cycle profiling */
static u32 reset_value[NR_PHYS_CTRS];
static int num_counters;
static int oprofile_running;
-static DEFINE_SPINLOCK(virt_cntr_lock);
+static DEFINE_SPINLOCK(cntr_lock);
static u32 ctr_enabled;
@@ -336,13 +355,13 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
if (bus_word & (1 << i)) {
pm_regs.debug_bus_control |=
- (bus_type << (30 - (2 * i)));
+ (bus_type << (30 - (2 * i)));
for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
if (input_bus[j] == 0xff) {
input_bus[j] = i;
pm_regs.group_control |=
- (i << (30 - (2 * j)));
+ (i << (30 - (2 * j)));
break;
}
@@ -367,12 +386,16 @@ static void write_pm_cntrl(int cpu)
if (pm_regs.pm_cntrl.stop_at_max == 1)
val |= CBE_PM_STOP_AT_MAX;
- if (pm_regs.pm_cntrl.trace_mode == 1)
+ if (pm_regs.pm_cntrl.trace_mode != 0)
val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
+ if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
+ val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
if (pm_regs.pm_cntrl.freeze == 1)
val |= CBE_PM_FREEZE_ALL_CTRS;
+ val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
+
/*
* Routine set_count_mode must be called previously to set
* the count mode based on the user selection of user and kernel.
@@ -441,7 +464,7 @@ static void cell_virtual_cntr(unsigned long data)
* not both playing with the counters on the same node.
*/
- spin_lock_irqsave(&virt_cntr_lock, flags);
+ spin_lock_irqsave(&cntr_lock, flags);
prev_hdw_thread = hdw_thread;
@@ -480,7 +503,7 @@ static void cell_virtual_cntr(unsigned long data)
cbe_disable_pm_interrupts(cpu);
for (i = 0; i < num_counters; i++) {
per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
- = cbe_read_ctr(cpu, i);
+ = cbe_read_ctr(cpu, i);
if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
== 0xFFFFFFFF)
@@ -527,7 +550,7 @@ static void cell_virtual_cntr(unsigned long data)
cbe_enable_pm(cpu);
}
- spin_unlock_irqrestore(&virt_cntr_lock, flags);
+ spin_unlock_irqrestore(&cntr_lock, flags);
mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}
@@ -541,38 +564,146 @@ static void start_virt_cntrs(void)
add_timer(&timer_virt_cntr);
}
-/* This function is called once for all cpus combined */
-static int cell_reg_setup(struct op_counter_config *ctr,
+static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
struct op_system_config *sys, int num_ctrs)
{
- int i, j, cpu;
- spu_cycle_reset = 0;
+ spu_cycle_reset = ctr[0].count;
- if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
- spu_cycle_reset = ctr[0].count;
+ /*
+ * Each node will need to make the rtas call to start
+ * and stop SPU profiling. Get the token once and store it.
+ */
+ spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
+
+ if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+ printk(KERN_ERR
+ "%s: rtas token ibm,cbe-spu-perftools unknown\n",
+ __func__);
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Unfortunately, the hardware will only support event profiling
+ * on one SPU per node at a time. Therefore, we must time slice
+ * the profiling across all SPUs in the node. Note, we do this
+ * in parallel for each node. The following routine is called
+ * periodically based on kernel timer to switch which SPU is
+ * being monitored in a round-robin fashion.
+ */
+static void spu_evnt_swap(unsigned long data)
+{
+ int node;
+ int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
+ unsigned long flags;
+ int cpu;
+ int ret;
+ u32 interrupt_mask;
+
+
+ /* enable interrupts on cntr 0 */
+ interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
+
+ hdw_thread = 0;
+
+ /* Make sure spu event interrupt handler and spu event swap
+ * don't access the counters simultaneously.
+ */
+ spin_lock_irqsave(&cntr_lock, flags);
+
+ cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
+
+ if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
+ spu_evnt_phys_spu_indx = 0;
+
+ pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
+ pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+ pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
+
+ /* switch the SPU being profiled on each node */
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ node = cbe_cpu_to_node(cpu);
+ cur_phys_spu = (node * NUM_SPUS_PER_NODE)
+ + cur_spu_evnt_phys_spu_indx;
+ nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
+ + spu_evnt_phys_spu_indx;
/*
- * Each node will need to make the rtas call to start
- * and stop SPU profiling. Get the token once and store it.
+ * stop counters, save counter values, restore counts
+ * for previous physical SPU
*/
- spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
+ cbe_disable_pm(cpu);
+ cbe_disable_pm_interrupts(cpu);
- if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-spu-perftools unknown\n",
- __func__);
- return -EIO;
- }
+ spu_pm_cnt[cur_phys_spu]
+ = cbe_read_ctr(cpu, 0);
+
+ /* restore previous count for the next spu to sample */
+ /* NOTE, hardware issue, counter will not start if the
+ * counter value is at max (0xFFFFFFFF).
+ */
+ if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
+ cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
+ else
+ cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
+
+ pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+		/* setup the debug bus to measure the one event and
+ * the two events to route the next SPU's PC on
+ * the debug bus
+ */
+ ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
+ if (ret)
+ printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
+ "SPU event swap\n", __func__);
+
+ /* clear the trace buffer, don't want to take PC for
+ * previous SPU*/
+ cbe_write_pm(cpu, trace_address, 0);
+
+ enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+
+ /* Enable interrupts on the CPU thread that is starting */
+ cbe_enable_pm_interrupts(cpu, hdw_thread,
+ interrupt_mask);
+ cbe_enable_pm(cpu);
}
- pm_rtas_token = rtas_token("ibm,cbe-perftools");
+ spin_unlock_irqrestore(&cntr_lock, flags);
+ /* swap approximately every 0.1 seconds */
+ mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
+}
+
+static void start_spu_event_swap(void)
+{
+ init_timer(&timer_spu_event_swap);
+ timer_spu_event_swap.function = spu_evnt_swap;
+ timer_spu_event_swap.data = 0UL;
+ timer_spu_event_swap.expires = jiffies + HZ / 25;
+ add_timer(&timer_spu_event_swap);
+}
+
+static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
+ struct op_system_config *sys, int num_ctrs)
+{
+ int i;
+
+ /* routine is called once for all nodes */
+
+ spu_evnt_phys_spu_indx = 0;
/*
- * For all events excetp PPU CYCLEs, each node will need to make
+ * For all events except PPU CYCLEs, each node will need to make
* the rtas cbe-perftools call to setup and reset the debug bus.
* Make the token lookup call once and store it in the global
* variable pm_rtas_token.
*/
+ pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
printk(KERN_ERR
"%s: rtas token ibm,cbe-perftools unknown\n",
@@ -580,6 +711,58 @@ static int cell_reg_setup(struct op_counter_config *ctr,
return -EIO;
}
+ /* setup the pm_control register settings,
+ * settings will be written per node by the
+ * cell_cpu_setup() function.
+ */
+ pm_regs.pm_cntrl.trace_buf_ovflw = 1;
+
+ /* Use the occurrence trace mode to have SPU PC saved
+ * to the trace buffer. Occurrence data in trace buffer
+ * is not used. Bit 2 must be set to store SPU addresses.
+ */
+ pm_regs.pm_cntrl.trace_mode = 2;
+
+ pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus
+ event 2 & 3 */
+
+ /* setup the debug bus event array with the SPU PC routing events.
+ * Note, pm_signal[0] will be filled in by set_pm_event() call below.
+ */
+ pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+ pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
+ pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
+ pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+
+ pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+ pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
+ pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
+ pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
+
+ /* Set the user selected spu event to profile on,
+ * note, only one SPU profiling event is supported
+ */
+ num_counters = 1; /* Only support one SPU event at a time */
+ set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
+
+ reset_value[0] = 0xFFFFFFFF - ctr[0].count;
+
+ /* global, used by cell_cpu_setup */
+ ctr_enabled |= 1;
+
+ /* Initialize the count for each SPU to the reset value */
+ for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
+ spu_pm_cnt[i] = reset_value[0];
+
+ return 0;
+}
+
+static int cell_reg_setup_ppu(struct op_counter_config *ctr,
+ struct op_system_config *sys, int num_ctrs)
+{
+ /* routine is called once for all nodes */
+ int i, j, cpu;
+
num_counters = num_ctrs;
if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
@@ -589,14 +772,6 @@ static int cell_reg_setup(struct op_counter_config *ctr,
__func__);
return -EIO;
}
- pm_regs.group_control = 0;
- pm_regs.debug_bus_control = 0;
-
- /* setup the pm_control register */
- memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
- pm_regs.pm_cntrl.stop_at_max = 1;
- pm_regs.pm_cntrl.trace_mode = 0;
- pm_regs.pm_cntrl.freeze = 1;
set_count_mode(sys->enable_kernel, sys->enable_user);
@@ -665,6 +840,63 @@ static int cell_reg_setup(struct op_counter_config *ctr,
}
+/* This function is called once for all cpus combined */
+static int cell_reg_setup(struct op_counter_config *ctr,
+ struct op_system_config *sys, int num_ctrs)
+{
+ int ret=0;
+ spu_cycle_reset = 0;
+
+ /* initialize the spu_arr_trace value, will be reset if
+ * doing spu event profiling.
+ */
+ pm_regs.group_control = 0;
+ pm_regs.debug_bus_control = 0;
+ pm_regs.pm_cntrl.stop_at_max = 1;
+ pm_regs.pm_cntrl.trace_mode = 0;
+ pm_regs.pm_cntrl.freeze = 1;
+ pm_regs.pm_cntrl.trace_buf_ovflw = 0;
+ pm_regs.pm_cntrl.spu_addr_trace = 0;
+
+ /*
+ * For all events except PPU CYCLEs, each node will need to make
+ * the rtas cbe-perftools call to setup and reset the debug bus.
+ * Make the token lookup call once and store it in the global
+ * variable pm_rtas_token.
+ */
+ pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
+ if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+ printk(KERN_ERR
+ "%s: rtas token ibm,cbe-perftools unknown\n",
+ __func__);
+ return -EIO;
+ }
+
+ if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
+ profiling_mode = SPU_PROFILING_CYCLES;
+ ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
+ } else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
+ (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
+ profiling_mode = SPU_PROFILING_EVENTS;
+ spu_cycle_reset = ctr[0].count;
+
+ /* for SPU event profiling, need to setup the
+ * pm_signal array with the events to route the
+ * SPU PC before making the FW call. Note, only
+ * one SPU event for profiling can be specified
+ * at a time.
+ */
+ cell_reg_setup_spu_events(ctr, sys, num_ctrs);
+ } else {
+ profiling_mode = PPU_PROFILING;
+ ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
+ }
+
+ return ret;
+}
+
+
/* This function is called once for each cpu */
static int cell_cpu_setup(struct op_counter_config *cntr)
@@ -672,8 +904,13 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
u32 cpu = smp_processor_id();
u32 num_enabled = 0;
int i;
+ int ret;
- if (spu_cycle_reset)
+ /* Cycle based SPU profiling does not use the performance
+ * counters. The trace array is configured to collect
+ * the data.
+ */
+ if (profiling_mode == SPU_PROFILING_CYCLES)
return 0;
/* There is one performance monitor per processor chip (i.e. node),
@@ -686,7 +923,6 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
cbe_disable_pm(cpu);
cbe_disable_pm_interrupts(cpu);
- cbe_write_pm(cpu, pm_interval, 0);
cbe_write_pm(cpu, pm_start_stop, 0);
cbe_write_pm(cpu, group_control, pm_regs.group_control);
cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
@@ -703,7 +939,20 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
* The pm_rtas_activate_signals will return -EIO if the FW
* call failed.
*/
- return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
+ if (profiling_mode == SPU_PROFILING_EVENTS) {
+ /* For SPU event profiling also need to setup the
+ * pm interval timer
+ */
+ ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+ num_enabled+2);
+ /* store PC from debug bus to Trace buffer as often
+ * as possible (every 10 cycles)
+ */
+ cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+ return ret;
+ } else
+ return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+ num_enabled);
}
#define ENTRIES 303
@@ -885,7 +1134,122 @@ static struct notifier_block cpu_freq_notifier_block = {
};
#endif
-static int cell_global_start_spu(struct op_counter_config *ctr)
+/*
+ * Note the generic OProfile stop calls do not support returning
+ * an error on stop. Hence, will not return an error if the FW
+ * calls fail on stop. Failure to reset the debug bus is not an issue.
+ * Failure to disable the SPU profiling is not an issue. The FW calls
+ * to enable the performance counters and debug bus will work even if
+ * the hardware was not cleanly reset.
+ */
+static void cell_global_stop_spu_cycles(void)
+{
+ int subfunc, rtn_value;
+ unsigned int lfsr_value;
+ int cpu;
+
+ oprofile_running = 0;
+ smp_wmb();
+
+#ifdef CONFIG_CPU_FREQ
+ cpufreq_unregister_notifier(&cpu_freq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ subfunc = 3; /*
+ * 2 - activate SPU tracing,
+ * 3 - deactivate
+ */
+ lfsr_value = 0x8f100000;
+
+ rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
+ subfunc, cbe_cpu_to_node(cpu),
+ lfsr_value);
+
+ if (unlikely(rtn_value != 0)) {
+ printk(KERN_ERR
+ "%s: rtas call ibm,cbe-spu-perftools " \
+ "failed, return = %d\n",
+ __func__, rtn_value);
+ }
+
+ /* Deactivate the signals */
+ pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+ }
+
+ stop_spu_profiling_cycles();
+}
+
+static void cell_global_stop_spu_events(void)
+{
+ int cpu;
+ oprofile_running = 0;
+
+ stop_spu_profiling_events();
+ smp_wmb();
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ cbe_sync_irq(cbe_cpu_to_node(cpu));
+ /* Stop the counters */
+ cbe_disable_pm(cpu);
+ cbe_write_pm07_control(cpu, 0, 0);
+
+ /* Deactivate the signals */
+ pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+ /* Deactivate interrupts */
+ cbe_disable_pm_interrupts(cpu);
+ }
+ del_timer_sync(&timer_spu_event_swap);
+}
+
+static void cell_global_stop_ppu(void)
+{
+ int cpu;
+
+ /*
+ * This routine will be called once for the system.
+ * There is one performance monitor per node, so we
+ * only need to perform this function once per node.
+ */
+ del_timer_sync(&timer_virt_cntr);
+ oprofile_running = 0;
+ smp_wmb();
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ cbe_sync_irq(cbe_cpu_to_node(cpu));
+ /* Stop the counters */
+ cbe_disable_pm(cpu);
+
+ /* Deactivate the signals */
+ pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+ /* Deactivate interrupts */
+ cbe_disable_pm_interrupts(cpu);
+ }
+}
+
+static void cell_global_stop(void)
+{
+ if (profiling_mode == PPU_PROFILING)
+ cell_global_stop_ppu();
+ else if (profiling_mode == SPU_PROFILING_EVENTS)
+ cell_global_stop_spu_events();
+ else
+ cell_global_stop_spu_cycles();
+}
+
+static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
{
int subfunc;
unsigned int lfsr_value;
@@ -951,18 +1315,18 @@ static int cell_global_start_spu(struct op_counter_config *ctr)
/* start profiling */
ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
- cbe_cpu_to_node(cpu), lfsr_value);
+ cbe_cpu_to_node(cpu), lfsr_value);
if (unlikely(ret != 0)) {
printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
- __func__, ret);
+ "%s: rtas call ibm,cbe-spu-perftools failed, " \
+ "return = %d\n", __func__, ret);
rtas_error = -EIO;
goto out;
}
}
- rtas_error = start_spu_profiling(spu_cycle_reset);
+ rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
if (rtas_error)
goto out_stop;
@@ -970,11 +1334,74 @@ static int cell_global_start_spu(struct op_counter_config *ctr)
return 0;
out_stop:
- cell_global_stop_spu(); /* clean up the PMU/debug bus */
+ cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */
out:
return rtas_error;
}
+static int cell_global_start_spu_events(struct op_counter_config *ctr)
+{
+ int cpu;
+ u32 interrupt_mask = 0;
+ int rtn = 0;
+
+ hdw_thread = 0;
+
+	/* spu event profiling uses the performance counters to generate
+ * an interrupt. The hardware is setup to store the SPU program
+ * counter into the trace array. The occurrence mode is used to
+ * enable storing data to the trace buffer. The bits are set
+ * to send/store the SPU address in the trace buffer. The debug
+ * bus must be setup to route the SPU program counter onto the
+ * debug bus. The occurrence data in the trace buffer is not used.
+ */
+
+ /* This routine gets called once for the system.
+ * There is one performance monitor per node, so we
+ * only need to perform this function once per node.
+ */
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ /*
+ * Setup SPU event-based profiling.
+ * Set perf_mon_control bit 0 to a zero before
+ * enabling spu collection hardware.
+ *
+ * Only support one SPU event on one SPU per node.
+ */
+ if (ctr_enabled & 1) {
+ cbe_write_ctr(cpu, 0, reset_value[0]);
+ enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+ interrupt_mask |=
+ CBE_PM_CTR_OVERFLOW_INTR(0);
+ } else {
+ /* Disable counter */
+ cbe_write_pm07_control(cpu, 0, 0);
+ }
+
+ cbe_get_and_clear_pm_interrupts(cpu);
+ cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
+ cbe_enable_pm(cpu);
+
+ /* clear the trace buffer */
+ cbe_write_pm(cpu, trace_address, 0);
+ }
+
+ /* Start the timer to time slice collecting the event profile
+ * on each of the SPUs. Note, can collect profile on one SPU
+ * per node at a time.
+ */
+ start_spu_event_swap();
+ start_spu_profiling_events();
+ oprofile_running = 1;
+ smp_wmb();
+
+ return rtn;
+}
+
static int cell_global_start_ppu(struct op_counter_config *ctr)
{
u32 cpu, i;
@@ -994,8 +1421,7 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)
if (ctr_enabled & (1 << i)) {
cbe_write_ctr(cpu, i, reset_value[i]);
enable_ctr(cpu, i, pm_regs.pm07_cntrl);
- interrupt_mask |=
- CBE_PM_CTR_OVERFLOW_INTR(i);
+ interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
} else {
/* Disable counter */
cbe_write_pm07_control(cpu, i, 0);
@@ -1024,99 +1450,162 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)
static int cell_global_start(struct op_counter_config *ctr)
{
- if (spu_cycle_reset)
- return cell_global_start_spu(ctr);
+ if (profiling_mode == SPU_PROFILING_CYCLES)
+ return cell_global_start_spu_cycles(ctr);
+ else if (profiling_mode == SPU_PROFILING_EVENTS)
+ return cell_global_start_spu_events(ctr);
else
return cell_global_start_ppu(ctr);
}
-/*
- * Note the generic OProfile stop calls do not support returning
- * an error on stop. Hence, will not return an error if the FW
- * calls fail on stop. Failure to reset the debug bus is not an issue.
- * Failure to disable the SPU profiling is not an issue. The FW calls
- * to enable the performance counters and debug bus will work even if
- * the hardware was not cleanly reset.
+
+/* The SPU interrupt handler
+ *
+ * SPU event profiling works as follows:
+ * The pm_signal[0] holds the one SPU event to be measured. It is routed on
+ * the debug bus using word 0 or 1. The value of pm_signal[1] and
+ * pm_signal[2] contain the necessary events to route the SPU program
+ * counter for the selected SPU onto the debug bus using words 2 and 3.
+ * The pm_interval register is setup to write the SPU PC value into the
+ * trace buffer at the maximum rate possible. The trace buffer is configured
+ * to store the PCs, wrapping when it is full. The performance counter is
+ * initialized to the max hardware count minus the number of events, N, between
+ * samples. Once the N events have occurred, a HW counter overflow occurs
+ * causing the generation of a HW counter interrupt which also stops the
+ * writing of the SPU PC values to the trace buffer. Hence the last PC
+ * written to the trace buffer is the SPU PC that we want. Unfortunately,
+ * we have to read from the beginning of the trace buffer to get to the
+ * last value written. We just hope the PPU has nothing better to do than
+ * service this interrupt. The PC for the specific SPU being profiled is
+ * extracted from the trace buffer, processed and stored. The trace buffer
+ * is cleared, interrupts are cleared, the counter is reset to max - N.
+ * A kernel timer is used to periodically call the routine spu_evnt_swap()
+ * to switch to the next physical SPU in the node to profile in round-robin
+ * order. This way data is collected for all SPUs on the node. It does mean
+ * that we need to use a relatively small value of N to ensure enough samples
+ * are collected on each SPU, since each SPU is being profiled only 1/8 of the time.
+ * It may also be necessary to use a longer sample collection period.
*/
-static void cell_global_stop_spu(void)
+static void cell_handle_interrupt_spu(struct pt_regs *regs,
+ struct op_counter_config *ctr)
{
- int subfunc, rtn_value;
- unsigned int lfsr_value;
- int cpu;
+ u32 cpu, cpu_tmp;
+ u64 trace_entry;
+ u32 interrupt_mask;
+ u64 trace_buffer[2];
+ u64 last_trace_buffer;
+ u32 sample;
+ u32 trace_addr;
+ unsigned long sample_array_lock_flags;
+ int spu_num;
+ unsigned long flags;
- oprofile_running = 0;
+ /* Make sure spu event interrupt handler and spu event swap
+ * don't access the counters simultaneously.
+ */
+ cpu = smp_processor_id();
+ spin_lock_irqsave(&cntr_lock, flags);
-#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&cpu_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-#endif
+ cpu_tmp = cpu;
+ cbe_disable_pm(cpu);
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
+ interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
- subfunc = 3; /*
- * 2 - activate SPU tracing,
- * 3 - deactivate
- */
- lfsr_value = 0x8f100000;
+ sample = 0xABCDEF;
+ trace_entry = 0xfedcba;
+ last_trace_buffer = 0xdeadbeaf;
- rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
- subfunc, cbe_cpu_to_node(cpu),
- lfsr_value);
+ if ((oprofile_running == 1) && (interrupt_mask != 0)) {
+ /* disable writes to trace buff */
+ cbe_write_pm(cpu, pm_interval, 0);
- if (unlikely(rtn_value != 0)) {
- printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
- __func__, rtn_value);
+ /* only have one perf cntr being used, cntr 0 */
+ if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
+ && ctr[0].enabled)
+ /* The SPU PC values will be read
+ * from the trace buffer, reset counter
+ */
+
+ cbe_write_ctr(cpu, 0, reset_value[0]);
+
+ trace_addr = cbe_read_pm(cpu, trace_address);
+
+ while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
+ /* There is data in the trace buffer to process
+ * Read the buffer until you get to the last
+ * entry. This is the value we want.
+ */
+
+ cbe_read_trace_buffer(cpu, trace_buffer);
+ trace_addr = cbe_read_pm(cpu, trace_address);
}
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
- }
+ /* SPU Address 16 bit count format for 128 bit
+ * HW trace buffer is used for the SPU PC storage
+ * HDR bits 0:15
+ * SPU Addr 0 bits 16:31
+ * SPU Addr 1 bits 32:47
+ * unused bits 48:127
+ *
+ * HDR: bit4 = 1 SPU Address 0 valid
+ * HDR: bit5 = 1 SPU Address 1 valid
+ * - unfortunately, the valid bits don't seem to work
+ *
+ * Note trace_buffer[0] holds bits 0:63 of the HW
+ * trace buffer, trace_buffer[1] holds bits 64:127
+ */
- stop_spu_profiling();
-}
+ trace_entry = trace_buffer[0]
+ & 0x00000000FFFF0000;
-static void cell_global_stop_ppu(void)
-{
- int cpu;
+ /* only top 16 of the 18 bit SPU PC address
+ * is stored in trace buffer, hence shift right
+			 * by 16 - 2 bits */
+ sample = trace_entry >> 14;
+ last_trace_buffer = trace_buffer[0];
- /*
- * This routine will be called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
- del_timer_sync(&timer_virt_cntr);
- oprofile_running = 0;
- smp_wmb();
+ spu_num = spu_evnt_phys_spu_indx
+ + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
+ /* make sure only one process at a time is calling
+ * spu_sync_buffer()
+ */
+ spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+ sample_array_lock_flags);
+ spu_sync_buffer(spu_num, &sample, 1);
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ sample_array_lock_flags);
- cbe_sync_irq(cbe_cpu_to_node(cpu));
- /* Stop the counters */
- cbe_disable_pm(cpu);
+ smp_wmb(); /* insure spu event buffer updates are written
+ * don't want events intermingled... */
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+ /* The counters were frozen by the interrupt.
+ * Reenable the interrupt and restart the counters.
+ */
+ cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+ cbe_enable_pm_interrupts(cpu, hdw_thread,
+ virt_cntr_inter_mask);
- /* Deactivate interrupts */
- cbe_disable_pm_interrupts(cpu);
- }
-}
+ /* clear the trace buffer, re-enable writes to trace buff */
+ cbe_write_pm(cpu, trace_address, 0);
+ cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
-static void cell_global_stop(void)
-{
- if (spu_cycle_reset)
- cell_global_stop_spu();
- else
- cell_global_stop_ppu();
+ /* The writes to the various performance counters only writes
+ * to a latch. The new values (interrupt setting bits, reset
+ * counter value etc.) are not copied to the actual registers
+ * until the performance monitor is enabled. In order to get
+	 * this to work as desired, the performance monitor needs to
+ * be disabled while writing to the latches. This is a
+ * HW design issue.
+ */
+ write_pm_cntrl(cpu);
+ cbe_enable_pm(cpu);
+ }
+ spin_unlock_irqrestore(&cntr_lock, flags);
}
-static void cell_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
+static void cell_handle_interrupt_ppu(struct pt_regs *regs,
+ struct op_counter_config *ctr)
{
u32 cpu;
u64 pc;
@@ -1132,7 +1621,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
* routine are not running at the same time. See the
* cell_virtual_cntr() routine for additional comments.
*/
- spin_lock_irqsave(&virt_cntr_lock, flags);
+ spin_lock_irqsave(&cntr_lock, flags);
/*
* Need to disable and reenable the performance counters
@@ -1185,7 +1674,16 @@ static void cell_handle_interrupt(struct pt_regs *regs,
*/
cbe_enable_pm(cpu);
}
- spin_unlock_irqrestore(&virt_cntr_lock, flags);
+ spin_unlock_irqrestore(&cntr_lock, flags);
+}
+
+static void cell_handle_interrupt(struct pt_regs *regs,
+ struct op_counter_config *ctr)
+{
+ if (profiling_mode == PPU_PROFILING)
+ cell_handle_interrupt_ppu(regs, ctr);
+ else
+ cell_handle_interrupt_spu(regs, ctr);
}
/*
@@ -1195,7 +1693,8 @@ static void cell_handle_interrupt(struct pt_regs *regs,
*/
static int cell_sync_start(void)
{
- if (spu_cycle_reset)
+ if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+ (profiling_mode == SPU_PROFILING_EVENTS))
return spu_sync_start();
else
return DO_GENERIC_SYNC;
@@ -1203,7 +1702,8 @@ static int cell_sync_start(void)
static int cell_sync_stop(void)
{
- if (spu_cycle_reset)
+ if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+ (profiling_mode == SPU_PROFILING_EVENTS))
return spu_sync_stop();
else
return 1;
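
The long comment before cell_handle_interrupt_spu() describes the 128-bit trace-entry layout: HDR in bits 0:15, SPU Address 0 in bits 16:31, SPU Address 1 in bits 32:47 (IBM bit numbering, bit 0 = most significant), with trace_buffer[0] holding bits 0:63. Isolated from the locking and RTAS plumbing, the sample extraction the handler performs reduces to this sketch:

#include <linux/types.h>

/* Sketch of the extraction in cell_handle_interrupt_spu(): trace_hi is
 * trace_buffer[0], i.e. bits 0:63 of the hardware trace entry.
 */
static u32 spu_pc_from_trace(u64 trace_hi)
{
	/* bits 32:47 of the entry -- the SPU Address 1 field in the layout above */
	u64 addr_field = trace_hi & 0x00000000FFFF0000ULL;

	/* Only the top 16 bits of the 18-bit SPU PC are stored, so isolate
	 * the field (shift right 16) and rebuild the PC (shift left 2);
	 * the handler folds both into a single ">> 14".
	 */
	return addr_field >> 14;
}
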
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h
index dfe3c7f3439a..fc71d8a6709b 100644
--- a/arch/s390/include/asm/chpid.h
+++ b/arch/s390/include/asm/chpid.h
@@ -9,7 +9,7 @@
#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H
#include <linux/string.h>
-#include <asm/types.h>
+#include <linux/types.h>
#define __MAX_CHPID 255
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index d38d0cf62d4b..807997f7414b 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -8,6 +8,7 @@
#ifndef _ASM_CHSC_H
#define _ASM_CHSC_H
+#include <linux/types.h>
#include <asm/chpid.h>
#include <asm/schid.h>
diff --git a/arch/s390/include/asm/cmb.h b/arch/s390/include/asm/cmb.h
index 50196857d27a..39ae03294794 100644
--- a/arch/s390/include/asm/cmb.h
+++ b/arch/s390/include/asm/cmb.h
@@ -1,5 +1,8 @@
#ifndef S390_CMB_H
#define S390_CMB_H
+
+#include <linux/types.h>
+
/**
* struct cmbdata - channel measurement block data for user space
* @size: size of the stored data
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index 55b2b80cdf6e..e2db6f16d9c8 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -14,6 +14,7 @@
#ifndef DASD_H
#define DASD_H
+#include <linux/types.h>
#include <linux/ioctl.h>
#define DASD_IOCTL_LETTER 'D'
@@ -78,6 +79,7 @@ typedef struct dasd_information2_t {
#define DASD_FEATURE_USEDIAG 0x02
#define DASD_FEATURE_INITIAL_ONLINE 0x04
#define DASD_FEATURE_ERPLOG 0x08
+#define DASD_FEATURE_FAILFAST 0x10
#define DASD_PARTN_BITS 2
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index d74002f95794..e1f54654e3ae 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -13,7 +13,7 @@
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
-#include <asm/types.h>
+#include <linux/types.h>
/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
struct kvm_pic_state {
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 397d93fba3a7..8cc113f92523 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -68,11 +68,7 @@ typedef unsigned short __kernel_old_dev_t;
#endif /* __s390x__ */
typedef struct {
-#if defined(__KERNEL__) || defined(__USE_ALL)
int val[2];
-#else /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
- int __val[2];
-#endif /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
} __kernel_fsid_t;
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 5396f9f12263..8920025c3c02 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -272,12 +272,15 @@ typedef struct
#define PSW_ASC_SECONDARY 0x0000800000000000UL
#define PSW_ASC_HOME 0x0000C00000000000UL
-extern long psw_user32_bits;
-
#endif /* __s390x__ */
+#ifdef __KERNEL__
extern long psw_kernel_bits;
extern long psw_user_bits;
+#ifdef CONFIG_64BIT
+extern long psw_user32_bits;
+#endif
+#endif
/* This macro merges a NEW PSW mask specified by the user into
the currently active PSW mask CURRENT, modifying only those
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h
index 930d378ef75a..06cbd1e8c943 100644
--- a/arch/s390/include/asm/qeth.h
+++ b/arch/s390/include/asm/qeth.h
@@ -10,6 +10,7 @@
*/
#ifndef __ASM_S390_QETH_IOCTL_H__
#define __ASM_S390_QETH_IOCTL_H__
+#include <linux/types.h>
#include <linux/ioctl.h>
#define SIOC_QETH_ARP_SET_NO_ENTRIES (SIOCDEVPRIVATE)
diff --git a/arch/s390/include/asm/schid.h b/arch/s390/include/asm/schid.h
index 825503cf3dc2..3e4d401b4e45 100644
--- a/arch/s390/include/asm/schid.h
+++ b/arch/s390/include/asm/schid.h
@@ -1,6 +1,8 @@
#ifndef ASM_SCHID_H
#define ASM_SCHID_H
+#include <linux/types.h>
+
struct subchannel_id {
__u32 cssid : 8;
__u32 : 4;
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index bd9321aa55a9..eb18dc1f327b 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -9,7 +9,7 @@
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
-#include <asm/types.h>
+#include <linux/types.h>
#ifndef __s390x__
# define __SWAB_64_THRU_32__
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 41c547656130..3dc3fc228812 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -9,11 +9,7 @@
#ifndef _S390_TYPES_H
#define _S390_TYPES_H
-#ifndef __s390x__
-# include <asm-generic/int-ll64.h>
-#else
-# include <asm-generic/int-l64.h>
-#endif
+#include <asm-generic/int-ll64.h>
#ifndef __ASSEMBLY__
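
Switching unconditionally to asm-generic/int-ll64.h makes __u64/u64 'unsigned long long' on 64-bit s390 as well, which is why the event-trace format strings in the kvm hunks further down change from %lx to %llx. A minimal illustration of the mismatch (sketch only, using an ordinary printk-style call rather than the VCPU_EVENT macro):

#include <linux/kernel.h>
#include <linux/types.h>

static void show_reset_flags(u64 flags)
{
	/* With int-ll64.h, u64 is unsigned long long, so %llx matches. */
	pr_debug("requesting userspace resets %llx\n", flags);

	/* pr_debug("... %lx\n", flags) would now trigger a -Wformat warning,
	 * since the argument is no longer an unsigned long.
	 */
}
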
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 6b1896345eda..a65afc91e8aa 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -54,7 +54,5 @@ long sys_sigreturn(void);
long sys_rt_sigreturn(void);
long sys32_sigreturn(void);
long sys32_rt_sigreturn(void);
-long old_select(struct sel_arg_struct __user *arg);
-long sys_ptrace(long request, long pid, long addr, long data);
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9c0ccb532a45..2d337cbb9329 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -685,7 +685,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (MACHINE_HAS_IEEE)
lowcore->extended_save_area_addr = (u32) save_area;
#else
- BUG_ON(vdso_alloc_per_cpu(smp_processor_id(), lowcore));
+ if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
+ BUG();
#endif
set_prefix((u32)(unsigned long) lowcore);
local_mcck_enable();
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 4fe952e557ac..c34be4568b80 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -103,25 +103,6 @@ out:
return error;
}
-#ifndef CONFIG_64BIT
-struct sel_arg_struct {
- unsigned long n;
- fd_set __user *inp, *outp, *exp;
- struct timeval __user *tvp;
-};
-
-asmlinkage long old_select(struct sel_arg_struct __user *arg)
-{
- struct sel_arg_struct a;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- return -EFAULT;
- /* sys_select() does the appropriate kernel locking */
- return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
-
-}
-#endif /* CONFIG_64BIT */
-
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 25a6a82f1c02..690e17819686 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -322,7 +322,8 @@ static int __init vdso_init(void)
vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
vdso64_pagelist[vdso64_pages] = NULL;
#ifndef CONFIG_SMP
- BUG_ON(vdso_alloc_per_cpu(0, S390_lowcore));
+ if (vdso_alloc_per_cpu(0, &S390_lowcore))
+ BUG();
#endif
vdso_init_cr5();
#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index c32f29c3d70c..ad8acfc949fb 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -13,10 +13,6 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
.text
.align 4
.globl __kernel_gettimeofday
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a0775e1f08df..8300309698fa 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -47,7 +47,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
- VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx",
+ VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
vcpu->run->s390_reset_flags);
return -EREMOTE;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2960702b4824..f4fe28a2521a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -160,7 +160,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
break;
case KVM_S390_INT_VIRTIO:
- VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
+ VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
inti->ext.ext_params, inti->ext.ext_params2);
vcpu->stat.deliver_virtio_interrupt++;
rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
@@ -360,7 +360,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
vcpu->arch.ckc_timer.expires = jiffies + sltime;
add_timer(&vcpu->arch.ckc_timer);
- VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
+ VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
no_timer:
spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
spin_lock_bh(&vcpu->arch.local_int.lock);
@@ -491,7 +491,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
switch (s390int->type) {
case KVM_S390_INT_VIRTIO:
- VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
+ VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
s390int->parm, s390int->parm64);
inti->type = s390int->type;
inti->ext.ext_params = s390int->parm;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index cce40ff2913b..3605df45dd41 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -118,7 +118,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
goto out;
}
- VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
+ VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
out:
return 0;
}
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index fdcb93bc6d11..6c43625bb1a5 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -9,7 +9,6 @@ typedef struct {
mm_context_id_t id;
void *vdso;
#else
- struct vm_list_struct *vmlist;
unsigned long end_brk;
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 9fa9dcdf344b..e02a359d2aa5 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -300,7 +300,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
return oldbit;
}
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
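Making nr unsigned presumably lets the compiler lower the modulo and division by BITS_PER_LONG to a mask and a shift without having to allow for negative indices (bit numbers are never negative here anyway). Sketch of the equivalence, assuming BITS_PER_LONG is a power of two:

	/* for unsigned nr, these pairs are equivalent and branch-free */
	unsigned int word = nr / BITS_PER_LONG;		/* == nr >> 6 on 64-bit x86     */
	unsigned int bit  = nr % BITS_PER_LONG;		/* == nr & (BITS_PER_LONG - 1)  */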
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 744aa7fc49d5..76b8cd953dee 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -201,6 +201,12 @@ struct chipset {
void (*f)(int num, int slot, int func);
};
+/*
+ * Only works for devices on the root bus. If you add any devices
+ * not on bus 0, re-add the outer bus loop in early_quirks(). But
+ * be careful, because at least the Nvidia quirk here relies on
+ * matching only on bus 0.
+ */
static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -267,17 +273,17 @@ static int __init check_dev_quirk(int num, int slot, int func)
void __init early_quirks(void)
{
- int num, slot, func;
+ int slot, func;
if (!early_pci_allowed())
return;
/* Poor man's PCI discovery */
- for (num = 0; num < 32; num++)
- for (slot = 0; slot < 32; slot++)
- for (func = 0; func < 8; func++) {
- /* Only probe function 0 on single fn devices */
- if (check_dev_quirk(num, slot, func))
- break;
- }
+ /* Only scan the root bus */
+ for (slot = 0; slot < 32; slot++)
+ for (func = 0; func < 8; func++) {
+ /* Only probe function 0 on single fn devices */
+ if (check_dev_quirk(0, slot, func))
+ break;
+ }
}
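The rewritten scan walks only bus 0, shrinking the probe space from 32 buses x 32 slots x 8 functions to 32 x 8. If a quirk ever needs to match a device off the root bus, the outer bus loop would have to be restored, roughly as it looked before this change (sketch of the removed code):

	for (num = 0; num < 32; num++)
		for (slot = 0; slot < 32; slot++)
			for (func = 0; func < 8; func++) {
				/* only probe function 0 on single fn devices */
				if (check_dev_quirk(num, slot, func))
					break;
			}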
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 98658f25f542..8fdf06e4edf9 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -2,7 +2,7 @@
* @file op_model_amd.c
* athlon / K7 / K8 / Family 10h model-specific MSR operations
*
- * @remark Copyright 2002-2008 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon
@@ -10,7 +10,7 @@
* @author Graydon Hoare
* @author Robert Richter <robert.richter@amd.com>
* @author Barry Kasindorf
-*/
+ */
#include <linux/oprofile.h>
#include <linux/device.h>
@@ -60,53 +60,10 @@ static unsigned long reset_value[NUM_COUNTERS];
#define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */
#define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */
-/* Codes used in cpu_buffer.c */
-/* This produces duplicate code, need to be fixed */
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN 4
-
-/*
- * The function interface needs to be fixed, something like add
- * data. Should then be added to linux/oprofile.h.
- */
-extern void
-oprofile_add_ibs_sample(struct pt_regs * const regs,
- unsigned int * const ibs_sample, int ibs_code);
-
-struct ibs_fetch_sample {
- /* MSRC001_1031 IBS Fetch Linear Address Register */
- unsigned int ibs_fetch_lin_addr_low;
- unsigned int ibs_fetch_lin_addr_high;
- /* MSRC001_1030 IBS Fetch Control Register */
- unsigned int ibs_fetch_ctl_low;
- unsigned int ibs_fetch_ctl_high;
- /* MSRC001_1032 IBS Fetch Physical Address Register */
- unsigned int ibs_fetch_phys_addr_low;
- unsigned int ibs_fetch_phys_addr_high;
-};
-
-struct ibs_op_sample {
- /* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
- unsigned int ibs_op_rip_low;
- unsigned int ibs_op_rip_high;
- /* MSRC001_1035 IBS Op Data Register */
- unsigned int ibs_op_data1_low;
- unsigned int ibs_op_data1_high;
- /* MSRC001_1036 IBS Op Data 2 Register */
- unsigned int ibs_op_data2_low;
- unsigned int ibs_op_data2_high;
- /* MSRC001_1037 IBS Op Data 3 Register */
- unsigned int ibs_op_data3_low;
- unsigned int ibs_op_data3_high;
- /* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
- unsigned int ibs_dc_linear_low;
- unsigned int ibs_dc_linear_high;
- /* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
- unsigned int ibs_dc_phys_low;
- unsigned int ibs_dc_phys_high;
-};
+#define IBS_FETCH_SIZE 6
+#define IBS_OP_SIZE 12
-static int ibs_allowed; /* AMD Family10h and later */
+static int has_ibs; /* AMD Family10h and later */
struct op_ibs_config {
unsigned long op_enabled;
@@ -197,31 +154,29 @@ static inline int
op_amd_handle_ibs(struct pt_regs * const regs,
struct op_msrs const * const msrs)
{
- unsigned int low, high;
- struct ibs_fetch_sample ibs_fetch;
- struct ibs_op_sample ibs_op;
+ u32 low, high;
+ u64 msr;
+ struct op_entry entry;
- if (!ibs_allowed)
+ if (!has_ibs)
return 1;
if (ibs_config.fetch_enabled) {
rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
if (high & IBS_FETCH_HIGH_VALID_BIT) {
- ibs_fetch.ibs_fetch_ctl_high = high;
- ibs_fetch.ibs_fetch_ctl_low = low;
- rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
- ibs_fetch.ibs_fetch_lin_addr_high = high;
- ibs_fetch.ibs_fetch_lin_addr_low = low;
- rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
- ibs_fetch.ibs_fetch_phys_addr_high = high;
- ibs_fetch.ibs_fetch_phys_addr_low = low;
-
- oprofile_add_ibs_sample(regs,
- (unsigned int *)&ibs_fetch,
- IBS_FETCH_BEGIN);
+ rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
+ oprofile_write_reserve(&entry, regs, msr,
+ IBS_FETCH_CODE, IBS_FETCH_SIZE);
+ oprofile_add_data(&entry, (u32)msr);
+ oprofile_add_data(&entry, (u32)(msr >> 32));
+ oprofile_add_data(&entry, low);
+ oprofile_add_data(&entry, high);
+ rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
+ oprofile_add_data(&entry, (u32)msr);
+ oprofile_add_data(&entry, (u32)(msr >> 32));
+ oprofile_write_commit(&entry);
/* reenable the IRQ */
- rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
high &= ~IBS_FETCH_HIGH_VALID_BIT;
high |= IBS_FETCH_HIGH_ENABLE;
low &= IBS_FETCH_LOW_MAX_CNT_MASK;
@@ -232,30 +187,29 @@ op_amd_handle_ibs(struct pt_regs * const regs,
if (ibs_config.op_enabled) {
rdmsr(MSR_AMD64_IBSOPCTL, low, high);
if (low & IBS_OP_LOW_VALID_BIT) {
- rdmsr(MSR_AMD64_IBSOPRIP, low, high);
- ibs_op.ibs_op_rip_low = low;
- ibs_op.ibs_op_rip_high = high;
- rdmsr(MSR_AMD64_IBSOPDATA, low, high);
- ibs_op.ibs_op_data1_low = low;
- ibs_op.ibs_op_data1_high = high;
- rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
- ibs_op.ibs_op_data2_low = low;
- ibs_op.ibs_op_data2_high = high;
- rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
- ibs_op.ibs_op_data3_low = low;
- ibs_op.ibs_op_data3_high = high;
- rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
- ibs_op.ibs_dc_linear_low = low;
- ibs_op.ibs_dc_linear_high = high;
- rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
- ibs_op.ibs_dc_phys_low = low;
- ibs_op.ibs_dc_phys_high = high;
+ rdmsrl(MSR_AMD64_IBSOPRIP, msr);
+ oprofile_write_reserve(&entry, regs, msr,
+ IBS_OP_CODE, IBS_OP_SIZE);
+ oprofile_add_data(&entry, (u32)msr);
+ oprofile_add_data(&entry, (u32)(msr >> 32));
+ rdmsrl(MSR_AMD64_IBSOPDATA, msr);
+ oprofile_add_data(&entry, (u32)msr);
+ oprofile_add_data(&entry, (u32)(msr >> 32));
+ rdmsrl(MSR_AMD64_IBSOPDATA2, msr);
+ oprofile_add_data(&entry, (u32)msr);
+ oprofile_add_data(&entry, (u32)(msr >> 32));
+ rdmsrl(MSR_AMD64_IBSOPDATA3, msr);
+ oprofile_add_data(&entry, (u32)msr);
+ oprofile_add_data(&entry, (u32)(msr >> 32));
+ rdmsrl(MSR_AMD64_IBSDCLINAD, msr);
+ oprofile_add_data(&entry, (u32)msr);
+ oprofile_add_data(&entry, (u32)(msr >> 32));
+ rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr);
+ oprofile_add_data(&entry, (u32)msr);
+ oprofile_add_data(&entry, (u32)(msr >> 32));
+ oprofile_write_commit(&entry);
/* reenable the IRQ */
- oprofile_add_ibs_sample(regs,
- (unsigned int *)&ibs_op,
- IBS_OP_BEGIN);
- rdmsr(MSR_AMD64_IBSOPCTL, low, high);
high = 0;
low &= ~IBS_OP_LOW_VALID_BIT;
low |= IBS_OP_LOW_ENABLE;
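The new IBS_FETCH_SIZE (6) and IBS_OP_SIZE (12) count 32-bit words per sample: the fetch path records three MSRs (fetch control, fetch linear address, fetch physical address) and the op path six, each stored as a low/high pair through the reserve/add/commit ring-buffer interface used above. The sequence, condensed from the fetch path above (remaining MSR words elided):

	u64 msr;
	struct op_entry entry;

	rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
	oprofile_write_reserve(&entry, regs, msr, IBS_FETCH_CODE, IBS_FETCH_SIZE);
	oprofile_add_data(&entry, (u32)msr);		/* low 32 bits  */
	oprofile_add_data(&entry, (u32)(msr >> 32));	/* high 32 bits */
	/* ... fetch control and physical address words ... */
	oprofile_write_commit(&entry);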
@@ -305,14 +259,14 @@ static void op_amd_start(struct op_msrs const * const msrs)
}
#ifdef CONFIG_OPROFILE_IBS
- if (ibs_allowed && ibs_config.fetch_enabled) {
+ if (has_ibs && ibs_config.fetch_enabled) {
low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
+ IBS_FETCH_HIGH_ENABLE;
wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
}
- if (ibs_allowed && ibs_config.op_enabled) {
+ if (has_ibs && ibs_config.op_enabled) {
low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
+ IBS_OP_LOW_ENABLE;
@@ -341,14 +295,14 @@ static void op_amd_stop(struct op_msrs const * const msrs)
}
#ifdef CONFIG_OPROFILE_IBS
- if (ibs_allowed && ibs_config.fetch_enabled) {
+ if (has_ibs && ibs_config.fetch_enabled) {
/* clear max count and enable */
low = 0;
high = 0;
wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
}
- if (ibs_allowed && ibs_config.op_enabled) {
+ if (has_ibs && ibs_config.op_enabled) {
/* clear max count and enable */
low = 0;
high = 0;
@@ -409,6 +363,7 @@ static int init_ibs_nmi(void)
| IBSCTL_LVTOFFSETVAL);
pci_read_config_dword(cpu_cfg, IBSCTL, &value);
if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
+ pci_dev_put(cpu_cfg);
printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
"IBSCTL = 0x%08x", value);
return 1;
@@ -436,20 +391,20 @@ static int init_ibs_nmi(void)
/* uninitialize the APIC for the IBS interrupts if needed */
static void clear_ibs_nmi(void)
{
- if (ibs_allowed)
+ if (has_ibs)
on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
}
/* initialize the APIC for the IBS interrupts if available */
static void ibs_init(void)
{
- ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
+ has_ibs = boot_cpu_has(X86_FEATURE_IBS);
- if (!ibs_allowed)
+ if (!has_ibs)
return;
if (init_ibs_nmi()) {
- ibs_allowed = 0;
+ has_ibs = 0;
return;
}
@@ -458,7 +413,7 @@ static void ibs_init(void)
static void ibs_exit(void)
{
- if (!ibs_allowed)
+ if (!has_ibs)
return;
clear_ibs_nmi();
@@ -478,7 +433,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
if (ret)
return ret;
- if (!ibs_allowed)
+ if (!has_ibs)
return ret;
/* model specific files */