author		Ingo Molnar <mingo@elte.hu>	2008-10-15 15:46:29 +0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-15 15:46:29 +0400
commit		b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (patch)
tree		53ccb1c2c14751fe69cf93102e76e97021f6df07 /arch/sparc64/mm
parent		4f962d4d65923d7b722192e729840cfb79af0a5a (diff)
parent		278429cff8809958d25415ba0ed32b59866ab1a8 (diff)
download	linux-b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40.tar.xz
Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--	arch/sparc64/mm/fault.c		42
-rw-r--r--	arch/sparc64/mm/hugetlbpage.c	12
-rw-r--r--	arch/sparc64/mm/init.c		175
-rw-r--r--	arch/sparc64/mm/init.h		49
-rw-r--r--	arch/sparc64/mm/tlb.c		2
-rw-r--r--	arch/sparc64/mm/tsb.c		11
-rw-r--r--	arch/sparc64/mm/ultra.S		49
7 files changed, 159 insertions, 181 deletions
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 236f4d228d2b..a9e474bf6385 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -1,7 +1,7 @@
 /*
  * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
  *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
  */
 
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kprobes.h>
-#include <linux/kallsyms.h>
 #include <linux/kdebug.h>
 
 #include <asm/page.h>
@@ -52,43 +51,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
 }
 #endif
 
-/*
- * To debug kernel to catch accesses to certain virtual/physical addresses.
- * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
- * flags = VM_READ watches memread accesses, flags = VM_WRITE watches memwrite accesses.
- * Caller passes in a 64bit aligned addr, with mask set to the bytes that need to be
- * watched. This is only useful on a single cpu machine for now. After the watchpoint
- * is detected, the process causing it will be killed, thus preventing an infinite loop.
- */
-void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
-{
-	unsigned long lsubits;
-
-	__asm__ __volatile__("ldxa [%%g0] %1, %0"
-			     : "=r" (lsubits)
-			     : "i" (ASI_LSU_CONTROL));
-	lsubits &= ~(LSU_CONTROL_PM | LSU_CONTROL_VM |
-		     LSU_CONTROL_PR | LSU_CONTROL_VR |
-		     LSU_CONTROL_PW | LSU_CONTROL_VW);
-
-	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
-			     "membar #Sync"
-			     : /* no outputs */
-			     : "r" (addr), "r" (mode ? VIRT_WATCHPOINT : PHYS_WATCHPOINT),
-			       "i" (ASI_DMMU));
-
-	lsubits |= ((unsigned long)mask << (mode ? 25 : 33));
-	if (flags & VM_READ)
-		lsubits |= (mode ? LSU_CONTROL_VR : LSU_CONTROL_PR);
-	if (flags & VM_WRITE)
-		lsubits |= (mode ? LSU_CONTROL_VW : LSU_CONTROL_PW);
-	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
-			     "membar #Sync"
-			     : /* no outputs */
-			     : "r" (lsubits), "i" (ASI_LSU_CONTROL)
-			     : "memory");
-}
-
 static void __kprobes unhandled_fault(unsigned long address,
 				      struct task_struct *tsk,
 				      struct pt_regs *regs)
@@ -115,7 +77,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
 	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
 	       regs->tpc);
 	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
-	print_symbol("RPC: <%s>\n", regs->u_regs[15]);
+	printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
 	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
 	dump_stack();
 	unhandled_fault(regs->tpc, current, regs);
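
The fault.c hunk above replaces the kallsyms print_symbol() helper with the %pS printk() format extension, which symbolizes a code pointer in a single call. A minimal sketch of the same idiom, assuming a kernel-module context (report_rpc and the message text are illustrative, not from the patch):

	#include <linux/kernel.h>	/* printk() and the %pS pointer extension */

	/* Print a return address both raw and symbolized; 'rpc' stands in
	 * for regs->u_regs[15] in the hunk above. */
	static void report_rpc(unsigned long rpc)
	{
		printk(KERN_CRIT "OOPS: RPC [%016lx]\n", rpc);
		printk(KERN_CRIT "OOPS: RPC <%pS>\n", (void *) rpc);
	}
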
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 6cfab2e4d340..f27d10369e0c 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len))
+		if (prepare_hugepage_range(file, addr, len))
 			return -EINVAL;
 		return addr;
 	}
@@ -195,7 +195,8 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 				pgoff, flags);
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+		      unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -294,6 +295,11 @@ int pmd_huge(pmd_t pmd)
 	return 0;
 }
 
+int pud_huge(pud_t pud)
+{
+	return 0;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
@@ -344,7 +350,7 @@ void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 			 * also executing in this address space.
 			 */
 			mm->context.sparc64_ctx_val = ctx;
-			on_each_cpu(context_reload, mm, 0, 0);
+			on_each_cpu(context_reload, mm, 0);
 		}
 		spin_unlock(&ctx_alloc_lock);
 	}
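
The on_each_cpu() change tracks the 2.6.27-era generic SMP rework, which dropped the old 'retry' argument and left on_each_cpu(func, info, wait). A hedged sketch of the new signature in use (reload_all_cpus is a hypothetical wrapper, and context_reload's body here is a placeholder for the MMU-context reload the real callback performs):

	#include <linux/mm_types.h>	/* struct mm_struct */
	#include <linux/smp.h>		/* on_each_cpu() */

	/* Runs on every online CPU; 'info' carries the mm passed below. */
	static void context_reload(void *info)
	{
		/* per-cpu MMU context reload would go here */
	}

	static void reload_all_cpus(struct mm_struct *mm)
	{
		/* Three arguments now: func, info, wait.  wait == 0 means
		 * return without waiting for the other CPUs to finish. */
		on_each_cpu(context_reload, mm, 0);
	}
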
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 84898c44dd4d..3c10daf8fc01 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -46,14 +46,11 @@
 #include <asm/tsb.h>
 #include <asm/hypervisor.h>
 #include <asm/prom.h>
-#include <asm/sstate.h>
 #include <asm/mdesc.h>
 #include <asm/cpudata.h>
+#include <asm/irq.h>
 
-#define MAX_PHYS_ADDRESS	(1UL << 42UL)
-#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
-#define KPTE_BITMAP_BYTES	\
-	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
+#include "init.h"
 
 unsigned long kern_linear_pte_xor[2] __read_mostly;
@@ -392,51 +389,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 	}
 }
 
-void show_mem(void)
-{
-	unsigned long total = 0, reserved = 0;
-	unsigned long shared = 0, cached = 0;
-	pg_data_t *pgdat;
-
-	printk(KERN_INFO "Mem-info:\n");
-	show_free_areas();
-	printk(KERN_INFO "Free swap: %6ldkB\n",
-	       nr_swap_pages << (PAGE_SHIFT-10));
-	for_each_online_pgdat(pgdat) {
-		unsigned long i, flags;
-
-		pgdat_resize_lock(pgdat, &flags);
-		for (i = 0; i < pgdat->node_spanned_pages; i++) {
-			struct page *page = pgdat_page_nr(pgdat, i);
-			total++;
-			if (PageReserved(page))
-				reserved++;
-			else if (PageSwapCache(page))
-				cached++;
-			else if (page_count(page))
-				shared += page_count(page) - 1;
-		}
-		pgdat_resize_unlock(pgdat, &flags);
-	}
-
-	printk(KERN_INFO "%lu pages of RAM\n", total);
-	printk(KERN_INFO "%lu reserved pages\n", reserved);
-	printk(KERN_INFO "%lu pages shared\n", shared);
-	printk(KERN_INFO "%lu pages swap cached\n", cached);
-
-	printk(KERN_INFO "%lu pages dirty\n",
-	       global_page_state(NR_FILE_DIRTY));
-	printk(KERN_INFO "%lu pages writeback\n",
-	       global_page_state(NR_WRITEBACK));
-	printk(KERN_INFO "%lu pages mapped\n",
-	       global_page_state(NR_FILE_MAPPED));
-	printk(KERN_INFO "%lu pages slab\n",
-	       global_page_state(NR_SLAB_RECLAIMABLE) +
-	       global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk(KERN_INFO "%lu pages pagetables\n",
-	       global_page_state(NR_PAGETABLE));
-}
-
 void mmu_info(struct seq_file *m)
 {
 	if (tlb_type == cheetah)
@@ -460,17 +412,9 @@ void mmu_info(struct seq_file *m)
 #endif /* CONFIG_DEBUG_DCFLUSH */
 }
 
-struct linux_prom_translation {
-	unsigned long virt;
-	unsigned long size;
-	unsigned long data;
-};
-
-/* Exported for kernel TLB miss handling in ktlb.S */
 struct linux_prom_translation prom_trans[512] __read_mostly;
 unsigned int prom_trans_ents __read_mostly;
 
-/* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 
 /* The obp translations are saved based on 8k pagesize, since obp can
@@ -788,7 +732,6 @@ int numa_cpu_lookup_table[NR_CPUS];
 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-static bootmem_data_t plat_node_bdata[MAX_NUMNODES];
 
 struct mdesc_mblock {
 	u64	base;
@@ -841,6 +784,9 @@ static unsigned long nid_range(unsigned long start, unsigned long end,
 		start += PAGE_SIZE;
 	}
 
+	if (start > end)
+		start = end;
+
 	return start;
 }
 #else
@@ -871,7 +817,7 @@ static void __init allocate_node_data(int nid)
 	NODE_DATA(nid) = __va(paddr);
 	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
-	NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
+	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
 #endif
 
 	p = NODE_DATA(nid);
@@ -980,6 +926,10 @@ int of_node_to_nid(struct device_node *dp)
 	int count, nid;
 	u64 grp;
 
+	/* This is the right thing to do on currently supported
+	 * SUN4U NUMA platforms as well, as the PCI controller does
+	 * not sit behind any particular memory controller.
+	 */
 	if (!mlgroups)
 		return -1;
 
@@ -1248,8 +1198,44 @@ out:
 	return err;
 }
 
+static int __init numa_parse_jbus(void)
+{
+	unsigned long cpu, index;
+
+	/* NUMA node id is encoded in bits 36 and higher, and there is
+	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
+	 */
+	index = 0;
+	for_each_present_cpu(cpu) {
+		numa_cpu_lookup_table[cpu] = index;
+		numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
+		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
+		node_masks[index].val = cpu << 36UL;
+
+		index++;
+	}
+	num_node_masks = index;
+
+	add_node_ranges();
+
+	for (index = 0; index < num_node_masks; index++) {
+		allocate_node_data(index);
+		node_set_online(index);
+	}
+
+	return 0;
+}
+
 static int __init numa_parse_sun4u(void)
 {
+	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		unsigned long ver;
+
+		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+		if ((ver >> 32UL) == __JALAPENO_ID ||
+		    (ver >> 32UL) == __SERRANO_ID)
+			return numa_parse_jbus();
+	}
 	return -1;
 }
 
@@ -1675,8 +1661,6 @@ void __cpuinit sun4v_ktsb_register(void)
 
 /* paging_init() sets up the page tables */
 
-extern void central_probe(void);
-
 static unsigned long last_valid_pfn;
 pgd_t swapper_pg_dir[2048];
 
@@ -1721,8 +1705,6 @@ void __init paging_init(void)
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
-	sstate_booting();
-
 	/* Invalidate both kernel TSBs.  */
 	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
 #ifndef CONFIG_DEBUG_PAGEALLOC
@@ -1768,8 +1750,7 @@ void __init paging_init(void)
 
 	find_ramdisk(phys_base);
 
-	if (cmdline_memory_size)
-		lmb_enforce_memory_limit(phys_base + cmdline_memory_size);
+	lmb_enforce_memory_limit(cmdline_memory_size);
 
 	lmb_analyze();
 	lmb_dump_all();
@@ -1817,6 +1798,16 @@ void __init paging_init(void)
 	if (tlb_type == hypervisor)
 		sun4v_mdesc_init();
 
+	/* Once the OF device tree and MDESC have been setup, we know
+	 * the list of possible cpus.  Therefore we can allocate the
+	 * IRQ stacks.
+	 */
+	for_each_possible_cpu(i) {
+		/* XXX Use node local allocations... XXX */
+		softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+		hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+	}
+
 	/* Setup bootmem... */
 	last_valid_pfn = end_pfn = bootmem_init(phys_base);
 
@@ -1836,9 +1827,6 @@ void __init paging_init(void)
 	}
 
 	printk("Booting Linux...\n");
-
-	central_probe();
-
 	cpu_probe();
 }
 int __init page_in_phys_avail(unsigned long paddr)
@@ -1876,7 +1864,7 @@ static int pavail_rescan_ents __initdata;
  * memory list again, and make sure it provides at least as much
  * memory as 'pavail' does.
  */
-static void setup_valid_addr_bitmap_from_pavail(void)
+static void __init setup_valid_addr_bitmap_from_pavail(void)
 {
 	int i;
 
@@ -1996,6 +1984,15 @@
 void free_initmem(void)
 {
 	unsigned long addr, initend;
+	int do_free = 1;
+
+	/* If the physical memory maps were trimmed by kernel command
+	 * line options, don't even try freeing this initmem stuff up.
+	 * The kernel image could have been in the trimmed out region
+	 * and if so the freeing below will free invalid page structs.
+	 */
+	if (cmdline_memory_size)
+		do_free = 0;
 
 	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
@@ -2010,13 +2007,16 @@ void free_initmem(void)
 			((unsigned long) __va(kern_base)) -
 			((unsigned long) KERNBASE));
 		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 
-		p = virt_to_page(page);
-		ClearPageReserved(p);
-		init_page_count(p);
-		__free_page(p);
-		num_physpages++;
-		totalram_pages++;
+		if (do_free) {
+			p = virt_to_page(page);
+
+			ClearPageReserved(p);
+			init_page_count(p);
+			__free_page(p);
+			num_physpages++;
+			totalram_pages++;
+		}
 	}
 }
@@ -2053,7 +2053,6 @@ pgprot_t PAGE_COPY __read_mostly;
 pgprot_t PAGE_SHARED __read_mostly;
 EXPORT_SYMBOL(PAGE_SHARED);
 
-pgprot_t PAGE_EXEC __read_mostly;
 unsigned long pg_iobits __read_mostly;
 
 unsigned long _PAGE_IE __read_mostly;
@@ -2066,14 +2065,6 @@ unsigned long _PAGE_CACHE __read_mostly;
 EXPORT_SYMBOL(_PAGE_CACHE);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-
-#define VMEMMAP_CHUNK_SHIFT	22
-#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
-#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
-#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
-
-#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
-			  sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
 unsigned long vmemmap_table[VMEMMAP_SIZE];
 
 int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
@@ -2157,7 +2148,6 @@ static void __init sun4u_pgprot_init(void)
 				   _PAGE_CACHE_4U | _PAGE_P_4U |
 				   __ACCESS_BITS_4U | __DIRTY_BITS_4U |
 				   _PAGE_EXEC_4U | _PAGE_L_4U);
-	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
 
 	_PAGE_IE = _PAGE_IE_4U;
 	_PAGE_E = _PAGE_E_4U;
@@ -2168,10 +2158,10 @@ static void __init sun4u_pgprot_init(void)
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
-		0xfffff80000000000;
+		0xfffff80000000000UL;
 #else
 	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
-		0xfffff80000000000;
+		0xfffff80000000000UL;
 #endif
 	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
 				   _PAGE_P_4U | _PAGE_W_4U);
@@ -2209,7 +2199,6 @@ static void __init sun4v_pgprot_init(void)
 				   __ACCESS_BITS_4V | __DIRTY_BITS_4V |
 				   _PAGE_EXEC_4V);
 	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
-	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
 
 	_PAGE_IE = _PAGE_IE_4V;
 	_PAGE_E = _PAGE_E_4V;
@@ -2217,20 +2206,20 @@ static void __init sun4v_pgprot_init(void)
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
-		0xfffff80000000000;
+		0xfffff80000000000UL;
 #else
 	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
-		0xfffff80000000000;
+		0xfffff80000000000UL;
 #endif
 	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
 				   _PAGE_P_4V | _PAGE_W_4V);
 
#ifdef CONFIG_DEBUG_PAGEALLOC
 	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
-		0xfffff80000000000;
+		0xfffff80000000000UL;
 #else
 	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
-		0xfffff80000000000;
+		0xfffff80000000000UL;
 #endif
 	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
 				   _PAGE_P_4V | _PAGE_W_4V);
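
The new numa_parse_jbus() encodes the NUMA node in physical-address bits 36 and up, one (mask, val) pair per node. A hedged sketch of how such a table resolves an address to a node (find_node and the array size are illustrative; the patch itself only fills the table in):

	/* Illustrative only: with mask = ~((1UL << 36) - 1) and
	 * val = cpu << 36, an address belongs to node i exactly when
	 * its bits [63:36] equal i. */
	struct node_mem_mask {
		unsigned long mask;
		unsigned long val;
	};

	static struct node_mem_mask node_masks[4];	/* sized arbitrarily here */
	static int num_node_masks;

	static int find_node(unsigned long paddr)
	{
		int i;

		for (i = 0; i < num_node_masks; i++) {
			if ((paddr & node_masks[i].mask) == node_masks[i].val)
				return i;
		}
		return -1;	/* no node claims this address */
	}
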
diff --git a/arch/sparc64/mm/init.h b/arch/sparc64/mm/init.h
new file mode 100644
index 000000000000..16063870a489
--- /dev/null
+++ b/arch/sparc64/mm/init.h
@@ -0,0 +1,49 @@
+#ifndef _SPARC64_MM_INIT_H
+#define _SPARC64_MM_INIT_H
+
+/* Most of the symbols in this file are defined in init.c and
+ * marked non-static so that assembler code can get at them.
+ */
+
+#define MAX_PHYS_ADDRESS	(1UL << 42UL)
+#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
+#define KPTE_BITMAP_BYTES	\
+	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
+
+extern unsigned long kern_linear_pte_xor[2];
+extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+extern unsigned int sparc64_highest_unlocked_tlb_ent;
+extern unsigned long sparc64_kern_pri_context;
+extern unsigned long sparc64_kern_pri_nuc_bits;
+extern unsigned long sparc64_kern_sec_context;
+extern void mmu_info(struct seq_file *m);
+
+struct linux_prom_translation {
+	unsigned long virt;
+	unsigned long size;
+	unsigned long data;
+};
+
+/* Exported for kernel TLB miss handling in ktlb.S */
+extern struct linux_prom_translation prom_trans[512];
+extern unsigned int prom_trans_ents;
+
+/* Exported for SMP bootup purposes. */
+extern unsigned long kern_locked_tte_data;
+
+extern void prom_world(int enter);
+
+extern void free_initmem(void);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define VMEMMAP_CHUNK_SHIFT	22
+#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
+#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
+#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
+
+#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
+			  sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
+extern unsigned long vmemmap_table[VMEMMAP_SIZE];
+#endif
+
+#endif /* _SPARC64_MM_INIT_H */
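
The KPTE_BITMAP_BYTES arithmetic moved into init.h is easy to verify: one bitmap bit covers a 256 MB chunk of a 2^42-byte physical address space. A small compile-and-run check of that reasoning (the asserts are illustrative, not part of the patch):

	#include <assert.h>

	#define MAX_PHYS_ADDRESS	(1UL << 42UL)		   /* 4 TB  = 2^42 */
	#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)  /* 256 MB = 2^28 */
	#define KPTE_BITMAP_BYTES \
		((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)

	int main(void)
	{
		/* 2^42 / 2^28 = 2^14 = 16384 chunks, one bit each... */
		assert(MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ == 16384);
		/* ...packed 8 bits per byte: 2^14 / 2^3 = 2^11 = 2048 bytes. */
		assert(KPTE_BITMAP_BYTES == 2048);
		/* Stored as unsigned longs (8 bytes on sparc64): 256 words. */
		assert(KPTE_BITMAP_BYTES / sizeof(unsigned long) == 256);
		return 0;
	}
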
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
index ae24919cba7c..d8f21e24a82f 100644
--- a/arch/sparc64/mm/tlb.c
+++ b/arch/sparc64/mm/tlb.c
@@ -19,7 +19,7 @@
 
 /* Heavily inspired by the ppc64 code.  */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 void flush_tlb_pending(void)
 {
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index fe70c8a557b5..587f8efb2e05 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -1,9 +1,10 @@
 /* arch/sparc64/mm/tsb.c
  *
- * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
  */
 
 #include <linux/kernel.h>
+#include <linux/preempt.h>
 #include <asm/system.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
@@ -96,12 +97,6 @@ void flush_tsb_user(struct mmu_gather *mp)
 #elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
 #define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
-#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
-#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
-#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
-#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
-#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
-#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
 #else
 #error Broken base page size setting...
 #endif
@@ -421,7 +416,9 @@ retry_tsb_alloc:
 	tsb_context_switch(mm);
 
 	/* Now force other processors to do the same.  */
+	preempt_disable();
 	smp_tsb_sync(mm);
+	preempt_enable();
 
 	/* Now it is safe to free the old tsb.  */
 	kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
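
The tsb.c hunk brackets smp_tsb_sync() with preempt_disable()/preempt_enable(): with the reworked generic smp_call_function() backend, cross-CPU calls must be issued from a non-preemptible context so the calling CPU stays fixed while the cross-call completes. A hedged sketch of the pattern (tsb_sync_all is a hypothetical wrapper, not from the patch):

	#include <linux/mm_types.h>	/* struct mm_struct */
	#include <linux/preempt.h>

	extern void smp_tsb_sync(struct mm_struct *mm);	/* sparc64 helper used above */

	static void tsb_sync_all(struct mm_struct *mm)
	{
		preempt_disable();	/* pin this task to its current CPU */
		smp_tsb_sync(mm);	/* cross-call every CPU running 'mm' */
		preempt_enable();
	}
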
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 9bb2d90a9df6..86773e89dc1b 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -480,42 +480,6 @@ xcall_sync_tick:
 	b		rtrap_xcall
 	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 
-	/* NOTE: This is SPECIAL!! We do etrap/rtrap however
-	 *       we choose to deal with the "BH's run with
-	 *       %pil==15" problem (described in asm/pil.h)
-	 *       by just invoking rtrap directly past where
-	 *       BH's are checked for.
-	 *
-	 *       We do it like this because we do not want %pil==15
-	 *       lockups to prevent regs being reported.
-	 */
-	.globl		xcall_report_regs
-xcall_report_regs:
-
-661:	rdpr		%pstate, %g2
-	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
-	.section	.sun4v_2insn_patch, "ax"
-	.word		661b
-	nop
-	nop
-	.previous
-
-	rdpr		%pil, %g2
-	wrpr		%g0, 15, %pil
-	sethi		%hi(109f), %g7
-	b,pt		%xcc, etrap_irq
-109:	 or		%g7, %lo(109b), %g7
-#ifdef CONFIG_TRACE_IRQFLAGS
-	call		trace_hardirqs_off
-	 nop
-#endif
-	call		__show_regs
-	 add		%sp, PTREGS_OFF, %o0
-	/* Has to be a non-v9 branch due to the large distance. */
-	b		rtrap_xcall
-	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
-
-#ifdef CONFIG_MAGIC_SYSRQ
 	.globl		xcall_fetch_glob_regs
 xcall_fetch_glob_regs:
 	sethi		%hi(global_reg_snapshot), %g1
@@ -531,6 +495,13 @@ xcall_fetch_glob_regs:
 	stx		%g7, [%g1 + GR_SNAP_TNPC]
 	stx		%o7, [%g1 + GR_SNAP_O7]
 	stx		%i7, [%g1 + GR_SNAP_I7]
+	/* Don't try this at home kids... */
+	rdpr		%cwp, %g2
+	sub		%g2, 1, %g7
+	wrpr		%g7, %cwp
+	mov		%i7, %g7
+	wrpr		%g2, %cwp
+	stx		%g7, [%g1 + GR_SNAP_RPC]
 	sethi		%hi(trap_block), %g7
 	or		%g7, %lo(trap_block), %g7
 	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
@@ -539,7 +510,6 @@ xcall_fetch_glob_regs:
 	membar		#StoreStore
 	stx		%g3, [%g1 + GR_SNAP_THREAD]
 	retry
-#endif /* CONFIG_MAGIC_SYSRQ */
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 	.align		32
@@ -688,6 +658,11 @@ xcall_call_function:
 	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
 	retry
 
+	.globl		xcall_call_function_single
+xcall_call_function_single:
+	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
+	retry
+
 	.globl		xcall_receive_signal
xcall_receive_signal:
 	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
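
The "Don't try this at home" sequence added to xcall_fetch_glob_regs rotates the register window back one step (wrpr %g7, %cwp with %g7 = %cwp - 1) so the previous window's %i7 — the return PC one call frame up — can be stored into GR_SNAP_RPC before %cwp is restored. The nearest portable analogue is GCC's __builtin_return_address(); a hedged, illustrative sketch (not from the patch):

	#include <stdio.h>

	/* Capture the caller's PC, much as the asm above captures the
	 * previous register window's %i7.  Deeper levels of
	 * __builtin_return_address() are unreliable on many targets,
	 * which is why the SPARC code walks the register window itself. */
	static void __attribute__((noinline)) snapshot_rpc(void)
	{
		void *rpc = __builtin_return_address(0);
		printf("RPC <%p>\n", rpc);
	}

	int main(void)
	{
		snapshot_rpc();
		return 0;
	}
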