Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                    |  3
-rw-r--r--  arch/ia64/configs/zx1_defconfig      |  1
-rw-r--r--  arch/ia64/include/asm/elf.h          |  2
-rw-r--r--  arch/ia64/include/asm/pgtable.h      |  1
-rw-r--r--  arch/ia64/include/asm/processor.h    |  4
-rw-r--r--  arch/ia64/include/asm/sal.h          |  2
-rw-r--r--  arch/ia64/include/asm/sections.h     | 24
-rw-r--r--  arch/ia64/include/asm/thread_info.h  |  8
-rw-r--r--  arch/ia64/include/asm/uaccess.h      | 26
-rw-r--r--  arch/ia64/include/asm/xor.h          | 21
-rw-r--r--  arch/ia64/include/uapi/asm/signal.h  |  2
-rw-r--r--  arch/ia64/kernel/module.c            |  6
-rw-r--r--  arch/ia64/kernel/topology.c          | 10
-rw-r--r--  arch/ia64/kernel/unaligned.c         | 60
-rw-r--r--  arch/ia64/mm/discontig.c             | 11
15 files changed, 78 insertions, 103 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index a7e01573abd8..cb93769a9f2a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -8,6 +8,7 @@ menu "Processor type and features"
 
 config IA64
 	bool
+	select ARCH_BINFMT_ELF_EXTRA_PHDRS
 	select ARCH_HAS_DMA_MARK_CLEAN
 	select ARCH_HAS_STRNCPY_FROM_USER
 	select ARCH_HAS_STRNLEN_USER
@@ -35,6 +36,7 @@ config IA64
 	select HAVE_SETUP_PER_CPU_AREA
 	select TTY
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_FUNCTION_DESCRIPTORS
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE
 	select VIRT_TO_BUS
@@ -61,7 +63,6 @@ config IA64
 	select NEED_SG_DMA_LENGTH
 	select NUMA if !FLATMEM
 	select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
-	select SET_FS
 	select ZONE_DMA32
 	default y
 	help
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 629cb9cdf723..851d8594cdb8 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -106,7 +106,6 @@ CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V4=y
 CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=y
 CONFIG_NLS_CODEPAGE_775=y
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 6629301a2620..2ef5f9966ad1 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -226,7 +226,7 @@ struct got_entry {
  * Layout of the Function Descriptor
  */
 struct fdesc {
-	uint64_t ip;
+	uint64_t addr;
 	uint64_t gp;
 };
 
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 9584b2c5f394..7aa8f2330fb1 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -267,6 +267,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pmd_present(pmd)	(pmd_val(pmd) != 0UL)
 #define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0UL)
 #define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
+#define pmd_pfn(pmd)		((pmd_val(pmd) & _PFN_MASK) >> PAGE_SHIFT)
 #define pmd_page(pmd)		virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
 
 #define pud_none(pud)		(!pud_val(pud))
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 45365c2ef598..7cbce290f4e5 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -243,10 +243,6 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 
 extern void print_cpu_info (struct cpuinfo_ia64 *);
 
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
 #define SET_UNALIGN_CTL(task,value)	\
 ({	\
 	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)	\
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
index 78f4f7b40435..22749a201e92 100644
--- a/arch/ia64/include/asm/sal.h
+++ b/arch/ia64/include/asm/sal.h
@@ -420,7 +420,7 @@ typedef struct sal_log_processor_info {
 	 * The rest of this structure consists of variable-length arrays, which can't be
 	 * expressed in C.
 	 */
-	sal_log_mod_error_info_t info[0];
+	sal_log_mod_error_info_t info[];
 	/*
 	 * This is what the rest looked like if C supported variable-length arrays:
 	 *
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index 3a033d2008b3..8e0875cf6071 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -9,6 +9,9 @@
 
 #include <linux/elf.h>
 #include <linux/uaccess.h>
+
+typedef struct fdesc func_desc_t;
+
 #include <asm-generic/sections.h>
 
 extern char __phys_per_cpu_start[];
@@ -27,25 +30,4 @@ extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_b
 extern char __start_unwind[], __end_unwind[];
 extern char __start_ivt_text[], __end_ivt_text[];
 
-#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1
-
-#undef dereference_function_descriptor
-static inline void *dereference_function_descriptor(void *ptr)
-{
-	struct fdesc *desc = ptr;
-	void *p;
-
-	if (!get_kernel_nofault(p, (void *)&desc->ip))
-		ptr = p;
-	return ptr;
-}
-
-#undef dereference_kernel_function_descriptor
-static inline void *dereference_kernel_function_descriptor(void *ptr)
-{
-	if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)
-		return ptr;
-	return dereference_function_descriptor(ptr);
-}
-
 #endif /* _ASM_IA64_SECTIONS_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 51d20cb37706..21b257117e0a 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -27,7 +27,6 @@ struct thread_info {
 	__u32 cpu;			/* current CPU */
 	__u32 last_cpu;			/* Last CPU thread ran on */
 	__u32 status;			/* Thread synchronous flags */
-	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	__u64 utime;
@@ -48,22 +47,21 @@ struct thread_info {
 	.task		= &tsk,			\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.addr_limit	= KERNEL_DS,		\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
 
 #ifndef ASM_OFFSETS_C
 /* how to get the thread information struct from C */
 #define current_thread_info()	((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_stack_node(tsk, node)	\
+#define arch_alloc_thread_stack_node(tsk, node)	\
 		((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
 #define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info()	((struct thread_info *) 0)
-#define alloc_thread_stack_node(tsk, node)	((unsigned long *) 0)
+#define arch_alloc_thread_stack_node(tsk, node)	((unsigned long *) 0)
 #define task_thread_info(tsk)	((struct thread_info *) 0)
 #endif
-#define free_thread_stack(tsk)	/* nothing */
+#define arch_free_thread_stack(tsk)	/* nothing */
 #define task_stack_page(tsk)	((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index e19d2dcc0ced..60adadeb3e9e 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -42,30 +42,20 @@
 #include <asm/extable.h>
 
 /*
- * For historical reasons, the following macros are grossly misnamed:
- */
-#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
-#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */
-
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
-#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)
-
-/*
- * When accessing user memory, we need to make sure the entire area really is in
- * user-level space.  In order to do this efficiently, we make sure that the page at
- * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
+ * When accessing user memory, we need to make sure the entire area really is
+ * in user-level space.  We also need to make sure that the address doesn't
  * point inside the virtually mapped linear page table.
  */
 static inline int __access_ok(const void __user *p, unsigned long size)
 {
+	unsigned long limit = TASK_SIZE;
 	unsigned long addr = (unsigned long)p;
-	unsigned long seg = get_fs().seg;
-	return likely(addr <= seg) &&
-	 (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
+
+	return likely((size <= limit) && (addr <= (limit - size)) &&
+		 likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
 }
-#define access_ok(addr, size) __access_ok((addr), (size))
+#define __access_ok __access_ok
+#include <asm-generic/access_ok.h>
 
 /*
  * These are the main single-value transfer routines.  They automatically
diff --git a/arch/ia64/include/asm/xor.h b/arch/ia64/include/asm/xor.h
index 673051bf9d7d..6785f70d3208 100644
--- a/arch/ia64/include/asm/xor.h
+++ b/arch/ia64/include/asm/xor.h
@@ -4,13 +4,20 @@
  */
 
 
-extern void xor_ia64_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_ia64_3(unsigned long, unsigned long *, unsigned long *,
-		       unsigned long *);
-extern void xor_ia64_4(unsigned long, unsigned long *, unsigned long *,
-		       unsigned long *, unsigned long *);
-extern void xor_ia64_5(unsigned long, unsigned long *, unsigned long *,
-		       unsigned long *, unsigned long *, unsigned long *);
+extern void xor_ia64_2(unsigned long bytes, unsigned long * __restrict p1,
+		       const unsigned long * __restrict p2);
+extern void xor_ia64_3(unsigned long bytes, unsigned long * __restrict p1,
+		       const unsigned long * __restrict p2,
+		       const unsigned long * __restrict p3);
+extern void xor_ia64_4(unsigned long bytes, unsigned long * __restrict p1,
+		       const unsigned long * __restrict p2,
+		       const unsigned long * __restrict p3,
+		       const unsigned long * __restrict p4);
+extern void xor_ia64_5(unsigned long bytes, unsigned long * __restrict p1,
+		       const unsigned long * __restrict p2,
+		       const unsigned long * __restrict p3,
+		       const unsigned long * __restrict p4,
+		       const unsigned long * __restrict p5);
 
 static struct xor_block_template xor_block_ia64 = {
 	.name = "ia64",
diff --git a/arch/ia64/include/uapi/asm/signal.h b/arch/ia64/include/uapi/asm/signal.h
index 38166a88e4c9..63d574e802a2 100644
--- a/arch/ia64/include/uapi/asm/signal.h
+++ b/arch/ia64/include/uapi/asm/signal.h
@@ -90,7 +90,7 @@ struct siginfo;
 typedef struct sigaltstack {
 	void __user *ss_sp;
 	int ss_flags;
-	size_t ss_size;
+	__kernel_size_t ss_size;
 } stack_t;
 
 
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 360f36b0eb3f..8f62cf97f691 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -602,15 +602,15 @@ get_fdesc (struct module *mod, uint64_t value, int *okp)
 		return value;
 
 	/* Look for existing function descriptor. */
-	while (fdesc->ip) {
-		if (fdesc->ip == value)
+	while (fdesc->addr) {
+		if (fdesc->addr == value)
 			return (uint64_t)fdesc;
 		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
 			BUG();
 	}
 
 	/* Create new one */
-	fdesc->ip = value;
+	fdesc->addr = value;
 	fdesc->gp = mod->arch.gp;
 	return (uint64_t) fdesc;
 }
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index e4992917a24b..94a848b06f15 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -70,16 +70,6 @@ static int __init topology_init(void)
 {
 	int i, err = 0;
 
-#ifdef CONFIG_NUMA
-	/*
-	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
-	 */
-	for_each_online_node(i) {
-		if ((err = register_one_node(i)))
-			goto out;
-	}
-#endif
-
 	sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL);
 	if (!sysfs_cpus)
 		panic("kzalloc in topology_init failed - NR_CPUS too big?");
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 6c1a8951dfbb..0acb5a0cd7ab 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -749,9 +749,25 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
 	}
 }
 
+static int emulate_store(unsigned long ifa, void *val, int len, bool kernel_mode)
+{
+	if (kernel_mode)
+		return copy_to_kernel_nofault((void *)ifa, val, len);
+
+	return copy_to_user((void __user *)ifa, val, len);
+}
+
+static int emulate_load(void *val, unsigned long ifa, int len, bool kernel_mode)
+{
+	if (kernel_mode)
+		return copy_from_kernel_nofault(val, (void *)ifa, len);
+
+	return copy_from_user(val, (void __user *)ifa, len);
+}
 
 static int
-emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+		  bool kernel_mode)
 {
 	unsigned int len = 1 << ld.x6_sz;
 	unsigned long val = 0;
@@ -774,7 +790,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 		return -1;
 	}
 	/* this assumes little-endian byte-order: */
-	if (copy_from_user(&val, (void __user *) ifa, len))
+	if (emulate_load(&val, ifa, len, kernel_mode))
 		return -1;
 
 	setreg(ld.r1, val, 0, regs);
@@ -872,7 +888,8 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 }
 
 static int
-emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+		   bool kernel_mode)
 {
 	unsigned long r2;
 	unsigned int len = 1 << ld.x6_sz;
@@ -901,7 +918,7 @@ emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 	}
 
 	/* this assumes little-endian byte-order: */
-	if (copy_to_user((void __user *) ifa, &r2, len))
+	if (emulate_store(ifa, &r2, len, kernel_mode))
 		return -1;
 
 	/*
@@ -1021,7 +1038,7 @@ float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
 }
 
 static int
-emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode)
 {
 	struct ia64_fpreg fpr_init[2];
 	struct ia64_fpreg fpr_final[2];
@@ -1050,8 +1067,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
 	 * This assumes little-endian byte-order.  Note that there is no "ldfpe"
	 * instruction:
 	 */
-	if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
-	    || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
+	if (emulate_load(&fpr_init[0], ifa, len, kernel_mode)
+	    || emulate_load(&fpr_init[1], (ifa + len), len, kernel_mode))
 		return -1;
 
 	DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
@@ -1126,7 +1143,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
 
 
 static int
-emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+		    bool kernel_mode)
 {
 	struct ia64_fpreg fpr_init;
 	struct ia64_fpreg fpr_final;
@@ -1152,7 +1170,7 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 	 * See comments in ldX for descriptions on how the various loads are handled.
 	 */
 	if (ld.x6_op != 0x2) {
-		if (copy_from_user(&fpr_init, (void __user *) ifa, len))
+		if (emulate_load(&fpr_init, ifa, len, kernel_mode))
 			return -1;
 
 		DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
@@ -1202,7 +1220,8 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 
 
 static int
-emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+		     bool kernel_mode)
 {
 	struct ia64_fpreg fpr_init;
 	struct ia64_fpreg fpr_final;
@@ -1244,7 +1263,7 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 	DDUMP("fpr_init =", &fpr_init, len);
 	DDUMP("fpr_final =", &fpr_final, len);
 
-	if (copy_to_user((void __user *) ifa, &fpr_final, len))
+	if (emulate_store(ifa, &fpr_final, len, kernel_mode))
 		return -1;
 
 	/*
@@ -1295,7 +1314,6 @@ void
 ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 {
 	struct ia64_psr *ipsr = ia64_psr(regs);
-	mm_segment_t old_fs = get_fs();
 	unsigned long bundle[2];
 	unsigned long opcode;
 	const struct exception_table_entry *eh = NULL;
@@ -1304,6 +1322,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 		load_store_t insn;
 	} u;
 	int ret = -1;
+	bool kernel_mode = false;
 
 	if (ia64_psr(regs)->be) {
 		/* we don't support big-endian accesses */
@@ -1367,13 +1386,13 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 			if (unaligned_dump_stack)
 				dump_stack();
 		}
-		set_fs(KERNEL_DS);
+		kernel_mode = true;
 	}
 
 	DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
 	       regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
 
-	if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
+	if (emulate_load(bundle, regs->cr_iip, 16, kernel_mode))
 		goto failure;
 
 	/*
@@ -1467,7 +1486,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 	      case LDCCLR_IMM_OP:
 	      case LDCNC_IMM_OP:
 	      case LDCCLRACQ_IMM_OP:
-		ret = emulate_load_int(ifa, u.insn, regs);
+		ret = emulate_load_int(ifa, u.insn, regs, kernel_mode);
 		break;
 
 	      case ST_OP:
@@ -1478,7 +1497,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 		fallthrough;
 	      case ST_IMM_OP:
 	      case STREL_IMM_OP:
-		ret = emulate_store_int(ifa, u.insn, regs);
+		ret = emulate_store_int(ifa, u.insn, regs, kernel_mode);
 		break;
 
 	      case LDF_OP:
@@ -1486,21 +1505,21 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 	      case LDFA_OP:
 	      case LDFCCLR_OP:
 	      case LDFCNC_OP:
 		if (u.insn.x)
-			ret = emulate_load_floatpair(ifa, u.insn, regs);
+			ret = emulate_load_floatpair(ifa, u.insn, regs, kernel_mode);
 		else
-			ret = emulate_load_float(ifa, u.insn, regs);
+			ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
 		break;
 
 	      case LDF_IMM_OP:
 	      case LDFA_IMM_OP:
 	      case LDFCCLR_IMM_OP:
 	      case LDFCNC_IMM_OP:
-		ret = emulate_load_float(ifa, u.insn, regs);
+		ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
 		break;
 
 	      case STF_OP:
 	      case STF_IMM_OP:
-		ret = emulate_store_float(ifa, u.insn, regs);
+		ret = emulate_store_float(ifa, u.insn, regs, kernel_mode);
 		break;
 	      default:
@@ -1521,7 +1540,6 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 	DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
 
   done:
-	set_fs(old_fs);		/* restore original address limit */
 	return;
 
   failure:
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 791d4176e4a6..73d0db36edb6 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -608,17 +608,11 @@ void __init paging_init(void)
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
 
-#ifdef CONFIG_MEMORY_HOTPLUG
-pg_data_t *arch_alloc_nodedata(int nid)
+pg_data_t * __init arch_alloc_nodedata(int nid)
 {
 	unsigned long size = compute_pernodesize(nid);
 
-	return kzalloc(size, GFP_KERNEL);
-}
-
-void arch_free_nodedata(pg_data_t *pgdat)
-{
-	kfree(pgdat);
+	return memblock_alloc(size, SMP_CACHE_BYTES);
 }
 
 void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
@@ -626,7 +620,6 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
 	pgdat_list[update_node] = update_pgdat;
 	scatter_node_data();
 }
-#endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
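
A note on the uaccess.h hunk: with get_fs()/set_fs() removed there is no per-thread address limit left to compare against, so the new __access_ok() is written so the range check cannot wrap around. The sketch below is a stand-alone user-space model of that comparison only; FAKE_TASK_SIZE is a placeholder and the REGION_OFFSET()/RGN_MAP_LIMIT part of the real check is omitted.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for TASK_SIZE, not the real ia64 value. */
    #define FAKE_TASK_SIZE 0x8000000000000000UL

    static bool naive_ok(uint64_t addr, uint64_t size)
    {
    	/* addr + size can wrap to a small value and pass by accident. */
    	return addr + size <= FAKE_TASK_SIZE;
    }

    static bool overflow_safe_ok(uint64_t addr, uint64_t size)
    {
    	/* Mirrors the new check: both operands bounded before subtracting. */
    	return size <= FAKE_TASK_SIZE && addr <= FAKE_TASK_SIZE - size;
    }

    int main(void)
    {
    	uint64_t addr = 0xfffffffffffff000UL, size = 0x2000;

    	/* naive=1 (wrap-around slips through), safe=0 (rejected) */
    	printf("naive=%d safe=%d\n", naive_ok(addr, size),
    	       overflow_safe_ok(addr, size));
    	return 0;
    }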
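Similarly, the elf.h rename of fdesc.ip to fdesc.addr and the new func_desc_t typedef in sections.h let code selected by HAVE_FUNCTION_DESCRIPTORS read the entry address out of a descriptor through one common field name. A simplified, hypothetical user-space model of that lookup follows; the kernel's real helper additionally range-checks against __start_opd/__end_opd and uses a fault-tolerant read.

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the renamed layout in asm/elf.h: entry address first, then gp. */
    typedef struct fdesc {
    	uint64_t addr;
    	uint64_t gp;
    } func_desc_t;

    /* Simplified model: turn a descriptor pointer into the code entry point. */
    static void *dereference_descriptor(void *ptr)
    {
    	func_desc_t *desc = ptr;

    	return (void *)desc->addr;
    }

    int main(void)
    {
    	func_desc_t d = { .addr = 0x400000, .gp = 0x600000 };

    	printf("entry = %p\n", dereference_descriptor(&d));
    	return 0;
    }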