Diffstat (limited to 'arch/ia64/include')
-rw-r--r-- | arch/ia64/include/asm/acpi.h       |  2
-rw-r--r-- | arch/ia64/include/asm/atomic.h     |  1
-rw-r--r-- | arch/ia64/include/asm/cacheflush.h | 30
-rw-r--r-- | arch/ia64/include/asm/checksum.h   | 10
-rw-r--r-- | arch/ia64/include/asm/device.h     |  3
-rw-r--r-- | arch/ia64/include/asm/elf.h        |  2
-rw-r--r-- | arch/ia64/include/asm/hugetlb.h    |  5
-rw-r--r-- | arch/ia64/include/asm/pgalloc.h    | 28
-rw-r--r-- | arch/ia64/include/asm/pgtable.h    | 52
-rw-r--r-- | arch/ia64/include/asm/ptrace.h     |  1
-rw-r--r-- | arch/ia64/include/asm/sections.h   |  2
-rw-r--r-- | arch/ia64/include/asm/smp.h        | 35
-rw-r--r-- | arch/ia64/include/asm/tlb.h        |  1
-rw-r--r-- | arch/ia64/include/asm/uaccess.h    |  4
-rw-r--r-- | arch/ia64/include/asm/xtp.h        | 46
15 files changed, 70 insertions, 152 deletions
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index b66ba907019c..87927eb824cc 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -74,8 +74,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
 	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
 }

-#define acpi_unlazy_tlb(x)
-
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)	\
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 50440f3ddc43..f267d956458f 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -19,7 +19,6 @@

 #include <asm/barrier.h>

-#define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }

 #define atomic_read(v)		READ_ONCE((v)->counter)
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index 6d3478f8abc8..708c0fa5d975 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -12,44 +12,22 @@

 #include <asm/page.h>

-/*
- * Cache flushing routines.  This is the kind of stuff that can be very expensive, so try
- * to avoid them whenever possible.
- */
-
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_icache_page(vma,page)		do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page)			\
 do {						\
 	clear_bit(PG_arch_1, &(page)->flags);	\
 } while (0)

-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-extern void flush_icache_range (unsigned long start, unsigned long end);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
 extern void clflush_cache_range(void *addr, int size);

-
-#define flush_icache_user_range(vma, page, user_addr, len)					\
+#define flush_icache_user_page(vma, page, user_addr, len)					\
 do {												\
 	unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);	\
 	flush_icache_range(_addr, _addr + (len));						\
 } while (0)

-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
-     flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>

 #endif /* _ASM_IA64_CACHEFLUSH_H */
diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h
index 0ed18bc3f6cf..2a1c64629cdc 100644
--- a/arch/ia64/include/asm/checksum.h
+++ b/arch/ia64/include/asm/checksum.h
@@ -37,16 +37,6 @@ extern __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
  */
 extern __wsum csum_partial(const void *buff, int len, __wsum sum);

-/*
- * Same as csum_partial, but copies from src while it checksums.
- *
- * Here it is even more important to align src and dst on a 32-bit (or
- * even better 64-bit) boundary.
- */
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-					   int len, __wsum sum,
-					   int *errp);
-
 extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 					int len, __wsum sum);

diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 3eb397415381..918b198cd5bb 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -6,9 +6,6 @@
 #define _ASM_IA64_DEVICE_H

 struct dev_archdata {
-#ifdef CONFIG_IOMMU_API
-	void *iommu; /* hook for IOMMU specific extension */
-#endif
 };

 struct pdev_archdata {
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index c70bb9c11f52..6629301a2620 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -179,8 +179,6 @@ extern void ia64_init_addr_space (void);
 #define ELF_AR_SSD_OFFSET	(56 * sizeof(elf_greg_t))
 #define ELF_AR_END_OFFSET	(57 * sizeof(elf_greg_t))

-typedef unsigned long elf_fpxregset_t;
-
 typedef unsigned long elf_greg_t;
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];

diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h
index 36cc0396b214..7e46ebde8c0c 100644
--- a/arch/ia64/include/asm/hugetlb.h
+++ b/arch/ia64/include/asm/hugetlb.h
@@ -20,6 +20,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 	return (REGION_NUMBER(addr) == RGN_HPAGE ||
 		REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
 }
+#define is_hugepage_only_range is_hugepage_only_range

 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -27,10 +28,6 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }

-static inline void arch_clear_hugepage_flags(struct page *page)
-{
-}
-
 #include <asm-generic/hugetlb.h>

 #endif /* _ASM_IA64_HUGETLB_H */
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index f4c491044882..9601cfe83c94 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -29,27 +29,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }

-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	free_page((unsigned long)pgd);
-}
-
 #if CONFIG_PGTABLE_LEVELS == 4
 static inline void
-pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud)
 {
-	pgd_val(*pgd_entry) = __pa(pud);
+	p4d_val(*p4d_entry) = __pa(pud);
 }

-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return (pud_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
-{
-	free_page((unsigned long)pud);
-}
 #define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
 #endif /* CONFIG_PGTABLE_LEVELS == 4 */

@@ -59,16 +45,6 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
 	pud_val(*pud_entry) = __pa(pmd);
 }

-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return (pmd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
-	free_page((unsigned long)pmd);
-}
-
 #define __pmd_free_tlb(tlb, pmd, address)	pmd_free((tlb)->mm, pmd)

 static inline void
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 0e7b645b76c6..779b6972aa84 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -283,12 +283,12 @@ extern unsigned long VMALLOC_END;
 #define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))

 #if CONFIG_PGTABLE_LEVELS == 4
-#define pgd_none(pgd)			(!pgd_val(pgd))
-#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
-#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
-#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
-#define pgd_page_vaddr(pgd)		((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
-#define pgd_page(pgd)			virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
+#define p4d_none(p4d)			(!p4d_val(p4d))
+#define p4d_bad(p4d)			(!ia64_phys_addr_valid(p4d_val(p4d)))
+#define p4d_present(p4d)		(p4d_val(p4d) != 0UL)
+#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)
+#define p4d_page_vaddr(p4d)		((unsigned long) __va(p4d_val(p4d) & _PFN_MASK))
+#define p4d_page(p4d)			virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
 #endif

 /*
@@ -364,17 +364,14 @@ pgd_index (unsigned long address)

 	return (region << (PAGE_SHIFT - 6)) | l1index;
 }
+#define pgd_index pgd_index

-/* The offset in the 1-level directory is given by the 3 region bits
-   (61..63) and the level-1 bits.  */
-static inline pgd_t*
-pgd_offset (const struct mm_struct *mm, unsigned long address)
-{
-	return mm->pgd + pgd_index(address);
-}
-
-/* In the kernel's mapped region we completely ignore the region number
-   (since we know it's in region number 5). */
+/*
+ * In the kernel's mapped region we know everything is in region number 5, so
+ * as an optimisation its PGD already points to the area for that region.
+ * However, this also means that we cannot use pgd_index() and we must
+ * never add the region here.
+ */
 #define pgd_offset_k(addr) \
 	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

@@ -383,25 +380,6 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
    here.  */
 #define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)

-#if CONFIG_PGTABLE_LEVELS == 4
-/* Find an entry in the second-level page table.. */
-#define pud_offset(dir,addr) \
-	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-#endif
-
-/* Find an entry in the third-level page table.. */
-#define pmd_offset(dir,addr) \
-	((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-/*
- * Find an entry in the third-level page table.  This looks more complicated than it
- * should be because some platforms place page tables in high memory.
- */
-#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_unmap(pte)			do { } while (0)
-
 /* atomic versions of the some PTE manipulations: */

 static inline int
@@ -580,10 +558,8 @@ extern struct page *zero_page_memmap_ptr;


 #if CONFIG_PGTABLE_LEVELS == 3
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #endif
-#include <asm-generic/5level-fixup.h>
-#include <asm-generic/pgtable.h>
+#include <asm-generic/pgtable-nop4d.h>

 #endif /* _ASM_IA64_PGTABLE_H */
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index 7ff574d56429..b3aa46090101 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -114,7 +114,6 @@ static inline long regs_return_value(struct pt_regs *regs)
   struct task_struct;			/* forward decl */
   struct unw_frame_info;		/* forward decl */

-  extern void ia64_do_show_stack (struct unw_frame_info *, void *);
   extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
 					      unsigned long *);
   extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index cea15f2dd38d..3a033d2008b3 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -35,7 +35,7 @@ static inline void *dereference_function_descriptor(void *ptr)
 	struct fdesc *desc = ptr;
 	void *p;

-	if (!probe_kernel_address(&desc->ip, p))
+	if (!get_kernel_nofault(p, (void *)&desc->ip))
 		ptr = p;
 	return ptr;
 }
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 7847ae40a181..aa92234c0142 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -18,7 +18,6 @@
 #include <linux/bitops.h>
 #include <linux/irqreturn.h>

-#include <asm/io.h>
 #include <asm/param.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
@@ -44,11 +43,6 @@ ia64_get_lid (void)

 #ifdef CONFIG_SMP

-#define XTP_OFFSET		0x1e0008
-
-#define SMP_IRQ_REDIRECTION	(1 << 0)
-#define SMP_IPI_REDIRECTION	(1 << 1)
-
 #define raw_smp_processor_id() (current_thread_info()->cpu)

 extern struct smp_boot_data {
@@ -62,7 +56,6 @@ extern cpumask_t cpu_core_map[NR_CPUS];
 DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
 extern void __iomem *ipi_base_addr;
-extern unsigned char smp_int_redirect;
 extern volatile int ia64_cpu_to_sapicid[];
 #define cpu_physical_id(i)	ia64_cpu_to_sapicid[i]

@@ -84,34 +77,6 @@ cpu_logical_id (int cpuid)
 	return i;
 }

-/*
- * XTP control functions:
- *	min_xtp   : route all interrupts to this CPU
- *	normal_xtp: nominal XTP value
- *	max_xtp   : never deliver interrupts to this CPU.
- */
-
-static inline void
-min_xtp (void)
-{
-	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-		writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
-}
-
-static inline void
-normal_xtp (void)
-{
-	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-		writeb(0x08, ipi_base_addr + XTP_OFFSET);	/* XTP normal */
-}
-
-static inline void
-max_xtp (void)
-{
-	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-		writeb(0x0f, ipi_base_addr + XTP_OFFSET);	/* Set XTP to max */
-}
-
 /* Upping and downing of CPUs */
 extern int __cpu_disable (void);
 extern void __cpu_die (unsigned int cpu);
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index f1f257d632b3..8d9da6f08a62 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -42,7 +42,6 @@
 #include <linux/pagemap.h>
 #include <linux/swap.h>

-#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>

diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 5c7e79eccaee..179243c3dfc7 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -37,7 +37,7 @@
 #include <linux/page-flags.h>

 #include <asm/intrinsics.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 #include <asm/io.h>
 #include <asm/extable.h>

@@ -50,7 +50,7 @@
 #define get_fs()  (current_thread_info()->addr_limit)
 #define set_fs(x) (current_thread_info()->addr_limit = (x))

-#define segment_eq(a, b)	((a).seg == (b).seg)
+#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)

 /*
  * When accessing user memory, we need to make sure the entire area really is in
diff --git a/arch/ia64/include/asm/xtp.h b/arch/ia64/include/asm/xtp.h
new file mode 100644
index 000000000000..5bf1d70ad860
--- /dev/null
+++ b/arch/ia64/include/asm/xtp.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_XTP_H
+#define _ASM_IA64_XTP_H
+
+#include <asm/io.h>
+
+#ifdef CONFIG_SMP
+
+#define XTP_OFFSET		0x1e0008
+
+#define SMP_IRQ_REDIRECTION	(1 << 0)
+#define SMP_IPI_REDIRECTION	(1 << 1)
+
+extern unsigned char smp_int_redirect;
+
+/*
+ * XTP control functions:
+ *	min_xtp   : route all interrupts to this CPU
+ *	normal_xtp: nominal XTP value
+ *	max_xtp   : never deliver interrupts to this CPU.
+ */
+
+static inline void
+min_xtp (void)
+{
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
+}
+
+static inline void
+normal_xtp (void)
+{
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		writeb(0x08, ipi_base_addr + XTP_OFFSET);	/* XTP normal */
+}
+
+static inline void
+max_xtp (void)
+{
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		writeb(0x0f, ipi_base_addr + XTP_OFFSET);	/* Set XTP to max */
+}
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_IA64_XTP_H */
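A pattern that recurs in the hunks above is the self-referential define: additions such as "#define pgd_index pgd_index", "#define flush_icache_range flush_icache_range" and "#define is_hugepage_only_range is_hugepage_only_range" exist so that the generic headers being pulled in (asm-generic/cacheflush.h, asm-generic/hugetlb.h, linux/pgtable.h) can test the name with #ifndef and install their fallback only when the architecture has not supplied its own version. The sketch below is a self-contained userspace illustration of that override mechanism under the same convention; it is not kernel code, and arch_page_size and the 16K/4K values are invented for the demo.

/*
 * Standalone illustration of the "#define foo foo" override convention.
 * The "arch" block mimics an architecture header; the "generic" block
 * mimics an asm-generic fallback guarded by #ifndef.
 */
#include <stdio.h>

/* arch side: a real implementation plus a define announcing its presence */
static inline long arch_page_size(void)
{
        return 16384;   /* stand-in for an arch-specific value (ia64-style 16K) */
}
#define arch_page_size arch_page_size

/* generic side: compiled in only when the arch did not define the macro */
#ifndef arch_page_size
static inline long arch_page_size(void)
{
        return 4096;    /* generic default */
}
#endif

int main(void)
{
        /* prints 16384: the #define above suppressed the generic fallback */
        printf("%ld\n", arch_page_size());
        return 0;
}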