Diffstat (limited to 'arch/parisc')
-rw-r--r--              | arch/parisc/Kconfig                    |   4
-rw-r--r--              | arch/parisc/Makefile                   |  11
-rw-r--r--              | arch/parisc/include/asm/assembly.h     |  12
-rw-r--r--              | arch/parisc/include/asm/cache.h        |   1
-rw-r--r--              | arch/parisc/include/asm/cacheflush.h   |  31
-rw-r--r--              | arch/parisc/include/asm/compat.h       |  45
-rw-r--r--              | arch/parisc/include/asm/fixmap.h       |  25
-rw-r--r--              | arch/parisc/include/asm/hugetlb.h      |   5
-rw-r--r--              | arch/parisc/include/asm/page.h         |   6
-rw-r--r--              | arch/parisc/include/asm/timex.h        |   3
-rw-r--r--              | arch/parisc/include/asm/unistd.h       |   3
-rw-r--r--              | arch/parisc/include/uapi/asm/socket.h  |   2
-rwxr-xr-x [-rw-r--r--] | arch/parisc/install.sh                 |  28
-rw-r--r--              | arch/parisc/kernel/cache.c             | 329
-rw-r--r--              | arch/parisc/kernel/entry.S             |  22
-rw-r--r--              | arch/parisc/kernel/pacache.S           |  94
-rw-r--r--              | arch/parisc/kernel/patch.c             |  25
-rw-r--r--              | arch/parisc/kernel/process.c           |   4
-rw-r--r--              | arch/parisc/mm/fault.c                 |   6
-rw-r--r--              | arch/parisc/mm/init.c                  |   6
20 files changed, 336 insertions(+), 326 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index bd22578859d0..5f2448dc5a2b 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -332,10 +332,6 @@ config COMPAT def_bool y depends on 64BIT -config SYSVIPC_COMPAT - def_bool y - depends on COMPAT && SYSVIPC - config AUDIT_ARCH def_bool y diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 7583fc39ab2d..aca1710fd658 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -184,12 +184,11 @@ vdso_install: $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso $@ $(if $(CONFIG_COMPAT_VDSO), \ $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 $@) -install: - $(CONFIG_SHELL) $(srctree)/arch/parisc/install.sh \ - $(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)" -zinstall: - $(CONFIG_SHELL) $(srctree)/arch/parisc/install.sh \ - $(KERNELRELEASE) vmlinuz System.map "$(INSTALL_PATH)" + +install: KBUILD_IMAGE := vmlinux +zinstall: KBUILD_IMAGE := vmlinuz +install zinstall: + $(call cmd,install) CLEAN_FILES += lifimage MRPROPER_FILES += palo.conf diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index ea0cb318b13d..0f0d4a496fef 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -143,7 +143,7 @@ depd,z \r, 63-(\sa), 64-(\sa), \t .endm - /* Shift Right - note the r and t can NOT be the same! */ + /* Shift Right for 32-bit. Clobbers upper 32-bit on PA2.0. */ .macro shr r, sa, t extru \r, 31-(\sa), 32-(\sa), \t .endm @@ -174,6 +174,16 @@ #endif .endm + /* The depw instruction leaves the most significant 32 bits of the + * target register in an undefined state on PA 2.0 systems. */ + .macro dep_safe i, p, len, t +#ifdef CONFIG_64BIT + depd \i, 32+(\p), \len, \t +#else + depw \i, \p, \len, \t +#endif + .endm + /* load 32-bit 'value' into 'reg' compensating for the ldil * sign-extension when running in wide mode. * WARNING!! neither 'value' nor 'reg' can be expressions diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index 5032e758594e..e23d06b51a20 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -54,6 +54,7 @@ void parisc_setup_cache_timing(void); #define asm_io_sync() asm volatile("sync" \ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory") +#define asm_syncdma() asm volatile("syncdma" :::"memory") #endif /* ! 
__ASSEMBLY__ */ diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index e8b4a03343d3..8d03b3b26229 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -59,20 +59,12 @@ void flush_dcache_page(struct page *page); flush_kernel_icache_range_asm(s,e); \ } while (0) -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ -do { \ - flush_cache_page(vma, vaddr, page_to_pfn(page)); \ - memcpy(dst, src, len); \ - flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \ -} while (0) - -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ -do { \ - flush_cache_page(vma, vaddr, page_to_pfn(page)); \ - memcpy(dst, src, len); \ -} while (0) - -void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn); +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long user_vaddr, void *dst, void *src, int len); +void copy_from_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long user_vaddr, void *dst, void *src, int len); +void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, + unsigned long pfn); void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); @@ -80,16 +72,7 @@ void flush_cache_range(struct vm_area_struct *vma, void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); #define ARCH_HAS_FLUSH_ANON_PAGE -static inline void -flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) -{ - if (PageAnon(page)) { - flush_tlb_page(vma, vmaddr); - preempt_disable(); - flush_dcache_page_asm(page_to_phys(page), vmaddr); - preempt_enable(); - } -} +void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr); #define ARCH_HAS_FLUSH_ON_KUNMAP static inline void kunmap_flush_on_unmap(void *addr) diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index c04f5a637c39..339d1b833fa7 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -11,16 +11,16 @@ #define compat_mode_t compat_mode_t typedef u16 compat_mode_t; +#define compat_ipc_pid_t compat_ipc_pid_t +typedef u16 compat_ipc_pid_t; + +#define compat_ipc64_perm compat_ipc64_perm + #include <asm-generic/compat.h> -#define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "parisc\0\0" -typedef u32 __compat_uid_t; -typedef u32 __compat_gid_t; -typedef u32 compat_dev_t; typedef u16 compat_nlink_t; -typedef u16 compat_ipc_pid_t; struct compat_stat { compat_dev_t st_dev; /* dev_t is 32 bits on parisc */ @@ -53,37 +53,6 @@ struct compat_stat { u32 st_spare4[3]; }; -struct compat_flock { - short l_type; - short l_whence; - compat_off_t l_start; - compat_off_t l_len; - compat_pid_t l_pid; -}; - -struct compat_flock64 { - short l_type; - short l_whence; - compat_loff_t l_start; - compat_loff_t l_len; - compat_pid_t l_pid; -}; - -struct compat_statfs { - s32 f_type; - s32 f_bsize; - s32 f_blocks; - s32 f_bfree; - s32 f_bavail; - s32 f_files; - s32 f_ffree; - __kernel_fsid_t f_fsid; - s32 f_namelen; - s32 f_frsize; - s32 f_flags; - s32 f_spare[4]; -}; - struct compat_sigcontext { compat_int_t sc_flags; compat_int_t sc_gr[32]; /* PSW in sc_gr[0] */ @@ -93,10 +62,6 @@ struct compat_sigcontext { compat_int_t sc_sar; /* cr11 */ }; -#define COMPAT_RLIM_INFINITY 0xffffffff - -#define COMPAT_OFF_T_MAX 0x7fffffff - struct compat_ipc64_perm { compat_key_t key; __compat_uid_t uid; diff --git 
a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index e480b2c05407..5cd80ce1163a 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h @@ -9,12 +9,27 @@ * * All of the values in this file must be <4GB (because of assembly * loading restrictions). If you place this region anywhere above - * __PAGE_OFFSET, you must adjust the memory map accordingly */ + * __PAGE_OFFSET, you must adjust the memory map accordingly + */ -/* The alias region is used in kernel space to do copy/clear to or - * from areas congruently mapped with user space. It is 8MB large - * and must be 16MB aligned */ -#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - 16*1024*1024) +/* + * The tmpalias region is used in kernel space to copy/clear/flush data + * from pages congruently mapped with user space. It is comprised of + * a pair regions. The size of these regions is determined by the largest + * cache aliasing boundary for machines that support equivalent aliasing. + * + * The c3750 with PA8700 processor returns an alias value of 11. This + * indicates that it has an alias boundary of 4 MB. It also supports + * non-equivalent aliasing without a performance penalty. + * + * Machines with PA8800/PA8900 processors return an alias value of 0. + * This indicates the alias boundary is unknown and may be larger than + * 16 MB. Non-equivalent aliasing is not supported. + * + * Here we assume the maximum alias boundary is 4 MB. + */ +#define TMPALIAS_SIZE_BITS 22 /* 4 MB */ +#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - (2 << TMPALIAS_SIZE_BITS)) #define FIXMAP_SIZE (FIX_BITMAP_COUNT << PAGE_SHIFT) #define FIXMAP_START (TMPALIAS_MAP_START - FIXMAP_SIZE) diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h index a69cf9efb0c1..f7f078c2872c 100644 --- a/arch/parisc/include/asm/hugetlb.h +++ b/arch/parisc/include/asm/hugetlb.h @@ -28,9 +28,10 @@ static inline int prepare_hugepage_range(struct file *file, } #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH -static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep) +static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) { + return *ptep; } #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 0561568f7b48..6faaaa3ebe9b 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -26,12 +26,14 @@ #define copy_page(to, from) copy_page_asm((void *)(to), (void *)(from)) struct page; +struct vm_area_struct; void clear_page_asm(void *page); void copy_page_asm(void *to, void *from); #define clear_user_page(vto, vaddr, page) clear_page_asm(vto) -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg); +void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, + struct vm_area_struct *vma); +#define __HAVE_ARCH_COPY_USER_HIGHPAGE /* * These are used to make use of C type-checking.. 
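The fixmap.h hunk above sizes the tmpalias window as two 4 MB halves placed directly below __PAGE_OFFSET. A minimal stand-alone sketch of that layout follows; it is illustrative only and not part of the patch, and it assumes the 32-bit parisc __PAGE_OFFSET value of 0x10000000, which the patch itself does not spell out.

#include <stdio.h>

#define __PAGE_OFFSET        0x10000000UL                 /* assumed 32-bit parisc value */
#define TMPALIAS_SIZE_BITS   22                           /* 4 MB per half, as in the patch */
#define TMPALIAS_MAP_START   (__PAGE_OFFSET - (2UL << TMPALIAS_SIZE_BITS))

int main(void)
{
	unsigned long to_half   = TMPALIAS_MAP_START;                                  /* 'to' region   */
	unsigned long from_half = TMPALIAS_MAP_START + (1UL << TMPALIAS_SIZE_BITS);    /* 'from' region */

	printf("tmpalias window: 0x%08lx - 0x%08lx (8 MB total)\n",
	       TMPALIAS_MAP_START, __PAGE_OFFSET - 1);
	printf("'to'   half starts at 0x%08lx\n", to_half);
	printf("'from' half starts at 0x%08lx\n", from_half);
	return 0;
}

The two halves differ only in the 4 MB address bit, which is what the entry.S hunk below tests with extrw,u at PA bit position 31-TMPALIAS_SIZE_BITS to decide whether a tmpalias fault belongs to the 'from' or the 'to' mapping.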
diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h index 06b510f8172e..b4622cb06a75 100644 --- a/arch/parisc/include/asm/timex.h +++ b/arch/parisc/include/asm/timex.h @@ -13,9 +13,10 @@ typedef unsigned long cycles_t; -static inline cycles_t get_cycles (void) +static inline cycles_t get_cycles(void) { return mfctl(16); } +#define get_cycles get_cycles #endif diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index 7708a5806f09..e38f9a90ac15 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -142,7 +142,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ } #define __ARCH_WANT_NEW_STAT -#define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME @@ -156,7 +155,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP #define __ARCH_WANT_SYS_NICE -#define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_FORK @@ -164,6 +162,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_CLONE3 #define __ARCH_WANT_COMPAT_SYS_SENDFILE +#define __ARCH_WANT_COMPAT_STAT #ifdef CONFIG_64BIT #define __ARCH_WANT_SYS_TIME diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 654061e0964e..f486d3dfb6bb 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -127,6 +127,8 @@ #define SO_TXREHASH 0x4048 +#define SO_RCVMARK 0x4049 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh index 70d3cffb0251..933d031c249a 100644..100755 --- a/arch/parisc/install.sh +++ b/arch/parisc/install.sh @@ -1,7 +1,5 @@ #!/bin/sh # -# arch/parisc/install.sh, derived from arch/i386/boot/install.sh -# # This file is subject to the terms and conditions of the GNU General Public # License. See the file "COPYING" in the main directory of this archive # for more details. @@ -17,32 +15,6 @@ # $2 - kernel image file # $3 - kernel map file # $4 - default install path (blank if root directory) -# - -verify () { - if [ ! -f "$1" ]; then - echo "" 1>&2 - echo " *** Missing file: $1" 1>&2 - echo ' *** You need to run "make" before "make install".' 1>&2 - echo "" 1>&2 - exit 1 - fi -} - -# Make sure the files actually exist - -verify "$2" -verify "$3" - -# User may have a custom install script - -if [ -n "${INSTALLKERNEL}" ]; then - if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi - if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi - if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi -fi - -# Default install if [ "$(basename $2)" = "vmlinuz" ]; then # Compressed install diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index e7911225a4f8..c8a11fcecf4c 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -27,6 +27,7 @@ #include <asm/processor.h> #include <asm/sections.h> #include <asm/shmparam.h> +#include <asm/mmu_context.h> int split_tlb __ro_after_init; int dcache_stride __ro_after_init; @@ -91,7 +92,7 @@ static inline void flush_data_cache(void) } -/* Virtual address of pfn. */ +/* Kernel virtual address of pfn. 
*/ #define pfn_va(pfn) __va(PFN_PHYS(pfn)) void @@ -124,11 +125,13 @@ show_cache_info(struct seq_file *m) cache_info.ic_size/1024 ); if (cache_info.dc_loop != 1) snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop); - seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n", + seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n", cache_info.dc_size/1024, (cache_info.dc_conf.cc_wt ? "WT":"WB"), (cache_info.dc_conf.cc_sh ? ", shared I/D":""), - ((cache_info.dc_loop == 1) ? "direct mapped" : buf)); + ((cache_info.dc_loop == 1) ? "direct mapped" : buf), + cache_info.dc_conf.cc_alias + ); seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n", cache_info.it_size, cache_info.dt_size, @@ -324,25 +327,81 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, preempt_enable(); } -static inline void -__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, - unsigned long physaddr) +static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr) { - if (!static_branch_likely(&parisc_has_cache)) - return; + unsigned long flags, space, pgd, prot; +#ifdef CONFIG_TLB_PTLOCK + unsigned long pgd_lock; +#endif + + vmaddr &= PAGE_MASK; + preempt_disable(); - purge_dcache_page_asm(physaddr, vmaddr); + + /* Set context for flush */ + local_irq_save(flags); + prot = mfctl(8); + space = mfsp(SR_USER); + pgd = mfctl(25); +#ifdef CONFIG_TLB_PTLOCK + pgd_lock = mfctl(28); +#endif + switch_mm_irqs_off(NULL, vma->vm_mm, NULL); + local_irq_restore(flags); + + flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE); if (vma->vm_flags & VM_EXEC) - flush_icache_page_asm(physaddr, vmaddr); + flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE); + flush_tlb_page(vma, vmaddr); + + /* Restore previous context */ + local_irq_save(flags); +#ifdef CONFIG_TLB_PTLOCK + mtctl(pgd_lock, 28); +#endif + mtctl(pgd, 25); + mtsp(space, SR_USER); + mtctl(prot, 8); + local_irq_restore(flags); + preempt_enable(); } +static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr) +{ + pte_t *ptep = NULL; + pgd_t *pgd = mm->pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + if (!pgd_none(*pgd)) { + p4d = p4d_offset(pgd, addr); + if (!p4d_none(*p4d)) { + pud = pud_offset(p4d, addr); + if (!pud_none(*pud)) { + pmd = pmd_offset(pud, addr); + if (!pmd_none(*pmd)) + ptep = pte_offset_map(pmd, addr); + } + } + } + return ptep; +} + +static inline bool pte_needs_flush(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)) + == (_PAGE_PRESENT | _PAGE_ACCESSED); +} + void flush_dcache_page(struct page *page) { struct address_space *mapping = page_mapping_file(page); struct vm_area_struct *mpnt; unsigned long offset; unsigned long addr, old_addr = 0; + unsigned long count = 0; pgoff_t pgoff; if (mapping && !mapping_mapped(mapping)) { @@ -357,33 +416,52 @@ void flush_dcache_page(struct page *page) pgoff = page->index; - /* We have carefully arranged in arch_get_unmapped_area() that + /* + * We have carefully arranged in arch_get_unmapped_area() that * *any* mappings of a file are always congruently mapped (whether * declared as MAP_PRIVATE or MAP_SHARED), so we only need - * to flush one address here for them all to become coherent */ - + * to flush one address here for them all to become coherent + * on machines that support equivalent aliasing + */ flush_dcache_mmap_lock(mapping); vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; addr = mpnt->vm_start + offset; + if 
(parisc_requires_coherency()) { + pte_t *ptep; - /* The TLB is the engine of coherence on parisc: The - * CPU is entitled to speculate any page with a TLB - * mapping, so here we kill the mapping then flush the - * page along a special flush only alias mapping. - * This guarantees that the page is no-longer in the - * cache for any process and nor may it be - * speculatively read in (until the user or kernel - * specifically accesses it, of course) */ - - flush_tlb_page(mpnt, addr); - if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) - != (addr & (SHM_COLOUR - 1))) { - __flush_cache_page(mpnt, addr, page_to_phys(page)); - if (parisc_requires_coherency() && old_addr) - printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file); - old_addr = addr; + ptep = get_ptep(mpnt->vm_mm, addr); + if (ptep && pte_needs_flush(*ptep)) + flush_user_cache_page(mpnt, addr); + } else { + /* + * The TLB is the engine of coherence on parisc: + * The CPU is entitled to speculate any page + * with a TLB mapping, so here we kill the + * mapping then flush the page along a special + * flush only alias mapping. This guarantees that + * the page is no-longer in the cache for any + * process and nor may it be speculatively read + * in (until the user or kernel specifically + * accesses it, of course) + */ + flush_tlb_page(mpnt, addr); + if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) + != (addr & (SHM_COLOUR - 1))) { + __flush_cache_page(mpnt, addr, page_to_phys(page)); + /* + * Software is allowed to have any number + * of private mappings to a page. + */ + if (!(mpnt->vm_flags & VM_SHARED)) + continue; + if (old_addr) + pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", + old_addr, addr, mpnt->vm_file); + old_addr = addr; + } } + WARN_ON(++count == 4096); } flush_dcache_mmap_unlock(mapping); } @@ -403,7 +481,7 @@ void __init parisc_setup_cache_timing(void) { unsigned long rangetime, alltime; unsigned long size; - unsigned long threshold; + unsigned long threshold, threshold2; alltime = mfctl(16); flush_data_cache(); @@ -417,11 +495,16 @@ void __init parisc_setup_cache_timing(void) printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", alltime, size, rangetime); - threshold = L1_CACHE_ALIGN(size * alltime / rangetime); - if (threshold > cache_info.dc_size) - threshold = cache_info.dc_size; - if (threshold) - parisc_cache_flush_threshold = threshold; + threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime)); + pr_info("Calculated flush threshold is %lu KiB\n", + threshold/1024); + + /* + * The threshold computed above isn't very reliable. The following + * heuristic works reasonably well on c8000/rp3440. + */ + threshold2 = cache_info.dc_size * num_online_cpus(); + parisc_cache_flush_threshold = threshold2; printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", parisc_cache_flush_threshold/1024); @@ -477,19 +560,47 @@ void flush_kernel_dcache_page_addr(void *addr) } EXPORT_SYMBOL(flush_kernel_dcache_page_addr); -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg) +static void flush_cache_page_if_present(struct vm_area_struct *vma, + unsigned long vmaddr, unsigned long pfn) { - /* Copy using kernel mapping. No coherency is needed (all in - kunmap) for the `to' page. However, the `from' page needs to - be flushed through a mapping equivalent to the user mapping - before it can be accessed through the kernel mapping. 
*/ - preempt_disable(); - flush_dcache_page_asm(__pa(vfrom), vaddr); - copy_page_asm(vto, vfrom); - preempt_enable(); + pte_t *ptep = get_ptep(vma->vm_mm, vmaddr); + + /* + * The pte check is racy and sometimes the flush will trigger + * a non-access TLB miss. Hopefully, the page has already been + * flushed. + */ + if (ptep && pte_needs_flush(*ptep)) + flush_cache_page(vma, vmaddr, pfn); +} + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + void *kto, *kfrom; + + kfrom = kmap_local_page(from); + kto = kmap_local_page(to); + flush_cache_page_if_present(vma, vaddr, page_to_pfn(from)); + copy_page_asm(kto, kfrom); + kunmap_local(kto); + kunmap_local(kfrom); +} + +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long user_vaddr, void *dst, void *src, int len) +{ + flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page)); + memcpy(dst, src, len); + flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); +} + +void copy_from_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long user_vaddr, void *dst, void *src, int len) +{ + flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page)); + memcpy(dst, src, len); } -EXPORT_SYMBOL(copy_user_page); /* __flush_tlb_range() * @@ -520,92 +631,105 @@ int __flush_tlb_range(unsigned long sid, unsigned long start, return 0; } -static inline unsigned long mm_total_size(struct mm_struct *mm) +static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - struct vm_area_struct *vma; - unsigned long usize = 0; - - for (vma = mm->mmap; vma; vma = vma->vm_next) - usize += vma->vm_end - vma->vm_start; - return usize; -} - -static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr) -{ - pte_t *ptep = NULL; + unsigned long addr, pfn; + pte_t *ptep; - if (!pgd_none(*pgd)) { - p4d_t *p4d = p4d_offset(pgd, addr); - if (!p4d_none(*p4d)) { - pud_t *pud = pud_offset(p4d, addr); - if (!pud_none(*pud)) { - pmd_t *pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) - ptep = pte_offset_map(pmd, addr); + for (addr = start; addr < end; addr += PAGE_SIZE) { + /* + * The vma can contain pages that aren't present. Although + * the pte search is expensive, we need the pte to find the + * page pfn and to check whether the page should be flushed. + */ + ptep = get_ptep(vma->vm_mm, addr); + if (ptep && pte_needs_flush(*ptep)) { + if (parisc_requires_coherency()) { + flush_user_cache_page(vma, addr); + } else { + pfn = pte_pfn(*ptep); + if (WARN_ON(!pfn_valid(pfn))) + return; + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); } } } - return ptep; } -static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline unsigned long mm_total_size(struct mm_struct *mm) { - unsigned long addr, pfn; - pte_t *ptep; + struct vm_area_struct *vma; + unsigned long usize = 0; - for (addr = start; addr < end; addr += PAGE_SIZE) { - ptep = get_ptep(mm->pgd, addr); - if (ptep) { - pfn = pte_pfn(*ptep); - flush_cache_page(vma, addr, pfn); - } - } + for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next) + usize += vma->vm_end - vma->vm_start; + return usize; } void flush_cache_mm(struct mm_struct *mm) { struct vm_area_struct *vma; - /* Flushing the whole cache on each cpu takes forever on - rp3440, etc. So, avoid it if the mm isn't too big. 
*/ - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - mm_total_size(mm) >= parisc_cache_flush_threshold) { - if (mm->context.space_id) - flush_tlb_all(); + /* + * Flushing the whole cache on each cpu takes forever on + * rp3440, etc. So, avoid it if the mm isn't too big. + * + * Note that we must flush the entire cache on machines + * with aliasing caches to prevent random segmentation + * faults. + */ + if (!parisc_requires_coherency() + || mm_total_size(mm) >= parisc_cache_flush_threshold) { + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) + return; + flush_tlb_all(); flush_cache_all(); return; } + /* Flush mm */ for (vma = mm->mmap; vma; vma = vma->vm_next) - flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end); + flush_cache_pages(vma, vma->vm_start, vma->vm_end); } -void flush_cache_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - end - start >= parisc_cache_flush_threshold) { - if (vma->vm_mm->context.space_id) - flush_tlb_range(vma, start, end); + if (!parisc_requires_coherency() + || end - start >= parisc_cache_flush_threshold) { + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) + return; + flush_tlb_range(vma, start, end); flush_cache_all(); return; } - flush_cache_pages(vma, vma->vm_mm, start, end); + flush_cache_pages(vma, start, end); } -void -flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) +void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { - if (pfn_valid(pfn)) { - if (likely(vma->vm_mm->context.space_id)) { - flush_tlb_page(vma, vmaddr); - __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); - } else { - __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn)); - } + if (WARN_ON(!pfn_valid(pfn))) + return; + if (parisc_requires_coherency()) + flush_user_cache_page(vma, vmaddr); + else + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); +} + +void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) +{ + if (!PageAnon(page)) + return; + + if (parisc_requires_coherency()) { + flush_user_cache_page(vma, vmaddr); + return; } + + flush_tlb_page(vma, vmaddr); + preempt_disable(); + flush_dcache_page_asm(page_to_phys(page), vmaddr); + preempt_enable(); } void flush_kernel_vmap_range(void *vaddr, int size) @@ -630,6 +754,9 @@ void invalidate_kernel_vmap_range(void *vaddr, int size) unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; + /* Ensure DMA is complete */ + asm_syncdma(); + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && (unsigned long)size >= parisc_cache_flush_threshold) { flush_tlb_kernel_range(start, end); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index ecf50159359e..df8102fb435f 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -554,8 +554,9 @@ extrd,s \pte,63,25,\pte .endm - /* The alias region is an 8MB aligned 16MB to do clear and - * copy user pages at addresses congruent with the user + /* The alias region is comprised of a pair of 4 MB regions + * aligned to 8 MB. It is used to clear/copy/flush user pages + * using kernel virtual addresses congruent with the user * virtual address. 
* * To use the alias page, you set %r26 up with the to TLB @@ -565,13 +566,8 @@ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype cmpib,COND(<>),n 0,\spc,\fault ldil L%(TMPALIAS_MAP_START),\tmp -#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000) - /* on LP64, ldi will sign extend into the upper 32 bits, - * which is behaviour we don't want */ - depdi 0,31,32,\tmp -#endif copy \va,\tmp1 - depi 0,31,23,\tmp1 + depi_safe 0,31,TMPALIAS_SIZE_BITS+1,\tmp1 cmpb,COND(<>),n \tmp,\tmp1,\fault mfctl %cr19,\tmp /* iir */ /* get the opcode (first six bits) into \tmp */ @@ -604,13 +600,13 @@ * OK, it is in the temp alias region, check whether "from" or "to". * Check "subtle" note in pacache.S re: r23/r26. */ -#ifdef CONFIG_64BIT - extrd,u,*= \va,41,1,%r0 -#else - extrw,u,= \va,9,1,%r0 -#endif + extrw,u,= \va,31-TMPALIAS_SIZE_BITS,1,%r0 or,COND(tr) %r23,%r0,\pte or %r26,%r0,\pte + + /* convert phys addr in \pte (from r23 or r26) to tlb insert format */ + SHRREG \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte + depi_safe _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte .endm diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index b4c3f01e2399..9a0018f1f42c 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -300,7 +300,6 @@ fdoneloop2: fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */ fdsync: - syncdma sync mtsm %r22 /* restore I-bit */ 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) @@ -488,6 +487,8 @@ ENDPROC_CFI(copy_page_asm) * parisc chip designers that there will not ever be a parisc * chip with a larger alias boundary (Never say never :-) ). * + * Yah, what about the PA8800 and PA8900 processors? + * * Subtle: the dtlb miss handlers support the temp alias region by * "knowing" that if a dtlb miss happens within the temp alias * region it must have occurred while in clear_user_page. Since @@ -499,19 +500,10 @@ ENDPROC_CFI(copy_page_asm) * miss on the translation, the dtlb miss handler inserts the * translation into the tlb using these values: * - * %r26 physical page (shifted for tlb insert) of "to" translation - * %r23 physical page (shifted for tlb insert) of "from" translation + * %r26 physical address of "to" translation + * %r23 physical address of "from" translation */ - /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ - #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) - .macro convert_phys_for_tlb_insert20 phys - extrd,u \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys -#if _PAGE_SIZE_ENCODING_DEFAULT - depdi _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys -#endif - .endm - /* * copy_user_page_asm() performs a page copy using mappings * equivalent to the user page mappings. 
It can be used to @@ -540,24 +532,10 @@ ENTRY_CFI(copy_user_page_asm) sub %r25, %r1, %r23 ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */ - depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ - copy %r28, %r29 - depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */ - depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ + dep_safe %r24, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ copy %r28, %r29 - depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */ -#endif + depi_safe 1, 31-TMPALIAS_SIZE_BITS,1, %r29 /* Form aliased virtual address 'from' */ /* Purge any old translations */ @@ -687,18 +665,8 @@ ENTRY_CFI(clear_user_page_asm) tophys_r1 %r26 ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#endif + dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ /* Purge any old translation */ @@ -763,18 +731,8 @@ ENDPROC_CFI(clear_user_page_asm) ENTRY_CFI(flush_dcache_page_asm) ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#endif + dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ /* Purge any old translation */ @@ -822,18 +780,8 @@ ENDPROC_CFI(flush_dcache_page_asm) ENTRY_CFI(purge_dcache_page_asm) ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any 
offset bits */ -#endif + dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ /* Purge any old translation */ @@ -881,18 +829,8 @@ ENDPROC_CFI(purge_dcache_page_asm) ENTRY_CFI(flush_icache_page_asm) ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#endif + dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ /* Purge any old translation. Note that the FIC instruction * may use either the instruction or data TLB. Given that we @@ -1098,7 +1036,6 @@ ENTRY_CFI(flush_kernel_dcache_range_asm) sync 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) - syncdma bv %r0(%r2) nop ENDPROC_CFI(flush_kernel_dcache_range_asm) @@ -1140,7 +1077,6 @@ ENTRY_CFI(purge_kernel_dcache_range_asm) sync 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) - syncdma bv %r0(%r2) nop ENDPROC_CFI(purge_kernel_dcache_range_asm) diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c index 80a0ab372802..e59574f65e64 100644 --- a/arch/parisc/kernel/patch.c +++ b/arch/parisc/kernel/patch.c @@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags, *need_unmap = 1; set_fixmap(fixmap, page_to_phys(page)); - if (flags) - raw_spin_lock_irqsave(&patch_lock, *flags); - else - __acquire(&patch_lock); + raw_spin_lock_irqsave(&patch_lock, *flags); return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); } @@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { clear_fixmap(fixmap); - if (flags) - raw_spin_unlock_irqrestore(&patch_lock, *flags); - else - __release(&patch_lock); + raw_spin_unlock_irqrestore(&patch_lock, *flags); } void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) @@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) int mapped; /* Make sure we don't have any aliases in cache */ - flush_kernel_vmap_range(addr, len); - flush_icache_range(start, end); + flush_kernel_dcache_range_asm(start, end); + flush_kernel_icache_range_asm(start, end); + flush_tlb_kernel_range(start, end); p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped); @@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) * We're crossing a page boundary, so * need to remap */ - flush_kernel_vmap_range((void *)fixmap, - (p-fixmap) * sizeof(*p)); + flush_kernel_dcache_range_asm((unsigned long)fixmap, + (unsigned long)p); + flush_tlb_kernel_range((unsigned long)fixmap, + (unsigned long)p); if (mapped) patch_unmap(FIX_TEXT_POKE0, &flags); p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, @@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) } } - flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p)); + flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p); + 
flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p); if (mapped) patch_unmap(FIX_TEXT_POKE0, &flags); - flush_icache_range(start, end); } void __kprobes __patch_text(void *addr, u32 insn) diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 28b6a2a5574c..d145184696ea 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/personality.h> #include <linux/ptrace.h> +#include <linux/reboot.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task.h> @@ -116,8 +117,7 @@ void machine_power_off(void) pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN); /* ipmi_poweroff may have been installed. */ - if (pm_power_off) - pm_power_off(); + do_kernel_power_off(); /* It seems we have no way to power the system off via * software. The user has to press the button himself. */ diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index f114e102aaf2..84bc437be5cd 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -22,6 +22,8 @@ #include <asm/traps.h> +#define DEBUG_NATLB 0 + /* Various important other fields */ #define bit22set(x) (x & 0x00000200) #define bits23_25set(x) (x & 0x000001c0) @@ -450,8 +452,8 @@ handle_nadtlb_fault(struct pt_regs *regs) fallthrough; case 0x380: /* PDC and FIC instructions */ - if (printk_ratelimit()) { - pr_warn("BUG: nullifying cache flush/purge instruction\n"); + if (DEBUG_NATLB && printk_ratelimit()) { + pr_warn("WARNING: nullifying cache flush/purge instruction\n"); show_regs(regs); } if (insn & 0x20) { diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 1dc2e88e7b04..0a81499dd35e 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -555,6 +555,12 @@ void __init mem_init(void) BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD); #endif +#ifdef CONFIG_64BIT + /* avoid ldil_%L() asm statements to sign-extend into upper 32-bits */ + BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000); + BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000); +#endif + high_memory = __va((max_pfn << PAGE_SHIFT)); set_max_mapnr(max_low_pfn); memblock_free_all(); |
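The new BUILD_BUG_ON() checks in mem_init() keep __PAGE_OFFSET and TMPALIAS_MAP_START below 0x80000000 because, on a 64-bit kernel, loading such a constant with ldil sign-extends it into the upper 32 bits. A small stand-alone sketch of the failure mode being guarded against (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ok  = 0x0f800000;   /* TMPALIAS_MAP_START-style constant, bit 31 clear */
	uint32_t bad = 0x80000000;   /* bit 31 set: would sign-extend on a 64-bit kernel */

	/* value as a 64-bit register would see it after sign extension */
	printf("0x%08x -> 0x%016llx\n", ok,  (unsigned long long)(int64_t)(int32_t)ok);
	printf("0x%08x -> 0x%016llx\n", bad, (unsigned long long)(int64_t)(int32_t)bad);
	return 0;
}

With bit 31 clear the constant survives unchanged; with bit 31 set the register would hold 0xffffffff80000000 instead of the intended address, which is why the patch can drop the per-call-site depdi/depi fixups once the build-time checks are in place.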