Diffstat (limited to 'arch')
-rw-r--r--  arch/parisc/Makefile                     | 15
-rw-r--r--  arch/parisc/defpalo.conf                 |  8
-rw-r--r--  arch/parisc/include/asm/special_insns.h  |  9
-rw-r--r--  arch/parisc/include/asm/tlbflush.h       |  5
-rw-r--r--  arch/parisc/install.sh                   |  6
-rw-r--r--  arch/parisc/kernel/cache.c               |  2
-rw-r--r--  arch/parisc/kernel/processor.c           | 19
-rw-r--r--  arch/parisc/lib/memcpy.c                 | 79
8 files changed, 94 insertions(+), 49 deletions(-)
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 96ec3982be8d..e02f665f804a 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -17,6 +17,8 @@
 # Mike Shaver, Helge Deller and Martin K. Petersen
 #
 
+KBUILD_IMAGE := vmlinuz
+
 KBUILD_DEFCONFIG := default_defconfig
 
 NM		= sh $(srctree)/arch/parisc/nm
@@ -92,7 +94,7 @@ PALOCONF := $(shell if [ -f $(src)/palo.conf ]; then echo $(src)/palo.conf; \
 	else echo $(obj)/palo.conf; \
 	fi)
 
-palo: vmlinux
+palo: vmlinuz
 	@if test ! -x "$(PALO)"; then \
 		echo 'ERROR: Please install palo first (apt-get install palo)';\
 		echo 'or build it from source and install it somewhere in your $$PATH';\
@@ -107,10 +109,14 @@ palo: vmlinux
 	fi
 	$(PALO) -f $(PALOCONF)
 
-# Shorthands for known targets not supported by parisc, use vmlinux as default
-Image zImage bzImage: vmlinux
+# Shorthands for known targets not supported by parisc, use vmlinux/vmlinuz as default
+Image: vmlinux
+zImage bzImage: vmlinuz
+
+vmlinuz: vmlinux
+	@gzip -cf -9 $< > $@
 
-install: vmlinux
+install: vmlinuz
 	sh $(src)/arch/parisc/install.sh \
 		$(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
 
@@ -119,6 +125,7 @@ MRPROPER_FILES += palo.conf
 
 define archhelp
 	@echo  '* vmlinux	- Uncompressed kernel image (./vmlinux)'
+	@echo  '  vmlinuz	- Compressed kernel image (./vmlinuz)'
 	@echo  '  palo		- Bootable image (./lifimage)'
 	@echo  '  install	- Install kernel using'
 	@echo  '		  (your) ~/bin/$(INSTALLKERNEL) or'
diff --git a/arch/parisc/defpalo.conf b/arch/parisc/defpalo.conf
index 4e1ae25b08d1..208ff3b41487 100644
--- a/arch/parisc/defpalo.conf
+++ b/arch/parisc/defpalo.conf
@@ -4,7 +4,7 @@
 # Most people using 'make palo' want a bootable file, usable for
 # network or tape booting for example.
 --init-tape=lifimage
---recoverykernel=vmlinux
+--recoverykernel=vmlinuz
 
 ########## Pick your ROOT here! ##########
 # You need at least one 'root='!
@@ -12,10 +12,10 @@
 # If you want a root ramdisk, use the next 2 lines
 #   (Edit the ramdisk image name!!!!)
 --ramdisk=ram-disk-image-file
---commandline=0/vmlinux HOME=/ root=/dev/ram initrd=0/ramdisk
+--commandline=0/vmlinuz HOME=/ root=/dev/ram initrd=0/ramdisk panic_timeout=60 panic=-1
 
 # If you want NFS root, use the following command line (Edit the HOSTNAME!!!)
-#--commandline=0/vmlinux HOME=/ root=/dev/nfs nfsroot=HOSTNAME ip=bootp
+#--commandline=0/vmlinuz HOME=/ root=/dev/nfs nfsroot=HOSTNAME ip=bootp
 
 # If you have root on a disk partition, use this (Edit the partition name!!!)
-#--commandline=0/vmlinux HOME=/ root=/dev/sda1
+#--commandline=0/vmlinuz HOME=/ root=/dev/sda1
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index d306b75bc77f..e1509308899f 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -32,9 +32,12 @@ static inline void set_eiem(unsigned long val)
 	cr;				\
 })
 
-#define mtsp(gr, cr) \
-	__asm__ __volatile__("mtsp %0,%1" \
+#define mtsp(val, cr) \
+	{ if (__builtin_constant_p(val) && ((val) == 0)) \
+	 __asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \
+	else \
+	 __asm__ __volatile__("mtsp %0,%1" \
 		: /* no outputs */ \
-		: "r" (gr), "i" (cr) : "memory")
+		: "r" (val), "i" (cr) : "memory"); }
 
 #endif /* __PARISC_SPECIAL_INSNS_H */
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 5273da991e06..9d086a599fa0 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -63,13 +63,14 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 	unsigned long addr)
 {
-	unsigned long flags;
+	unsigned long flags, sid;
 
 	/* For one page, it's not worth testing the split_tlb variable */
 
 	mb();
-	mtsp(vma->vm_mm->context,1);
+	sid = vma->vm_mm->context;
 	purge_tlb_start(flags);
+	mtsp(sid, 1);
 	pdtlb(addr);
 	pitlb(addr);
 	purge_tlb_end(flags);
diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
index e593fc8d58bc..4da682b466d0 100644
--- a/arch/parisc/install.sh
+++ b/arch/parisc/install.sh
@@ -26,13 +26,13 @@ if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
 
 # Default install
 
-if [ -f $4/vmlinux ]; then
-	mv $4/vmlinux $4/vmlinux.old
+if [ -f $4/vmlinuz ]; then
+	mv $4/vmlinuz $4/vmlinuz.old
 fi
 
 if [ -f $4/System.map ]; then
 	mv $4/System.map $4/System.old
 fi
 
-cat $2 > $4/vmlinux
+cat $2 > $4/vmlinuz
 cp $3 $4/System.map
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 65fb4cbc3a0f..2e65aa54bd10 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -440,8 +440,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
 	else {
 		unsigned long flags;
 
-		mtsp(sid, 1);
 		purge_tlb_start(flags);
+		mtsp(sid, 1);
 		if (split_tlb) {
 			while (npages--) {
 				pdtlb(start);
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index c8fb61ed32f4..8a96c8ab9fe6 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -371,10 +371,23 @@ show_cpuinfo (struct seq_file *m, void *v)
 
 		seq_printf(m, "capabilities\t:");
 		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
-			seq_printf(m, " os32");
+			seq_puts(m, " os32");
 		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
-			seq_printf(m, " os64");
-		seq_printf(m, "\n");
+			seq_puts(m, " os64");
+		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
+			seq_puts(m, " iopdir_fdc");
+		switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
+		case PDC_MODEL_NVA_SUPPORTED:
+			seq_puts(m, " nva_supported");
+			break;
+		case PDC_MODEL_NVA_SLOW:
+			seq_puts(m, " nva_slow");
+			break;
+		case PDC_MODEL_NVA_UNSUPPORTED:
+			seq_puts(m, " needs_equivalent_aliasing");
+			break;
+		}
+		seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
 
 		seq_printf(m, "model\t\t: %s\n"
 				"model name\t: %s\n",
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index a49cc812df8a..ac4370b1ca40 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,6 +2,7 @@
  *    Optimized memory copy routines.
  *
  *    Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ *    Copyright (C) 2013 Helge Deller <deller@gmx.de>
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -153,17 +154,21 @@ static inline void prefetch_dst(const void *addr)
 #define prefetch_dst(addr) do { } while(0)
 #endif
 
+#define PA_MEMCPY_OK		0
+#define PA_MEMCPY_LOAD_ERROR	1
+#define PA_MEMCPY_STORE_ERROR	2
+
 /* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
  * per loop.  This code is derived from glibc.
  */
 
-static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len)
+static inline unsigned long copy_dstaligned(unsigned long dst,
+					unsigned long src, unsigned long len)
 {
 	/* gcc complains that a2 and a3 may be uninitialized, but actually
 	 * they cannot be.  Initialize a2/a3 to shut gcc up.
 	 */
 	register unsigned int a0, a1, a2 = 0, a3 = 0;
 	int sh_1, sh_2;
-	struct exception_data *d;
 
 	/* prefetch_src((const void *)src); */
@@ -197,7 +202,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src
 			goto do2;
 		case 0:
 			if (len == 0)
-				return 0;
+				return PA_MEMCPY_OK;
 			/* a3 = ((unsigned int *) src)[0];
 			   a0 = ((unsigned int *) src)[1]; */
 			ldw(s_space, 0, src, a3, cda_ldw_exc);
@@ -256,42 +261,35 @@ do0:
 	preserve_branch(handle_load_error);
 	preserve_branch(handle_store_error);
 
-	return 0;
+	return PA_MEMCPY_OK;
 
 handle_load_error:
 	__asm__ __volatile__ ("cda_ldw_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
-	return o_len * 4 - d->fault_addr + o_src;
+	return PA_MEMCPY_LOAD_ERROR;
 
 handle_store_error:
 	__asm__ __volatile__ ("cda_stw_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
-	return o_len * 4 - d->fault_addr + o_dst;
+	return PA_MEMCPY_STORE_ERROR;
 }
 
 
-/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
+ * In case of an access fault the faulty address can be read from the per_cpu
+ * exception data struct. */
+static unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
+					unsigned long len)
 {
 	register unsigned long src, dst, t1, t2, t3;
 	register unsigned char *pcs, *pcd;
 	register unsigned int *pws, *pwd;
 	register double *pds, *pdd;
-	unsigned long ret = 0;
-	unsigned long o_dst, o_src, o_len;
-	struct exception_data *d;
+	unsigned long ret;
 
 	src = (unsigned long)srcp;
 	dst = (unsigned long)dstp;
 	pcs = (unsigned char *)srcp;
 	pcd = (unsigned char *)dstp;
 
-	o_dst = dst; o_src = src; o_len = len;
-
 	/* prefetch_src((const void *)srcp); */
 
 	if (len < THRESHOLD)
@@ -401,7 +399,7 @@ byte_copy:
 		len--;
 	}
 
-	return 0;
+	return PA_MEMCPY_OK;
 
 unaligned_copy:
 	/* possibly we are aligned on a word, but not on a double... */
@@ -438,8 +436,7 @@ unaligned_copy:
 			src = (unsigned long)pcs;
 		}
 
-		ret = copy_dstaligned(dst, src, len / sizeof(unsigned int),
-			o_dst, o_src, o_len);
+		ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
 		if (ret)
 			return ret;
 
@@ -454,17 +451,41 @@ unaligned_copy:
 
 handle_load_error:
 	__asm__ __volatile__ ("pmc_load_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
-	return o_len - d->fault_addr + o_src;
+	return PA_MEMCPY_LOAD_ERROR;
 
 handle_store_error:
 	__asm__ __volatile__ ("pmc_store_exc:\n");
+	return PA_MEMCPY_STORE_ERROR;
+}
+
+
+/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+{
+	unsigned long ret, fault_addr, reference;
+	struct exception_data *d;
+
+	ret = pa_memcpy_internal(dstp, srcp, len);
+	if (likely(ret == PA_MEMCPY_OK))
+		return 0;
+
+	/* if a load or store fault occured we can get the faulty addr */
 	d = &__get_cpu_var(exception_data);
-	DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
-	return o_len - d->fault_addr + o_dst;
+	fault_addr = d->fault_addr;
+
+	/* error in load or store? */
+	if (ret == PA_MEMCPY_LOAD_ERROR)
+		reference = (unsigned long) srcp;
+	else
+		reference = (unsigned long) dstp;
+
+	DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
+		ret, len, fault_addr, reference);
+
+	if (fault_addr >= reference)
+		return len - (fault_addr - reference);
+	else
+		return len;
 }
 
 #ifdef __KERNEL__
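
The memcpy.c rework above splits the old pa_memcpy() into pa_memcpy_internal(), which only reports whether a load or a store faulted, and a new pa_memcpy() wrapper that converts the per-cpu fault address into the number of bytes not copied. The following standalone sketch is not kernel code; the helper name remaining_bytes() and the sample addresses are invented for illustration. It only mirrors the arithmetic the wrapper applies: pick the source buffer as the reference for a load fault and the destination for a store fault, then count everything from the fault address to the end of the buffer as not transferred.

/*
 * Standalone illustration of the fault-address arithmetic used by the
 * reworked pa_memcpy().  The PA_MEMCPY_* values mirror the constants added
 * by the patch; remaining_bytes() and the addresses in main() are
 * hypothetical and exist only for this example.
 */
#include <stdio.h>

#define PA_MEMCPY_OK          0
#define PA_MEMCPY_LOAD_ERROR  1
#define PA_MEMCPY_STORE_ERROR 2

static unsigned long remaining_bytes(unsigned long ret, unsigned long len,
                                     unsigned long fault_addr,
                                     unsigned long src, unsigned long dst)
{
        /* A load fault is measured against the source buffer, a store
         * fault against the destination buffer. */
        unsigned long reference = (ret == PA_MEMCPY_LOAD_ERROR) ? src : dst;

        if (fault_addr >= reference)
                return len - (fault_addr - reference);
        return len;     /* fault below the buffer: treat as nothing copied */
}

int main(void)
{
        /* A 4096-byte copy that takes a store fault 1000 bytes into the
         * destination leaves 3096 bytes not transferred. */
        unsigned long src = 0x20000, dst = 0x10000, len = 4096;
        unsigned long fault_addr = dst + 1000;

        printf("%lu bytes not copied\n",
               remaining_bytes(PA_MEMCPY_STORE_ERROR, len, fault_addr, src, dst));
        return 0;
}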