Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                    |  28
-rw-r--r--  arch/sparc/Kconfig.debug              |   5
-rw-r--r--  arch/sparc/crypto/md5_glue.c          |   1
-rw-r--r--  arch/sparc/crypto/sha1_glue.c         |   1
-rw-r--r--  arch/sparc/crypto/sha256_glue.c       |   2
-rw-r--r--  arch/sparc/crypto/sha512_glue.c       |   2
-rw-r--r--  arch/sparc/include/asm/Kbuild         |   1
-rw-r--r--  arch/sparc/include/asm/atomic_32.h    |  24
-rw-r--r--  arch/sparc/include/asm/atomic_64.h    |  65
-rw-r--r--  arch/sparc/include/asm/io_64.h        |  19
-rw-r--r--  arch/sparc/include/asm/kprobes.h      |   1
-rw-r--r--  arch/sparc/include/asm/msi.h          |  32
-rw-r--r--  arch/sparc/include/uapi/asm/socket.h  |   3
-rw-r--r--  arch/sparc/kernel/kprobes.c           |  65
-rw-r--r--  arch/sparc/kernel/time_64.c           |   2
-rw-r--r--  arch/sparc/lib/atomic32.c             |   4
-rw-r--r--  arch/sparc/mm/fault_32.c              |   3
-rw-r--r--  arch/sparc/mm/fault_64.c              |   3
-rw-r--r--  arch/sparc/mm/srmmu.c                 |  20
19 files changed, 53 insertions, 228 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 0f535debf802..2d58c26bff9a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -155,10 +155,6 @@ config PGTABLE_LEVELS
 config ARCH_SUPPORTS_UPROBES
         def_bool y if SPARC64
 
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
 menu "Processor type and features"
 
 config SMP
@@ -331,8 +327,6 @@ config FORCE_MAX_ZONEORDER
           This config option is actually maximum order plus one. For example,
           a value of 13 means that the largest free memory block is 2^12 pages.
 
-source "mm/Kconfig"
-
 if SPARC64
 source "kernel/power/Kconfig"
 endif
@@ -355,8 +349,6 @@ config SCHED_MC
           making when dealing with multi-core CPU chips at a cost of slightly
           increased overhead in some places. If unsure say N here.
 
-source "kernel/Kconfig.preempt"
-
 config CMDLINE_BOOL
         bool "Default bootloader kernel arguments"
         depends on SPARC64
@@ -556,10 +548,6 @@ config SPARC64_PCI_MSI
 
 endmenu
 
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
 config COMPAT
         bool
         depends on SPARC64
@@ -574,20 +562,4 @@ config SYSVIPC_COMPAT
         depends on COMPAT && SYSVIPC
         default y
 
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
 source "drivers/sbus/char/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/sparc/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 4aef29a11925..50a918d496c8 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -1,12 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
 
 config TRACE_IRQFLAGS_SUPPORT
         bool
         default y
 
-source "lib/Kconfig.debug"
-
 config DEBUG_DCFLUSH
         bool "D-cache flush debugging"
         depends on SPARC64 && DEBUG_KERNEL
@@ -21,5 +18,3 @@ config FRAME_POINTER
         bool
         depends on MCOUNT
         default y
-
-endmenu
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index c9d2b922734b..bc9cc26efa3d 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -144,7 +144,6 @@ static struct shash_alg alg = {
                 .cra_name       = "md5",
                 .cra_driver_name= "md5-sparc64",
                 .cra_priority   = SPARC_CR_OPCODE_PRIORITY,
-                .cra_flags      = CRYPTO_ALG_TYPE_SHASH,
                 .cra_blocksize  = MD5_HMAC_BLOCK_SIZE,
                 .cra_module     = THIS_MODULE,
         }
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 1b3e47accc74..4d6d7faf728e 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -139,7 +139,6 @@ static struct shash_alg alg = {
                 .cra_name       = "sha1",
                 .cra_driver_name= "sha1-sparc64",
                 .cra_priority   = SPARC_CR_OPCODE_PRIORITY,
-                .cra_flags      = CRYPTO_ALG_TYPE_SHASH,
                 .cra_blocksize  = SHA1_BLOCK_SIZE,
                 .cra_module     = THIS_MODULE,
         }
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 285268ca9279..54c4de2db188 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -169,7 +169,6 @@ static struct shash_alg sha256 = {
                 .cra_name       = "sha256",
                 .cra_driver_name= "sha256-sparc64",
                 .cra_priority   = SPARC_CR_OPCODE_PRIORITY,
-                .cra_flags      = CRYPTO_ALG_TYPE_SHASH,
                 .cra_blocksize  = SHA256_BLOCK_SIZE,
                 .cra_module     = THIS_MODULE,
         }
@@ -185,7 +184,6 @@ static struct shash_alg sha224 = {
                 .cra_name       = "sha224",
                 .cra_driver_name= "sha224-sparc64",
                 .cra_priority   = SPARC_CR_OPCODE_PRIORITY,
-                .cra_flags      = CRYPTO_ALG_TYPE_SHASH,
                 .cra_blocksize  = SHA224_BLOCK_SIZE,
                 .cra_module     = THIS_MODULE,
         }
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index 11eb36c3fc8c..4c55e97a4408 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -154,7 +154,6 @@ static struct shash_alg sha512 = {
                 .cra_name       = "sha512",
                 .cra_driver_name= "sha512-sparc64",
                 .cra_priority   = SPARC_CR_OPCODE_PRIORITY,
-                .cra_flags      = CRYPTO_ALG_TYPE_SHASH,
                 .cra_blocksize  = SHA512_BLOCK_SIZE,
                 .cra_module     = THIS_MODULE,
         }
@@ -170,7 +169,6 @@ static struct shash_alg sha384 = {
                 .cra_name       = "sha384",
                 .cra_driver_name= "sha384-sparc64",
                 .cra_priority   = SPARC_CR_OPCODE_PRIORITY,
-                .cra_flags      = CRYPTO_ALG_TYPE_SHASH,
                 .cra_blocksize  = SHA384_BLOCK_SIZE,
                 .cra_module     = THIS_MODULE,
         }
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index ac67828da201..410b263ef5c8 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += module.h
+generic-y += msi.h
 generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += serial.h
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index d13ce517f4b9..94c930f0bc62 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -27,17 +27,17 @@ int atomic_fetch_or(int, atomic_t *);
 int atomic_fetch_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
-int __atomic_add_unless(atomic_t *, int, int);
+int atomic_fetch_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);
 
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+
 #define atomic_set_release(v, i)        atomic_set((v), (i))
 
 #define atomic_read(v)          READ_ONCE((v)->counter)
 
 #define atomic_add(i, v)        ((void)atomic_add_return( (int)(i), (v)))
 #define atomic_sub(i, v)        ((void)atomic_add_return(-(int)(i), (v)))
-#define atomic_inc(v)           ((void)atomic_add_return(        1, (v)))
-#define atomic_dec(v)           ((void)atomic_add_return(       -1, (v)))
 
 #define atomic_and(i, v)        ((void)atomic_fetch_and((i), (v)))
 #define atomic_or(i, v)         ((void)atomic_fetch_or((i), (v)))
@@ -46,22 +46,4 @@ void atomic_set(atomic_t *, int);
 #define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
 #define atomic_fetch_sub(i, v)  (atomic_fetch_add (-(int)(i), (v)))
 
-#define atomic_inc_return(v)    (atomic_add_return(        1, (v)))
-#define atomic_dec_return(v)    (atomic_add_return(       -1, (v)))
-
-#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 28db058d471b..6963482c81d8 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -50,38 +50,6 @@ ATOMIC_OPS(xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_dec_return(v)   atomic_sub_return(1, v)
-#define atomic64_dec_return(v) atomic64_sub_return(1, v)
-
-#define atomic_inc_return(v)   atomic_add_return(1, v)
-#define atomic64_inc_return(v) atomic64_add_return(1, v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic64_inc(v) atomic64_add(1, v)
-
-#define atomic_dec(v) atomic_sub(1, v)
-#define atomic64_dec(v) atomic64_sub(1, v)
-
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
-
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 
 static inline int atomic_xchg(atomic_t *v, int new)
@@ -89,42 +57,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
         return xchg(&v->counter, new);
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-        int c, old;
-        c = atomic_read(v);
-        for (;;) {
-                if (unlikely(c == (u)))
-                        break;
-                old = atomic_cmpxchg((v), c, c + (a));
-                if (likely(old == c))
-                        break;
-                c = old;
-        }
-        return c;
-}
-
 #define atomic64_cmpxchg(v, o, n) \
         ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-        long c, old;
-        c = atomic64_read(v);
-        for (;;) {
-                if (unlikely(c == (u)))
-                        break;
-                old = atomic64_cmpxchg((v), c, c + (a));
-                if (likely(old == c))
-                        break;
-                c = old;
-        }
-        return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
 long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9a1e9cbc7e6d..b162c23ae8c2 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -243,35 +243,42 @@ void insb(unsigned long, void *, unsigned long);
 void insw(unsigned long, void *, unsigned long);
 void insl(unsigned long, void *, unsigned long);
 
-static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
+static inline void readsb(void __iomem *port, void *buf, unsigned long count)
 {
         insb((unsigned long __force)port, buf, count);
 }
 
-static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
+static inline void readsw(void __iomem *port, void *buf, unsigned long count)
 {
         insw((unsigned long __force)port, buf, count);
 }
 
-static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
+static inline void readsl(void __iomem *port, void *buf, unsigned long count)
 {
         insl((unsigned long __force)port, buf, count);
 }
 
-static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
+static inline void writesb(void __iomem *port, const void *buf, unsigned long count)
 {
         outsb((unsigned long __force)port, buf, count);
 }
 
-static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
+static inline void writesw(void __iomem *port, const void *buf, unsigned long count)
 {
         outsw((unsigned long __force)port, buf, count);
 }
 
-static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
+static inline void writesl(void __iomem *port, const void *buf, unsigned long count)
 {
         outsl((unsigned long __force)port, buf, count);
 }
+#define ioread8_rep(p,d,l)      readsb(p,d,l)
+#define ioread16_rep(p,d,l)     readsw(p,d,l)
+#define ioread32_rep(p,d,l)     readsl(p,d,l)
+#define iowrite8_rep(p,d,l)     writesb(p,d,l)
+#define iowrite16_rep(p,d,l)    writesw(p,d,l)
+#define iowrite32_rep(p,d,l)    writesl(p,d,l)
+
 /* Valid I/O Space regions are anywhere, because each PCI bus supported
  * can live in an arbitrary area of the physical address range.
  */
diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h
index 3704490b4488..bfcaa6326c20 100644
--- a/arch/sparc/include/asm/kprobes.h
+++ b/arch/sparc/include/asm/kprobes.h
@@ -44,7 +44,6 @@ struct kprobe_ctlblk {
         unsigned long kprobe_status;
         unsigned long kprobe_orig_tnpc;
         unsigned long kprobe_orig_tstate_pil;
-        struct pt_regs jprobe_saved_regs;
         struct prev_kprobe prev_kprobe;
 };
 
diff --git a/arch/sparc/include/asm/msi.h b/arch/sparc/include/asm/msi.h
deleted file mode 100644
index 3c17c1074431..000000000000
--- a/arch/sparc/include/asm/msi.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * msi.h: Defines specific to the MBus - Sbus - Interface.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- */
-
-#ifndef _SPARC_MSI_H
-#define _SPARC_MSI_H
-
-/*
- * Locations of MSI Registers.
- */
-#define MSI_MBUS_ARBEN 0xe0001008       /* MBus Arbiter Enable register */
-
-/*
- * Useful bits in the MSI Registers.
- */
-#define MSI_ASYNC_MODE  0x80000000      /* Operate the MSI asynchronously */
-
-
-static inline void msi_set_sync(void)
-{
-        __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
-                              "andn %%g3, %2, %%g3\n\t"
-                              "sta %%g3, [%0] %1\n\t" : :
-                              "r" (MSI_MBUS_ARBEN),
-                              "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
-}
-
-#endif /* !(_SPARC_MSI_H) */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index d58520c2e6ff..7ea35e5601b6 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -101,6 +101,9 @@
 
 #define SO_ZEROCOPY             0x003e
 
+#define SO_TXTIME               0x003f
+#define SCM_TXTIME              SO_TXTIME
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION              0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT        0x5002
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index ab4ba4347941..dfbca2470536 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -147,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                         kcb->kprobe_status = KPROBE_REENTER;
                         prepare_singlestep(p, regs, kcb);
                         return 1;
-                } else {
-                        if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+                } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                         /* The breakpoint instruction was removed by
                          * another cpu right after we hit, no further
                          * handling of this interrupt is appropriate
                          */
-                                ret = 1;
-                                goto no_kprobe;
-                        }
-                        p = __this_cpu_read(current_kprobe);
-                        if (p->break_handler && p->break_handler(p, regs))
-                                goto ss_probe;
+                        ret = 1;
                 }
                 goto no_kprobe;
         }
@@ -181,10 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
         set_current_kprobe(p, regs, kcb);
         kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-        if (p->pre_handler && p->pre_handler(p, regs))
+        if (p->pre_handler && p->pre_handler(p, regs)) {
+                reset_current_kprobe();
+                preempt_enable_no_resched();
                 return 1;
+        }
 
-ss_probe:
         prepare_singlestep(p, regs, kcb);
         kcb->kprobe_status = KPROBE_HIT_SS;
         return 1;
@@ -441,53 +437,6 @@ out:
         exception_exit(prev_state);
 }
 
-/* Jprobes support.  */
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-        struct jprobe *jp = container_of(p, struct jprobe, kp);
-        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-        memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
-
-        regs->tpc  = (unsigned long) jp->entry;
-        regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
-        regs->tstate |= TSTATE_PIL;
-
-        return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-        register unsigned long orig_fp asm("g1");
-
-        orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
-        __asm__ __volatile__("\n"
-"1:     cmp             %%sp, %0\n\t"
-        "blu,a,pt       %%xcc, 1b\n\t"
-        " restore\n\t"
-        ".globl         jprobe_return_trap_instruction\n"
-"jprobe_return_trap_instruction:\n\t"
-        "ta             0x70"
-        : /* no outputs */
-        : "r" (orig_fp));
-}
-
-extern void jprobe_return_trap_instruction(void);
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-        u32 *addr = (u32 *) regs->tpc;
-        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-        if (addr == (u32 *) jprobe_return_trap_instruction) {
-                memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
-                preempt_enable_no_resched();
-                return 1;
-        }
-        return 0;
-}
-
 /* The value stored in the return address register is actually 2
  * instructions before where the callee will return to.
  * Sequences usually look something like this
@@ -562,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
         regs->tpc = orig_ret_address;
         regs->tnpc = orig_ret_address + 4;
 
-        reset_current_kprobe();
         kretprobe_hash_unlock(current, &flags);
-        preempt_enable_no_resched();
 
         hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                 hlist_del(&ri->hlist);
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2ef8cfa9677e..f0eba72aa1ad 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -814,7 +814,7 @@ static void __init get_tick_patch(void)
         }
 }
 
-static void init_tick_ops(struct sparc64_tick_ops *ops)
+static void __init init_tick_ops(struct sparc64_tick_ops *ops)
 {
         unsigned long freq, quotient, tick;
 
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 465a901a0ada..281fa634bb1a 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -95,7 +95,7 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
 }
 EXPORT_SYMBOL(atomic_cmpxchg);
 
-int __atomic_add_unless(atomic_t *v, int a, int u)
+int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
         int ret;
         unsigned long flags;
@@ -107,7 +107,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u)
         spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
         return ret;
 }
-EXPORT_SYMBOL(__atomic_add_unless);
+EXPORT_SYMBOL(atomic_fetch_add_unless);
 
 /* Atomic operations are already serializing */
 void atomic_set(atomic_t *v, int i)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 9f75b6444bf1..b0440b0edd97 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -166,7 +166,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
         unsigned int fixup;
         unsigned long g2;
         int from_user = !(regs->psr & PSR_PS);
-        int fault, code;
+        int code;
+        vm_fault_t fault;
         unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
         if (text_fault)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 63166fcf9e25..8f8a604c1300 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -278,7 +278,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
         unsigned int insn = 0;
-        int si_code, fault_code, fault;
+        int si_code, fault_code;
+        vm_fault_t fault;
         unsigned long address, mm_rss;
         unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 1d70c3f6d986..be9cb0065179 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -37,7 +37,6 @@
 #include <asm/mbus.h>
 #include <asm/page.h>
 #include <asm/asi.h>
-#include <asm/msi.h>
 #include <asm/smp.h>
 #include <asm/io.h>
 
@@ -116,6 +115,25 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
         set_pte((pte_t *)ctxp, pte);
 }
 
+/*
+ * Locations of MSI Registers.
+ */
+#define MSI_MBUS_ARBEN 0xe0001008       /* MBus Arbiter Enable register */
+
+/*
+ * Useful bits in the MSI Registers.
+ */
+#define MSI_ASYNC_MODE  0x80000000      /* Operate the MSI asynchronously */
+
+static void msi_set_sync(void)
+{
+        __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
+                              "andn %%g3, %2, %%g3\n\t"
+                              "sta %%g3, [%0] %1\n\t" : :
+                              "r" (MSI_MBUS_ARBEN),
+                              "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
+}
+
 void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
         unsigned long ptp;      /* Physical address, shifted right by 4 */