Diffstat (limited to 'arch/sparc/include')
-rw-r--r-- | arch/sparc/include/asm/cmpxchg_64.h      |  76
-rw-r--r-- | arch/sparc/include/asm/dma-mapping.h     |   8
-rw-r--r-- | arch/sparc/include/asm/ldc.h             |   8
-rw-r--r-- | arch/sparc/include/asm/mdesc.h           |  24
-rw-r--r-- | arch/sparc/include/asm/processor_32.h    |   3
-rw-r--r-- | arch/sparc/include/asm/processor_64.h    |   2
-rw-r--r-- | arch/sparc/include/asm/qrwlock.h         |   7
-rw-r--r-- | arch/sparc/include/asm/qspinlock.h       |   7
-rw-r--r-- | arch/sparc/include/asm/setup.h           |   2
-rw-r--r-- | arch/sparc/include/asm/siginfo.h         |  13
-rw-r--r-- | arch/sparc/include/asm/spinlock_64.h     | 208
-rw-r--r-- | arch/sparc/include/asm/spinlock_types.h  |  12
-rw-r--r-- | arch/sparc/include/asm/timer_64.h        |  67
-rw-r--r-- | arch/sparc/include/asm/uaccess_32.h      |   1
-rw-r--r-- | arch/sparc/include/asm/uaccess_64.h      |   1
-rw-r--r-- | arch/sparc/include/asm/vio.h             |  13
-rw-r--r-- | arch/sparc/include/uapi/asm/ioctls.h     |   3
-rw-r--r-- | arch/sparc/include/uapi/asm/socket.h     |   4
18 files changed, 207 insertions, 252 deletions
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index faa2f61058c2..4028f4f1e561 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -6,6 +6,17 @@
 #ifndef __ARCH_SPARC64_CMPXCHG__
 #define __ARCH_SPARC64_CMPXCHG__
 
+static inline unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+        __asm__ __volatile__("cas [%2], %3, %0"
+                             : "=&r" (new)
+                             : "0" (new), "r" (m), "r" (old)
+                             : "memory");
+
+        return new;
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
         unsigned long tmp1, tmp2;
@@ -44,10 +55,38 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 
 void __xchg_called_with_bad_pointer(void);
 
+/*
+ * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order.
+ */
+static inline unsigned long
+xchg16(__volatile__ unsigned short *m, unsigned short val)
+{
+        unsigned long maddr = (unsigned long)m;
+        int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
+        unsigned int mask = 0xffff << bit_shift;
+        unsigned int *ptr = (unsigned int *) (maddr & ~2);
+        unsigned int old32, new32, load32;
+
+        /* Read the old value */
+        load32 = *ptr;
+
+        do {
+                old32 = load32;
+                new32 = (load32 & (~mask)) | val << bit_shift;
+                load32 = __cmpxchg_u32(ptr, old32, new32);
+        } while (load32 != old32);
+
+        return (load32 & mask) >> bit_shift;
+}
+
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
                                        int size)
 {
         switch (size) {
+        case 2:
+                return xchg16(ptr, x);
         case 4:
                 return xchg32(ptr, x);
         case 8:
@@ -65,10 +104,11 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
 
 #include <asm-generic/cmpxchg-local.h>
 
+
 static inline unsigned long
-__cmpxchg_u32(volatile int *m, int old, int new)
+__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
-        __asm__ __volatile__("cas [%2], %3, %0"
+        __asm__ __volatile__("casx [%2], %3, %0"
                              : "=&r" (new)
                              : "0" (new), "r" (m), "r" (old)
                              : "memory");
@@ -76,15 +116,31 @@ __cmpxchg_u32(volatile int *m, int old, int new)
         return new;
 }
 
+/*
+ * Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order
+ */
 static inline unsigned long
-__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
 {
-        __asm__ __volatile__("casx [%2], %3, %0"
-                             : "=&r" (new)
-                             : "0" (new), "r" (m), "r" (old)
-                             : "memory");
-
-        return new;
+        unsigned long maddr = (unsigned long)m;
+        int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
+        unsigned int mask = 0xff << bit_shift;
+        unsigned int *ptr = (unsigned int *) (maddr & ~3);
+        unsigned int old32, new32, load;
+        unsigned int load32 = *ptr;
+
+        do {
+                new32 = (load32 & ~mask) | (new << bit_shift);
+                old32 = (load32 & ~mask) | (old << bit_shift);
+                load32 = __cmpxchg_u32(ptr, old32, new32);
+                if (load32 == old32)
+                        return old;
+                load = (load32 & mask) >> bit_shift;
+        } while (load == old);
+
+        return load;
 }
 
 /* This function doesn't exist, so you'll get a linker error
@@ -95,6 +151,8 @@ static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
         switch (size) {
+        case 1:
+                return __cmpxchg_u8(ptr, old, new);
         case 4:
                 return __cmpxchg_u32(ptr, old, new);
         case 8:
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 69cc627779f2..60bf1633d554 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -5,11 +5,6 @@
 #include <linux/mm.h>
 #include <linux/dma-debug.h>
 
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-#define HAVE_ARCH_DMA_SUPPORTED 1
-int dma_supported(struct device *dev, u64 mask);
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                   enum dma_data_direction dir)
 {
@@ -19,7 +14,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 }
 
 extern const struct dma_map_ops *dma_ops;
-extern const struct dma_map_ops *leon_dma_ops;
 extern const struct dma_map_ops pci32_dma_ops;
 
 extern struct bus_type pci_bus_type;
@@ -28,7 +22,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_SPARC_LEON
         if (sparc_cpu_model == sparc_leon)
-                return leon_dma_ops;
+                return &pci32_dma_ops;
 #endif
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
         if (bus == &pci_bus_type)
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index 6e9004aa6f25..698738a10414 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -48,6 +48,8 @@ struct ldc_channel_config {
 #define LDC_STATE_READY 0x03
 #define LDC_STATE_CONNECTED 0x04
 
+#define LDC_PACKET_SIZE 64
+
 struct ldc_channel;
 
 /* Allocate state for a channel. */
@@ -72,6 +74,12 @@ int ldc_connect(struct ldc_channel *lp);
 int ldc_disconnect(struct ldc_channel *lp);
 
 int ldc_state(struct ldc_channel *lp);
+void ldc_set_state(struct ldc_channel *lp, u8 state);
+int ldc_mode(struct ldc_channel *lp);
+void __ldc_print(struct ldc_channel *lp, const char *caller);
+int ldc_rx_reset(struct ldc_channel *lp);
+
+#define ldc_print(chan) __ldc_print(chan, __func__)
 
 /* Read and write operations.  Only valid when the link is up. */
 int ldc_write(struct ldc_channel *lp, const void *buf,
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index aebeb88f70db..e8a4c413a1c7 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -16,6 +16,7 @@ struct mdesc_handle *mdesc_grab(void);
 void mdesc_release(struct mdesc_handle *);
 
 #define MDESC_NODE_NULL (~(u64)0)
+#define MDESC_MAX_STR_LEN 256
 
 u64 mdesc_node_by_name(struct mdesc_handle *handle,
                        u64 from_node, const char *name);
@@ -62,15 +63,32 @@ u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
 void mdesc_update(void);
 
 struct mdesc_notifier_client {
-        void (*add)(struct mdesc_handle *handle, u64 node);
-        void (*remove)(struct mdesc_handle *handle, u64 node);
-
+        void (*add)(struct mdesc_handle *handle, u64 node,
+                    const char *node_name);
+        void (*remove)(struct mdesc_handle *handle, u64 node,
+                       const char *node_name);
         const char *node_name;
         struct mdesc_notifier_client *next;
 };
 
 void mdesc_register_notifier(struct mdesc_notifier_client *client);
 
+union md_node_info {
+        struct vdev_port {
+                u64 id;                 /* id */
+                u64 parent_cfg_hdl;     /* parent config handle */
+                const char *name;       /* name (property) */
+        } vdev_port;
+        struct ds_port {
+                u64 id;                 /* id */
+        } ds_port;
+};
+
+u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
+                   union md_node_info *node_info);
+int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
+                        const char *node_name, union md_node_info *node_info);
+
 void mdesc_fill_in_cpu_data(cpumask_t *mask);
 void mdesc_populate_present_mask(cpumask_t *mask);
 void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index dd27159819eb..b395e5620c0b 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -67,9 +67,6 @@ struct thread_struct {
         .current_ds = KERNEL_DS, \
 }
 
-/* Return saved PC of a blocked thread. */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 /* Do necessary setup to start up a newly executed thread. */
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
                                 unsigned long sp)
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index b58ee9018433..f04dc5a43062 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -89,9 +89,7 @@ struct thread_struct {
 #include <linux/types.h>
 #include <asm/fpumacro.h>
 
-/* Return saved PC of a blocked thread. */
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *);
 
 /* On Uniprocessor, even in RMO processes see TSO semantics */
 #ifdef CONFIG_SMP
diff --git a/arch/sparc/include/asm/qrwlock.h b/arch/sparc/include/asm/qrwlock.h
new file mode 100644
index 000000000000..d68a4b102100
--- /dev/null
+++ b/arch/sparc/include/asm/qrwlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QRWLOCK_H
+#define _ASM_SPARC_QRWLOCK_H
+
+#include <asm-generic/qrwlock_types.h>
+#include <asm-generic/qrwlock.h>
+
+#endif /* _ASM_SPARC_QRWLOCK_H */
diff --git a/arch/sparc/include/asm/qspinlock.h b/arch/sparc/include/asm/qspinlock.h
new file mode 100644
index 000000000000..5ae9a2802846
--- /dev/null
+++ b/arch/sparc/include/asm/qspinlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QSPINLOCK_H
+#define _ASM_SPARC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_SPARC_QSPINLOCK_H */
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 3fae200dd251..8b32538084f7 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -1,5 +1,5 @@
 /*
- * Just a place holder. 
+ * Just a place holder.
  */
 #ifndef _SPARC_SETUP_H
 #define _SPARC_SETUP_H
diff --git a/arch/sparc/include/asm/siginfo.h b/arch/sparc/include/asm/siginfo.h
deleted file mode 100644
index 48c34c19f810..000000000000
--- a/arch/sparc/include/asm/siginfo.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __SPARC_SIGINFO_H
-#define __SPARC_SIGINFO_H
-
-#include <uapi/asm/siginfo.h>
-
-
-#ifdef CONFIG_COMPAT
-
-struct compat_siginfo;
-
-#endif /* CONFIG_COMPAT */
-
-#endif /* !(__SPARC_SIGINFO_H) */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 07c9f2e9bf57..f7028f5e1a5a 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -10,216 +10,12 @@
 #include <asm/processor.h>
 #include <asm/barrier.h>
 
-
-/* To get debugging spinlocks which detect and catch
- * deadlock situations, set CONFIG_DEBUG_SPINLOCK
- * and rebuild your kernel.
- */
-
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code.  They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants.  The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp) ((lp)->lock != 0)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__(
-"1:     ldstub  [%1], %0\n"
-"       brnz,pn %0, 2f\n"
-"        nop\n"
-"       .subsection 2\n"
-"2:     ldub    [%1], %0\n"
-"       brnz,pt %0, 2b\n"
-"        nop\n"
-"       ba,a,pt %%xcc, 1b\n"
-"       .previous"
-        : "=&r" (tmp)
-        : "r" (lock)
-        : "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-        unsigned long result;
-
-        __asm__ __volatile__(
-"       ldstub  [%1], %0\n"
-        : "=r" (result)
-        : "r" (lock)
-        : "memory");
-
-        return (result == 0UL);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-        __asm__ __volatile__(
-"       stb     %%g0, [%0]"
-        : /* No outputs */
-        : "r" (lock)
-        : "memory");
-}
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-        unsigned long tmp1, tmp2;
-
-        __asm__ __volatile__(
-"1:     ldstub  [%2], %0\n"
-"       brnz,pn %0, 2f\n"
-"        nop\n"
-"       .subsection 2\n"
-"2:     rdpr    %%pil, %1\n"
-"       wrpr    %3, %%pil\n"
-"3:     ldub    [%2], %0\n"
-"       brnz,pt %0, 3b\n"
-"        nop\n"
-"       ba,pt   %%xcc, 1b\n"
-"        wrpr   %1, %%pil\n"
-"       .previous"
-        : "=&r" (tmp1), "=&r" (tmp2)
-        : "r"(lock), "r"(flags)
-        : "memory");
-}
-
-/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
-static inline void arch_read_lock(arch_rwlock_t *lock)
-{
-        unsigned long tmp1, tmp2;
-
-        __asm__ __volatile__ (
-"1:     ldsw    [%2], %0\n"
-"       brlz,pn %0, 2f\n"
-"4:      add    %0, 1, %1\n"
-"       cas     [%2], %0, %1\n"
-"       cmp     %0, %1\n"
-"       bne,pn  %%icc, 1b\n"
-"        nop\n"
-"       .subsection 2\n"
-"2:     ldsw    [%2], %0\n"
-"       brlz,pt %0, 2b\n"
-"        nop\n"
-"       ba,a,pt %%xcc, 4b\n"
-"       .previous"
-        : "=&r" (tmp1), "=&r" (tmp2)
-        : "r" (lock)
-        : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
-        int tmp1, tmp2;
-
-        __asm__ __volatile__ (
-"1:     ldsw    [%2], %0\n"
-"       brlz,a,pn %0, 2f\n"
-"        mov    0, %0\n"
-"       add     %0, 1, %1\n"
-"       cas     [%2], %0, %1\n"
-"       cmp     %0, %1\n"
-"       bne,pn  %%icc, 1b\n"
-"        mov    1, %0\n"
-"2:"
-        : "=&r" (tmp1), "=&r" (tmp2)
-        : "r" (lock)
-        : "memory");
-
-        return tmp1;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
-        unsigned long tmp1, tmp2;
-
-        __asm__ __volatile__(
-"1:     lduw    [%2], %0\n"
-"       sub     %0, 1, %1\n"
-"       cas     [%2], %0, %1\n"
-"       cmp     %0, %1\n"
-"       bne,pn  %%xcc, 1b\n"
-"        nop"
-        : "=&r" (tmp1), "=&r" (tmp2)
-        : "r" (lock)
-        : "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *lock)
-{
-        unsigned long mask, tmp1, tmp2;
-
-        mask = 0x80000000UL;
-
-        __asm__ __volatile__(
-"1:     lduw    [%2], %0\n"
-"       brnz,pn %0, 2f\n"
-"4:      or     %0, %3, %1\n"
-"       cas     [%2], %0, %1\n"
-"       cmp     %0, %1\n"
-"       bne,pn  %%icc, 1b\n"
-"        nop\n"
-"       .subsection 2\n"
-"2:     lduw    [%2], %0\n"
-"       brnz,pt %0, 2b\n"
-"        nop\n"
-"       ba,a,pt %%xcc, 4b\n"
-"       .previous"
-        : "=&r" (tmp1), "=&r" (tmp2)
-        : "r" (lock), "r" (mask)
-        : "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
-        __asm__ __volatile__(
-"       stw     %%g0, [%0]"
-        : /* no outputs */
-        : "r" (lock)
-        : "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
-        unsigned long mask, tmp1, tmp2, result;
-
-        mask = 0x80000000UL;
-
-        __asm__ __volatile__(
-"       mov     0, %2\n"
-"1:     lduw    [%3], %0\n"
-"       brnz,pn %0, 2f\n"
-"        or     %0, %4, %1\n"
-"       cas     [%3], %0, %1\n"
-"       cmp     %0, %1\n"
-"       bne,pn  %%icc, 1b\n"
-"        nop\n"
-"       mov     1, %2\n"
-"2:"
-        : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
-        : "r" (lock), "r" (mask)
-        : "memory");
-
-        return result;
-}
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
 
 #define arch_read_lock_flags(p, f) arch_read_lock(p)
 #define arch_write_lock_flags(p, f) arch_write_lock(p)
 
-#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
 #define arch_spin_relax(lock)   cpu_relax()
 #define arch_read_relax(lock)   cpu_relax()
 #define arch_write_relax(lock)  cpu_relax()
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 9c454fdeaad8..bce8ef44dfa9 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -1,20 +1,24 @@
 #ifndef __SPARC_SPINLOCK_TYPES_H
 #define __SPARC_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
 
 typedef struct {
         volatile unsigned char lock;
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
+#ifdef CONFIG_QUEUED_RWLOCKS
+#include <asm-generic/qrwlock_types.h>
+#else
 typedef struct {
         volatile unsigned int lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED { 0 }
-
+#endif /* CONFIG_QUEUED_RWLOCKS */
 #endif
diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h
index fce415034000..51bc3bc54bfe 100644
--- a/arch/sparc/include/asm/timer_64.h
+++ b/arch/sparc/include/asm/timer_64.h
@@ -9,7 +9,12 @@
 #include <linux/types.h>
 #include <linux/init.h>
 
+/* The most frequently accessed fields should be first,
+ * to fit into the same cacheline.
+ */
 struct sparc64_tick_ops {
+        unsigned long ticks_per_nsec_quotient;
+        unsigned long offset;
         unsigned long long (*get_tick)(void);
         int (*add_compare)(unsigned long);
         unsigned long softint_mask;
@@ -17,6 +22,8 @@ struct sparc64_tick_ops {
 
         void (*init_tick)(void);
         unsigned long (*add_tick)(unsigned long);
+        unsigned long (*get_frequency)(void);
+        unsigned long frequency;
 
         char *name;
 };
@@ -27,4 +34,64 @@ unsigned long sparc64_get_clock_tick(unsigned int cpu);
 void setup_sparc64_timer(void);
 void __init time_init(void);
 
+#define TICK_PRIV_BIT           BIT(63)
+#define TICKCMP_IRQ_BIT         BIT(63)
+
+#define HBIRD_STICKCMP_ADDR     0x1fe0000f060UL
+#define HBIRD_STICK_ADDR        0x1fe0000f070UL
+
+#define GET_TICK_NINSTR         13
+struct get_tick_patch {
+        unsigned int addr;
+        unsigned int tick[GET_TICK_NINSTR];
+        unsigned int stick[GET_TICK_NINSTR];
+};
+
+extern struct get_tick_patch __get_tick_patch;
+extern struct get_tick_patch __get_tick_patch_end;
+
+static inline unsigned long get_tick(void)
+{
+        unsigned long tick, tmp1, tmp2;
+
+        __asm__ __volatile__(
+        /* read hbtick 13 instructions */
+        "661:\n"
+        "       mov     0x1fe, %1\n"
+        "       sllx    %1, 0x20, %1\n"
+        "       sethi   %%hi(0xf000), %2\n"
+        "       or      %2, 0x70, %2\n"
+        "       or      %1, %2, %1\n"   /* %1 = HBIRD_STICK_ADDR */
+        "       add     %1, 8, %2\n"
+        "       ldxa    [%2]%3, %0\n"
+        "       ldxa    [%1]%3, %1\n"
+        "       ldxa    [%2]%3, %2\n"
+        "       sub     %2, %0, %0\n"   /* don't modify %xcc */
+        "       brnz,pn %0, 661b\n"     /* restart to save one register */
+        "        sllx   %2, 32, %2\n"
+        "       or      %2, %1, %0\n"
+        /* Common/not patched code */
+        "       sllx    %0, 1, %0\n"
+        "       srlx    %0, 1, %0\n"    /* Clear TICK_PRIV_BIT */
+        /* Beginning of patch section */
+        "       .section .get_tick_patch, \"ax\"\n"
+        "       .word   661b\n"
+        /* read tick 2 instructions and 11 skipped */
+        "       ba      1f\n"
+        "        rd     %%tick, %0\n"
+        "       .skip   4 * (%4 - 2)\n"
+        "1:\n"
+        /* read stick 2 instructions and 11 skipped */
+        "       ba      1f\n"
+        "        rd     %%asr24, %0\n"
+        "       .skip   4 * (%4 - 2)\n"
+        "1:\n"
+        /* End of patch section */
+        "       .previous\n"
+        : "=&r" (tick), "=&r" (tmp1), "=&r" (tmp2)
+        : "i" (ASI_PHYS_BYPASS_EC_E), "i" (GET_TICK_NINSTR));
+
+        return tick;
+}
+
 #endif /* _SPARC64_TIMER_H */
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 12ebee2d97c7..bdb1447aa1bb 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -277,7 +277,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
         return n;
 }
 
-__must_check long strlen_user(const char __user *str);
 __must_check long strnlen_user(const char __user *str, long n);
 
 #endif /* _ASM_UACCESS_H */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 6096d671aa63..113d84eaa15e 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -194,7 +194,6 @@ unsigned long __must_check __clear_user(void __user *, unsigned long);
 
 #define clear_user __clear_user
 
-__must_check long strlen_user(const char __user *str);
 __must_check long strnlen_user(const char __user *str, long n);
 
 struct pt_regs;
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 9dca7a892978..d1c47e9f0090 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -316,24 +316,33 @@ static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
 }
 
 #define VIO_MAX_TYPE_LEN        32
+#define VIO_MAX_NAME_LEN        32
 #define VIO_MAX_COMPAT_LEN      64
 
 struct vio_dev {
         u64 mp;
         struct device_node *dp;
 
+        char node_name[VIO_MAX_NAME_LEN];
         char type[VIO_MAX_TYPE_LEN];
         char compat[VIO_MAX_COMPAT_LEN];
         int compat_len;
 
         u64 dev_no;
 
-        u64 id;
+        unsigned long port_id;
         unsigned long channel_id;
 
         unsigned int tx_irq;
         unsigned int rx_irq;
         u64 rx_ino;
+        u64 tx_ino;
+
+        /* Handle to the root of "channel-devices" sub-tree in MDESC */
+        u64 cdev_handle;
+
+        /* MD specific data used to identify the vdev in MD */
+        union md_node_info md_node_info;
 
         struct device dev;
 };
@@ -347,6 +356,7 @@ struct vio_driver {
         void (*shutdown)(struct vio_dev *dev);
         unsigned long driver_data;
         struct device_driver driver;
+        bool no_irq;
 };
 
 struct vio_version {
@@ -490,5 +500,6 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
 void vio_port_up(struct vio_driver_state *vio);
 int vio_set_intr(unsigned long dev_ino, int state);
+u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev);
 
 #endif /* _SPARC64_VIO_H */
diff --git a/arch/sparc/include/uapi/asm/ioctls.h b/arch/sparc/include/uapi/asm/ioctls.h
index 06b3f6c3bb9a..6d27398632ea 100644
--- a/arch/sparc/include/uapi/asm/ioctls.h
+++ b/arch/sparc/include/uapi/asm/ioctls.h
@@ -27,7 +27,7 @@
 #define TIOCGRS485 _IOR('T', 0x41, struct serial_rs485)
 #define TIOCSRS485 _IOWR('T', 0x42, struct serial_rs485)
 
-/* Note that all the ioctls that are not available in Linux have a 
+/* Note that all the ioctls that are not available in Linux have a
  * double underscore on the front to: a) avoid some programs to
  * think we support some ioctls under Linux (autoconfiguration stuff)
  */
@@ -88,6 +88,7 @@
 #define TIOCGPTN    _IOR('t', 134, unsigned int) /* Get Pty Number */
 #define TIOCSPTLCK  _IOW('t', 135, int) /* Lock/unlock PTY */
 #define TIOCSIG     _IOW('t', 136, int) /* Generate signal on Pty slave */
+#define TIOCGPTPEER _IOR('t', 137, int) /* Safely open the slave */
 
 /* Little f */
 #define FIOCLEX     _IO('f', 1)
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 3f4ad19d9ec7..186fd8199f54 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -94,6 +94,10 @@
 
 #define SO_COOKIE           0x003b
 
+#define SCM_TIMESTAMPING_PKTINFO    0x003c
+
+#define SO_PEERGROUPS       0x003d
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION          0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT    0x5002
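
A note on the cmpxchg_64.h hunks above: the new xchg16() and __cmpxchg_u8() helpers emulate 2-byte and 1-byte atomics with the 32-bit cas instruction by operating on the aligned 32-bit word that contains the halfword or byte, masking out the other lanes, and retrying until the containing word is swapped without interference; the XOR in the shift computation accounts for sparc64's big-endian lane numbering. The fragment below is a hedged, userspace-only illustration of the same mask/shift arithmetic, not code from the patch: GCC's __atomic_compare_exchange_n builtin stands in for the cas instruction, and the names xchg16_demo and u are invented for the example.

    /* Illustration only (not from the patch): a 16-bit exchange composed from
     * a 32-bit compare-and-swap, mirroring the mask/shift logic of xchg16().
     * Assumes a GCC/Clang host with the __atomic builtins and the
     * __BYTE_ORDER__ predefined macro.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t xchg16_demo(volatile uint16_t *m, uint16_t val)
    {
            uintptr_t addr = (uintptr_t)m;
    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            /* Big-endian lane numbering, as in the patch: ((addr & 2) ^ 2) << 3 */
            int shift = (int)(((addr & 2) ^ 2) << 3);
    #else
            /* Little-endian host: the halfword at offset 2 sits in bits 16..31 */
            int shift = (int)((addr & 2) << 3);
    #endif
            uint32_t mask = (uint32_t)0xffff << shift;
            volatile uint32_t *word = (volatile uint32_t *)(addr & ~(uintptr_t)2);
            uint32_t old32 = *word;

            for (;;) {
                    uint32_t new32 = (old32 & ~mask) | ((uint32_t)val << shift);

                    /* On failure old32 is refreshed with the current contents
                     * of *word and the loop retries, like the cas retry loop. */
                    if (__atomic_compare_exchange_n(word, &old32, new32, 0,
                                                    __ATOMIC_SEQ_CST,
                                                    __ATOMIC_SEQ_CST))
                            return (uint16_t)((old32 & mask) >> shift);
            }
    }

    int main(void)
    {
            /* Two halfwords sharing one naturally aligned 32-bit word. */
            static volatile union {
                    uint32_t word;
                    uint16_t half[2];
            } u = { .half = { 0x1111, 0x2222 } };

            uint16_t prev = xchg16_demo(&u.half[1], 0xbeef);

            assert(prev == 0x2222);
            assert(u.half[0] == 0x1111);  /* neighbouring lane untouched */
            printf("old=%#x new=%#x neighbour=%#x\n",
                   (unsigned)prev, (unsigned)u.half[1], (unsigned)u.half[0]);
            return 0;
    }

Built with something like "gcc -O2 demo.c" (the kernel itself uses -fno-strict-aliasing, which is also the safe choice for this kind of type-punning access), the program prints the old halfword value and shows that the neighbouring halfword in the same 32-bit word is left untouched, which is the property the qspinlock and qrwlock code added elsewhere in this series relies on.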