Diffstat (limited to 'arch/sparc/include/asm/checksum_64.h')
 arch/sparc/include/asm/checksum_64.h | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 019b9615e43c..b3c8b9ded364 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -29,7 +29,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern __wsum csum_partial(const void * buff, int len, __wsum sum);
+__wsum csum_partial(const void * buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from user space while it
  * checksums
@@ -37,12 +37,12 @@ extern __wsum csum_partial(const void * buff, int len, __wsum sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
-					int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum);
 
-extern long __csum_partial_copy_from_user(const void __user *src,
-					  void *dst, int len,
-					  __wsum sum);
+long __csum_partial_copy_from_user(const void __user *src,
+				   void *dst, int len,
+				   __wsum sum);
 
 static inline __wsum
 csum_partial_copy_from_user(const void __user *src,
@@ -59,9 +59,9 @@ csum_partial_copy_from_user(const void __user *src,
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-extern long __csum_partial_copy_to_user(const void *src,
-					void __user *dst, int len,
-					__wsum sum);
+long __csum_partial_copy_to_user(const void *src,
+				 void __user *dst, int len,
+				 __wsum sum);
 
 static inline __wsum
 csum_and_copy_to_user(const void *src,
@@ -77,7 +77,7 @@ csum_and_copy_to_user(const void *src,
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
  */
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /* Fold a partial checksum without adding pseudo headers. */
 static inline __sum16 csum_fold(__wsum sum)
@@ -96,9 +96,9 @@ static inline __sum16 csum_fold(__wsum sum)
 }
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-                                        unsigned int len,
-                                        unsigned short proto,
-                                        __wsum sum)
+					unsigned int len,
+					unsigned short proto,
+					__wsum sum)
 {
 	__asm__ __volatile__(
 "	addcc	%1, %0, %0\n"
@@ -116,9 +116,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
  * returns a 16-bit checksum, already complemented
  */
 static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-                                        unsigned short len,
-                                        unsigned short proto,
-                                        __wsum sum)
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
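
The patch is purely mechanical: in C, a function declaration at file scope has external linkage by default, so `extern` on a prototype is redundant and dropping it changes nothing for callers or the linker. A minimal standalone sketch of the equivalence (the my_csum_* names are hypothetical, not from the kernel header):

    /* extern_demo.c -- illustrative only, not kernel code.
     * A function prototype has external linkage whether or not it is
     * spelled with `extern`, so the two declarations below are identical.
     */
    #include <stdio.h>

    extern unsigned int my_csum_a(const void *buff, int len); /* explicit extern */
    unsigned int my_csum_b(const void *buff, int len);        /* extern implied */

    /* Toy byte sum standing in for a real checksum routine. */
    static unsigned int sum_bytes(const void *buff, int len)
    {
            const unsigned char *p = buff;
            unsigned int sum = 0;

            while (len-- > 0)
                    sum += *p++;
            return sum;
    }

    unsigned int my_csum_a(const void *buff, int len) { return sum_bytes(buff, len); }
    unsigned int my_csum_b(const void *buff, int len) { return sum_bytes(buff, len); }

    int main(void)
    {
            const char data[] = "abc";

            /* Both resolve to identical external symbols: prints "294 294". */
            printf("%u %u\n", my_csum_a(data, 3), my_csum_b(data, 3));
            return 0;
    }

Both declarations produce the same symbol table entry; nm on the resulting object would list my_csum_a and my_csum_b identically either way.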
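
For context on the helpers being re-declared: csum_fold() collapses the 32-bit one's-complement accumulator (a __wsum) into the final complemented 16-bit checksum, and csum_tcpudp_magic() is simply that fold applied to csum_tcpudp_nofold()'s pseudo-header sum. The sparc64 header performs the fold in inline assembly; below is a portable C sketch of the same RFC 1071 fold (fold_csum is a made-up name, not the kernel routine):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable sketch of the fold: add the high half into the low half
     * with end-around carry, then complement (RFC 1071).  Illustrative
     * only -- the kernel's csum_fold() does this in sparc assembly. */
    static uint16_t fold_csum(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);  /* fold high half in */
            sum = (sum & 0xffff) + (sum >> 16);  /* absorb any carry */
            return (uint16_t)~sum;               /* final complement */
    }

    int main(void)
    {
            /* 0x12345678 -> 0x5678 + 0x1234 = 0x68ac; ~0x68ac = 0x9753 */
            printf("0x%04x\n", fold_csum(0x12345678u));
            return 0;
    }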