author | Al Viro <viro@zeniv.linux.org.uk> | 2006-11-14 21:20:28 -0800
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-12-02 21:23:15 -0800
commit | eb5a9658656c3d633cc973ec90ccfd6c439dabb6 (patch)
tree | e3b69b0750410b9389db369aa23f9eddecf96e37 /include/asm-arm/checksum.h
parent | a4f89fb7c072b8592b296c2ba216269c0c96db43 (diff)
[NET]: ARM checksum annotations and cleanups.
* sanitize prototypes, annotate
* kill csum_partial_copy
* usual ntohs->shift, this time in assembler part
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
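Background on the annotation types used throughout this patch (explanatory note, not part of the commit): `__wsum` is the 32-bit partial-checksum type and `__sum16` the folded, complemented 16-bit result; both are sparse `__bitwise` types, so the `__force` casts in the diff mark each deliberate conversion between them. A kernel-style sketch of a portable csum_fold(), equivalent in effect to the ARM assembler version annotated below, shows how the types fit together (the name `generic_csum_fold` is made up here, and `linux/types.h` is assumed to provide the typedefs):

```c
#include <linux/types.h>

/* Hypothetical portable equivalent of csum_fold(): fold a 32-bit __wsum
 * into 16 bits with end-around carry, then complement. */
static inline __sum16 generic_csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;		/* deliberately drop the __bitwise marker */

	sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half into the low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return (__force __sum16)~sum;		/* complemented 16-bit checksum */
}
```

The ARM assembler variant in the diff performs the same fold in the top halfword and then shifts it down, which is why it returns `(__force __sum16)(~(__force u32)sum >> 16)`; both forms yield the same 16-bit value.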
Diffstat (limited to 'include/asm-arm/checksum.h')
-rw-r--r-- | include/asm-arm/checksum.h | 83
1 files changed, 41 insertions, 42 deletions
diff --git a/include/asm-arm/checksum.h b/include/asm-arm/checksum.h
index 747bdd31a74b..8c0bb5bb14ee 100644
--- a/include/asm-arm/checksum.h
+++ b/include/asm-arm/checksum.h
@@ -23,7 +23,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -33,26 +33,18 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * better 64-bit) boundary
  */
 
-unsigned int
-csum_partial_copy_nocheck(const char *src, char *dst, int len, int sum);
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
-unsigned int
-csum_partial_copy_from_user(const char __user *src, char *dst, int len, int sum, int *err_ptr);
-
-/*
- * This is the old (and unsafe) way of doing checksums, a warning message will
- * be printed if it is used and an exception occurs.
- *
- * this functions should go away after some time.
- */
-#define csum_partial_copy(src,dst,len,sum) csum_partial_copy_nocheck(src,dst,len,sum)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
 
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
-static inline unsigned short
-ip_fast_csum(unsigned char * iph, unsigned int ihl)
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, tmp1;
 
@@ -78,14 +70,13 @@ ip_fast_csum(unsigned char * iph, unsigned int ihl)
 	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
 	: "1" (iph), "2" (ihl)
 	: "cc", "memory");
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  * Fold a partial checksum without adding pseudo headers
  */
-static inline unsigned int
-csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__(
 	"adds	%0, %1, %1, lsl #16	@ csum_fold		\n\
@@ -93,21 +84,25 @@ csum_fold(unsigned int sum)
 	: "=r" (sum)
 	: "r" (sum)
 	: "cc");
-	return (~sum) >> 16;
+	return (__force __sum16)(~(__force u32)sum >> 16);
 }
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		   unsigned int proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
 	__asm__(
 	"adds	%0, %1, %2		@ csum_tcpudp_nofold	\n\
-	adcs	%0, %0, %3					\n\
-	adcs	%0, %0, %4					\n\
-	adcs	%0, %0, %5					\n\
+	adcs	%0, %0, %3					\n"
+#ifdef __ARMEB__
+	"adcs	%0, %0, %4					\n"
+#else
+	"adcs	%0, %0, %4, lsl #8				\n"
+#endif
+	"adcs	%0, %0, %5					\n\
 	adc	%0, %0, #0"
 	: "=&r"(sum)
-	: "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
+	: "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
 	: "cc");
 	return sum;
 }
@@ -115,23 +110,27 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned int proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	__asm__(
 	"adds	%0, %1, %2		@ csum_tcpudp_magic	\n\
-	adcs	%0, %0, %3					\n\
-	adcs	%0, %0, %4					\n\
-	adcs	%0, %0, %5					\n\
+	adcs	%0, %0, %3					\n"
+#ifdef __ARMEB__
+	"adcs	%0, %0, %4					\n"
+#else
+	"adcs	%0, %0, %4, lsl #8				\n"
+#endif
+	"adcs	%0, %0, %5					\n\
 	adc	%0, %0, #0		\n\
 	adds	%0, %0, %0, lsl #16	\n\
 	addcs	%0, %0, #0x10000	\n\
 	mvn	%0, %0"
 	: "=&r"(sum)
-	: "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
+	: "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
 	: "cc");
-	return sum >> 16;
+	return (__force __sum16)((__force u32)sum >> 16);
 }
 
@@ -139,20 +138,20 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short
-ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16
+ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-extern unsigned long
-__csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
-		__u32 proto, unsigned int sum);
+extern __wsum
+__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
+		__be32 proto, __wsum sum);
 
-static inline unsigned short int
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
-		unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len,
+		unsigned short proto, __wsum sum)
 {
 	return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len), htonl(proto), sum));
 }
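A note on the "usual ntohs->shift" item in the commit message (illustration only, not part of the patch): on little-endian ARM the new `adcs %0, %0, %4, lsl #8` adds `len << 8` into the 32-bit accumulator instead of byte-swapping `len` first. Because the accumulator is later folded to 16 bits with end-around carry, `0x10000` counts as `1`, so `len << 8` and `ntohs(len)` contribute the same amount; on big-endian (`__ARMEB__`) the value is already in network byte order and is added unshifted. A small standalone check of that identity (`fold16` and `swap16` are hypothetical helpers written for this sketch):

```c
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement accumulator to 16 bits (end-around carry). */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* What ntohs()/htons() does on a little-endian host. */
static uint16_t swap16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	/* Exhaustively confirm that len << 8 is congruent to ntohs(len) under the fold. */
	for (uint32_t len = 0; len <= 0xffff; len++) {
		if (fold16(len << 8) != fold16(swap16((uint16_t)len))) {
			printf("mismatch at len=%u\n", len);
			return 1;
		}
	}
	puts("len << 8 matches ntohs(len) under the 16-bit end-around-carry fold");
	return 0;
}
```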