/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H

/* checksum.h: IP/UDP/TCP checksum routines on sparc64. */

#include <linux/in6.h>
#include <linux/uaccess.h>

/* Compute the checksum of a memory block at buff, length len, and add
 * in "sum" (32-bit).  Returns a 32-bit number suitable for feeding into
 * itself or csum_tcpudp_magic.  This function must be called with even
 * lengths, except for the last fragment, which may be odd; it's best to
 * have buff aligned on a 32-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

/* Same as csum_partial, but also copy the data from src to dst while
 * checksumming; the _from_user and _to_user variants copy across the
 * user/kernel boundary.  Here it is even more important to align src
 * and dst on a 32-bit (or even better, 64-bit) boundary.
 */
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len);

/* Checksum an IPv4 header of ihl 32-bit words.  ihl is always 5 or
 * greater, almost always 5, and iph is word aligned the majority of
 * the time.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
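
/* Typical use (an illustrative sketch, not part of this header): fill in
 * the checksum field of a freshly built IPv4 header, as ip_send_check()
 * does:
 *
 *	iph->check = 0;
 *	iph->check = ip_fast_csum(iph, iph->ihl);
 */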

/* Fold a 32-bit partial checksum down to 16 bits, without adding
 * pseudo headers, and return it complemented.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	__asm__ __volatile__(
"	addcc	%0, %1, %1\n"
"	srl	%1, 16, %1\n"
"	addc	%1, %%g0, %1\n"
"	xnor	%%g0, %1, %0\n"
	: "=&r" (sum), "=r" (tmp)
	: "0" (sum), "1" ((__force u32)sum << 16)
	: "cc");
	return (__force __sum16)sum;
}
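
/* For reference, a portable sketch of the same fold (illustrative only;
 * it mirrors the generic C csum_fold rather than anything sparc-specific):
 *
 *	u32 t = (__force u32)sum;
 *	t = (t & 0xffff) + (t >> 16);	// fold high half into low half
 *	t = (t & 0xffff) + (t >> 16);	// absorb any end-around carry
 *	return (__force __sum16)~t;
 */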

/* Sum the TCP/UDP pseudo-header components (source address, destination
 * address, protocol and length) into sum, without the final fold.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	__asm__ __volatile__(
"	addcc	%1, %0, %0\n"
"	addccc	%2, %0, %0\n"
"	addccc	%3, %0, %0\n"
"	addc	%0, %%g0, %0\n"
	: "=r" (sum), "=r" (saddr)
	: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
	: "cc");
	return sum;
}
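
/* Portable sketch of the same computation (illustrative only; it mirrors
 * the generic C version).  Because sparc64 is big-endian, "proto + len"
 * can be added directly, with no byte swapping:
 *
 *	u64 s = (__force u32)sum;
 *	s += (__force u32)saddr;
 *	s += (__force u32)daddr;
 *	s += proto + len;
 *	while (s >> 32)
 *		s = (s & 0xffffffff) + (s >> 32);
 *	return (__force __wsum)s;
 */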

/* Compute the checksum of the TCP/UDP pseudo-header; returns a 16-bit
 * checksum, already complemented.
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
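
/* Usage sketch (illustrative, not part of this header): verify a received
 * TCP segment by checksumming the TCP header plus payload, then adding in
 * the pseudo-header; a result of zero means the checksum is valid:
 *
 *	__wsum csum = csum_partial(th, tcp_len, 0);
 *	if (csum_tcpudp_magic(iph->saddr, iph->daddr, tcp_len,
 *			      IPPROTO_TCP, csum) != 0)
 *		goto csum_error;
 */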

#define _HAVE_ARCH_IPV6_CSUM

static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto, __wsum sum)
{
	__asm__ __volatile__ (
"	addcc	%3, %4, %%g7\n"
"	addccc	%5, %%g7, %%g7\n"
"	lduw	[%2 + 0x0c], %%g2\n"
"	lduw	[%2 + 0x08], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	lduw	[%2 + 0x04], %%g2\n"
"	addccc	%%g3, %%g7, %%g7\n"
"	lduw	[%2 + 0x00], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	lduw	[%1 + 0x0c], %%g2\n"
"	addccc	%%g3, %%g7, %%g7\n"
"	lduw	[%1 + 0x08], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	lduw	[%1 + 0x04], %%g2\n"
"	addccc	%%g3, %%g7, %%g7\n"
"	lduw	[%1 + 0x00], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	addccc	%%g3, %%g7, %0\n"
"	addc	0, %0, %0\n"
	: "=&r" (sum)
	: "r" (saddr), "r" (daddr), "r" (htonl(len)),
	  "r" (htonl(proto)), "r" (sum)
	: "g2", "g3", "g7", "cc");

	return csum_fold(sum);
}
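
/* What the asm above computes, as a portable sketch (illustrative only):
 * add len and proto (in network byte order) plus all eight 32-bit words
 * of the two IPv6 addresses into sum, with end-around carry, then fold:
 *
 *	u64 s = (__force u32)sum;
 *	int i;
 *
 *	s += (__force u32)htonl(len) + (__force u32)htonl(proto);
 *	for (i = 0; i < 4; i++)
 *		s += (__force u32)saddr->s6_addr32[i] +
 *		     (__force u32)daddr->s6_addr32[i];
 *	while (s >> 32)
 *		s = (s & 0xffffffff) + (s >> 32);
 *	return csum_fold((__force __wsum)s);
 */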

/* This routine is used for miscellaneous IP-like checksums,
 * mainly in icmp.c.
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define HAVE_ARCH_CSUM_ADD
/* Add two partial checksums, folding the end-around carry back in. */
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
	__asm__ __volatile__(
		"addcc %0, %1, %0\n"
		"addx %0, %%g0, %0"
		: "=r" (csum)
		: "r" (addend), "0" (csum));

	return csum;
}
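
/* Portable equivalent, for reference (it mirrors the generic csum_add in
 * include/net/checksum.h; illustrative only):
 *
 *	u32 res = (__force u32)csum;
 *	res += (__force u32)addend;
 *	return (__force __wsum)(res + (res < (__force u32)addend));
 */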

#endif /* !(__SPARC64_CHECKSUM_H) */