#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

#include <linux/bitops.h>
#include <linux/in6.h>

/*
 * Computes the checksum of a memory block at src, length len,
 * while copying the block to dst.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
                                      int len);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
                                    int len);

#define _HAVE_ARCH_CSUM_AND_COPY
#define csum_partial_copy_nocheck(src, dst, len)	\
        csum_partial_copy_generic((src), (dst), (len))
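
/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * checksumming a buffer while copying it in one pass, instead of a
 * separate memcpy() followed by csum_partial():
 *
 *        __wsum csum = csum_partial_copy_nocheck(src_buf, dst_buf, len);
 */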

/*
 * Turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
        u32 tmp = (__force u32)sum;

        /*
         * Swap the two 16-bit halves of sum: any carry from adding the
         * halves propagates into the upper half, which therefore ends
         * up holding the correct end-around-carry sum.
         */
        return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
}
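
/*
 * Worked example for csum_fold() (illustrative): with sum = 0x12345678,
 * rol32(sum, 16) = 0x56781234 and the 32-bit add gives 0x68ac68ac, so
 * the upper half holds the 16-bit total 0x1234 + 0x5678 = 0x68ac; any
 * carry between the halves lands in the upper half, matching one's
 * complement end-around carry. ~0x68ac68ac >> 16 = 0x9753, the
 * complemented 16-bit checksum.
 */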

/* Fold a 64-bit sum to 32 bits: add the two halves plus the carry. */
static inline u32 from64to32(u64 x)
{
        return (x + ror64(x, 32)) >> 32;
}
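
/*
 * Worked example for from64to32() (illustrative): with
 * x = 0x00000002ffffffff, ror64(x, 32) = 0xffffffff00000002; the
 * 64-bit add yields 0x0000000200000001, and >> 32 gives 0x00000002,
 * i.e. hi + lo with the end-around carry applied.
 */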
0056
0057 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
0058 __u8 proto, __wsum sum)
0059 {
0060 #ifdef __powerpc64__
0061 u64 s = (__force u32)sum;
0062
0063 s += (__force u32)saddr;
0064 s += (__force u32)daddr;
0065 #ifdef __BIG_ENDIAN__
0066 s += proto + len;
0067 #else
0068 s += (proto + len) << 8;
0069 #endif
0070 return (__force __wsum) from64to32(s);
0071 #else
0072 __asm__("\n\
0073 addc %0,%0,%1 \n\
0074 adde %0,%0,%2 \n\
0075 adde %0,%0,%3 \n\
0076 addze %0,%0 \n\
0077 "
0078 : "=r" (sum)
0079 : "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
0080 return sum;
0081 #endif
0082 }

/*
 * Computes the checksum of the TCP/UDP pseudo-header.
 * Returns a 16-bit checksum, already complemented.
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
                                        __u8 proto, __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
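
/*
 * Illustrative sketch (hypothetical variables): computing a UDP
 * checksum from the payload sum plus the IPv4 pseudo-header, roughly
 * as the generic UDP code does:
 *
 *        __wsum csum = csum_partial(udp_hdr, udp_len, 0);
 *        __sum16 check = csum_tcpudp_magic(saddr, daddr, udp_len,
 *                                          IPPROTO_UDP, csum);
 */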

#define HAVE_ARCH_CSUM_ADD
static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
        u64 res = (__force u64)csum;

        res += (__force u64)addend;
        return (__force __wsum)((u32)res + (res >> 32));
#else
        /* Let constant-zero operands fold away at compile time. */
        if (__builtin_constant_p(csum) && csum == 0)
                return addend;
        if (__builtin_constant_p(addend) && addend == 0)
                return csum;

        asm("addc %0,%0,%1;"
            "addze %0,%0;"
            : "+r" (csum) : "r" (addend) : "xer");
        return csum;
#endif
}
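
/*
 * Illustrative sketch: csum_add() combines partial checksums of
 * adjacent blocks ("hdr" and "payload" are hypothetical; hdr_len must
 * be even for the byte lanes to stay aligned):
 *
 *        __wsum s = csum_partial(hdr, hdr_len, 0);
 *        s = csum_add(s, csum_partial(payload, pay_len, 0));
 */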

#define HAVE_ARCH_CSUM_SHIFT
static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
        /* rotate sum to align it with a 16b boundary */
        return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3);
}
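
/*
 * Example (illustrative): a block checksummed on its own but destined
 * for an odd offset has its bytes swapped within each 16-bit word;
 * rotating the 32-bit sum by 8 bits compensates, which is how the
 * generic csum_block_add() uses this helper:
 *
 *        sum = csum_add(sum, csum_shift(block_csum, offset));
 */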

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.  ihl is the number
 * of 32-bit words and is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
        const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
        unsigned int i;
        u64 s = *(const u32 *)iph;

        for (i = 0; i < ihl - 1; i++, ptr++)
                s += *ptr;
        return (__force __wsum)from64to32(s);
#else
        __wsum sum, tmp;

        asm("mtctr %3;"
            "addc %0,%4,%5;"
            "1: lwzu %1, 4(%2);"
            "adde %0,%0,%1;"
            "bdnz 1b;"
            "addze %0,%0;"
            : "=r" (sum), "=r" (tmp), "+b" (ptr)
            : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
            : "ctr", "xer", "memory");

        return sum;
#endif
}

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        return csum_fold(ip_fast_csum_nofold(iph, ihl));
}
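
/*
 * Illustrative sketch ("iph" is a hypothetical struct iphdr pointer):
 * a well-formed IPv4 header, checksum field included, folds to zero:
 *
 *        if (ip_fast_csum((const void *)iph, iph->ihl))
 *                goto csum_error;
 */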

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * Returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic.
 *
 * This function must be called with even lengths, except
 * for the last fragment, which may be odd.
 *
 * It's best to have buff aligned on a 32-bit boundary.
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static __always_inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
        if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
                if (len == 2)
                        sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
                if (len >= 4)
                        sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
                if (len == 6)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u16 *)(buff + 4));
                if (len >= 8)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u32 *)(buff + 4));
                if (len == 10)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u16 *)(buff + 8));
                if (len >= 12)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u32 *)(buff + 8));
                if (len == 14)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u16 *)(buff + 12));
                if (len >= 16)
                        sum = csum_add(sum, (__force __wsum)
                                            *(const u32 *)(buff + 12));
        } else if (__builtin_constant_p(len) && (len & 3) == 0) {
                sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
        } else {
                sum = __csum_partial(buff, len, sum);
        }
        return sum;
}
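
/*
 * Note (illustrative): with a compile-time constant length the branches
 * above collapse at build time, e.g. csum_partial(buff, 8, sum) reduces
 * to two csum_add() operations and csum_partial(buff, 40, sum) becomes
 * ip_fast_csum_nofold(buff, 10); only variable lengths reach the
 * out-of-line __csum_partial().
 */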

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
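
/*
 * Illustrative sketch (hypothetical variables): checksumming an ICMP
 * message before sending, as icmp.c-style users do:
 *
 *        icmph->checksum = 0;
 *        icmph->checksum = ip_compute_csum(icmph, len);
 */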

#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                        const struct in6_addr *daddr,
                        __u32 len, __u8 proto, __wsum sum);
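
/*
 * Illustrative sketch, mirroring the IPv4 helper above (hypothetical
 * variables): folding the IPv6 pseudo-header into a payload sum:
 *
 *        __wsum csum = csum_partial(udp_hdr, udp_len, 0);
 *        __sum16 check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 *                                        udp_len, IPPROTO_UDP, csum);
 */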

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CHECKSUM_H */