0001
0002 #ifndef _ASM_X86_CHECKSUM_64_H
0003 #define _ASM_X86_CHECKSUM_64_H
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/compiler.h>
0012 #include <linux/uaccess.h>
0013 #include <asm/byteorder.h>
0014
0015
0016
0017
0018
0019
0020
0021
0022
/*
 * csum_fold - fold a 32-bit partial checksum into a 16-bit ones'-complement
 * checksum and invert it, yielding the value placed in a protocol header.
 *
 * @sum: 32-bit unfolded checksum
 *
 * The low 16 bits (shifted into the high half, operand %1) are added to the
 * high 16 bits (operand "0"); `adcl $0xffff` realizes the end-around carry
 * of ones'-complement addition. The final 16-bit result is the complemented
 * top half of the register.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	asm(" addl %1,%0\n"		/* high half += low half (<<16) */
	    " adcl $0xffff,%0"		/* propagate end-around carry */
	    : "=r" (sum)
	    : "r" ((__force u32)sum << 16),
	      "0" ((__force u32)sum & 0xffff0000));
	return (__force __sum16)(~(__force u32)sum >> 16);
}
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
/*
 * ip_fast_csum - compute the standard internet checksum of an IP header.
 *
 * @iph: pointer to the IP header (read as 32-bit words)
 * @ihl: header length in 32-bit words
 *
 * Returns the folded, complemented 16-bit checksum over the first @ihl
 * words at @iph.
 *
 * NOTE(review): the `subl $4 / jbe 2f` early exit assumes @ihl >= 5 (the
 * minimum legal IPv4 header); for ihl <= 4 the code jumps straight to
 * label 2 having loaded only the first word, returning a meaningless
 * value — callers must guarantee ihl >= 5.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;

	asm("  movl (%1), %0\n"		/* sum = word 0 */
	    "  subl $4, %2\n"		/* ihl -= 4; remaining loop count */
	    "  jbe 2f\n"		/* bail out if ihl was <= 4 */
	    "  addl 4(%1), %0\n"	/* words 1..3, accumulating carries */
	    "  adcl 8(%1), %0\n"
	    "  adcl 12(%1), %0\n"
	    "1: adcl 16(%1), %0\n"	/* next word; %1 slides up 4 bytes/iter */
	    "  lea 4(%1), %1\n"		/* lea: advance pointer w/o touching CF */
	    "  decl %2\n"
	    "  jne 1b\n"
	    "  adcl $0, %0\n"		/* fold in final carry */
	    "  movl %0, %2\n"		/* fold 32 -> 16 bits */
	    "  shrl $16, %0\n"
	    "  addw %w2, %w0\n"
	    "  adcl $0, %0\n"
	    "  notl %0\n"		/* complement per RFC 1071 */
	    "2:"
	/* "memory" clobber: the header bytes are read through %1 but are
	 * not expressed in the constraints, so the compiler must not cache
	 * them across the asm. */
	    : "=r" (sum), "=r" (iph), "=r" (ihl)
	    : "1" (iph), "2" (ihl)
	    : "memory");
	return (__force __sum16)sum;
}
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
/*
 * csum_tcpudp_nofold - accumulate the TCP/UDP pseudo-header into @sum
 * without folding to 16 bits.
 *
 * @saddr: source address (network byte order)
 * @daddr: destination address (network byte order)
 * @len:   transport-layer length (host order)
 * @proto: IP protocol number
 * @sum:   starting checksum
 *
 * Adds daddr, saddr and (len + proto) << 8 into @sum with carry
 * propagation, then folds in the final carry with `adcl $0`.
 * NOTE(review): the << 8 positions the length/protocol pair to match the
 * big-endian pseudo-header layout when summed on a little-endian CPU —
 * confirm against the generic csum_tcpudp_nofold if touching this.
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
		   __u8 proto, __wsum sum)
{
	asm("  addl %1, %0\n"
	    "  adcl %2, %0\n"
	    "  adcl %3, %0\n"
	    "  adcl $0, %0\n"		/* end-around carry */
	    : "=r" (sum)
	    : "g" (daddr), "g" (saddr),
	      "g" ((len + proto)<<8), "0" (sum));
	return sum;
}
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113 static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
0114 __u32 len, __u8 proto,
0115 __wsum sum)
0116 {
0117 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
0118 }
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130 extern __wsum csum_partial(const void *buff, int len, __wsum sum);
0131
0132
0133 extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
0134
0135 extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
0136 extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len);
0137 extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147 extern __sum16 ip_compute_csum(const void *buff, int len);
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162 struct in6_addr;
0163
0164 #define _HAVE_ARCH_IPV6_CSUM 1
0165 extern __sum16
0166 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
0167 __u32 len, __u8 proto, __wsum sum);
0168
/*
 * add32_with_carry - ones'-complement 32-bit addition.
 *
 * @a: first addend
 * @b: second addend
 *
 * Returns a + b with the end-around carry folded back in
 * (the `adcl $0` adds the carry flag produced by the addl).
 */
static inline unsigned add32_with_carry(unsigned a, unsigned b)
{
	asm("addl %2,%0\n\t"
	    "adcl $0,%0"
	    : "=r" (a)
	    : "0" (a), "rm" (b));
	return a;
}
0177
0178 #define HAVE_ARCH_CSUM_ADD
0179 static inline __wsum csum_add(__wsum csum, __wsum addend)
0180 {
0181 return (__force __wsum)add32_with_carry((__force unsigned)csum,
0182 (__force unsigned)addend);
0183 }
0184
0185 #endif