[Source: arch/arm/include/asm/checksum.h, Linux kernel, rendered via the OSCL-LXR cross-referencer]


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  *  arch/arm/include/asm/checksum.h
0004  *
0005  * IP checksum routines
0006  *
0007  * Copyright (C) Original authors of ../asm-i386/checksum.h
0008  * Copyright (C) 1996-1999 Russell King
0009  */
0010 #ifndef __ASM_ARM_CHECKSUM_H
0011 #define __ASM_ARM_CHECKSUM_H
0012 
0013 #include <linux/in6.h>
0014 
0015 /*
0016  * computes the checksum of a memory block at buff, length len,
0017  * and adds in "sum" (32-bit)
0018  *
0019  * returns a 32-bit number suitable for feeding into itself
0020  * or csum_tcpudp_magic
0021  *
0022  * this function must be called with even lengths, except
0023  * for the last fragment, which may be odd
0024  *
0025  * it's best to have buff aligned on a 32-bit boundary
0026  */
0027 __wsum csum_partial(const void *buff, int len, __wsum sum);
0028 
0029 /*
0030  * the same as csum_partial, but copies from src while it
0031  * checksums, and handles user-space pointer exceptions correctly, when needed.
0032  *
0033  * here even more important to align src and dst on a 32-bit (or even
0034  * better 64-bit) boundary
0035  */
0036 
0037 __wsum
0038 csum_partial_copy_nocheck(const void *src, void *dst, int len);
0039 
0040 __wsum
0041 csum_partial_copy_from_user(const void __user *src, void *dst, int len);
0042 
0043 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
0044 #define _HAVE_ARCH_CSUM_AND_COPY
0045 static inline
0046 __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
0047 {
0048     if (!access_ok(src, len))
0049         return 0;
0050 
0051     return csum_partial_copy_from_user(src, dst, len);
0052 }
0053 
0054 /*
0055  *  Fold a partial checksum without adding pseudo headers
 *
 *  Adding the 32-bit sum to itself rotated by 16 leaves the top half of
 *  the result holding (low half + high half) plus any end-around carry
 *  from the bottom half; the final checksum is the one's complement of
 *  that top half.
0056  */
0057 static inline __sum16 csum_fold(__wsum sum)
0058 {
0059     __asm__(
0060     "add    %0, %1, %1, ror #16 @ csum_fold"
0061     : "=r" (sum)
0062     : "r" (sum)
0063     : "cc");
     /* top 16 bits now hold the folded sum; complement and extract them */
0064     return (__force __sum16)(~(__force u32)sum >> 16);
0065 }
0066 
0067 /*
0068  *  This is a version of ip_compute_csum() optimized for IP headers,
0069  *  which always checksum on 4 octet boundaries.
 *
 *  @iph: pointer to the IP header (word loads below assume it is
 *        32-bit aligned — TODO confirm with callers)
 *  @ihl: header length in 32-bit words; the "sub %2, %2, #5" shows the
 *        code expects at least the 5-word minimum IPv4 header
 *
 *  The first five words are summed with an adds/adcs chain; any extra
 *  option words are folded in by the loop at label 1.  The remaining
 *  word count is decremented with tst/subne/bne instead of subs so the
 *  carry flag consumed by adcs is never clobbered (see the inline "@"
 *  comments).  The tst #15 exit test is sufficient because a valid
 *  IPv4 ihl is at most 15 words.
0070  */
0071 static inline __sum16
0072 ip_fast_csum(const void *iph, unsigned int ihl)
0073 {
0074     unsigned int tmp1;
0075     __wsum sum;
0076 
0077     __asm__ __volatile__(
0078     "ldr    %0, [%1], #4        @ ip_fast_csum      \n\
0079     ldr %3, [%1], #4                    \n\
0080     sub %2, %2, #5                  \n\
0081     adds    %0, %0, %3                  \n\
0082     ldr %3, [%1], #4                    \n\
0083     adcs    %0, %0, %3                  \n\
0084     ldr %3, [%1], #4                    \n\
0085 1:  adcs    %0, %0, %3                  \n\
0086     ldr %3, [%1], #4                    \n\
0087     tst %2, #15         @ do this carefully \n\
0088     subne   %2, %2, #1      @ without destroying    \n\
0089     bne 1b          @ the carry flag    \n\
0090     adcs    %0, %0, %3                  \n\
0091     adc %0, %0, #0"
0092     : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
0093     : "1" (iph), "2" (ihl)
0094     : "cc", "memory");
     /* fold the 32-bit running sum down to the complemented 16-bit result */
0095     return csum_fold(sum);
0096 }
0097 
/*
 * 32-bit one's-complement sum of the TCP/UDP pseudo-header fields
 * (source/destination address, length, protocol) plus the running sum.
 * Result is unfolded; feed it to csum_fold() for the final 16-bit value.
 */
0098 static inline __wsum
0099 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
0100            __u8 proto, __wsum sum)
0101 {
     /* combine len and proto into a single addend for the carry chain */
0102     u32 lenprot = len + proto;
     /*
      * When the caller passes a compile-time zero sum, drop it from the
      * carry chain entirely: three adds instead of four.
      */
0103     if (__builtin_constant_p(sum) && sum == 0) {
0104         __asm__(
0105         "adds   %0, %1, %2  @ csum_tcpudp_nofold0   \n\t"
0106 #ifdef __ARMEB__
0107         "adcs   %0, %0, %3              \n\t"
0108 #else
         /*
          * NOTE(review): on little-endian the ror #8 appears to rotate
          * lenprot into the byte lanes a big-endian pseudo-header sum
          * expects — confirm against the __ARMEB__ variant above.
          */
0109         "adcs   %0, %0, %3, ror #8          \n\t"
0110 #endif
         /* adc #0 folds the final carry back into the sum */
0111         "adc    %0, %0, #0"
0112         : "=&r" (sum)
0113         : "r" (daddr), "r" (saddr), "r" (lenprot)
0114         : "cc");
0115     } else {
0116         __asm__(
0117         "adds   %0, %1, %2  @ csum_tcpudp_nofold    \n\t"
0118         "adcs   %0, %0, %3              \n\t"
0119 #ifdef __ARMEB__
0120         "adcs   %0, %0, %4              \n\t"
0121 #else
0122         "adcs   %0, %0, %4, ror #8          \n\t"
0123 #endif
0124         "adc    %0, %0, #0"
0125         : "=&r"(sum)
0126         : "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
0127         : "cc");
0128     }
0129     return sum;
0130 }   
0131 /*
0132  * computes the checksum of the TCP/UDP pseudo-header
0133  * returns a 16-bit checksum, already complemented
0134  */
0135 static inline __sum16
0136 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
0137           __u8 proto, __wsum sum)
0138 {
0139     return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
0140 }
0141 
0142 
0143 /*
0144  * this routine is used for miscellaneous IP-like checksums, mainly
0145  * in icmp.c
0146  */
0147 static inline __sum16
0148 ip_compute_csum(const void *buff, int len)
0149 {
0150     return csum_fold(csum_partial(buff, len, 0));
0151 }
0152 
0153 #define _HAVE_ARCH_IPV6_CSUM
0154 extern __wsum
0155 __csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
0156         __be32 proto, __wsum sum);
0157 
0158 static inline __sum16
0159 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
0160         __u32 len, __u8 proto, __wsum sum)
0161 {
0162     return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
0163                        htonl(proto), sum));
0164 }
0165 #endif