########################################################################
# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
#
# Copyright (c) 2013, Intel Corporation
#
# Authors:
#     Erdinc Ozturk <erdinc.ozturk@intel.com>
#     Vinodh Gopal <vinodh.gopal@intel.com>
#     James Guilford <james.guilford@intel.com>
#     Tim Chen <tim.c.chen@linux.intel.com>
#
# This software is available to you under a choice of one of two
# licenses.  You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the
#   distribution.
#
# * Neither the name of the Intel Corporation nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#       Reference paper titled "Fast CRC Computation for Generic
#   Polynomials Using PCLMULQDQ Instruction"
#       URL: http://www.intel.com/content/dam/www/public/us/en/documents
#  /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
#

#include <linux/linkage.h>

.text

#define     init_crc    %edi
#define     buf     %rsi
#define     len     %rdx

#define     FOLD_CONSTS %xmm10
#define     BSWAP_MASK  %xmm11

# Fold reg1, reg2 into the next 32 data bytes, storing the result back into
# reg1, reg2.
.macro  fold_32_bytes   offset, reg1, reg2
    movdqu  \offset(buf), %xmm9
    movdqu  \offset+16(buf), %xmm12
    pshufb  BSWAP_MASK, %xmm9
    pshufb  BSWAP_MASK, %xmm12
    movdqa  \reg1, %xmm8
    movdqa  \reg2, %xmm13
    pclmulqdq   $0x00, FOLD_CONSTS, \reg1
    pclmulqdq   $0x11, FOLD_CONSTS, %xmm8
    pclmulqdq   $0x00, FOLD_CONSTS, \reg2
    pclmulqdq   $0x11, FOLD_CONSTS, %xmm13
    pxor    %xmm9 , \reg1
    xorps   %xmm8 , \reg1
    pxor    %xmm12, \reg2
    xorps   %xmm13, \reg2
.endm
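# Each fold_32_bytes invocation computes X*x^(8*128) + Y (mod G(x)) for each of
# its two 128-bit registers X, where Y is the corresponding 16 new data bytes:
# writing X = X_hi*x^64 + X_lo gives
#   X*x^(8*128) = X_hi*x^(8*128+64) + X_lo*x^(8*128),
# which is congruent (mod G(x)) to
#   X_hi*(x^(8*128+64) mod G(x)) + X_lo*(x^(8*128) mod G(x)),
# exactly the two PCLMULQDQ products above ($0x11 selects the high qwords,
# $0x00 the low qwords) XORed into the new data.  pxor and xorps are
# interchangeable bitwise XORs on XMM registers here.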

# Fold src_reg into dst_reg.
.macro  fold_16_bytes   src_reg, dst_reg
    movdqa  \src_reg, %xmm8
    pclmulqdq   $0x11, FOLD_CONSTS, \src_reg
    pclmulqdq   $0x00, FOLD_CONSTS, %xmm8
    pxor    %xmm8, \dst_reg
    xorps   \src_reg, \dst_reg
.endm
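# fold_16_bytes applies the same identity with the fold distance set by
# whichever FOLD_CONSTS is currently loaded:
#   dst += src_hi*(x^(d+64) mod G(x)) + src_lo*(x^d mod G(x))  (mod G(x)),
# where d is the number of data bits separating src from dst.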

#
# u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
#
# Assumes len >= 16.
#
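# For reference, the result is equivalent to the following bit-serial C
# sketch (MSB-first CRC-16 with generator polynomial G(x) = 0x18bb7; the
# function name is illustrative, not a kernel API):
#
#   u16 crc_t10dif_ref(u16 crc, const u8 *buf, size_t len)
#   {
#       while (len--) {
#           crc ^= (u16)*buf++ << 8;          /* bring in the next byte */
#           for (int i = 0; i < 8; i++)       /* reduce bit by bit */
#               crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
#       }
#       return crc;
#   }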
.align 16
SYM_FUNC_START(crc_t10dif_pcl)

    movdqa  .Lbswap_mask(%rip), BSWAP_MASK

    # For sizes less than 256 bytes, we can't fold 128 bytes at a time.
    cmp $256, len
    jl  .Lless_than_256_bytes

    # Load the first 128 data bytes.  Byte swapping is necessary to make the
    # bit order match the polynomial coefficient order.
    movdqu  16*0(buf), %xmm0
    movdqu  16*1(buf), %xmm1
    movdqu  16*2(buf), %xmm2
    movdqu  16*3(buf), %xmm3
    movdqu  16*4(buf), %xmm4
    movdqu  16*5(buf), %xmm5
    movdqu  16*6(buf), %xmm6
    movdqu  16*7(buf), %xmm7
    add $128, buf
    pshufb  BSWAP_MASK, %xmm0
    pshufb  BSWAP_MASK, %xmm1
    pshufb  BSWAP_MASK, %xmm2
    pshufb  BSWAP_MASK, %xmm3
    pshufb  BSWAP_MASK, %xmm4
    pshufb  BSWAP_MASK, %xmm5
    pshufb  BSWAP_MASK, %xmm6
    pshufb  BSWAP_MASK, %xmm7

    # XOR the first 16 data *bits* with the initial CRC value.
    pxor    %xmm8, %xmm8
    pinsrw  $7, init_crc, %xmm8
    pxor    %xmm8, %xmm0
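    # pinsrw $7 places init_crc in bits 127:112 of xmm8, i.e. aligned with
    # the first 16 data bits after the byte swap.  For an MSB-first CRC,
    # XORing the seed into the first 16 message bits is equivalent to
    # starting the bit-serial algorithm with its state preloaded to init_crc.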

    movdqa  .Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS

    # Subtract 128 for the 128 data bytes just consumed.  Subtract another
    # 128 to simplify the termination condition of the following loop.
    sub $256, len

    # While >= 128 data bytes remain (not counting xmm0-7), fold the 128
    # bytes xmm0-7 into them, storing the result back into xmm0-7.
.Lfold_128_bytes_loop:
    fold_32_bytes   0, %xmm0, %xmm1
    fold_32_bytes   32, %xmm2, %xmm3
    fold_32_bytes   64, %xmm4, %xmm5
    fold_32_bytes   96, %xmm6, %xmm7
    add $128, buf
    sub $128, len
    jge .Lfold_128_bytes_loop
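    # In effect, each pass multiplies the 1024-bit state in xmm0-7 by x^1024
    # (mod G(x)) and XORs in the next 128 data bytes, so xmm0-7 together stay
    # congruent to the entire message consumed so far.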

    # Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7.

    # Fold across 64 bytes.
    movdqa  .Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS
    fold_16_bytes   %xmm0, %xmm4
    fold_16_bytes   %xmm1, %xmm5
    fold_16_bytes   %xmm2, %xmm6
    fold_16_bytes   %xmm3, %xmm7
    # Fold across 32 bytes.
    movdqa  .Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS
    fold_16_bytes   %xmm4, %xmm6
    fold_16_bytes   %xmm5, %xmm7
    # Fold across 16 bytes.
    movdqa  .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
    fold_16_bytes   %xmm6, %xmm7
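    # Each step halves the live state: 8 registers fold into the upper 4,
    # then 4 into 2, then 2 into 1, using constants for fold distances of
    # 64, 32, and 16 bytes respectively.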

    # Add 128 to get the correct number of data bytes remaining in 0...127
    # (not counting xmm7), following the previous extra subtraction by 128.
    # Then subtract 16 to simplify the termination condition of the
    # following loop.
    add $128-16, len

    # While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes
    # xmm7 into them, storing the result back into xmm7.
    jl  .Lfold_16_bytes_loop_done
.Lfold_16_bytes_loop:
    movdqa  %xmm7, %xmm8
    pclmulqdq   $0x11, FOLD_CONSTS, %xmm7
    pclmulqdq   $0x00, FOLD_CONSTS, %xmm8
    pxor    %xmm8, %xmm7
    movdqu  (buf), %xmm0
    pshufb  BSWAP_MASK, %xmm0
    pxor    %xmm0 , %xmm7
    add $16, buf
    sub $16, len
    jge .Lfold_16_bytes_loop

.Lfold_16_bytes_loop_done:
    # Add 16 to get the correct number of data bytes remaining in 0...15
    # (not counting xmm7), following the previous extra subtraction by 16.
    add $16, len
    je  .Lreduce_final_16_bytes

.Lhandle_partial_segment:
    # Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first 16
    # bytes are in xmm7 and the rest are the remaining data in 'buf'.  To do
    # this without needing a fold constant for each possible 'len', redivide
    # the bytes into a first chunk of 'len' bytes and a second chunk of 16
    # bytes, then fold the first chunk into the second.

    movdqa  %xmm7, %xmm2

    # xmm1 = last 16 original data bytes
    movdqu  -16(buf, len), %xmm1
    pshufb  BSWAP_MASK, %xmm1

    # xmm2 = high order part of second chunk: xmm7 left-shifted by 'len' bytes.
    lea .Lbyteshift_table+16(%rip), %rax
    sub len, %rax
    movdqu  (%rax), %xmm0
    pshufb  %xmm0, %xmm2

    # xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes.
    pxor    .Lmask1(%rip), %xmm0
    pshufb  %xmm0, %xmm7

    # xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes),
    # then '16-len' bytes from xmm2 (high-order bytes).
    pblendvb    %xmm2, %xmm1    # xmm0 is implicit

    # Fold the first chunk into the second chunk, storing the result in xmm7.
    movdqa  %xmm7, %xmm8
    pclmulqdq   $0x11, FOLD_CONSTS, %xmm7
    pclmulqdq   $0x00, FOLD_CONSTS, %xmm8
    pxor    %xmm8, %xmm7
    pxor    %xmm1, %xmm7
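    # xmm7 again holds a single 16-byte value congruent to the entire message
    # processed so far, so execution falls through to the same final reduction
    # used when len was a multiple of 16.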

.Lreduce_final_16_bytes:
    # Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC.

    # Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
    movdqa  .Lfinal_fold_consts(%rip), FOLD_CONSTS

    # Fold the high 64 bits into the low 64 bits, while also multiplying by
    # x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
    # whose low 48 bits are 0.
    movdqa  %xmm7, %xmm0
    pclmulqdq   $0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x))
    pslldq  $8, %xmm0
    pxor    %xmm0, %xmm7              # + low bits * x^64
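    # Why this works: writing M(x) = H*x^64 + L,
    #   x^64*M(x) = H*x^128 + L*x^64 = x^48*(H*x^80) + L*x^64,
    # which is congruent (mod G(x)) to x^48*(H*(x^80 mod G(x))) + L*x^64;
    # both terms have their low 48 bits clear, the alignment the Barrett
    # reduction below expects.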

    # Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
    # value congruent to x^64 * M(x) and whose low 48 bits are 0.
    movdqa  %xmm7, %xmm0
    pand    .Lmask2(%rip), %xmm0          # zero high 32 bits
    psrldq  $12, %xmm7            # extract high 32 bits
    pclmulqdq   $0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x))
    pxor    %xmm0, %xmm7              # + low bits

    # Load G(x) and floor(x^48 / G(x)).
    movdqa  .Lbarrett_reduction_consts(%rip), FOLD_CONSTS

    # Use Barrett reduction to compute the final CRC value.
    movdqa  %xmm7, %xmm0
    pclmulqdq   $0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x))
    psrlq   $32, %xmm7            # /= x^32
    pclmulqdq   $0x00, FOLD_CONSTS, %xmm7 # *= G(x)
    psrlq   $48, %xmm0
    pxor    %xmm7, %xmm0             # + low 16 nonzero bits
    # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
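    # Barrett reduction avoids an explicit division by G(x): the carry-less
    # multiply by the precomputed mu = floor(x^48 / G(x)) estimates the
    # quotient of the remaining value by G(x), the multiply by G(x) converts
    # that quotient back into the multiple to cancel, and the XOR leaves
    # only the 16-bit remainder.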

    pextrw  $0, %xmm0, %eax
    RET

.align 16
.Lless_than_256_bytes:
    # Checksumming a buffer of length 16...255 bytes

    # Load the first 16 data bytes.
    movdqu  (buf), %xmm7
    pshufb  BSWAP_MASK, %xmm7
    add $16, buf

    # XOR the first 16 data *bits* with the initial CRC value.
    pxor    %xmm0, %xmm0
    pinsrw  $7, init_crc, %xmm0
    pxor    %xmm0, %xmm7

    movdqa  .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
    cmp $16, len
    je  .Lreduce_final_16_bytes     # len == 16
    sub $32, len
    jge .Lfold_16_bytes_loop        # 32 <= len <= 255
    add $16, len
    jmp .Lhandle_partial_segment    # 17 <= len <= 31
SYM_FUNC_END(crc_t10dif_pcl)
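# A sketch of how callers are expected to wrap this routine: kernel code must
# own the FPU/SIMD state before touching XMM registers, and short buffers or
# unusable-SIMD contexts fall back to a generic implementation.  (This mirrors
# typical glue code; the surrounding names are illustrative, not guaranteed by
# this file.)
#
#   if (len >= 16 && crypto_simd_usable()) {
#       kernel_fpu_begin();
#       crc = crc_t10dif_pcl(crc, buf, len);
#       kernel_fpu_end();
#   } else {
#       crc = crc_t10dif_generic(crc, buf, len);  /* table-driven fallback */
#   }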

.section    .rodata, "a", @progbits
.align 16

# Fold constants precomputed from the polynomial 0x18bb7
# G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
.Lfold_across_128_bytes_consts:
    .quad       0x0000000000006123  # x^(8*128) mod G(x)
    .quad       0x0000000000002295  # x^(8*128+64) mod G(x)
.Lfold_across_64_bytes_consts:
    .quad       0x0000000000001069  # x^(4*128) mod G(x)
    .quad       0x000000000000dd31  # x^(4*128+64) mod G(x)
.Lfold_across_32_bytes_consts:
    .quad       0x000000000000857d  # x^(2*128) mod G(x)
    .quad       0x0000000000007acc  # x^(2*128+64) mod G(x)
.Lfold_across_16_bytes_consts:
    .quad       0x000000000000a010  # x^(1*128) mod G(x)
    .quad       0x0000000000001faa  # x^(1*128+64) mod G(x)
.Lfinal_fold_consts:
    .quad       0x1368000000000000  # x^48 * (x^48 mod G(x))
    .quad       0x2d56000000000000  # x^48 * (x^80 mod G(x))
.Lbarrett_reduction_consts:
    .quad       0x0000000000018bb7  # G(x)
    .quad       0x00000001f65a57f8  # floor(x^48 / G(x))
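
# The fold constants above can be regenerated with a bit-serial "x^n mod G(x)"
# helper; a minimal C sketch (the helper name is illustrative):
#
#   u32 xn_mod_g(unsigned int n)      /* x^n mod G(x), G(x) = 0x18bb7 */
#   {
#       u32 r = 1;                    /* x^0 */
#       while (n--) {
#           r <<= 1;                  /* multiply by x */
#           if (r & 0x10000)          /* degree hit 16: subtract G(x) */
#               r ^= 0x18bb7;
#       }
#       return r;                     /* e.g. xn_mod_g(8*128) == 0x6123 */
#   }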

.section    .rodata.cst16.mask1, "aM", @progbits, 16
.align 16
.Lmask1:
    .octa   0x80808080808080808080808080808080

.section    .rodata.cst16.mask2, "aM", @progbits, 16
.align 16
.Lmask2:
    .octa   0x00000000FFFFFFFFFFFFFFFFFFFFFFFF

.section    .rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
    .octa   0x000102030405060708090A0B0C0D0E0F
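# (.Lbswap_mask is the pshufb index vector {15, 14, ..., 1, 0} in memory
# order: result byte i takes source byte 15-i, reversing the register's
# byte order.)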

.section    .rodata.cst32.byteshift_table, "aM", @progbits, 32
.align 16
# For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - len]
# is the index vector to shift left by 'len' bytes, and is also {0x80, ...,
# 0x80} XOR the index vector to shift right by '16 - len' bytes.
.Lbyteshift_table:
    .byte        0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
    .byte       0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
    .byte        0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
    .byte        0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0
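# Example: for len = 2 the vector at &.Lbyteshift_table[14] is
# {0x8e, 0x8f, 0x00, 0x01, ..., 0x0d}.  Used directly with pshufb it zeroes
# result bytes 0-1 (control bytes with the high bit set) and moves source
# byte i-2 to byte i, i.e. a left shift by 2 bytes; XORed with .Lmask1 (all
# 0x80) it becomes {0x0e, 0x0f, 0x80, ..., 0x80}, moving source bytes 14-15
# down to bytes 0-1 and zeroing the rest, i.e. a right shift by 16-2 = 14
# bytes.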