/* zstd_fast.c — extracted from an LXR code-browser page; web navigation chrome removed. */
0001 /*
0002  * Copyright (c) Yann Collet, Facebook, Inc.
0003  * All rights reserved.
0004  *
0005  * This source code is licensed under both the BSD-style license (found in the
0006  * LICENSE file in the root directory of this source tree) and the GPLv2 (found
0007  * in the COPYING file in the root directory of this source tree).
0008  * You may select, at your option, one of the above-listed licenses.
0009  */
0010 
0011 #include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
0012 #include "zstd_fast.h"
0013 
0014 
0015 void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
0016                         const void* const end,
0017                         ZSTD_dictTableLoadMethod_e dtlm)
0018 {
0019     const ZSTD_compressionParameters* const cParams = &ms->cParams;
0020     U32* const hashTable = ms->hashTable;
0021     U32  const hBits = cParams->hashLog;
0022     U32  const mls = cParams->minMatch;
0023     const BYTE* const base = ms->window.base;
0024     const BYTE* ip = base + ms->nextToUpdate;
0025     const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
0026     const U32 fastHashFillStep = 3;
0027 
0028     /* Always insert every fastHashFillStep position into the hash table.
0029      * Insert the other positions if their hash entry is empty.
0030      */
0031     for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
0032         U32 const curr = (U32)(ip - base);
0033         size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
0034         hashTable[hash0] = curr;
0035         if (dtlm == ZSTD_dtlm_fast) continue;
0036         /* Only load extra positions for ZSTD_dtlm_full */
0037         {   U32 p;
0038             for (p = 1; p < fastHashFillStep; ++p) {
0039                 size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
0040                 if (hashTable[hash] == 0) {  /* not yet filled */
0041                     hashTable[hash] = curr + p;
0042     }   }   }   }
0043 }
0044 
0045 
/* ZSTD_compressBlock_fast_generic() :
 * Greedy "fast" match finder over the current prefix only (no dictionary,
 * no extDict).  Checks two adjacent positions (ip0, ip1) per loop iteration,
 * emits sequences through ZSTD_storeSeq(), and returns the number of
 * trailing literal bytes left after the last match.
 * rep[] carries the two repeat offsets in and out of the block.
 * `mls` is a template parameter (minimum match length, 4..7) so each
 * instantiation hashes a compile-time-constant width. */
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 : the +1 guarantees stepSize >= 2 overall */
    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* anchor = istart;   /* start of not-yet-emitted literals */
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;   /* an out-of-window repcode, restored at block end */

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
    ip0 += (ip0 == prefixStart);   /* first position has nothing before it to match */
    ip1 = ip0 + 1;
    {   U32 const curr = (U32)(ip0 - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
        U32 const maxRep = curr - windowLow;
        /* disable (zero out) any repcode that would point before the window */
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }

    /* Main Search Loop */
#ifdef __INTEL_COMPILER
    /* From intel 'The vector pragma indicates that the loop should be
     * vectorized if it is legal to do so'. Can be used together with
     * #pragma ivdep (but have opted to exclude that because intel
     * warns against using it).*/
    #pragma vector always
#endif
    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
        size_t mLength;
        BYTE const* ip2 = ip0 + 2;
        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
        U32 const val0 = MEM_read32(ip0);
        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
        U32 const val1 = MEM_read32(ip1);
        U32 const current0 = (U32)(ip0-base);
        U32 const current1 = (U32)(ip1-base);
        U32 const matchIndex0 = hashTable[h0];
        U32 const matchIndex1 = hashTable[h1];
        BYTE const* repMatch = ip2 - offset_1;   /* repcode candidate, probed at ip0+2 */
        const BYTE* match0 = base + matchIndex0;
        const BYTE* match1 = base + matchIndex1;
        U32 offcode;

#if defined(__aarch64__)
        PREFETCH_L1(ip0+256);
#endif

        hashTable[h0] = current0;   /* update hash table */
        hashTable[h1] = current1;   /* update hash table */

        assert(ip0 + 1 == ip1);

        /* Repcode check first : cheaper than a regular match, and offcode 0
         * signals "repeat offset" to ZSTD_storeSeq(). */
        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
            /* extend the rep match one byte backward when the preceding bytes agree */
            mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
            ip0 = ip2 - mLength;
            match0 = repMatch - mLength;
            mLength += 4;
            offcode = 0;
            goto _match;
        }
        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
            /* found a regular match */
            goto _offset;
        }
        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
            /* found a regular match after one literal */
            ip0 = ip1;
            match0 = match1;
            goto _offset;
        }
        /* no match : skip forward, accelerating as the literal run grows */
        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
            assert(step >= 2);
            ip0 += step;
            ip1 += step;
            continue;
        }
_offset: /* Requires: ip0, match0 */
        /* Compute the offset code : regular offsets are shifted by ZSTD_REP_MOVE
         * to leave room for the repcode values */
        offset_2 = offset_1;
        offset_1 = (U32)(ip0-match0);
        offcode = offset_1 + ZSTD_REP_MOVE;
        mLength = 4;
        /* Count the backwards match length */
        while (((ip0>anchor) & (match0>prefixStart))
             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */

_match: /* Requires: ip0, match0, offcode */
        /* Count the forward length */
        mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
        /* match found */
        ip0 += mLength;
        anchor = ip0;

        if (ip0 <= ilimit) {
            /* Fill Table : record two positions skipped over by the match */
            assert(base+current0+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

            if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
                /* greedily emit back-to-back repcode matches with zero literals */
                while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
                    /* store sequence */
                    size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
                    { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                    ip0 += rLength;
                    ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
                    anchor = ip0;
                    continue;   /* faster when present (confirmed on gcc-8) ... (?) */
        }   }   }
        ip1 = ip0 + 1;
    }

    /* save reps for next block ; a disabled (0) rep falls back to the saved value */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
0184 
0185 
0186 size_t ZSTD_compressBlock_fast(
0187         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
0188         void const* src, size_t srcSize)
0189 {
0190     U32 const mls = ms->cParams.minMatch;
0191     assert(ms->dictMatchState == NULL);
0192     switch(mls)
0193     {
0194     default: /* includes case 3 */
0195     case 4 :
0196         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
0197     case 5 :
0198         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
0199     case 6 :
0200         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
0201     case 7 :
0202         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
0203     }
0204 }
0205 
/* ZSTD_compressBlock_fast_dictMatchState_generic() :
 * Fast match finder with an attached dictionary (dms).  Candidates are
 * looked up first in the current prefix's hash table, then — when the
 * prefix candidate is unusable — in the dictionary's own hash table.
 * Dictionary indices are translated into the local index space via
 * dictIndexDelta.  Returns the size of the trailing literals. */
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;   /* start of not-yet-emitted literals */
    const U32   prefixStartIndex = ms->window.dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    /* attached dictionary's match state : searched with its own cParams */
    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
    const U32* const dictHashTable = dms->hashTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    /* delta to translate a dict index into the local index space */
    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
    const U32 dictHLog             = dictCParams->hashLog;

    /* if a dictionary is still attached, it necessarily means that
     * it is within window size. So we just check it. */
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
    assert(endIndex - prefixStartIndex <= maxDistance);
    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */

    /* ensure there will be no underflow
     * when translating a dict index into a local index */
    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
    ip += (dictAndPrefixLength == 0);
    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
        U32 const curr = (U32)(ip-base);
        U32 const matchIndex = hashTable[h];
        const BYTE* match = base + matchIndex;
        const U32 repIndex = curr + 1 - offset_1;
        /* rep candidate may live in the dictionary segment or in the prefix */
        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
        hashTable[h] = curr;   /* update hash table */

        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            /* repcode match at ip+1 ; may span the dict/prefix boundary */
            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
        } else if ( (matchIndex <= prefixStartIndex) ) {
            /* prefix candidate unusable : look up the dictionary's hash table */
            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
            U32 const dictMatchIndex = dictHashTable[dictHash];
            const BYTE* dictMatch = dictBase + dictMatchIndex;
            if (dictMatchIndex <= dictStartIndex ||
                MEM_read32(dictMatch) != MEM_read32(ip)) {
                /* no match anywhere : skip ahead, accelerating with run length */
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            } else {
                /* found a dict match */
                U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
                while (((ip>anchor) & (dictMatch>dictStart))
                     && (ip[-1] == dictMatch[-1])) {
                    ip--; dictMatch--; mLength++;
                } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
            }
        } else if (MEM_read32(match) != MEM_read32(ip)) {
            /* it's not a match, and we're not going to check the dictionary */
            assert(stepSize >= 1);
            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
            continue;
        } else {
            /* found a regular match */
            U32 const offset = (U32)(ip-match);
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            while (((ip>anchor) & (match>prefixStart))
                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
            offset_2 = offset_1;
            offset_1 = offset;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }

        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            assert(base+curr+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);

            /* check immediate repcode : emit zero-literal rep sequences greedily */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                        dictBase - dictIndexDelta + repIndex2 :
                        base + repIndex2;
                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
0353 
0354 size_t ZSTD_compressBlock_fast_dictMatchState(
0355         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
0356         void const* src, size_t srcSize)
0357 {
0358     U32 const mls = ms->cParams.minMatch;
0359     assert(ms->dictMatchState != NULL);
0360     switch(mls)
0361     {
0362     default: /* includes case 3 */
0363     case 4 :
0364         return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
0365     case 5 :
0366         return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
0367     case 6 :
0368         return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
0369     case 7 :
0370         return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
0371     }
0372 }
0373 
0374 
/* ZSTD_compressBlock_fast_extDict_generic() :
 * Fast match finder when part of the window lives in an external segment
 * (window.dictBase).  Matches and repcodes may start in either segment,
 * so lengths are counted with ZSTD_count_2segments().  If the external
 * segment is no longer reachable (maxDistance), falls back to the
 * prefix-only variant.  Returns the size of the trailing literals. */
static size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;   /* start of not-yet-emitted literals */
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];

    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);

    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
        const U32    matchIndex = hashTable[h];
        /* index below prefixStartIndex => candidate lives in the ext segment */
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE*  match = matchBase + matchIndex;
        const U32    curr = (U32)(ip-base);
        const U32    repIndex = curr + 1 - offset_1;
        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        hashTable[h] = curr;   /* update hash table */
        DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
        assert(offset_1 <= curr +1);   /* check repIndex */

        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            /* repcode match at ip+1 ; may span the segment boundary */
            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
            ip += rLength;
            anchor = ip;
        } else {
            if ( (matchIndex < dictStartIndex) ||
                 (MEM_read32(match) != MEM_read32(ip)) ) {
                /* no usable match : skip ahead, accelerating with run length */
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            }
            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 const offset = curr - matchIndex;
                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                offset_2 = offset_1; offset_1 = offset;  /* update offset history */
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
                ip += mLength;
                anchor = ip;
        }   }

        if (ip <= ilimit) {
            /* Fill Table : record two positions skipped over by the match */
            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
            /* check immediate repcode : emit zero-literal rep sequences greedily */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
0477 
0478 
0479 size_t ZSTD_compressBlock_fast_extDict(
0480         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
0481         void const* src, size_t srcSize)
0482 {
0483     U32 const mls = ms->cParams.minMatch;
0484     switch(mls)
0485     {
0486     default: /* includes case 3 */
0487     case 4 :
0488         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
0489     case 5 :
0490         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
0491     case 6 :
0492         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
0493     case 7 :
0494         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
0495     }
0496 }