Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright (c) Yann Collet, Facebook, Inc.
0003  * All rights reserved.
0004  *
0005  * This source code is licensed under both the BSD-style license (found in the
0006  * LICENSE file in the root directory of this source tree) and the GPLv2 (found
0007  * in the COPYING file in the root directory of this source tree).
0008  * You may select, at your option, one of the above-listed licenses.
0009  */
0010 
0011 #include "zstd_ldm.h"
0012 
0013 #include "../common/debug.h"
0014 #include <linux/xxhash.h>
0015 #include "zstd_fast.h"          /* ZSTD_fillHashTable() */
0016 #include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
0017 #include "zstd_ldm_geartab.h"
0018 
0019 #define LDM_BUCKET_SIZE_LOG 3
0020 #define LDM_MIN_MATCH_LENGTH 64
0021 #define LDM_HASH_RLOG 7
0022 
/* State of the gear rolling hash used to pick split points.
 * `rolling` is the current hash value (carried across ZSTD_ldm_gear_feed()
 * calls so chunked input behaves like one stream); `stopMask` selects which
 * hash bits must be zero for a position to count as a split point. */
typedef struct {
    U64 rolling;
    U64 stopMask;
} ldmRollingHashState_t;
0027 
0028 /* ZSTD_ldm_gear_init():
0029  *
0030  * Initializes the rolling hash state such that it will honor the
0031  * settings in params. */
0032 static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
0033 {
0034     unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
0035     unsigned hashRateLog = params->hashRateLog;
0036 
0037     state->rolling = ~(U32)0;
0038 
0039     /* The choice of the splitting criterion is subject to two conditions:
0040      *   1. it has to trigger on average every 2^(hashRateLog) bytes;
0041      *   2. ideally, it has to depend on a window of minMatchLength bytes.
0042      *
0043      * In the gear hash algorithm, bit n depends on the last n bytes;
0044      * so in order to obtain a good quality splitting criterion it is
0045      * preferable to use bits with high weight.
0046      *
0047      * To match condition 1 we use a mask with hashRateLog bits set
0048      * and, because of the previous remark, we make sure these bits
0049      * have the highest possible weight while still respecting
0050      * condition 2.
0051      */
0052     if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
0053         state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
0054     } else {
0055         /* In this degenerate case we simply honor the hash rate. */
0056         state->stopMask = ((U64)1 << hashRateLog) - 1;
0057     }
0058 }
0059 
/* ZSTD_ldm_gear_feed():
 *
 * Registers in the splits array all the split points found in the first
 * size bytes following the data pointer. This function terminates when
 * either all the data has been processed or LDM_BATCH_SIZE splits are
 * present in the splits array.
 *
 * Precondition: The splits array must not be full.
 * Returns: The number of bytes processed. */
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
                                 BYTE const* data, size_t size,
                                 size_t* splits, unsigned* numSplits)
{
    size_t n;
    U64 hash, mask;

    /* Work on locals; the rolling hash is written back on exit so a later
     * call resumes exactly where this one stopped. */
    hash = state->rolling;
    mask = state->stopMask;
    n = 0;

/* Advance the gear hash by one byte; record a split when all masked bits
 * are zero (expected once every 2^hashRateLog bytes on average).  Jumps
 * to `done` once the batch of splits is full. */
#define GEAR_ITER_ONCE() do { \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1; \
        if (UNLIKELY((hash & mask) == 0)) { \
            splits[*numSplits] = n; \
            *numSplits += 1; \
            if (*numSplits == LDM_BATCH_SIZE) \
                goto done; \
        } \
    } while (0)

    /* Main loop unrolled x4, then a tail loop for the last <4 bytes. */
    while (n + 3 < size) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < size) {
        GEAR_ITER_ONCE();
    }

#undef GEAR_ITER_ONCE

done:
    state->rolling = hash;
    return n;
}
0107 
/* ZSTD_ldm_adjustParameters():
 * Fills in every zero ("use default") LDM parameter from the compression
 * parameters and compile-time defaults, and clamps bucketSizeLog so a
 * bucket can never exceed the whole hash table. */
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams)
{
    params->windowLog = cParams->windowLog;
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
    /* 0 means "not set": fall back to the compile-time defaults. */
    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
    if (params->hashLog == 0) {
        /* NOTE(review): assumes windowLog >= LDM_HASH_RLOG; the unsigned
         * subtraction would wrap otherwise — confirm callers guarantee it. */
        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
    }
    if (params->hashRateLog == 0) {
        /* Default rate: one insertion per window-to-table size ratio. */
        params->hashRateLog = params->windowLog < params->hashLog
                                   ? 0
                                   : params->windowLog - params->hashLog;
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
0127 
0128 size_t ZSTD_ldm_getTableSize(ldmParams_t params)
0129 {
0130     size_t const ldmHSize = ((size_t)1) << params.hashLog;
0131     size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
0132     size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
0133     size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
0134                            + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
0135     return params.enableLdm ? totalSize : 0;
0136 }
0137 
0138 size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
0139 {
0140     return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
0141 }
0142 
/* ZSTD_ldm_getBucket() :
 *  Returns a pointer to the start of the bucket associated with hash.
 *  The table is laid out as 2^(hashLog - bucketSizeLog) buckets of
 *  2^bucketSizeLog entries each. */
static ldmEntry_t* ZSTD_ldm_getBucket(
        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
{
    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
}
0150 
0151 /* ZSTD_ldm_insertEntry() :
0152  *  Insert the entry with corresponding hash into the hash table */
0153 static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
0154                                  size_t const hash, const ldmEntry_t entry,
0155                                  ldmParams_t const ldmParams)
0156 {
0157     BYTE* const pOffset = ldmState->bucketOffsets + hash;
0158     unsigned const offset = *pOffset;
0159 
0160     *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
0161     *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
0162 
0163 }
0164 
0165 /* ZSTD_ldm_countBackwardsMatch() :
0166  *  Returns the number of bytes that match backwards before pIn and pMatch.
0167  *
0168  *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
0169 static size_t ZSTD_ldm_countBackwardsMatch(
0170             const BYTE* pIn, const BYTE* pAnchor,
0171             const BYTE* pMatch, const BYTE* pMatchBase)
0172 {
0173     size_t matchLength = 0;
0174     while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
0175         pIn--;
0176         pMatch--;
0177         matchLength++;
0178     }
0179     return matchLength;
0180 }
0181 
/* ZSTD_ldm_countBackwardsMatch_2segments() :
 *  Returns the number of bytes that match backwards from pMatch,
 *  even with the backwards match spanning 2 different segments.
 *
 *  On reaching `pMatchBase`, start counting from mEnd */
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
                    const BYTE* pIn, const BYTE* pAnchor,
                    const BYTE* pMatch, const BYTE* pMatchBase,
                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
{
    /* First count within the match's own segment. */
    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
        /* If backwards match is entirely in the extDict or prefix, immediately return */
        return matchLength;
    }
    /* The backwards match hit the start of the prefix: continue counting
     * from the end of the extDict segment. */
    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
    return matchLength;
}
0202 
/* ZSTD_ldm_fillFastTables() :
 *
 *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
 *  This is similar to ZSTD_loadDictionaryContent.
 *
 *  The tables for the other strategies are filled within their
 *  block compressors.
 *
 *  Returns: always 0 (kept for signature symmetry with other fillers). */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
                                      void const* end)
{
    const BYTE* const iend = (const BYTE*)end;

    switch(ms->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
        break;

    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
        break;

    /* Lazy/bt/opt strategies index their tables lazily inside their own
     * block compressors, so nothing to do here. */
    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        break;
    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    return 0;
}
0239 
/* ZSTD_ldm_fillHashTable():
 * Indexes [ip, iend) into the LDM hash table without emitting sequences
 * (used when loading a dictionary/prefix).  Every gear split point at
 * least minMatchLength bytes past istart inserts one entry keyed by the
 * xxh64 of the preceding minMatchLength bytes. */
void ZSTD_ldm_fillHashTable(
            ldmState_t* ldmState, const BYTE* ip,
            const BYTE* iend, ldmParams_t const* params)
{
    U32 const minMatchLength = params->minMatchLength;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const istart = ip;
    ldmRollingHashState_t hashState;
    size_t* const splits = ldmState->splitIndices;
    unsigned numSplits;

    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");

    ZSTD_ldm_gear_init(&hashState, params);
    while (ip < iend) {
        size_t hashed;
        unsigned n;

        /* Collect a batch of split points (at most LDM_BATCH_SIZE). */
        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            /* Skip splits too close to istart to have minMatchLength
             * bytes of history behind them. */
            if (ip + splits[n] >= istart + minMatchLength) {
                BYTE const* const split = ip + splits[n] - minMatchLength;
                U64 const xxhash = xxh64(split, minMatchLength, 0);
                /* Low hBits select the bucket; high 32 bits are kept as a
                 * checksum to cheaply reject false bucket hits later. */
                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
                ldmEntry_t entry;

                entry.offset = (U32)(split - base);
                entry.checksum = (U32)(xxhash >> 32);
                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
            }
        }

        ip += hashed;
    }
}
0278 
0279 
0280 /* ZSTD_ldm_limitTableUpdate() :
0281  *
0282  *  Sets cctx->nextToUpdate to a position corresponding closer to anchor
0283  *  if it is far way
0284  *  (after a long match, only update tables a limited amount). */
0285 static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
0286 {
0287     U32 const curr = (U32)(anchor - ms->window.base);
0288     if (curr > ms->nextToUpdate + 1024) {
0289         ms->nextToUpdate =
0290             curr - MIN(512, curr - ms->nextToUpdate - 1024);
0291     }
0292 }
0293 
/* ZSTD_ldm_generateSequences_internal():
 * Scans [src, src+srcSize) for long-distance matches and appends them to
 * rawSeqStore.  Split points come from the gear rolling hash; each split
 * is looked up in the LDM hash table (checksum-filtered), extended
 * forwards and backwards, and the best in-bucket match is emitted.
 *
 * Returns: the number of trailing bytes not covered by any emitted
 * sequence (iend - anchor), or an error code (dstSize_tooSmall) when the
 * sequence store fills up. */
static size_t ZSTD_ldm_generateSequences_internal(
        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    /* LDM parameters */
    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
    U32 const minMatchLength = params->minMatchLength;
    U32 const entsPerBucket = 1U << params->bucketSizeLog;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    /* Prefix and extDict parameters */
    U32 const dictLimit = ldmState->window.dictLimit;
    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
    BYTE const* const lowPrefixPtr = base + dictLimit;
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    BYTE const* const ilimit = iend - HASH_READ_SIZE;
    /* Input positions */
    BYTE const* anchor = istart;
    BYTE const* ip = istart;
    /* Rolling hash state */
    ldmRollingHashState_t hashState;
    /* Arrays for staged-processing */
    size_t* const splits = ldmState->splitIndices;
    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
    unsigned numSplits;

    /* Too short to contain even one match: everything is literals. */
    if (srcSize < minMatchLength)
        return iend - anchor;

    /* Initialize the rolling hash state with the first minMatchLength bytes */
    ZSTD_ldm_gear_init(&hashState, params);
    {
        size_t n = 0;

        /* Warm-up only; splits found here are discarded (numSplits is
         * reset each round and never consumed). */
        while (n < minMatchLength) {
            numSplits = 0;
            n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n,
                                    splits, &numSplits);
        }
        ip += minMatchLength;
    }

    while (ip < ilimit) {
        size_t hashed;
        unsigned n;

        /* Stage 1: collect a batch of split points. */
        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
                                    splits, &numSplits);

        /* Stage 2: hash every candidate and prefetch its bucket so the
         * lookup loop below does not stall on cache misses. */
        for (n = 0; n < numSplits; n++) {
            BYTE const* const split = ip + splits[n] - minMatchLength;
            U64 const xxhash = xxh64(split, minMatchLength, 0);
            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));

            candidates[n].split = split;
            candidates[n].hash = hash;
            candidates[n].checksum = (U32)(xxhash >> 32);
            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
            PREFETCH_L1(candidates[n].bucket);
        }

        /* Stage 3: search each candidate's bucket and emit sequences. */
        for (n = 0; n < numSplits; n++) {
            size_t forwardMatchLength = 0, backwardMatchLength = 0,
                   bestMatchLength = 0, mLength;
            BYTE const* const split = candidates[n].split;
            U32 const checksum = candidates[n].checksum;
            U32 const hash = candidates[n].hash;
            ldmEntry_t* const bucket = candidates[n].bucket;
            ldmEntry_t const* cur;
            ldmEntry_t const* bestEntry = NULL;
            ldmEntry_t newEntry;

            newEntry.offset = (U32)(split - base);
            newEntry.checksum = checksum;

            /* If a split point would generate a sequence overlapping with
             * the previous one, we merely register it in the hash table and
             * move on */
            if (split < anchor) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                /* Cheap rejections: checksum mismatch or entry pointing
                 * below the valid window. */
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                if (extDict) {
                    /* Match may live in the extDict segment or the prefix;
                     * pick the base/bounds of whichever segment holds it. */
                    BYTE const* const curMatchBase =
                        cur->offset < dictLimit ? dictBase : base;
                    BYTE const* const pMatch = curMatchBase + cur->offset;
                    BYTE const* const matchEnd =
                        cur->offset < dictLimit ? dictEnd : iend;
                    BYTE const* const lowMatchPtr =
                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
                    curForwardMatchLength =
                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
                } else { /* !extDict */
                    BYTE const* const pMatch = base + cur->offset;
                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
                }
                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;

                /* Keep the longest total (forward + backward) match. */
                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }

            /* No match found -- insert an entry into the hash table
             * and process the next candidate match */
            if (bestEntry == NULL) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            /* Match found */
            mLength = forwardMatchLength + backwardMatchLength;
            {
                U32 const offset = (U32)(split - base) - bestEntry->offset;
                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;

                /* Out of sequence storage */
                if (rawSeqStore->size == rawSeqStore->capacity)
                    return ERROR(dstSize_tooSmall);
                /* Literals run from anchor up to where the backward
                 * extension begins. */
                seq->litLength = (U32)(split - backwardMatchLength - anchor);
                seq->matchLength = (U32)mLength;
                seq->offset = offset;
                rawSeqStore->size++;
            }

            /* Insert the current entry into the hash table --- it must be
             * done after the previous block to avoid clobbering bestEntry */
            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);

            anchor = split + forwardMatchLength;
        }

        ip += hashed;
    }

    return iend - anchor;
}
0457 
0458 /*! ZSTD_ldm_reduceTable() :
0459  *  reduce table indexes by `reducerValue` */
0460 static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
0461                                  U32 const reducerValue)
0462 {
0463     U32 u;
0464     for (u = 0; u < size; u++) {
0465         if (table[u].offset < reducerValue) table[u].offset = 0;
0466         else table[u].offset -= reducerValue;
0467     }
0468 }
0469 
/* ZSTD_ldm_generateSequences():
 * Drives ZSTD_ldm_generateSequences_internal() over the input in 1 MB
 * chunks, handling window overflow correction and max-distance
 * enforcement per chunk, and threading leftover literals from one chunk
 * into the first sequence of the next.
 * Returns 0 on success or a zstd error code. */
size_t ZSTD_ldm_generateSequences(
        ldmState_t* ldmState, rawSeqStore_t* sequences,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    U32 const maxDist = 1U << params->windowLog;
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    size_t const kMaxChunkSize = 1 << 20;
    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
    size_t chunk;
    /* Literal bytes carried over from a chunk that produced no sequence. */
    size_t leftoverSize = 0;

    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
    /* Check that ZSTD_window_update() has been called for this chunk prior
     * to passing it to this function.
     */
    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
    /* The input could be very large (in zstdmt), so it must be broken up into
     * chunks to enforce the maximum distance and handle overflow correction.
     */
    assert(sequences->pos <= sequences->size);
    assert(sequences->size <= sequences->capacity);
    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
        size_t const remaining = (size_t)(iend - chunkStart);
        BYTE const *const chunkEnd =
            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
        size_t const chunkSize = chunkEnd - chunkStart;
        size_t newLeftoverSize;
        size_t const prevSize = sequences->size;

        assert(chunkStart < iend);
        /* 1. Perform overflow correction if necessary. */
        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
            U32 const ldmHSize = 1U << params->hashLog;
            U32 const correction = ZSTD_window_correctOverflow(
                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
            /* Keep stored table offsets consistent with the shifted window. */
            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
            /* invalidate dictionaries on overflow correction */
            ldmState->loadedDictEnd = 0;
        }
        /* 2. We enforce the maximum offset allowed.
         *
         * kMaxChunkSize should be small enough that we don't lose too much of
         * the window through early invalidation.
         * TODO: * Test the chunk size.
         *       * Try invalidation after the sequence generation and test the
         *         the offset against maxDist directly.
         *
         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
         * that any offset used is valid at the END of the sequence, since it may
         * be split into two sequences. This condition holds when using
         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
         * against maxDist directly, we'll have to carefully handle that case.
         */
        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
            ldmState, sequences, params, chunkStart, chunkSize);
        if (ZSTD_isError(newLeftoverSize))
            return newLeftoverSize;
        /* 4. We add the leftover literals from previous iterations to the first
         *    newly generated sequence, or add the `newLeftoverSize` if none are
         *    generated.
         */
        /* Prepend the leftover literals from the last call */
        if (prevSize < sequences->size) {
            sequences->seq[prevSize].litLength += (U32)leftoverSize;
            leftoverSize = newLeftoverSize;
        } else {
            /* No sequence this chunk: the whole chunk becomes leftover. */
            assert(newLeftoverSize == chunkSize);
            leftoverSize += chunkSize;
        }
    }
    return 0;
}
0546 
/* ZSTD_ldm_skipSequences():
 * Consumes srcSize bytes worth of stored sequences, trimming the literal
 * and match lengths of the sequence at rawSeqStore->pos in place.  A
 * match truncated below minMatch is dropped and its remaining length is
 * folded into the next sequence's literals. */
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
        if (srcSize <= seq->litLength) {
            /* Skip past srcSize literals */
            seq->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= seq->litLength;
        seq->litLength = 0;
        if (srcSize < seq->matchLength) {
            /* Skip past the first srcSize of the match */
            seq->matchLength -= (U32)srcSize;
            if (seq->matchLength < minMatch) {
                /* The match is too short, omit it */
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
                    seq[1].litLength += seq[0].matchLength;
                }
                rawSeqStore->pos++;
            }
            return;
        }
        /* The whole sequence is consumed; move on to the next one. */
        srcSize -= seq->matchLength;
        seq->matchLength = 0;
        rawSeqStore->pos++;
    }
}
0574 
0575 /*
0576  * If the sequence length is longer than remaining then the sequence is split
0577  * between this block and the next.
0578  *
0579  * Returns the current sequence to handle, or if the rest of the block should
0580  * be literals, it returns a sequence with offset == 0.
0581  */
0582 static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
0583                                  U32 const remaining, U32 const minMatch)
0584 {
0585     rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
0586     assert(sequence.offset > 0);
0587     /* Likely: No partial sequence */
0588     if (remaining >= sequence.litLength + sequence.matchLength) {
0589         rawSeqStore->pos++;
0590         return sequence;
0591     }
0592     /* Cut the sequence short (offset == 0 ==> rest is literals). */
0593     if (remaining <= sequence.litLength) {
0594         sequence.offset = 0;
0595     } else if (remaining < sequence.litLength + sequence.matchLength) {
0596         sequence.matchLength = remaining - sequence.litLength;
0597         if (sequence.matchLength < minMatch) {
0598             sequence.offset = 0;
0599         }
0600     }
0601     /* Skip past `remaining` bytes for the future sequences. */
0602     ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
0603     return sequence;
0604 }
0605 
/* ZSTD_ldm_skipRawSeqStoreBytes():
 * Advances (pos, posInSequence) past nbBytes of the raw sequence store,
 * counting literal + match bytes per sequence, without modifying the
 * stored sequences themselves.
 * NOTE(review): the total is truncated to U32 — assumes
 * posInSequence + nbBytes fits in 32 bits; confirm callers' bounds. */
void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            /* Fully consumes this sequence: step to the next one. */
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            /* Lands mid-sequence: record the offset and stop. */
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    /* Landed exactly on a boundary, or ran off the end: no mid-sequence
     * offset remains. */
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}
0622 
/* ZSTD_ldm_blockCompress():
 * Compresses one block, interleaving the pre-generated LDM sequences with
 * the regular block compressor: literals between LDM matches are handed
 * to the block compressor, then each LDM match is stored directly.
 * Returns the length of the last literals (the block compressor's return
 * on the final segment). */
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
    void const* src, size_t srcSize)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    unsigned const minMatch = cParams->minMatch;
    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    /* Input positions */
    BYTE const* ip = istart;

    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
    if (cParams->strategy >= ZSTD_btopt) {
        size_t lastLLSize;
        ms->ldmSeqStore = rawSeqStore;
        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
        /* Advance the raw store past this block so the next block starts
         * at the right sequence position. */
        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
        return lastLLSize;
    }

    assert(rawSeqStore->pos <= rawSeqStore->size);
    assert(rawSeqStore->size <= rawSeqStore->capacity);
    /* Loop through each sequence and apply the block compressor to the literals */
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
        /* maybeSplitSequence updates rawSeqStore->pos */
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
                                                   (U32)(iend - ip), minMatch);
        int i;
        /* End signal */
        if (sequence.offset == 0)
            break;

        assert(ip + sequence.litLength + sequence.matchLength <= iend);

        /* Fill tables for block compressor */
        ZSTD_ldm_limitTableUpdate(ms, ip);
        ZSTD_ldm_fillFastTables(ms, ip);
        /* Run the block compressor */
        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
        {
            size_t const newLitLength =
                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
            ip += sequence.litLength;
            /* Update the repcodes */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                rep[i] = rep[i-1];
            rep[0] = sequence.offset;
            /* Store the sequence */
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
                          sequence.offset + ZSTD_REP_MOVE,
                          sequence.matchLength - MINMATCH);
            ip += sequence.matchLength;
        }
    }
    /* Fill the tables for the block compressor */
    ZSTD_ldm_limitTableUpdate(ms, ip);
    ZSTD_ldm_fillFastTables(ms, ip);
    /* Compress the last literals */
    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}