0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include "zstd_ldm.h"
0012
0013 #include "../common/debug.h"
0014 #include <linux/xxhash.h>
0015 #include "zstd_fast.h" /* ZSTD_fillHashTable() */
0016 #include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
0017 #include "zstd_ldm_geartab.h"
0018
/* Default bucket size (log2): 8 entries per hash bucket. */
#define LDM_BUCKET_SIZE_LOG 3
/* Default minimum match length searched by the long-distance matcher. */
#define LDM_MIN_MATCH_LENGTH 64
/* Default gap between windowLog and hashLog when hashLog is derived. */
#define LDM_HASH_RLOG 7

/* State of the gear rolling hash used to pick split points. */
typedef struct {
    U64 rolling;  /* current rolling hash value */
    U64 stopMask; /* a position is a split point when (hash & stopMask) == 0 */
} ldmRollingHashState_t;
0027
0028
0029
0030
0031
0032 static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
0033 {
0034 unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
0035 unsigned hashRateLog = params->hashRateLog;
0036
0037 state->rolling = ~(U32)0;
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052 if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
0053 state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
0054 } else {
0055
0056 state->stopMask = ((U64)1 << hashRateLog) - 1;
0057 }
0058 }
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
/* ZSTD_ldm_gear_feed():
 *
 * Registers in the splits array all the split points found in the first
 * size bytes following the data pointer. This function terminates when
 * either all the data has been processed or LDM_BATCH_SIZE splits are
 * present in the splits array.
 *
 * Precondition: The splits array must not be full.
 * Returns: The number of bytes processed. */
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
                                 BYTE const* data, size_t size,
                                 size_t* splits, unsigned* numSplits)
{
    size_t n;
    U64 hash, mask;

    hash = state->rolling;
    mask = state->stopMask;
    n = 0;

/* Hash one byte and record a split point when (hash & mask) == 0. */
#define GEAR_ITER_ONCE() do { \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1; \
        if (UNLIKELY((hash & mask) == 0)) { \
            splits[*numSplits] = n; \
            *numSplits += 1; \
            if (*numSplits == LDM_BATCH_SIZE) \
                goto done; \
        } \
    } while (0)

    /* 4x unrolled main loop for throughput. */
    while (n + 3 < size) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    /* Tail: remaining 0-3 bytes. */
    while (n < size) {
        GEAR_ITER_ONCE();
    }

#undef GEAR_ITER_ONCE

done:
    /* Persist the rolling hash so the next call continues seamlessly. */
    state->rolling = hash;
    return n;
}
0107
/* Fills in any zero-valued (i.e. unset) LDM parameters with sensible
 * defaults derived from the compression parameters, and clamps
 * bucketSizeLog so a bucket never exceeds the whole hash table. */
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams)
{
    params->windowLog = cParams->windowLog;
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
    if (params->hashLog == 0) {
        /* Derive hashLog from the window, keeping it within legal bounds. */
        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
    }
    if (params->hashRateLog == 0) {
        /* Hash roughly one position per (window/hashTable) ratio. */
        params->hashRateLog = params->windowLog < params->hashLog
                                   ? 0
                                   : params->windowLog - params->hashLog;
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
0127
0128 size_t ZSTD_ldm_getTableSize(ldmParams_t params)
0129 {
0130 size_t const ldmHSize = ((size_t)1) << params.hashLog;
0131 size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
0132 size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
0133 size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
0134 + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
0135 return params.enableLdm ? totalSize : 0;
0136 }
0137
0138 size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
0139 {
0140 return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
0141 }
0142
0143
0144
0145 static ldmEntry_t* ZSTD_ldm_getBucket(
0146 ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
0147 {
0148 return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
0149 }
0150
0151
0152
0153 static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
0154 size_t const hash, const ldmEntry_t entry,
0155 ldmParams_t const ldmParams)
0156 {
0157 BYTE* const pOffset = ldmState->bucketOffsets + hash;
0158 unsigned const offset = *pOffset;
0159
0160 *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
0161 *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
0162
0163 }
0164
0165
0166
0167
0168
0169 static size_t ZSTD_ldm_countBackwardsMatch(
0170 const BYTE* pIn, const BYTE* pAnchor,
0171 const BYTE* pMatch, const BYTE* pMatchBase)
0172 {
0173 size_t matchLength = 0;
0174 while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
0175 pIn--;
0176 pMatch--;
0177 matchLength++;
0178 }
0179 return matchLength;
0180 }
0181
0182
0183
0184
0185
0186
/* ZSTD_ldm_countBackwardsMatch_2segments():
 *
 * Returns the number of bytes that match backwards from pIn, even when
 * the match crosses from the prefix segment into the extDict segment.
 * On reaching the extDict boundary it continues counting against the
 * end of the extDict segment. */
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
            const BYTE* pIn, const BYTE* pAnchor,
            const BYTE* pMatch, const BYTE* pMatchBase,
            const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
{
    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
        /* If we didn't hit the segment start, or the extDict is empty,
         * the match cannot continue into a second segment. */
        return matchLength;
    }
    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
    /* Continue the backwards walk from the end of the extDict segment. */
    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
    return matchLength;
}
0202
0203
0204
0205
0206
0207
0208
0209
/* ZSTD_ldm_fillFastTables():
 *
 * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies
 * up to `end`. Other strategies maintain their own tables during block
 * compression, so nothing is done for them here. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
                                      void const* end)
{
    const BYTE* const iend = (const BYTE*)end;

    switch(ms->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
        break;

    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
        break;

    /* These strategies update their tables lazily during search. */
    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        break;
    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    return 0;
}
0239
/* Fills the LDM hash table with every split point found in [ip, iend),
 * typically used to load a dictionary. Each split point hashes the
 * preceding minMatchLength bytes with xxh64; the low bits select the
 * bucket and the high 32 bits become the checksum. */
void ZSTD_ldm_fillHashTable(
            ldmState_t* ldmState, const BYTE* ip,
            const BYTE* iend, ldmParams_t const* params)
{
    U32 const minMatchLength = params->minMatchLength;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const istart = ip;
    ldmRollingHashState_t hashState;
    size_t* const splits = ldmState->splitIndices;
    unsigned numSplits;

    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");

    ZSTD_ldm_gear_init(&hashState, params);
    while (ip < iend) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        /* Collect up to LDM_BATCH_SIZE split points from this stretch. */
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            /* Only positions with minMatchLength bytes behind them can be hashed. */
            if (ip + splits[n] >= istart + minMatchLength) {
                BYTE const* const split = ip + splits[n] - minMatchLength;
                U64 const xxhash = xxh64(split, minMatchLength, 0);
                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
                ldmEntry_t entry;

                entry.offset = (U32)(split - base);
                entry.checksum = (U32)(xxhash >> 32);
                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
            }
        }

        ip += hashed;
    }
}
0278
0279
0280
0281
0282
0283
0284
0285 static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
0286 {
0287 U32 const curr = (U32)(anchor - ms->window.base);
0288 if (curr > ms->nextToUpdate + 1024) {
0289 ms->nextToUpdate =
0290 curr - MIN(512, curr - ms->nextToUpdate - 1024);
0291 }
0292 }
0293
/* Core LDM sequence generation over one chunk of input.
 * Finds split points with the rolling hash, looks each one up in the
 * hash table, extends the best match forwards and backwards, and
 * appends resulting raw sequences to rawSeqStore.
 * Returns the number of trailing bytes not covered by any sequence
 * (the "leftover" literals), or an error code. */
static size_t ZSTD_ldm_generateSequences_internal(
        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    /* LDM parameters */
    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
    U32 const minMatchLength = params->minMatchLength;
    U32 const entsPerBucket = 1U << params->bucketSizeLog;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    /* Prefix and extDict parameters */
    U32 const dictLimit = ldmState->window.dictLimit;
    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
    BYTE const* const lowPrefixPtr = base + dictLimit;
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    BYTE const* const ilimit = iend - HASH_READ_SIZE;
    /* Input positions */
    BYTE const* anchor = istart;
    BYTE const* ip = istart;
    /* Rolling hash state */
    ldmRollingHashState_t hashState;
    /* Arrays for staged-processing */
    size_t* const splits = ldmState->splitIndices;
    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
    unsigned numSplits;

    if (srcSize < minMatchLength)
        return iend - anchor;

    /* Initialize the rolling hash state with the first minMatchLength bytes;
     * split points found during warm-up are discarded. */
    ZSTD_ldm_gear_init(&hashState, params);
    {
        size_t n = 0;

        while (n < minMatchLength) {
            numSplits = 0;
            n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n,
                                    splits, &numSplits);
        }
        ip += minMatchLength;
    }

    while (ip < ilimit) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
                                    splits, &numSplits);

        /* Pass 1: compute hashes and prefetch buckets, so pass 2's
         * bucket reads are cache-warm. */
        for (n = 0; n < numSplits; n++) {
            BYTE const* const split = ip + splits[n] - minMatchLength;
            U64 const xxhash = xxh64(split, minMatchLength, 0);
            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));

            candidates[n].split = split;
            candidates[n].hash = hash;
            candidates[n].checksum = (U32)(xxhash >> 32);
            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
            PREFETCH_L1(candidates[n].bucket);
        }

        /* Pass 2: search each candidate's bucket for the best match. */
        for (n = 0; n < numSplits; n++) {
            size_t forwardMatchLength = 0, backwardMatchLength = 0,
                   bestMatchLength = 0, mLength;
            BYTE const* const split = candidates[n].split;
            U32 const checksum = candidates[n].checksum;
            U32 const hash = candidates[n].hash;
            ldmEntry_t* const bucket = candidates[n].bucket;
            ldmEntry_t const* cur;
            ldmEntry_t const* bestEntry = NULL;
            ldmEntry_t newEntry;

            newEntry.offset = (U32)(split - base);
            newEntry.checksum = checksum;

            /* If a split point would generate a sequence overlapping with
             * the previous one, we merely register it in the hash table and
             * move on. */
            if (split < anchor) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                /* Skip entries with a mismatched checksum or outside the window. */
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                if (extDict) {
                    BYTE const* const curMatchBase =
                        cur->offset < dictLimit ? dictBase : base;
                    BYTE const* const pMatch = curMatchBase + cur->offset;
                    BYTE const* const matchEnd =
                        cur->offset < dictLimit ? dictEnd : iend;
                    BYTE const* const lowMatchPtr =
                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
                    curForwardMatchLength =
                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
                } else {
                    /* Single-segment (prefix only) mode. */
                    BYTE const* const pMatch = base + cur->offset;
                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
                }
                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;

                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }

            /* No match found -- insert an entry into the hash table and
             * process the next candidate. */
            if (bestEntry == NULL) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            /* Match found */
            mLength = forwardMatchLength + backwardMatchLength;
            {
                U32 const offset = (U32)(split - base) - bestEntry->offset;
                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;

                /* Out of sequence storage */
                if (rawSeqStore->size == rawSeqStore->capacity)
                    return ERROR(dstSize_tooSmall);
                seq->litLength = (U32)(split - backwardMatchLength - anchor);
                seq->matchLength = (U32)mLength;
                seq->offset = offset;
                rawSeqStore->size++;
            }

            /* Insert the current entry into the hash table --- it must be
             * done after the previous block to avoid clobbering bestEntry. */
            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);

            anchor = split + forwardMatchLength;
        }

        ip += hashed;
    }

    return iend - anchor;
}
0457
0458
0459
0460 static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
0461 U32 const reducerValue)
0462 {
0463 U32 u;
0464 for (u = 0; u < size; u++) {
0465 if (table[u].offset < reducerValue) table[u].offset = 0;
0466 else table[u].offset -= reducerValue;
0467 }
0468 }
0469
/* Generates LDM sequences for the whole source, processing it in chunks
 * of at most 1 MB so that window overflow correction and max-distance
 * enforcement stay bounded. Leftover bytes from one chunk are folded
 * into the literals of the next chunk's first sequence. */
size_t ZSTD_ldm_generateSequences(
        ldmState_t* ldmState, rawSeqStore_t* sequences,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    U32 const maxDist = 1U << params->windowLog;
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    size_t const kMaxChunkSize = 1 << 20;
    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
    size_t chunk;
    size_t leftoverSize = 0;

    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
    /* Check that ZSTD_window_update() has been called for this chunk prior
     * to passing it to this function. */
    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
    /* The input could be very large (in zstdmt), so it must be broken up into
     * chunks to enforce the maximum distance and handle overflow correction. */
    assert(sequences->pos <= sequences->size);
    assert(sequences->size <= sequences->capacity);
    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
        size_t const remaining = (size_t)(iend - chunkStart);
        BYTE const *const chunkEnd =
            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
        size_t const chunkSize = chunkEnd - chunkStart;
        size_t newLeftoverSize;
        size_t const prevSize = sequences->size;

        assert(chunkStart < iend);
        /* 1. Perform overflow correction if necessary: the LDM hash table
         * offsets must be rebased together with the window. */
        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
            U32 const ldmHSize = 1U << params->hashLog;
            U32 const correction = ZSTD_window_correctOverflow(
                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
            /* invalidate dictionaries on overflow correction */
            ldmState->loadedDictEnd = 0;
        }
        /* 2. We enforce the maximum offset allowed.
         *
         * kMaxChunkSize should be small enough, that we don't lose too much of
         * the window through early invalidation.
         * TODO: * Test the chunk size.
         *       * Try invalidation after the sequence generation and test the
         *         offset against maxDist directly.
         *
         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
         * that any offset used is valid at the END of the sequence, since it may
         * be split into two sequences. This condition holds when using
         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
         * against maxDist directly, we'll have to carefully handle that case.
         */
        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
            ldmState, sequences, params, chunkStart, chunkSize);
        if (ZSTD_isError(newLeftoverSize))
            return newLeftoverSize;
        /* 4. We add the leftover literals from previous iterations to the first
         * newly generated sequence, or add the `newLeftoverSize` if none are
         * generated. */
        if (prevSize < sequences->size) {
            sequences->seq[prevSize].litLength += (U32)leftoverSize;
            leftoverSize = newLeftoverSize;
        } else {
            assert(newLeftoverSize == chunkSize);
            leftoverSize += chunkSize;
        }
    }
    return 0;
}
0546
/* Skips srcSize bytes worth of sequences in rawSeqStore, truncating the
 * sequence it lands in. A truncated match shorter than minMatch is
 * dropped and its remainder moved into the next sequence's literals. */
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
        if (srcSize <= seq->litLength) {
            /* Skip past srcSize literals */
            seq->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= seq->litLength;
        seq->litLength = 0;
        if (srcSize < seq->matchLength) {
            /* Skip past the first srcSize of the match */
            seq->matchLength -= (U32)srcSize;
            if (seq->matchLength < minMatch) {
                /* The match is too short, omit it */
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
                    seq[1].litLength += seq[0].matchLength;
                }
                rawSeqStore->pos++;
            }
            return;
        }
        srcSize -= seq->matchLength;
        seq->matchLength = 0;
        rawSeqStore->pos++;
    }
}
0574
0575
0576
0577
0578
0579
0580
0581
/* If the sequence length is longer than remaining, the sequence is split
 * between this block and the next.
 *
 * Returns the current sequence to handle, or, if the rest of the block
 * should be literals, it returns a sequence with offset == 0. */
static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
                                 U32 const remaining, U32 const minMatch)
{
    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
    assert(sequence.offset > 0);
    /* Likely: No partial sequence */
    if (remaining >= sequence.litLength + sequence.matchLength) {
        rawSeqStore->pos++;
        return sequence;
    }
    /* Cut the sequence short (offset == 0 ==> rest is literals). */
    if (remaining <= sequence.litLength) {
        sequence.offset = 0;
    } else if (remaining < sequence.litLength + sequence.matchLength) {
        sequence.matchLength = remaining - sequence.litLength;
        if (sequence.matchLength < minMatch) {
            sequence.offset = 0;
        }
    }
    /* Skip past `remaining` bytes for the future sequences. */
    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
    return sequence;
}
0605
/* Advances (pos, posInSequence) in rawSeqStore by nbBytes, consuming
 * whole sequences while possible and recording the remainder as a
 * position inside the current sequence. */
void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            /* This sequence is fully consumed; move to the next one. */
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            /* We land inside this sequence. */
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}
0622
/* Compresses a block using the LDM sequences in rawSeqStore: for each
 * LDM sequence, the literals are compressed with the regular block
 * compressor, then the long match is stored directly. For btopt+
 * strategies the raw sequences are instead handed to the block
 * compressor itself. Returns the length of the last literals. */
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    unsigned const minMatch = cParams->minMatch;
    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    /* Input positions */
    BYTE const* ip = istart;

    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
    /* If using opt parser, use LDMs only as candidates rather than always
     * accepting them: the opt parser consumes the raw sequences itself. */
    if (cParams->strategy >= ZSTD_btopt) {
        size_t lastLLSize;
        ms->ldmSeqStore = rawSeqStore;
        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
        return lastLLSize;
    }

    assert(rawSeqStore->pos <= rawSeqStore->size);
    assert(rawSeqStore->size <= rawSeqStore->capacity);
    /* Loop through each sequence and apply the block compressor to the literals */
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
        /* maybeSplitSequence updates rawSeqStore->pos */
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
                                                   (U32)(iend - ip), minMatch);
        int i;
        /* End signal */
        if (sequence.offset == 0)
            break;

        assert(ip + sequence.litLength + sequence.matchLength <= iend);

        /* Fill tables for block compressor */
        ZSTD_ldm_limitTableUpdate(ms, ip);
        ZSTD_ldm_fillFastTables(ms, ip);
        /* Run the block compressor on the literals preceding the match */
        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
        {
            size_t const newLitLength =
                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
            ip += sequence.litLength;
            /* Update the repcodes: shift and install the LDM offset as rep[0]. */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                rep[i] = rep[i-1];
            rep[0] = sequence.offset;
            /* Store the sequence */
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
                          sequence.offset + ZSTD_REP_MOVE,
                          sequence.matchLength - MINMATCH);
            ip += sequence.matchLength;
        }
    }
    /* Fill the tables for the block compressor */
    ZSTD_ldm_limitTableUpdate(ms, ip);
    ZSTD_ldm_fillFastTables(ms, ip);
    /* Compress the last literals */
    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}