#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

#include "../common/zstd_internal.h"

/* When the workspace is used under AddressSanitizer, redzones of this size
 * can be inserted between its internal allocations, so that overruns which
 * stay inside the workspace are still detected. */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif

/* Allocation phases must advance monotonically: objects first, then buffers,
 * then aligned allocations (which includes tables). */
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/* Indicates whether the workspace memory was allocated dynamically by the
 * cwksp or supplied statically by the caller. */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/* The workspace packs all of the compression context's internal data
 * structures into one continuous buffer, so that only a single allocation is
 * needed (or so that the caller can supply the buffer). It is laid out as:
 *
 *   [                        ... workspace ...                        ]
 *   [objects][tables ... ->] free space [<- ... aligned][<- buffers]
 *
 * - objects (ZSTD_cwksp_reserve_object()): fixed structures allocated first,
 *   growing upward from the start of the workspace; objectEnd marks their end.
 * - tables (ZSTD_cwksp_reserve_table()): U32 tables growing upward from
 *   objectEnd; tableValidEnd tracks how much of this region is known to hold
 *   initialized values.
 * - aligned allocations (ZSTD_cwksp_reserve_aligned()) and buffers
 *   (ZSTD_cwksp_reserve_buffer()): carved downward from the end of the
 *   workspace; allocStart marks the lowest point reached.
 *
 * An allocation fails (allocFailed is set) when the upward- and
 * downward-growing regions would cross.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

/* Checks that the region boundaries are ordered as the layout requires. */
MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;   /* silence unused-parameter warnings when asserts are disabled */
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/* Rounds size up to the next multiple of align (align must be a power of 2). */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
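
/* Illustrative examples (not from the original source) of how the rounding
 * behaves; `align` must be a power of two:
 *
 *     ZSTD_cwksp_align( 0, 8) == 0
 *     ZSTD_cwksp_align( 1, 8) == 8
 *     ZSTD_cwksp_align(13, 8) == 16
 *     ZSTD_cwksp_align(16, 8) == 16
 */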

/* Returns the total workspace footprint of an allocation of `size` bytes.
 * In this build no extra overhead (such as ASAN redzones) is added, so the
 * size is returned unchanged; callers should still use this helper when
 * budgeting workspace space. */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
    return size;
}
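
/* Illustrative sketch (not from the original source): callers budget a
 * workspace by summing ZSTD_cwksp_alloc_size() over every planned
 * reservation and allocating that total up front. `SomeObject`, `tableSize`
 * and `bufferSize` below are hypothetical placeholders.
 *
 *     size_t const tableSize  = 4096 * sizeof(U32);
 *     size_t const bufferSize = 64 * 1024;
 *     size_t const neededSpace =
 *           ZSTD_cwksp_alloc_size(sizeof(SomeObject))
 *         + ZSTD_cwksp_alloc_size(tableSize)
 *         + ZSTD_cwksp_alloc_size(bufferSize);
 */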

MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            /* Round allocStart down to a multiple of sizeof(U32), so that
             * subsequent aligned reservations stay U32-aligned even if
             * buffer reservations left it unaligned. */
            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
            if (ws->allocStart < ws->tableValidEnd) {
                ws->tableValidEnd = ws->allocStart;
            }
        }
        ws->phase = phase;
    }
}

/* Returns non-zero if ptr points inside the workspace. */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/* Internal allocation routine used by the buffer and aligned reservations:
 * carves `bytes` off the top of the free space (allocStart moves downward).
 * Returns NULL, and sets allocFailed, if the request does not fit. */
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
    void* alloc;
    void* bottom = ws->tableEnd;
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    alloc = (BYTE *)ws->allocStart - bytes;

    if (bytes == 0)
        return NULL;

    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;

    return alloc;
}

/* Reserves and returns unaligned memory. */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/* Reserves and returns memory sized on and aligned on sizeof(U32). */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
    assert((bytes & (sizeof(U32)-1)) == 0);
    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}

/* Aligned on sizeof(U32). Tables grow upward from tableEnd. These buffers
 * have the special property that their values remain constrained, allowing
 * them to be re-used without being memset()-ed first. */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc = ws->tableEnd;
    void* end = (BYTE *)alloc + bytes;
    void* top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

    return alloc;
}

/* Aligned on sizeof(void*). Objects can only be reserved in the very first
 * phase; they establish the start of the table region. */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

    DEBUGLOG(5,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
    assert((bytes & (sizeof(void*)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);

    /* Objects must be reserved before any other allocation phase has started. */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(4, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

    return alloc;
}
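
/* Illustrative reservation sketch (not from the original source): with an
 * initialized workspace `ws`, reservations must follow the phase order
 * enforced above (objects, then buffers, then aligned/tables). `SomeObject`
 * and the sizes are hypothetical; sizeof(SomeObject) is assumed to be a
 * multiple of sizeof(void*), as ZSTD_cwksp_reserve_object() asserts.
 *
 *     SomeObject* const obj =
 *         (SomeObject*)ZSTD_cwksp_reserve_object(&ws, sizeof(SomeObject));
 *     BYTE* const scratch = ZSTD_cwksp_reserve_buffer(&ws, 64 * 1024);
 *     U32*  const table   = (U32*)ZSTD_cwksp_reserve_table(&ws, 4096 * sizeof(U32));
 *     if (ZSTD_cwksp_reserve_failed(&ws)) {
 *         return ERROR(memory_allocation);
 *     }
 */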

/* Marks the table region as dirty: its contents are no longer assumed to be
 * initialized, so a later ZSTD_cwksp_clean_tables() will zero it. */
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/* Marks the currently reserved table region as clean (initialized). */
MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/* Zeroes the portion of the reserved tables that is not already marked clean. */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/* Invalidates table allocations.
 * All other allocations remain valid. */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");
    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/* Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid. */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");
    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/* Initializes the workspace over the buffer [start, start+size).
 * Any state previously held by ws is discarded (a previously managed buffer,
 * if any, must be freed separately). */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}
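
/* Illustrative lifecycle sketch (not from the original source): the glue code
 * below is hypothetical (`neededSpace`, `callerBuffer`, `callerBufferSize`);
 * only the ZSTD_cwksp_* helpers and ZSTD_defaultCMem are real identifiers.
 *
 *     ZSTD_cwksp ws;
 *
 *     // Dynamic path: the cwksp allocates and owns the buffer.
 *     FORWARD_IF_ERROR(ZSTD_cwksp_create(&ws, neededSpace, ZSTD_defaultCMem), "");
 *     // ... reserve objects/buffers/tables and run compression ...
 *     ZSTD_cwksp_clear(&ws);                   // reuse the same memory for another run
 *     ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);  // release the buffer
 *
 *     // Static path: the caller owns the buffer (must be sizeof(void*)-aligned)
 *     // and remains responsible for releasing it.
 *     ZSTD_cwksp_init(&ws, callerBuffer, callerBufferSize, ZSTD_cwksp_static_alloc);
 */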

/* Moves management of the workspace from src to dst; src is zeroed and must
 * not be used afterwards. */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

/* The workspace is considered "too large" when it could fit
 * ZSTD_WORKSPACETOOLARGE_FACTOR times the additional space actually needed. */
MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

/* A too-large workspace only counts as wasteful once it has stayed oversized
 * for more than ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive uses. */
MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}
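
/* Illustrative policy sketch (not from the original source): a context that
 * keeps its workspace across parameter changes might combine these helpers to
 * decide when an oversized workspace should be thrown away. `neededSpace` and
 * the reallocation step are hypothetical.
 *
 *     ZSTD_cwksp_bump_oversized_duration(&ws, neededSpace);
 *     {   int const tooSmall = ZSTD_cwksp_sizeof(&ws) < neededSpace;
 *         int const wasteful = ZSTD_cwksp_check_wasteful(&ws, neededSpace);
 *         if (tooSmall || wasteful) {
 *             ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);
 *             FORWARD_IF_ERROR(ZSTD_cwksp_create(&ws, neededSpace, ZSTD_defaultCMem), "");
 *         }
 *     }
 */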

#endif /* ZSTD_CWKSP_H */