0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041 #include <asm/unaligned.h>
0042 #include <linux/errno.h>
0043 #include <linux/compiler.h>
0044 #include <linux/kernel.h>
0045 #include <linux/module.h>
0046 #include <linux/string.h>
0047 #include <linux/xxhash.h>
0048
0049
0050
0051
/*
 * Rotate-left helpers.  Arguments are fully parenthesized so expression
 * arguments (e.g. xxh_rotl32(a ^ b, 1)) evaluate correctly; callers
 * always pass 0 < r < width, so there is no UB from full-width shifts.
 */
#define xxh_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))
#define xxh_rotl64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))

#ifdef __LITTLE_ENDIAN
# define XXH_CPU_LITTLE_ENDIAN 1
#else
# define XXH_CPU_LITTLE_ENDIAN 0
#endif
0060
0061
0062
0063
/*
 * Multiplicative constants from the xxHash specification.  They are
 * large odd primes chosen so repeated multiply/rotate steps spread
 * input bits across the whole accumulator.
 */
static const uint32_t PRIME32_1 = 2654435761U;
static const uint32_t PRIME32_2 = 2246822519U;
static const uint32_t PRIME32_3 = 3266489917U;
static const uint32_t PRIME32_4 = 668265263U;
static const uint32_t PRIME32_5 = 374761393U;

static const uint64_t PRIME64_1 = 11400714785074694791ULL;
static const uint64_t PRIME64_2 = 14029467366897019727ULL;
static const uint64_t PRIME64_3 = 1609587929392839161ULL;
static const uint64_t PRIME64_4 = 9650029242287828579ULL;
static const uint64_t PRIME64_5 = 2870177450012600261ULL;
0075
0076
0077
0078
0079 void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)
0080 {
0081 memcpy(dst, src, sizeof(*dst));
0082 }
0083 EXPORT_SYMBOL(xxh32_copy_state);
0084
0085 void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
0086 {
0087 memcpy(dst, src, sizeof(*dst));
0088 }
0089 EXPORT_SYMBOL(xxh64_copy_state);
0090
0091
0092
0093
0094 static uint32_t xxh32_round(uint32_t seed, const uint32_t input)
0095 {
0096 seed += input * PRIME32_2;
0097 seed = xxh_rotl32(seed, 13);
0098 seed *= PRIME32_1;
0099 return seed;
0100 }
0101
0102 uint32_t xxh32(const void *input, const size_t len, const uint32_t seed)
0103 {
0104 const uint8_t *p = (const uint8_t *)input;
0105 const uint8_t *b_end = p + len;
0106 uint32_t h32;
0107
0108 if (len >= 16) {
0109 const uint8_t *const limit = b_end - 16;
0110 uint32_t v1 = seed + PRIME32_1 + PRIME32_2;
0111 uint32_t v2 = seed + PRIME32_2;
0112 uint32_t v3 = seed + 0;
0113 uint32_t v4 = seed - PRIME32_1;
0114
0115 do {
0116 v1 = xxh32_round(v1, get_unaligned_le32(p));
0117 p += 4;
0118 v2 = xxh32_round(v2, get_unaligned_le32(p));
0119 p += 4;
0120 v3 = xxh32_round(v3, get_unaligned_le32(p));
0121 p += 4;
0122 v4 = xxh32_round(v4, get_unaligned_le32(p));
0123 p += 4;
0124 } while (p <= limit);
0125
0126 h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) +
0127 xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);
0128 } else {
0129 h32 = seed + PRIME32_5;
0130 }
0131
0132 h32 += (uint32_t)len;
0133
0134 while (p + 4 <= b_end) {
0135 h32 += get_unaligned_le32(p) * PRIME32_3;
0136 h32 = xxh_rotl32(h32, 17) * PRIME32_4;
0137 p += 4;
0138 }
0139
0140 while (p < b_end) {
0141 h32 += (*p) * PRIME32_5;
0142 h32 = xxh_rotl32(h32, 11) * PRIME32_1;
0143 p++;
0144 }
0145
0146 h32 ^= h32 >> 15;
0147 h32 *= PRIME32_2;
0148 h32 ^= h32 >> 13;
0149 h32 *= PRIME32_3;
0150 h32 ^= h32 >> 16;
0151
0152 return h32;
0153 }
0154 EXPORT_SYMBOL(xxh32);
0155
0156 static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
0157 {
0158 acc += input * PRIME64_2;
0159 acc = xxh_rotl64(acc, 31);
0160 acc *= PRIME64_1;
0161 return acc;
0162 }
0163
0164 static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)
0165 {
0166 val = xxh64_round(0, val);
0167 acc ^= val;
0168 acc = acc * PRIME64_1 + PRIME64_4;
0169 return acc;
0170 }
0171
0172 uint64_t xxh64(const void *input, const size_t len, const uint64_t seed)
0173 {
0174 const uint8_t *p = (const uint8_t *)input;
0175 const uint8_t *const b_end = p + len;
0176 uint64_t h64;
0177
0178 if (len >= 32) {
0179 const uint8_t *const limit = b_end - 32;
0180 uint64_t v1 = seed + PRIME64_1 + PRIME64_2;
0181 uint64_t v2 = seed + PRIME64_2;
0182 uint64_t v3 = seed + 0;
0183 uint64_t v4 = seed - PRIME64_1;
0184
0185 do {
0186 v1 = xxh64_round(v1, get_unaligned_le64(p));
0187 p += 8;
0188 v2 = xxh64_round(v2, get_unaligned_le64(p));
0189 p += 8;
0190 v3 = xxh64_round(v3, get_unaligned_le64(p));
0191 p += 8;
0192 v4 = xxh64_round(v4, get_unaligned_le64(p));
0193 p += 8;
0194 } while (p <= limit);
0195
0196 h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
0197 xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
0198 h64 = xxh64_merge_round(h64, v1);
0199 h64 = xxh64_merge_round(h64, v2);
0200 h64 = xxh64_merge_round(h64, v3);
0201 h64 = xxh64_merge_round(h64, v4);
0202
0203 } else {
0204 h64 = seed + PRIME64_5;
0205 }
0206
0207 h64 += (uint64_t)len;
0208
0209 while (p + 8 <= b_end) {
0210 const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
0211
0212 h64 ^= k1;
0213 h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
0214 p += 8;
0215 }
0216
0217 if (p + 4 <= b_end) {
0218 h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
0219 h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
0220 p += 4;
0221 }
0222
0223 while (p < b_end) {
0224 h64 ^= (*p) * PRIME64_5;
0225 h64 = xxh_rotl64(h64, 11) * PRIME64_1;
0226 p++;
0227 }
0228
0229 h64 ^= h64 >> 33;
0230 h64 *= PRIME64_2;
0231 h64 ^= h64 >> 29;
0232 h64 *= PRIME64_3;
0233 h64 ^= h64 >> 32;
0234
0235 return h64;
0236 }
0237 EXPORT_SYMBOL(xxh64);
0238
0239
0240
0241
0242 void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)
0243 {
0244
0245 struct xxh32_state state;
0246
0247 memset(&state, 0, sizeof(state));
0248 state.v1 = seed + PRIME32_1 + PRIME32_2;
0249 state.v2 = seed + PRIME32_2;
0250 state.v3 = seed + 0;
0251 state.v4 = seed - PRIME32_1;
0252 memcpy(statePtr, &state, sizeof(state));
0253 }
0254 EXPORT_SYMBOL(xxh32_reset);
0255
0256 void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
0257 {
0258
0259 struct xxh64_state state;
0260
0261 memset(&state, 0, sizeof(state));
0262 state.v1 = seed + PRIME64_1 + PRIME64_2;
0263 state.v2 = seed + PRIME64_2;
0264 state.v3 = seed + 0;
0265 state.v4 = seed - PRIME64_1;
0266 memcpy(statePtr, &state, sizeof(state));
0267 }
0268 EXPORT_SYMBOL(xxh64_reset);
0269
/*
 * Feed @len bytes at @input into a streaming xxh32 state.
 *
 * Data that does not complete a 16-byte stripe is buffered in
 * state->mem32 and consumed by a later update (or by xxh32_digest()).
 *
 * Returns 0 on success, -EINVAL if @input is NULL.
 */
int xxh32_update(struct xxh32_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -EINVAL;

	/* total_len_32 may wrap; large_len records whether we ever saw >= 16B. */
	state->total_len_32 += (uint32_t)len;
	state->large_len |= (len >= 16) | (state->total_len_32 >= 16);

	if (state->memsize + len < 16) {
		/* Not enough for a full stripe yet: just buffer the input. */
		memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) {
		/* Top up the buffered partial stripe and consume it. */
		const uint32_t *p32 = state->mem32;

		memcpy((uint8_t *)(state->mem32) + state->memsize, input,
			16 - state->memsize);

		state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32));
		p32++;
		state->v2 = xxh32_round(state->v2, get_unaligned_le32(p32));
		p32++;
		state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32));
		p32++;
		state->v4 = xxh32_round(state->v4, get_unaligned_le32(p32));
		p32++;

		/* Advance past the bytes used to complete the buffered stripe. */
		p += 16-state->memsize;
		state->memsize = 0;
	}

	if (p <= b_end - 16) {
		/* Bulk loop: whole 16-byte stripes straight from the input. */
		const uint8_t *const limit = b_end - 16;
		uint32_t v1 = state->v1;
		uint32_t v2 = state->v2;
		uint32_t v3 = state->v3;
		uint32_t v4 = state->v4;

		do {
			v1 = xxh32_round(v1, get_unaligned_le32(p));
			p += 4;
			v2 = xxh32_round(v2, get_unaligned_le32(p));
			p += 4;
			v3 = xxh32_round(v3, get_unaligned_le32(p));
			p += 4;
			v4 = xxh32_round(v4, get_unaligned_le32(p));
			p += 4;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) {
		/* Stash the tail (< 16 bytes) for the next call. */
		memcpy(state->mem32, p, (size_t)(b_end-p));
		state->memsize = (uint32_t)(b_end-p);
	}

	return 0;
}
EXPORT_SYMBOL(xxh32_update);
0337 EXPORT_SYMBOL(xxh32_update);
0338
0339 uint32_t xxh32_digest(const struct xxh32_state *state)
0340 {
0341 const uint8_t *p = (const uint8_t *)state->mem32;
0342 const uint8_t *const b_end = (const uint8_t *)(state->mem32) +
0343 state->memsize;
0344 uint32_t h32;
0345
0346 if (state->large_len) {
0347 h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) +
0348 xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18);
0349 } else {
0350 h32 = state->v3 + PRIME32_5;
0351 }
0352
0353 h32 += state->total_len_32;
0354
0355 while (p + 4 <= b_end) {
0356 h32 += get_unaligned_le32(p) * PRIME32_3;
0357 h32 = xxh_rotl32(h32, 17) * PRIME32_4;
0358 p += 4;
0359 }
0360
0361 while (p < b_end) {
0362 h32 += (*p) * PRIME32_5;
0363 h32 = xxh_rotl32(h32, 11) * PRIME32_1;
0364 p++;
0365 }
0366
0367 h32 ^= h32 >> 15;
0368 h32 *= PRIME32_2;
0369 h32 ^= h32 >> 13;
0370 h32 *= PRIME32_3;
0371 h32 ^= h32 >> 16;
0372
0373 return h32;
0374 }
0375 EXPORT_SYMBOL(xxh32_digest);
0376
/*
 * Feed @len bytes at @input into a streaming xxh64 state.
 *
 * Data that does not complete a 32-byte stripe is buffered in
 * state->mem64 and consumed by a later update (or by xxh64_digest()).
 *
 * Returns 0 on success, -EINVAL if @input is NULL.
 */
int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -EINVAL;

	state->total_len += len;

	if (state->memsize + len < 32) {
		/* Not enough for a full stripe yet: just buffer the input. */
		memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) {
		/* Top up the buffered partial stripe and consume it. */
		uint64_t *p64 = state->mem64;

		memcpy(((uint8_t *)p64) + state->memsize, input,
			32 - state->memsize);

		state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64));
		p64++;
		state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64));
		p64++;
		state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64));
		p64++;
		state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64));

		/* Advance past the bytes used to complete the buffered stripe. */
		p += 32 - state->memsize;
		state->memsize = 0;
	}

	if (p + 32 <= b_end) {
		/* Bulk loop: whole 32-byte stripes straight from the input. */
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = state->v1;
		uint64_t v2 = state->v2;
		uint64_t v3 = state->v3;
		uint64_t v4 = state->v4;

		do {
			v1 = xxh64_round(v1, get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) {
		/* Stash the tail (< 32 bytes) for the next call. */
		memcpy(state->mem64, p, (size_t)(b_end-p));
		state->memsize = (uint32_t)(b_end - p);
	}

	return 0;
}
EXPORT_SYMBOL(xxh64_update);
0443
0444 uint64_t xxh64_digest(const struct xxh64_state *state)
0445 {
0446 const uint8_t *p = (const uint8_t *)state->mem64;
0447 const uint8_t *const b_end = (const uint8_t *)state->mem64 +
0448 state->memsize;
0449 uint64_t h64;
0450
0451 if (state->total_len >= 32) {
0452 const uint64_t v1 = state->v1;
0453 const uint64_t v2 = state->v2;
0454 const uint64_t v3 = state->v3;
0455 const uint64_t v4 = state->v4;
0456
0457 h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
0458 xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
0459 h64 = xxh64_merge_round(h64, v1);
0460 h64 = xxh64_merge_round(h64, v2);
0461 h64 = xxh64_merge_round(h64, v3);
0462 h64 = xxh64_merge_round(h64, v4);
0463 } else {
0464 h64 = state->v3 + PRIME64_5;
0465 }
0466
0467 h64 += (uint64_t)state->total_len;
0468
0469 while (p + 8 <= b_end) {
0470 const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
0471
0472 h64 ^= k1;
0473 h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
0474 p += 8;
0475 }
0476
0477 if (p + 4 <= b_end) {
0478 h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
0479 h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
0480 p += 4;
0481 }
0482
0483 while (p < b_end) {
0484 h64 ^= (*p) * PRIME64_5;
0485 h64 = xxh_rotl64(h64, 11) * PRIME64_1;
0486 p++;
0487 }
0488
0489 h64 ^= h64 >> 33;
0490 h64 *= PRIME64_2;
0491 h64 ^= h64 >> 29;
0492 h64 *= PRIME64_3;
0493 h64 ^= h64 >> 32;
0494
0495 return h64;
0496 }
0497 EXPORT_SYMBOL(xxh64_digest);
0498
0499 MODULE_LICENSE("Dual BSD/GPL");
0500 MODULE_DESCRIPTION("xxHash");