// SPDX-License-Identifier: GPL-3.0-or-later
// Copyright (C) 2019 Andrea Mazzoleni

/*
 * Derivative work from MetroHash128::Hash
 *
 * This version extends the original MetroHash128::Hash to accept a full 128-bit seed.
 *
 * Note: The original algorithm's behavior can be perfectly replicated by
 * setting the low and high part of the seed to the same 64-bit value.
 *
 * https://github.com/jandrewrogers/MetroHash/blob/master/src/metrohash128.cpp
 *
 * Copyright 2015-2018 J. Andrew Rogers
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
27 : */ 28 : 29 : static const uint64_t k0 = 0xC83A91E1; 30 : static const uint64_t k1 = 0x8648DBDB; 31 : static const uint64_t k2 = 0x7BDEC03B; 32 : static const uint64_t k3 = 0x2F5870A5; 33 : 34 417 : void MetroHash128(const void* data, size_t size, const uint8_t* seed, uint8_t* digest) 35 : { 36 417 : const uint8_t* ptr = data; 37 : uint64_t v[4]; 38 : 39 : /* 40 : * EXTENSION: Initialize primary state with 128-bit seed (seedL and seedH) 41 : * Ensures both halves influence the hash even for short messages (< 32 bytes) 42 : * 43 : * Original code was: 44 : * v[0] = (static_cast<uint64_t>(seed) - k0) * k3; 45 : * v[1] = (static_cast<uint64_t>(seed) + k1) * k2; 46 : */ 47 417 : v[0] = (util_read64(seed) - k0) * k3; 48 417 : v[1] = (util_read64(seed + 8) + k1) * k2; 49 : 50 417 : if (size >= 32) { 51 : /* 52 : * EXTENSION: Initialize secondary state using both halves of the 128-bit seed 53 : * 54 : * Original code was: 55 : * v[2] = (static_cast<uint64_t>(seed) + k0) * k2; 56 : * v[3] = (static_cast<uint64_t>(seed) - k1) * k3; 57 : */ 58 381 : v[2] = (util_read64(seed) + k0) * k2; 59 381 : v[3] = (util_read64(seed + 8) - k1) * k3; 60 : 61 : do { 62 1246092 : v[0] += util_read64(ptr) * k0; ptr += 8; v[0] = util_rotr64(v[0], 29) + v[2]; 63 1246092 : v[1] += util_read64(ptr) * k1; ptr += 8; v[1] = util_rotr64(v[1], 29) + v[3]; 64 1246092 : v[2] += util_read64(ptr) * k2; ptr += 8; v[2] = util_rotr64(v[2], 29) + v[0]; 65 1246092 : v[3] += util_read64(ptr) * k3; ptr += 8; v[3] = util_rotr64(v[3], 29) + v[1]; 66 1246092 : size -= 32; 67 1246092 : } while (size >= 32); 68 : 69 381 : v[2] ^= util_rotr64(((v[0] + v[3]) * k0) + v[1], 21) * k1; 70 381 : v[3] ^= util_rotr64(((v[1] + v[2]) * k1) + v[0], 21) * k0; 71 381 : v[0] ^= util_rotr64(((v[0] + v[2]) * k0) + v[3], 21) * k1; 72 381 : v[1] ^= util_rotr64(((v[1] + v[3]) * k1) + v[2], 21) * k0; 73 : } 74 : 75 417 : if (size >= 16) { 76 132 : v[0] += util_read64(ptr) * k2; ptr += 8; v[0] = util_rotr64(v[0], 33) * k3; 77 132 : 
v[1] += util_read64(ptr) * k2; ptr += 8; v[1] = util_rotr64(v[1], 33) * k3; 78 132 : v[0] ^= util_rotr64((v[0] * k2) + v[1], 45) * k1; 79 132 : v[1] ^= util_rotr64((v[1] * k3) + v[0], 45) * k0; 80 132 : size -= 16; 81 : } 82 : 83 417 : if (size >= 8) { 84 134 : v[0] += util_read64(ptr) * k2; ptr += 8; v[0] = util_rotr64(v[0], 33) * k3; 85 134 : v[0] ^= util_rotr64((v[0] * k2) + v[1], 27) * k1; 86 134 : size -= 8; 87 : } 88 : 89 417 : if (size >= 4) { 90 132 : v[1] += util_read32(ptr) * k2; ptr += 4; v[1] = util_rotr64(v[1], 33) * k3; 91 132 : v[1] ^= util_rotr64((v[1] * k3) + v[0], 46) * k0; 92 132 : size -= 4; 93 : } 94 : 95 417 : if (size >= 2) { 96 135 : v[0] += util_read16(ptr) * k2; ptr += 2; v[0] = util_rotr64(v[0], 33) * k3; 97 135 : v[0] ^= util_rotr64((v[0] * k2) + v[1], 22) * k1; 98 135 : size -= 2; 99 : } 100 : 101 417 : if (size >= 1) { 102 133 : v[1] += util_read8(ptr) * k2; v[1] = util_rotr64(v[1], 33) * k3; 103 133 : v[1] ^= util_rotr64((v[1] * k3) + v[0], 58) * k0; 104 : } 105 : 106 417 : v[0] += util_rotr64((v[0] * k0) + v[1], 13); 107 417 : v[1] += util_rotr64((v[1] * k1) + v[0], 37); 108 417 : v[0] += util_rotr64((v[0] * k2) + v[1], 13); 109 417 : v[1] += util_rotr64((v[1] * k3) + v[0], 37); 110 : 111 417 : util_write64(digest, v[0]); 112 417 : util_write64(digest + 8, v[1]); 113 417 : } 114 :