
siphash: Add siphash_feed() helper

We have macros or inline functions for a number of common operations in the
siphash functions.  However, in several places we still open-code feeding
another 64 bits of data into the hash function: an XOR, followed by two
rounds of shuffling, followed by another XOR.

Implement an inline function for this, which results in somewhat shortened
code.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Author:    David Gibson <david@gibson.dropbear.id.au>
Date:      2023-09-28 11:20:55 +10:00
Committer: Stefano Brivio <sbrivio@redhat.com>
parent     f7b2be2d21
commit     7a3153cbfb
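To make the refactored operation concrete, the following self-contained sketch shows that the new helper performs exactly the sequence described in the commit message: an XOR into v[3], two rounds of sipround(), and an XOR into v[0]. The sipround() body below is the reference SipHash round used as a stand-in (it is not copied from siphash.c), and the test state and input values are arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ROTL(x, b) ((uint64_t)(((x) << (b)) | ((x) >> (64 - (b)))))

/* Stand-in for siphash.c's sipround(v, n): run n reference SipHash rounds
 * over the 4-word state.
 */
static inline void sipround(uint64_t *v, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		v[0] += v[1];
		v[1] = ROTL(v[1], 13);
		v[1] ^= v[0];
		v[0] = ROTL(v[0], 32);
		v[2] += v[3];
		v[3] = ROTL(v[3], 16);
		v[3] ^= v[2];
		v[0] += v[3];
		v[3] = ROTL(v[3], 21);
		v[3] ^= v[0];
		v[2] += v[1];
		v[1] = ROTL(v[1], 17);
		v[1] ^= v[2];
		v[2] = ROTL(v[2], 32);
	}
}

/* The new helper, as added by this patch */
static inline void siphash_feed(uint64_t *v, uint64_t in)
{
	v[3] ^= in;
	sipround(v, 2);
	v[0] ^= in;
}

int main(void)
{
	/* Arbitrary test state and input, only for the comparison */
	uint64_t a[4] = { 1, 2, 3, 4 }, b[4] = { 1, 2, 3, 4 };
	uint64_t in = 0x0123456789abcdefULL;

	/* Open-coded sequence the patch replaces */
	a[3] ^= in;
	sipround(a, 2);
	a[0] ^= in;

	/* Equivalent call through the helper */
	siphash_feed(b, in);

	printf("states %s\n", memcmp(a, b, sizeof(a)) ? "differ" : "match");
	return 0;
}

Built with any C99 compiler, this prints "states match", since the helper body is the same three steps verbatim.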


@@ -91,11 +91,21 @@ static inline void sipround(uint64_t *v, int n)
 	}
 }
+/**
+ * siphash_feed() - Fold 64-bits of data into the hash state
+ * @v:		siphash state (4 x 64-bit integers)
+ * @in:		New value to fold into hash
+ */
+static inline void siphash_feed(uint64_t *v, uint64_t in)
+{
+	v[3] ^= in;
+	sipround(v, 2);
+	v[0] ^= in;
+}
 #define POSTAMBLE							\
 	do {								\
-		v[3] ^= b;						\
-		sipround(v, 2);						\
-		v[0] ^= b;						\
+		siphash_feed(v, b);					\
 		v[2] ^= 0xff;						\
 		sipround(v, 4);						\
 		b = (v[0] ^ v[1]) ^ (v[2] ^ v[3]);			\
@@ -123,9 +133,7 @@ __attribute__((optimize("-fno-strict-aliasing")))
 uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
 {
 	PREAMBLE(8);
-	v[3] ^= *(uint64_t *)in;
-	sipround(v, 2);
-	v[0] ^= *(uint64_t *)in;
+	siphash_feed(v, *(uint64_t *)in);
 	POSTAMBLE;
 	return b;
@@ -144,14 +152,9 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
 uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
 {
 	uint32_t *in32 = (uint32_t *)in;
-	uint64_t combined;
-	combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
 	PREAMBLE(12);
-	v[3] ^= combined;
-	sipround(v, 2);
-	v[0] ^= combined;
+	siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
 	b |= *(in32 + 2);
 	POSTAMBLE;
@@ -174,13 +177,8 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
 	PREAMBLE(20);
-	for (i = 0; i < 2; i++, in32 += 2) {
-		uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
-		v[3] ^= combined;
-		sipround(v, 2);
-		v[0] ^= combined;
-	}
+	for (i = 0; i < 2; i++, in32 += 2)
+		siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
 	b |= *in32;
 	POSTAMBLE;
@@ -205,11 +203,8 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
 	PREAMBLE(32);
-	for (i = 0; i < 4; i++, in64++) {
-		v[3] ^= *in64;
-		sipround(v, 2);
-		v[0] ^= *in64;
-	}
+	for (i = 0; i < 4; i++, in64++)
+		siphash_feed(v, *in64);
 	POSTAMBLE;
@@ -232,13 +227,8 @@ uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
 	PREAMBLE(36);
-	for (i = 0; i < 4; i++, in32 += 2) {
-		uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
-		v[3] ^= combined;
-		sipround(v, 2);
-		v[0] ^= combined;
-	}
+	for (i = 0; i < 4; i++, in32 += 2)
+		siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
 	b |= *in32;
 	POSTAMBLE;
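As a usage illustration only (hypothetical function, not part of this patch or of siphash.c), a fixed 16-byte variant built on the helper would follow the same shape as the siphash_8b() and siphash_32b() hunks above, relying on the file's existing PREAMBLE()/POSTAMBLE macros:

/* Hypothetical example: hash two 64-bit words with siphash_feed().
 * PREAMBLE()/POSTAMBLE and the v[]/b locals they declare are the existing
 * macros from siphash.c (POSTAMBLE is visible in the first hunk above).
 * Like the real siphash_*b() functions, a production version would also
 * carry the -fno-strict-aliasing attribute because of the pointer cast.
 */
uint64_t siphash_16b(const uint8_t *in, const uint64_t *k)
{
	uint64_t *in64 = (uint64_t *)in;
	int i;

	PREAMBLE(16);
	for (i = 0; i < 2; i++, in64++)
		siphash_feed(v, *in64);
	POSTAMBLE;

	return b;
}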