1
0
mirror of https://passt.top/passt synced 2024-10-01 03:25:48 +00:00

siphash: Make sip round calculations an inline function rather than macro

The SIPROUND(n) macro implements n rounds of SipHash shuffling.  It relies
on 'v' and '__i' variables being available in the context it's used in,
which isn't great hygiene.  Replace it with an inline function instead.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
This commit is contained in:
David Gibson 2023-09-28 11:20:54 +10:00 committed by Stefano Brivio
parent ca6e94702c
commit f7b2be2d21

View File

@ -68,29 +68,36 @@
v[__i] = k[__i % 2]; \ v[__i] = k[__i % 2]; \
} while (0) } while (0)
/**
 * sipround() - Perform rounds of SipHash scrambling
 * @v:	siphash state (4 x 64-bit integers)
 * @n:	Number of rounds to apply
 */
static inline void sipround(uint64_t *v, int n)
{
	int round;

	/* One SipRound per iteration: two half-rounds of add/rotate/xor
	 * mixing across the four state words (constants 13/16/21/17/32
	 * per the SipHash specification).
	 */
	for (round = 0; round < n; round++) {
		v[0] += v[1];
		v[1] = ROTL(v[1], 13) ^ v[0];
		v[0] = ROTL(v[0], 32);
		v[2] += v[3];
		v[3] = ROTL(v[3], 16) ^ v[2];
		v[0] += v[3];
		v[3] = ROTL(v[3], 21) ^ v[0];
		v[2] += v[1];
		v[1] = ROTL(v[1], 17) ^ v[2];
		v[2] = ROTL(v[2], 32);
	}
}
/*
 * POSTAMBLE - SipHash finalisation: fold the tail/length word 'b' into
 * the state, run the final scrambling rounds, and collapse the four
 * state words into the 64-bit digest, left in 'b'.
 *
 * NOTE(review): relies on 'v' (state array) and 'b' being in scope at
 * the point of use — same hygiene caveat as the other helper macros.
 */
#define POSTAMBLE \
	do { \
		v[3] ^= b; \
		sipround(v, 2); \
		v[0] ^= b; \
		v[2] ^= 0xff; \
		sipround(v, 4); \
		b = (v[0] ^ v[1]) ^ (v[2] ^ v[3]); \
	} while (0)
@ -117,7 +124,7 @@ uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{ {
PREAMBLE(8); PREAMBLE(8);
v[3] ^= *(uint64_t *)in; v[3] ^= *(uint64_t *)in;
SIPROUND(2); sipround(v, 2);
v[0] ^= *(uint64_t *)in; v[0] ^= *(uint64_t *)in;
POSTAMBLE; POSTAMBLE;
@ -143,7 +150,7 @@ uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
PREAMBLE(12); PREAMBLE(12);
v[3] ^= combined; v[3] ^= combined;
SIPROUND(2); sipround(v, 2);
v[0] ^= combined; v[0] ^= combined;
b |= *(in32 + 2); b |= *(in32 + 2);
POSTAMBLE; POSTAMBLE;
@ -171,7 +178,7 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32; uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
v[3] ^= combined; v[3] ^= combined;
SIPROUND(2); sipround(v, 2);
v[0] ^= combined; v[0] ^= combined;
} }
@ -200,7 +207,7 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
for (i = 0; i < 4; i++, in64++) { for (i = 0; i < 4; i++, in64++) {
v[3] ^= *in64; v[3] ^= *in64;
SIPROUND(2); sipround(v, 2);
v[0] ^= *in64; v[0] ^= *in64;
} }
@ -229,7 +236,7 @@ uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32; uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
v[3] ^= combined; v[3] ^= combined;
SIPROUND(2); sipround(v, 2);
v[0] ^= combined; v[0] ^= combined;
} }