Update xxHash to git commit cd0f5c22095c49c35104dca86620322801f14893

git-svn-id: https://svn.eduke32.com/eduke32@8142 1a8010ca-5511-0410-912e-c29ae57300e0

# Conflicts:
#	platform/Windows/build.vcxproj
#	platform/Windows/build.vcxproj.filters
terminx 2019-10-19 23:41:50 +00:00 committed by Christoph Oelckers
parent 70171ea341
commit 4dfae31900
3 changed files with 2368 additions and 431 deletions

source/build/include/xxh3.h (new file, 1632 additions)

File diff suppressed because it is too large.

xxhash.h

@@ -49,10 +49,13 @@ Lookup3 1.2 GB/s 9 Bob Jenkins
SuperFastHash 1.2 GB/s 1 Paul Hsieh
CityHash64 1.05 GB/s 10 Pike & Alakuijala
FNV 0.55 GB/s 5 Fowler, Noll, Vo
CRC32 0.43 GB/s 9
MD5-32 0.33 GB/s 10 Ronald L. Rivest
SHA1-32 0.28 GB/s 10
Note : other CRC32 implementations can be over 40x faster than SMHasher's:
http://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735
Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.
@@ -83,14 +86,16 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
* API modifier
******************************/
/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
-* This is useful to include xxhash functions in `static` mode
* This build macro includes xxhash functions in `static` mode
* in order to inline them, and remove their symbol from the public list.
-* Inlining can offer dramatic performance improvement on small keys.
* Inlining offers great performance improvement on small keys,
* and dramatic ones when length is expressed as a compile-time constant.
* See https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html .
* Methodology :
* #define XXH_INLINE_ALL
* #include "xxhash.h"
* `xxhash.c` is automatically included.
-* It's not useful to compile and link it as a separate module.
* It's not useful to compile and link it as a separate object.
*/
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# ifndef XXH_STATIC_LINKING_ONLY
@@ -107,7 +112,15 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
# define XXH_PUBLIC_API static
# endif
#else
# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
# ifdef XXH_EXPORT
# define XXH_PUBLIC_API __declspec(dllexport)
# elif XXH_IMPORT
# define XXH_PUBLIC_API __declspec(dllimport)
# endif
# else
# define XXH_PUBLIC_API /* do nothing */
# endif
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
/*! XXH_NAMESPACE, aka Namespace Emulation :
@@ -150,8 +163,8 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
* Version
***************************************/
#define XXH_VERSION_MAJOR 0
-#define XXH_VERSION_MINOR 6
#define XXH_VERSION_MINOR 7
-#define XXH_VERSION_RELEASE 5
#define XXH_VERSION_RELEASE 2
#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
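As a quick check on the bump above, the macro arithmetic gives 0*100*100 + 7*100 + 2 = 702 for v0.7.2. A minimal sketch exercising it (illustration only, using nothing beyond the declarations above):

#include <stdio.h>
#include "xxhash.h"

int main(void)
{
    /* XXH_VERSION_NUMBER is 702 with the v0.7.2 values above;
     * XXH_versionNumber() reports what the linked library was built with */
    printf("header: %u, library: %u\n",
           (unsigned)XXH_VERSION_NUMBER, XXH_versionNumber());
    return 0;
}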
@@ -159,7 +172,14 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void);
/*-**********************************************************************
* 32-bit hash
************************************************************************/
-typedef unsigned int XXH32_hash_t;
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint32_t XXH32_hash_t;
#else
typedef unsigned int XXH32_hash_t;
#endif
/*! XXH32() :
Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input".
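For reference, the simplest use of the one-shot prototype above; the buffer and seed are arbitrary examples, not from the commit:

#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    const char data[] = "hello world";   /* arbitrary example input */
    XXH32_hash_t const h = XXH32(data, strlen(data), 0 /* seed */);
    printf("XXH32 = %08x\n", (unsigned)h);
    return 0;
}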
@@ -172,11 +192,11 @@ XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned in
typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState);
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed);
-XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t length);
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
-XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state_in);
XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
/*
* Streaming functions generate the xxHash of an input provided in multiple segments.
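A sketch of the streaming pattern these declarations support, feeding the same bytes in two arbitrary segments, with minimal error handling:

#include <stdio.h>
#include "xxhash.h"

int main(void)
{
    XXH32_state_t* const state = XXH32_createState();
    if (state == NULL) return 1;
    if (XXH32_reset(state, 0) != XXH_OK) return 1;
    if (XXH32_update(state, "hello ", 6) != XXH_OK) return 1;
    if (XXH32_update(state, "world", 5) != XXH_OK) return 1;
    printf("XXH32 = %08x\n", (unsigned)XXH32_digest(state));  /* hash of everything fed so far */
    XXH32_freeState(state);
    return 0;
}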
@@ -216,7 +236,14 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
/*-**********************************************************************
* 64-bit hash
************************************************************************/
-typedef unsigned long long XXH64_hash_t;
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint64_t XXH64_hash_t;
#else
typedef unsigned long long XXH64_hash_t;
#endif
/*! XXH64() :
Calculate the 64-bit hash of sequence of length "len" stored at memory address "input".
@@ -229,16 +256,18 @@ XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned lo
typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed);
-XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t length);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
-XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state_in);
XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
/*====== Canonical representation ======*/
typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
#endif /* XXH_NO_LONG_LONG */
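To show what the canonical helpers are for (an endian-stable byte representation, suitable for serialization), a sketch that round-trips a 64-bit hash; the message is arbitrary:

#include <assert.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    const char msg[] = "example";   /* arbitrary input */
    XXH64_hash_t const h = XXH64(msg, strlen(msg), 0);

    XXH64_canonical_t canon;        /* same byte order on every platform */
    XXH64_canonicalFromHash(&canon, h);
    /* canon.digest[0..7] can now be written to disk or the network */

    assert(XXH64_hashFromCanonical(&canon) == h);   /* round-trip */
    return 0;
}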
@@ -256,68 +285,259 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src
* static allocation of XXH state, on stack or in a struct for example.
* Never **ever** use members directly. */
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
struct XXH32_state_s {
-uint32_t total_len_32;
XXH32_hash_t total_len_32;
-uint32_t large_len;
XXH32_hash_t large_len;
-uint32_t v1;
XXH32_hash_t v1;
-uint32_t v2;
XXH32_hash_t v2;
-uint32_t v3;
XXH32_hash_t v3;
-uint32_t v4;
XXH32_hash_t v4;
-uint32_t mem32[4];
XXH32_hash_t mem32[4];
-uint32_t memsize;
XXH32_hash_t memsize;
-uint32_t reserved; /* never read nor write, might be removed in a future version */
XXH32_hash_t reserved; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH32_state_t */
#ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
struct XXH64_state_s {
-uint64_t total_len;
XXH64_hash_t total_len;
-uint64_t v1;
XXH64_hash_t v1;
-uint64_t v2;
XXH64_hash_t v2;
-uint64_t v3;
XXH64_hash_t v3;
-uint64_t v4;
XXH64_hash_t v4;
-uint64_t mem64[4];
XXH64_hash_t mem64[4];
-uint32_t memsize;
XXH32_hash_t memsize;
-uint32_t reserved[2]; /* never read nor write, might be removed in a future version */
XXH32_hash_t reserved32; /* required for padding anyway */
XXH64_hash_t reserved64; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH64_state_t */
#endif /* XXH_NO_LONG_LONG */
# else
struct XXH32_state_s {
unsigned total_len_32;
unsigned large_len;
unsigned v1;
unsigned v2;
unsigned v3;
unsigned v4;
unsigned mem32[4];
unsigned memsize;
unsigned reserved; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH32_state_t */
# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
struct XXH64_state_s {
unsigned long long total_len;
unsigned long long v1;
unsigned long long v2;
unsigned long long v3;
unsigned long long v4;
unsigned long long mem64[4];
unsigned memsize;
unsigned reserved[2]; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH64_state_t */
# endif
# endif
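With these structures exposed, a state can live on the stack, which is the point of the static-linking section; per the warning above, members are only ever touched through the API. A sketch (the helper name is made up):

#define XXH_STATIC_LINKING_ONLY   /* exposes the state structures defined above */
#include <stddef.h>
#include "xxhash.h"

static XXH32_hash_t hash_on_stack(const void* data, size_t len)
{
    XXH32_state_t state;            /* stack allocation, no XXH32_createState() */
    XXH32_reset(&state, 0);         /* always initialize through the API */
    XXH32_update(&state, data, len);
    return XXH32_digest(&state);
}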
/*-**********************************************************************
* XXH3
* New experimental hash
************************************************************************/
#ifndef XXH_NO_LONG_LONG
/* ============================================
* XXH3 is a new hash algorithm,
* featuring improved speed performance for both small and large inputs.
* See full speed analysis at : http://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
* In general, expect XXH3 to run about ~2x faster on large inputs,
* and >3x faster on small ones, though exact differences depend on platform.
*
* The algorithm is portable, and will generate the same hash on all platforms.
* It benefits greatly from vectorization units, but does not require them.
*
* XXH3 offers 2 variants, _64bits and _128bits.
* When only 64 bits are needed, prefer calling the _64bits variant :
* it reduces the amount of mixing, resulting in faster speed on small inputs.
* It's also generally simpler to manipulate a scalar return type than a struct.
*
* The XXH3 algorithm is still considered experimental.
* Produced results can still change between versions.
* For example, results produced by v0.7.1 are not comparable with results from v0.7.0 .
* It's nonetheless possible to use XXH3 for ephemeral data (local sessions),
* but avoid storing values in long-term storage for later re-use.
*
* The API supports one-shot hashing, streaming mode, and custom secrets.
*
* There are still a number of open questions that the community can influence during the experimental period.
* I'm trying to list a few of them below, though don't consider this list as complete.
*
* - 128-bits output type : currently defined as a structure of two 64-bits fields.
* That's because 128-bit values do not exist in C standard.
* Note that it means that, at the byte level, the result is not identical depending on endianness.
* However, at field level, they are identical on all platforms.
* The canonical representation solves the issue of identical byte-level representation across platforms,
* which is necessary for serialization.
* Would there be a better representation for a 128-bit hash result ?
* Are the names of the inner 64-bit fields important ? Should they be changed ?
*
* - Seed type for 128-bits variant : currently, it's a single 64-bit value, like the 64-bit variant.
* It could be argued that it's more logical to offer a 128-bit seed input parameter for a 128-bit hash.
* But a 128-bit seed is more difficult to use, since it requires passing a structure instead of a scalar value.
* Such a variant could either replace current one, or become an additional one.
* Farmhash, for example, offers both variants (the 128-bits seed variant is called `doubleSeed`).
* If both 64-bit and 128-bit seeds are possible, which variant should be called XXH128 ?
*
* - Result for len==0 : Currently, the result of hashing a zero-length input is `0`.
* It seems okay as a return value when using all "default" secret and seed (it used to be a request for XXH32/XXH64).
* But is it still fine to return `0` when secret or seed are non-default ?
* Are there use cases which could depend on generating a different hash result for zero-length input when the secret is different ?
*/
#ifdef XXH_NAMESPACE
# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#endif
/* XXH3_64bits() :
* default 64-bit variant, using default secret and default seed of 0.
* It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);
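A usage sketch; note that in this version the XXH3 declarations live inside the XXH_STATIC_LINKING_ONLY section of the header, hence the define:

#define XXH_STATIC_LINKING_ONLY   /* XXH3 is experimental, static section only */
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    const char data[] = "small key";   /* arbitrary input */
    printf("XXH3_64bits = %016llx\n",
           (unsigned long long)XXH3_64bits(data, strlen(data)));
    return 0;
}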
/* XXH3_64bits_withSecret() :
* It's possible to provide any blob of bytes as a "secret" to generate the hash.
* This makes it more difficult for an external actor to prepare an intentional collision.
* The secret *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
* It should consist of random bytes.
* Avoid repeating the same character, or sequences of bytes,
* and especially avoid swathes of \0.
* Failure to respect these conditions will result in a poor quality hash.
*/
#define XXH3_SECRET_SIZE_MIN 136
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
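A sketch of the secret variant under the constraints just listed. The zero-filled buffer below is only a placeholder for real entropy; as the comment flags, it deliberately violates the advice above and would produce a poor-quality hash:

#define XXH_STATIC_LINKING_ONLY
#include <stddef.h>
#include "xxhash.h"

/* PLACEHOLDER ONLY: a real secret must be >= XXH3_SECRET_SIZE_MIN (136) bytes
 * of random data; an all-zero buffer is exactly the "swathes of \0" to avoid */
static unsigned char my_secret[XXH3_SECRET_SIZE_MIN];

static XXH64_hash_t hash_with_secret(const void* data, size_t len)
{
    return XXH3_64bits_withSecret(data, len, my_secret, sizeof(my_secret));
}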
/* XXH3_64bits_withSeed() :
* This variant generates on the fly a custom secret,
* based on the default secret, altered using the `seed` value.
* While this operation is decently fast, note that it's not completely free.
* note : seed==0 produces same results as XXH3_64bits() */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
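Per the note above, seed==0 must reproduce the unseeded variant; a quick assertion exercises that property (input is arbitrary):

#define XXH_STATIC_LINKING_ONLY
#include <assert.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    const char data[] = "payload";   /* arbitrary input */
    size_t const len = strlen(data);
    assert(XXH3_64bits_withSeed(data, len, 0) == XXH3_64bits(data, len));
    return 0;
}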
/* streaming 64-bit */
#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11+ */
# include <stdalign.h>
# define XXH_ALIGN(n) alignas(n)
#elif defined(__GNUC__)
# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
# define XXH_ALIGN(n) __declspec(align(n))
#else
# define XXH_ALIGN(n) /* disabled */
#endif
typedef struct XXH3_state_s XXH3_state_t;
#define XXH3_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
#define XXH3_INTERNALBUFFER_SIZE 256
struct XXH3_state_s {
XXH_ALIGN(64) XXH64_hash_t acc[8];
XXH_ALIGN(64) char customSecret[XXH3_SECRET_DEFAULT_SIZE]; /* used to store a custom secret generated from the seed. Makes state larger. Design might change */
XXH_ALIGN(64) char buffer[XXH3_INTERNALBUFFER_SIZE];
XXH32_hash_t bufferedSize;
XXH32_hash_t nbStripesPerBlock;
XXH32_hash_t nbStripesSoFar;
XXH32_hash_t secretLimit;
XXH32_hash_t reserved32;
XXH32_hash_t reserved32_2;
XXH64_hash_t totalLen;
XXH64_hash_t seed;
XXH64_hash_t reserved64;
const void* secret; /* note : there is some padding after, due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */
/* Streaming requires state maintenance.
* This operation costs memory and cpu.
* As a consequence, streaming is slower than one-shot hashing.
* For better performance, prefer using one-shot functions whenever possible. */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);
/* XXH3_64bits_reset() :
* initialize with default parameters.
* result will be equivalent to `XXH3_64bits()`. */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/* XXH3_64bits_reset_withSeed() :
* generate a custom secret from `seed`, and store it into state.
* digest will be equivalent to `XXH3_64bits_withSeed()`. */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/* XXH3_64bits_reset_withSecret() :
* `secret` is referenced, and must outlive the hash streaming session.
* secretSize must be >= XXH3_SECRET_SIZE_MIN.
*/
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* statePtr);
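The streaming calls above compose the same way as the classic API; a minimal sketch with two arbitrary segments:

#define XXH_STATIC_LINKING_ONLY
#include <stdio.h>
#include "xxhash.h"

int main(void)
{
    XXH3_state_t* const st = XXH3_createState();
    if (st == NULL) return 1;
    if (XXH3_64bits_reset(st) != XXH_OK) return 1;   /* digest will match XXH3_64bits() */
    if (XXH3_64bits_update(st, "abc", 3) != XXH_OK) return 1;
    if (XXH3_64bits_update(st, "def", 3) != XXH_OK) return 1;
    printf("%016llx\n", (unsigned long long)XXH3_64bits_digest(st));
    XXH3_freeState(st);
    return 0;
}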
/* 128-bit */
#ifdef XXH_NAMESPACE
# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif
typedef struct {
XXH64_hash_t low64;
XXH64_hash_t high64;
} XXH128_hash_t;
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); /* == XXH128() */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);
/* Note : for better performance, the following functions should be inlined,
* using XXH_INLINE_ALL */
/* return : 1 if equal, 0 if different */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
/* This comparator is compatible with stdlib's qsort().
* return : >0 if *h128_1 > *h128_2
* <0 if *h128_1 < *h128_2
* =0 if *h128_1 == *h128_2 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
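Because XXH128_cmp already has the qsort() comparator signature, sorting an array of 128-bit hashes needs no wrapper; a sketch with three arbitrary keys:

#define XXH_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    const char* keys[] = { "one", "two", "three" };   /* arbitrary inputs */
    XXH128_hash_t h[3];
    for (size_t i = 0; i < 3; i++)
        h[i] = XXH128(keys[i], strlen(keys[i]), 0);
    qsort(h, 3, sizeof(h[0]), XXH128_cmp);   /* sorts by hash value */
    return !XXH128_isEqual(h[0], h[0]);      /* trivially 0, i.e. success */
}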
/*====== Canonical representation ======*/
typedef struct { unsigned char digest[16]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);
#endif /* XXH_NO_LONG_LONG */
/*-**********************************************************************
* XXH_INLINE_ALL
************************************************************************/
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */
#endif
#endif /* XXH_STATIC_LINKING_ONLY */

xxhash.c

@@ -50,14 +50,10 @@
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
-# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
-|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
-|| defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6)
# define XXH_FORCE_MEMORY_ACCESS 2
-# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
-(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
-|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
-|| defined(__ARM_ARCH_7S__) ))
# elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
(defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7)))
# define XXH_FORCE_MEMORY_ACCESS 1
# endif
#endif
@@ -71,18 +67,6 @@
# define XXH_ACCEPT_NULL_INPUT_POINTER 0
#endif
-/*!XXH_FORCE_NATIVE_FORMAT :
-* By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
-* Results are therefore identical for little-endian and big-endian CPU.
-* This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
-* Should endian-independence be of no importance for your application, you may set the #define below to 1,
-* to improve speed for Big-endian CPU.
-* This option has no impact on Little_Endian CPU.
-*/
-#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
-# define XXH_FORCE_NATIVE_FORMAT 0
-#endif
/*!XXH_FORCE_ALIGN_CHECK :
* This is a minor performance trick, only useful with lots of very small keys.
* It means : check for aligned/unaligned input.
@@ -98,6 +82,18 @@
# endif
#endif
/*!XXH_REROLL:
* Whether to reroll XXH32_finalize, and XXH64_finalize,
* instead of using an unrolled jump table/if statement loop.
*
* This is automatically defined on -Os/-Oz on GCC and Clang. */
#ifndef XXH_REROLL
# if defined(__OPTIMIZE_SIZE__)
# define XXH_REROLL 1
# else
# define XXH_REROLL 0
# endif
#endif
/* *************************************
* Includes & Memory related functions
@@ -111,7 +107,7 @@ static void XXH_free (void* p) { free(p); }
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
-#include <assert.h> /* assert */
#include <limits.h> /* ULLONG_MAX */
#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"
@@ -122,20 +118,46 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
***************************************/
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-# define FORCE_INLINE static __forceinline
# define XXH_FORCE_INLINE static __forceinline
# define XXH_NO_INLINE static __declspec(noinline)
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
-# define FORCE_INLINE static inline __attribute__((always_inline))
# define XXH_FORCE_INLINE static inline __attribute__((always_inline))
# define XXH_NO_INLINE static __attribute__((noinline))
# else
-# define FORCE_INLINE static inline
# define XXH_FORCE_INLINE static inline
# define XXH_NO_INLINE static
# endif
# else
-# define FORCE_INLINE static
# define XXH_FORCE_INLINE static
# define XXH_NO_INLINE static
# endif /* __STDC_VERSION__ */
#endif
/* *************************************
* Debug
***************************************/
/* DEBUGLEVEL is expected to be defined externally,
* typically through the compiler command line.
* Its value must be a number. */
#ifndef DEBUGLEVEL
# define DEBUGLEVEL 0
#endif
#if (DEBUGLEVEL>=1)
# include <assert.h> /* note : can still be disabled with NDEBUG */
# define XXH_ASSERT(c) assert(c)
#else
# define XXH_ASSERT(c) ((void)0)
#endif
/* note : use after variable declarations */
#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; }
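Why the division trick above stops the build when the condition is false; a worked expansion (illustration, not code from the commit):

/* XXH_STATIC_ASSERT(1) -> { enum { XXH_sa = 1/(int)(!!(1)) }; }   1/1, compiles
 * XXH_STATIC_ASSERT(0) -> { enum { XXH_sa = 1/(int)(!!(0)) }; }   1/0: the enum
 * initializer must be a constant expression, so the zero divisor is rejected
 * at compile time instead of failing at run time */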
/* *************************************
* Basic Types
***************************************/
@@ -154,6 +176,9 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
# endif
#endif
/* === Memory access === */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
@@ -181,18 +206,41 @@ static U32 XXH_read32(const void* memPtr)
#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
/* === Endianess === */
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
static int XXH_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
#endif
/* ****************************************
* Compiler-specific Functions and Macros
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif
#if !defined(NO_CLANG_BUILTIN) && __has_builtin(__builtin_rotateleft32) && __has_builtin(__builtin_rotateleft64)
# define XXH_rotl32 __builtin_rotateleft32
# define XXH_rotl64 __builtin_rotateleft64
/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
-#if defined(_MSC_VER)
#elif defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
-# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
-# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
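The added parentheses in the fallback macros matter as soon as the rotation count is an expression; a worked expansion (illustration only):

/* old: XXH_rotl32(v, s + 1)
 *      -> ((v << s + 1) | (v >> (32 - s + 1)))
 *      where `32 - s + 1` parses as `(32 - s) + 1`, a wrong shift count;
 * new: XXH_rotl32(v, s + 1)
 *      -> (((v) << (s + 1)) | ((v) >> (32 - (s + 1))))   correct */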
#if defined(_MSC_VER) /* Visual Studio */
@@ -210,38 +258,14 @@ static U32 XXH_swap32 (U32 x)
#endif
-/* *************************************
-* Architecture Macros
-***************************************/
-typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
-/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
-#ifndef XXH_CPU_LITTLE_ENDIAN
-static int XXH_isLittleEndian(void)
-{
-const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
-return one.c[0];
-}
-# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
-#endif
/* ***************************
* Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
-FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
XXH_FORCE_INLINE U32 XXH_readLE32(const void* ptr)
{
-if (align==XXH_unaligned)
-return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
-else
-return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
-}
-FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
-{
-return XXH_readLE32_align(ptr, endian, XXH_unaligned);
return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}
static U32 XXH_readBE32(const void* ptr)
@@ -249,29 +273,82 @@ static U32 XXH_readBE32(const void* ptr)
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
XXH_FORCE_INLINE U32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
if (align==XXH_unaligned) {
return XXH_readLE32(ptr);
} else {
return XXH_CPU_LITTLE_ENDIAN ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}
}
/* *************************************
-* Macros
* Misc
***************************************/
-#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
/* *******************************************************************
* 32-bit hash functions
*********************************************************************/
-static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */
-static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */
-static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */
-static const U32 PRIME32_4 = 668265263U;
static const U32 PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */
-static const U32 PRIME32_5 = 374761393U;
static const U32 PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */
-static U32 XXH32_round(U32 seed, U32 input)
static U32 XXH32_round(U32 acc, U32 input)
{
-seed += input * PRIME32_2;
acc += input * PRIME32_2;
-seed = XXH_rotl32(seed, 13);
acc = XXH_rotl32(acc, 13);
-seed *= PRIME32_1;
acc *= PRIME32_1;
-return seed;
#if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE)
/* UGLY HACK:
* This inline assembly hack forces acc into a normal register. This is the
* only thing that prevents GCC and Clang from autovectorizing the XXH32 loop
* (pragmas and attributes don't work for some reason) without globally
* disabling SSE4.1.
*
* The reason we want to avoid vectorization is because despite working on
* 4 integers at a time, there are multiple factors slowing XXH32 down on
* SSE4:
* - There's a ridiculous amount of lag from pmulld (10 cycles of latency on newer chips!)
* making it slightly slower to multiply four integers at once compared to four
* integers independently. Even where pmulld was fastest, on Sandy/Ivy Bridge, it is
* still not worth it to go into SSE just to multiply unless doing a long operation.
*
* - Four instructions are required to rotate,
* movdqa tmp, v // not required with VEX encoding
* pslld tmp, 13 // tmp <<= 13
* psrld v, 19 // x >>= 19
* por v, tmp // x |= tmp
* compared to one for scalar:
* roll v, 13 // reliably fast across the board
* shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
*
* - Instruction level parallelism is actually more beneficial here because the
* SIMD actually serializes this operation: While v1 is rotating, v2 can load data,
* while v3 can multiply. SSE forces them to operate together.
*
* How this hack works:
* __asm__("" // Declare an assembly block but don't declare any instructions
* : // However, as an Input/Output Operand,
* "+r" // constrain a read/write operand (+) as a general purpose register (r).
* (acc) // and set acc as the operand
* );
*
* Because of the 'r', the compiler has promised that acc will be in a
* general purpose register and the '+' says that it will be 'read/write',
* so it has to assume it has changed. It is like volatile without all the
* loads and stores.
*
* Since the argument has to be in a normal register (not an SSE register),
* each time XXH32_round is called, it is impossible to vectorize. */
__asm__("" : "+r" (acc));
#endif
return acc;
}
/* mix all bits */
@@ -285,17 +362,15 @@ static U32 XXH32_avalanche(U32 h32)
return(h32);
}
-#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
#define XXH_get32bits(p) XXH_readLE32_align(p, align)
static U32
-XXH32_finalize(U32 h32, const void* ptr, size_t len,
-XXH_endianess endian, XXH_alignment align)
XXH32_finalize(U32 h32, const void* ptr, size_t len, XXH_alignment align)
{
const BYTE* p = (const BYTE*)ptr;
#define PROCESS1 \
-h32 += (*p) * PRIME32_5; \
-p++; \
h32 += (*p++) * PRIME32_5; \
h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
#define PROCESS4 \
@@ -303,8 +378,20 @@ XXH32_finalize(U32 h32, const void* ptr, size_t len,
p+=4; \
h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
-switch(len&15) /* or switch(bEnd - p) */
-{
/* Compact rerolled version */
if (XXH_REROLL) {
len &= 15;
while (len >= 4) {
PROCESS4;
len -= 4;
}
while (len > 0) {
PROCESS1;
--len;
}
return XXH32_avalanche(h32);
} else {
switch(len&15) /* or switch(bEnd - p) */ {
case 12: PROCESS4;
/* fallthrough */
case 8: PROCESS4;
@@ -343,14 +430,13 @@ XXH32_finalize(U32 h32, const void* ptr, size_t len,
/* fallthrough */
case 0: return XXH32_avalanche(h32);
}
-assert(0);
XXH_ASSERT(0);
return h32; /* reaching this point is deemed impossible */
}
}
-FORCE_INLINE U32
-XXH32_endian_align(const void* input, size_t len, U32 seed,
-XXH_endianess endian, XXH_alignment align)
XXH_FORCE_INLINE U32
XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
@@ -385,11 +471,11 @@ XXH32_endian_align(const void* input, size_t len, U32 seed,
h32 += (U32)len;
-return XXH32_finalize(h32, p, len&15, endian, align);
return XXH32_finalize(h32, p, len&15, align);
}
-XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
@@ -397,21 +483,15 @@ XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int s
XXH32_reset(&state, seed);
XXH32_update(&state, input, len);
return XXH32_digest(&state);
#else
-XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
-if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
-else
-return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
return XXH32_endian_align(input, len, seed, XXH_aligned);
} }
-if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
-else
-return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
return XXH32_endian_align(input, len, seed, XXH_unaligned);
#endif
}
@@ -448,12 +528,9 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int s
}
-FORCE_INLINE
-XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
-const BYTE* p = (const BYTE*)input;
-const BYTE* const bEnd = p + len;
if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
return XXH_OK;
@@ -461,22 +538,25 @@ XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size
return XXH_ERROR;
#endif
-state->total_len_32 += (unsigned)len;
-state->large_len |= (len>=16) | (state->total_len_32>=16);
{ const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
state->total_len_32 += (XXH32_hash_t)len;
state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
if (state->memsize + len < 16) { /* fill in tmp buffer */
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
-state->memsize += (unsigned)len;
state->memsize += (XXH32_hash_t)len;
return XXH_OK;
}
if (state->memsize) { /* some data left from previous update */
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
{ const U32* p32 = state->mem32;
-state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
-state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
-state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
-state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian));
state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
}
p += 16-state->memsize;
state->memsize = 0;
@@ -490,10 +570,10 @@ XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size
U32 v4 = state->v4;
do {
-v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
-v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4;
-v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4;
-v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4;
} while (p<=limit);
state->v1 = v1;
@@ -506,24 +586,13 @@ XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size
XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
state->memsize = (unsigned)(bEnd-p);
}
}
return XXH_OK;
}
-XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
-{
-XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
-else
-return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
-}
-FORCE_INLINE U32
-XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state)
{
U32 h32;
@@ -538,18 +607,7 @@ XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
h32 += state->total_len_32;
-return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned);
return XXH32_finalize(h32, state->mem32, state->memsize, XXH_aligned);
-}
-XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
-{
-XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-return XXH32_digest_endian(state_in, XXH_littleEndian);
-else
-return XXH32_digest_endian(state_in, XXH_bigEndian);
}
@@ -595,6 +653,31 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
# endif
#endif
/*! XXH_REROLL_XXH64:
* Whether to reroll the XXH64_finalize() loop.
*
* Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a performance gain
* on 64-bit hosts, as only one jump is required.
*
* However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit registers,
* and 64-bit arithmetic needs to be simulated, it isn't beneficial to unroll. The code becomes
* ridiculously large (the largest function in the binary on i386!), and rerolling it saves
* anywhere from 3kB to 20kB. It is also slightly faster because it fits into cache better
* and is more likely to be inlined by the compiler.
*
* If XXH_REROLL is defined, this is ignored and the loop is always rerolled. */
#ifndef XXH_REROLL_XXH64
# if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \
|| !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \
|| defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \
|| defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \
|| defined(__mips64__) || defined(__mips64)) /* mips64 */ \
|| (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */
# define XXH_REROLL_XXH64 1
# else
# define XXH_REROLL_XXH64 0
# endif
#endif /* !defined(XXH_REROLL_XXH64) */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
@@ -641,17 +724,9 @@ static U64 XXH_swap64 (U64 x)
}
#endif
-FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
XXH_FORCE_INLINE U64 XXH_readLE64(const void* ptr)
{
-if (align==XXH_unaligned)
-return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
-else
-return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
-}
-FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
-{
-return XXH_readLE64_align(ptr, endian, XXH_unaligned);
return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}
static U64 XXH_readBE64(const void* ptr)
@@ -659,14 +734,23 @@ static U64 XXH_readBE64(const void* ptr)
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
XXH_FORCE_INLINE U64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
if (align==XXH_unaligned)
return XXH_readLE64(ptr);
else
return XXH_CPU_LITTLE_ENDIAN ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}
/*====== xxh64 ======*/
-static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_1 = 0x9E3779B185EBCA87ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
-static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
-static const U64 PRIME64_3 = 1609587929392839161ULL;
static const U64 PRIME64_3 = 0x165667B19E3779F9ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
-static const U64 PRIME64_4 = 9650029242287828579ULL;
static const U64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
-static const U64 PRIME64_5 = 2870177450012600261ULL;
static const U64 PRIME64_5 = 0x27D4EB2F165667C5ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
static U64 XXH64_round(U64 acc, U64 input)
{
@@ -695,17 +779,15 @@ static U64 XXH64_avalanche(U64 h64)
}
-#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
#define XXH_get64bits(p) XXH_readLE64_align(p, align)
static U64
-XXH64_finalize(U64 h64, const void* ptr, size_t len,
-XXH_endianess endian, XXH_alignment align)
XXH64_finalize(U64 h64, const void* ptr, size_t len, XXH_alignment align)
{
const BYTE* p = (const BYTE*)ptr;
#define PROCESS1_64 \
-h64 ^= (*p) * PRIME64_5; \
-p++; \
h64 ^= (*p++) * PRIME64_5; \
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
#define PROCESS4_64 \
@@ -720,7 +802,24 @@ XXH64_finalize(U64 h64, const void* ptr, size_t len,
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
}
-switch(len&31) {
/* Rerolled version for 32-bit targets is faster and much smaller. */
if (XXH_REROLL || XXH_REROLL_XXH64) {
len &= 31;
while (len >= 8) {
PROCESS8_64;
len -= 8;
}
if (len >= 4) {
PROCESS4_64;
len -= 4;
}
while (len > 0) {
PROCESS1_64;
--len;
}
return XXH64_avalanche(h64);
} else {
switch(len & 31) {
case 24: PROCESS8_64;
/* fallthrough */
case 16: PROCESS8_64;
@@ -801,15 +900,14 @@ XXH64_finalize(U64 h64, const void* ptr, size_t len,
/* fallthrough */
case 0: return XXH64_avalanche(h64);
}
}
/* impossible to reach */
-assert(0);
XXH_ASSERT(0);
return 0; /* unreachable, but some compilers complain without it */
}
-FORCE_INLINE U64
-XXH64_endian_align(const void* input, size_t len, U64 seed,
-XXH_endianess endian, XXH_alignment align)
XXH_FORCE_INLINE U64
XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
@@ -848,11 +946,11 @@ XXH64_endian_align(const void* input, size_t len, U64 seed,
h64 += (U64) len;
-return XXH64_finalize(h64, p, len, endian, align);
return XXH64_finalize(h64, p, len, align);
}
-XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
@@ -860,21 +958,16 @@ XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned
XXH64_reset(&state, seed);
XXH64_update(&state, input, len);
return XXH64_digest(&state);
#else
-XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
-if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
-else
-return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
return XXH64_endian_align(input, len, seed, XXH_aligned);
} }
-if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
-else
-return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
return XXH64_endian_align(input, len, seed, XXH_unaligned);
#endif
}
@@ -903,17 +996,14 @@ XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long
state.v2 = seed + PRIME64_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME64_1;
-/* do not write into reserved, planned to be removed in a future version */
/* do not write into reserved64, might be removed in a future version */
-memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
return XXH_OK;
}
-FORCE_INLINE
-XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
XXH_PUBLIC_API XXH_errorcode
XXH64_update (XXH64_state_t* state, const void* input, size_t len)
{
-const BYTE* p = (const BYTE*)input;
-const BYTE* const bEnd = p + len;
if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
return XXH_OK;
@@ -921,6 +1011,9 @@ XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size
return XXH_ERROR;
#endif
{ const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
state->total_len += len;
if (state->memsize + len < 32) { /* fill in tmp buffer */
@@ -931,10 +1024,10 @@ XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size
if (state->memsize) { /* tmp buffer is full */
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
-state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
-state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
-state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
-state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
p += 32-state->memsize;
state->memsize = 0;
}
@@ -947,10 +1040,10 @@ XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size
U64 v4 = state->v4;
do {
-v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
-v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
-v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
-v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
} while (p<=limit);
state->v1 = v1;
@@ -963,21 +1056,13 @@ XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size
XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
state->memsize = (unsigned)(bEnd-p);
}
}
return XXH_OK;
}
-XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
-{
-XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
-else
-return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
-}
-FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)
{
U64 h64;
@@ -998,17 +1083,7 @@ FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess
h64 += (U64) state->total_len;
-return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned);
return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, XXH_aligned);
-}
-XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
-{
-XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-return XXH64_digest_endian(state_in, XXH_littleEndian);
-else
-return XXH64_digest_endian(state_in, XXH_bigEndian);
}
@@ -1026,4 +1101,14 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src
return XXH_readBE64(src);
}
/* *********************************************************************
* XXH3
* New generation hash designed for speed on small keys and vectorization
************************************************************************ */
#include "xxh3.h"
#endif /* XXH_NO_LONG_LONG */