/* libdivide.h
Copyright 2010 ridiculous_fish
Modified for EDuke32. zlib license.
*/
#ifndef libdivide_h_
#define libdivide_h_
#if defined(_WIN32) || defined(WIN32)
#define LIBDIVIDE_WINDOWS 1
#endif
#if defined(_MSC_VER)
#define LIBDIVIDE_VC 1
#endif
#if defined(__x86_64__) || defined(_WIN64) || defined(_M_X64)
#define LIBDIVIDE_IS_X86_64 1
#endif
#if defined(__i386__)
#define LIBDIVIDE_IS_i386 1
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
#ifndef EDUKE32_PREDICT_FALSE
/* Normally supplied by EDuke32's compat.h; plain fallbacks so the header stands alone. */
# define EDUKE32_PREDICT_FALSE(x) (x)
# define EDUKE32_PREDICT_TRUE(x) (x)
#endif
#if LIBDIVIDE_IS_X86_64 || defined __SSE2__ || (defined _M_IX86_FP && _M_IX86_FP == 2)
#define LIBDIVIDE_USE_SSE2 1
#endif
#if LIBDIVIDE_USE_SSE2
#include <emmintrin.h>
#endif
#if LIBDIVIDE_VC
#include <intrin.h>
#endif
#ifndef __has_builtin
#define __has_builtin(x) 0 // Compatibility with non-clang compilers.
#endif
#if defined(__SIZEOF_INT128__)
#define HAS_INT128_T 1
#endif
#if __GNUC__ || __clang__
#define LIBDIVIDE_GCC_STYLE_ASM 1
#endif
/* Explanation of "more" field: bit 6 is whether to use shift path. If we are using the shift path, bit 7 is whether the divisor is negative in the signed case; in the unsigned case it is 0. Bits 0-4 is shift value (for shift path or mult path). In 32 bit case, bit 5 is always 0. We use bit 7 as the "negative divisor indicator" so that we can use sign extension to efficiently go to a full-width -1.
u32: [0-4] shift value
[5] ignored
[6] add indicator
[7] shift path
s32: [0-4] shift value
[5] shift path
[6] add indicator
[7] indicates negative divisor
u64: [0-5] shift value
[6] add indicator
[7] shift path
s64: [0-5] shift value
[6] add indicator
[7] indicates negative divisor
magic number of 0 indicates shift path (we ran out of bits!)
*/
enum {
LIBDIVIDE_32_SHIFT_MASK = 0x1F,
LIBDIVIDE_64_SHIFT_MASK = 0x3F,
LIBDIVIDE_ADD_MARKER = 0x40,
LIBDIVIDE_U32_SHIFT_PATH = 0x80,
LIBDIVIDE_U64_SHIFT_PATH = 0x80,
LIBDIVIDE_S32_SHIFT_PATH = 0x20,
LIBDIVIDE_NEGATIVE_DIVISOR = 0x80
};
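/* Worked example of the encoding (illustrative, derived from the layout above):
   libdivide_u32_gen(7) produces magic = 0x24924925 and more = 0x42, i.e.
   bit 7 clear (mult path), bit 6 set (add indicator), bits 0-4 = 2 (shift).
   libdivide_u32_gen(8) produces magic = 0 and more = 0x83, i.e. bit 7 set
   (shift path) with shift value 3, so the divide is just numer >> 3. */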
// the "pad" variants below are padded to a power-of-2 size to optimize access via LUT
typedef struct {
uint32_t magic;
uint8_t more;
} libdivide_u32_t;
typedef struct {
int32_t magic;
uint8_t more;
} libdivide_s32_t;
typedef struct {
int32_t magic;
uint8_t more;
uint8_t filler[3];
} libdivide_s32pad_t;
typedef struct {
uint64_t magic;
uint8_t more;
} libdivide_u64_t;
typedef struct {
int64_t magic;
uint8_t more;
} libdivide_s64_t;
typedef struct {
int64_t magic;
uint8_t more;
uint8_t filler[7]; // should think of something useful to do with this...
} libdivide_s64pad_t;
libdivide_s32_t libdivide_s32_gen(int32_t d);
libdivide_u32_t libdivide_u32_gen(uint32_t d);
libdivide_s64_t libdivide_s64_gen(int64_t d);
libdivide_u64_t libdivide_u64_gen(uint64_t d);
int32_t libdivide_s32_do(int32_t numer, const libdivide_s32_t *denom);
uint32_t libdivide_u32_do(uint32_t numer, const libdivide_u32_t *denom);
int64_t libdivide_s64_do(int64_t numer, const libdivide_s64_t *denom);
uint64_t libdivide_u64_do(uint64_t numer, const libdivide_u64_t *denom);
int libdivide_u32_get_algorithm(const libdivide_u32_t *denom);
uint32_t libdivide_u32_do_alg0(uint32_t numer, const libdivide_u32_t *denom);
uint32_t libdivide_u32_do_alg1(uint32_t numer, const libdivide_u32_t *denom);
uint32_t libdivide_u32_do_alg2(uint32_t numer, const libdivide_u32_t *denom);
int libdivide_u64_get_algorithm(const libdivide_u64_t *denom);
uint64_t libdivide_u64_do_alg0(uint64_t numer, const libdivide_u64_t *denom);
uint64_t libdivide_u64_do_alg1(uint64_t numer, const libdivide_u64_t *denom);
uint64_t libdivide_u64_do_alg2(uint64_t numer, const libdivide_u64_t *denom);
int libdivide_s32_get_algorithm(const libdivide_s32_t *denom);
int32_t libdivide_s32_do_alg0(int32_t numer, const libdivide_s32_t *denom);
int32_t libdivide_s32_do_alg1(int32_t numer, const libdivide_s32_t *denom);
int32_t libdivide_s32_do_alg2(int32_t numer, const libdivide_s32_t *denom);
int32_t libdivide_s32_do_alg3(int32_t numer, const libdivide_s32_t *denom);
int32_t libdivide_s32_do_alg4(int32_t numer, const libdivide_s32_t *denom);
int libdivide_s64_get_algorithm(const libdivide_s64_t *denom);
int64_t libdivide_s64_do_alg0(int64_t numer, const libdivide_s64_t *denom);
int64_t libdivide_s64_do_alg1(int64_t numer, const libdivide_s64_t *denom);
int64_t libdivide_s64_do_alg2(int64_t numer, const libdivide_s64_t *denom);
int64_t libdivide_s64_do_alg3(int64_t numer, const libdivide_s64_t *denom);
int64_t libdivide_s64_do_alg4(int64_t numer, const libdivide_s64_t *denom);
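#if 0
/* Usage sketch (illustrative, not compiled): generate a divider once, then
   reuse it for many divisions by the same denominator. The function below is
   hypothetical; the libdivide calls are the real API declared above. */
static void example_divide_all(uint32_t *values, size_t count, uint32_t d)
{
    libdivide_u32_t divider = libdivide_u32_gen(d); // d must be nonzero
    for (size_t i = 0; i < count; i++)
        values[i] = libdivide_u32_do(values[i], &divider); // == values[i] / d
}
#endif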
#if LIBDIVIDE_USE_SSE2
__m128i libdivide_u32_do_vector(__m128i numers, const libdivide_u32_t * denom);
__m128i libdivide_s32_do_vector(__m128i numers, const libdivide_s32_t * denom);
__m128i libdivide_u64_do_vector(__m128i numers, const libdivide_u64_t * denom);
__m128i libdivide_s64_do_vector(__m128i numers, const libdivide_s64_t * denom);
__m128i libdivide_u32_do_vector_alg0(__m128i numers, const libdivide_u32_t * denom);
__m128i libdivide_u32_do_vector_alg1(__m128i numers, const libdivide_u32_t * denom);
__m128i libdivide_u32_do_vector_alg2(__m128i numers, const libdivide_u32_t * denom);
__m128i libdivide_s32_do_vector_alg0(__m128i numers, const libdivide_s32_t * denom);
__m128i libdivide_s32_do_vector_alg1(__m128i numers, const libdivide_s32_t * denom);
__m128i libdivide_s32_do_vector_alg2(__m128i numers, const libdivide_s32_t * denom);
__m128i libdivide_s32_do_vector_alg3(__m128i numers, const libdivide_s32_t * denom);
__m128i libdivide_s32_do_vector_alg4(__m128i numers, const libdivide_s32_t * denom);
__m128i libdivide_u64_do_vector_alg0(__m128i numers, const libdivide_u64_t * denom);
__m128i libdivide_u64_do_vector_alg1(__m128i numers, const libdivide_u64_t * denom);
__m128i libdivide_u64_do_vector_alg2(__m128i numers, const libdivide_u64_t * denom);
__m128i libdivide_s64_do_vector_alg0(__m128i numers, const libdivide_s64_t * denom);
__m128i libdivide_s64_do_vector_alg1(__m128i numers, const libdivide_s64_t * denom);
__m128i libdivide_s64_do_vector_alg2(__m128i numers, const libdivide_s64_t * denom);
__m128i libdivide_s64_do_vector_alg3(__m128i numers, const libdivide_s64_t * denom);
__m128i libdivide_s64_do_vector_alg4(__m128i numers, const libdivide_s64_t * denom);
#endif
#endif
#ifdef LIBDIVIDE_BODY
//////// Internal Utility Functions
static inline uint32_t libdivide__mullhi_u32(uint32_t x, uint32_t y) {
uint64_t xl = x, yl = y;
uint64_t rl = xl * yl;
return (uint32_t)(rl >> 32);
}
static uint64_t libdivide__mullhi_u64(uint64_t x, uint64_t y) {
#if HAS_INT128_T
__uint128_t xl = x, yl = y;
__uint128_t rl = xl * yl;
return (uint64_t)(rl >> 64);
#else
//full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
const uint32_t mask = 0xFFFFFFFF;
const uint32_t x0 = (uint32_t)(x & mask), x1 = (uint32_t)(x >> 32);
const uint32_t y0 = (uint32_t)(y & mask), y1 = (uint32_t)(y >> 32);
const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0);
const uint64_t x0y1 = x0 * (uint64_t)y1;
const uint64_t x1y0 = x1 * (uint64_t)y0;
const uint64_t x1y1 = x1 * (uint64_t)y1;
uint64_t temp = x1y0 + x0y0_hi;
uint64_t temp_lo = temp & mask, temp_hi = temp >> 32;
return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32);
#endif
}
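/* Worked check of the decomposition above with 4-bit "digits" (base 16):
   x = 0xAB, y = 0xCD -> x1 = 0xA, x0 = 0xB, y1 = 0xC, y0 = 0xD.
   x*y = x1*y1*256 + (x1*y0 + x0*y1)*16 + x0*y0
       = 0x78*256 + (0x82 + 0x84)*16 + 0x8F = 0x88EF = 0xAB * 0xCD,
   so the high half is 0x88. The code does the same in base 2**32,
   folding the carries of the two cross terms into the high word. */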
static inline int64_t libdivide__mullhi_s64(int64_t x, int64_t y) {
#if HAS_INT128_T
__int128_t xl = x, yl = y;
__int128_t rl = xl * yl;
return (int64_t)(rl >> 64);
#else
//full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
const uint32_t mask = 0xFFFFFFFF;
const uint32_t x0 = (uint32_t)(x & mask), y0 = (uint32_t)(y & mask);
const int32_t x1 = (int32_t)(x >> 32), y1 = (int32_t)(y >> 32);
const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0);
const int64_t t = x1*(int64_t)y0 + x0y0_hi;
const int64_t w1 = x0*(int64_t)y1 + (t & mask);
return x1*(int64_t)y1 + (t >> 32) + (w1 >> 32);
#endif
}
#if LIBDIVIDE_USE_SSE2
static inline __m128i libdivide__u64_to_m128(uint64_t x) {
#if LIBDIVIDE_VC && ! _WIN64
//Visual C++ lacks a reliable implementation of these load intrinsics here (the 32 bit compiler crashes), so build the vector from an aligned array instead
__declspec(align(16)) uint64_t temp[2] = {x, x};
return _mm_load_si128((const __m128i*)temp);
#elif defined(__ICC)
uint64_t __attribute__((aligned(16))) temp[2] = {x,x};
return _mm_load_si128((const __m128i*)temp);
#elif __clang__ && (2 > __clang_major__ || (2 == __clang_major__ && 7 > __clang_minor__))
// clang before 2.7 does not provide this intrinsic either
return (__m128i){x, x};
#else
// everyone else gets it right
return _mm_set1_epi64x(x);
#endif
}
static inline __m128i libdivide_get_FFFFFFFF00000000(void) {
//returns the same as _mm_set1_epi64(0xFFFFFFFF00000000ULL) without touching memory
__m128i result = _mm_set1_epi8(-1); //optimizes to pcmpeqd on OS X
return _mm_slli_epi64(result, 32);
}
static inline __m128i libdivide_get_00000000FFFFFFFF(void) {
//returns the same as _mm_set1_epi64(0x00000000FFFFFFFFULL) without touching memory
__m128i result = _mm_set1_epi8(-1); //optimizes to pcmpeqd on OS X
result = _mm_srli_epi64(result, 32);
return result;
}
#if 0
static inline __m128i libdivide_get_0000FFFF(void) {
//returns the same as _mm_set1_epi32(0x0000FFFF) without touching memory
__m128i result; //we don't care what its contents are
result = _mm_cmpeq_epi8(result, result); //all 1s
result = _mm_srli_epi32(result, 16);
return result;
}
#endif
static inline __m128i libdivide_s64_signbits(__m128i v) {
//we want to compute v >> 63, that is, _mm_srai_epi64(v, 63). But there is no 64 bit shift right arithmetic instruction in SSE2. So we have to fake it by first duplicating the high 32 bit values, and then using a 32 bit shift. Another option would be to use _mm_srli_epi64(v, 63) and then subtract that from 0, but that approach appears to be substantially slower for unknown reasons
__m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
__m128i signBits = _mm_srai_epi32(hiBitsDuped, 31);
return signBits;
}
/* Returns an __m128i whose low 32 bits are equal to amt and has zero elsewhere. */
static inline __m128i libdivide_u32_to_m128i(uint32_t amt) {
return _mm_set_epi32(0, 0, 0, amt);
}
static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) {
//implementation of _mm_sra_epi64. Here we have two 64 bit values which are shifted right to logically become (64 - amt) values, and are then sign extended from a (64 - amt) bit number.
const int b = 64 - amt;
__m128i m = libdivide__u64_to_m128(1ULL << (b - 1));
__m128i x = _mm_srl_epi64(v, libdivide_u32_to_m128i(amt));
__m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); //result = x^m - m
return result;
}
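/* The (x ^ m) - m step is the standard sign-extension identity: with
   m = 1 << (b - 1), the XOR flips the sign bit of the b-bit value and the
   subtract re-extends it. Worked 4-bit example (b = 4, m = 8):
   x = 0b1010 (-6 as a 4-bit value): (0b1010 ^ 8) - 8 = 2 - 8 = -6,
   i.e. 0xFF...FA at full width, exactly what an arithmetic shift yields. */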
/* Here, b is assumed to contain one 32 bit value repeated four times. If it did not, the function would not work. */
static inline __m128i libdivide__mullhi_u32_flat_vector(__m128i a, __m128i b) {
__m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32);
__m128i a1X3X = _mm_srli_epi64(a, 32);
__m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), libdivide_get_FFFFFFFF00000000());
return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); // = hi_product_0123
}
/* Here, y is assumed to contain one 64 bit value repeated twice. */
static inline __m128i libdivide_mullhi_u64_flat_vector(__m128i x, __m128i y) {
//full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
const __m128i mask = libdivide_get_00000000FFFFFFFF();
const __m128i x0 = _mm_and_si128(x, mask), x1 = _mm_srli_epi64(x, 32); //x0 is low half of 2 64 bit values, x1 is high half in low slots
const __m128i y0 = _mm_and_si128(y, mask), y1 = _mm_srli_epi64(y, 32);
const __m128i x0y0_hi = _mm_srli_epi64(_mm_mul_epu32(x0, y0), 32); //x0 happens to have the low half of the two 64 bit values in 32 bit slots 0 and 2, so _mm_mul_epu32 computes their full product, and then we shift right by 32 to get just the high values
const __m128i x0y1 = _mm_mul_epu32(x0, y1);
const __m128i x1y0 = _mm_mul_epu32(x1, y0);
const __m128i x1y1 = _mm_mul_epu32(x1, y1);
const __m128i temp = _mm_add_epi64(x1y0, x0y0_hi);
__m128i temp_lo = _mm_and_si128(temp, mask), temp_hi = _mm_srli_epi64(temp, 32);
temp_lo = _mm_srli_epi64(_mm_add_epi64(temp_lo, x0y1), 32);
temp_hi = _mm_add_epi64(x1y1, temp_hi);
return _mm_add_epi64(temp_lo, temp_hi);
}
/* y is one 64 bit value repeated twice */
static inline __m128i libdivide_mullhi_s64_flat_vector(__m128i x, __m128i y) {
__m128i p = libdivide_mullhi_u64_flat_vector(x, y);
__m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y);
p = _mm_sub_epi64(p, t1);
__m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x);
p = _mm_sub_epi64(p, t2);
return p;
}
/* SSE2 does not have a signed multiplication instruction, but we can convert unsigned to signed pretty efficiently. Again, b is just a 32 bit value repeated four times. */
static inline __m128i libdivide_mullhi_s32_flat_vector(__m128i a, __m128i b) {
__m128i p = libdivide__mullhi_u32_flat_vector(a, b);
__m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); //t1 = (a >> 31) & y, arithmetic shift
__m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a);
p = _mm_sub_epi32(p, t1);
p = _mm_sub_epi32(p, t2);
return p;
}
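/* Why the two subtractions work: interpreting an N-bit word as signed changes
   its value by -2**N exactly when its sign bit is set, so
   x_s * y_s = x_u * y_u - 2**N * ((x < 0) ? y_u : 0) - 2**N * ((y < 0) ? x_u : 0),
   plus a 2**(2N) term that falls outside the 2N-bit product entirely. Taking
   the high N bits: the signed high half is the unsigned high half minus
   (x < 0 ? y : 0) minus (y < 0 ? x : 0), which is what the masked subtractions
   above compute. */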
#endif
static inline int32_t libdivide__count_trailing_zeros32(uint32_t val) {
#if __GNUC__ || __has_builtin(__builtin_ctz)
/* Fast way to count trailing zeros */
return __builtin_ctz(val);
#elif LIBDIVIDE_VC
unsigned long result;
if (_BitScanForward(&result, val)) {
return result;
}
return 0;
#else
/* Dorky way to count trailing zeros. Note that this hangs for val = 0! */
int32_t result = 0;
val = (val ^ (val - 1)) >> 1; // Set v's trailing 0s to 1s and zero rest
while (val) {
val >>= 1;
result++;
}
return result;
#endif
}
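/* Worked example of the fallback: val = 24 = 0b11000.
   val - 1 = 0b10111, val ^ (val - 1) = 0b01111, >> 1 gives 0b00111.
   The loop then counts the 3 shifts needed to clear it: ctz(24) == 3. */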
static inline int32_t libdivide__count_trailing_zeros64(uint64_t val) {
#if __LP64__ && (__GNUC__ || __has_builtin(__builtin_ctzll))
/* Fast way to count trailing zeros. Note that we disable this in 32 bit because gcc does something horrible - it calls through to a dynamically bound function. */
return __builtin_ctzll(val);
#elif LIBDIVIDE_VC && _WIN64
unsigned long result;
if (_BitScanForward64(&result, val)) {
return result;
}
return 0;
#else
/* Pretty good way to count trailing zeros. Note that this hangs for val = 0! */
uint32_t lo = val & 0xFFFFFFFF;
if (lo != 0) return libdivide__count_trailing_zeros32(lo);
return 32 + libdivide__count_trailing_zeros32(val >> 32);
#endif
}
static inline int32_t libdivide__count_leading_zeros32(uint32_t val) {
#if __GNUC__ || __has_builtin(__builtin_clz)
/* Fast way to count leading zeros */
return __builtin_clz(val);
#elif LIBDIVIDE_VC
unsigned long result;
if (_BitScanReverse(&result, val)) {
return 31 - result;
}
return 0;
#else
/* Dorky way to count leading zeros. Note that this hangs for val = 0! */
int32_t result = 0;
while (! (val & (1U << 31))) {
val <<= 1;
result++;
}
return result;
#endif
}
static inline int32_t libdivide__count_leading_zeros64(uint64_t val) {
#if __GNUC__ || __has_builtin(__builtin_clzll)
/* Fast way to count leading zeros */
return __builtin_clzll(val);
#elif LIBDIVIDE_VC && _WIN64
unsigned long result;
if (_BitScanReverse64(&result, val)) {
return 63 - result;
}
return 0;
#else
/* Dorky way to count leading zeros. Note that this hangs for val = 0! */
int32_t result = 0;
while (! (val & (1ULL << 63))) {
val <<= 1;
result++;
}
return result;
#endif
}
//libdivide_64_div_32_to_32: divides a 64 bit uint {u1, u0} by a 32 bit uint {v}. The result must fit in 32 bits. Returns the quotient directly and the remainder in *r
#if (LIBDIVIDE_IS_i386 || LIBDIVIDE_IS_X86_64) && LIBDIVIDE_GCC_STYLE_ASM
static uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) {
uint32_t result;
__asm__("divl %[v]"
: "=a"(result), "=d"(*r)
: [v] "r"(v), "a"(u0), "d"(u1)
);
return result;
}
#else
static uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) {
uint64_t n = (((uint64_t)u1) << 32) | u0;
uint32_t result = (uint32_t)(n / v);
*r = (uint32_t)(n - result * (uint64_t)v);
return result;
}
#endif
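/* For instance, libdivide_64_div_32_to_32(1, 0, 7, &r) divides 2**32 by 7:
   it returns 613566756 (0x24924924) with r == 4, since
   613566756 * 7 == 4294967292 == 2**32 - 4. */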
#if LIBDIVIDE_IS_X86_64 && LIBDIVIDE_GCC_STYLE_ASM
static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) {
//u0 -> rax
//u1 -> rdx
//divq
uint64_t result;
__asm__("divq %[v]"
: "=a"(result), "=d"(*r)
: [v] "r"(v), "a"(u0), "d"(u1)
);
return result;
}
#else
/* Code taken from Hacker's Delight, http://www.hackersdelight.org/HDcode/divlu.c . License permits inclusion here per http://www.hackersdelight.org/permissions.htm
*/
static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) {
const uint64_t b = (1ULL << 32); // Number base (2**32).
uint64_t un1, un0, // Norm. dividend LSD's.
vn1, vn0, // Norm. divisor digits.
q1, q0, // Quotient digits.
un64, un21, un10,// Dividend digit pairs.
rhat; // A remainder.
int s; // Shift amount for norm.
if (EDUKE32_PREDICT_FALSE(u1 >= v)) { // If overflow, set rem.
if (r != NULL) // to an impossible value,
*r = (uint64_t)(-1); // and return the largest
return (uint64_t)(-1);} // possible quotient.
/* count leading zeros */
s = libdivide__count_leading_zeros64(v); // 0 <= s <= 63.
if (EDUKE32_PREDICT_TRUE(s > 0)) {
v = v << s; // Normalize divisor.
un64 = (u1 << s) | ((u0 >> (64 - s)) & (-s >> 31));
un10 = u0 << s; // Shift dividend left.
} else {
// Avoid undefined behavior.
un64 = u1 | u0;
un10 = u0;
}
vn1 = v >> 32; // Break divisor up into
vn0 = v & 0xFFFFFFFF; // two 32-bit digits.
un1 = un10 >> 32; // Break right half of
un0 = un10 & 0xFFFFFFFF; // dividend into two digits.
q1 = un64/vn1; // Compute the first
rhat = un64 - q1*vn1; // quotient digit, q1.
again1:
if (q1 >= b || q1*vn0 > b*rhat + un1) {
q1 = q1 - 1;
rhat = rhat + vn1;
if (rhat < b) goto again1;}
un21 = un64*b + un1 - q1*v; // Multiply and subtract.
q0 = un21/vn1; // Compute the second
rhat = un21 - q0*vn1; // quotient digit, q0.
again2:
if (q0 >= b || q0*vn0 > b*rhat + un0) {
q0 = q0 - 1;
rhat = rhat + vn1;
if (rhat < b) goto again2;}
if (r != NULL) // If remainder is wanted,
*r = (un21*b + un0 - q0*v) >> s; // return it.
return q1*b + q0;
}
#endif
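/* For instance, libdivide_128_div_64_to_64(1, 0, 3, &r) divides 2**64 by 3:
   it returns 6148914691236517205 (0x5555555555555555) with r == 1. The
   portable version above is schoolbook two-digit division in base 2**32,
   after normalizing so the divisor's top bit is set. */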
#if LIBDIVIDE_ASSERTIONS_ON
#define LIBDIVIDE_ASSERT(x) do { if (! (x)) { fprintf(stderr, "Assertion failure on line %ld: %s\n", (long)__LINE__, #x); exit(-1); } } while (0)
#else
#define LIBDIVIDE_ASSERT(x)
#endif
////////// UINT32
libdivide_u32_t libdivide_u32_gen(uint32_t d) {
libdivide_u32_t result;
if (EDUKE32_PREDICT_FALSE((d & (d - 1)) == 0)) {
result.magic = 0;
result.more = libdivide__count_trailing_zeros32(d) | LIBDIVIDE_U32_SHIFT_PATH;
}
else {
const uint32_t floor_log_2_d = 31 - libdivide__count_leading_zeros32(d);
uint8_t more;
uint32_t rem, proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem);
const uint32_t e = d - rem;
LIBDIVIDE_ASSERT(rem > 0 && rem < d);
/* This power works if e < 2**floor_log_2_d. */
if (e < (1U << floor_log_2_d)) {
/* This power works */
more = floor_log_2_d;
}
else {
/* We have to use the general 33-bit algorithm. We need to compute (2**power) / d. However, we already have (2**(power-1))/d and its remainder. By doubling both, and then correcting the remainder, we can compute the larger division. */
const uint32_t twice_rem = rem + rem;
proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it
if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
}
result.magic = 1 + proposed_m;
result.more = more;
//result.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we subtract one from the shift because we're using the smaller power. If we're using the larger power, we subtract one from the shift because it's taken care of by the add indicator. So floor_log_2_d happens to be correct in both cases.
}
return result;
}
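/* Worked example of the result for d = 7 (magic 0x24924925, more 0x42):
   numer = 100: q = mullhi(0x24924925, 100) = 14, t = ((100 - 14) >> 1) + 14
   = 57, and t >> 2 = 14 == 100 / 7. The add step recovers the 33rd bit of
   the magic number that could not be stored. */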
uint32_t libdivide_u32_do(uint32_t numer, const libdivide_u32_t *denom) {
uint8_t more = denom->more;
if (EDUKE32_PREDICT_FALSE(more & LIBDIVIDE_U32_SHIFT_PATH)) {
return numer >> (more & LIBDIVIDE_32_SHIFT_MASK);
}
else {
uint32_t q = libdivide__mullhi_u32(denom->magic, numer);
if (more & LIBDIVIDE_ADD_MARKER) {
uint32_t t = ((numer - q) >> 1) + q;
return t >> (more & LIBDIVIDE_32_SHIFT_MASK);
}
else {
return q >> more; //all upper bits are 0 - don't need to mask them off
}
}
}
int libdivide_u32_get_algorithm(const libdivide_u32_t *denom) {
uint8_t more = denom->more;
if (more & LIBDIVIDE_U32_SHIFT_PATH) return 0;
else if (! (more & LIBDIVIDE_ADD_MARKER)) return 1;
else return 2;
}
uint32_t libdivide_u32_do_alg0(uint32_t numer, const libdivide_u32_t *denom) {
return numer >> (denom->more & LIBDIVIDE_32_SHIFT_MASK);
}
uint32_t libdivide_u32_do_alg1(uint32_t numer, const libdivide_u32_t *denom) {
uint32_t q = libdivide__mullhi_u32(denom->magic, numer);
return q >> denom->more;
}
uint32_t libdivide_u32_do_alg2(uint32_t numer, const libdivide_u32_t *denom) {
// the add indicator (LIBDIVIDE_ADD_MARKER) is set
uint32_t q = libdivide__mullhi_u32(denom->magic, numer);
uint32_t t = ((numer - q) >> 1) + q;
return t >> (denom->more & LIBDIVIDE_32_SHIFT_MASK);
}
#if LIBDIVIDE_USE_SSE2
__m128i libdivide_u32_do_vector(__m128i numers, const libdivide_u32_t *denom) {
uint8_t more = denom->more;
if (more & LIBDIVIDE_U32_SHIFT_PATH) {
return _mm_srl_epi32(numers, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK));
}
else {
__m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic));
if (more & LIBDIVIDE_ADD_MARKER) {
//uint32_t t = ((numer - q) >> 1) + q;
//return t >> denom->shift;
__m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
return _mm_srl_epi32(t, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK));
}
else {
//q >> denom->shift
return _mm_srl_epi32(q, libdivide_u32_to_m128i(more));
}
}
}
__m128i libdivide_u32_do_vector_alg0(__m128i numers, const libdivide_u32_t *denom) {
return _mm_srl_epi32(numers, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK));
}
__m128i libdivide_u32_do_vector_alg1(__m128i numers, const libdivide_u32_t *denom) {
__m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic));
return _mm_srl_epi32(q, libdivide_u32_to_m128i(denom->more));
}
__m128i libdivide_u32_do_vector_alg2(__m128i numers, const libdivide_u32_t *denom) {
__m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic));
__m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
return _mm_srl_epi32(t, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK));
}
#endif
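#if 0
/* Usage sketch for the vector path (illustrative, not compiled): divide four
   packed uint32s by the same divisor at once. */
static __m128i example_divide_four_by_7(void)
{
    libdivide_u32_t divider = libdivide_u32_gen(7);
    __m128i numers = _mm_set_epi32(400, 300, 200, 100);
    return libdivide_u32_do_vector(numers, &divider); // lanes: {14, 28, 42, 57}
}
#endif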
/////////// UINT64
libdivide_u64_t libdivide_u64_gen(uint64_t d) {
libdivide_u64_t result;
if (EDUKE32_PREDICT_FALSE((d & (d - 1)) == 0)) {
result.more = libdivide__count_trailing_zeros64(d) | LIBDIVIDE_U64_SHIFT_PATH;
result.magic = 0;
}
else {
const uint32_t floor_log_2_d = 63 - libdivide__count_leading_zeros64(d);
uint64_t rem, proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); //== (1 << (64 + floor_log_2_d)) / d
uint8_t more;
const uint64_t e = d - rem;
LIBDIVIDE_ASSERT(rem > 0 && rem < d);
/* This power works if e < 2**floor_log_2_d. */
if (e < (1ULL << floor_log_2_d)) {
/* This power works */
more = floor_log_2_d;
}
else {
/* We have to use the general 65-bit algorithm. We need to compute (2**power) / d. However, we already have (2**(power-1))/d and its remainder. By doubling both, and then correcting the remainder, we can compute the larger division. */
const uint64_t twice_rem = rem + rem;
proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it
if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
}
result.magic = 1 + proposed_m;
result.more = more;
//result.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we subtract one from the shift because we're using the smaller power. If we're using the larger power, we subtract one from the shift because it's taken care of by the add indicator. So floor_log_2_d happens to be correct in both cases, which is why we do it outside of the if statement.
}
return result;
}
uint64_t libdivide_u64_do(uint64_t numer, const libdivide_u64_t *denom) {
uint8_t more = denom->more;
if (EDUKE32_PREDICT_FALSE(more & LIBDIVIDE_U64_SHIFT_PATH)) {
return numer >> (more & LIBDIVIDE_64_SHIFT_MASK);
}
else {
uint64_t q = libdivide__mullhi_u64(denom->magic, numer);
if (more & LIBDIVIDE_ADD_MARKER) {
uint64_t t = ((numer - q) >> 1) + q;
return t >> (more & LIBDIVIDE_64_SHIFT_MASK);
}
else {
return q >> more; //all upper bits are 0 - don't need to mask them off
}
}
}
int libdivide_u64_get_algorithm(const libdivide_u64_t *denom) {
uint8_t more = denom->more;
if (EDUKE32_PREDICT_FALSE(more & LIBDIVIDE_U64_SHIFT_PATH)) return 0;
else if (! (more & LIBDIVIDE_ADD_MARKER)) return 1;
else return 2;
}
uint64_t libdivide_u64_do_alg0(uint64_t numer, const libdivide_u64_t *denom) {
return numer >> (denom->more & LIBDIVIDE_64_SHIFT_MASK);
}
uint64_t libdivide_u64_do_alg1(uint64_t numer, const libdivide_u64_t *denom) {
uint64_t q = libdivide__mullhi_u64(denom->magic, numer);
return q >> denom->more;
}
uint64_t libdivide_u64_do_alg2(uint64_t numer, const libdivide_u64_t *denom) {
uint64_t q = libdivide__mullhi_u64(denom->magic, numer);
uint64_t t = ((numer - q) >> 1) + q;
return t >> (denom->more & LIBDIVIDE_64_SHIFT_MASK);
}
#if LIBDIVIDE_USE_SSE2
__m128i libdivide_u64_do_vector(__m128i numers, const libdivide_u64_t * denom) {
uint8_t more = denom->more;
if (more & LIBDIVIDE_U64_SHIFT_PATH) {
return _mm_srl_epi64(numers, libdivide_u32_to_m128i(more & LIBDIVIDE_64_SHIFT_MASK));
}
else {
__m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic));
if (more & LIBDIVIDE_ADD_MARKER) {
//uint32_t t = ((numer - q) >> 1) + q;
//return t >> denom->shift;
__m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
return _mm_srl_epi64(t, libdivide_u32_to_m128i(more & LIBDIVIDE_64_SHIFT_MASK));
}
else {
//q >> denom->shift
return _mm_srl_epi64(q, libdivide_u32_to_m128i(more));
}
}
}
__m128i libdivide_u64_do_vector_alg0(__m128i numers, const libdivide_u64_t *denom) {
return _mm_srl_epi64(numers, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_64_SHIFT_MASK));
}
__m128i libdivide_u64_do_vector_alg1(__m128i numers, const libdivide_u64_t *denom) {
__m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic));
return _mm_srl_epi64(q, libdivide_u32_to_m128i(denom->more));
}
__m128i libdivide_u64_do_vector_alg2(__m128i numers, const libdivide_u64_t *denom) {
__m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic));
__m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
return _mm_srl_epi64(t, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_64_SHIFT_MASK));
}
#endif
/////////// SINT32
static inline int32_t libdivide__mullhi_s32(int32_t x, int32_t y) {
int64_t xl = x, yl = y;
int64_t rl = xl * yl;
return (int32_t)(rl >> 32); //needs to be arithmetic shift
}
libdivide_s32_t libdivide_s32_gen(int32_t d) {
libdivide_s32_t result;
/* If d is a power of 2, or the negative of a power of 2, we have to use a shift. This is especially important because the magic algorithm fails for -1. To check if d is a power of 2 or its negation, it suffices to check whether its absolute value has exactly one bit set. This works even for INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2. */
uint32_t absD = (uint32_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick
if (EDUKE32_PREDICT_FALSE((absD & (absD - 1)) == 0)) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero
result.magic = 0;
result.more = libdivide__count_trailing_zeros32(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0) | LIBDIVIDE_S32_SHIFT_PATH;
}
else {
const uint32_t floor_log_2_d = 31 - libdivide__count_leading_zeros32(absD);
uint8_t more;
//the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word is 0 and the high word is 2**(floor_log_2_d - 1)
uint32_t rem, proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
const uint32_t e = absD - rem;
LIBDIVIDE_ASSERT(floor_log_2_d >= 1);
/* We are going to start with the power floor_log_2_d - 1. This works if e < 2**floor_log_2_d. */
if (e < (1U << floor_log_2_d)) {
/* This power works */
more = floor_log_2_d - 1;
}
else {
/* We need to go one higher. This should not make proposed_m overflow, but it will make it negative when interpreted as an int32_t. */
const uint32_t twice_rem = rem + rem;
proposed_m += proposed_m;
if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
more = floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); //use the general algorithm
}
proposed_m += 1;
result.magic = (d < 0 ? -(int32_t)proposed_m : (int32_t)proposed_m);
result.more = more;
}
return result;
}
int32_t libdivide_s32_do(int32_t numer, const libdivide_s32_t *denom) {
uint8_t more = denom->more;
if (more & LIBDIVIDE_S32_SHIFT_PATH) {
uint8_t shifter = more & LIBDIVIDE_32_SHIFT_MASK;
int32_t q = (numer + ((numer >> 31) & ((1 << shifter) - 1))) >> shifter;
int32_t shiftMask = (int8_t)more >> 7; //must be arithmetic shift and then sign-extend
q = (q ^ shiftMask) - shiftMask;
return q;
}
else {
int32_t q = libdivide__mullhi_s32(denom->magic, numer);
if (more & LIBDIVIDE_ADD_MARKER) {
int32_t sign = (int8_t)more >> 7; //must be arithmetic shift and then sign extend
q += ((numer ^ sign) - sign);
}
q >>= more & LIBDIVIDE_32_SHIFT_MASK;
q += (q < 0);
return q;
}
}
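/* Worked example of the shift path rounding: for numer = -7, shifter = 2
   (d = 4), a bare arithmetic shift gives -7 >> 2 == -2 (rounds toward
   negative infinity), but C division truncates toward zero. Adding the
   masked bias first gives (-7 + 3) >> 2 == -1 == -7 / 4. For d = -4, bit 7
   of more is set and the (q ^ shiftMask) - shiftMask step negates q. */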
int libdivide_s32_get_algorithm(const libdivide_s32_t *denom) {
uint8_t more = denom->more;
int positiveDivisor = ! (more & LIBDIVIDE_NEGATIVE_DIVISOR);
if (more & LIBDIVIDE_S32_SHIFT_PATH) return (positiveDivisor ? 0 : 1);
else if (more & LIBDIVIDE_ADD_MARKER) return (positiveDivisor ? 2 : 3);
else return 4;
}
int32_t libdivide_s32_do_alg0(int32_t numer, const libdivide_s32_t *denom) {
uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK;
int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1));
return q >> shifter;
}
int32_t libdivide_s32_do_alg1(int32_t numer, const libdivide_s32_t *denom) {
uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK;
int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1));
return - (q >> shifter);
}
int32_t libdivide_s32_do_alg2(int32_t numer, const libdivide_s32_t *denom) {
int32_t q = libdivide__mullhi_s32(denom->magic, numer);
q += numer;
q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK;
q += (q < 0);
return q;
}
int32_t libdivide_s32_do_alg3(int32_t numer, const libdivide_s32_t *denom) {
int32_t q = libdivide__mullhi_s32(denom->magic, numer);
q -= numer;
q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK;
q += (q < 0);
return q;
}
int32_t libdivide_s32_do_alg4(int32_t numer, const libdivide_s32_t *denom) {
int32_t q = libdivide__mullhi_s32(denom->magic, numer);
q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK;
q += (q < 0);
return q;
}
#if LIBDIVIDE_USE_SSE2
__m128i libdivide_s32_do_vector(__m128i numers, const libdivide_s32_t * denom) {
uint8_t more = denom->more;
if (more & LIBDIVIDE_S32_SHIFT_PATH) {
uint32_t shifter = more & LIBDIVIDE_32_SHIFT_MASK;
__m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); //could use _mm_srli_epi32 with an all -1 register
__m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); //q = numer + ((numer >> 31) & roundToZeroTweak);
q = _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter)); // q = q >> shifter
__m128i shiftMask = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); //set every bit of the shift mask to the sign bit of more
q = _mm_sub_epi32(_mm_xor_si128(q, shiftMask), shiftMask); //q = (q ^ shiftMask) - shiftMask;
return q;
}
else {
__m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic));
if (more & LIBDIVIDE_ADD_MARKER) {
__m128i sign = _mm_set1_epi32((int32_t)(int8_t)more >> 7); //must be arithmetic shift
q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); // q += ((numer ^ sign) - sign);
}
q = _mm_sra_epi32(q, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); //q >>= shift
q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0)
return q;
}
}
__m128i libdivide_s32_do_vector_alg0(__m128i numers, const libdivide_s32_t *denom) {
uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK;
__m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1);
__m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak));
return _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter));
}
__m128i libdivide_s32_do_vector_alg1(__m128i numers, const libdivide_s32_t *denom) {
uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK;
__m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1);
__m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak));
return _mm_sub_epi32(_mm_setzero_si128(), _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter)));
}
__m128i libdivide_s32_do_vector_alg2(__m128i numers, const libdivide_s32_t *denom) {
__m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic));
q = _mm_add_epi32(q, numers);
q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK));
q = _mm_add_epi32(q, _mm_srli_epi32(q, 31));
return q;
}
__m128i libdivide_s32_do_vector_alg3(__m128i numers, const libdivide_s32_t *denom) {
__m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic));
q = _mm_sub_epi32(q, numers);
q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK));
q = _mm_add_epi32(q, _mm_srli_epi32(q, 31));
return q;
}
__m128i libdivide_s32_do_vector_alg4(__m128i numers, const libdivide_s32_t *denom) {
__m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic));
q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more)); //q >>= shift
q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0)
return q;
}
#endif
///////////// SINT64
libdivide_s64_t libdivide_s64_gen(int64_t d) {
libdivide_s64_t result;
/* If d is a power of 2, or the negative of a power of 2, we have to use a shift. This is especially important because the magic algorithm fails for -1. To check if d is a power of 2 or its negation, it suffices to check whether its absolute value has exactly one bit set. This works even for INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2. */
const uint64_t absD = (uint64_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick
if (EDUKE32_PREDICT_FALSE((absD & (absD - 1)) == 0)) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero
result.more = libdivide__count_trailing_zeros64(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
result.magic = 0;
}
else {
const uint32_t floor_log_2_d = 63 - libdivide__count_leading_zeros64(absD);
//the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word is 0 and the high word is 2**(floor_log_2_d - 1)
uint8_t more;
uint64_t rem, proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem);
const uint64_t e = absD - rem;
/* We are going to start with the power floor_log_2_d - 1. This works if e < 2**floor_log_2_d. */
if (e < (1ULL << floor_log_2_d)) {
/* This power works */
more = floor_log_2_d - 1;
}
else {
/* We need to go one higher. This should not make proposed_m overflow, but it will make it negative when interpreted as an int64_t. */
const uint64_t twice_rem = rem + rem;
proposed_m += proposed_m;
if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
more = floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
}
proposed_m += 1;
result.more = more;
result.magic = (d < 0 ? -(int64_t)proposed_m : (int64_t)proposed_m);
}
return result;
}
int64_t libdivide_s64_do(int64_t numer, const libdivide_s64_t *denom) {
uint8_t more = denom->more;
int64_t magic = denom->magic;
if (magic == 0) { //shift path
uint32_t shifter = more & LIBDIVIDE_64_SHIFT_MASK;
int64_t q = (numer + ((numer >> 63) & ((1LL << shifter) - 1))) >> shifter;
int64_t shiftMask = (int8_t)more >> 7; //must be arithmetic shift and then sign-extend
q = (q ^ shiftMask) - shiftMask;
return q;
}
else {
int64_t q = libdivide__mullhi_s64(magic, numer);
if (more & LIBDIVIDE_ADD_MARKER) {
int64_t sign = (int8_t)more >> 7; //must be arithmetic shift and then sign extend
q += ((numer ^ sign) - sign);
}
q >>= more & LIBDIVIDE_64_SHIFT_MASK;
q += (q < 0);
return q;
}
}
int libdivide_s64_get_algorithm(const libdivide_s64_t *denom) {
uint8_t more = denom->more;
int positiveDivisor = ! (more & LIBDIVIDE_NEGATIVE_DIVISOR);
if (denom->magic == 0) return (positiveDivisor ? 0 : 1); //shift path
else if (more & LIBDIVIDE_ADD_MARKER) return (positiveDivisor ? 2 : 3);
else return 4;
}
int64_t libdivide_s64_do_alg0(int64_t numer, const libdivide_s64_t *denom) {
uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK;
int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1));
return q >> shifter;
}
int64_t libdivide_s64_do_alg1(int64_t numer, const libdivide_s64_t *denom) {
//shift path with a negative divisor
uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK;
int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1));
return - (q >> shifter);
}
int64_t libdivide_s64_do_alg2(int64_t numer, const libdivide_s64_t *denom) {
int64_t q = libdivide__mullhi_s64(denom->magic, numer);
q += numer;
q >>= denom->more & LIBDIVIDE_64_SHIFT_MASK;
q += (q < 0);
return q;
}
int64_t libdivide_s64_do_alg3(int64_t numer, const libdivide_s64_t *denom) {
int64_t q = libdivide__mullhi_s64(denom->magic, numer);
q -= numer;
q >>= denom->more & LIBDIVIDE_64_SHIFT_MASK;
q += (q < 0);
return q;
}
int64_t libdivide_s64_do_alg4(int64_t numer, const libdivide_s64_t *denom) {
int64_t q = libdivide__mullhi_s64(denom->magic, numer);
q >>= denom->more;
q += (q < 0);
return q;
}
#if LIBDIVIDE_USE_SSE2
__m128i libdivide_s64_do_vector(__m128i numers, const libdivide_s64_t * denom) {
uint8_t more = denom->more;
int64_t magic = denom->magic;
if (magic == 0) { //shift path
uint32_t shifter = more & LIBDIVIDE_64_SHIFT_MASK;
__m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1);
__m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); //q = numer + ((numer >> 63) & roundToZeroTweak);
q = libdivide_s64_shift_right_vector(q, shifter); // q = q >> shifter
__m128i shiftMask = _mm_set1_epi32((int32_t)((int8_t)more >> 7));
q = _mm_sub_epi64(_mm_xor_si128(q, shiftMask), shiftMask); //q = (q ^ shiftMask) - shiftMask;
return q;
}
else {
__m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(magic));
if (more & LIBDIVIDE_ADD_MARKER) {
__m128i sign = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); //must be arithmetic shift
q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); // q += ((numer ^ sign) - sign);
}
q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); //q >>= denom->mult_path.shift
q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0)
return q;
}
}
__m128i libdivide_s64_do_vector_alg0(__m128i numers, const libdivide_s64_t *denom) {
uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK;
__m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1);
__m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak));
q = libdivide_s64_shift_right_vector(q, shifter);
return q;
}
__m128i libdivide_s64_do_vector_alg1(__m128i numers, const libdivide_s64_t *denom) {
uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK;
__m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1);
__m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak));
q = libdivide_s64_shift_right_vector(q, shifter);
return _mm_sub_epi64(_mm_setzero_si128(), q);
}
__m128i libdivide_s64_do_vector_alg2(__m128i numers, const libdivide_s64_t *denom) {
__m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic));
q = _mm_add_epi64(q, numers);
q = libdivide_s64_shift_right_vector(q, denom->more & LIBDIVIDE_64_SHIFT_MASK);
q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0)
return q;
}
__m128i libdivide_s64_do_vector_alg3(__m128i numers, const libdivide_s64_t *denom) {
__m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic));
q = _mm_sub_epi64(q, numers);
q = libdivide_s64_shift_right_vector(q, denom->more & LIBDIVIDE_64_SHIFT_MASK);
q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0)
return q;
}
__m128i libdivide_s64_do_vector_alg4(__m128i numers, const libdivide_s64_t *denom) {
__m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic));
q = libdivide_s64_shift_right_vector(q, denom->more);
q = _mm_add_epi64(q, _mm_srli_epi64(q, 63));
return q;
}
#endif
#endif // LIBDIVIDE_BODY