Mirror of https://github.com/ZDoom/raze-gles.git, synced 2024-12-25 11:10:47 +00:00
etcpak: Remove SSE4.1 and AVX2 support.
They are of no use to us on ARM.

git-svn-id: https://svn.eduke32.com/eduke32@5683 1a8010ca-5511-0410-912e-c29ae57300e0
parent 7a940a255f
commit a98e36e095

3 changed files with 1 addition and 327 deletions
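Every removed hunk below follows the same compile-time dispatch pattern: an x86-only branch guarded by __SSE4_1__, with the portable scalar branch in the #else. A minimal sketch of the shape being deleted (illustrative, not the literal file contents):

#ifdef __SSE4_1__
    // x86 intrinsics (_mm_*) path -- removed by this commit
#else
    // portable scalar path -- kept, and now unconditional on every target
#endif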
@@ -8,15 +8,6 @@
 #include "Tables.hpp"
 #include "Types.hpp"
 #include "Vector.hpp"
-#ifdef __SSE4_1__
-# ifdef _MSC_VER
-# include <intrin.h>
-# include <Windows.h>
-# define _bswap(x) _byteswap_ulong(x)
-# else
-# include <x86intrin.h>
-# endif
-#endif

 namespace
 {
@@ -25,48 +16,6 @@ typedef std::array<uint16, 4> v4i;

 void Average( const uint8* data, v4i* a )
 {
-#ifdef __SSE4_1__
-    __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
-    __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
-    __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
-    __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
-
-    __m128i d0l = _mm_unpacklo_epi8(d0, _mm_setzero_si128());
-    __m128i d0h = _mm_unpackhi_epi8(d0, _mm_setzero_si128());
-    __m128i d1l = _mm_unpacklo_epi8(d1, _mm_setzero_si128());
-    __m128i d1h = _mm_unpackhi_epi8(d1, _mm_setzero_si128());
-    __m128i d2l = _mm_unpacklo_epi8(d2, _mm_setzero_si128());
-    __m128i d2h = _mm_unpackhi_epi8(d2, _mm_setzero_si128());
-    __m128i d3l = _mm_unpacklo_epi8(d3, _mm_setzero_si128());
-    __m128i d3h = _mm_unpackhi_epi8(d3, _mm_setzero_si128());
-
-    __m128i sum0 = _mm_add_epi16(d0l, d1l);
-    __m128i sum1 = _mm_add_epi16(d0h, d1h);
-    __m128i sum2 = _mm_add_epi16(d2l, d3l);
-    __m128i sum3 = _mm_add_epi16(d2h, d3h);
-
-    __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
-    __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
-    __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
-    __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
-    __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
-    __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
-    __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
-    __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());
-
-    __m128i b0 = _mm_add_epi32(sum0l, sum0h);
-    __m128i b1 = _mm_add_epi32(sum1l, sum1h);
-    __m128i b2 = _mm_add_epi32(sum2l, sum2h);
-    __m128i b3 = _mm_add_epi32(sum3l, sum3h);
-
-    __m128i a0 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b2, b3), _mm_set1_epi32(4)), 3);
-    __m128i a1 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b1), _mm_set1_epi32(4)), 3);
-    __m128i a2 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b1, b3), _mm_set1_epi32(4)), 3);
-    __m128i a3 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b2), _mm_set1_epi32(4)), 3);
-
-    _mm_storeu_si128((__m128i*)&a[0], _mm_packus_epi32(_mm_shuffle_epi32(a0, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a1, _MM_SHUFFLE(3, 0, 1, 2))));
-    _mm_storeu_si128((__m128i*)&a[2], _mm_packus_epi32(_mm_shuffle_epi32(a2, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a3, _MM_SHUFFLE(3, 0, 1, 2))));
-#else
     uint32 r[4];
     uint32 g[4];
     uint32 b[4];
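Per channel, the deleted Average body above reduces the 4x4 block to the four 2x2 quadrant sums b0..b3, then averages pairs of quadrants (8 pixels each) with round-to-nearest. A scalar sketch of that final step, using hypothetical quadrant sums qa/qb (assumed names, not repo code):

#include <stdint.h>

// Scalar form of _mm_srli_epi32(_mm_add_epi32(..., _mm_set1_epi32(4)), 3):
// average of an 8-pixel half-block, rounded to nearest.
static inline uint16_t halfAvg( uint32_t qa, uint32_t qb )
{
    return uint16_t( ( qa + qb + 4 ) / 8 );
}
// The quadrant pairings match a0..a3 above: (b2,b3), (b0,b1), (b1,b3), (b0,b2).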
@@ -91,60 +40,10 @@ void Average( const uint8* data, v4i* a )
     a[1] = v4i{ uint16( (r[0] + r[1] + 4) / 8 ), uint16( (g[0] + g[1] + 4) / 8 ), uint16( (b[0] + b[1] + 4) / 8 ), 0};
     a[2] = v4i{ uint16( (r[1] + r[3] + 4) / 8 ), uint16( (g[1] + g[3] + 4) / 8 ), uint16( (b[1] + b[3] + 4) / 8 ), 0};
     a[3] = v4i{ uint16( (r[0] + r[2] + 4) / 8 ), uint16( (g[0] + g[2] + 4) / 8 ), uint16( (b[0] + b[2] + 4) / 8 ), 0};
-#endif
 }

 void CalcErrorBlock( const uint8* data, uint err[4][4] )
 {
-#ifdef __SSE4_1__
-    __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
-    __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
-    __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
-    __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
-
-    __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
-    __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
-    __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
-    __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));
-
-    __m128i d0l = _mm_unpacklo_epi8(dm0, _mm_setzero_si128());
-    __m128i d0h = _mm_unpackhi_epi8(dm0, _mm_setzero_si128());
-    __m128i d1l = _mm_unpacklo_epi8(dm1, _mm_setzero_si128());
-    __m128i d1h = _mm_unpackhi_epi8(dm1, _mm_setzero_si128());
-    __m128i d2l = _mm_unpacklo_epi8(dm2, _mm_setzero_si128());
-    __m128i d2h = _mm_unpackhi_epi8(dm2, _mm_setzero_si128());
-    __m128i d3l = _mm_unpacklo_epi8(dm3, _mm_setzero_si128());
-    __m128i d3h = _mm_unpackhi_epi8(dm3, _mm_setzero_si128());
-
-    __m128i sum0 = _mm_add_epi16(d0l, d1l);
-    __m128i sum1 = _mm_add_epi16(d0h, d1h);
-    __m128i sum2 = _mm_add_epi16(d2l, d3l);
-    __m128i sum3 = _mm_add_epi16(d2h, d3h);
-
-    __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
-    __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
-    __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
-    __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
-    __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
-    __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
-    __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
-    __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());
-
-    __m128i b0 = _mm_add_epi32(sum0l, sum0h);
-    __m128i b1 = _mm_add_epi32(sum1l, sum1h);
-    __m128i b2 = _mm_add_epi32(sum2l, sum2h);
-    __m128i b3 = _mm_add_epi32(sum3l, sum3h);
-
-    __m128i a0 = _mm_add_epi32(b2, b3);
-    __m128i a1 = _mm_add_epi32(b0, b1);
-    __m128i a2 = _mm_add_epi32(b1, b3);
-    __m128i a3 = _mm_add_epi32(b0, b2);
-
-    _mm_storeu_si128((__m128i*)&err[0], a0);
-    _mm_storeu_si128((__m128i*)&err[1], a1);
-    _mm_storeu_si128((__m128i*)&err[2], a2);
-    _mm_storeu_si128((__m128i*)&err[3], a3);
-#else
     uint terr[4][4];

     memset(terr, 0, 16 * sizeof(uint));
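In CalcErrorBlock above, the dm0..dm3 masks clear each pixel's high byte with 0x00FFFFFF so that only the three color channels enter the per-half error sums. A one-line scalar sketch of that masking (illustrative):

#include <stdint.h>

// Scalar form of _mm_and_si128(d, _mm_set1_epi32(0x00FFFFFF)):
// drop the fourth byte of a packed pixel before accumulating error sums.
static inline uint32_t dropHighByte( uint32_t px )
{
    return px & 0x00FFFFFFu;
}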
@@ -175,7 +74,6 @@ void CalcErrorBlock( const uint8* data, uint err[4][4] )
     {
         err[i][3] = 0;
     }
-#endif
 }

 uint CalcError( const uint block[4], const v4i& average )
@@ -190,41 +88,6 @@ uint CalcError( const uint block[4], const v4i& average )

 void ProcessAverages( v4i* a )
 {
-#ifdef __SSE4_1__
-    for( int i=0; i<2; i++ )
-    {
-        __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());
-
-        __m128i t = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(31)), _mm_set1_epi16(128));
-
-        __m128i c = _mm_srli_epi16(_mm_add_epi16(t, _mm_srli_epi16(t, 8)), 8);
-
-        __m128i c1 = _mm_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
-        __m128i diff = _mm_sub_epi16(c, c1);
-        diff = _mm_max_epi16(diff, _mm_set1_epi16(-4));
-        diff = _mm_min_epi16(diff, _mm_set1_epi16(3));
-
-        __m128i co = _mm_add_epi16(c1, diff);
-
-        c = _mm_blend_epi16(co, c, 0xF0);
-
-        __m128i a0 = _mm_or_si128(_mm_slli_epi16(c, 3), _mm_srli_epi16(c, 2));
-
-        _mm_storeu_si128((__m128i*)a[4+i*2].data(), a0);
-    }
-
-    for( int i=0; i<2; i++ )
-    {
-        __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());
-
-        __m128i t0 = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(15)), _mm_set1_epi16(128));
-        __m128i t1 = _mm_srli_epi16(_mm_add_epi16(t0, _mm_srli_epi16(t0, 8)), 8);
-
-        __m128i t2 = _mm_or_si128(t1, _mm_slli_epi16(t1, 4));
-
-        _mm_storeu_si128((__m128i*)a[i*2].data(), t2);
-    }
-#else
     for( int i=0; i<2; i++ )
     {
         for( int j=0; j<3; j++ )
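The deleted quantizer above relies on the exact shift-based division by 255: for t = x*31 + 128, the expression (t + (t >> 8)) >> 8 equals (x*31 + 128) / 255 over the whole 8-bit input range, i.e. x rescaled to 5 bits with rounding. A self-contained check of the identity (demonstration code, not from the repo):

#include <assert.h>
#include <stdint.h>

int main()
{
    for( int x = 0; x < 256; x++ )
    {
        uint16_t t = uint16_t( x * 31 + 128 );
        int fast  = ( t + ( t >> 8 ) ) >> 8;   // the deleted SIMD formula, scalarized
        int exact = ( x * 31 + 128 ) / 255;    // plain rounded division
        assert( fast == exact );
    }
    return 0;
}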
@@ -249,7 +112,6 @@ void ProcessAverages( v4i* a )
         a[i][1] = g_avg2[mul8bit( a[i][1], 15 )];
         a[i][2] = g_avg2[mul8bit( a[i][2], 15 )];
     }
-#endif
 }

 void EncodeAverages( uint64& _d, const v4i* a, size_t idx )
@@ -281,28 +143,6 @@ void EncodeAverages( uint64& _d, const v4i* a, size_t idx )

 uint64 CheckSolid( const uint8* src )
 {
-#ifdef __SSE4_1__
-    __m128i d0 = _mm_loadu_si128(((__m128i*)src) + 0);
-    __m128i d1 = _mm_loadu_si128(((__m128i*)src) + 1);
-    __m128i d2 = _mm_loadu_si128(((__m128i*)src) + 2);
-    __m128i d3 = _mm_loadu_si128(((__m128i*)src) + 3);
-
-    __m128i c = _mm_shuffle_epi32(d0, _MM_SHUFFLE(0, 0, 0, 0));
-
-    __m128i c0 = _mm_cmpeq_epi8(d0, c);
-    __m128i c1 = _mm_cmpeq_epi8(d1, c);
-    __m128i c2 = _mm_cmpeq_epi8(d2, c);
-    __m128i c3 = _mm_cmpeq_epi8(d3, c);
-
-    __m128i m0 = _mm_and_si128(c0, c1);
-    __m128i m1 = _mm_and_si128(c2, c3);
-    __m128i m = _mm_and_si128(m0, m1);
-
-    if (!_mm_testc_si128(m, _mm_set1_epi32(-1)))
-    {
-        return 0;
-    }
-#else
     const uint8* ptr = src + 4;
     for( int i=1; i<16; i++ )
     {
@@ -312,7 +152,7 @@ uint64 CheckSolid( const uint8* src )
         }
         ptr += 4;
     }
-#endif
+
     return 0x02000000 |
         ( uint( src[0] & 0xF8 ) << 16 ) |
         ( uint( src[1] & 0xF8 ) << 8 ) |
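The removed vector test above folds the sixteen per-pixel comparisons with AND and requires all-ones via _mm_testc_si128; the surviving #else loop performs the same all-pixels-equal check one pixel at a time. A compact portable sketch of that predicate (illustrative, assuming 4 bytes per pixel):

#include <string.h>

// True when all 16 four-byte pixels of a 4x4 block equal the first one,
// i.e. the block can be emitted as a single solid color.
static bool isSolidBlock( const unsigned char* src )
{
    for( int i = 1; i < 16; i++ )
    {
        if( memcmp( src, src + i * 4, 4 ) != 0 ) return false;
    }
    return true;
}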
@@ -351,62 +191,6 @@ void FindBestFit( uint64 terr[2][8], uint16 tsel[16][8], v4i a[8], const uint32*
         int dg = a[bid][1] - g;
         int db = a[bid][2] - b;

-#ifdef __SSE4_1__
-        // Reference implementation
-
-        __m128i pix = _mm_set1_epi32(dr * 77 + dg * 151 + db * 28);
-        // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
-        __m128i error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[0]));
-        __m128i error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[1]));
-        __m128i error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[0]));
-        __m128i error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[1]));
-
-        __m128i index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
-        __m128i minError0 = _mm_min_epi32(error0, error1);
-
-        __m128i index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
-        __m128i minError1 = _mm_min_epi32(error2, error3);
-
-        __m128i minIndex0 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
-        __m128i minError = _mm_min_epi32(minError0, minError1);
-
-        // Squaring the minimum error to produce correct values when adding
-        __m128i minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
-        __m128i squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
-        squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
-        _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
-        __m128i minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
-        __m128i squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
-        squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
-        _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);
-
-        // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
-        error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[2]));
-        error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[3]));
-        error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[2]));
-        error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[3]));
-
-        index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
-        minError0 = _mm_min_epi32(error0, error1);
-
-        index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
-        minError1 = _mm_min_epi32(error2, error3);
-
-        __m128i minIndex1 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
-        minError = _mm_min_epi32(minError0, minError1);
-
-        // Squaring the minimum error to produce correct values when adding
-        minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
-        squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
-        squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 2));
-        _mm_storeu_si128(((__m128i*)ter) + 2, squareErrorLow);
-        minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
-        squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
-        squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 3));
-        _mm_storeu_si128(((__m128i*)ter) + 3, squareErrorHigh);
-        __m128i minIndex = _mm_packs_epi32(minIndex0, minIndex1);
-        _mm_storeu_si128((__m128i*)sel, minIndex);
-#else
         int pix = dr * 77 + dg * 151 + db * 28;

         for( int t=0; t<8; t++ )
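The "taking the absolute value" comments above depend on |a| < |b| being equivalent to a*a < b*b: candidates are only ranked, so comparing absolute errors selects the same table entry as comparing squared errors, and only the chosen minimum is squared before accumulation. A toy check (plain arithmetic, not repo code):

#include <assert.h>
#include <stdlib.h>

int main()
{
    int a = -5, b = 3;
    // Ordering by absolute value agrees with ordering by square,
    // without a multiply per candidate.
    assert( ( abs( a ) < abs( b ) ) == ( a * a < b * b ) );
    return 0;
}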
@@ -426,64 +210,9 @@ void FindBestFit( uint64 terr[2][8], uint16 tsel[16][8], v4i a[8], const uint32*
             *sel++ = idx;
             *ter++ += err;
         }
-#endif
     }
 }

-#ifdef __SSE4_1__
-// Non-reference implementation, but faster. Produces the same results as the AVX2 version
-void FindBestFit( uint32 terr[2][8], uint16 tsel[16][8], v4i a[8], const uint32* id, const uint8* data )
-{
-    for( size_t i=0; i<16; i++ )
-    {
-        uint16* sel = tsel[i];
-        uint bid = id[i];
-        uint32* ter = terr[bid%2];
-
-        uint8 b = *data++;
-        uint8 g = *data++;
-        uint8 r = *data++;
-        data++;
-
-        int dr = a[bid][0] - r;
-        int dg = a[bid][1] - g;
-        int db = a[bid][2] - b;
-
-        // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
-        // This produces slightly different results, but is significantly faster
-        __m128i pixel = _mm_set1_epi16(dr * 38 + dg * 76 + db * 14);
-        __m128i pix = _mm_abs_epi16(pixel);
-
-        // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
-        // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
-        __m128i error0 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[0]));
-        __m128i error1 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[1]));
-
-        __m128i index = _mm_and_si128(_mm_cmplt_epi16(error1, error0), _mm_set1_epi16(1));
-        __m128i minError = _mm_min_epi16(error0, error1);
-
-        // Exploiting the symmetry of the selector table and using the sign bit
-        // This produces slightly different results, but is needed to produce the same results as the AVX2 implementation
-        __m128i indexBit = _mm_andnot_si128(_mm_srli_epi16(pixel, 15), _mm_set1_epi8(-1));
-        __m128i minIndex = _mm_or_si128(index, _mm_add_epi16(indexBit, indexBit));
-
-        // Squaring the minimum error to produce correct values when adding
-        __m128i squareErrorLo = _mm_mullo_epi16(minError, minError);
-        __m128i squareErrorHi = _mm_mulhi_epi16(minError, minError);
-
-        __m128i squareErrorLow = _mm_unpacklo_epi16(squareErrorLo, squareErrorHi);
-        __m128i squareErrorHigh = _mm_unpackhi_epi16(squareErrorLo, squareErrorHi);
-
-        squareErrorLow = _mm_add_epi32(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
-        _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
-        squareErrorHigh = _mm_add_epi32(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
-        _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);
-
-        _mm_storeu_si128((__m128i*)sel, minIndex);
-    }
-}
-#endif
-
 uint8_t convert6(float f)
 {
     int i = (std::min(std::max(static_cast<int>(f), 0), 1023) - 15) >> 1;
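The halved weights in the deleted fast path (38/76/14 instead of 77/151/28) exist to keep the weighted difference inside a signed 16-bit lane, as its comment says: the full weights sum to 256, so the value can reach 255*256 = 65280, while the halved weights peak at 255*128 = 32640. A compile-time check of that bound (my arithmetic based on the comment):

#include <stdint.h>

// Full-precision weights overflow an int16 lane...
static_assert( 255 * ( 77 + 151 + 28 ) > INT16_MAX, "full weights overflow int16" );
// ...the halved-and-rounded weights do not.
static_assert( 255 * ( 38 + 76 + 14 ) <= INT16_MAX, "halved weights fit int16" );

int main() { return 0; }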
@@ -674,11 +403,7 @@ uint64 ProcessRGB( const uint8* src )
     size_t idx = GetLeastError( err, 4 );
     EncodeAverages( d, a, idx );

-#if defined __SSE4_1__ && !defined REFERENCE_IMPLEMENTATION
-    uint32 terr[2][8] = {};
-#else
     uint64 terr[2][8] = {};
-#endif
     uint16 tsel[16][8];
     auto id = g_id[idx];
     FindBestFit( terr, tsel, a, id, src );
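The #if removed here existed because the two FindBestFit variants accumulate at different widths: the deleted SSE4.1 path sums 16-bit squared errors into uint32 lanes, while the reference path works with 256-scaled errors whose squares alone can exceed 32 bits, hence uint64. A coarse bound check (my arithmetic, not from the repo):

#include <stdint.h>

// Reference-path error magnitude can approach 255*256 + 183*256 = 112128
// before the minimum is taken; even one such squared term overflows uint32,
// and 16 of them are summed per selector.
constexpr uint64_t worstErr = 255ull * 256 + 183ull * 256;
static_assert( worstErr * worstErr > UINT32_MAX, "squared error needs 64 bits" );

int main() { return 0; }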
@@ -66,44 +66,3 @@ const uint32 g_flags[64] = {
     0x00000402, 0x00000402, 0x0000E002, 0x0000E002,
     0x00000402, 0x0000E002, 0x0000E002, 0x0000E002
 };
-
-#ifdef __SSE4_1__
-const uint8 g_flags_AVX2[64] =
-{
-    0x63, 0x63, 0x63, 0x63,
-    0x63, 0x63, 0x63, 0x7D,
-    0x63, 0x63, 0x7D, 0x7D,
-    0x63, 0x7D, 0x7D, 0x7D,
-    0x43, 0x43, 0x43, 0x43,
-    0x43, 0x43, 0x43, 0x5D,
-    0x43, 0x43, 0x5D, 0x5D,
-    0x43, 0x5D, 0x5D, 0x5D,
-    0x23, 0x23, 0x23, 0x23,
-    0x23, 0x23, 0x23, 0x3D,
-    0x23, 0x23, 0x3D, 0x3D,
-    0x23, 0x3D, 0x3D, 0x3D,
-    0x03, 0x03, 0x03, 0x03,
-    0x03, 0x03, 0x03, 0x1D,
-    0x03, 0x03, 0x1D, 0x1D,
-    0x03, 0x1D, 0x1D, 0x1D,
-};
-
-const __m128i g_table_SIMD[2] =
-{
-    _mm_setr_epi16( 2, 5, 9, 13, 18, 24, 33, 47),
-    _mm_setr_epi16( 8, 17, 29, 42, 60, 80, 106, 183)
-};
-const __m128i g_table128_SIMD[2] =
-{
-    _mm_setr_epi16( 2*128, 5*128, 9*128, 13*128, 18*128, 24*128, 33*128, 47*128),
-    _mm_setr_epi16( 8*128, 17*128, 29*128, 42*128, 60*128, 80*128, 106*128, 183*128)
-};
-const __m128i g_table256_SIMD[4] =
-{
-    _mm_setr_epi32( 2*256, 5*256, 9*256, 13*256),
-    _mm_setr_epi32( 8*256, 17*256, 29*256, 42*256),
-    _mm_setr_epi32( 18*256, 24*256, 33*256, 47*256),
-    _mm_setr_epi32( 60*256, 80*256, 106*256, 183*256)
-};
-#endif
-
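The constant tables deleted above are the base ETC1 modifier magnitudes pre-scaled by 128 and 256 to match the fixed-point precision of the two removed FindBestFit variants; nothing in them is independent data. A hypothetical generator showing the relationship (names invented for illustration):

#include <stdint.h>

// Base modifier magnitudes, exactly the values in g_table_SIMD above.
static const int32_t kBase[16] = {
    2, 5,  9,  13, 18, 24, 33,  47,
    8, 17, 29, 42, 60, 80, 106, 183
};

// g_table128_SIMD and g_table256_SIMD were kBase scaled by 128 and 256.
static void makeScaledTable( int32_t out[16], int32_t scale )
{
    for( int i = 0; i < 16; i++ ) out[i] = kBase[i] * scale;
}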
@@ -2,9 +2,6 @@
 #define __TABLES_HPP__

 #include "Types.hpp"
-#ifdef __SSE4_1__
-#include <smmintrin.h>
-#endif

 extern const int32 g_table[8][4];
 extern const int64 g_table256[8][4];
@@ -15,11 +12,4 @@ extern const uint32 g_avg2[16];

 extern const uint32 g_flags[64];

-#ifdef __SSE4_1__
-extern const uint8 g_flags_AVX2[64];
-extern const __m128i g_table_SIMD[2];
-extern const __m128i g_table128_SIMD[2];
-extern const __m128i g_table256_SIMD[4];
-#endif
-
 #endif