gzdoom-gles/src/m_swap.h


// Emacs style mode select -*- C++ -*-
//-----------------------------------------------------------------------------
//
// $Id:$
//
// Copyright (C) 1993-1996 by id Software, Inc.
//
// This source is available for distribution and/or modification
// only under the terms of the DOOM Source Code License as
// published by id Software. All rights reserved.
//
// The source is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// FITNESS FOR A PARTICULAR PURPOSE. See the DOOM Source Code License
// for more details.
//
// DESCRIPTION:
// Endianness handling, swapping 16-bit and 32-bit values.
//
//-----------------------------------------------------------------------------
#ifndef __M_SWAP_H__
#define __M_SWAP_H__
#include <stdlib.h>
// Endianness handling.
// WAD files are stored little endian.
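//
// Illustration (not part of the original file): on disk, the little-endian
// 16-bit value 0x1234 is stored as the byte sequence 34 12. On a
// little-endian host, LittleShort() is the identity; on a big-endian host
// it swaps the two bytes so the value reads back as 0x1234.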
#ifdef __APPLE__
#include <libkern/OSByteOrder.h>

inline short LittleShort(short x)
{
	return (short)OSSwapLittleToHostInt16((uint16_t)x);
}

inline unsigned short LittleShort(unsigned short x)
{
	return OSSwapLittleToHostInt16(x);
}

inline short LittleShort(int x)
{
	return (short)OSSwapLittleToHostInt16((uint16_t)x);
}

inline int LittleLong(int x)
{
	return (int)OSSwapLittleToHostInt32((uint32_t)x);
}

inline unsigned int LittleLong(unsigned int x)
{
	return OSSwapLittleToHostInt32(x);
}

inline short BigShort(short x)
{
	return (short)OSSwapBigToHostInt16((uint16_t)x);
}

inline unsigned short BigShort(unsigned short x)
{
	return OSSwapBigToHostInt16(x);
}

inline int BigLong(int x)
{
	return (int)OSSwapBigToHostInt32((uint32_t)x);
}

inline unsigned int BigLong(unsigned int x)
{
	return OSSwapBigToHostInt32(x);
}
#else
#ifdef __BIG_ENDIAN__

// Swap 16-bit, that is, MSB and LSB byte.
// No masking with 0xFF should be necessary.
inline short LittleShort (short x)
{
	return (short)((((unsigned short)x)>>8) | (((unsigned short)x)<<8));
}

inline unsigned short LittleShort (unsigned short x)
{
	return (unsigned short)((x>>8) | (x<<8));
}

// Swapping 32-bit.
inline unsigned int LittleLong (unsigned int x)
{
	return (unsigned int)(
		(x>>24)
		| ((x>>8) & 0xff00)
		| ((x<<8) & 0xff0000)
		| (x<<24));
}

inline int LittleLong (int x)
{
	return (int)(
		(((unsigned int)x)>>24)
		| ((((unsigned int)x)>>8) & 0xff00)
		| ((((unsigned int)x)<<8) & 0xff0000)
		| (((unsigned int)x)<<24));
}

#define BigShort(x) (x)
#define BigLong(x) (x)
#else
#define LittleShort(x) (x)
#define LittleLong(x) (x)
#if defined(_MSC_VER)

// MSVC: use the compiler's byte-swap intrinsics from <stdlib.h>.
inline short BigShort (short x)
{
	return (short)_byteswap_ushort((unsigned short)x);
}

inline unsigned short BigShort (unsigned short x)
{
	return _byteswap_ushort(x);
}

inline int BigLong (int x)
{
	return (int)_byteswap_ulong((unsigned long)x);
}

inline unsigned int BigLong (unsigned int x)
{
	return (unsigned int)_byteswap_ulong((unsigned long)x);
}
#else
inline short BigShort (short x)
{
	return (short)((((unsigned short)x)>>8) | (((unsigned short)x)<<8));
}

inline unsigned short BigShort (unsigned short x)
{
	return (unsigned short)((x>>8) | (x<<8));
}

inline unsigned int BigLong (unsigned int x)
{
	return (unsigned int)(
		(x>>24)
		| ((x>>8) & 0xff00)
		| ((x<<8) & 0xff0000)
		| (x<<24));
}

inline int BigLong (int x)
{
	return (int)(
		(((unsigned int)x)>>24)
		| ((((unsigned int)x)>>8) & 0xff00)
		| ((((unsigned int)x)<<8) & 0xff0000)
		| (((unsigned int)x)<<24));
}
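
// Worked example (illustrative, not part of the original file): on a
// little-endian host, BigLong(0x12345678) assembles 0x78563412 from
//	(x>>24)            = 0x00000012
//	(x>>8)  & 0xff00   = 0x00003400
//	(x<<8)  & 0xff0000 = 0x00560000
//	(x<<24)            = 0x78000000
// OR'ing the four terms moves each byte to its mirrored position.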
#endif
#endif // __BIG_ENDIAN__
#endif // __APPLE__
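
// Usage sketch (illustrative only; 'wadinfo_t' and the stream 'f' are
// hypothetical local names, not part of this header): fields read from a
// WAD file must pass through LittleShort/LittleLong before use, since the
// file format is little endian regardless of the host.
//
//	struct wadinfo_t { char identification[4]; int numlumps; int infotableofs; };
//	wadinfo_t header;
//	if (fread(&header, sizeof header, 1, f) == 1)
//	{
//		header.numlumps = LittleLong(header.numlumps);
//		header.infotableofs = LittleLong(header.infotableofs);
//	}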
// Data accessors, since some data is highly likely to be unaligned.
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__x86_64__)
// x86 and x64 tolerate unaligned loads, so read the value directly.
inline int GetShort(const unsigned char *foo)
{
	return *(const short *)foo;
}

inline int GetInt(const unsigned char *foo)
{
	return *(const int *)foo;
}

inline int GetBigInt(const unsigned char *foo)
{
	return BigLong(GetInt(foo));
}
#else
inline int GetShort(const unsigned char *foo)
{
	return short(foo[0] | (foo[1] << 8));
}

inline int GetInt(const unsigned char *foo)
{
	return int(foo[0] | (foo[1] << 8) | (foo[2] << 16) | (foo[3] << 24));
}

inline int GetBigInt(const unsigned char *foo)
{
	return int((foo[0] << 24) | (foo[1] << 16) | (foo[2] << 8) | foo[3]);
}
#endif
#ifdef __BIG_ENDIAN__
inline int GetNativeInt(const unsigned char *foo)
{
	return GetBigInt(foo);
}
#else
inline int GetNativeInt(const unsigned char *foo)
{
	return GetInt(foo);
}
#endif
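
// Usage sketch (illustrative only; 'lump' is a hypothetical buffer, not part
// of this header): these accessors exist because offsets into lump data are
// not guaranteed to be aligned, so plain pointer casts would be unsafe on
// strict-alignment architectures.
//
//	const unsigned char *lump = ...;      // raw bytes of a WAD lump
//	int count  = GetShort(lump);          // little-endian 16-bit field
//	int offset = GetInt(lump + 2);        // little-endian 32-bit field
//	int native = GetNativeInt(lump + 6);  // 32-bit field in host byte order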
#endif // __M_SWAP_H__