mirror of
https://github.com/ZDoom/qzdoom.git
synced 2024-12-16 15:31:30 +00:00
dda5ddd3c2
registers AMD64 provides, this routine still needs to be written as self- modifying code for maximum performance. The additional registers do allow for further optimization over the x86 version by allowing all four pixels to be in flight at the same time. The end result is that AMD64 ASM is about 2.18 times faster than AMD64 C and about 1.06 times faster than x86 ASM. (For further comparison, AMD64 C and x86 C are practically the same for this function.) Should I port any more assembly to AMD64, mvlineasm4 is the most likely candidate, but it's not used enough at this point to bother. Also, this may or may not work with Linux at the moment, since it doesn't have the eh_handler metadata. Win64 is easier, since I just need to structure the function prologue and epilogue properly and use some assembler directives/macros to automatically generate the metadata. And that brings up another point: You need YASM to assemble the AMD64 code, because NASM doesn't support the Win64 metadata directives. - Added an SSE version of DoBlending. This is strictly C intrinsics. VC++ still throws around unneccessary register moves. GCC seems to be pretty close to optimal, requiring only about 2 cycles/color. They're both faster than my hand-written MMX routine, so I don't need to feel bad about not hand-optimizing this for x64 builds. - Removed an extra instruction from DoBlending_MMX, transposed two instructions, and unrolled it once, shaving off about 80 cycles from the time required to blend 256 palette entries. Why? Because I tried writing a C version of the routine using compiler intrinsics and was appalled by all the extra movq's VC++ added to the code. GCC was better, but still generated extra instructions. I only wanted a C version because I can't use inline assembly with VC++'s x64 compiler, and x64 assembly is a bit of a pain. (It's a pain because Linux and Windows have different calling conventions, and you need to maintain extra metadata for functions.) 
So, the assembly version stays and the C version stays out. - Removed all the pixel doubling r_detail modes, since the one platform they were intended to assist (486) actually sees very little benefit from them. - Rewrote CheckMMX in C and renamed it to CheckCPU. - Fixed: CPUID function 0x80000005 is specified to return detailed L1 cache only for AMD processors, so we must not use it on other architectures, or we end up overwriting the L1 cache line size with 0 or some other number we don't actually understand. SVN r1134 (trunk)
127 lines
2.7 KiB
C++
127 lines
2.7 KiB
C++
// Emacs style mode select -*- C++ -*-
|
|
//-----------------------------------------------------------------------------
|
|
//
|
|
// $Id:$
|
|
//
|
|
// Copyright (C) 1993-1996 by id Software, Inc.
|
|
//
|
|
// This source is available for distribution and/or modification
|
|
// only under the terms of the DOOM Source Code License as
|
|
// published by id Software. All rights reserved.
|
|
//
|
|
// The source is distributed in the hope that it will be useful,
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
// FITNESS FOR A PARTICULAR PURPOSE. See the DOOM Source Code License
|
|
// for more details.
|
|
//
|
|
// DESCRIPTION:
|
|
// Endianness handling, swapping 16bit and 32bit.
|
|
//
|
|
//-----------------------------------------------------------------------------
|
|
|
|
|
|
#ifndef __M_SWAP_H__
|
|
#define __M_SWAP_H__
|
|
|
|
#include <stdlib.h>
|
|
|
|
// Endianness handling.
|
|
// WAD files are stored little endian.
|
|
#ifdef WORDS_BIGENDIAN
|
|
|
|
// Swap 16bit, that is, MSB and LSB byte.
|
|
// No masking with 0xFF should be necessary.
|
|
// Convert a little-endian 16-bit value to the native (big-endian) order.
// The swap is done on the unsigned representation so no negative value
// is ever shifted; the two byte lanes cannot overlap, so no masking is needed.
inline short LittleShort (short x)
{
	const unsigned short v = (unsigned short)x;
	return (short)((v << 8) | (v >> 8));
}
|
|
|
|
// Convert a little-endian unsigned 16-bit value to native (big-endian) order.
inline unsigned short LittleShort (unsigned short x)
{
	return (unsigned short)((x << 8) | (x >> 8));
}
|
|
|
|
// Swapping 32bit.
|
|
// Convert a little-endian unsigned 32-bit value to native (big-endian) order
// by moving each byte into its mirrored position.
inline unsigned int LittleLong (unsigned int x)
{
	const unsigned int b0 = x >> 24;                  // byte 3 -> byte 0
	const unsigned int b1 = (x >> 8) & 0x0000ff00u;   // byte 2 -> byte 1
	const unsigned int b2 = (x << 8) & 0x00ff0000u;   // byte 1 -> byte 2
	const unsigned int b3 = x << 24;                  // byte 0 -> byte 3
	return b0 | b1 | b2 | b3;
}
|
|
|
|
// Convert a little-endian signed 32-bit value to native (big-endian) order.
// The work is done on the unsigned representation so shifting never touches
// the sign bit of a signed operand.
inline int LittleLong (int x)
{
	const unsigned int u = (unsigned int)x;
	return (int)((u >> 24)
		| ((u >> 8) & 0x0000ff00u)
		| ((u << 8) & 0x00ff0000u)
		| (u << 24));
}
|
|
|
|
// On a big-endian host, big-endian data is already in native byte order,
// so these are identity operations.
#define BigShort(x) (x)
#define BigLong(x) (x)
|
|
|
|
#else
|
|
|
|
// On a little-endian host, little-endian data (e.g. WAD contents) is already
// in native byte order, so these are identity operations.
#define LittleShort(x) (x)
#define LittleLong(x) (x)
|
|
|
|
#if defined(_MSC_VER)
|
|
|
|
inline short BigShort (short x)
|
|
{
|
|
return (short)_byteswap_ushort((unsigned short)x);
|
|
}
|
|
|
|
inline unsigned short BigShort (unsigned short x)
|
|
{
|
|
return _byteswap_ushort(x);
|
|
}
|
|
|
|
inline int BigLong (int x)
|
|
{
|
|
return (int)_byteswap_ulong((unsigned long)x);
|
|
}
|
|
|
|
inline unsigned int BigLong (unsigned int x)
|
|
{
|
|
return (unsigned int)_byteswap_ulong((unsigned long)x);
|
|
}
|
|
#pragma warning (default: 4035)
|
|
|
|
#else
|
|
|
|
// Portable fallback: convert a big-endian 16-bit value to native order.
// The swap is performed on the unsigned representation so no negative
// value is ever shifted.
inline short BigShort (short x)
{
	const unsigned short v = (unsigned short)x;
	return (short)((v << 8) | (v >> 8));
}
|
|
|
|
// Portable fallback: convert a big-endian unsigned 16-bit value to native order.
inline unsigned short BigShort (unsigned short x)
{
	return (unsigned short)((x << 8) | (x >> 8));
}
|
|
|
|
// Portable fallback: convert a big-endian unsigned 32-bit value to native
// order. Swap the two 16-bit halves first, then the bytes within each half.
inline unsigned int BigLong (unsigned int x)
{
	x = (x >> 16) | (x << 16);
	return ((x & 0xff00ff00u) >> 8) | ((x & 0x00ff00ffu) << 8);
}
|
|
|
|
// Portable fallback: convert a big-endian signed 32-bit value to native
// order. All shifting happens on the unsigned representation, so the sign
// bit is just another byte.
inline int BigLong (int x)
{
	const unsigned int u = (unsigned int)x;
	unsigned int r = u >> 24;
	r |= (u >> 8) & 0x0000ff00u;
	r |= (u << 8) & 0x00ff0000u;
	r |= u << 24;
	return (int)r;
}
|
|
#endif
|
|
|
|
#endif // WORDS_BIGENDIAN
|
|
|
|
#endif // __M_SWAP_H__
|