qzdoom/src/asm_ia32/tmap3.asm
Randy Heit dda5ddd3c2
- Ported vlinetallasm4 to AMD64 assembly. Even with the increased number of
  registers AMD64 provides, this routine still needs to be written as self-
  modifying code for maximum performance. The additional registers do allow
  for further optimization over the x86 version by allowing all four pixels
  to be in flight at the same time. The end result is that AMD64 ASM is about
  2.18 times faster than AMD64 C and about 1.06 times faster than x86 ASM.
  (For further comparison, AMD64 C and x86 C are practically the same for
  this function.) Should I port any more assembly to AMD64, mvlineasm4 is the
  most likely candidate, but it's not used enough at this point to bother.
  Also, this may or may not work with Linux at the moment, since it doesn't
  have the eh_handler metadata. Win64 is easier, since I just need to
  structure the function prologue and epilogue properly and use some
  assembler directives/macros to automatically generate the metadata. And
  that brings up another point: You need YASM to assemble the AMD64 code,
  because NASM doesn't support the Win64 metadata directives.
- Added an SSE version of DoBlending. This is strictly C intrinsics.
  VC++ still throws around unnecessary register moves. GCC seems to be
  pretty close to optimal, requiring only about 2 cycles/color. They're
  both faster than my hand-written MMX routine, so I don't need to feel
  bad about not hand-optimizing this for x64 builds. (A sketch of the
  intrinsics approach follows this list.)
- Removed an extra instruction from DoBlending_MMX, transposed two
  instructions, and unrolled it once, shaving off about 80 cycles from the
  time required to blend 256 palette entries. Why? Because I tried writing
  a C version of the routine using compiler intrinsics and was appalled by
  all the extra movq's VC++ added to the code. GCC was better, but still
  generated extra instructions. I only wanted a C version because I can't
  use inline assembly with VC++'s x64 compiler, and x64 assembly is a bit
  of a pain. (It's a pain because Linux and Windows have different calling
  conventions, and you need to maintain extra metadata for functions.) So,
  the assembly version stays and the C version stays out.
- Removed all the pixel doubling r_detail modes, since the one platform they
  were intended to assist (486) actually sees very little benefit from them.
- Rewrote CheckMMX in C and renamed it to CheckCPU.
- Fixed: CPUID function 0x80000005 is specified to return detailed L1 cache
  information only for AMD processors, so we must not use it on processors
  from other vendors, or we end up overwriting the L1 cache line size with 0
  or some other number we don't actually understand. (A sketch of the vendor
  check follows this list.)
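
The intrinsics-only blend mentioned in the DoBlending item above can look
roughly like this. This is a minimal sketch, not the routine from this commit:
the BlendPaletteSSE2 name, its argument list, the BGRA entry layout, the 0-256
blend amount, and the assumption that count is a multiple of four are all
illustrative.

    #include <emmintrin.h>   /* SSE2 intrinsics only */
    #include <stdint.h>

    /* Hypothetical stand-in for the SSE blend: pushes 'count' 32-bit palette
       entries toward the color (r,g,b) by 'amount' (0-256), four entries
       (16 bytes) per iteration. */
    static void BlendPaletteSSE2(const uint32_t *from, uint32_t *to, int count,
                                 int r, int g, int b, int amount)
    {
        const __m128i zero    = _mm_setzero_si128();
        const __m128i inverse = _mm_set1_epi16((short)(256 - amount));
        /* Blend color in 16-bit lanes, pre-scaled by 'amount'; lane order
           matches a little-endian 0x00RRGGBB entry (B, G, R, unused). */
        const __m128i color = _mm_mullo_epi16(
            _mm_set_epi16(0, (short)r, (short)g, (short)b,
                          0, (short)r, (short)g, (short)b),
            _mm_set1_epi16((short)amount));

        for (int i = 0; i < count; i += 4)
        {
            __m128i src = _mm_loadu_si128((const __m128i *)(from + i));
            /* Widen bytes to 16 bits, blend, narrow back with saturation.
               The unused top byte just gets scaled, which is harmless here. */
            __m128i lo = _mm_unpacklo_epi8(src, zero);
            __m128i hi = _mm_unpackhi_epi8(src, zero);
            lo = _mm_srli_epi16(_mm_add_epi16(_mm_mullo_epi16(lo, inverse), color), 8);
            hi = _mm_srli_epi16(_mm_add_epi16(_mm_mullo_epi16(hi, inverse), color), 8);
            _mm_storeu_si128((__m128i *)(to + i), _mm_packus_epi16(lo, hi));
        }
    }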
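
The cache fix in the last item amounts to gating the 0x80000005 query on the
CPU vendor string. Here is a minimal sketch of that check using GCC/Clang's
<cpuid.h> helper; the QueryL1LineSize name and the 32-byte fallback are
illustrative, not the actual CheckCPU code:

    #include <cpuid.h>    /* GCC/Clang __get_cpuid() */
    #include <string.h>

    /* Only trust CPUID leaf 0x80000005 for the L1 line size on AMD parts;
       other vendors leave that leaf undefined or zeroed. */
    static unsigned QueryL1LineSize(void)
    {
        unsigned eax, ebx, ecx, edx;
        char vendor[13] = { 0 };

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 32;                 /* no CPUID: fall back to a default */

        /* The vendor string comes back in EBX, EDX, ECX, in that order. */
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);

        if (strcmp(vendor, "AuthenticAMD") == 0 &&
            __get_cpuid(0x80000005, &eax, &ebx, &ecx, &edx))
        {
            return ecx & 0xFF;         /* AMD: L1 data cache line size in CL */
        }
        return 32;                     /* other vendors: keep the default */
    }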


SVN r1134 (trunk)
2008-08-09 03:13:43 +00:00


%include "valgrind.inc"
%ifdef M_TARGET_WATCOM
SEGMENT DATA PUBLIC ALIGN=16 CLASS=DATA USE32
SEGMENT DATA
%else
SECTION .data
%endif
%ifndef M_TARGET_LINUX
%define ylookup _ylookup
%define vplce _vplce
%define vince _vince
%define palookupoffse _palookupoffse
%define bufplce _bufplce
%define dc_iscale _dc_iscale
%define dc_colormap _dc_colormap
%define dc_count _dc_count
%define dc_dest _dc_dest
%define dc_source _dc_source
%define dc_texturefrac _dc_texturefrac
%define dc_pitch _dc_pitch
%define setupvlinetallasm _setupvlinetallasm
%define vlinetallasm4 _vlinetallasm4
%define vlinetallasmathlon4 _vlinetallasmathlon4
%define vlinetallasm1 _vlinetallasm1
%define prevlinetallasm1 _prevlinetallasm1
%endif
EXTERN vplce
EXTERN vince
EXTERN palookupoffse
EXTERN bufplce
EXTERN ylookup
EXTERN dc_iscale
EXTERN dc_colormap
EXTERN dc_count
EXTERN dc_dest
EXTERN dc_source
EXTERN dc_texturefrac
EXTERN dc_pitch
GLOBAL vlt4pitch
GLOBAL vlt1pitch
%ifdef M_TARGET_WATCOM
SEGMENT CODE PUBLIC ALIGN=16 CLASS=CODE USE32
SEGMENT CODE
%else
SECTION .text
%endif
ALIGN 16
GLOBAL setpitch3
setpitch3:
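; Self-modifying setup: EAX holds the destination pitch, which is written into
; the immediate operands of the pitch-advance instructions in the column
; drawers below; selfmod (from valgrind.inc) marks the patched range.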
mov [vltpitch+2], eax
mov [vltpitcha+2],eax
mov [vlt1pitch1+2], eax
mov [vlt1pitch2+2], eax
selfmod vltpitch, vlt1pitch2+6
ret
ALIGN 16
GLOBAL setupvlinetallasm
setupvlinetallasm:
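; Patch the shift count (the first stack argument) into every 'shr' below that
; turns a texture coordinate fraction into a texel index.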
mov ecx, [esp+4]
mov [shifter1+2], cl
mov [shifter2+2], cl
mov [shifter3+2], cl
mov [shifter4+2], cl
mov [shifter1a+2], cl
mov [shifter2a+2], cl
mov [shifter3a+2], cl
mov [shifter4a+2], cl
mov [preshift+2], cl
mov [shift11+2], cl
mov [shift12+2], cl
selfmod shifter1, shift12+6
ret
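; The self-modified column loops live in a section that is both writable and
; executable.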
SECTION .rtext progbits alloc exec write align=64
ALIGN 16
GLOBAL vlinetallasm4
vlinetallasm4:
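; Draw four columns at once. The prologue below copies the per-column source
; pointers (bufplce), colormap pointers (palookupoffse), texture steps (vince),
; and destination addresses into the instruction stream of 'loopit', so the
; inner loop works entirely from patched immediates and displacements; this is
; the self-modifying code described in the commit message above.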
push ebx
mov eax, [bufplce+0]
mov ebx, [bufplce+4]
mov ecx, [bufplce+8]
mov edx, [bufplce+12]
mov [source1+3], eax
mov [source2+3], ebx
mov [source3+3], ecx
mov [source4+3], edx
mov eax, [palookupoffse+0]
mov ebx, [palookupoffse+4]
mov ecx, [palookupoffse+8]
mov edx, [palookupoffse+12]
mov [lookup1+2], eax
mov [lookup2+2], ebx
mov [lookup3+2], ecx
mov [lookup4+2], edx
mov eax, [vince+0]
mov ebx, [vince+4]
mov ecx, [vince+8]
mov edx, [vince+12]
mov [step1+2], eax
mov [step2+2], ebx
mov [step3+2], ecx
mov [step4+1], edx
push ebp
push esi
push edi
mov ecx, [dc_count]
mov edi, [dc_dest]
mov eax, dword [ylookup+ecx*4-4]
add eax, edi
sub edi, eax
mov [write1+2],eax
inc eax
mov [write2+2],eax
inc eax
mov [write3+2],eax
inc eax
mov [write4+2],eax
mov ebx, [vplce]
mov ecx, [vplce+4]
mov esi, [vplce+8]
mov eax, [vplce+12]
selfmod loopit, vltpitch
jmp loopit
ALIGN 16
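; Inner loop: one pixel from each of the four columns per pass. Every
; 0x8888888x constant is a placeholder that the setup code above has
; overwritten.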
loopit:
mov edx, ebx
shifter1: shr edx, 24
source1: movzx edx, BYTE [edx+0x88888888]
lookup1: mov dl, [edx+0x88888888]
write1: mov [edi+0x88888880], dl
step1: add ebx, 0x88888888
mov edx, ecx
shifter2: shr edx, 24
source2: movzx edx, BYTE [edx+0x88888888]
lookup2: mov dl, [edx+0x88888888]
write2: mov [edi+0x88888881], dl
step2: add ecx, 0x88888888
mov edx, esi
shifter3: shr edx, 24
source3: movzx edx, BYTE [edx+0x88888888]
lookup3: mov dl, BYTE [edx+0x88888888]
write3: mov [edi+0x88888882], dl
step3: add esi, 0x88888888
mov edx, eax
shifter4: shr edx, 24
source4: movzx edx, BYTE [edx+0x88888888]
lookup4: mov dl, [edx+0x88888888]
write4: mov [edi+0x88888883], dl
step4: add eax, 0x88888888
vltpitch: add edi, 320
jle near loopit
mov [vplce], ebx
mov [vplce+4], ecx
mov [vplce+8], esi
mov [vplce+12], eax
pop edi
pop esi
pop ebp
pop ebx
ret
ALIGN 16
GLOBAL vlinetallasmathlon4
vlinetallasmathlon4:
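; Athlon-tuned variant of vlinetallasm4: the patch-up prologue is the same,
; but the inner loop interleaves the four pixels differently (see the note
; just above 'loopita').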
push ebx
mov eax, [bufplce+0]
mov ebx, [bufplce+4]
mov ecx, [bufplce+8]
mov edx, [bufplce+12]
mov [source1a+3], eax
mov [source2a+3], ebx
mov [source3a+3], ecx
mov [source4a+3], edx
mov eax, [palookupoffse+0]
mov ebx, [palookupoffse+4]
mov ecx, [palookupoffse+8]
mov edx, [palookupoffse+12]
mov [lookup1a+2], eax
mov [lookup2a+2], ebx
mov [lookup3a+2], ecx
mov [lookup4a+2], edx
mov eax, [vince+0]
mov ebx, [vince+4]
mov ecx, [vince+8]
mov edx, [vince+12]
mov [step1a+2], eax
mov [step2a+2], ebx
mov [step3a+2], ecx
mov [step4a+1], edx
push ebp
push esi
push edi
mov ecx, [dc_count]
mov edi, [dc_dest]
mov eax, dword [ylookup+ecx*4-4]
add eax, edi
sub edi, eax
mov [write1a+2],eax
inc eax
mov [write2a+2],eax
inc eax
mov [write3a+2],eax
inc eax
mov [write4a+2],eax
mov ebp, [vplce]
mov ecx, [vplce+4]
mov esi, [vplce+8]
mov eax, [vplce+12]
selfmod loopita, vltpitcha
jmp loopita
; Unfortunately, this code has not been carefully analyzed to determine
; how well it utilizes the processor's instruction units. Instead, I just
; kept rearranging code, seeing what sped it up and what slowed it down,
; until I arrived at this. This is the fastest version I was able to
; manage, but that does not mean it cannot be made faster with careful
; instruction shuffling.
ALIGN 64
loopita: mov edx, ebp
mov ebx, ecx
shifter1a: shr edx, 24
shifter2a: shr ebx, 24
source1a: movzx edx, BYTE [edx+0x88888888]
source2a: movzx ebx, BYTE [ebx+0x88888888]
step1a: add ebp, 0x88888888
step2a: add ecx, 0x88888888
lookup1a: mov dl, [edx+0x88888888]
lookup2a: mov dh, [ebx+0x88888888]
mov ebx, esi
write1a: mov [edi+0x88888880], dl
write2a: mov [edi+0x88888881], dh
shifter3a: shr ebx, 24
mov edx, eax
source3a: movzx ebx, BYTE [ebx+0x88888888]
shifter4a: shr edx, 24
step3a: add esi, 0x88888888
source4a: movzx edx, BYTE [edx+0x88888888]
step4a: add eax, 0x88888888
lookup3a: mov bl, [ebx+0x88888888]
lookup4a: mov dl, [edx+0x88888888]
write3a: mov [edi+0x88888882], bl
write4a: mov [edi+0x88888883], dl
vltpitcha: add edi, 320
jle near loopita
mov [vplce], ebp
mov [vplce+4], ecx
mov [vplce+8], esi
mov [vplce+12], eax
pop edi
pop esi
pop ebp
pop ebx
ret
ALIGN 16
GLOBAL prevlinetallasm1
prevlinetallasm1:
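; One-pixel fast path: counts greater than one are handed straight to
; vlinetallasm1; otherwise a single texel is fetched, run through the
; colormap, and stored, with the advanced texture fraction returned in EAX.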
mov ecx, [dc_count]
cmp ecx, 1
ja vlinetallasm1
mov eax, [dc_iscale]
mov edx, [dc_texturefrac]
add eax, edx
mov ecx, [dc_source]
preshift: shr edx, 16
push ebx
push edi
mov edi, [dc_colormap]
movzx ebx, byte [ecx+edx]
mov ecx, [dc_dest]
mov bl, byte [edi+ebx]
pop edi
mov byte [ecx], bl
pop ebx
ret
ALIGN 16
GLOBAL vlinetallasm1
vlinetallasm1:
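; General single-column loop: EBX carries the texture fraction, the patched
; 'shr' converts it to a texel index each iteration, and the final fraction
; is returned in EAX.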
push ebp
push ebx
push edi
push esi
mov ebp, [dc_count]
mov ebx, [dc_texturefrac] ; ebx = frac
mov edi, [dc_dest]
mov ecx, ebx
shift11: shr ecx, 16
mov esi, [dc_source]
mov edx, [dc_iscale]
vlt1pitch1: sub edi, 0x88888888
mov eax, [dc_colormap]
loop2:
movzx ecx, BYTE [esi+ecx]
add ebx, edx
vlt1pitch2: add edi, 0x88888888
mov cl,[eax+ecx]
mov [edi],cl
mov ecx,ebx
shift12: shr ecx,16
dec ebp
jnz loop2
mov eax,ebx
pop esi
pop edi
pop ebx
pop ebp
ret