; "Build Engine & Tools" Copyright (c) 1993-1997 Ken Silverman ; Ken Silverman's official web site: "http://www.advsys.net/ken" ; See the included license file "BUILDLIC.TXT" for license info. ; This file has been modified from Ken Silverman's original release %include "valgrind.inc" SECTION .data %ifndef M_TARGET_LINUX %define ylookup _ylookup %define vince _vince %define vplce _vplce %define palookupoffse _palookupoffse %define bufplce _bufplce %define dc_iscale _dc_iscale %define dc_colormap _dc_colormap %define dc_count _dc_count %define dc_dest _dc_dest %define dc_source _dc_source %define dc_texturefrac _dc_texturefrac %define setupvlineasm _setupvlineasm %define prevlineasm1 _prevlineasm1 %define vlineasm1 _vlineasm1 %define vlineasm4 _vlineasm4 %define setupmvlineasm _setupmvlineasm %define mvlineasm1 _mvlineasm1 %define mvlineasm4 _mvlineasm4 %define R_SetupDrawSlabA _R_SetupDrawSlabA %define R_DrawSlabA _R_DrawSlabA %endif EXTERN ylookup ; near EXTERN vplce ; near EXTERN vince ; near EXTERN palookupoffse ; near EXTERN bufplce ; near EXTERN dc_iscale EXTERN dc_colormap EXTERN dc_count EXTERN dc_dest EXTERN dc_source EXTERN dc_texturefrac SECTION .text ALIGN 16 GLOBAL setvlinebpl_ setvlinebpl_: mov [fixchain1a+2], eax mov [fixchain1b+2], eax mov [fixchain2a+2], eax mov [fixchain1m+2], eax mov [fixchain2ma+2], eax mov [fixchain2mb+2], eax selfmod fixchain1a, fixchain2mb+6 setdrawslabbpl: mov dword [voxbpl1+2], eax mov dword [voxbpl2+2], eax mov dword [voxbpl3+2], eax mov dword [voxbpl4+2], eax mov dword [voxbpl5+2], eax mov dword [voxbpl6+2], eax mov dword [voxbpl7+2], eax mov dword [voxbpl8+2], eax selfmod voxbpl1, voxpl8+6 ret SECTION .data lastslabcolormap: dd 4 SECTION .text GLOBAL R_SetupDrawSlabA GLOBAL @R_SetupDrawSlabA@4 R_SetupDrawSlabA: mov ecx, [esp+4] @R_SetupDrawSlabA@4: cmp [lastslabcolormap], ecx je .done mov [lastslabcolormap], ecx mov dword [voxpal1+2], ecx mov dword [voxpal2+2], ecx mov dword [voxpal3+2], ecx mov dword [voxpal4+2], ecx mov dword [voxpal5+2], ecx mov dword [voxpal6+2], ecx mov dword [voxpal7+2], ecx mov dword [voxpal8+2], ecx .done ret ; pass it log2(texheight) ALIGN 16 GLOBAL setupvlineasm setupvlineasm: mov ecx, [esp+4] ;First 2 lines for VLINEASM1, rest for VLINEASM4 mov byte [premach3a+2], cl mov byte [mach3a+2], cl mov byte [machvsh1+2], cl ;32-shy mov byte [machvsh3+2], cl ;32-shy mov byte [machvsh5+2], cl ;32-shy mov byte [machvsh6+2], cl ;32-shy mov ch, cl sub ch, 16 mov byte [machvsh8+2], ch ;16-shy neg cl mov byte [machvsh7+2], cl ;shy mov byte [machvsh9+2], cl ;shy mov byte [machvsh10+2], cl ;shy mov byte [machvsh11+2], cl ;shy mov byte [machvsh12+2], cl ;shy mov eax, 1 shl eax, cl dec eax mov dword [machvsh2+2], eax ;(1<>sh) ;vplc3 = (ebp<<(32-sh))+((edx&65535)<<(16-sh)) machvsh5: shl esi, 88h ;32-sh mov eax, edx machvsh6: shl ebp, 88h ;32-sh and edx, 0000ffffh machvsh7: shr eax, 88h ;sh add esi, eax machvsh8: shl edx, 88h ;16-sh add ebp, edx mov dword [vplce+12], esi mov dword [vplce+4], ebp pop edi pop esi pop ebx pop ebp ret ;************************************************************************* ;************************* Masked Vertical Lines ************************* ;************************************************************************* ; pass it log2(texheight) ALIGN 16 GLOBAL setupmvlineasm setupmvlineasm: mov ecx, dword [esp+4] mov byte [maskmach3a+2], cl mov byte [machmv13+2], cl mov byte [machmv14+2], cl mov byte [machmv15+2], cl mov byte [machmv16+2], cl selfmod maskmach3a, machmv13+6 ret ALIGN 16 GLOBAL mvlineasm1 
ALIGN 16
GLOBAL mvlineasm1
;Masked vline
mvlineasm1:
    push ebx
    push edi
    push esi
    push ebp
    mov ecx, [dc_count]
    mov ebp, [dc_colormap]
    mov edi, [dc_dest]
    mov eax, [dc_iscale]
    mov edx, [dc_texturefrac]
    mov esi, [dc_source]
beginmvline:
    mov ebx, edx
maskmach3a: shr ebx, 32
    movzx ebx, byte [esi+ebx]
    cmp ebx, 0
    je short skipmask1
maskmach3c: mov bl, byte [ebp+ebx]
    mov [edi], bl
skipmask1: add edx, eax
fixchain1m: add edi, 320
    dec ecx
    jnz short beginmvline
    pop ebp
    pop esi
    pop edi
    pop ebx
    mov eax, edx
    ret

ALIGN 16
GLOBAL mvlineasm4
mvlineasm4:
    push ebx
    push esi
    push edi
    push ebp
    mov ecx, [dc_count]
    mov edi, [dc_dest]
    mov eax, [bufplce+0]
    mov ebx, [bufplce+4]
    mov [machmv1+3], eax
    mov [machmv4+3], ebx
    mov eax, [bufplce+8]
    mov ebx, [bufplce+12]
    mov [machmv7+3], eax
    mov [machmv10+3], ebx
    mov eax, [palookupoffse]
    mov ebx, [palookupoffse+4]
    mov [machmv2+2], eax
    mov [machmv5+2], ebx
    mov eax, [palookupoffse+8]
    mov ebx, [palookupoffse+12]
    mov [machmv8+2], eax
    mov [machmv11+2], ebx
    mov eax, [vince]            ;vince
    mov ebx, [vince+4]
    xor bl, bl
    mov [machmv3+2], eax
    mov [machmv6+2], ebx
    mov eax, [vince+8]
    mov ebx, [vince+12]
    mov [machmv9+2], eax
    mov [machmv12+2], ebx
    inc ecx
    push ecx
    mov ecx, [vplce+0]
    mov edx, [vplce+4]
    mov esi, [vplce+8]
    mov ebp, [vplce+12]
fixchain2ma: sub edi, 320
    selfmod beginmvlineasm4, machmv2+6
    jmp short beginmvlineasm4

ALIGN 16
beginmvlineasm4:
    dec dword [esp]
    jz near endmvlineasm4
    mov eax, ebp
    mov ebx, esi
machmv16: shr eax, 32
machmv12: add ebp, 0x88888888               ;vince[3]
machmv15: shr ebx, 32
machmv9: add esi, 0x88888888                ;vince[2]
machmv10: movzx eax, byte [eax+0x88888888]  ;bufplce[3]
machmv7: movzx ebx, byte [ebx+0x88888888]   ;bufplce[2]
    cmp eax, 1
    adc dl, dl
    cmp ebx, 1
    adc dl, dl
machmv8: mov bl, [ebx+0x88888888]           ;palookupoffs[2]
machmv11: mov bh, [eax+0x88888888]          ;palookupoffs[3]
    mov eax, edx
machmv6: add edx, 0x88888888                ;vince[1]
machmv14: shr eax, 32
    shl ebx, 16
machmv4: movzx eax, byte [eax+0x88888888]   ;bufplce[1]
    cmp eax, 1
    adc dl, dl
machmv5: mov bh, [eax+0x88888888]           ;palookupoffs[1]
    mov eax, ecx
machmv3: add ecx, 0x88888888                ;vince[0]
machmv13: shr eax, 32
machmv1: movzx eax, byte [eax+0x88888888]   ;bufplce[0]
    cmp eax, 1
    adc dl, dl
machmv2: mov bl, [eax+0x88888888]           ;palookupoffs[0]
    xor eax, eax
    shl dl, 4
fixchain2mb: add edi, 320
    mov al, dl
    add eax, mvcase15
    jmp eax     ;16 byte cases

ALIGN 16
endmvlineasm4:
    mov [vplce], ecx
    mov [vplce+4], edx
    mov [vplce+8], esi
    mov [vplce+12], ebp
    pop ecx
    pop ebp
    pop edi
    pop esi
    pop ebx
    ret

;5,7,8,8,11,13,12,14,11,13,14,14,12,14,15,7
ALIGN 16
mvcase15:
    mov [edi], ebx
    jmp beginmvlineasm4
ALIGN 16
mvcase14:
    mov [edi+1], bh
    shr ebx, 16
    mov [edi+2], bx
    jmp beginmvlineasm4
ALIGN 16
mvcase13:
    mov [edi], bl
    shr ebx, 16
    mov [edi+2], bx
    jmp beginmvlineasm4
ALIGN 16
mvcase12:
    shr ebx, 16
    mov [edi+2], bx
    jmp beginmvlineasm4
ALIGN 16
mvcase11:
    mov [edi], bx
    shr ebx, 16
    mov [edi+3], bh
    jmp beginmvlineasm4
ALIGN 16
mvcase10:
    mov [edi+1], bh
    shr ebx, 16
    mov [edi+3], bh
    jmp beginmvlineasm4
ALIGN 16
mvcase9:
    mov [edi], bl
    shr ebx, 16
    mov [edi+3], bh
    jmp beginmvlineasm4
ALIGN 16
mvcase8:
    shr ebx, 16
    mov [edi+3], bh
    jmp beginmvlineasm4
ALIGN 16
mvcase7:
    mov [edi], bx
    shr ebx, 16
    mov [edi+2], bl
    jmp beginmvlineasm4
ALIGN 16
mvcase6:
    shr ebx, 8
    mov [edi+1], bx
    jmp beginmvlineasm4
ALIGN 16
mvcase5:
    mov [edi], bl
    shr ebx, 16
    mov [edi+2], bl
    jmp beginmvlineasm4
ALIGN 16
mvcase4:
    shr ebx, 16
    mov [edi+2], bl
    jmp beginmvlineasm4
ALIGN 16
mvcase3:
    mov [edi], bx
    jmp beginmvlineasm4
ALIGN 16
mvcase2:
    mov [edi+1], bh
    jmp beginmvlineasm4
ALIGN 16
mvcase1:
    mov [edi], bl
    jmp beginmvlineasm4
ALIGN 16
mvcase0:
    jmp beginmvlineasm4

    align 16

;*************************************************************************
;***************************** Voxel Slabs *******************************
;*************************************************************************

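; Rough C-style sketch (not part of the original source) of R_DrawSlabA below.
; The parameter names are only guesses from the register usage; "colormap" is
; the pointer R_SetupDrawSlabA patches into the voxpal* instructions, and
; "pitch" is the value setdrawslabbpl patches into the voxbpl* instructions.
; The assembly special-cases widths of 1, 2 and 4 pixels and otherwise builds
; each row from byte/word/dword stores, but the effect is the same:
;
;   void R_DrawSlabA(int dx, uint32_t v, int cnt, uint32_t vi,
;                    const uint8_t *src, uint8_t *dest)
;   {
;       while (cnt--)
;       {
;           uint8_t color = colormap[src[v >> 16]];
;           for (int x = 0; x < dx; ++x)
;               dest[x] = color;
;           v += vi;
;           dest += pitch;
;       }
;   }
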
GLOBAL R_DrawSlabA
R_DrawSlabA:
    push ebx
    push ebp
    push esi
    push edi
    mov eax, [esp+5*4+0]
    mov ebx, [esp+5*4+4]
    mov ecx, [esp+5*4+8]
    mov edx, [esp+5*4+12]
    mov esi, [esp+5*4+16]
    mov edi, [esp+5*4+20]
    cmp eax, 2
    je voxbegdraw2
    ja voxskip2
    xor eax, eax
voxbegdraw1:
    mov ebp, ebx
    shr ebp, 16
    add ebx, edx
    dec ecx
    mov al, byte [esi+ebp]
voxpal1: mov al, byte [eax+88888888h]
    mov byte [edi], al
voxbpl1: lea edi, [edi+88888888h]
    jnz voxbegdraw1
    jmp voxskipslab5
voxbegdraw2:
    mov ebp, ebx
    shr ebp, 16
    add ebx, edx
    xor eax, eax
    dec ecx
    mov al, byte [esi+ebp]
voxpal2: mov al, byte [eax+88888888h]
    mov ah, al
    mov word [edi], ax
voxbpl2: lea edi, [edi+88888888h]
    jnz voxbegdraw2
    jmp voxskipslab5
voxskip2:
    cmp eax, 4
    jne voxskip4
    xor eax, eax
voxbegdraw4:
    mov ebp, ebx
    add ebx, edx
    shr ebp, 16
    xor eax, eax
    mov al, byte [esi+ebp]
voxpal3: mov al, byte [eax+88888888h]
    mov ah, al
    shl eax, 8
    mov al, ah
    shl eax, 8
    mov al, ah
    mov dword [edi], eax
voxbpl3: add edi, 88888888h
    dec ecx
    jnz voxbegdraw4
    jmp voxskipslab5
voxskip4:
    add eax, edi
    test edi, 1
    jz voxskipslab1
    cmp edi, eax
    je voxskipslab1
    push eax
    push ebx
    push ecx
    push edi
voxbegslab1:
    mov ebp, ebx
    add ebx, edx
    shr ebp, 16
    xor eax, eax
    mov al, byte [esi+ebp]
voxpal4: mov al, byte [eax+88888888h]
    mov byte [edi], al
voxbpl4: add edi, 88888888h
    dec ecx
    jnz voxbegslab1
    pop edi
    pop ecx
    pop ebx
    pop eax
    inc edi
voxskipslab1:
    push eax
    test edi, 2
    jz voxskipslab2
    dec eax
    cmp edi, eax
    jge voxskipslab2
    push ebx
    push ecx
    push edi
voxbegslab2:
    mov ebp, ebx
    add ebx, edx
    shr ebp, 16
    xor eax, eax
    mov al, byte [esi+ebp]
voxpal5: mov al, byte [eax+88888888h]
    mov ah, al
    mov word [edi], ax
voxbpl5: add edi, 88888888h
    dec ecx
    jnz voxbegslab2
    pop edi
    pop ecx
    pop ebx
    add edi, 2
voxskipslab2:
    mov eax, [esp]
    sub eax, 3
    cmp edi, eax
    jge voxskipslab3
voxprebegslab3:
    push ebx
    push ecx
    push edi
voxbegslab3:
    mov ebp, ebx
    add ebx, edx
    shr ebp, 16
    xor eax, eax
    mov al, byte [esi+ebp]
voxpal6: mov al, byte [eax+88888888h]
    mov ah, al
    shl eax, 8
    mov al, ah
    shl eax, 8
    mov al, ah
    mov dword [edi], eax
voxbpl6: add edi, 88888888h
    dec ecx
    jnz voxbegslab3
    pop edi
    pop ecx
    pop ebx
    add edi, 4
    mov eax, [esp]
    sub eax, 3
    cmp edi, eax
    jl voxprebegslab3
voxskipslab3:
    mov eax, [esp]
    dec eax
    cmp edi, eax
    jge voxskipslab4
    push ebx
    push ecx
    push edi
voxbegslab4:
    mov ebp, ebx
    add ebx, edx
    shr ebp, 16
    xor eax, eax
    mov al, byte [esi+ebp]
voxpal7: mov al, byte [eax+88888888h]
    mov ah, al
    mov word [edi], ax
voxbpl7: add edi, 88888888h
    dec ecx
    jnz voxbegslab4
    pop edi
    pop ecx
    pop ebx
    add edi, 2
voxskipslab4:
    pop eax
    cmp edi, eax
    je voxskipslab5
voxbegslab5:
    mov ebp, ebx
    add ebx, edx
    shr ebp, 16
    xor eax, eax
    mov al, byte [esi+ebp]
voxpal8: mov al, byte [eax+88888888h]
    mov byte [edi], al
voxbpl8: add edi, 88888888h
    dec ecx
    jnz voxbegslab5
voxskipslab5:
    pop edi
    pop esi
    pop ebp
    pop ebx
    ret

    align 16

%ifdef M_TARGET_MACHO
GLOBAL _rtext_a_end
_rtext_a_end:
%endif