// Function-wrapped Watcom pragmas
// by Jonathon Fowler (jf@jonof.id.au)
//
// These functions represent some of the longer-winded pragmas from the
// original pragmas.h wrapped into functions for easier use, since many
// jumps and whatnot make it harder to write macro-inline versions.
// I'll eventually convert these to macro-inline equivalents.   --Jonathon

#include "compat.h"

#include "pragmas.h"

libdivide_s64pad_t divtable64[DIVTABLESIZE];
libdivide_s32pad_t divtable32[DIVTABLESIZE];

void initdivtables(void)
{
    libdivide_s64_t d;
    libdivide_s32_t d32;

    // Precompute libdivide "magic number" data for divisors 1..DIVTABLESIZE-1.
    // NOTE: the loop body is reconstructed; it assumes the *pad_t table types
    // are layout-compatible with libdivide's own structs.
    for (bssize_t i = 1; i < DIVTABLESIZE; ++i)
    {
        d   = libdivide_s64_gen(i);
        d32 = libdivide_s32_gen(i);

        divtable64[i] = *(libdivide_s64pad_t *)&d;
        divtable32[i] = *(libdivide_s32pad_t *)&d32;
    }
}

// NOTE: this preprocessor guard is reconstructed; it is assumed from the
// PowerPC (dcbz/stbx) inline assembly below and the matching
// "#else // Generic C version" branch.
#if defined(__GNUC__) && defined(GEKKO)

//
// GCC Inline Assembler version (PowerPC)
//

void clearbufbyte(void *d, int32_t c, int32_t a)
{
    // NOTE: the opening of this function, down to the "c >= align" test,
    // is reconstructed; the alignment computation is an assumption.
    if (a == 0)
    {
        // Zero-fill fast path: align to a 32-byte cache line, clear whole
        // lines with dcbz, then finish the remainder bytewise.
        uint8_t *dd = (uint8_t *)d;
        int32_t align = (int32_t)((32 - ((uintptr_t)d & 31)) & 31);

        if (c >= align)
        {
            uint32_t izero = 0;
            double fzero = 0;

            c -= align;
            if (align & 1) { *dd = izero; dd += 1; }
            if (align & 2) { *(uint16_t *)dd = izero; dd += 2; }
            if (align & 4) { *(uint32_t *)dd = izero; dd += 4; }
            if (align & 8) { *(double *)dd = fzero; dd += 8; }
            if (align & 16)
            {
                *(double *)dd = fzero;
                *(double *)(dd + 8) = fzero;
                dd += 16;
            }
        }

        // Clear whole 32-byte cache lines.
        align = c >> 5;
        while (align)
        {
            __asm__ (
                " dcbz 0, %0\n"
                " addi %0, %0, 32\n"
                : "+r"(dd)
                :
                : "memory"
            );
            align--;
        }

        // Clear any trailing bytes.
        if ((c &= 31))
        {
            while (c--) { *dd++ = 0; }
        }
        return;
    }

    // General case: store the fill value a byte at a time, rotating the
    // pattern so multi-byte fill values repeat correctly.
    __asm__ __volatile__(
        " add %1, %1, %2\n"
        " neg. %2, %2\n"
        " beq 2f\n"
        "1:\n"
        " stbx %0, %1, %2\n"
        " addic. %2, %2, 1\n"
        " rotrwi %0, %0, 8\n"
        " bne 1b\n"
        "2:\n"
        : "+r"(a), "+b"(d), "+r"(c)
        :
        : "cc", "xer", "memory"
    );
}

#else

//
// Generic C version
//

void qinterpolatedown16(intptr_t bufptr, int32_t num, int32_t val, int32_t add)
{
    // gee, I wonder who could have provided this...
    int32_t i, *lptr = (int32_t *)bufptr;
    for (i=0; i<num; i++) { lptr[i] = (val>>16); val += add; }
}

void qinterpolatedown16short(intptr_t bufptr, int32_t num, int32_t val, int32_t add)
{
    // ...maybe the same person who provided this too?
    int32_t i;
    int16_t *sptr = (int16_t *)bufptr;
    for (i=0; i<num; i++) { sptr[i] = (int16_t)(val>>16); val += add; }
}

void clearbuf(void *d, int32_t c, int32_t a)
{
    int32_t *p = (int32_t *)d;
    while ((c--) > 0) *(p++) = a;
}

void copybuf(const void *s, void *d, int32_t c)
{
    const int32_t *p = (const int32_t *)s;
    int32_t *q = (int32_t *)d;
    while ((c--) > 0) *(q++) = *(p++);
}

void swapbuf4(void *a, void *b, int32_t c)
{
    int32_t *p = (int32_t *)a, *q = (int32_t *)b;
    int32_t x, y;
    while ((c--) > 0)
    {
        x = *q;
        y = *p;
        *(q++) = y;
        *(p++) = x;
    }
}

void clearbufbyte(void *D, int32_t c, int32_t a)
{
    // Cringe City
    int32_t const m[4] = { 0xffl, 0xff00l, 0xff0000l, (int32_t)0xff000000l };
    int32_t z = 0;
    char *p = (char *)D;
    while ((c--) > 0)
    {
        *(p++) = (uint8_t)((a & m[z])>>(z<<3));
        z = (z+1)&3;
    }
}

void copybufbyte(const void *S, void *D, int32_t c)
{
    const char *p = (const char *)S;
    char *q = (char *)D;
    while ((c--) > 0) *(q++) = *(p++);
}

// copybufreverse() is a special case: use the assembly version for GCC on x86
// *and* x86_64, and the C version otherwise.
// XXX: we don't honor NOASM in the x86_64 case.
#if defined(__GNUC__) && defined(__x86_64__)
// NOTE: Almost CODEDUP from the x86 GCC assembly version, except that
//  - %%esi -> %%rsi
//  - %%edi -> %%rdi
//  - the (dec,inc,sub,add)l suffix is removed where necessary
void copybufreverse(const void *S, void *D, int32_t c)
{
    __asm__ __volatile__(
        "shrl $1, %%ecx\n\t"
        "jnc 0f\n\t"  // jnc skipit1
        "movb (%%rsi), %%al\n\t"
        "dec %%rsi\n\t"
        "movb %%al, (%%rdi)\n\t"
        "inc %%rdi\n\t"
        "0:\n\t"  // skipit1:
        "shrl $1, %%ecx\n\t"
        "jnc 1f\n\t"  // jnc skipit2
        "movw -1(%%rsi), %%ax\n\t"
        "sub $2, %%rsi\n\t"
        "rorw $8, %%ax\n\t"
        "movw %%ax, (%%rdi)\n\t"
        "add $2, %%rdi\n\t"
        "1:\n\t"  // skipit2
        "testl %%ecx, %%ecx\n\t"
        "jz 3f\n\t"  // jz endloop
        "2:\n\t"  // begloop
        "movl -3(%%rsi), %%eax\n\t"
        "sub $4, %%rsi\n\t"
        "bswapl %%eax\n\t"
        "movl %%eax, (%%rdi)\n\t"
        "add $4, %%rdi\n\t"
        "decl %%ecx\n\t"
        "jnz 2b\n\t"  // jnz begloop
        "3:"
        : "+S"(S), "+D"(D), "+c"(c)
        :
        : "eax", "memory", "cc"
    );
}
#else
void copybufreverse(const void *S, void *D, int32_t c)
{
    const char *p = (const char *)S;
    char *q = (char *)D;
    while ((c--) > 0) *(q++) = *(p--);
}
#endif

#endif
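
#if 0
// Usage sketch (illustrative only, not part of the original source): a
// minimal, hypothetical self-test of the generic C buffer helpers above,
// showing their element sizes and pointer conventions. The function name
// pragmas_selftest() is made up for this example.
#include <assert.h>
#include <string.h>

static void pragmas_selftest(void)
{
    // clearbuf() fills c *dwords*, not c bytes.
    int32_t buf[4];
    clearbuf(buf, 4, 0x01020304);
    assert(buf[0] == 0x01020304 && buf[3] == 0x01020304);

    // copybufreverse() walks the source pointer backwards, so S must point
    // at the last byte of the source run.
    char src[4] = { 'a', 'b', 'c', 'd' }, dst[4];
    copybufreverse(&src[3], dst, 4);
    assert(memcmp(dst, "dcba", 4) == 0);

    // qinterpolatedown16() stores the integer part of a 16.16 fixed-point
    // ramp: val, val+add, val+2*add, ..., each shifted down by 16.
    int32_t lerp[8];
    qinterpolatedown16((intptr_t)lerp, 8, 5 << 16, 1 << 16);
    assert(lerp[0] == 5 && lerp[7] == 12);
}
#endif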