Mirror of https://github.com/DarkPlacesEngine/gmqcc.git (synced 2024-11-27 14:12:36 +00:00)
Preliminary work on arithmetic exception handling in the constant evaluator. We can now safely check constant expressions for arithmetic underflow, overflow, division by zero and inexactness. Inexactness is propagated through the AST so that using an inexact value in a comparison triggers a warning with -Winexact-compares.
parent 7024ebfe7f
commit 6424ebaf98
10 changed files with 542 additions and 58 deletions
ast.c (1 change)

@@ -359,6 +359,7 @@ ast_value* ast_value_new(lex_ctx_t ctx, const char *name, int t)
     self->cvq      = CV_NONE;
     self->hasvalue = false;
     self->isimm    = false;
+    self->inexact  = false;
     self->uses     = 0;
     memset(&self->constval, 0, sizeof(self->constval));
     self->initlist = NULL;
ast.h (1 change)

@@ -214,6 +214,7 @@ struct ast_value_s
     bool isfield; /* this declares a field */
     bool isimm;   /* an immediate, not just const */
     bool hasvalue;
+    bool inexact; /* inexact coming from folded expression */
     basic_value_t constval;
     /* for TYPE_ARRAY we have an optional vector
      * of constants when an initializer list
gmqcc.1

@@ -344,6 +344,10 @@ will search its intrinsics table for something that matches that
 function name by appending "__builtin_" to it. This behaviour may
 be unexpected, so enabling this will produce a diagnostic when
 such a function is resolved to a builtin.
+.It Fl W Ns Cm inexact-compares
+When comparing an inexact value such as `1.0/3.0' the result is
+pathologically wrong. Enabling this will trigger a compiler warning
+on such expressions.
 .El
 .Sh COMPILE FLAGS
 .Bl -tag -width Ds
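The hazard this warning targets is easy to reproduce outside the compiler. A minimal, self-contained C illustration (editorial sketch, not part of the commit):

#include <stdio.h>

int main(void) {
    /* 1.0/3.0 has no exact single-precision representation, so the
     * folded constant is only an approximation of one third. */
    float third = 1.0f / 3.0f;

    /* Comparing that inexact result against a hand-written decimal
     * fails silently: 0.333333f rounds to a different bit pattern. */
    printf("third = %.9f\n", third);                          /* 0.333333343 */
    printf("third == 0.333333f -> %d\n", third == 0.333333f); /* 0 */
    return 0;
}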
fold.c (558 changes)

@@ -29,6 +29,409 @@
 #define FOLD_STRING_UNTRANSLATE_HTSIZE 1024
 #define FOLD_STRING_DOTRANSLATE_HTSIZE 1024
 
+/*
+ * The constant folder is also responsible for validating if the constant
+ * expressions produce valid results. We cannot trust the FPU control
+ * unit for these exceptions because setting FPU control words might not
+ * work. Systems can set and enforce FPU modes of operation, and it's also
+ * valid for a libc to simply ignore FPU exceptions, as glibc does on ARM
+ * for instance. We implement some trivial and IEEE 754 conformant functions
+ * which emulate those operations. This is an entirely optional compiler
+ * feature which shouldn't be enabled for anything other than performing
+ * strict passes on constant expressions since it's quite slow.
+ */
+typedef uint32_t sfloat_t;
+
+typedef union {
+    qcfloat_t f;
+    sfloat_t  s;
+} sfloat_cast_t;
+
+typedef enum {
+    SFLOAT_INVALID   = 1,
+    SFLOAT_DIVBYZERO = 4,
+    SFLOAT_OVERFLOW  = 8,
+    SFLOAT_UNDERFLOW = 16,
+    SFLOAT_INEXACT   = 32
+} sfloat_exceptionflags_t;
+
+typedef enum {
+    SFLOAT_ROUND_NEAREST_EVEN,
+    SFLOAT_ROUND_DOWN,
+    SFLOAT_ROUND_UP,
+    SFLOAT_ROUND_TO_ZERO
+} sfloat_roundingmode_t;
+
+typedef enum {
+    SFLOAT_TAFTER,
+    SFLOAT_TBEFORE
+} sfloat_tdetect_t;
+
+typedef struct {
+    sfloat_roundingmode_t   roundingmode;
+    sfloat_exceptionflags_t exceptionflags;
+    sfloat_tdetect_t        tiny;
+} sfloat_state_t;
+
+/* The value of a NaN */
+#define SFLOAT_NAN 0xFFC00000
+/* Count of leading zero bits before the most-significant 1 bit. */
+#define SFLOAT_CLZ(X, SUB) \
+    (__builtin_clz((X)) - (SUB))
+/* Test if NaN */
+#define SFLOAT_ISNAN(A) \
+    (0xFF000000 < (uint32_t)((A) << 1))
+/* Test if signaling NaN */
+#define SFLOAT_ISSNAN(A) \
+    (((((A) >> 22) & 0x1FF) == 0x1FE) && ((A) & 0x003FFFFF))
+/* Raise exception */
+#define SFLOAT_RAISE(STATE, FLAGS) \
+    ((STATE)->exceptionflags |= (FLAGS))
+/*
+ * Shifts `A' right by `COUNT' bits. Non-zero shifted-out bits are folded
+ * into the LSB as a sticky bit. SIZE sets the arbitrarily-large limit.
+ */
+#define SFLOAT_SHIFT(SIZE, A, COUNT, Z) \
+    *(Z) = ((COUNT) == 0) \
+        ? (A) \
+        : (((COUNT) < (SIZE)) \
+            ? ((A) >> (COUNT)) | (((A) << ((-(COUNT)) & ((SIZE) - 1))) != 0) \
+            : ((A) != 0))
+/* Extract fractional component */
+#define SFLOAT_EXTRACT_FRAC(X) \
+    ((uint32_t)((X) & 0x007FFFFF))
+/* Extract exponent component */
+#define SFLOAT_EXTRACT_EXP(X) \
+    ((int16_t)((X) >> 23) & 0xFF)
+/* Extract sign bit */
+#define SFLOAT_EXTRACT_SIGN(X) \
+    ((X) >> 31)
+/* Normalize a subnormal */
+#define SFLOAT_SUBNORMALIZE(SA, Z, SZ) \
+    (void)(*(SZ) = (SA) << SFLOAT_CLZ((SA), 8), *(Z) = 1 - SFLOAT_CLZ((SA), 8))
+/*
+ * Pack sign, exponent and significand and produce a float.
+ *
+ * Integer portions of the significand are added to the exponent. The
+ * exponent input should be one less than the result exponent whenever
+ * the significand is normalized, since a normalized significand will
+ * always have an integer portion of value one.
+ */
+#define SFLOAT_PACK(SIGN, EXP, SIG) \
+    (sfloat_t)((((uint32_t)(SIGN)) << 31) + (((uint32_t)(EXP)) << 23) + (SIG))
+
+/* Calculate NaN. If either operand is signaling then raise invalid */
+static sfloat_t sfloat_propagate_nan(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
+    bool isnan_a  = SFLOAT_ISNAN(a);
+    bool issnan_a = SFLOAT_ISSNAN(a);
+    bool isnan_b  = SFLOAT_ISNAN(b);
+    bool issnan_b = SFLOAT_ISSNAN(b);
+
+    a |= 0x00400000;
+    b |= 0x00400000;
+
+    if (issnan_a | issnan_b)
+        SFLOAT_RAISE(state, SFLOAT_INVALID);
+    if (issnan_a) {
+        if (issnan_b)
+            goto larger;
+        return isnan_b ? b : a;
+    } else if (isnan_a) {
+        if (issnan_b | !isnan_b)
+            return a;
+larger:
+        if ((uint32_t)(a << 1) < (uint32_t)(b << 1)) return b;
+        if ((uint32_t)(b << 1) < (uint32_t)(a << 1)) return a;
+        return (a < b) ? a : b;
+    }
+    return b;
+}
+
+/* Round and pack */
+static sfloat_t SFLOAT_PACK_round(sfloat_state_t *state, bool sign_z, int16_t exp_z, uint32_t sig_z) {
+    sfloat_roundingmode_t mode      = state->roundingmode;
+    bool                  even      = !!(mode == SFLOAT_ROUND_NEAREST_EVEN);
+    unsigned char         increment = 0x40;
+    unsigned char         bits      = sig_z & 0x7F;
+
+    if (!even) {
+        if (mode == SFLOAT_ROUND_TO_ZERO)
+            increment = 0;
+        else {
+            increment = 0x7F;
+            if (sign_z) {
+                if (mode == SFLOAT_ROUND_UP)
+                    increment = 0;
+            } else {
+                if (mode == SFLOAT_ROUND_DOWN)
+                    increment = 0;
+            }
+        }
+    }
+
+    if (0xFD <= (uint16_t)exp_z) {
+        if ((0xFD < exp_z) || ((exp_z == 0xFD) && ((int32_t)(sig_z + increment) < 0))) {
+            SFLOAT_RAISE(state, SFLOAT_OVERFLOW | SFLOAT_INEXACT);
+            return SFLOAT_PACK(sign_z, 0xFF, 0) - (increment == 0);
+        }
+        if (exp_z < 0) {
+            /* Check for underflow */
+            bool tiny = (state->tiny == SFLOAT_TBEFORE) || (exp_z < -1) || (sig_z + increment < 0x80000000);
+            SFLOAT_SHIFT(32, sig_z, -exp_z, &sig_z);
+            exp_z = 0;
+            bits = sig_z & 0x7F;
+            if (tiny && bits)
+                SFLOAT_RAISE(state, SFLOAT_UNDERFLOW);
+        }
+    }
+
+    /*
+     * The significand has its binary point between bits 30 and 29, which
+     * is 7 bits to the left of the usual place. The shifted significand
+     * must be normalized or smaller; if it isn't, the exponent must be
+     * zero, in which case no rounding occurs since the result will be a
+     * subnormal.
+     */
+    if (bits)
+        SFLOAT_RAISE(state, SFLOAT_INEXACT);
+    sig_z = (sig_z + increment) >> 7;
+    sig_z &= ~(((bits ^ 0x40) == 0) & even);
+    if (sig_z == 0)
+        exp_z = 0;
+    return SFLOAT_PACK(sign_z, exp_z, sig_z);
+}
+
+/* Normalized round and pack */
+static sfloat_t SFLOAT_PACK_normal(sfloat_state_t *state, bool sign_z, int16_t exp_z, uint32_t sig_z) {
+    unsigned char c = SFLOAT_CLZ(sig_z, 1);
+    return SFLOAT_PACK_round(state, sign_z, exp_z - c, sig_z << c);
+}
+
+static sfloat_t sfloat_add_impl(sfloat_state_t *state, sfloat_t a, sfloat_t b, bool sign_z) {
+    int16_t  exp_a = SFLOAT_EXTRACT_EXP(a);
+    int16_t  exp_b = SFLOAT_EXTRACT_EXP(b);
+    int16_t  exp_z = 0;
+    int16_t  exp_d = exp_a - exp_b;
+    uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a) << 6;
+    uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b) << 6;
+    uint32_t sig_z = 0;
+
+    if (0 < exp_d) {
+        if (exp_a == 0xFF)
+            return sig_a ? sfloat_propagate_nan(state, a, b) : a;
+        if (exp_b == 0)
+            --exp_d;
+        else
+            sig_b |= 0x20000000;
+        SFLOAT_SHIFT(32, sig_b, exp_d, &sig_b);
+        exp_z = exp_a;
+    } else if (exp_d < 0) {
+        if (exp_b == 0xFF)
+            return sig_b ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z, 0xFF, 0);
+        if (exp_a == 0)
+            ++exp_d;
+        else
+            sig_a |= 0x20000000;
+        SFLOAT_SHIFT(32, sig_a, -exp_d, &sig_a);
+        exp_z = exp_b;
+    } else {
+        if (exp_a == 0xFF)
+            return (sig_a | sig_b) ? sfloat_propagate_nan(state, a, b) : a;
+        if (exp_a == 0)
+            return SFLOAT_PACK(sign_z, 0, (sig_a + sig_b) >> 6);
+        sig_z = 0x40000000 + sig_a + sig_b;
+        exp_z = exp_a;
+        goto end;
+    }
+    sig_a |= 0x20000000;
+    sig_z  = (sig_a + sig_b) << 1;
+    --exp_z;
+    if ((int32_t)sig_z < 0) {
+        sig_z = sig_a + sig_b;
+        ++exp_z;
+    }
+end:
+    return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
+}
+
+static sfloat_t sfloat_sub_impl(sfloat_state_t *state, sfloat_t a, sfloat_t b, bool sign_z) {
+    int16_t  exp_a = SFLOAT_EXTRACT_EXP(a);
+    int16_t  exp_b = SFLOAT_EXTRACT_EXP(b);
+    int16_t  exp_z = 0;
+    int16_t  exp_d = exp_a - exp_b;
+    uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a) << 7;
+    uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b) << 7;
+    uint32_t sig_z = 0;
+
+    if (0 < exp_d) goto exp_greater_a;
+    if (exp_d < 0) goto exp_greater_b;
+
+    if (exp_a == 0xFF) {
+        if (sig_a | sig_b)
+            return sfloat_propagate_nan(state, a, b);
+        SFLOAT_RAISE(state, SFLOAT_INVALID);
+        return SFLOAT_NAN;
+    }
+
+    if (exp_a == 0)
+        exp_a = exp_b = 1;
+
+    if (sig_b < sig_a) goto greater_a;
+    if (sig_a < sig_b) goto greater_b;
+
+    return SFLOAT_PACK(state->roundingmode == SFLOAT_ROUND_DOWN, 0, 0);
+
+exp_greater_b:
+    if (exp_b == 0xFF)
+        return (sig_b) ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z ^ 1, 0xFF, 0);
+    if (exp_a == 0)
+        ++exp_d;
+    else
+        sig_a |= 0x40000000;
+    SFLOAT_SHIFT(32, sig_a, -exp_d, &sig_a);
+    sig_b |= 0x40000000;
+greater_b:
+    sig_z   = sig_b - sig_a;
+    exp_z   = exp_b;
+    sign_z ^= 1;
+    goto end;
+
+exp_greater_a:
+    if (exp_a == 0xFF)
+        return (sig_a) ? sfloat_propagate_nan(state, a, b) : a;
+    if (exp_b == 0)
+        --exp_d;
+    else
+        sig_b |= 0x40000000;
+    SFLOAT_SHIFT(32, sig_b, exp_d, &sig_b);
+    sig_a |= 0x40000000;
+greater_a:
+    sig_z = sig_a - sig_b;
+    exp_z = exp_a;
+
+end:
+    --exp_z;
+    return SFLOAT_PACK_normal(state, sign_z, exp_z, sig_z);
+}
+
+static GMQCC_INLINE sfloat_t sfloat_add(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
+    bool sign_a = SFLOAT_EXTRACT_SIGN(a);
+    bool sign_b = SFLOAT_EXTRACT_SIGN(b);
+    return (sign_a == sign_b) ? sfloat_add_impl(state, a, b, sign_a)
+                              : sfloat_sub_impl(state, a, b, sign_a);
+}
+
+static GMQCC_INLINE sfloat_t sfloat_sub(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
+    bool sign_a = SFLOAT_EXTRACT_SIGN(a);
+    bool sign_b = SFLOAT_EXTRACT_SIGN(b);
+    return (sign_a == sign_b) ? sfloat_sub_impl(state, a, b, sign_a)
+                              : sfloat_add_impl(state, a, b, sign_a);
+}
+
+static sfloat_t sfloat_mul(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
+    int16_t  exp_a   = SFLOAT_EXTRACT_EXP(a);
+    int16_t  exp_b   = SFLOAT_EXTRACT_EXP(b);
+    int16_t  exp_z   = 0;
+    uint32_t sig_a   = SFLOAT_EXTRACT_FRAC(a);
+    uint32_t sig_b   = SFLOAT_EXTRACT_FRAC(b);
+    uint32_t sig_z   = 0;
+    uint64_t sig_z64 = 0;
+    bool     sign_a  = SFLOAT_EXTRACT_SIGN(a);
+    bool     sign_b  = SFLOAT_EXTRACT_SIGN(b);
+    bool     sign_z  = sign_a ^ sign_b;
+
+    if (exp_a == 0xFF) {
+        if (sig_a || ((exp_b == 0xFF) && sig_b))
+            return sfloat_propagate_nan(state, a, b);
+        if ((exp_b | sig_b) == 0) {
+            SFLOAT_RAISE(state, SFLOAT_INVALID);
+            return SFLOAT_NAN;
+        }
+        return SFLOAT_PACK(sign_z, 0xFF, 0);
+    }
+    if (exp_b == 0xFF) {
+        if (sig_b)
+            return sfloat_propagate_nan(state, a, b);
+        if ((exp_a | sig_a) == 0) {
+            SFLOAT_RAISE(state, SFLOAT_INVALID);
+            return SFLOAT_NAN;
+        }
+        return SFLOAT_PACK(sign_z, 0xFF, 0);
+    }
+    if (exp_a == 0) {
+        if (sig_a == 0)
+            return SFLOAT_PACK(sign_z, 0, 0);
+        SFLOAT_SUBNORMALIZE(sig_a, &exp_a, &sig_a);
+    }
+    if (exp_b == 0) {
+        if (sig_b == 0)
+            return SFLOAT_PACK(sign_z, 0, 0);
+        SFLOAT_SUBNORMALIZE(sig_b, &exp_b, &sig_b);
+    }
+    exp_z = exp_a + exp_b - 0x7F;
+    sig_a = (sig_a | 0x00800000) << 7;
+    sig_b = (sig_b | 0x00800000) << 8;
+    SFLOAT_SHIFT(64, ((uint64_t)sig_a) * sig_b, 32, &sig_z64);
+    sig_z = sig_z64;
+    if (0 <= (int32_t)(sig_z << 1)) {
+        sig_z <<= 1;
+        --exp_z;
+    }
+    return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
+}
+
+static sfloat_t sfloat_div(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
+    int16_t  exp_a  = SFLOAT_EXTRACT_EXP(a);
+    int16_t  exp_b  = SFLOAT_EXTRACT_EXP(b);
+    int16_t  exp_z  = 0;
+    uint32_t sig_a  = SFLOAT_EXTRACT_FRAC(a);
+    uint32_t sig_b  = SFLOAT_EXTRACT_FRAC(b);
+    uint32_t sig_z  = 0;
+    bool     sign_a = SFLOAT_EXTRACT_SIGN(a);
+    bool     sign_b = SFLOAT_EXTRACT_SIGN(b);
+    bool     sign_z = sign_a ^ sign_b;
+
+    if (exp_a == 0xFF) {
+        if (sig_a)
+            return sfloat_propagate_nan(state, a, b);
+        if (exp_b == 0xFF) {
+            if (sig_b)
+                return sfloat_propagate_nan(state, a, b);
+            SFLOAT_RAISE(state, SFLOAT_INVALID);
+            return SFLOAT_NAN;
+        }
+        return SFLOAT_PACK(sign_z, 0xFF, 0);
+    }
+    if (exp_b == 0xFF)
+        return (sig_b) ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z, 0, 0);
+    if (exp_b == 0) {
+        if (sig_b == 0) {
+            if ((exp_a | sig_a) == 0) {
+                SFLOAT_RAISE(state, SFLOAT_INVALID);
+                return SFLOAT_NAN;
+            }
+            SFLOAT_RAISE(state, SFLOAT_DIVBYZERO);
+            return SFLOAT_PACK(sign_z, 0xFF, 0);
+        }
+        SFLOAT_SUBNORMALIZE(sig_b, &exp_b, &sig_b);
+    }
+    if (exp_a == 0) {
+        if (sig_a == 0)
+            return SFLOAT_PACK(sign_z, 0, 0);
+        SFLOAT_SUBNORMALIZE(sig_a, &exp_a, &sig_a);
+    }
+    exp_z = exp_a - exp_b + 0x7D;
+    sig_a = (sig_a | 0x00800000) << 7;
+    sig_b = (sig_b | 0x00800000) << 8;
+    if (sig_b <= (sig_a + sig_a)) {
+        sig_a >>= 1;
+        ++exp_z;
+    }
+    sig_z = (((uint64_t)sig_a) << 32) / sig_b;
+    if ((sig_z & 0x3F) == 0)
+        sig_z |= ((uint64_t)sig_b * sig_z != ((uint64_t)sig_a) << 32);
+    return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
+}
+
 /*
  * There are two stages to constant folding in GMQCC: there is the parse
  * stage constant folding, where, with the help of the AST, operator
@@ -227,10 +630,10 @@ fold_t *fold_init(parser_t *parser) {
      * prime the tables with common constant values at constant
      * locations.
      */
-    (void)fold_constgen_float (fold, 0.0f);
-    (void)fold_constgen_float (fold, 1.0f);
-    (void)fold_constgen_float (fold, -1.0f);
-    (void)fold_constgen_float (fold, 2.0f);
+    (void)fold_constgen_float (fold, 0.0f, false);
+    (void)fold_constgen_float (fold, 1.0f, false);
+    (void)fold_constgen_float (fold, -1.0f, false);
+    (void)fold_constgen_float (fold, 2.0f, false);
 
     (void)fold_constgen_vector(fold, vec3_create(0.0f, 0.0f, 0.0f));
     (void)fold_constgen_vector(fold, vec3_create(-1.0f, -1.0f, -1.0f));

@@ -275,7 +678,7 @@ void fold_cleanup(fold_t *fold) {
     mem_d(fold);
 }
 
-ast_expression *fold_constgen_float(fold_t *fold, qcfloat_t value) {
+ast_expression *fold_constgen_float(fold_t *fold, qcfloat_t value, bool inexact) {
     ast_value *out = NULL;
     size_t i;
 

@@ -287,6 +690,7 @@ ast_expression *fold_constgen_float(fold_t *fold, qcfloat_t value) {
     out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_FLOAT);
     out->cvq      = CV_CONST;
     out->hasvalue = true;
+    out->inexact  = inexact;
     out->constval.vfloat = value;
 
     vec_push(fold->imm_float, out);

@@ -372,7 +776,7 @@ static GMQCC_INLINE ast_expression *fold_op_mul_vec(fold_t *fold, vec3_t vec, as
         out->node.keep = false;
         ((ast_member*)out)->rvalue = true;
         if (x != -1.0f)
-            return (ast_expression*)ast_binary_new(fold_ctx(fold), INSTR_MUL_F, fold_constgen_float(fold, x), out);
+            return (ast_expression*)ast_binary_new(fold_ctx(fold), INSTR_MUL_F, fold_constgen_float(fold, x, false), out);
     }
     return NULL;
 }

@@ -381,7 +785,7 @@ static GMQCC_INLINE ast_expression *fold_op_mul_vec(fold_t *fold, vec3_t vec, as
 static GMQCC_INLINE ast_expression *fold_op_neg(fold_t *fold, ast_value *a) {
     if (isfloat(a)) {
         if (fold_can_1(a))
-            return fold_constgen_float(fold, -fold_immvalue_float(a));
+            return fold_constgen_float(fold, -fold_immvalue_float(a), false);
     } else if (isvector(a)) {
         if (fold_can_1(a))
             return fold_constgen_vector(fold, vec3_neg(fold_immvalue_vector(a)));

@@ -392,25 +796,72 @@ static GMQCC_INLINE ast_expression *fold_op_neg(fold_t *fold, ast_value *a) {
 static GMQCC_INLINE ast_expression *fold_op_not(fold_t *fold, ast_value *a) {
     if (isfloat(a)) {
         if (fold_can_1(a))
-            return fold_constgen_float(fold, !fold_immvalue_float(a));
+            return fold_constgen_float(fold, !fold_immvalue_float(a), false);
     } else if (isvector(a)) {
         if (fold_can_1(a))
-            return fold_constgen_float(fold, vec3_notf(fold_immvalue_vector(a)));
+            return fold_constgen_float(fold, vec3_notf(fold_immvalue_vector(a)), false);
     } else if (isstring(a)) {
         if (fold_can_1(a)) {
             if (OPTS_FLAG(TRUE_EMPTY_STRINGS))
-                return fold_constgen_float(fold, !fold_immvalue_string(a));
+                return fold_constgen_float(fold, !fold_immvalue_string(a), false);
             else
-                return fold_constgen_float(fold, !fold_immvalue_string(a) || !*fold_immvalue_string(a));
+                return fold_constgen_float(fold, !fold_immvalue_string(a) || !*fold_immvalue_string(a), false);
         }
     }
     return NULL;
 }
 
+static bool fold_check_except_float(sfloat_t (*callback)(sfloat_state_t *, sfloat_t, sfloat_t),
+                                    fold_t    *fold,
+                                    ast_value *a,
+                                    ast_value *b)
+{
+    sfloat_state_t s;
+    sfloat_cast_t  ca;
+    sfloat_cast_t  cb;
+
+    s.roundingmode   = SFLOAT_ROUND_NEAREST_EVEN;
+    s.tiny           = SFLOAT_TBEFORE;
+    s.exceptionflags = 0;
+    ca.f             = fold_immvalue_float(a);
+    cb.f             = fold_immvalue_float(b);
+
+    callback(&s, ca.s, cb.s);
+    if (s.exceptionflags == 0)
+        return false;
+
+    if (s.exceptionflags & SFLOAT_DIVBYZERO)
+        compile_error(fold_ctx(fold), "division by zero");
+#if 0
+    /*
+     * To be enabled once softfloat implementations for stuff like sqrt()
+     * exist.
+     */
+    if (s.exceptionflags & SFLOAT_INVALID)
+        compile_error(fold_ctx(fold), "invalid argument");
+#endif
+
+    if (s.exceptionflags & SFLOAT_OVERFLOW)
+        compile_error(fold_ctx(fold), "arithmetic overflow");
+    if (s.exceptionflags & SFLOAT_UNDERFLOW)
+        compile_error(fold_ctx(fold), "arithmetic underflow");
+
+    return s.exceptionflags == SFLOAT_INEXACT;
+}
+
+static bool fold_check_inexact_float(fold_t *fold, ast_value *a, ast_value *b) {
+    lex_ctx_t ctx = fold_ctx(fold);
+    if (!a->inexact && !b->inexact)
+        return false;
+    return compile_warning(ctx, WARN_INEXACT_COMPARES, "inexact value in comparison");
+}
+
 static GMQCC_INLINE ast_expression *fold_op_add(fold_t *fold, ast_value *a, ast_value *b) {
     if (isfloat(a)) {
-        if (fold_can_2(a, b))
-            return fold_constgen_float(fold, fold_immvalue_float(a) + fold_immvalue_float(b));
+        if (fold_can_2(a, b)) {
+            bool inexact = fold_check_except_float(&sfloat_add, fold, a, b);
+            return fold_constgen_float(fold, fold_immvalue_float(a) + fold_immvalue_float(b), inexact);
+        }
     } else if (isvector(a)) {
         if (fold_can_2(a, b))
             return fold_constgen_vector(fold, vec3_add(fold_immvalue_vector(a), fold_immvalue_vector(b)));
@@ -420,8 +871,10 @@ static GMQCC_INLINE ast_expression *fold_op_add(fold_t *fold, ast_value *a, ast_
 
 static GMQCC_INLINE ast_expression *fold_op_sub(fold_t *fold, ast_value *a, ast_value *b) {
     if (isfloat(a)) {
-        if (fold_can_2(a, b))
-            return fold_constgen_float(fold, fold_immvalue_float(a) - fold_immvalue_float(b));
+        if (fold_can_2(a, b)) {
+            bool inexact = fold_check_except_float(&sfloat_sub, fold, a, b);
+            return fold_constgen_float(fold, fold_immvalue_float(a) - fold_immvalue_float(b), inexact);
+        }
     } else if (isvector(a)) {
         if (fold_can_2(a, b))
             return fold_constgen_vector(fold, vec3_sub(fold_immvalue_vector(a), fold_immvalue_vector(b)));

@@ -435,8 +888,10 @@ static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_
             if (fold_can_2(a, b))
                 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(b), fold_immvalue_float(a)));
         } else {
-            if (fold_can_2(a, b))
-                return fold_constgen_float(fold, fold_immvalue_float(a) * fold_immvalue_float(b));
+            if (fold_can_2(a, b)) {
+                bool inexact = fold_check_except_float(&sfloat_mul, fold, a, b);
+                return fold_constgen_float(fold, fold_immvalue_float(a) * fold_immvalue_float(b), inexact);
+            }
         }
     } else if (isvector(a)) {
         if (isfloat(b)) {

@@ -444,7 +899,7 @@ static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_
                 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
         } else {
             if (fold_can_2(a, b)) {
-                return fold_constgen_float(fold, vec3_mulvv(fold_immvalue_vector(a), fold_immvalue_vector(b)));
+                return fold_constgen_float(fold, vec3_mulvv(fold_immvalue_vector(a), fold_immvalue_vector(b)), false);
             } else if (OPTS_OPTIMIZATION(OPTIM_VECTOR_COMPONENTS) && fold_can_1(a)) {
                 ast_expression *out;
                 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "xyz"))) return out;

@@ -464,13 +919,14 @@ static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_
 static GMQCC_INLINE ast_expression *fold_op_div(fold_t *fold, ast_value *a, ast_value *b) {
     if (isfloat(a)) {
         if (fold_can_2(a, b)) {
-            return fold_constgen_float(fold, fold_immvalue_float(a) / fold_immvalue_float(b));
+            bool inexact = fold_check_except_float(&sfloat_div, fold, a, b);
+            return fold_constgen_float(fold, fold_immvalue_float(a) / fold_immvalue_float(b), inexact);
         } else if (fold_can_1(b)) {
             return (ast_expression*)ast_binary_new(
                 fold_ctx(fold),
                 INSTR_MUL_F,
                 (ast_expression*)a,
-                fold_constgen_float(fold, 1.0f / fold_immvalue_float(b))
+                fold_constgen_float(fold, 1.0f / fold_immvalue_float(b), false)
             );
         }
     } else if (isvector(a)) {

@@ -482,7 +938,7 @@ static GMQCC_INLINE ast_expression *fold_op_div(fold_t *fold, ast_value *a, ast_
                 INSTR_MUL_VF,
                 (ast_expression*)a,
                 (fold_can_1(b))
-                    ? (ast_expression*)fold_constgen_float(fold, 1.0f / fold_immvalue_float(b))
+                    ? (ast_expression*)fold_constgen_float(fold, 1.0f / fold_immvalue_float(b), false)
                     : (ast_expression*)ast_binary_new(
                           fold_ctx(fold),
                           INSTR_DIV_F,

@@ -497,14 +953,14 @@ static GMQCC_INLINE ast_expression *fold_op_div(fold_t *fold, ast_value *a, ast_
 
 static GMQCC_INLINE ast_expression *fold_op_mod(fold_t *fold, ast_value *a, ast_value *b) {
     return (fold_can_2(a, b))
-        ? fold_constgen_float(fold, fmod(fold_immvalue_float(a), fold_immvalue_float(b)))
+        ? fold_constgen_float(fold, fmod(fold_immvalue_float(a), fold_immvalue_float(b)), false)
         : NULL;
 }
 
 static GMQCC_INLINE ast_expression *fold_op_bor(fold_t *fold, ast_value *a, ast_value *b) {
     if (isfloat(a)) {
         if (fold_can_2(a, b))
-            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))));
+            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))), false);
     } else {
         if (isvector(b)) {
             if (fold_can_2(a, b))

@@ -520,7 +976,7 @@ static GMQCC_INLINE ast_expression *fold_op_bor(fold_t *fold, ast_value *a, ast_
 static GMQCC_INLINE ast_expression *fold_op_band(fold_t *fold, ast_value *a, ast_value *b) {
     if (isfloat(a)) {
         if (fold_can_2(a, b))
-            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))));
+            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))), false);
     } else {
         if (isvector(b)) {
             if (fold_can_2(a, b))

@@ -536,7 +992,7 @@ static GMQCC_INLINE ast_expression *fold_op_band(fold_t *fold, ast_value *a, ast
 static GMQCC_INLINE ast_expression *fold_op_xor(fold_t *fold, ast_value *a, ast_value *b) {
     if (isfloat(a)) {
         if (fold_can_2(a, b))
-            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) ^ ((qcint_t)fold_immvalue_float(b))));
+            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) ^ ((qcint_t)fold_immvalue_float(b))), false);
     } else {
         if (fold_can_2(a, b)) {
            if (isvector(b))

@@ -550,13 +1006,13 @@ static GMQCC_INLINE ast_expression *fold_op_xor(fold_t *fold, ast_value *a, ast_
 
 static GMQCC_INLINE ast_expression *fold_op_lshift(fold_t *fold, ast_value *a, ast_value *b) {
     if (fold_can_2(a, b) && isfloats(a, b))
-        return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) * powf(2.0f, fold_immvalue_float(b))));
+        return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) * powf(2.0f, fold_immvalue_float(b))), false);
     return NULL;
 }
 
 static GMQCC_INLINE ast_expression *fold_op_rshift(fold_t *fold, ast_value *a, ast_value *b) {
     if (fold_can_2(a, b) && isfloats(a, b))
-        return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) / powf(2.0f, fold_immvalue_float(b))));
+        return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) / powf(2.0f, fold_immvalue_float(b))), false);
     return NULL;
 }
 

@@ -573,7 +1029,8 @@ static GMQCC_INLINE ast_expression *fold_op_andor(fold_t *fold, ast_value *a, as
             ((expr) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
                     : (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
                 ? 1
-                : 0
+                : 0,
+            false
         );
     }
 }

@@ -591,12 +1048,13 @@ static GMQCC_INLINE ast_expression *fold_op_tern(fold_t *fold, ast_value *a, ast
 
 static GMQCC_INLINE ast_expression *fold_op_exp(fold_t *fold, ast_value *a, ast_value *b) {
     if (fold_can_2(a, b))
-        return fold_constgen_float(fold, (qcfloat_t)powf(fold_immvalue_float(a), fold_immvalue_float(b)));
+        return fold_constgen_float(fold, (qcfloat_t)powf(fold_immvalue_float(a), fold_immvalue_float(b)), false);
     return NULL;
 }
 
 static GMQCC_INLINE ast_expression *fold_op_lteqgt(fold_t *fold, ast_value *a, ast_value *b) {
     if (fold_can_2(a,b)) {
+        fold_check_inexact_float(fold, a, b);
         if (fold_immvalue_float(a) <  fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[2];
         if (fold_immvalue_float(a) == fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[0];
         if (fold_immvalue_float(a) >  fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[1];

@@ -604,11 +1062,21 @@ static GMQCC_INLINE ast_expression *fold_op_lteqgt(fold_t *fold, ast_value *a, a
     return NULL;
 }
 
+static GMQCC_INLINE ast_expression *fold_op_ltgt(fold_t *fold, ast_value *a, ast_value *b, bool lt) {
+    if (fold_can_2(a, b)) {
+        fold_check_inexact_float(fold, a, b);
+        return (lt) ? (ast_expression*)fold->imm_float[!!(fold_immvalue_float(a) < fold_immvalue_float(b))]
+                    : (ast_expression*)fold->imm_float[!!(fold_immvalue_float(a) > fold_immvalue_float(b))];
+    }
+    return NULL;
+}
+
 static GMQCC_INLINE ast_expression *fold_op_cmp(fold_t *fold, ast_value *a, ast_value *b, bool ne) {
     if (fold_can_2(a, b)) {
         if (isfloat(a) && isfloat(b)) {
             float la = fold_immvalue_float(a);
             float lb = fold_immvalue_float(b);
+            fold_check_inexact_float(fold, a, b);
             return (ast_expression*)fold->imm_float[!(ne ? la == lb : la != lb)];
         } if (isvector(a) && isvector(b)) {
             vec3_t la = fold_immvalue_vector(a);

@@ -622,7 +1090,7 @@ static GMQCC_INLINE ast_expression *fold_op_cmp(fold_t *fold, ast_value *a, ast_
 static GMQCC_INLINE ast_expression *fold_op_bnot(fold_t *fold, ast_value *a) {
     if (isfloat(a)) {
         if (fold_can_1(a))
-            return fold_constgen_float(fold, -1-fold_immvalue_float(a));
+            return fold_constgen_float(fold, -1-fold_immvalue_float(a), false);
     } else {
         if (isvector(a)) {
             if (fold_can_1(a))

@@ -685,6 +1153,8 @@ ast_expression *fold_op(fold_t *fold, const oper_info *info, ast_expression **op
     fold_op_case(1, ('|'), bor, (fold, a, b));
     fold_op_case(1, ('&'), band, (fold, a, b));
     fold_op_case(1, ('^'), xor, (fold, a, b));
+    fold_op_case(1, ('<'), ltgt, (fold, a, b, true));
+    fold_op_case(1, ('>'), ltgt, (fold, a, b, false));
     fold_op_case(2, ('<', '<'), lshift, (fold, a, b));
     fold_op_case(2, ('>', '>'), rshift, (fold, a, b));
     fold_op_case(2, ('|', '|'), andor, (fold, a, b, true));

@@ -708,46 +1178,46 @@ ast_expression *fold_op(fold_t *fold, const oper_info *info, ast_expression **op
  * and a generic selection function.
  */
 static GMQCC_INLINE ast_expression *fold_intrin_isfinite(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, isfinite(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, isfinite(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_isinf(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, isinf(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, isinf(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_isnan(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, isnan(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, isnan(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_isnormal(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, isnormal(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, isnormal(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_signbit(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, signbit(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, signbit(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intirn_acosh(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, acoshf(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, acoshf(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_asinh(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, asinhf(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, asinhf(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_atanh(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, (float)atanh(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, (float)atanh(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_exp(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, expf(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, expf(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_exp2(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, exp2f(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, exp2f(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_expm1(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, expm1f(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, expm1f(fold_immvalue_float(a)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_mod(fold_t *fold, ast_value *lhs, ast_value *rhs) {
-    return fold_constgen_float(fold, fmodf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)));
+    return fold_constgen_float(fold, fmodf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_pow(fold_t *fold, ast_value *lhs, ast_value *rhs) {
-    return fold_constgen_float(fold, powf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)));
+    return fold_constgen_float(fold, powf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)), false);
 }
 static GMQCC_INLINE ast_expression *fold_intrin_fabs(fold_t *fold, ast_value *a) {
-    return fold_constgen_float(fold, fabsf(fold_immvalue_float(a)));
+    return fold_constgen_float(fold, fabsf(fold_immvalue_float(a)), false);
 }
 
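For orientation, a minimal sketch of how the soft-float layer added in this file can be driven to classify a single constant operation. It assumes the sfloat_* definitions from the hunk above (and gmqcc's qcfloat_t) are in scope; the helper name is hypothetical and it simply mirrors what fold_check_except_float already does:

/* Illustrative only: run one soft-float division and report what the
 * folder would do with the raised exception flags. */
static int classify_constant_div(qcfloat_t a, qcfloat_t b) {
    sfloat_state_t s;
    sfloat_cast_t  ca;
    sfloat_cast_t  cb;

    s.roundingmode   = SFLOAT_ROUND_NEAREST_EVEN;
    s.tiny           = SFLOAT_TBEFORE;
    s.exceptionflags = 0;

    ca.f = a; /* reinterpret the host float as raw IEEE 754 bits */
    cb.f = b;

    (void)sfloat_div(&s, ca.s, cb.s);

    if (s.exceptionflags & SFLOAT_DIVBYZERO)
        return -1; /* the folder turns this into a "division by zero" error */

    /* a non-zero result here is what marks the generated immediate inexact */
    return !!(s.exceptionflags & SFLOAT_INEXACT);
}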
gmqcc.ini.example

@@ -568,6 +568,12 @@
     BUILTINS = true
 
 
+    #When comparing an inexact value such as `1.0/3.0' the result is
+    #pathologically wrong. Enabling this will trigger a compiler warning
+    #on such expressions.
+    INEXACT_COMPARES = true
+
+
 [optimizations]
     #Some general peephole optimizations. For instance the code `a = b
     #+ c` typically generates 2 instructions, an ADD and a STORE. This
intrin.c (8 changes)

@@ -422,7 +422,7 @@ static ast_expression *intrin_atanh(intrin_t *intrin) {
             (ast_expression*)ast_binary_new(
                 intrin_ctx(intrin),
                 INSTR_MUL_F,
-                (ast_expression*)fold_constgen_float(intrin->fold, 0.5),
+                (ast_expression*)fold_constgen_float(intrin->fold, 0.5, false),
                 (ast_expression*)calllog
             )
         );

@@ -496,7 +496,7 @@ static ast_expression *intrin_exp(intrin_t *intrin) {
                 intrin_ctx(intrin),
                 INSTR_LT,
                 (ast_expression*)i,
-                (ast_expression*)fold_constgen_float(intrin->fold, 200.0f)
+                (ast_expression*)fold_constgen_float(intrin->fold, 200.0f, false)
             ),
             false,
             NULL,

@@ -1027,7 +1027,7 @@ static ast_expression *intrin_pow(intrin_t *intrin) {
                 intrin_ctx(intrin),
                 INSTR_GT,
                 (ast_expression*)callfabs,
-                (ast_expression*)fold_constgen_float(intrin->fold, QC_POW_EPSILON)
+                (ast_expression*)fold_constgen_float(intrin->fold, QC_POW_EPSILON, false)
             ),
             /* pre not */
             false,

@@ -1911,7 +1911,7 @@ static ast_expression *intrin_log_variant(intrin_t *intrin, const char *name, fl
     vec_push(value->expression.params, arg1);
 
     vec_push(callln->params, (ast_expression*)arg1);
-    vec_push(callln->params, (ast_expression*)fold_constgen_float(intrin->fold, base));
+    vec_push(callln->params, (ast_expression*)fold_constgen_float(intrin->fold, base, false));
 
     vec_push(body->exprs,
         (ast_expression*)ast_return_new(
opts.c (1 change)

@@ -93,6 +93,7 @@ static void opts_setdefault(void) {
     opts_set(opts.warn,  WARN_CONST_OVERWRITE,   true);
     opts_set(opts.warn,  WARN_DIRECTIVE_INMACRO, true);
     opts_set(opts.warn,  WARN_BUILTINS,          true);
+    opts_set(opts.warn,  WARN_INEXACT_COMPARES,  true);
 
     /* flags */
     opts_set(opts.flags, ADJUST_VECTOR_FIELDS,  true);
opts.def (1 change)

@@ -98,6 +98,7 @@
     GMQCC_DEFINE_FLAG(CONST_OVERWRITE)
     GMQCC_DEFINE_FLAG(DIRECTIVE_INMACRO)
     GMQCC_DEFINE_FLAG(BUILTINS)
+    GMQCC_DEFINE_FLAG(INEXACT_COMPARES)
 #endif
 
 #ifdef GMQCC_TYPE_OPTIMIZATIONS
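opts.def is an X-macro style list: it is included more than once with different definitions of GMQCC_DEFINE_FLAG, so the single line added above is what brings WARN_INEXACT_COMPARES into existence for opts.c and fold.c. A simplified, hypothetical sketch of that expansion pattern (the real macro plumbing lives in gmqcc's headers and may differ in detail):

/* Assumed expansion scheme; names mirror the real headers but the exact
 * include in gmqcc.h is not shown in this commit. */
#define GMQCC_TYPE_WARNS
#define GMQCC_DEFINE_FLAG(NAME) WARN_##NAME,
enum {
#   include "opts.def"
    COUNT_WARNINGS
};
#undef GMQCC_DEFINE_FLAG
#undef GMQCC_TYPE_WARNS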
parser.c (18 changes)

@@ -1293,7 +1293,7 @@ static bool parser_close_call(parser_t *parser, shunt *sy)
         if ((fun->flags & AST_FLAG_VARIADIC) &&
             !(/*funval->cvq == CV_CONST && */ funval->hasvalue && funval->constval.vfunc->builtin))
         {
-            call->va_count = (ast_expression*)fold_constgen_float(parser->fold, (qcfloat_t)paramcount);
+            call->va_count = (ast_expression*)fold_constgen_float(parser->fold, (qcfloat_t)paramcount, false);
         }
     }
 

@@ -1548,14 +1548,14 @@ static bool parse_sya_operand(parser_t *parser, shunt *sy, bool with_labels)
         return true;
     }
     else if (parser->tok == TOKEN_FLOATCONST) {
-        ast_expression *val = fold_constgen_float(parser->fold, (parser_token(parser)->constval.f));
+        ast_expression *val = fold_constgen_float(parser->fold, (parser_token(parser)->constval.f), false);
         if (!val)
            return false;
        vec_push(sy->out, syexp(parser_ctx(parser), val));
        return true;
     }
     else if (parser->tok == TOKEN_INTCONST || parser->tok == TOKEN_CHARCONST) {
-        ast_expression *val = fold_constgen_float(parser->fold, (qcfloat_t)(parser_token(parser)->constval.i));
+        ast_expression *val = fold_constgen_float(parser->fold, (qcfloat_t)(parser_token(parser)->constval.i), false);
         if (!val)
            return false;
        vec_push(sy->out, syexp(parser_ctx(parser), val));

@@ -4030,7 +4030,7 @@ static bool parse_function_body(parser_t *parser, ast_value *var)
         self_think     = (ast_expression*)ast_entfield_new(ctx, gbl_self, fld_think);
 
         time_plus_1    = (ast_expression*)ast_binary_new(ctx, INSTR_ADD_F,
-                         gbl_time, (ast_expression*)fold_constgen_float(parser->fold, 0.1f));
+                         gbl_time, (ast_expression*)fold_constgen_float(parser->fold, 0.1f, false));
 
         if (!self_frame || !self_nextthink || !self_think || !time_plus_1) {
             if (self_frame)     ast_delete(self_frame);

@@ -4155,7 +4155,7 @@ static bool parse_function_body(parser_t *parser, ast_value *var)
             goto enderrfn;
         }
         func->varargs     = varargs;
-        func->fixedparams = (ast_value*)fold_constgen_float(parser->fold, vec_size(var->expression.params));
+        func->fixedparams = (ast_value*)fold_constgen_float(parser->fold, vec_size(var->expression.params), false);
     }
 
     parser->function = func;

@@ -4213,7 +4213,7 @@ static ast_expression *array_accessor_split(
 
     cmp = ast_binary_new(ctx, INSTR_LT,
                          (ast_expression*)index,
-                         (ast_expression*)fold_constgen_float(parser->fold, middle));
+                         (ast_expression*)fold_constgen_float(parser->fold, middle, false));
     if (!cmp) {
         ast_delete(left);
         ast_delete(right);

@@ -4246,7 +4246,7 @@ static ast_expression *array_setter_node(parser_t *parser, ast_value *array, ast
     if (value->expression.vtype == TYPE_FIELD && value->expression.next->vtype == TYPE_VECTOR)
         assignop = INSTR_STORE_V;
 
-    subscript = ast_array_index_new(ctx, (ast_expression*)array, (ast_expression*)fold_constgen_float(parser->fold, from));
+    subscript = ast_array_index_new(ctx, (ast_expression*)array, (ast_expression*)fold_constgen_float(parser->fold, from, false));
     if (!subscript)
         return NULL;
 

@@ -4312,7 +4312,7 @@ static ast_expression *array_field_setter_node(
     if (value->expression.vtype == TYPE_FIELD && value->expression.next->vtype == TYPE_VECTOR)
         assignop = INSTR_STOREP_V;
 
-    subscript = ast_array_index_new(ctx, (ast_expression*)array, (ast_expression*)fold_constgen_float(parser->fold, from));
+    subscript = ast_array_index_new(ctx, (ast_expression*)array, (ast_expression*)fold_constgen_float(parser->fold, from, false));
     if (!subscript)
         return NULL;
 

@@ -4375,7 +4375,7 @@ static ast_expression *array_getter_node(parser_t *parser, ast_value *array, ast
     ast_return      *ret;
     ast_array_index *subscript;
 
-    subscript = ast_array_index_new(ctx, (ast_expression*)array, (ast_expression*)fold_constgen_float(parser->fold, from));
+    subscript = ast_array_index_new(ctx, (ast_expression*)array, (ast_expression*)fold_constgen_float(parser->fold, from, false));
     if (!subscript)
         return NULL;
 
parser.h (2 changes)

@@ -127,7 +127,7 @@ ast_expression *parser_find_global(parser_t *parser, const char *name);
 /* fold.c */
 fold_t         *fold_init           (parser_t *);
 void            fold_cleanup        (fold_t *);
-ast_expression *fold_constgen_float (fold_t *, qcfloat_t);
+ast_expression *fold_constgen_float (fold_t *, qcfloat_t, bool);
 ast_expression *fold_constgen_vector(fold_t *, vec3_t);
 ast_expression *fold_constgen_string(fold_t *, const char *, bool);
 bool            fold_generate       (fold_t *, ir_builder *);