Fixes a couple of gameplay bugs introduced with ROR, fixes an MSVC compile issue with the newer version of kplib.c, and updates nedmalloc with changes from the latest version in its SVN
git-svn-id: https://svn.eduke32.com/eduke32@1452 1a8010ca-5511-0410-912e-c29ae57300e0
parent 9190ee2139
commit ad0179da42
9 changed files with 218 additions and 202 deletions
|
@ -4,7 +4,7 @@
|
|||
http://creativecommons.org/licenses/publicdomain. Send questions,
|
||||
comments, complaints, performance data, etc to dl@cs.oswego.edu
|
||||
|
||||
* Version pre-2.8.4 Mon Nov 27 11:22:37 2006 (dl at gee)
|
||||
* Version 2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
|
||||
|
||||
Note: There may be an updated version of this malloc obtainable at
|
||||
ftp://gee.cs.oswego.edu/pub/misc/malloc.c
|
||||
|
@ -245,7 +245,8 @@ USE_LOCKS default: 0 (false)
|
|||
pthread or WIN32 mutex lock/unlock. (If set true, this can be
|
||||
overridden on a per-mspace basis for mspace versions.) If set to a
|
||||
non-zero value other than 1, locks are used, but their
|
||||
implementation is left out, so lock functions must be supplied manually.
|
||||
implementation is left out, so lock functions must be supplied manually,
|
||||
as described below.
|
||||
|
||||
USE_SPIN_LOCKS default: 1 iff USE_LOCKS and on x86 using gcc or MSC
|
||||
If true, uses custom spin locks for locking. This is currently
|
||||
|
@ -495,7 +496,6 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
|
|||
#endif /* WIN32 */
|
||||
#ifdef WIN32
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
// #define _WIN32_WINNT 0x403
|
||||
#include <windows.h>
|
||||
#define HAVE_MMAP 1
|
||||
#define HAVE_MORECORE 0
|
||||
|
@ -532,6 +532,12 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
|
|||
#include <sys/types.h> /* For size_t */
|
||||
#endif /* LACKS_SYS_TYPES_H */
|
||||
|
||||
#if (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310)
|
||||
#define SPIN_LOCKS_AVAILABLE 1
|
||||
#else
|
||||
#define SPIN_LOCKS_AVAILABLE 0
|
||||
#endif
|
||||
|
||||
/* The maximum possible size_t value has all bits set */
|
||||
#define MAX_SIZE_T (~(size_t)0)
|
||||
|
||||
|
@ -566,11 +572,11 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
|
|||
#define USE_LOCKS 0
|
||||
#endif /* USE_LOCKS */
|
||||
#ifndef USE_SPIN_LOCKS
|
||||
#if USE_LOCKS && (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310)
|
||||
#if USE_LOCKS && SPIN_LOCKS_AVAILABLE
|
||||
#define USE_SPIN_LOCKS 1
|
||||
#else
|
||||
#define USE_SPIN_LOCKS 0
|
||||
#endif /* USE_LOCKS && ... */
|
||||
#endif /* USE_LOCKS && SPIN_LOCKS_AVAILABLE. */
|
||||
#endif /* USE_SPIN_LOCKS */
|
||||
#ifndef INSECURE
|
||||
#define INSECURE 0
|
||||
|
@ -1144,15 +1150,17 @@ size_t destroy_mspace(mspace msp);
|
|||
mspace create_mspace_with_base(void* base, size_t capacity, int locked);
|
||||
|
||||
/*
|
||||
mspace_mmap_large_chunks controls whether requests for large chunks
|
||||
are allocated in their own mmapped regions, separate from others in
|
||||
this mspace. By default this is enabled, which reduces
|
||||
fragmentation. However, such chunks are not necessarily released to
|
||||
the system upon destroy_mspace. Disabling by setting to false may
|
||||
increase fragmentation, but avoids leakage when relying on
|
||||
destroy_mspace to release all memory allocated using this space.
|
||||
mspace_track_large_chunks controls whether requests for large chunks
|
||||
are allocated in their own untracked mmapped regions, separate from
|
||||
others in this mspace. By default large chunks are not tracked,
|
||||
which reduces fragmentation. However, such chunks are not
|
||||
necessarily released to the system upon destroy_mspace. Enabling
|
||||
tracking by setting to true may increase fragmentation, but avoids
|
||||
leakage when relying on destroy_mspace to release all memory
|
||||
allocated using this space. The function returns the previous
|
||||
setting.
|
||||
*/
|
||||
int mspace_mmap_large_chunks(mspace msp, int enable);
|
||||
int mspace_track_large_chunks(mspace msp, int enable);
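A minimal usage sketch of the renamed call (illustrative only, not part of the patch; it assumes an mspace created with the APIs declared in this header):

  mspace msp = create_mspace(0, 0);                       /* default capacity, no locking */
  int was_tracking = mspace_track_large_chunks(msp, 1);   /* returns the previous setting */
  void *big = mspace_malloc(msp, 4 << 20);                /* large request, likely mmapped */
  /* ... use big ... */
  destroy_mspace(msp);                                    /* tracked large chunks are released too */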
|
||||
|
||||
|
||||
/*
|
||||
|
@ -1280,7 +1288,7 @@ int mspace_mallopt(int, int);
|
|||
#ifndef LACKS_ERRNO_H
|
||||
#include <errno.h> /* for MALLOC_FAILURE_ACTION */
|
||||
#endif /* LACKS_ERRNO_H */
|
||||
#if FOOTERS
|
||||
#if FOOTERS || DEBUG
|
||||
#include <time.h> /* for magic initialization */
|
||||
#endif /* FOOTERS */
|
||||
#ifndef LACKS_STDLIB_H
|
||||
|
@ -1288,6 +1296,7 @@ int mspace_mallopt(int, int);
|
|||
#endif /* LACKS_STDLIB_H */
|
||||
#ifdef DEBUG
|
||||
#if ABORT_ON_ASSERT_FAILURE
|
||||
#undef assert
|
||||
#define assert(x) if(!(x)) ABORT
|
||||
#else /* ABORT_ON_ASSERT_FAILURE */
|
||||
#include <assert.h>
|
||||
|
@ -1308,7 +1317,14 @@ int mspace_mallopt(int, int);
|
|||
#endif /* USE_BUILTIN_FFS */
|
||||
#if HAVE_MMAP
|
||||
#ifndef LACKS_SYS_MMAN_H
|
||||
/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
|
||||
#if (defined(linux) && !defined(__USE_GNU))
|
||||
#define __USE_GNU 1
|
||||
#include <sys/mman.h> /* for mmap */
|
||||
#undef __USE_GNU
|
||||
#else
|
||||
#include <sys/mman.h> /* for mmap */
|
||||
#endif /* linux */
|
||||
#endif /* LACKS_SYS_MMAN_H */
|
||||
#ifndef LACKS_FCNTL_H
|
||||
#include <fcntl.h>
|
||||
|
@ -1602,7 +1618,6 @@ static FORCEINLINE int win32munmap(void* ptr, size_t size) {
|
|||
* Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
|
||||
*/
|
||||
#if HAVE_MMAP
|
||||
#define IS_MMAPPED_BIT (SIZE_T_ONE)
|
||||
#define USE_MMAP_BIT (SIZE_T_ONE)
|
||||
|
||||
#ifdef MMAP
|
||||
|
@ -1621,7 +1636,6 @@ static FORCEINLINE int win32munmap(void* ptr, size_t size) {
|
|||
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
|
||||
#endif /* DIRECT_MMAP */
|
||||
#else /* HAVE_MMAP */
|
||||
#define IS_MMAPPED_BIT (SIZE_T_ZERO)
|
||||
#define USE_MMAP_BIT (SIZE_T_ZERO)
|
||||
|
||||
#define MMAP(s) MFAIL
|
||||
|
@ -1671,29 +1685,33 @@ static FORCEINLINE int win32munmap(void* ptr, size_t size) {
|
|||
|
||||
Because lock-protected regions generally have bounded times, it is
|
||||
OK to use the supplied simple spinlocks in the custom versions for
|
||||
x86.
|
||||
x86. Spinlocks are likely to improve performance for lightly
|
||||
contended applications, but worsen performance under heavy
|
||||
contention.
|
||||
|
||||
If USE_LOCKS is > 1, the definitions of lock routines here are
|
||||
bypassed, in which case you will need to define at least
|
||||
INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK and possibly TRY_LOCK
|
||||
(which is not used in this malloc, but commonly needed in
|
||||
extensions.)
|
||||
bypassed, in which case you will need to define the type MLOCK_T,
|
||||
and at least INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK and possibly
|
||||
TRY_LOCK (which is not used in this malloc, but commonly needed in
|
||||
extensions.) You must also declare a
|
||||
static MLOCK_T malloc_global_mutex = { initialization values };.
|
||||
|
||||
*/
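As a concrete illustration of that contract (a sketch, not part of this patch), plain pthread mutexes are one way to supply the required definitions when USE_LOCKS is set to a value greater than 1:

  #define MLOCK_T           pthread_mutex_t
  #define INITIAL_LOCK(sl)  pthread_mutex_init(sl, NULL)    /* 0 on success, as expected  */
  #define ACQUIRE_LOCK(sl)  pthread_mutex_lock(sl)          /* 0 on success               */
  #define RELEASE_LOCK(sl)  pthread_mutex_unlock(sl)
  #define TRY_LOCK(sl)      (pthread_mutex_trylock(sl) == 0)
  static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;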
|
||||
|
||||
#if USE_LOCKS == 1
|
||||
|
||||
#if USE_SPIN_LOCKS
|
||||
#if USE_SPIN_LOCKS && SPIN_LOCKS_AVAILABLE
|
||||
#ifndef WIN32
|
||||
|
||||
/* Custom pthread-style spin locks on x86 and x64 for gcc */
|
||||
struct pthread_mlock_t {
|
||||
volatile unsigned int l;
|
||||
volatile unsigned int c;
|
||||
volatile pthread_t threadid;
|
||||
unsigned int c;
|
||||
pthread_t threadid;
|
||||
};
|
||||
#define MLOCK_T struct pthread_mlock_t
|
||||
#define CURRENT_THREAD pthread_self()
|
||||
#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0)
|
||||
#define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
|
||||
#define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl)
|
||||
#define RELEASE_LOCK(sl) pthread_release_lock(sl)
|
||||
#define TRY_LOCK(sl) pthread_try_lock(sl)
|
||||
|
@ -1722,10 +1740,11 @@ static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
|
|||
: "memory", "cc");
|
||||
if (!ret) {
|
||||
assert(!sl->threadid);
|
||||
sl->c = 1;
|
||||
sl->threadid = CURRENT_THREAD;
|
||||
sl->c = 1;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if ((++spins & SPINS_PER_YIELD) == 0) {
|
||||
#if defined (__SVR4) && defined (__sun) /* solaris */
|
||||
thr_yield();
|
||||
|
@ -1739,14 +1758,13 @@ static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
|
||||
assert(sl->l != 0);
|
||||
volatile unsigned int* lp = &sl->l;
|
||||
assert(*lp != 0);
|
||||
assert(sl->threadid == CURRENT_THREAD);
|
||||
if (--sl->c == 0) {
|
||||
sl->threadid = 0;
|
||||
volatile unsigned int* lp = &sl->l;
|
||||
int prev = 0;
|
||||
int ret;
|
||||
__asm__ __volatile__ ("lock; xchgl %0, %1"
|
||||
|
@ -1774,8 +1792,8 @@ static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
|
|||
: "memory", "cc");
|
||||
if (!ret) {
|
||||
assert(!sl->threadid);
|
||||
sl->c = 1;
|
||||
sl->threadid = CURRENT_THREAD;
|
||||
sl->c = 1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@ -1785,16 +1803,15 @@ static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
|
|||
|
||||
#else /* WIN32 */
|
||||
/* Custom win32-style spin locks on x86 and x64 for MSC */
|
||||
struct win32_mlock_t
|
||||
{
|
||||
struct win32_mlock_t {
|
||||
volatile long l;
|
||||
volatile unsigned int c;
|
||||
volatile long threadid;
|
||||
unsigned int c;
|
||||
long threadid;
|
||||
};
|
||||
|
||||
#define MLOCK_T struct win32_mlock_t
|
||||
#define CURRENT_THREAD win32_getcurrentthreadid()
|
||||
#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0)
|
||||
#define CURRENT_THREAD GetCurrentThreadId()
|
||||
#define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
|
||||
#define ACQUIRE_LOCK(sl) win32_acquire_lock(sl)
|
||||
#define RELEASE_LOCK(sl) win32_release_lock(sl)
|
||||
#define TRY_LOCK(sl) win32_try_lock(sl)
|
||||
|
@ -1802,36 +1819,19 @@ struct win32_mlock_t
|
|||
|
||||
static MLOCK_T malloc_global_mutex = { 0, 0, 0};
|
||||
|
||||
static FORCEINLINE long win32_getcurrentthreadid() {
|
||||
#ifdef _MSC_VER
|
||||
#if defined(_M_IX86)
|
||||
long *threadstruct=(long *)__readfsdword(0x18);
|
||||
long threadid=threadstruct[0x24/sizeof(long)];
|
||||
return threadid;
|
||||
#elif defined(_M_X64)
|
||||
/* todo */
|
||||
return GetCurrentThreadId();
|
||||
#else
|
||||
return GetCurrentThreadId();
|
||||
#endif
|
||||
#else
|
||||
return GetCurrentThreadId();
|
||||
#endif
|
||||
}
|
||||
|
||||
static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
|
||||
int spins = 0;
|
||||
for (;;) {
|
||||
if (sl->l != 0) {
|
||||
if (sl->threadid == CURRENT_THREAD) {
|
||||
if (sl->threadid == (signed)CURRENT_THREAD) {
|
||||
++sl->c;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (!interlockedexchange(&sl->l, 1)) {
|
||||
assert(!sl->threadid);
|
||||
sl->c=CURRENT_THREAD;
|
||||
assert(!sl->threadid);
|
||||
sl->threadid = CURRENT_THREAD;
|
||||
sl->c = 1;
|
||||
return 0;
|
||||
|
@ -1853,7 +1853,7 @@ static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
|
|||
|
||||
static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
|
||||
if(sl->l != 0) {
|
||||
if (sl->threadid == CURRENT_THREAD) {
|
||||
if (sl->threadid == (signed)CURRENT_THREAD) {
|
||||
++sl->c;
|
||||
return 1;
|
||||
}
|
||||
|
@ -1909,9 +1909,9 @@ static int pthread_init_lock (MLOCK_T *sl) {
|
|||
#define MLOCK_T CRITICAL_SECTION
|
||||
#define CURRENT_THREAD GetCurrentThreadId()
|
||||
#define INITIAL_LOCK(s) (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000))
|
||||
#define ACQUIRE_LOCK(s) (EnterCriticalSection(s), 0)
|
||||
#define RELEASE_LOCK(s) LeaveCriticalSection(s)
|
||||
#define TRY_LOCK(s) TryEnterCriticalSection(s)
|
||||
#define ACQUIRE_LOCK(s) (EnterCriticalSection(sl), 0)
|
||||
#define RELEASE_LOCK(s) LeaveCriticalSection(sl)
|
||||
#define TRY_LOCK(s) TryEnterCriticalSection(sl)
|
||||
#define NEED_GLOBAL_LOCK_INIT
|
||||
|
||||
static MLOCK_T malloc_global_mutex;
|
||||
|
@ -1959,8 +1959,12 @@ static void init_malloc_global_mutex() {
|
|||
#endif /* USE_LOCKS */
|
||||
|
||||
#if USE_LOCKS
|
||||
#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
|
||||
#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
|
||||
#endif
|
||||
#ifndef RELEASE_MALLOC_GLOBAL_LOCK
|
||||
#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
|
||||
#endif
|
||||
#else /* USE_LOCKS */
|
||||
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
|
||||
#define RELEASE_MALLOC_GLOBAL_LOCK()
|
||||
|
@ -2063,8 +2067,9 @@ static void init_malloc_global_mutex() {
|
|||
|
||||
The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
|
||||
the chunk size redundantly records whether the current chunk is
|
||||
inuse. This redundancy enables usage checks within free and realloc,
|
||||
and reduces indirection when freeing and consolidating chunks.
|
||||
inuse (unless the chunk is mmapped). This redundancy enables usage
|
||||
checks within free and realloc, and reduces indirection when freeing
|
||||
and consolidating chunks.
|
||||
|
||||
Each freshly allocated chunk must have both cinuse and pinuse set.
|
||||
That is, each allocated chunk borders either a previously allocated
|
||||
|
@ -2093,9 +2098,8 @@ static void init_malloc_global_mutex() {
|
|||
space is still allocated for it (TOP_FOOT_SIZE) to enable
|
||||
separation or merging when space is extended.
|
||||
|
||||
3. Chunks allocated via mmap, which have the lowest-order bit
|
||||
(IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
|
||||
PINUSE_BIT in their head fields. Because they are allocated
|
||||
3. Chunks allocated via mmap, have both cinuse and pinuse bits
|
||||
cleared in their head fields. Because they are allocated
|
||||
one-by-one, each must carry its own prev_foot field, which is
|
||||
also used to hold the offset this chunk has within its mmapped
|
||||
region, which is needed to preserve alignment. Each mmapped
|
||||
|
@ -2161,9 +2165,7 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
|
|||
/*
|
||||
The head field of a chunk is or'ed with PINUSE_BIT when previous
|
||||
adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
|
||||
use. If the chunk was obtained with mmap, the prev_foot field has
|
||||
IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
|
||||
mmapped region to the base of the chunk.
|
||||
use, unless mmapped, in which case both bits are cleared.
|
||||
|
||||
FLAG4_BIT is not used by this malloc, but might be useful in extensions.
|
||||
*/
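Spelled out (an illustrative summary, not part of the patch), the 2.8.4 head encoding for a chunk of a given size is:

  /* ordinary in-use chunk:  head = size | CINUSE_BIT | (PINUSE_BIT if the previous chunk is in use) */
  /* free chunk:             head = size | PINUSE_BIT                                                */
  /* mmapped chunk:          head = size             -- both inuse bits clear                        */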
|
||||
|
@ -2180,10 +2182,12 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
|
|||
/* extraction of fields from head words */
|
||||
#define cinuse(p) ((p)->head & CINUSE_BIT)
|
||||
#define pinuse(p) ((p)->head & PINUSE_BIT)
|
||||
#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
|
||||
#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
|
||||
|
||||
#define chunksize(p) ((p)->head & ~(FLAG_BITS))
|
||||
|
||||
#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
|
||||
#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
|
||||
|
||||
/* Treat space at ptr +/- offset as a chunk */
|
||||
#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
|
||||
|
@ -2208,9 +2212,6 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
|
|||
#define set_free_with_pinuse(p, s, n)\
|
||||
(clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
|
||||
|
||||
#define is_mmapped(p)\
|
||||
(!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
|
||||
|
||||
/* Get the internal overhead associated with chunk p */
|
||||
#define overhead_for(p)\
|
||||
(is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
|
||||
|
@ -2381,7 +2382,7 @@ typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
|
|||
and so should not try to deallocate or merge with others.
|
||||
(This currently holds only for the initial segment passed
|
||||
into create_mspace_with_base.)
|
||||
* If IS_MMAPPED_BIT set, the segment may be merged with
|
||||
* If USE_MMAP_BIT set, the segment may be merged with
|
||||
other surrounding mmapped segments and trimmed/de-allocated
|
||||
using munmap.
|
||||
* If neither bit is set, then the segment was obtained using
|
||||
|
@ -2396,7 +2397,7 @@ struct malloc_segment {
|
|||
flag_t sflags; /* mmap and extern flag */
|
||||
};
|
||||
|
||||
#define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
|
||||
#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
|
||||
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
|
||||
|
||||
typedef struct malloc_segment msegment;
|
||||
|
@ -2543,7 +2544,7 @@ struct malloc_params {
|
|||
static struct malloc_params mparams;
|
||||
|
||||
/* Ensure mparams initialized */
|
||||
#define ensure_initialization() if (mparams.magic == 0) init_mparams()
|
||||
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
|
||||
|
||||
#if !ONLY_MSPACES
|
||||
|
||||
|
@ -2752,7 +2753,7 @@ static size_t traverse_and_check(mstate m);
|
|||
I = NTREEBINS-1;\
|
||||
else {\
|
||||
unsigned int K;\
|
||||
__asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "rm" (X));\
|
||||
__asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g" (X));\
|
||||
I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
|
||||
}\
|
||||
}
|
||||
|
@ -2850,7 +2851,7 @@ static size_t traverse_and_check(mstate m);
|
|||
#define compute_bit2idx(X, I)\
|
||||
{\
|
||||
unsigned int J;\
|
||||
__asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "rm" (X));\
|
||||
__asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
|
||||
I = (bindex_t)J;\
|
||||
}
|
||||
|
||||
|
@ -2921,15 +2922,15 @@ static size_t traverse_and_check(mstate m);
|
|||
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
|
||||
/* Check if address of next chunk n is higher than base chunk p */
|
||||
#define ok_next(p, n) ((char*)(p) < (char*)(n))
|
||||
/* Check if p has its cinuse bit on */
|
||||
#define ok_cinuse(p) cinuse(p)
|
||||
/* Check if p has inuse status */
|
||||
#define ok_inuse(p) is_inuse(p)
|
||||
/* Check if p has its pinuse bit on */
|
||||
#define ok_pinuse(p) pinuse(p)
|
||||
|
||||
#else /* !INSECURE */
|
||||
#define ok_address(M, a) (1)
|
||||
#define ok_next(b, n) (1)
|
||||
#define ok_cinuse(p) (1)
|
||||
#define ok_inuse(p) (1)
|
||||
#define ok_pinuse(p) (1)
|
||||
#endif /* !INSECURE */
|
||||
|
||||
|
@ -2958,6 +2959,8 @@ static size_t traverse_and_check(mstate m);
|
|||
|
||||
#define mark_inuse_foot(M,p,s)
|
||||
|
||||
/* Macros for setting head/foot of non-mmapped chunks */
|
||||
|
||||
/* Set cinuse bit and pinuse bit of next chunk */
|
||||
#define set_inuse(M,p,s)\
|
||||
((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
|
||||
|
@ -3058,7 +3061,6 @@ static int init_mparams(void) {
|
|||
INITIAL_LOCK(&gm->mutex);
|
||||
#endif
|
||||
|
||||
#if (FOOTERS && !INSECURE)
|
||||
{
|
||||
#if USE_DEV_RANDOM
|
||||
int fd;
|
||||
|
@ -3078,13 +3080,9 @@ static int init_mparams(void) {
|
|||
#endif
|
||||
magic |= (size_t)8U; /* ensure nonzero */
|
||||
magic &= ~(size_t)7U; /* improve chances of fault for bad values */
|
||||
}
|
||||
#else /* (FOOTERS && !INSECURE) */
|
||||
magic = (size_t)0x58585858U;
|
||||
#endif /* (FOOTERS && !INSECURE) */
|
||||
|
||||
mparams.magic = magic;
|
||||
}
|
||||
}
|
||||
|
||||
RELEASE_MALLOC_GLOBAL_LOCK();
|
||||
return 1;
|
||||
|
@ -3092,8 +3090,9 @@ static int init_mparams(void) {
|
|||
|
||||
/* support for mallopt */
|
||||
static int change_mparam(int param_number, int value) {
|
||||
size_t val = (value == -1)? MAX_SIZE_T : (size_t)value;
|
||||
size_t val;
|
||||
ensure_initialization();
|
||||
val = (value == -1)? MAX_SIZE_T : (size_t)value;
|
||||
switch(param_number) {
|
||||
case M_TRIM_THRESHOLD:
|
||||
mparams.trim_threshold = val;
|
||||
|
@ -3139,7 +3138,7 @@ static void do_check_top_chunk(mstate m, mchunkptr p) {
|
|||
/* Check properties of (inuse) mmapped chunks */
|
||||
static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
|
||||
size_t sz = chunksize(p);
|
||||
size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
|
||||
size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
|
||||
assert(is_mmapped(p));
|
||||
assert(use_mmap(m));
|
||||
assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
|
||||
|
@ -3153,7 +3152,7 @@ static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
|
|||
/* Check properties of inuse chunks */
|
||||
static void do_check_inuse_chunk(mstate m, mchunkptr p) {
|
||||
do_check_any_chunk(m, p);
|
||||
assert(cinuse(p));
|
||||
assert(is_inuse(p));
|
||||
assert(next_pinuse(p));
|
||||
/* If not pinuse and not mmapped, previous chunk has OK offset */
|
||||
assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
|
||||
|
@ -3166,7 +3165,7 @@ static void do_check_free_chunk(mstate m, mchunkptr p) {
|
|||
size_t sz = chunksize(p);
|
||||
mchunkptr next = chunk_plus_offset(p, sz);
|
||||
do_check_any_chunk(m, p);
|
||||
assert(!cinuse(p));
|
||||
assert(!is_inuse(p));
|
||||
assert(!next_pinuse(p));
|
||||
assert (!is_mmapped(p));
|
||||
if (p != m->dv && p != m->top) {
|
||||
|
@ -3175,7 +3174,7 @@ static void do_check_free_chunk(mstate m, mchunkptr p) {
|
|||
assert(is_aligned(chunk2mem(p)));
|
||||
assert(next->prev_foot == sz);
|
||||
assert(pinuse(p));
|
||||
assert (next == m->top || cinuse(next));
|
||||
assert (next == m->top || is_inuse(next));
|
||||
assert(p->fd->bk == p);
|
||||
assert(p->bk->fd == p);
|
||||
}
|
||||
|
@ -3188,7 +3187,7 @@ static void do_check_free_chunk(mstate m, mchunkptr p) {
|
|||
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
|
||||
if (mem != 0) {
|
||||
mchunkptr p = mem2chunk(mem);
|
||||
size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
|
||||
size_t sz = p->head & ~INUSE_BITS;
|
||||
do_check_inuse_chunk(m, p);
|
||||
assert((sz & CHUNK_ALIGN_MASK) == 0);
|
||||
assert(sz >= MIN_CHUNK_SIZE);
|
||||
|
@ -3215,7 +3214,7 @@ static void do_check_tree(mstate m, tchunkptr t) {
|
|||
do_check_any_chunk(m, ((mchunkptr)u));
|
||||
assert(u->index == tindex);
|
||||
assert(chunksize(u) == tsize);
|
||||
assert(!cinuse(u));
|
||||
assert(!is_inuse(u));
|
||||
assert(!next_pinuse(u));
|
||||
assert(u->fd->bk == u);
|
||||
assert(u->bk->fd == u);
|
||||
|
@ -3333,13 +3332,13 @@ static size_t traverse_and_check(mstate m) {
|
|||
while (segment_holds(s, q) &&
|
||||
q != m->top && q->head != FENCEPOST_HEAD) {
|
||||
sum += chunksize(q);
|
||||
if (cinuse(q)) {
|
||||
if (is_inuse(q)) {
|
||||
assert(!bin_find(m, q));
|
||||
do_check_inuse_chunk(m, q);
|
||||
}
|
||||
else {
|
||||
assert(q == m->dv || bin_find(m, q));
|
||||
assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
|
||||
assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
|
||||
do_check_free_chunk(m, q);
|
||||
}
|
||||
lastq = q;
|
||||
|
@ -3400,7 +3399,7 @@ static struct mallinfo internal_mallinfo(mstate m) {
|
|||
q != m->top && q->head != FENCEPOST_HEAD) {
|
||||
size_t sz = chunksize(q);
|
||||
sum += sz;
|
||||
if (!cinuse(q)) {
|
||||
if (!is_inuse(q)) {
|
||||
mfree += sz;
|
||||
++nfree;
|
||||
}
|
||||
|
@ -3441,7 +3440,7 @@ static void internal_malloc_stats(mstate m) {
|
|||
mchunkptr q = align_as_chunk(s->base);
|
||||
while (segment_holds(s, q) &&
|
||||
q != m->top && q->head != FENCEPOST_HEAD) {
|
||||
if (!cinuse(q))
|
||||
if (!is_inuse(q))
|
||||
used -= chunksize(q);
|
||||
q = next_chunk(q);
|
||||
}
|
||||
|
@ -3714,9 +3713,7 @@ static void internal_malloc_stats(mstate m) {
|
|||
the mmapped region stored in the prev_foot field of the chunk. This
|
||||
allows reconstruction of the required argument to MUNMAP when freed,
|
||||
and also allows adjustment of the returned chunk to meet alignment
|
||||
requirements (especially in memalign). There is also enough space
|
||||
allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
|
||||
the PINUSE bit so frees can be checked.
|
||||
requirements (especially in memalign).
|
||||
*/
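In other words (an illustrative restatement, not part of the patch; the same arithmetic appears in mmap_resize and the free paths below), the mapping that backs an mmapped chunk p is recovered as:

  char  *region_base = (char *)p - p->prev_foot;                      /* address passed to MUNMAP */
  size_t region_len  = chunksize(p) + p->prev_foot + MMAP_FOOT_PAD;   /* length passed to MUNMAP  */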
|
||||
|
||||
/* Malloc using mmap */
|
||||
|
@ -3728,13 +3725,13 @@ static void* mmap_alloc(mstate m, size_t nb) {
|
|||
size_t offset = align_offset(chunk2mem(mm));
|
||||
size_t psize = mmsize - offset - MMAP_FOOT_PAD;
|
||||
mchunkptr p = (mchunkptr)(mm + offset);
|
||||
p->prev_foot = offset | IS_MMAPPED_BIT;
|
||||
(p)->head = (psize|CINUSE_BIT);
|
||||
p->prev_foot = offset;
|
||||
p->head = psize;
|
||||
mark_inuse_foot(m, p, psize);
|
||||
chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
|
||||
chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
|
||||
|
||||
if (mm < m->least_addr)
|
||||
if (m->least_addr == 0 || mm < m->least_addr)
|
||||
m->least_addr = mm;
|
||||
if ((m->footprint += mmsize) > m->max_footprint)
|
||||
m->max_footprint = m->footprint;
|
||||
|
@ -3756,7 +3753,7 @@ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
|
|||
(oldsize - nb) <= (mparams.granularity << 1))
|
||||
return oldp;
|
||||
else {
|
||||
size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
|
||||
size_t offset = oldp->prev_foot;
|
||||
size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
|
||||
size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
|
||||
char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
|
||||
|
@ -3764,7 +3761,7 @@ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
|
|||
if (cp != CMFAIL) {
|
||||
mchunkptr newp = (mchunkptr)(cp + offset);
|
||||
size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
|
||||
newp->head = (psize|CINUSE_BIT);
|
||||
newp->head = psize;
|
||||
mark_inuse_foot(m, newp, psize);
|
||||
chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
|
||||
chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
|
||||
|
@ -3853,7 +3850,7 @@ static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
|
|||
set_size_and_pinuse_of_free_chunk(q, dsize);
|
||||
}
|
||||
else {
|
||||
if (!cinuse(oldfirst)) {
|
||||
if (!is_inuse(oldfirst)) {
|
||||
size_t nsize = chunksize(oldfirst);
|
||||
unlink_chunk(m, oldfirst, nsize);
|
||||
oldfirst = chunk_plus_offset(oldfirst, nsize);
|
||||
|
@ -3931,8 +3928,8 @@ static void* sys_alloc(mstate m, size_t nb) {
|
|||
|
||||
ensure_initialization();
|
||||
|
||||
/* Directly map large chunks */
|
||||
if (use_mmap(m) && nb >= mparams.mmap_threshold) {
|
||||
/* Directly map large chunks, but only if already initialized */
|
||||
if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
|
||||
void* mem = mmap_alloc(m, nb);
|
||||
if (mem != 0)
|
||||
return mem;
|
||||
|
@ -4026,7 +4023,7 @@ static void* sys_alloc(mstate m, size_t nb) {
|
|||
if (mp != CMFAIL) {
|
||||
tbase = mp;
|
||||
tsize = rsize;
|
||||
mmap_flag = IS_MMAPPED_BIT;
|
||||
mmap_flag = USE_MMAP_BIT;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4056,7 +4053,9 @@ static void* sys_alloc(mstate m, size_t nb) {
|
|||
m->max_footprint = m->footprint;
|
||||
|
||||
if (!is_initialized(m)) { /* first-time initialization */
|
||||
m->seg.base = m->least_addr = tbase;
|
||||
if (m->least_addr == 0 || tbase < m->least_addr)
|
||||
m->least_addr = tbase;
|
||||
m->seg.base = tbase;
|
||||
m->seg.size = tsize;
|
||||
m->seg.sflags = mmap_flag;
|
||||
m->magic = mparams.magic;
|
||||
|
@ -4082,7 +4081,7 @@ static void* sys_alloc(mstate m, size_t nb) {
|
|||
sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
|
||||
if (sp != 0 &&
|
||||
!is_extern_segment(sp) &&
|
||||
(sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
|
||||
(sp->sflags & USE_MMAP_BIT) == mmap_flag &&
|
||||
segment_holds(sp, m->top)) { /* append */
|
||||
sp->size += tsize;
|
||||
init_top(m, m->top, m->topsize + tsize);
|
||||
|
@ -4095,7 +4094,7 @@ static void* sys_alloc(mstate m, size_t nb) {
|
|||
sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
|
||||
if (sp != 0 &&
|
||||
!is_extern_segment(sp) &&
|
||||
(sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
|
||||
(sp->sflags & USE_MMAP_BIT) == mmap_flag) {
|
||||
char* oldbase = sp->base;
|
||||
sp->base = tbase;
|
||||
sp->size += tsize;
|
||||
|
@ -4139,7 +4138,7 @@ static size_t release_unused_segments(mstate m) {
|
|||
mchunkptr p = align_as_chunk(base);
|
||||
size_t psize = chunksize(p);
|
||||
/* Can unmap if first chunk holds entire segment and not pinned */
|
||||
if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
|
||||
if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
|
||||
tchunkptr tp = (tchunkptr)p;
|
||||
assert(segment_holds(sp, (char*)sp));
|
||||
if (p == m->dv) {
|
||||
|
@ -4364,7 +4363,7 @@ static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
|
|||
|
||||
/* Try to either shrink or extend into top. Else malloc-copy-free */
|
||||
|
||||
if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
|
||||
if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
|
||||
ok_next(oldp, next) && ok_pinuse(next))) {
|
||||
size_t nb = request2size(bytes);
|
||||
if (is_mmapped(oldp))
|
||||
|
@ -4375,7 +4374,7 @@ static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
|
|||
if (rsize >= MIN_CHUNK_SIZE) {
|
||||
mchunkptr remainder = chunk_plus_offset(newp, nb);
|
||||
set_inuse(m, newp, nb);
|
||||
set_inuse(m, remainder, rsize);
|
||||
set_inuse_and_pinuse(m, remainder, rsize);
|
||||
extra = chunk2mem(remainder);
|
||||
}
|
||||
}
|
||||
|
@ -4396,6 +4395,11 @@ static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
|
|||
POSTACTION(m);
|
||||
return 0;
|
||||
}
|
||||
#if DEBUG
|
||||
if (newp != 0) {
|
||||
check_inuse_chunk(m, newp); /* Check requires lock */
|
||||
}
|
||||
#endif
|
||||
|
||||
POSTACTION(m);
|
||||
|
||||
|
@ -4403,7 +4407,6 @@ static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
|
|||
if (extra != 0) {
|
||||
internal_free(m, extra);
|
||||
}
|
||||
check_inuse_chunk(m, newp);
|
||||
return chunk2mem(newp);
|
||||
}
|
||||
else {
|
||||
|
@ -4468,7 +4471,7 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
|
|||
|
||||
if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
|
||||
newp->prev_foot = p->prev_foot + leadsize;
|
||||
newp->head = (newsize|CINUSE_BIT);
|
||||
newp->head = newsize;
|
||||
}
|
||||
else { /* Otherwise, give back leader, use the rest */
|
||||
set_inuse(m, newp, newsize);
|
||||
|
@ -4796,13 +4799,12 @@ void dlfree(void* mem) {
|
|||
#endif /* FOOTERS */
|
||||
if (!PREACTION(fm)) {
|
||||
check_inuse_chunk(fm, p);
|
||||
if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
|
||||
if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
|
||||
size_t psize = chunksize(p);
|
||||
mchunkptr next = chunk_plus_offset(p, psize);
|
||||
if (!pinuse(p)) {
|
||||
size_t prevsize = p->prev_foot;
|
||||
if ((prevsize & IS_MMAPPED_BIT) != 0) {
|
||||
prevsize &= ~IS_MMAPPED_BIT;
|
||||
if (is_mmapped(p)) {
|
||||
psize += prevsize + MMAP_FOOT_PAD;
|
||||
if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
|
||||
fm->footprint -= psize;
|
||||
|
@ -4954,8 +4956,8 @@ void* dlpvalloc(size_t bytes) {
|
|||
}
|
||||
|
||||
int dlmalloc_trim(size_t pad) {
|
||||
ensure_initialization();
|
||||
int result = 0;
|
||||
ensure_initialization();
|
||||
if (!PREACTION(gm)) {
|
||||
result = sys_trim(gm, pad);
|
||||
POSTACTION(gm);
|
||||
|
@ -4990,7 +4992,7 @@ int dlmallopt(int param_number, int value) {
|
|||
size_t dlmalloc_usable_size(void* mem) {
|
||||
if (mem != 0) {
|
||||
mchunkptr p = mem2chunk(mem);
|
||||
if (cinuse(p))
|
||||
if (is_inuse(p))
|
||||
return chunksize(p) - overhead_for(p);
|
||||
}
|
||||
return 0;
|
||||
|
@ -5007,7 +5009,7 @@ static mstate init_user_mstate(char* tbase, size_t tsize) {
|
|||
mstate m = (mstate)(chunk2mem(msp));
|
||||
memset(m, 0, msize);
|
||||
INITIAL_LOCK(&m->mutex);
|
||||
msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
|
||||
msp->head = (msize|INUSE_BITS);
|
||||
m->seg.base = m->least_addr = tbase;
|
||||
m->seg.size = m->footprint = m->max_footprint = tsize;
|
||||
m->magic = mparams.magic;
|
||||
|
@ -5035,7 +5037,7 @@ mspace create_mspace(size_t capacity, int locked) {
|
|||
char* tbase = (char*)(CALL_MMAP(tsize));
|
||||
if (tbase != CMFAIL) {
|
||||
m = init_user_mstate(tbase, tsize);
|
||||
m->seg.sflags = IS_MMAPPED_BIT;
|
||||
m->seg.sflags = USE_MMAP_BIT;
|
||||
set_lock(m, locked);
|
||||
}
|
||||
}
|
||||
|
@ -5056,13 +5058,13 @@ mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
|
|||
return (mspace)m;
|
||||
}
|
||||
|
||||
int mspace_mmap_large_chunks(mspace msp, int enable) {
|
||||
int mspace_track_large_chunks(mspace msp, int enable) {
|
||||
int ret = 0;
|
||||
mstate ms = (mstate)msp;
|
||||
if (!PREACTION(ms)) {
|
||||
if (use_mmap(ms))
|
||||
if (!use_mmap(ms))
|
||||
ret = 1;
|
||||
if (enable)
|
||||
if (!enable)
|
||||
enable_mmap(ms);
|
||||
else
|
||||
disable_mmap(ms);
|
||||
|
@ -5081,7 +5083,7 @@ size_t destroy_mspace(mspace msp) {
|
|||
size_t size = sp->size;
|
||||
flag_t flag = sp->sflags;
|
||||
sp = sp->next;
|
||||
if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
|
||||
if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
|
||||
CALL_MUNMAP(base, size) == 0)
|
||||
freed += size;
|
||||
}
|
||||
|
@ -5213,11 +5215,11 @@ void* mspace_malloc(mspace msp, size_t bytes) {
|
|||
}
|
||||
|
||||
void mspace_free(mspace msp, void* mem) {
|
||||
UNREFERENCED_PARAMETER(msp);
|
||||
if (mem != 0) {
|
||||
mchunkptr p = mem2chunk(mem);
|
||||
#if FOOTERS
|
||||
mstate fm = get_mstate_for(p);
|
||||
msp = msp; /* placate people compiling -Wunused */
|
||||
#else /* FOOTERS */
|
||||
mstate fm = (mstate)msp;
|
||||
#endif /* FOOTERS */
|
||||
|
@ -5227,13 +5229,12 @@ void mspace_free(mspace msp, void* mem) {
|
|||
}
|
||||
if (!PREACTION(fm)) {
|
||||
check_inuse_chunk(fm, p);
|
||||
if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
|
||||
if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
|
||||
size_t psize = chunksize(p);
|
||||
mchunkptr next = chunk_plus_offset(p, psize);
|
||||
if (!pinuse(p)) {
|
||||
size_t prevsize = p->prev_foot;
|
||||
if ((prevsize & IS_MMAPPED_BIT) != 0) {
|
||||
prevsize &= ~IS_MMAPPED_BIT;
|
||||
if (is_mmapped(p)) {
|
||||
psize += prevsize + MMAP_FOOT_PAD;
|
||||
if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
|
||||
fm->footprint -= psize;
|
||||
|
@ -5452,7 +5453,7 @@ struct mallinfo mspace_mallinfo(mspace msp) {
|
|||
size_t mspace_usable_size(void* mem) {
|
||||
if (mem != 0) {
|
||||
mchunkptr p = mem2chunk(mem);
|
||||
if (cinuse(p))
|
||||
if (is_inuse(p))
|
||||
return chunksize(p) - overhead_for(p);
|
||||
}
|
||||
return 0;
|
||||
|
@ -5558,13 +5559,15 @@ int mspace_mallopt(int param_number, int value) {
|
|||
|
||||
/* -----------------------------------------------------------------------
|
||||
History:
|
||||
V2.8.4 (not yet released)
|
||||
* Add mspace_mmap_large_chunks; thanks to Jean Brouwers
|
||||
V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
|
||||
* Use zeros instead of prev foot for is_mmapped
|
||||
* Add mspace_track_large_chunks; thanks to Jean Brouwers
|
||||
* Fix set_inuse in internal_realloc; thanks to Jean Brouwers
|
||||
* Fix insufficient sys_alloc padding when using 16byte alignment
|
||||
* Fix bad error check in mspace_footprint
|
||||
* Adaptations for ptmalloc, courtesy of Wolfram Gloger.
|
||||
* Reentrant spin locks, courtesy of Earl Chew and others
|
||||
* Win32 improvements, courtesy of Niall Douglas and Earl Chew
|
||||
* Adaptations for ptmalloc; thanks to Wolfram Gloger.
|
||||
* Reentrant spin locks; thanks to Earl Chew and others
|
||||
* Win32 improvements; thanks to Niall Douglas and Earl Chew
|
||||
* Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
|
||||
* Extension hook in malloc_state
|
||||
* Various small adjustments to reduce warnings on some compilers
|
||||
|
|
|
@ -166,6 +166,12 @@ Optionally can also retrieve pool.
|
|||
*/
|
||||
EXTSPEC void *nedgetvalue(nedpool **p, void *mem) THROWSPEC;
|
||||
|
||||
/* Trims the thread cache for the calling thread, returning any existing cache
|
||||
data to the central pool. Remember to ALWAYS call with zero if you used the
|
||||
system pool. Setting disable to non-zero replicates neddisablethreadcache().
|
||||
*/
|
||||
EXTSPEC void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC;
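A usage sketch (illustrative, not part of the patch), following the rule above of passing zero when only the system pool is in use:

  nedtrimthreadcache(0, 0);    /* hand cached blocks back to the central pool, keep caching  */
  neddisablethreadcache(0);    /* same as nedtrimthreadcache(0, 1): trim and disable         */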
|
||||
|
||||
/* Disables the thread cache for the calling thread, returning any existing cache
|
||||
data to the central pool. Remember to ALWAYS call with zero if you used the
|
||||
system pool.
|
||||
|
|
|
@ -206,7 +206,7 @@ int32_t bitrev(int32_t, int32_t);
|
|||
"beg: shr ebx, 1"\
|
||||
"adc eax, eax"\
|
||||
"dec ecx"\
|
||||
"jnz int16_t beg"\
|
||||
"jnz short beg"\
|
||||
parm [ebx][ecx]\
|
||||
modify nomemory exact [eax ebx ecx]\
|
||||
value [eax]
|
||||
|
@ -263,7 +263,7 @@ static _inline int32_t bitrev(int32_t b, int32_t c)
|
|||
beg: shr edx, 1
|
||||
adc eax, eax
|
||||
sub ecx, 1
|
||||
jnz int16_t beg
|
||||
jnz short beg
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -602,26 +602,26 @@ int32_t Paeth686(int32_t, int32_t, int32_t);
|
|||
void rgbhlineasm(int32_t, int32_t, int32_t, int32_t);
|
||||
#pragma aux rgbhlineasm =\
|
||||
"sub ecx, edx"\
|
||||
"jle int16_t endit"\
|
||||
"jle short endit"\
|
||||
"add edx, offset olinbuf"\
|
||||
"cmp dword ptr trnsrgb, 0"\
|
||||
"jz int16_t begit2"\
|
||||
"jz short begit2"\
|
||||
"begit: mov eax, dword ptr [ecx+edx]"\
|
||||
"or eax, 0xff000000"\
|
||||
"cmp eax, dword ptr trnsrgb"\
|
||||
"jne int16_t skipit"\
|
||||
"jne short skipit"\
|
||||
"and eax, 0xffffff"\
|
||||
"skipit: sub ecx, 3"\
|
||||
"mov [edi], eax"\
|
||||
"lea edi, [edi+ebx]"\
|
||||
"jnz int16_t begit"\
|
||||
"jmp int16_t endit"\
|
||||
"jnz short begit"\
|
||||
"jmp short endit"\
|
||||
"begit2: mov eax, dword ptr [ecx+edx]"\
|
||||
"or eax, 0xff000000"\
|
||||
"sub ecx, 3"\
|
||||
"mov [edi], eax"\
|
||||
"lea edi, [edi+ebx]"\
|
||||
"jnz int16_t begit2"\
|
||||
"jnz short begit2"\
|
||||
"endit:"\
|
||||
parm [ecx][edx][edi][ebx]\
|
||||
modify exact [eax ecx edi]\
|
||||
|
@ -630,14 +630,14 @@ void rgbhlineasm(int32_t, int32_t, int32_t, int32_t);
|
|||
void pal8hlineasm(int32_t, int32_t, int32_t, int32_t);
|
||||
#pragma aux pal8hlineasm =\
|
||||
"sub ecx, edx"\
|
||||
"jle int16_t endit"\
|
||||
"jle short endit"\
|
||||
"add edx, offset olinbuf"\
|
||||
"begit: movzx eax, byte ptr [ecx+edx]"\
|
||||
"mov eax, dword ptr palcol[eax*4]"\
|
||||
"dec ecx"\
|
||||
"mov [edi], eax"\
|
||||
"lea edi, [edi+ebx]"\
|
||||
"jnz int16_t begit"\
|
||||
"jnz short begit"\
|
||||
"endit:"\
|
||||
parm [ecx][edx][edi][ebx]\
|
||||
modify exact [eax ecx edi]\
|
||||
|
@ -684,29 +684,29 @@ static _inline void rgbhlineasm(int32_t c, int32_t d, int32_t t, int32_t b)
|
|||
mov edi, t
|
||||
mov ebx, b
|
||||
sub ecx, edx
|
||||
jle int16_t endit
|
||||
jle short endit
|
||||
add edx, offset olinbuf
|
||||
cmp dword ptr trnsrgb, 0
|
||||
jz int16_t begit2
|
||||
jz short begit2
|
||||
begit:
|
||||
mov eax, dword ptr [ecx+edx]
|
||||
or eax, 0xff000000
|
||||
cmp eax, dword ptr trnsrgb
|
||||
jne int16_t skipit
|
||||
jne short skipit
|
||||
and eax, 0xffffff
|
||||
skipit:
|
||||
sub ecx, 3
|
||||
mov [edi], eax
|
||||
lea edi, [edi+ebx]
|
||||
jnz int16_t begit
|
||||
jmp int16_t endit
|
||||
jnz short begit
|
||||
jmp short endit
|
||||
begit2:
|
||||
mov eax, dword ptr [ecx+edx]
|
||||
or eax, 0xff000000
|
||||
sub ecx, 3
|
||||
mov [edi], eax
|
||||
lea edi, [edi+ebx]
|
||||
jnz int16_t begit2
|
||||
jnz short begit2
|
||||
endit:
|
||||
pop edi
|
||||
pop ebx
|
||||
|
@ -720,7 +720,7 @@ static _inline void pal8hlineasm(int32_t c, int32_t d, int32_t t, int32_t b)
|
|||
mov ecx, c
|
||||
mov edx, d
|
||||
sub ecx, edx
|
||||
jle int16_t endit
|
||||
jle short endit
|
||||
|
||||
push ebx
|
||||
push edi
|
||||
|
@ -732,7 +732,7 @@ begit:movzx eax, byte ptr [ecx+edx]
|
|||
sub ecx, 1
|
||||
mov [edi], eax
|
||||
lea edi, [edi+ebx]
|
||||
jnz int16_t begit
|
||||
jnz short begit
|
||||
pop edi
|
||||
pop ebx
|
||||
endit:
|
||||
|
|
|
@ -725,7 +725,7 @@ void *nedgetvalue(nedpool **p, void *mem) THROWSPEC
|
|||
return np->uservalue;
|
||||
}
|
||||
|
||||
void neddisablethreadcache(nedpool *p) THROWSPEC
|
||||
void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC
|
||||
{
|
||||
int mycache;
|
||||
if (!p)
|
||||
|
@ -736,7 +736,7 @@ void neddisablethreadcache(nedpool *p) THROWSPEC
|
|||
mycache=(int)(size_t) TLSGET(p->mycache);
|
||||
if (!mycache)
|
||||
{ /* Set to mspace 0 */
|
||||
if (TLSSET(p->mycache, (void *)-1)) abort();
|
||||
if (disable && TLSSET(p->mycache, (void *)-1)) abort();
|
||||
}
|
||||
else if (mycache>0)
|
||||
{ /* Set to last used mspace */
|
||||
|
@ -745,16 +745,23 @@ void neddisablethreadcache(nedpool *p) THROWSPEC
|
|||
printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n",
|
||||
100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs);
|
||||
#endif
|
||||
if (TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort();
|
||||
if (disable && TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort();
|
||||
tc->frees++;
|
||||
RemoveCacheEntries(p, tc, 0);
|
||||
assert(!tc->freeInCache);
|
||||
tc->mymspace=-1;
|
||||
tc->threadid=0;
|
||||
mspace_free(0, p->caches[mycache-1]);
|
||||
p->caches[mycache-1]=0;
|
||||
if (disable)
|
||||
{
|
||||
tc->mymspace=-1;
|
||||
tc->threadid=0;
|
||||
mspace_free(0, p->caches[mycache-1]);
|
||||
p->caches[mycache-1]=0;
|
||||
}
|
||||
}
|
||||
}
|
||||
void neddisablethreadcache(nedpool *p) THROWSPEC
|
||||
{
|
||||
nedtrimthreadcache(p, 1);
|
||||
}
|
||||
|
||||
#define GETMSPACE(m,p,tc,ms,s,action) \
|
||||
do \
|
||||
|
@ -783,12 +790,12 @@ static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymsp
|
|||
}
|
||||
mycache=(int)(size_t) TLSGET((*p)->mycache);
|
||||
if (mycache>0)
|
||||
{
|
||||
{ /* Already have a cache */
|
||||
*tc=(*p)->caches[mycache-1];
|
||||
*mymspace=(*tc)->mymspace;
|
||||
}
|
||||
else if (!mycache)
|
||||
{
|
||||
{ /* Need to allocate a new cache */
|
||||
*tc=AllocCache(*p);
|
||||
if (!*tc)
|
||||
{ /* Disable */
|
||||
|
@ -799,12 +806,12 @@ static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymsp
|
|||
*mymspace=(*tc)->mymspace;
|
||||
}
|
||||
else
|
||||
{
|
||||
{ /* Cache disabled, but we do have an assigned thread pool */
|
||||
*tc=0;
|
||||
*mymspace=-mycache-1;
|
||||
}
|
||||
assert(*mymspace>=0);
|
||||
assert((long)(size_t)CURRENT_THREAD==(*tc)->threadid);
|
||||
assert(!(*tc) || (long)(size_t)CURRENT_THREAD==(*tc)->threadid);
|
||||
#ifdef FULLSANITYCHECKS
|
||||
if (*tc)
|
||||
{
|
||||
|
@ -918,7 +925,7 @@ void nedpfree(nedpool *p, void *mem) THROWSPEC
|
|||
struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC
|
||||
{
|
||||
int n;
|
||||
struct mallinfo ret ={0,0,0,0,0,0,0,0,0,0};
|
||||
struct mallinfo ret={0,0,0,0,0,0,0,0,0,0};
|
||||
if (!p) { p=&syspool; if (!syspool.threads) InitPool(&syspool, 0, -1); }
|
||||
for (n=0; p->m[n]; n++)
|
||||
{
|
||||
|
|
|
@ -395,9 +395,9 @@ int32_t A_MoveSprite(int32_t spritenum, const vec3_t *change, uint32_t cliptype)
|
|||
case 1:
|
||||
if (daz >= ActorExtra[spritenum].floorz)
|
||||
{
|
||||
if (totalclock > ActorExtra[spritenum].temp_data[9])
|
||||
if (totalclock > ActorExtra[spritenum].lasttransport)
|
||||
{
|
||||
ActorExtra[spritenum].temp_data[9] = totalclock + (TICSPERFRAME<<2);
|
||||
ActorExtra[spritenum].lasttransport = totalclock + (TICSPERFRAME<<2);
|
||||
|
||||
sprite[spritenum].x += (sprite[OW].x-SX);
|
||||
sprite[spritenum].y += (sprite[OW].y-SY);
|
||||
|
@ -415,9 +415,9 @@ int32_t A_MoveSprite(int32_t spritenum, const vec3_t *change, uint32_t cliptype)
|
|||
case 2:
|
||||
if (daz <= ActorExtra[spritenum].ceilingz)
|
||||
{
|
||||
if (totalclock > ActorExtra[spritenum].temp_data[9])
|
||||
if (totalclock > ActorExtra[spritenum].lasttransport)
|
||||
{
|
||||
ActorExtra[spritenum].temp_data[9] = totalclock + (TICSPERFRAME<<2);
|
||||
ActorExtra[spritenum].lasttransport = totalclock + (TICSPERFRAME<<2);
|
||||
sprite[spritenum].x += (sprite[OW].x-SX);
|
||||
sprite[spritenum].y += (sprite[OW].y-SY);
|
||||
sprite[spritenum].z = sector[sprite[OW].sectnum].floorz - daz + sector[sprite[i].sectnum].ceilingz;
|
||||
|
@ -3278,7 +3278,7 @@ static void G_MoveTransports(void)
|
|||
|
||||
ll = klabs(sprite[j].zvel);
|
||||
|
||||
if (totalclock > ActorExtra[j].temp_data[9])
|
||||
if (totalclock > ActorExtra[j].lasttransport)
|
||||
{
|
||||
warpspriteto = 0;
|
||||
if (ll && sectlotag == 2 && sprite[j].z < (sector[sect].ceilingz+ll))
|
||||
|
@ -3386,7 +3386,7 @@ static void G_MoveTransports(void)
|
|||
}
|
||||
break;
|
||||
case 1:
|
||||
ActorExtra[j].temp_data[9] = totalclock + (TICSPERFRAME<<2);
|
||||
ActorExtra[j].lasttransport = totalclock + (TICSPERFRAME<<2);
|
||||
|
||||
sprite[j].x += (sprite[OW].x-SX);
|
||||
sprite[j].y += (sprite[OW].y-SY);
|
||||
|
@ -3400,7 +3400,7 @@ static void G_MoveTransports(void)
|
|||
|
||||
break;
|
||||
case 2:
|
||||
ActorExtra[j].temp_data[9] = totalclock + (TICSPERFRAME<<2);
|
||||
ActorExtra[j].lasttransport = totalclock + (TICSPERFRAME<<2);
|
||||
sprite[j].x += (sprite[OW].x-SX);
|
||||
sprite[j].y += (sprite[OW].y-SY);
|
||||
sprite[j].z = sector[sprite[OW].sectnum].floorz;
|
||||
|
|
|
@ -540,7 +540,7 @@ spriteinterpolate sprpos[MAXSPRITES];
|
|||
typedef struct {
|
||||
int32_t bposx,bposy,bposz;
|
||||
int32_t floorz,ceilingz,lastvx,lastvy;
|
||||
int32_t flags;
|
||||
int32_t flags,lasttransport,shootzvel;
|
||||
intptr_t temp_data[10]; // sometimes used to hold pointers to con code
|
||||
int16_t picnum,ang,extra,owner,movflag;
|
||||
int16_t tempang,actorstayput,dispicnum;
|
||||
|
|
|
@ -4148,8 +4148,8 @@ void G_SE40(int32_t smoothratio)
|
|||
if (sect != -1)
|
||||
{
|
||||
int32_t renderz, picnum;
|
||||
int16_t backupstat[numsectors];
|
||||
int32_t backupz[numsectors];
|
||||
int16_t backupstat[MAXSECTORS];
|
||||
int32_t backupz[MAXSECTORS];
|
||||
int32_t i;
|
||||
int32_t pix_diff, newz;
|
||||
// initprintf("drawing ror\n");
|
||||
|
|
|
@ -2068,16 +2068,16 @@ static int32_t X_DoExecute(register int32_t once)
|
|||
|
||||
if (tw == CON_ZSHOOT || tw == CON_EZSHOOT)
|
||||
{
|
||||
ActorExtra[vm.g_i].temp_data[9] = Gv_GetVarX(*insptr++);
|
||||
if (ActorExtra[vm.g_i].temp_data[9] == 0)
|
||||
ActorExtra[vm.g_i].temp_data[9] = 1;
|
||||
ActorExtra[vm.g_i].shootzvel = Gv_GetVarX(*insptr++);
|
||||
if (ActorExtra[vm.g_i].shootzvel == 0)
|
||||
ActorExtra[vm.g_i].shootzvel = 1;
|
||||
}
|
||||
|
||||
if ((vm.g_sp->sectnum < 0 || vm.g_sp->sectnum >= numsectors) /* && g_scriptSanityChecks */)
|
||||
{
|
||||
OSD_Printf(CON_ERROR "Invalid sector %d\n",g_errorLineNum,keyw[g_tw],vm.g_sp->sectnum);
|
||||
insptr++;
|
||||
ActorExtra[vm.g_i].temp_data[9]=0;
|
||||
ActorExtra[vm.g_i].shootzvel=0;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -2086,7 +2086,7 @@ static int32_t X_DoExecute(register int32_t once)
|
|||
if (tw == CON_EZSHOOT || tw == CON_ESHOOT)
|
||||
aGameVars[g_iReturnVarID].val.lValue = j;
|
||||
|
||||
ActorExtra[vm.g_i].temp_data[9]=0;
|
||||
ActorExtra[vm.g_i].shootzvel=0;
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -2102,23 +2102,23 @@ static int32_t X_DoExecute(register int32_t once)
|
|||
|
||||
if (tw == CON_ZSHOOTVAR || tw == CON_EZSHOOTVAR)
|
||||
{
|
||||
ActorExtra[vm.g_i].temp_data[9] = Gv_GetVarX(*insptr++);
|
||||
if (ActorExtra[vm.g_i].temp_data[9] == 0)
|
||||
ActorExtra[vm.g_i].temp_data[9] = 1;
|
||||
ActorExtra[vm.g_i].shootzvel = Gv_GetVarX(*insptr++);
|
||||
if (ActorExtra[vm.g_i].shootzvel == 0)
|
||||
ActorExtra[vm.g_i].shootzvel = 1;
|
||||
}
|
||||
j=Gv_GetVarX(*insptr++);
|
||||
|
||||
if ((vm.g_sp->sectnum < 0 || vm.g_sp->sectnum >= numsectors) /* && g_scriptSanityChecks */)
|
||||
{
|
||||
OSD_Printf(CON_ERROR "Invalid sector %d\n",g_errorLineNum,keyw[g_tw],vm.g_sp->sectnum);
|
||||
ActorExtra[vm.g_i].temp_data[9]=0;
|
||||
ActorExtra[vm.g_i].shootzvel=0;
|
||||
break;
|
||||
}
|
||||
|
||||
lReturn = A_Shoot(vm.g_i, j);
|
||||
if (tw == CON_ESHOOTVAR || tw == CON_EZSHOOTVAR)
|
||||
aGameVars[g_iReturnVarID].val.lValue = lReturn;
|
||||
ActorExtra[vm.g_i].temp_data[9]=0;
|
||||
ActorExtra[vm.g_i].shootzvel=0;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -460,7 +460,7 @@ int32_t A_Shoot(int32_t i,int32_t atwith)
|
|||
}
|
||||
}
|
||||
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
hitscan((const vec3_t *)&srcvect,sect,
|
||||
sintable[(sa+512)&2047],
|
||||
sintable[sa&2047],zvel<<6,
|
||||
|
@ -682,7 +682,7 @@ int32_t A_Shoot(int32_t i,int32_t atwith)
|
|||
if (!g_player[p].ps->auto_aim)
|
||||
{
|
||||
zvel = (100-g_player[p].ps->horiz-g_player[p].ps->horizoff)<<5;
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
hitscan((const vec3_t *)&srcvect,sect,sintable[(sa+512)&2047],sintable[sa&2047],
|
||||
zvel<<6,&hitinfo,CLIPMASK1);
|
||||
if (hitinfo.hitsprite != -1)
|
||||
|
@ -734,7 +734,7 @@ int32_t A_Shoot(int32_t i,int32_t atwith)
|
|||
if (ProjectileData[atwith].cstat >= 0) s->cstat &= ~ProjectileData[atwith].cstat;
|
||||
else s->cstat &= ~257;
|
||||
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
hitscan((const vec3_t *)&srcvect,sect,
|
||||
sintable[(sa+512)&2047],
|
||||
sintable[sa&2047],
|
||||
|
@ -1038,7 +1038,7 @@ DOSKIPBULLETHOLE:
|
|||
sx+(sintable[(348+sa+512)&2047]/448),
|
||||
sy+(sintable[(sa+348)&2047]/448),
|
||||
sz-(1<<8),atwith,0,14,14,sa,vel,zvel,i,4);*/
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
j = A_InsertSprite(sect,
|
||||
srcvect.x+(sintable[(348+sa+512)&2047]/ProjectileData[atwith].offset),
|
||||
srcvect.y+(sintable[(sa+348)&2047]/ProjectileData[atwith].offset),
|
||||
|
@ -1115,7 +1115,7 @@ DOSKIPBULLETHOLE:
|
|||
}
|
||||
}
|
||||
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
hitscan((const vec3_t *)&srcvect,sect,
|
||||
sintable[(sa+512)&2047],
|
||||
sintable[sa&2047],zvel<<6,
|
||||
|
@ -1270,7 +1270,7 @@ DOSKIPBULLETHOLE:
|
|||
if (!g_player[p].ps->auto_aim)
|
||||
{
|
||||
zvel = (100-g_player[p].ps->horiz-g_player[p].ps->horizoff)<<5;
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
hitscan((const vec3_t *)&srcvect,sect,sintable[(sa+512)&2047],sintable[sa&2047],
|
||||
zvel<<6,&hitinfo,CLIPMASK1);
|
||||
if (hitinfo.hitsprite != -1)
|
||||
|
@ -1321,7 +1321,7 @@ DOSKIPBULLETHOLE:
|
|||
}
|
||||
|
||||
s->cstat &= ~257;
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
hitscan((const vec3_t *)&srcvect,sect,
|
||||
sintable[(sa+512)&2047],
|
||||
sintable[sa&2047],
|
||||
|
@ -1558,7 +1558,7 @@ SKIPBULLETHOLE:
|
|||
if (hitinfo.pos.x == 0) hitinfo.pos.x++;
|
||||
zvel = ((g_player[j].ps->oposz - srcvect.z + (3<<8))*vel) / hitinfo.pos.x;
|
||||
}
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
oldzvel = zvel;
|
||||
|
||||
if (atwith == SPIT)
|
||||
|
@ -1681,7 +1681,7 @@ SKIPBULLETHOLE:
|
|||
if (p >= 0 && j >= 0)
|
||||
l = j;
|
||||
else l = -1;
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
j = A_InsertSprite(sect,
|
||||
srcvect.x+(sintable[(348+sa+512)&2047]/448),
|
||||
srcvect.y+(sintable[(sa+348)&2047]/448),
|
||||
|
@ -1766,7 +1766,7 @@ SKIPBULLETHOLE:
|
|||
if (p >= 0)
|
||||
zvel = (100-g_player[p].ps->horiz-g_player[p].ps->horizoff)*32;
|
||||
else zvel = 0;
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
|
||||
srcvect.z -= g_player[p].ps->pyoff;
|
||||
hitscan((const vec3_t *)&srcvect,sect,
|
||||
|
@ -1830,7 +1830,7 @@ SKIPBULLETHOLE:
|
|||
if (zvel < -4096)
|
||||
zvel = -2048;
|
||||
vel = x>>4;
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
A_InsertSprite(sect,
|
||||
srcvect.x+(sintable[(512+sa+512)&2047]>>8),
|
||||
srcvect.y+(sintable[(sa+512)&2047]>>8),
|
||||
|
@ -1892,7 +1892,7 @@ SKIPBULLETHOLE:
|
|||
if (sect < 0) break;
|
||||
|
||||
s->cstat &= ~257;
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
hitscan((const vec3_t *)&srcvect,sect,
|
||||
sintable[(sa+512)&2047],
|
||||
sintable[sa&2047],
|
||||
|
@ -1971,7 +1971,7 @@ SKIPBULLETHOLE:
|
|||
zvel = ((g_player[j].ps->oposz-srcvect.z)*512) / l ;
|
||||
}
|
||||
else zvel = 0;
|
||||
if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
|
||||
if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
|
||||
j = A_InsertSprite(sect,
|
||||
srcvect.x+(sintable[(512+sa+512)&2047]>>12),
|
||||
srcvect.y+(sintable[(sa+512)&2047]>>12),
|
||||
|
|