Fixes a couple of gameplay bugs introduced with ROR, fixes an MSVC compile issue with the newer version of kplib.c, and updates nedmalloc with changes from the latest version in their SVN

git-svn-id: https://svn.eduke32.com/eduke32@1452 1a8010ca-5511-0410-912e-c29ae57300e0
terminx 2009-07-07 00:42:06 +00:00
parent 9190ee2139
commit ad0179da42
9 changed files with 218 additions and 202 deletions

View file

@ -4,7 +4,7 @@
http://creativecommons.org/licenses/publicdomain. Send questions, http://creativecommons.org/licenses/publicdomain. Send questions,
comments, complaints, performance data, etc to dl@cs.oswego.edu comments, complaints, performance data, etc to dl@cs.oswego.edu
* Version pre-2.8.4 Mon Nov 27 11:22:37 2006 (dl at gee) * Version 2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
Note: There may be an updated version of this malloc obtainable at Note: There may be an updated version of this malloc obtainable at
ftp://gee.cs.oswego.edu/pub/misc/malloc.c ftp://gee.cs.oswego.edu/pub/misc/malloc.c
@ -245,7 +245,8 @@ USE_LOCKS default: 0 (false)
pthread or WIN32 mutex lock/unlock. (If set true, this can be pthread or WIN32 mutex lock/unlock. (If set true, this can be
overridden on a per-mspace basis for mspace versions.) If set to a overridden on a per-mspace basis for mspace versions.) If set to a
non-zero value other than 1, locks are used, but their non-zero value other than 1, locks are used, but their
implementation is left out, so lock functions must be supplied manually. implementation is left out, so lock functions must be supplied manually,
as described below.
USE_SPIN_LOCKS default: 1 iff USE_LOCKS and on x86 using gcc or MSC USE_SPIN_LOCKS default: 1 iff USE_LOCKS and on x86 using gcc or MSC
If true, uses custom spin locks for locking. This is currently If true, uses custom spin locks for locking. This is currently
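For orientation (nothing below is from the diff): with these defaults, a build usually just sets USE_LOCKS before pulling in the allocator and lets USE_SPIN_LOCKS fall out of SPIN_LOCKS_AVAILABLE. A minimal sketch, assuming the embedded dlmalloc is included as malloc.c.h the way nedmalloc does it:

/* Hypothetical build-time configuration, not part of this commit. */
#define USE_LOCKS 1        /* serialize access to gm and each mspace        */
#define USE_SPIN_LOCKS 1   /* honoured only where SPIN_LOCKS_AVAILABLE is 1 */
#include "malloc.c.h"      /* nedmalloc's embedded dlmalloc                 */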
@ -495,7 +496,6 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
#endif /* WIN32 */ #endif /* WIN32 */
#ifdef WIN32 #ifdef WIN32
#define WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN
// #define _WIN32_WINNT 0x403
#include <windows.h> #include <windows.h>
#define HAVE_MMAP 1 #define HAVE_MMAP 1
#define HAVE_MORECORE 0 #define HAVE_MORECORE 0
@ -532,6 +532,12 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
#include <sys/types.h> /* For size_t */ #include <sys/types.h> /* For size_t */
#endif /* LACKS_SYS_TYPES_H */ #endif /* LACKS_SYS_TYPES_H */
#if (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310)
#define SPIN_LOCKS_AVAILABLE 1
#else
#define SPIN_LOCKS_AVAILABLE 0
#endif
/* The maximum possible size_t value has all bits set */ /* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T (~(size_t)0) #define MAX_SIZE_T (~(size_t)0)
@ -566,11 +572,11 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
#define USE_LOCKS 0 #define USE_LOCKS 0
#endif /* USE_LOCKS */ #endif /* USE_LOCKS */
#ifndef USE_SPIN_LOCKS #ifndef USE_SPIN_LOCKS
#if USE_LOCKS && (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310) #if USE_LOCKS && SPIN_LOCKS_AVAILABLE
#define USE_SPIN_LOCKS 1 #define USE_SPIN_LOCKS 1
#else #else
#define USE_SPIN_LOCKS 0 #define USE_SPIN_LOCKS 0
#endif /* USE_LOCKS && ... */ #endif /* USE_LOCKS && SPIN_LOCKS_AVAILABLE. */
#endif /* USE_SPIN_LOCKS */ #endif /* USE_SPIN_LOCKS */
#ifndef INSECURE #ifndef INSECURE
#define INSECURE 0 #define INSECURE 0
@ -1144,15 +1150,17 @@ size_t destroy_mspace(mspace msp);
mspace create_mspace_with_base(void* base, size_t capacity, int locked); mspace create_mspace_with_base(void* base, size_t capacity, int locked);
/* /*
mspace_mmap_large_chunks controls whether requests for large chunks mspace_track_large_chunks controls whether requests for large chunks
are allocated in their own mmapped regions, separate from others in are allocated in their own untracked mmapped regions, separate from
this mspace. By default this is enabled, which reduces others in this mspace. By default large chunks are not tracked,
fragmentation. However, such chunks are not necessarily released to which reduces fragmentation. However, such chunks are not
the system upon destroy_mspace. Disabling by setting to false may necessarily released to the system upon destroy_mspace. Enabling
increase fragmentation, but avoids leakage when relying on tracking by setting to true may increase fragmentation, but avoids
destroy_mspace to release all memory allocated using this space. leakage when relying on destroy_mspace to release all memory
allocated using this space. The function returns the previous
setting.
*/ */
int mspace_mmap_large_chunks(mspace msp, int enable); int mspace_track_large_chunks(mspace msp, int enable);
/* /*
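A hedged usage sketch for the renamed entry point above; only the function names come from this header, the allocation size and flow are illustrative:

#include "malloc.c.h"   /* assumes MSPACES is enabled in this build */

static void track_large_example(void) {
  mspace msp = create_mspace(0, 1);               /* default capacity, locked     */
  int prev = mspace_track_large_chunks(msp, 1);   /* returns the previous setting */
  void *big = mspace_malloc(msp, 4u << 20);       /* large request, now tracked   */
  mspace_free(msp, big);
  (void)destroy_mspace(msp);                      /* tracked regions are released */
  (void)prev;
}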
@ -1280,7 +1288,7 @@ int mspace_mallopt(int, int);
#ifndef LACKS_ERRNO_H #ifndef LACKS_ERRNO_H
#include <errno.h> /* for MALLOC_FAILURE_ACTION */ #include <errno.h> /* for MALLOC_FAILURE_ACTION */
#endif /* LACKS_ERRNO_H */ #endif /* LACKS_ERRNO_H */
#if FOOTERS #if FOOTERS || DEBUG
#include <time.h> /* for magic initialization */ #include <time.h> /* for magic initialization */
#endif /* FOOTERS */ #endif /* FOOTERS */
#ifndef LACKS_STDLIB_H #ifndef LACKS_STDLIB_H
@ -1288,6 +1296,7 @@ int mspace_mallopt(int, int);
#endif /* LACKS_STDLIB_H */ #endif /* LACKS_STDLIB_H */
#ifdef DEBUG #ifdef DEBUG
#if ABORT_ON_ASSERT_FAILURE #if ABORT_ON_ASSERT_FAILURE
#undef assert
#define assert(x) if(!(x)) ABORT #define assert(x) if(!(x)) ABORT
#else /* ABORT_ON_ASSERT_FAILURE */ #else /* ABORT_ON_ASSERT_FAILURE */
#include <assert.h> #include <assert.h>
@ -1308,7 +1317,14 @@ int mspace_mallopt(int, int);
#endif /* USE_BUILTIN_FFS */ #endif /* USE_BUILTIN_FFS */
#if HAVE_MMAP #if HAVE_MMAP
#ifndef LACKS_SYS_MMAN_H #ifndef LACKS_SYS_MMAN_H
/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
#if (defined(linux) && !defined(__USE_GNU))
#define __USE_GNU 1
#include <sys/mman.h> /* for mmap */ #include <sys/mman.h> /* for mmap */
#undef __USE_GNU
#else
#include <sys/mman.h> /* for mmap */
#endif /* linux */
#endif /* LACKS_SYS_MMAN_H */ #endif /* LACKS_SYS_MMAN_H */
#ifndef LACKS_FCNTL_H #ifndef LACKS_FCNTL_H
#include <fcntl.h> #include <fcntl.h>
@ -1602,7 +1618,6 @@ static FORCEINLINE int win32munmap(void* ptr, size_t size) {
* Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
*/ */
#if HAVE_MMAP #if HAVE_MMAP
#define IS_MMAPPED_BIT (SIZE_T_ONE)
#define USE_MMAP_BIT (SIZE_T_ONE) #define USE_MMAP_BIT (SIZE_T_ONE)
#ifdef MMAP #ifdef MMAP
@ -1621,7 +1636,6 @@ static FORCEINLINE int win32munmap(void* ptr, size_t size) {
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
#endif /* DIRECT_MMAP */ #endif /* DIRECT_MMAP */
#else /* HAVE_MMAP */ #else /* HAVE_MMAP */
#define IS_MMAPPED_BIT (SIZE_T_ZERO)
#define USE_MMAP_BIT (SIZE_T_ZERO) #define USE_MMAP_BIT (SIZE_T_ZERO)
#define MMAP(s) MFAIL #define MMAP(s) MFAIL
@ -1671,29 +1685,33 @@ static FORCEINLINE int win32munmap(void* ptr, size_t size) {
Because lock-protected regions generally have bounded times, it is Because lock-protected regions generally have bounded times, it is
OK to use the supplied simple spinlocks in the custom versions for OK to use the supplied simple spinlocks in the custom versions for
x86. x86. Spinlocks are likely to improve performance for lightly
contended applications, but worsen performance under heavy
contention.
If USE_LOCKS is > 1, the definitions of lock routines here are If USE_LOCKS is > 1, the definitions of lock routines here are
bypassed, in which case you will need to define at least bypassed, in which case you will need to define the type MLOCK_T,
INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK and possibly TRY_LOCK and at least INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK and possibly
(which is not used in this malloc, but commonly needed in TRY_LOCK (which is not used in this malloc, but commonly needed in
extensions.) extensions.) You must also declare a
static MLOCK_T malloc_global_mutex = { initialization values };.
*/ */
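To make the requirement above concrete, a sketch (not part of this commit) of a user-supplied lock layer built on plain pthread mutexes, for builds that set USE_LOCKS to a value greater than 1:

#include <pthread.h>

#define USE_LOCKS 2                         /* bypass the built-in lock code   */
#define MLOCK_T            pthread_mutex_t
#define INITIAL_LOCK(sl)   pthread_mutex_init(sl, NULL)   /* 0 on success      */
#define ACQUIRE_LOCK(sl)   pthread_mutex_lock(sl)         /* 0 on success      */
#define RELEASE_LOCK(sl)   pthread_mutex_unlock(sl)
#define TRY_LOCK(sl)       (pthread_mutex_trylock(sl) == 0)
static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;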
#if USE_LOCKS == 1 #if USE_LOCKS == 1
#if USE_SPIN_LOCKS #if USE_SPIN_LOCKS && SPIN_LOCKS_AVAILABLE
#ifndef WIN32 #ifndef WIN32
/* Custom pthread-style spin locks on x86 and x64 for gcc */ /* Custom pthread-style spin locks on x86 and x64 for gcc */
struct pthread_mlock_t { struct pthread_mlock_t {
volatile unsigned int l; volatile unsigned int l;
volatile unsigned int c; unsigned int c;
volatile pthread_t threadid; pthread_t threadid;
}; };
#define MLOCK_T struct pthread_mlock_t #define MLOCK_T struct pthread_mlock_t
#define CURRENT_THREAD pthread_self() #define CURRENT_THREAD pthread_self()
#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0) #define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
#define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl) #define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl)
#define RELEASE_LOCK(sl) pthread_release_lock(sl) #define RELEASE_LOCK(sl) pthread_release_lock(sl)
#define TRY_LOCK(sl) pthread_try_lock(sl) #define TRY_LOCK(sl) pthread_try_lock(sl)
@ -1722,10 +1740,11 @@ static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
: "memory", "cc"); : "memory", "cc");
if (!ret) { if (!ret) {
assert(!sl->threadid); assert(!sl->threadid);
sl->c = 1;
sl->threadid = CURRENT_THREAD; sl->threadid = CURRENT_THREAD;
sl->c = 1;
return 0; return 0;
} }
}
if ((++spins & SPINS_PER_YIELD) == 0) { if ((++spins & SPINS_PER_YIELD) == 0) {
#if defined (__SVR4) && defined (__sun) /* solaris */ #if defined (__SVR4) && defined (__sun) /* solaris */
thr_yield(); thr_yield();
@ -1739,14 +1758,13 @@ static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
} }
} }
} }
}
static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) { static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
assert(sl->l != 0); volatile unsigned int* lp = &sl->l;
assert(*lp != 0);
assert(sl->threadid == CURRENT_THREAD); assert(sl->threadid == CURRENT_THREAD);
if (--sl->c == 0) { if (--sl->c == 0) {
sl->threadid = 0; sl->threadid = 0;
volatile unsigned int* lp = &sl->l;
int prev = 0; int prev = 0;
int ret; int ret;
__asm__ __volatile__ ("lock; xchgl %0, %1" __asm__ __volatile__ ("lock; xchgl %0, %1"
@ -1774,8 +1792,8 @@ static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
: "memory", "cc"); : "memory", "cc");
if (!ret) { if (!ret) {
assert(!sl->threadid); assert(!sl->threadid);
sl->c = 1;
sl->threadid = CURRENT_THREAD; sl->threadid = CURRENT_THREAD;
sl->c = 1;
return 1; return 1;
} }
} }
@ -1785,16 +1803,15 @@ static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
#else /* WIN32 */ #else /* WIN32 */
/* Custom win32-style spin locks on x86 and x64 for MSC */ /* Custom win32-style spin locks on x86 and x64 for MSC */
struct win32_mlock_t struct win32_mlock_t {
{
volatile long l; volatile long l;
volatile unsigned int c; unsigned int c;
volatile long threadid; long threadid;
}; };
#define MLOCK_T struct win32_mlock_t #define MLOCK_T struct win32_mlock_t
#define CURRENT_THREAD win32_getcurrentthreadid() #define CURRENT_THREAD GetCurrentThreadId()
#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0) #define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
#define ACQUIRE_LOCK(sl) win32_acquire_lock(sl) #define ACQUIRE_LOCK(sl) win32_acquire_lock(sl)
#define RELEASE_LOCK(sl) win32_release_lock(sl) #define RELEASE_LOCK(sl) win32_release_lock(sl)
#define TRY_LOCK(sl) win32_try_lock(sl) #define TRY_LOCK(sl) win32_try_lock(sl)
@ -1802,36 +1819,19 @@ struct win32_mlock_t
static MLOCK_T malloc_global_mutex = { 0, 0, 0}; static MLOCK_T malloc_global_mutex = { 0, 0, 0};
static FORCEINLINE long win32_getcurrentthreadid() {
#ifdef _MSC_VER
#if defined(_M_IX86)
long *threadstruct=(long *)__readfsdword(0x18);
long threadid=threadstruct[0x24/sizeof(long)];
return threadid;
#elif defined(_M_X64)
/* todo */
return GetCurrentThreadId();
#else
return GetCurrentThreadId();
#endif
#else
return GetCurrentThreadId();
#endif
}
static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) { static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
int spins = 0; int spins = 0;
for (;;) { for (;;) {
if (sl->l != 0) { if (sl->l != 0) {
if (sl->threadid == CURRENT_THREAD) { if (sl->threadid == (signed)CURRENT_THREAD) {
++sl->c; ++sl->c;
return 0; return 0;
} }
} }
else { else {
if (!interlockedexchange(&sl->l, 1)) { if (!interlockedexchange(&sl->l, 1)) {
assert(!sl->threadid); assert(!sl->threadid);
sl->c=CURRENT_THREAD;
sl->threadid = CURRENT_THREAD; sl->threadid = CURRENT_THREAD;
sl->c = 1; sl->c = 1;
return 0; return 0;
@ -1853,7 +1853,7 @@ static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
static FORCEINLINE int win32_try_lock (MLOCK_T *sl) { static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
if(sl->l != 0) { if(sl->l != 0) {
if (sl->threadid == CURRENT_THREAD) { if (sl->threadid == (signed)CURRENT_THREAD) {
++sl->c; ++sl->c;
return 1; return 1;
} }
@ -1909,9 +1909,9 @@ static int pthread_init_lock (MLOCK_T *sl) {
#define MLOCK_T CRITICAL_SECTION #define MLOCK_T CRITICAL_SECTION
#define CURRENT_THREAD GetCurrentThreadId() #define CURRENT_THREAD GetCurrentThreadId()
#define INITIAL_LOCK(s) (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000)) #define INITIAL_LOCK(s) (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000))
#define ACQUIRE_LOCK(s) (EnterCriticalSection(s), 0) #define ACQUIRE_LOCK(s) (EnterCriticalSection(sl), 0)
#define RELEASE_LOCK(s) LeaveCriticalSection(s) #define RELEASE_LOCK(s) LeaveCriticalSection(sl)
#define TRY_LOCK(s) TryEnterCriticalSection(s) #define TRY_LOCK(s) TryEnterCriticalSection(sl)
#define NEED_GLOBAL_LOCK_INIT #define NEED_GLOBAL_LOCK_INIT
static MLOCK_T malloc_global_mutex; static MLOCK_T malloc_global_mutex;
@ -1959,8 +1959,12 @@ static void init_malloc_global_mutex() {
#endif /* USE_LOCKS */ #endif /* USE_LOCKS */
#if USE_LOCKS #if USE_LOCKS
#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
#endif
#ifndef RELEASE_MALLOC_GLOBAL_LOCK
#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
#endif
#else /* USE_LOCKS */ #else /* USE_LOCKS */
#define ACQUIRE_MALLOC_GLOBAL_LOCK() #define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define RELEASE_MALLOC_GLOBAL_LOCK() #define RELEASE_MALLOC_GLOBAL_LOCK()
@ -2063,8 +2067,9 @@ static void init_malloc_global_mutex() {
The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
the chunk size redundantly records whether the current chunk is the chunk size redundantly records whether the current chunk is
inuse. This redundancy enables usage checks within free and realloc, inuse (unless the chunk is mmapped). This redundancy enables usage
and reduces indirection when freeing and consolidating chunks. checks within free and realloc, and reduces indirection when freeing
and consolidating chunks.
Each freshly allocated chunk must have both cinuse and pinuse set. Each freshly allocated chunk must have both cinuse and pinuse set.
That is, each allocated chunk borders either a previously allocated That is, each allocated chunk borders either a previously allocated
@ -2093,9 +2098,8 @@ static void init_malloc_global_mutex() {
space is still allocated for it (TOP_FOOT_SIZE) to enable space is still allocated for it (TOP_FOOT_SIZE) to enable
separation or merging when space is extended. separation or merging when space is extended.
3. Chunks allocated via mmap, which have the lowest-order bit 3. Chunks allocated via mmap, have both cinuse and pinuse bits
(IS_MMAPPED_BIT) set in their prev_foot fields, and do not set cleared in their head fields. Because they are allocated
PINUSE_BIT in their head fields. Because they are allocated
one-by-one, each must carry its own prev_foot field, which is one-by-one, each must carry its own prev_foot field, which is
also used to hold the offset this chunk has within its mmapped also used to hold the offset this chunk has within its mmapped
region, which is needed to preserve alignment. Each mmapped region, which is needed to preserve alignment. Each mmapped
@ -2161,9 +2165,7 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
/* /*
The head field of a chunk is or'ed with PINUSE_BIT when previous The head field of a chunk is or'ed with PINUSE_BIT when previous
adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
use. If the chunk was obtained with mmap, the prev_foot field has use, unless mmapped, in which case both bits are cleared.
IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
mmapped region to the base of the chunk.
FLAG4_BIT is not used by this malloc, but might be useful in extensions. FLAG4_BIT is not used by this malloc, but might be useful in extensions.
*/ */
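Not in the patch, but a small restatement of the encoding the comment above describes, using PINUSE_BIT and CINUSE_BIT as dlmalloc defines them (1 and 2):

static int chunk_state(size_t head) {         /* illustrative helper only          */
  size_t bits = head & (PINUSE_BIT|CINUSE_BIT);
  if (bits == 0)          return 2;           /* mmapped chunk: both bits clear    */
  if (bits == PINUSE_BIT) return 0;           /* free chunk: only PINUSE set       */
  return 1;                                   /* ordinary in-use chunk: CINUSE set */
}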
@ -2180,10 +2182,12 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
/* extraction of fields from head words */ /* extraction of fields from head words */
#define cinuse(p) ((p)->head & CINUSE_BIT) #define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT) #define pinuse(p) ((p)->head & PINUSE_BIT)
#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
#define chunksize(p) ((p)->head & ~(FLAG_BITS)) #define chunksize(p) ((p)->head & ~(FLAG_BITS))
#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
/* Treat space at ptr +/- offset as a chunk */ /* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
@ -2208,9 +2212,6 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
#define set_free_with_pinuse(p, s, n)\ #define set_free_with_pinuse(p, s, n)\
(clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
#define is_mmapped(p)\
(!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
/* Get the internal overhead associated with chunk p */ /* Get the internal overhead associated with chunk p */
#define overhead_for(p)\ #define overhead_for(p)\
(is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
@ -2381,7 +2382,7 @@ typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
and so should not try to deallocate or merge with others. and so should not try to deallocate or merge with others.
(This currently holds only for the initial segment passed (This currently holds only for the initial segment passed
into create_mspace_with_base.) into create_mspace_with_base.)
* If IS_MMAPPED_BIT set, the segment may be merged with * If USE_MMAP_BIT set, the segment may be merged with
other surrounding mmapped segments and trimmed/de-allocated other surrounding mmapped segments and trimmed/de-allocated
using munmap. using munmap.
* If neither bit is set, then the segment was obtained using * If neither bit is set, then the segment was obtained using
@ -2396,7 +2397,7 @@ struct malloc_segment {
flag_t sflags; /* mmap and extern flag */ flag_t sflags; /* mmap and extern flag */
}; };
#define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT) #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
typedef struct malloc_segment msegment; typedef struct malloc_segment msegment;
@ -2543,7 +2544,7 @@ struct malloc_params {
static struct malloc_params mparams; static struct malloc_params mparams;
/* Ensure mparams initialized */ /* Ensure mparams initialized */
#define ensure_initialization() if (mparams.magic == 0) init_mparams() #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
#if !ONLY_MSPACES #if !ONLY_MSPACES
@ -2752,7 +2753,7 @@ static size_t traverse_and_check(mstate m);
I = NTREEBINS-1;\ I = NTREEBINS-1;\
else {\ else {\
unsigned int K;\ unsigned int K;\
__asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "rm" (X));\ __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g" (X));\
I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
}\ }\
} }
@ -2850,7 +2851,7 @@ static size_t traverse_and_check(mstate m);
#define compute_bit2idx(X, I)\ #define compute_bit2idx(X, I)\
{\ {\
unsigned int J;\ unsigned int J;\
__asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "rm" (X));\ __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
I = (bindex_t)J;\ I = (bindex_t)J;\
} }
@ -2921,15 +2922,15 @@ static size_t traverse_and_check(mstate m);
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */ /* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n) ((char*)(p) < (char*)(n)) #define ok_next(p, n) ((char*)(p) < (char*)(n))
/* Check if p has its cinuse bit on */ /* Check if p has inuse status */
#define ok_cinuse(p) cinuse(p) #define ok_inuse(p) is_inuse(p)
/* Check if p has its pinuse bit on */ /* Check if p has its pinuse bit on */
#define ok_pinuse(p) pinuse(p) #define ok_pinuse(p) pinuse(p)
#else /* !INSECURE */ #else /* !INSECURE */
#define ok_address(M, a) (1) #define ok_address(M, a) (1)
#define ok_next(b, n) (1) #define ok_next(b, n) (1)
#define ok_cinuse(p) (1) #define ok_inuse(p) (1)
#define ok_pinuse(p) (1) #define ok_pinuse(p) (1)
#endif /* !INSECURE */ #endif /* !INSECURE */
@ -2958,6 +2959,8 @@ static size_t traverse_and_check(mstate m);
#define mark_inuse_foot(M,p,s) #define mark_inuse_foot(M,p,s)
/* Macros for setting head/foot of non-mmapped chunks */
/* Set cinuse bit and pinuse bit of next chunk */ /* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\ #define set_inuse(M,p,s)\
((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
@ -3058,7 +3061,6 @@ static int init_mparams(void) {
INITIAL_LOCK(&gm->mutex); INITIAL_LOCK(&gm->mutex);
#endif #endif
#if (FOOTERS && !INSECURE)
{ {
#if USE_DEV_RANDOM #if USE_DEV_RANDOM
int fd; int fd;
@ -3078,13 +3080,9 @@ static int init_mparams(void) {
#endif #endif
magic |= (size_t)8U; /* ensure nonzero */ magic |= (size_t)8U; /* ensure nonzero */
magic &= ~(size_t)7U; /* improve chances of fault for bad values */ magic &= ~(size_t)7U; /* improve chances of fault for bad values */
}
#else /* (FOOTERS && !INSECURE) */
magic = (size_t)0x58585858U;
#endif /* (FOOTERS && !INSECURE) */
mparams.magic = magic; mparams.magic = magic;
} }
}
RELEASE_MALLOC_GLOBAL_LOCK(); RELEASE_MALLOC_GLOBAL_LOCK();
return 1; return 1;
@ -3092,8 +3090,9 @@ static int init_mparams(void) {
/* support for mallopt */ /* support for mallopt */
static int change_mparam(int param_number, int value) { static int change_mparam(int param_number, int value) {
size_t val = (value == -1)? MAX_SIZE_T : (size_t)value; size_t val;
ensure_initialization(); ensure_initialization();
val = (value == -1)? MAX_SIZE_T : (size_t)value;
switch(param_number) { switch(param_number) {
case M_TRIM_THRESHOLD: case M_TRIM_THRESHOLD:
mparams.trim_threshold = val; mparams.trim_threshold = val;
@ -3139,7 +3138,7 @@ static void do_check_top_chunk(mstate m, mchunkptr p) {
/* Check properties of (inuse) mmapped chunks */ /* Check properties of (inuse) mmapped chunks */
static void do_check_mmapped_chunk(mstate m, mchunkptr p) { static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
size_t sz = chunksize(p); size_t sz = chunksize(p);
size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
assert(is_mmapped(p)); assert(is_mmapped(p));
assert(use_mmap(m)); assert(use_mmap(m));
assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
@ -3153,7 +3152,7 @@ static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
/* Check properties of inuse chunks */ /* Check properties of inuse chunks */
static void do_check_inuse_chunk(mstate m, mchunkptr p) { static void do_check_inuse_chunk(mstate m, mchunkptr p) {
do_check_any_chunk(m, p); do_check_any_chunk(m, p);
assert(cinuse(p)); assert(is_inuse(p));
assert(next_pinuse(p)); assert(next_pinuse(p));
/* If not pinuse and not mmapped, previous chunk has OK offset */ /* If not pinuse and not mmapped, previous chunk has OK offset */
assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
@ -3166,7 +3165,7 @@ static void do_check_free_chunk(mstate m, mchunkptr p) {
size_t sz = chunksize(p); size_t sz = chunksize(p);
mchunkptr next = chunk_plus_offset(p, sz); mchunkptr next = chunk_plus_offset(p, sz);
do_check_any_chunk(m, p); do_check_any_chunk(m, p);
assert(!cinuse(p)); assert(!is_inuse(p));
assert(!next_pinuse(p)); assert(!next_pinuse(p));
assert (!is_mmapped(p)); assert (!is_mmapped(p));
if (p != m->dv && p != m->top) { if (p != m->dv && p != m->top) {
@ -3175,7 +3174,7 @@ static void do_check_free_chunk(mstate m, mchunkptr p) {
assert(is_aligned(chunk2mem(p))); assert(is_aligned(chunk2mem(p)));
assert(next->prev_foot == sz); assert(next->prev_foot == sz);
assert(pinuse(p)); assert(pinuse(p));
assert (next == m->top || cinuse(next)); assert (next == m->top || is_inuse(next));
assert(p->fd->bk == p); assert(p->fd->bk == p);
assert(p->bk->fd == p); assert(p->bk->fd == p);
} }
@ -3188,7 +3187,7 @@ static void do_check_free_chunk(mstate m, mchunkptr p) {
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
if (mem != 0) { if (mem != 0) {
mchunkptr p = mem2chunk(mem); mchunkptr p = mem2chunk(mem);
size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); size_t sz = p->head & ~INUSE_BITS;
do_check_inuse_chunk(m, p); do_check_inuse_chunk(m, p);
assert((sz & CHUNK_ALIGN_MASK) == 0); assert((sz & CHUNK_ALIGN_MASK) == 0);
assert(sz >= MIN_CHUNK_SIZE); assert(sz >= MIN_CHUNK_SIZE);
@ -3215,7 +3214,7 @@ static void do_check_tree(mstate m, tchunkptr t) {
do_check_any_chunk(m, ((mchunkptr)u)); do_check_any_chunk(m, ((mchunkptr)u));
assert(u->index == tindex); assert(u->index == tindex);
assert(chunksize(u) == tsize); assert(chunksize(u) == tsize);
assert(!cinuse(u)); assert(!is_inuse(u));
assert(!next_pinuse(u)); assert(!next_pinuse(u));
assert(u->fd->bk == u); assert(u->fd->bk == u);
assert(u->bk->fd == u); assert(u->bk->fd == u);
@ -3333,13 +3332,13 @@ static size_t traverse_and_check(mstate m) {
while (segment_holds(s, q) && while (segment_holds(s, q) &&
q != m->top && q->head != FENCEPOST_HEAD) { q != m->top && q->head != FENCEPOST_HEAD) {
sum += chunksize(q); sum += chunksize(q);
if (cinuse(q)) { if (is_inuse(q)) {
assert(!bin_find(m, q)); assert(!bin_find(m, q));
do_check_inuse_chunk(m, q); do_check_inuse_chunk(m, q);
} }
else { else {
assert(q == m->dv || bin_find(m, q)); assert(q == m->dv || bin_find(m, q));
assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
do_check_free_chunk(m, q); do_check_free_chunk(m, q);
} }
lastq = q; lastq = q;
@ -3400,7 +3399,7 @@ static struct mallinfo internal_mallinfo(mstate m) {
q != m->top && q->head != FENCEPOST_HEAD) { q != m->top && q->head != FENCEPOST_HEAD) {
size_t sz = chunksize(q); size_t sz = chunksize(q);
sum += sz; sum += sz;
if (!cinuse(q)) { if (!is_inuse(q)) {
mfree += sz; mfree += sz;
++nfree; ++nfree;
} }
@ -3441,7 +3440,7 @@ static void internal_malloc_stats(mstate m) {
mchunkptr q = align_as_chunk(s->base); mchunkptr q = align_as_chunk(s->base);
while (segment_holds(s, q) && while (segment_holds(s, q) &&
q != m->top && q->head != FENCEPOST_HEAD) { q != m->top && q->head != FENCEPOST_HEAD) {
if (!cinuse(q)) if (!is_inuse(q))
used -= chunksize(q); used -= chunksize(q);
q = next_chunk(q); q = next_chunk(q);
} }
@ -3714,9 +3713,7 @@ static void internal_malloc_stats(mstate m) {
the mmapped region stored in the prev_foot field of the chunk. This the mmapped region stored in the prev_foot field of the chunk. This
allows reconstruction of the required argument to MUNMAP when freed, allows reconstruction of the required argument to MUNMAP when freed,
and also allows adjustment of the returned chunk to meet alignment and also allows adjustment of the returned chunk to meet alignment
requirements (especially in memalign). There is also enough space requirements (especially in memalign).
allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
the PINUSE bit so frees can be checked.
*/ */
/* Malloc using mmap */ /* Malloc using mmap */
@ -3728,13 +3725,13 @@ static void* mmap_alloc(mstate m, size_t nb) {
size_t offset = align_offset(chunk2mem(mm)); size_t offset = align_offset(chunk2mem(mm));
size_t psize = mmsize - offset - MMAP_FOOT_PAD; size_t psize = mmsize - offset - MMAP_FOOT_PAD;
mchunkptr p = (mchunkptr)(mm + offset); mchunkptr p = (mchunkptr)(mm + offset);
p->prev_foot = offset | IS_MMAPPED_BIT; p->prev_foot = offset;
(p)->head = (psize|CINUSE_BIT); p->head = psize;
mark_inuse_foot(m, p, psize); mark_inuse_foot(m, p, psize);
chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
if (mm < m->least_addr) if (m->least_addr == 0 || mm < m->least_addr)
m->least_addr = mm; m->least_addr = mm;
if ((m->footprint += mmsize) > m->max_footprint) if ((m->footprint += mmsize) > m->max_footprint)
m->max_footprint = m->footprint; m->max_footprint = m->footprint;
@ -3756,7 +3753,7 @@ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
(oldsize - nb) <= (mparams.granularity << 1)) (oldsize - nb) <= (mparams.granularity << 1))
return oldp; return oldp;
else { else {
size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; size_t offset = oldp->prev_foot;
size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
char* cp = (char*)CALL_MREMAP((char*)oldp - offset, char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
@ -3764,7 +3761,7 @@ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
if (cp != CMFAIL) { if (cp != CMFAIL) {
mchunkptr newp = (mchunkptr)(cp + offset); mchunkptr newp = (mchunkptr)(cp + offset);
size_t psize = newmmsize - offset - MMAP_FOOT_PAD; size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
newp->head = (psize|CINUSE_BIT); newp->head = psize;
mark_inuse_foot(m, newp, psize); mark_inuse_foot(m, newp, psize);
chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
@ -3853,7 +3850,7 @@ static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
set_size_and_pinuse_of_free_chunk(q, dsize); set_size_and_pinuse_of_free_chunk(q, dsize);
} }
else { else {
if (!cinuse(oldfirst)) { if (!is_inuse(oldfirst)) {
size_t nsize = chunksize(oldfirst); size_t nsize = chunksize(oldfirst);
unlink_chunk(m, oldfirst, nsize); unlink_chunk(m, oldfirst, nsize);
oldfirst = chunk_plus_offset(oldfirst, nsize); oldfirst = chunk_plus_offset(oldfirst, nsize);
@ -3931,8 +3928,8 @@ static void* sys_alloc(mstate m, size_t nb) {
ensure_initialization(); ensure_initialization();
/* Directly map large chunks */ /* Directly map large chunks, but only if already initialized */
if (use_mmap(m) && nb >= mparams.mmap_threshold) { if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
void* mem = mmap_alloc(m, nb); void* mem = mmap_alloc(m, nb);
if (mem != 0) if (mem != 0)
return mem; return mem;
@ -4026,7 +4023,7 @@ static void* sys_alloc(mstate m, size_t nb) {
if (mp != CMFAIL) { if (mp != CMFAIL) {
tbase = mp; tbase = mp;
tsize = rsize; tsize = rsize;
mmap_flag = IS_MMAPPED_BIT; mmap_flag = USE_MMAP_BIT;
} }
} }
} }
@ -4056,7 +4053,9 @@ static void* sys_alloc(mstate m, size_t nb) {
m->max_footprint = m->footprint; m->max_footprint = m->footprint;
if (!is_initialized(m)) { /* first-time initialization */ if (!is_initialized(m)) { /* first-time initialization */
m->seg.base = m->least_addr = tbase; if (m->least_addr == 0 || tbase < m->least_addr)
m->least_addr = tbase;
m->seg.base = tbase;
m->seg.size = tsize; m->seg.size = tsize;
m->seg.sflags = mmap_flag; m->seg.sflags = mmap_flag;
m->magic = mparams.magic; m->magic = mparams.magic;
@ -4082,7 +4081,7 @@ static void* sys_alloc(mstate m, size_t nb) {
sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
if (sp != 0 && if (sp != 0 &&
!is_extern_segment(sp) && !is_extern_segment(sp) &&
(sp->sflags & IS_MMAPPED_BIT) == mmap_flag && (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
segment_holds(sp, m->top)) { /* append */ segment_holds(sp, m->top)) { /* append */
sp->size += tsize; sp->size += tsize;
init_top(m, m->top, m->topsize + tsize); init_top(m, m->top, m->topsize + tsize);
@ -4095,7 +4094,7 @@ static void* sys_alloc(mstate m, size_t nb) {
sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
if (sp != 0 && if (sp != 0 &&
!is_extern_segment(sp) && !is_extern_segment(sp) &&
(sp->sflags & IS_MMAPPED_BIT) == mmap_flag) { (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
char* oldbase = sp->base; char* oldbase = sp->base;
sp->base = tbase; sp->base = tbase;
sp->size += tsize; sp->size += tsize;
@ -4139,7 +4138,7 @@ static size_t release_unused_segments(mstate m) {
mchunkptr p = align_as_chunk(base); mchunkptr p = align_as_chunk(base);
size_t psize = chunksize(p); size_t psize = chunksize(p);
/* Can unmap if first chunk holds entire segment and not pinned */ /* Can unmap if first chunk holds entire segment and not pinned */
if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
tchunkptr tp = (tchunkptr)p; tchunkptr tp = (tchunkptr)p;
assert(segment_holds(sp, (char*)sp)); assert(segment_holds(sp, (char*)sp));
if (p == m->dv) { if (p == m->dv) {
@ -4364,7 +4363,7 @@ static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
/* Try to either shrink or extend into top. Else malloc-copy-free */ /* Try to either shrink or extend into top. Else malloc-copy-free */
if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
ok_next(oldp, next) && ok_pinuse(next))) { ok_next(oldp, next) && ok_pinuse(next))) {
size_t nb = request2size(bytes); size_t nb = request2size(bytes);
if (is_mmapped(oldp)) if (is_mmapped(oldp))
@ -4375,7 +4374,7 @@ static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
if (rsize >= MIN_CHUNK_SIZE) { if (rsize >= MIN_CHUNK_SIZE) {
mchunkptr remainder = chunk_plus_offset(newp, nb); mchunkptr remainder = chunk_plus_offset(newp, nb);
set_inuse(m, newp, nb); set_inuse(m, newp, nb);
set_inuse(m, remainder, rsize); set_inuse_and_pinuse(m, remainder, rsize);
extra = chunk2mem(remainder); extra = chunk2mem(remainder);
} }
} }
@ -4396,6 +4395,11 @@ static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
POSTACTION(m); POSTACTION(m);
return 0; return 0;
} }
#if DEBUG
if (newp != 0) {
check_inuse_chunk(m, newp); /* Check requires lock */
}
#endif
POSTACTION(m); POSTACTION(m);
@ -4403,7 +4407,6 @@ static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
if (extra != 0) { if (extra != 0) {
internal_free(m, extra); internal_free(m, extra);
} }
check_inuse_chunk(m, newp);
return chunk2mem(newp); return chunk2mem(newp);
} }
else { else {
@ -4468,7 +4471,7 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
newp->prev_foot = p->prev_foot + leadsize; newp->prev_foot = p->prev_foot + leadsize;
newp->head = (newsize|CINUSE_BIT); newp->head = newsize;
} }
else { /* Otherwise, give back leader, use the rest */ else { /* Otherwise, give back leader, use the rest */
set_inuse(m, newp, newsize); set_inuse(m, newp, newsize);
@ -4796,13 +4799,12 @@ void dlfree(void* mem) {
#endif /* FOOTERS */ #endif /* FOOTERS */
if (!PREACTION(fm)) { if (!PREACTION(fm)) {
check_inuse_chunk(fm, p); check_inuse_chunk(fm, p);
if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
size_t psize = chunksize(p); size_t psize = chunksize(p);
mchunkptr next = chunk_plus_offset(p, psize); mchunkptr next = chunk_plus_offset(p, psize);
if (!pinuse(p)) { if (!pinuse(p)) {
size_t prevsize = p->prev_foot; size_t prevsize = p->prev_foot;
if ((prevsize & IS_MMAPPED_BIT) != 0) { if (is_mmapped(p)) {
prevsize &= ~IS_MMAPPED_BIT;
psize += prevsize + MMAP_FOOT_PAD; psize += prevsize + MMAP_FOOT_PAD;
if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
fm->footprint -= psize; fm->footprint -= psize;
@ -4954,8 +4956,8 @@ void* dlpvalloc(size_t bytes) {
} }
int dlmalloc_trim(size_t pad) { int dlmalloc_trim(size_t pad) {
ensure_initialization();
int result = 0; int result = 0;
ensure_initialization();
if (!PREACTION(gm)) { if (!PREACTION(gm)) {
result = sys_trim(gm, pad); result = sys_trim(gm, pad);
POSTACTION(gm); POSTACTION(gm);
@ -4990,7 +4992,7 @@ int dlmallopt(int param_number, int value) {
size_t dlmalloc_usable_size(void* mem) { size_t dlmalloc_usable_size(void* mem) {
if (mem != 0) { if (mem != 0) {
mchunkptr p = mem2chunk(mem); mchunkptr p = mem2chunk(mem);
if (cinuse(p)) if (is_inuse(p))
return chunksize(p) - overhead_for(p); return chunksize(p) - overhead_for(p);
} }
return 0; return 0;
@ -5007,7 +5009,7 @@ static mstate init_user_mstate(char* tbase, size_t tsize) {
mstate m = (mstate)(chunk2mem(msp)); mstate m = (mstate)(chunk2mem(msp));
memset(m, 0, msize); memset(m, 0, msize);
INITIAL_LOCK(&m->mutex); INITIAL_LOCK(&m->mutex);
msp->head = (msize|PINUSE_BIT|CINUSE_BIT); msp->head = (msize|INUSE_BITS);
m->seg.base = m->least_addr = tbase; m->seg.base = m->least_addr = tbase;
m->seg.size = m->footprint = m->max_footprint = tsize; m->seg.size = m->footprint = m->max_footprint = tsize;
m->magic = mparams.magic; m->magic = mparams.magic;
@ -5035,7 +5037,7 @@ mspace create_mspace(size_t capacity, int locked) {
char* tbase = (char*)(CALL_MMAP(tsize)); char* tbase = (char*)(CALL_MMAP(tsize));
if (tbase != CMFAIL) { if (tbase != CMFAIL) {
m = init_user_mstate(tbase, tsize); m = init_user_mstate(tbase, tsize);
m->seg.sflags = IS_MMAPPED_BIT; m->seg.sflags = USE_MMAP_BIT;
set_lock(m, locked); set_lock(m, locked);
} }
} }
@ -5056,13 +5058,13 @@ mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
return (mspace)m; return (mspace)m;
} }
int mspace_mmap_large_chunks(mspace msp, int enable) { int mspace_track_large_chunks(mspace msp, int enable) {
int ret = 0; int ret = 0;
mstate ms = (mstate)msp; mstate ms = (mstate)msp;
if (!PREACTION(ms)) { if (!PREACTION(ms)) {
if (use_mmap(ms)) if (!use_mmap(ms))
ret = 1; ret = 1;
if (enable) if (!enable)
enable_mmap(ms); enable_mmap(ms);
else else
disable_mmap(ms); disable_mmap(ms);
@ -5081,7 +5083,7 @@ size_t destroy_mspace(mspace msp) {
size_t size = sp->size; size_t size = sp->size;
flag_t flag = sp->sflags; flag_t flag = sp->sflags;
sp = sp->next; sp = sp->next;
if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
CALL_MUNMAP(base, size) == 0) CALL_MUNMAP(base, size) == 0)
freed += size; freed += size;
} }
@ -5213,11 +5215,11 @@ void* mspace_malloc(mspace msp, size_t bytes) {
} }
void mspace_free(mspace msp, void* mem) { void mspace_free(mspace msp, void* mem) {
UNREFERENCED_PARAMETER(msp);
if (mem != 0) { if (mem != 0) {
mchunkptr p = mem2chunk(mem); mchunkptr p = mem2chunk(mem);
#if FOOTERS #if FOOTERS
mstate fm = get_mstate_for(p); mstate fm = get_mstate_for(p);
msp = msp; /* placate people compiling -Wunused */
#else /* FOOTERS */ #else /* FOOTERS */
mstate fm = (mstate)msp; mstate fm = (mstate)msp;
#endif /* FOOTERS */ #endif /* FOOTERS */
@ -5227,13 +5229,12 @@ void mspace_free(mspace msp, void* mem) {
} }
if (!PREACTION(fm)) { if (!PREACTION(fm)) {
check_inuse_chunk(fm, p); check_inuse_chunk(fm, p);
if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
size_t psize = chunksize(p); size_t psize = chunksize(p);
mchunkptr next = chunk_plus_offset(p, psize); mchunkptr next = chunk_plus_offset(p, psize);
if (!pinuse(p)) { if (!pinuse(p)) {
size_t prevsize = p->prev_foot; size_t prevsize = p->prev_foot;
if ((prevsize & IS_MMAPPED_BIT) != 0) { if (is_mmapped(p)) {
prevsize &= ~IS_MMAPPED_BIT;
psize += prevsize + MMAP_FOOT_PAD; psize += prevsize + MMAP_FOOT_PAD;
if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
fm->footprint -= psize; fm->footprint -= psize;
@ -5452,7 +5453,7 @@ struct mallinfo mspace_mallinfo(mspace msp) {
size_t mspace_usable_size(void* mem) { size_t mspace_usable_size(void* mem) {
if (mem != 0) { if (mem != 0) {
mchunkptr p = mem2chunk(mem); mchunkptr p = mem2chunk(mem);
if (cinuse(p)) if (is_inuse(p))
return chunksize(p) - overhead_for(p); return chunksize(p) - overhead_for(p);
} }
return 0; return 0;
@ -5558,13 +5559,15 @@ int mspace_mallopt(int param_number, int value) {
/* ----------------------------------------------------------------------- /* -----------------------------------------------------------------------
History: History:
V2.8.4 (not yet released) V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
* Add mspace_mmap_large_chunks; thanks to Jean Brouwers * Use zeros instead of prev foot for is_mmapped
* Add mspace_track_large_chunks; thanks to Jean Brouwers
* Fix set_inuse in internal_realloc; thanks to Jean Brouwers
* Fix insufficient sys_alloc padding when using 16byte alignment * Fix insufficient sys_alloc padding when using 16byte alignment
* Fix bad error check in mspace_footprint * Fix bad error check in mspace_footprint
* Adaptations for ptmalloc, courtesy of Wolfram Gloger. * Adaptations for ptmalloc; thanks to Wolfram Gloger.
* Reentrant spin locks, courtesy of Earl Chew and others * Reentrant spin locks; thanks to Earl Chew and others
* Win32 improvements, courtesy of Niall Douglas and Earl Chew * Win32 improvements; thanks to Niall Douglas and Earl Chew
* Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
* Extension hook in malloc_state * Extension hook in malloc_state
* Various small adjustments to reduce warnings on some compilers * Various small adjustments to reduce warnings on some compilers

View file

@ -166,6 +166,12 @@ Optionally can also retrieve pool.
*/ */
EXTSPEC void *nedgetvalue(nedpool **p, void *mem) THROWSPEC; EXTSPEC void *nedgetvalue(nedpool **p, void *mem) THROWSPEC;
/* Trims the thread cache for the calling thread, returning any existing cache
data to the central pool. Remember to ALWAYS call with zero if you used the
system pool. Setting disable to non-zero replicates neddisablethreadcache().
*/
EXTSPEC void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC;
/* Disables the thread cache for the calling thread, returning any existing cache /* Disables the thread cache for the calling thread, returning any existing cache
data to the central pool. Remember to ALWAYS call with zero if you used the data to the central pool. Remember to ALWAYS call with zero if you used the
system pool. system pool.
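A hedged call-site sketch for the new export; the hook name is made up, only nedtrimthreadcache and its argument convention come from the header above:

#include "nedmalloc.h"

static void idle_hook(void)       /* hypothetical per-frame/idle callback */
{
    /* Hand this thread's cached blocks back to the central pool while
       keeping the cache enabled; disable != 0 would act like
       neddisablethreadcache(). Pass 0 for the system pool. */
    nedtrimthreadcache(0, 0);
}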

View file

@ -206,7 +206,7 @@ int32_t bitrev(int32_t, int32_t);
"beg: shr ebx, 1"\ "beg: shr ebx, 1"\
"adc eax, eax"\ "adc eax, eax"\
"dec ecx"\ "dec ecx"\
"jnz int16_t beg"\ "jnz short beg"\
parm [ebx][ecx]\ parm [ebx][ecx]\
modify nomemory exact [eax ebx ecx]\ modify nomemory exact [eax ebx ecx]\
value [eax] value [eax]
@ -263,7 +263,7 @@ static _inline int32_t bitrev(int32_t b, int32_t c)
beg: shr edx, 1 beg: shr edx, 1
adc eax, eax adc eax, eax
sub ecx, 1 sub ecx, 1
jnz int16_t beg jnz short beg
} }
} }
@ -602,26 +602,26 @@ int32_t Paeth686(int32_t, int32_t, int32_t);
void rgbhlineasm(int32_t, int32_t, int32_t, int32_t); void rgbhlineasm(int32_t, int32_t, int32_t, int32_t);
#pragma aux rgbhlineasm =\ #pragma aux rgbhlineasm =\
"sub ecx, edx"\ "sub ecx, edx"\
"jle int16_t endit"\ "jle short endit"\
"add edx, offset olinbuf"\ "add edx, offset olinbuf"\
"cmp dword ptr trnsrgb, 0"\ "cmp dword ptr trnsrgb, 0"\
"jz int16_t begit2"\ "jz short begit2"\
"begit: mov eax, dword ptr [ecx+edx]"\ "begit: mov eax, dword ptr [ecx+edx]"\
"or eax, 0xff000000"\ "or eax, 0xff000000"\
"cmp eax, dword ptr trnsrgb"\ "cmp eax, dword ptr trnsrgb"\
"jne int16_t skipit"\ "jne short skipit"\
"and eax, 0xffffff"\ "and eax, 0xffffff"\
"skipit: sub ecx, 3"\ "skipit: sub ecx, 3"\
"mov [edi], eax"\ "mov [edi], eax"\
"lea edi, [edi+ebx]"\ "lea edi, [edi+ebx]"\
"jnz int16_t begit"\ "jnz short begit"\
"jmp int16_t endit"\ "jmp short endit"\
"begit2: mov eax, dword ptr [ecx+edx]"\ "begit2: mov eax, dword ptr [ecx+edx]"\
"or eax, 0xff000000"\ "or eax, 0xff000000"\
"sub ecx, 3"\ "sub ecx, 3"\
"mov [edi], eax"\ "mov [edi], eax"\
"lea edi, [edi+ebx]"\ "lea edi, [edi+ebx]"\
"jnz int16_t begit2"\ "jnz short begit2"\
"endit:"\ "endit:"\
parm [ecx][edx][edi][ebx]\ parm [ecx][edx][edi][ebx]\
modify exact [eax ecx edi]\ modify exact [eax ecx edi]\
@ -630,14 +630,14 @@ void rgbhlineasm(int32_t, int32_t, int32_t, int32_t);
void pal8hlineasm(int32_t, int32_t, int32_t, int32_t); void pal8hlineasm(int32_t, int32_t, int32_t, int32_t);
#pragma aux pal8hlineasm =\ #pragma aux pal8hlineasm =\
"sub ecx, edx"\ "sub ecx, edx"\
"jle int16_t endit"\ "jle short endit"\
"add edx, offset olinbuf"\ "add edx, offset olinbuf"\
"begit: movzx eax, byte ptr [ecx+edx]"\ "begit: movzx eax, byte ptr [ecx+edx]"\
"mov eax, dword ptr palcol[eax*4]"\ "mov eax, dword ptr palcol[eax*4]"\
"dec ecx"\ "dec ecx"\
"mov [edi], eax"\ "mov [edi], eax"\
"lea edi, [edi+ebx]"\ "lea edi, [edi+ebx]"\
"jnz int16_t begit"\ "jnz short begit"\
"endit:"\ "endit:"\
parm [ecx][edx][edi][ebx]\ parm [ecx][edx][edi][ebx]\
modify exact [eax ecx edi]\ modify exact [eax ecx edi]\
@ -684,29 +684,29 @@ static _inline void rgbhlineasm(int32_t c, int32_t d, int32_t t, int32_t b)
mov edi, t mov edi, t
mov ebx, b mov ebx, b
sub ecx, edx sub ecx, edx
jle int16_t endit jle short endit
add edx, offset olinbuf add edx, offset olinbuf
cmp dword ptr trnsrgb, 0 cmp dword ptr trnsrgb, 0
jz int16_t begit2 jz short begit2
begit: begit:
mov eax, dword ptr [ecx+edx] mov eax, dword ptr [ecx+edx]
or eax, 0xff000000 or eax, 0xff000000
cmp eax, dword ptr trnsrgb cmp eax, dword ptr trnsrgb
jne int16_t skipit jne short skipit
and eax, 0xffffff and eax, 0xffffff
skipit: skipit:
sub ecx, 3 sub ecx, 3
mov [edi], eax mov [edi], eax
lea edi, [edi+ebx] lea edi, [edi+ebx]
jnz int16_t begit jnz short begit
jmp int16_t endit jmp short endit
begit2: begit2:
mov eax, dword ptr [ecx+edx] mov eax, dword ptr [ecx+edx]
or eax, 0xff000000 or eax, 0xff000000
sub ecx, 3 sub ecx, 3
mov [edi], eax mov [edi], eax
lea edi, [edi+ebx] lea edi, [edi+ebx]
jnz int16_t begit2 jnz short begit2
endit: endit:
pop edi pop edi
pop ebx pop ebx
@ -720,7 +720,7 @@ static _inline void pal8hlineasm(int32_t c, int32_t d, int32_t t, int32_t b)
mov ecx, c mov ecx, c
mov edx, d mov edx, d
sub ecx, edx sub ecx, edx
jle int16_t endit jle short endit
push ebx push ebx
push edi push edi
@ -732,7 +732,7 @@ begit:movzx eax, byte ptr [ecx+edx]
sub ecx, 1 sub ecx, 1
mov [edi], eax mov [edi], eax
lea edi, [edi+ebx] lea edi, [edi+ebx]
jnz int16_t begit jnz short begit
pop edi pop edi
pop ebx pop ebx
endit: endit:

View file

@ -725,7 +725,7 @@ void *nedgetvalue(nedpool **p, void *mem) THROWSPEC
return np->uservalue; return np->uservalue;
} }
void neddisablethreadcache(nedpool *p) THROWSPEC void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC
{ {
int mycache; int mycache;
if (!p) if (!p)
@ -736,7 +736,7 @@ void neddisablethreadcache(nedpool *p) THROWSPEC
mycache=(int)(size_t) TLSGET(p->mycache); mycache=(int)(size_t) TLSGET(p->mycache);
if (!mycache) if (!mycache)
{ /* Set to mspace 0 */ { /* Set to mspace 0 */
if (TLSSET(p->mycache, (void *)-1)) abort(); if (disable && TLSSET(p->mycache, (void *)-1)) abort();
} }
else if (mycache>0) else if (mycache>0)
{ /* Set to last used mspace */ { /* Set to last used mspace */
@ -745,16 +745,23 @@ void neddisablethreadcache(nedpool *p) THROWSPEC
printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n", printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n",
100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs); 100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs);
#endif #endif
if (TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort(); if (disable && TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort();
tc->frees++; tc->frees++;
RemoveCacheEntries(p, tc, 0); RemoveCacheEntries(p, tc, 0);
assert(!tc->freeInCache); assert(!tc->freeInCache);
if (disable)
{
tc->mymspace=-1; tc->mymspace=-1;
tc->threadid=0; tc->threadid=0;
mspace_free(0, p->caches[mycache-1]); mspace_free(0, p->caches[mycache-1]);
p->caches[mycache-1]=0; p->caches[mycache-1]=0;
} }
} }
}
void neddisablethreadcache(nedpool *p) THROWSPEC
{
nedtrimthreadcache(p, 1);
}
#define GETMSPACE(m,p,tc,ms,s,action) \ #define GETMSPACE(m,p,tc,ms,s,action) \
do \ do \
@ -783,12 +790,12 @@ static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymsp
} }
mycache=(int)(size_t) TLSGET((*p)->mycache); mycache=(int)(size_t) TLSGET((*p)->mycache);
if (mycache>0) if (mycache>0)
{ { /* Already have a cache */
*tc=(*p)->caches[mycache-1]; *tc=(*p)->caches[mycache-1];
*mymspace=(*tc)->mymspace; *mymspace=(*tc)->mymspace;
} }
else if (!mycache) else if (!mycache)
{ { /* Need to allocate a new cache */
*tc=AllocCache(*p); *tc=AllocCache(*p);
if (!*tc) if (!*tc)
{ /* Disable */ { /* Disable */
@ -799,12 +806,12 @@ static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymsp
*mymspace=(*tc)->mymspace; *mymspace=(*tc)->mymspace;
} }
else else
{ { /* Cache disabled, but we do have an assigned thread pool */
*tc=0; *tc=0;
*mymspace=-mycache-1; *mymspace=-mycache-1;
} }
assert(*mymspace>=0); assert(*mymspace>=0);
assert((long)(size_t)CURRENT_THREAD==(*tc)->threadid); assert(!(*tc) || (long)(size_t)CURRENT_THREAD==(*tc)->threadid);
#ifdef FULLSANITYCHECKS #ifdef FULLSANITYCHECKS
if (*tc) if (*tc)
{ {

View file

@ -395,9 +395,9 @@ int32_t A_MoveSprite(int32_t spritenum, const vec3_t *change, uint32_t cliptype)
case 1: case 1:
if (daz >= ActorExtra[spritenum].floorz) if (daz >= ActorExtra[spritenum].floorz)
{ {
if (totalclock > ActorExtra[spritenum].temp_data[9]) if (totalclock > ActorExtra[spritenum].lasttransport)
{ {
ActorExtra[spritenum].temp_data[9] = totalclock + (TICSPERFRAME<<2); ActorExtra[spritenum].lasttransport = totalclock + (TICSPERFRAME<<2);
sprite[spritenum].x += (sprite[OW].x-SX); sprite[spritenum].x += (sprite[OW].x-SX);
sprite[spritenum].y += (sprite[OW].y-SY); sprite[spritenum].y += (sprite[OW].y-SY);
@ -415,9 +415,9 @@ int32_t A_MoveSprite(int32_t spritenum, const vec3_t *change, uint32_t cliptype)
case 2: case 2:
if (daz <= ActorExtra[spritenum].ceilingz) if (daz <= ActorExtra[spritenum].ceilingz)
{ {
if (totalclock > ActorExtra[spritenum].temp_data[9]) if (totalclock > ActorExtra[spritenum].lasttransport)
{ {
ActorExtra[spritenum].temp_data[9] = totalclock + (TICSPERFRAME<<2); ActorExtra[spritenum].lasttransport = totalclock + (TICSPERFRAME<<2);
sprite[spritenum].x += (sprite[OW].x-SX); sprite[spritenum].x += (sprite[OW].x-SX);
sprite[spritenum].y += (sprite[OW].y-SY); sprite[spritenum].y += (sprite[OW].y-SY);
sprite[spritenum].z = sector[sprite[OW].sectnum].floorz - daz + sector[sprite[i].sectnum].ceilingz; sprite[spritenum].z = sector[sprite[OW].sectnum].floorz - daz + sector[sprite[i].sectnum].ceilingz;
@ -3278,7 +3278,7 @@ static void G_MoveTransports(void)
ll = klabs(sprite[j].zvel); ll = klabs(sprite[j].zvel);
if (totalclock > ActorExtra[j].temp_data[9]) if (totalclock > ActorExtra[j].lasttransport)
{ {
warpspriteto = 0; warpspriteto = 0;
if (ll && sectlotag == 2 && sprite[j].z < (sector[sect].ceilingz+ll)) if (ll && sectlotag == 2 && sprite[j].z < (sector[sect].ceilingz+ll))
@ -3386,7 +3386,7 @@ static void G_MoveTransports(void)
} }
break; break;
case 1: case 1:
ActorExtra[j].temp_data[9] = totalclock + (TICSPERFRAME<<2); ActorExtra[j].lasttransport = totalclock + (TICSPERFRAME<<2);
sprite[j].x += (sprite[OW].x-SX); sprite[j].x += (sprite[OW].x-SX);
sprite[j].y += (sprite[OW].y-SY); sprite[j].y += (sprite[OW].y-SY);
@ -3400,7 +3400,7 @@ static void G_MoveTransports(void)
break; break;
case 2: case 2:
ActorExtra[j].temp_data[9] = totalclock + (TICSPERFRAME<<2); ActorExtra[j].lasttransport = totalclock + (TICSPERFRAME<<2);
sprite[j].x += (sprite[OW].x-SX); sprite[j].x += (sprite[OW].x-SX);
sprite[j].y += (sprite[OW].y-SY); sprite[j].y += (sprite[OW].y-SY);
sprite[j].z = sector[sprite[OW].sectnum].floorz; sprite[j].z = sector[sprite[OW].sectnum].floorz;

View file

@ -540,7 +540,7 @@ spriteinterpolate sprpos[MAXSPRITES];
typedef struct { typedef struct {
int32_t bposx,bposy,bposz; int32_t bposx,bposy,bposz;
int32_t floorz,ceilingz,lastvx,lastvy; int32_t floorz,ceilingz,lastvx,lastvy;
int32_t flags; int32_t flags,lasttransport,shootzvel;
intptr_t temp_data[10]; // sometimes used to hold pointers to con code intptr_t temp_data[10]; // sometimes used to hold pointers to con code
int16_t picnum,ang,extra,owner,movflag; int16_t picnum,ang,extra,owner,movflag;
int16_t tempang,actorstayput,dispicnum; int16_t tempang,actorstayput,dispicnum;

View file

@ -4148,8 +4148,8 @@ void G_SE40(int32_t smoothratio)
if (sect != -1) if (sect != -1)
{ {
int32_t renderz, picnum; int32_t renderz, picnum;
int16_t backupstat[numsectors]; int16_t backupstat[MAXSECTORS];
int32_t backupz[numsectors]; int32_t backupz[MAXSECTORS];
int32_t i; int32_t i;
int32_t pix_diff, newz; int32_t pix_diff, newz;
// initprintf("drawing ror\n"); // initprintf("drawing ror\n");

View file

@@ -2068,16 +2068,16 @@ static int32_t X_DoExecute(register int32_t once)
 if (tw == CON_ZSHOOT || tw == CON_EZSHOOT)
 {
-ActorExtra[vm.g_i].temp_data[9] = Gv_GetVarX(*insptr++);
-if (ActorExtra[vm.g_i].temp_data[9] == 0)
-ActorExtra[vm.g_i].temp_data[9] = 1;
+ActorExtra[vm.g_i].shootzvel = Gv_GetVarX(*insptr++);
+if (ActorExtra[vm.g_i].shootzvel == 0)
+ActorExtra[vm.g_i].shootzvel = 1;
 }
 if ((vm.g_sp->sectnum < 0 || vm.g_sp->sectnum >= numsectors) /* && g_scriptSanityChecks */)
 {
 OSD_Printf(CON_ERROR "Invalid sector %d\n",g_errorLineNum,keyw[g_tw],vm.g_sp->sectnum);
 insptr++;
-ActorExtra[vm.g_i].temp_data[9]=0;
+ActorExtra[vm.g_i].shootzvel=0;
 break;
 }
@@ -2086,7 +2086,7 @@ static int32_t X_DoExecute(register int32_t once)
 if (tw == CON_EZSHOOT || tw == CON_ESHOOT)
 aGameVars[g_iReturnVarID].val.lValue = j;
-ActorExtra[vm.g_i].temp_data[9]=0;
+ActorExtra[vm.g_i].shootzvel=0;
 }
 break;
@@ -2102,23 +2102,23 @@ static int32_t X_DoExecute(register int32_t once)
 if (tw == CON_ZSHOOTVAR || tw == CON_EZSHOOTVAR)
 {
-ActorExtra[vm.g_i].temp_data[9] = Gv_GetVarX(*insptr++);
-if (ActorExtra[vm.g_i].temp_data[9] == 0)
-ActorExtra[vm.g_i].temp_data[9] = 1;
+ActorExtra[vm.g_i].shootzvel = Gv_GetVarX(*insptr++);
+if (ActorExtra[vm.g_i].shootzvel == 0)
+ActorExtra[vm.g_i].shootzvel = 1;
 }
 j=Gv_GetVarX(*insptr++);
 if ((vm.g_sp->sectnum < 0 || vm.g_sp->sectnum >= numsectors) /* && g_scriptSanityChecks */)
 {
 OSD_Printf(CON_ERROR "Invalid sector %d\n",g_errorLineNum,keyw[g_tw],vm.g_sp->sectnum);
-ActorExtra[vm.g_i].temp_data[9]=0;
+ActorExtra[vm.g_i].shootzvel=0;
 break;
 }
 lReturn = A_Shoot(vm.g_i, j);
 if (tw == CON_ESHOOTVAR || tw == CON_EZSHOOTVAR)
 aGameVars[g_iReturnVarID].val.lValue = lReturn;
-ActorExtra[vm.g_i].temp_data[9]=0;
+ActorExtra[vm.g_i].shootzvel=0;
 break;
 }
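Across these zshoot/ezshoot/zshootvar/ezshootvar handlers the requested vertical velocity is now staged in shootzvel instead of temp_data[9]: the value is read from the script, an explicit 0 is bumped to 1 so that 0 keeps meaning "no override", and the field is cleared again after A_Shoot returns as well as on the invalid-sector error paths. A condensed sketch of that stage/shoot/clear protocol, with a hypothetical a_shoot_stub standing in for A_Shoot:

    #include <stdint.h>
    #include <stdio.h>

    static int32_t shootzvel;   /* per-actor override slot, as added by the diff */

    /* Stand-in for A_Shoot(): uses the override whenever it is non-zero. */
    static void a_shoot_stub(int32_t computed_zvel)
    {
        int32_t zvel = computed_zvel;
        if (shootzvel) zvel = shootzvel;     /* same override pattern the A_Shoot hunks use */
        printf("firing with zvel=%d\n", (int)zvel);
    }

    /* Sketch of a CON zshoot handler: stage, fire, clear. */
    static void con_zshoot_sketch(int32_t requested_zvel, int32_t computed_zvel)
    {
        shootzvel = requested_zvel;
        if (shootzvel == 0) shootzvel = 1;   /* 0 stays reserved for "no override" */
        a_shoot_stub(computed_zvel);
        shootzvel = 0;                       /* always cleared afterwards */
    }

    int main(void)
    {
        con_zshoot_sketch(4096, 100);   /* script asks for an explicit z-velocity */
        a_shoot_stub(100);              /* plain shoot: no override in effect */
        return 0;
    }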

View file

@@ -460,7 +460,7 @@ int32_t A_Shoot(int32_t i,int32_t atwith)
 }
 }
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 hitscan((const vec3_t *)&srcvect,sect,
 sintable[(sa+512)&2047],
 sintable[sa&2047],zvel<<6,
@@ -682,7 +682,7 @@ int32_t A_Shoot(int32_t i,int32_t atwith)
 if (!g_player[p].ps->auto_aim)
 {
 zvel = (100-g_player[p].ps->horiz-g_player[p].ps->horizoff)<<5;
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 hitscan((const vec3_t *)&srcvect,sect,sintable[(sa+512)&2047],sintable[sa&2047],
 zvel<<6,&hitinfo,CLIPMASK1);
 if (hitinfo.hitsprite != -1)
@@ -734,7 +734,7 @@ int32_t A_Shoot(int32_t i,int32_t atwith)
 if (ProjectileData[atwith].cstat >= 0) s->cstat &= ~ProjectileData[atwith].cstat;
 else s->cstat &= ~257;
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 hitscan((const vec3_t *)&srcvect,sect,
 sintable[(sa+512)&2047],
 sintable[sa&2047],
@@ -1038,7 +1038,7 @@ DOSKIPBULLETHOLE:
 sx+(sintable[(348+sa+512)&2047]/448),
 sy+(sintable[(sa+348)&2047]/448),
 sz-(1<<8),atwith,0,14,14,sa,vel,zvel,i,4);*/
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 j = A_InsertSprite(sect,
 srcvect.x+(sintable[(348+sa+512)&2047]/ProjectileData[atwith].offset),
 srcvect.y+(sintable[(sa+348)&2047]/ProjectileData[atwith].offset),
@@ -1115,7 +1115,7 @@ DOSKIPBULLETHOLE:
 }
 }
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 hitscan((const vec3_t *)&srcvect,sect,
 sintable[(sa+512)&2047],
 sintable[sa&2047],zvel<<6,
@@ -1270,7 +1270,7 @@ DOSKIPBULLETHOLE:
 if (!g_player[p].ps->auto_aim)
 {
 zvel = (100-g_player[p].ps->horiz-g_player[p].ps->horizoff)<<5;
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 hitscan((const vec3_t *)&srcvect,sect,sintable[(sa+512)&2047],sintable[sa&2047],
 zvel<<6,&hitinfo,CLIPMASK1);
 if (hitinfo.hitsprite != -1)
@@ -1321,7 +1321,7 @@ DOSKIPBULLETHOLE:
 }
 s->cstat &= ~257;
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 hitscan((const vec3_t *)&srcvect,sect,
 sintable[(sa+512)&2047],
 sintable[sa&2047],
@@ -1558,7 +1558,7 @@ SKIPBULLETHOLE:
 if (hitinfo.pos.x == 0) hitinfo.pos.x++;
 zvel = ((g_player[j].ps->oposz - srcvect.z + (3<<8))*vel) / hitinfo.pos.x;
 }
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 oldzvel = zvel;
 if (atwith == SPIT)
@@ -1681,7 +1681,7 @@ SKIPBULLETHOLE:
 if (p >= 0 && j >= 0)
 l = j;
 else l = -1;
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 j = A_InsertSprite(sect,
 srcvect.x+(sintable[(348+sa+512)&2047]/448),
 srcvect.y+(sintable[(sa+348)&2047]/448),
@@ -1766,7 +1766,7 @@ SKIPBULLETHOLE:
 if (p >= 0)
 zvel = (100-g_player[p].ps->horiz-g_player[p].ps->horizoff)*32;
 else zvel = 0;
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 srcvect.z -= g_player[p].ps->pyoff;
 hitscan((const vec3_t *)&srcvect,sect,
@@ -1830,7 +1830,7 @@ SKIPBULLETHOLE:
 if (zvel < -4096)
 zvel = -2048;
 vel = x>>4;
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 A_InsertSprite(sect,
 srcvect.x+(sintable[(512+sa+512)&2047]>>8),
 srcvect.y+(sintable[(sa+512)&2047]>>8),
@@ -1892,7 +1892,7 @@ SKIPBULLETHOLE:
 if (sect < 0) break;
 s->cstat &= ~257;
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 hitscan((const vec3_t *)&srcvect,sect,
 sintable[(sa+512)&2047],
 sintable[sa&2047],
@@ -1971,7 +1971,7 @@ SKIPBULLETHOLE:
 zvel = ((g_player[j].ps->oposz-srcvect.z)*512) / l ;
 }
 else zvel = 0;
-if (ActorExtra[i].temp_data[9]) zvel = ActorExtra[i].temp_data[9];
+if (ActorExtra[i].shootzvel) zvel = ActorExtra[i].shootzvel;
 j = A_InsertSprite(sect,
 srcvect.x+(sintable[(512+sa+512)&2047]>>12),
 srcvect.y+(sintable[(sa+512)&2047]>>12),