mirror of https://github.com/ZDoom/raze-gles.git
synced 2025-01-11 10:40:47 +00:00
update nedmalloc to r1116
git-svn-id: https://svn.eduke32.com/eduke32@1509 1a8010ca-5511-0410-912e-c29ae57300e0
This commit is contained in:
parent 5ea5f4bc9b
commit dcd7c3c9fe
4 changed files with 1275 additions and 942 deletions
@ -35,7 +35,9 @@
 #endif
 #endif

-#define REPLACE_SYSTEM_ALLOCATOR
+#define USE_ALLOCATOR 1
+#define REPLACE_SYSTEM_ALLOCATOR 1
+#define USE_MAGIC_HEADERS 1
 #include "nedmalloc.h"

 #ifndef TRUE
@ -375,7 +375,18 @@ malloc_getpagesize default: derive from system includes, or 4096.
   memory from the system in page-size units. This may be (and
   usually is) a function rather than a constant. This is ignored
   if WIN32, where page size is determined using getSystemInfo during
-  initialization.
+  initialization. This may be several megabytes if ENABLE_LARGE_PAGES
+  is enabled.
+
+ENABLE_LARGE_PAGES default: NOT defined
+  Causes the system page size to be the value of GetLargePageMinimum()
+  if that function is available (Windows Server 2003/Vista or later).
+  This allows the use of large page entries in the MMU which can
+  significantly improve performance in large working set applications
+  as TLB cache load is reduced by a factor of three. Note that enabling
+  this option is equal to locking the process' memory in current
+  implementations of Windows and requires the SE_LOCK_MEMORY_PRIVILEGE
+  to be held by the process in order to succeed.

 USE_DEV_RANDOM default: 0 (i.e., not used)
   Causes malloc to use /dev/random to initialize secure magic seed for
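For context only, not part of the patch: the SE_LOCK_MEMORY_PRIVILEGE requirement mentioned above means a process normally has to enable SeLockMemoryPrivilege on its own token before MEM_LARGE_PAGES allocations can succeed. A minimal sketch of that step, assuming an administrator has already granted the privilege to the account, might look like this:

/* Sketch only: enable SeLockMemoryPrivilege for the current process.
   Error handling is minimal; the surrounding policy setup is assumed. */
#include <windows.h>

static BOOL enable_lock_memory_privilege(void)
{
    HANDLE token;
    TOKEN_PRIVILEGES tp;
    BOOL ok = FALSE;
    if (!OpenProcessToken(GetCurrentProcess(),
                          TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token))
        return FALSE;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
    if (LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME,
                             &tp.Privileges[0].Luid)) {
        AdjustTokenPrivileges(token, FALSE, &tp, 0, NULL, NULL);
        /* AdjustTokenPrivileges can "succeed" without actually granting the
           privilege, so the last error has to be checked as well. */
        ok = (GetLastError() == ERROR_SUCCESS);
    }
    CloseHandle(token);
    return ok;
}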
@ -405,6 +416,7 @@ LACKS_STDLIB_H default: NOT defined unless on WIN32

 DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
                              system_info.dwAllocationGranularity in WIN32,
+                             GetLargePageMinimum() if ENABLE_LARGE_PAGES,
                              otherwise 64K.
   Also settable using mallopt(M_GRANULARITY, x)
   The unit for allocating and deallocating memory from the system. On
@ -418,6 +430,15 @@ DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
   versions of malloc, the equivalent of this option was called
   "TOP_PAD")

+DEFAULT_GRANULARITY_ALIGNED default: undefined (which means page size)
+  Whether to enforce alignment when allocating and deallocating memory
+  from the system i.e. the base address of all allocations will be
+  aligned to DEFAULT_GRANULARITY if it is set. Note that enabling this carries
+  some overhead as multiple calls must now be made when probing for a valid
+  aligned value, however it does greatly ease the checking for whether
+  a given memory pointer was allocated by this allocator rather than
+  some other.
+
 DEFAULT_TRIM_THRESHOLD default: 2MB
   Also settable using mallopt(M_TRIM_THRESHOLD, x)
   The maximum amount of unused top-most memory to keep before
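As an aside, not from the patch: the ownership check that DEFAULT_GRANULARITY_ALIGNED is said to ease boils down to masking a pointer down to the granularity boundary and inspecting what lives there. A rough sketch of the idea, with a hypothetical seg_magic field and marker value standing in for nedmalloc's real segment bookkeeping:

/* Sketch only: with granularity-aligned bases, a cheap mask yields the
   candidate segment start for "is this one of our pointers?" tests. */
#include <stddef.h>
#include <stdint.h>

#define GRANULARITY ((size_t)64 * 1024)   /* example value */
#define SEG_MAGIC   ((size_t)0x58585858)  /* hypothetical marker */

struct seg_header { size_t seg_magic; /* ... rest of the segment header ... */ };

static int looks_like_our_block(const void *p)
{
    /* Round down to the granularity boundary the segment must start on. */
    uintptr_t base = (uintptr_t)p & ~(uintptr_t)(GRANULARITY - 1);
    const struct seg_header *h = (const struct seg_header *)base;
    return h->seg_magic == SEG_MAGIC;     /* may still fault for foreign pointers */
}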
@ -497,6 +518,7 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
 #ifdef WIN32
 #define WIN32_LEAN_AND_MEAN
 #include <windows.h>
+#include <tchar.h>
 #define HAVE_MMAP 1
 #define HAVE_MORECORE 0
 #define LACKS_UNISTD_H
@ -1262,7 +1284,7 @@ int mspace_mallopt(int, int);
 #endif /* MSPACES */

 #ifdef __cplusplus
-}; /* end of extern "C" */
+}  /* end of extern "C" */
 #endif /* __cplusplus */

 /*
@ -1277,10 +1299,8 @@ int mspace_mallopt(int, int);

 /*------------------------------ internal #includes ---------------------- */

-#ifdef WIN32
-#ifndef __GNUC__
+#if defined(WIN32) && defined(_MSC_VER)
 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
-#endif
 #endif /* WIN32 */

 #include <stdio.h> /* for printing in malloc_stats */
@ -1516,6 +1536,29 @@ unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))

+/*
+  malloc_params holds global properties, including those that can be
+  dynamically set using mallopt. There is a single instance, mparams,
+  initialized in init_mparams. Note that the non-zeroness of "magic"
+  also serves as an initialization flag.
+*/
+
+typedef unsigned int flag_t; /* The type of various bit flag sets */
+
+struct malloc_params {
+  volatile size_t magic;
+  size_t page_size;
+  size_t granularity;
+  size_t mmap_threshold;
+  size_t trim_threshold;
+  flag_t default_mflags;
+};
+
+static struct malloc_params mparams;
+
+/* Ensure mparams initialized */
+#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
+
 /* -------------------------- MMAP preliminaries ------------------------- */

 /*
@ -1532,14 +1575,41 @@ unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
 #if HAVE_MMAP

 #ifndef WIN32
-#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
-#define MMAP_PROT (PROT_READ|PROT_WRITE)
 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
 #define MAP_ANONYMOUS MAP_ANON
 #endif /* MAP_ANON */
+#ifdef DEFAULT_GRANULARITY_ALIGNED
+#define MMAP_IMPL mmap_aligned
+static void* lastAlignedmmap; /* Used as a hint */
+static void* mmap_aligned(void *start, size_t length, int prot, int flags, int fd, off_t offset) {
+  void* baseaddress = 0;
+  void* ptr = 0;
+  if(!start) {
+    baseaddress = lastAlignedmmap;
+    for(;;) {
+      if(baseaddress) flags|=MAP_FIXED;
+      ptr = mmap(baseaddress, length, prot, flags, fd, offset);
+      if(!ptr)
+        baseaddress = (void*)((size_t)baseaddress + mparams.granularity);
+      else if((size_t)ptr & (mparams.granularity - SIZE_T_ONE)) {
+        munmap(ptr, length);
+        baseaddress = (void*)(((size_t)ptr + mparams.granularity) & ~(mparams.granularity - SIZE_T_ONE));
+      }
+      else break;
+    }
+  }
+  else ptr = mmap(start, length, prot, flags, fd, offset);
+  if(ptr) lastAlignedmmap = (void*)((size_t) ptr + mparams.granularity);
+  return ptr;
+}
+#else
+#define MMAP_IMPL mmap
+#endif /* DEFAULT_GRANULARITY_ALIGNED */
+#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
+#define MMAP_PROT (PROT_READ|PROT_WRITE)
 #ifdef MAP_ANONYMOUS
 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
-#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
+#define MMAP_DEFAULT(s) MMAP_IMPL(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
 #else /* MAP_ANONYMOUS */
 /*
    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
@ -1549,8 +1619,8 @@ unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
 #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
            (dev_zero_fd = open("/dev/zero", O_RDWR), \
-            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
-            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+            MMAP_IMPL(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+            MMAP_IMPL(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
 #endif /* MAP_ANONYMOUS */

 #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
@ -1558,8 +1628,51 @@ static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
 #else /* WIN32 */

 /* Win32 MMAP via VirtualAlloc */
+#ifdef DEFAULT_GRANULARITY_ALIGNED
+static void* lastWin32mmap; /* Used as a hint */
+#endif /* DEFAULT_GRANULARITY_ALIGNED */
+#ifdef ENABLE_LARGE_PAGES
+static int largepagesavailable = 1;
+#endif /* ENABLE_LARGE_PAGES */
 static FORCEINLINE void* win32mmap(size_t size) {
-  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+  void* baseaddress = 0;
+  void* ptr = 0;
+#ifdef ENABLE_LARGE_PAGES
+  /* Note that large pages are *always* allocated on a large page boundary.
+     If however granularity is small then don't waste a kernel call if size
+     isn't around the size of a large page */
+  if(largepagesavailable && size >= 1*1024*1024) {
+    ptr = VirtualAlloc(baseaddress, size, MEM_RESERVE|MEM_COMMIT|MEM_LARGE_PAGES, PAGE_READWRITE);
+    if(!ptr && ERROR_PRIVILEGE_NOT_HELD==GetLastError()) largepagesavailable=0;
+  }
+#endif
+  if(!ptr) {
+#ifdef DEFAULT_GRANULARITY_ALIGNED
+    /* We try to avoid overhead by speculatively reserving at aligned
+       addresses until we succeed */
+    baseaddress = lastWin32mmap;
+    for(;;) {
+      void* reserveaddr = VirtualAlloc(baseaddress, size, MEM_RESERVE, PAGE_READWRITE);
+      if(!reserveaddr)
+        baseaddress = (void*)((size_t)baseaddress + mparams.granularity);
+      else if((size_t)reserveaddr & (mparams.granularity - SIZE_T_ONE)) {
+        VirtualFree(reserveaddr, 0, MEM_RELEASE);
+        baseaddress = (void*)(((size_t)reserveaddr + mparams.granularity) & ~(mparams.granularity - SIZE_T_ONE));
+      }
+      else break;
+    }
+#endif
+    if(!ptr) ptr = VirtualAlloc(baseaddress, size, baseaddress ? MEM_COMMIT : MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+#if DEBUG
+    if(lastWin32mmap && ptr!=lastWin32mmap) printf("Non-contiguous VirtualAlloc between %p and %p\n", ptr, lastWin32mmap);
+#endif
+#ifdef DEFAULT_GRANULARITY_ALIGNED
+    if(ptr) lastWin32mmap = (void*)((size_t) ptr + mparams.granularity);
+#endif
+  }
+#if DEBUG
+  printf("VirtualAlloc returns %p size %u\n", ptr, size);
+#endif
   return (ptr != 0)? ptr: MFAIL;
 }

@ -1757,7 +1870,7 @@ static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
 #endif /* solaris */
   }
-    }
+  }
   }
 }

 static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
   volatile unsigned int* lp = &sl->l;
@ -1823,15 +1936,14 @@ static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
   int spins = 0;
   for (;;) {
     if (sl->l != 0) {
-      if (sl->threadid == (signed)CURRENT_THREAD) {
+      if (sl->threadid == CURRENT_THREAD) {
         ++sl->c;
         return 0;
       }
     }
     else {
       if (!interlockedexchange(&sl->l, 1)) {
         assert(!sl->threadid);
         sl->threadid = CURRENT_THREAD;
         sl->c = 1;
         return 0;
@ -1852,8 +1964,8 @@ static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
 }

 static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
-  if(sl->l != 0) {
-    if (sl->threadid == (signed)CURRENT_THREAD) {
+  if (sl->l != 0) {
+    if (sl->threadid == CURRENT_THREAD) {
       ++sl->c;
       return 1;
     }
@ -2120,7 +2232,6 @@ typedef struct malloc_chunk* mchunkptr;
 typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
 typedef unsigned int bindex_t;        /* Described below */
 typedef unsigned int binmap_t;        /* Described below */
-typedef unsigned int flag_t;          /* The type of various bit flag sets */

 /* ------------------- Chunks sizes and alignments ----------------------- */

@ -2513,10 +2624,10 @@ struct malloc_state {
   size_t   footprint;
   size_t   max_footprint;
   flag_t   mflags;
-  msegment seg;
 #if USE_LOCKS
   MLOCK_T  mutex;     /* locate lock among fields that rarely change */
 #endif /* USE_LOCKS */
+  msegment seg;
   void*    extp;      /* Unused but available for extensions */
   size_t   exts;
 };
@ -2525,27 +2636,6 @@ typedef struct malloc_state* mstate;

 /* ------------- Global malloc_state and malloc_params ------------------- */

-/*
-  malloc_params holds global properties, including those that can be
-  dynamically set using mallopt. There is a single instance, mparams,
-  initialized in init_mparams. Note that the non-zeroness of "magic"
-  also serves as an initialization flag.
-*/
-
-struct malloc_params {
-  volatile size_t magic;
-  size_t page_size;
-  size_t granularity;
-  size_t mmap_threshold;
-  size_t trim_threshold;
-  flag_t default_mflags;
-};
-
-static struct malloc_params mparams;
-
-/* Ensure mparams initialized */
-#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
-
 #if !ONLY_MSPACES

 /* The global malloc_state used for all non-"mspace" calls */
@ -2734,7 +2824,7 @@ static size_t traverse_and_check(mstate m);
 /* ---------------------------- Indexing Bins ---------------------------- */

 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
-#define small_index(s)      ((s) >> SMALLBIN_SHIFT)
+#define small_index(s)      (bindex_t)((s) >> SMALLBIN_SHIFT)
 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))

@ -2782,7 +2872,7 @@ static size_t traverse_and_check(mstate m);
     I = NTREEBINS-1;\
   else {\
     unsigned int K;\
-    _BitScanReverse((DWORD *) &K, X);\
+    _BitScanReverse((DWORD *) &K, (DWORD) X);\
     I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
   }\
 }
@ -3003,6 +3093,10 @@ static size_t traverse_and_check(mstate m);

 /* ---------------------------- setting mparams -------------------------- */

+#ifdef ENABLE_LARGE_PAGES
+typedef size_t (WINAPI *GetLargePageMinimum_t)(void);
+#endif
+
 /* Initialize mparams */
 static int init_mparams(void) {
 #ifdef NEED_GLOBAL_LOCK_INIT
@ -3026,6 +3120,20 @@ static int init_mparams(void) {
     psize = system_info.dwPageSize;
     gsize = ((DEFAULT_GRANULARITY != 0)?
              DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
+#ifdef ENABLE_LARGE_PAGES
+    {
+      GetLargePageMinimum_t GetLargePageMinimum_ = (GetLargePageMinimum_t) GetProcAddress(GetModuleHandle(__T("kernel32.dll")), "GetLargePageMinimum");
+      if(GetLargePageMinimum_) {
+        size_t largepagesize = GetLargePageMinimum_();
+        if(largepagesize) {
+          psize = largepagesize;
+          gsize = ((DEFAULT_GRANULARITY != 0)?
+                   DEFAULT_GRANULARITY : largepagesize);
+          if(gsize < largepagesize) gsize = largepagesize;
+        }
+      }
+    }
+#endif
   }
 #endif /* WIN32 */

@ -5465,6 +5573,7 @@ int mspace_mallopt(int param_number, int value) {

 #endif /* MSPACES */

+
 /* -------------------- Alternative MORECORE functions ------------------- */

 /*
@ -5753,4 +5862,3 @@ History:

 */

-
@ -1,5 +1,5 @@
 /* nedalloc, an alternative malloc implementation for multiple threads without
-lock contention based on dlmalloc v2.8.3. (C) 2005 Niall Douglas
+lock contention based on dlmalloc v2.8.3. (C) 2005-2009 Niall Douglas

 Boost Software License - Version 1.0 - August 17th, 2003

@ -29,8 +29,6 @@ DEALINGS IN THE SOFTWARE.
 #ifndef NEDMALLOC_H
 #define NEDMALLOC_H

-#define THREADCACHEMAX 65536
-#define THREADCACHEMAXFREESPACE (1024*1024*4)

 /* See malloc.c.h for what each function does.

@ -40,19 +38,34 @@ free etc. instead of nedmalloc, nedfree etc. You may or may not want this.
 NO_NED_NAMESPACE prevents the functions from being defined in the nedalloc
 namespace when in C++ (uses the global namespace instead).

-EXTSPEC can be defined to be __declspec(dllexport) or
+NEDMALLOCEXTSPEC can be defined to be __declspec(dllexport) or
 __attribute__ ((visibility("default"))) or whatever you like. It defaults
-to extern.
+to extern unless NEDMALLOC_DLL_EXPORTS is set as it would be when building
+nedmalloc.dll.

 USE_LOCKS can be 2 if you want to define your own MLOCK_T, INITIAL_LOCK,
 ACQUIRE_LOCK, RELEASE_LOCK, TRY_LOCK, IS_LOCKED and NULL_LOCK_INITIALIZER.

+USE_MAGIC_HEADERS causes nedalloc to allocate an extra three sizeof(size_t)
+to each block. nedpfree() and nedprealloc() can then automagically know when
+to free a system allocated block. Enabling this typically adds 20-50% to
+application memory usage.
+
+USE_ALLOCATOR can be one of these settings:
+  0: System allocator (nedmalloc now simply acts as a threadcache).
+     WARNING: Intended for DEBUG USE ONLY - not all functions work correctly.
+  1: dlmalloc
+
 */

 #include <stddef.h> /* for size_t */

-#ifndef EXTSPEC
-#define EXTSPEC extern
+#ifndef NEDMALLOCEXTSPEC
+#ifdef NEDMALLOC_DLL_EXPORTS
+#define NEDMALLOCEXTSPEC extern __declspec(dllexport)
+#else
+#define NEDMALLOCEXTSPEC extern
+#endif
 #endif

 #if defined(_MSC_VER) && _MSC_VER>=1400
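For illustration only, this is the general idea behind USE_MAGIC_HEADERS rather than nedmalloc's actual block layout: a wrapper allocator prepends a small header so its free routine can recognise foreign blocks. The header type, field names and magic value below are invented for the sketch; note that peeking just before a foreign pointer is exactly the kind of access the 6.25% segfault caveat in the nedblksize() comment further down refers to.

/* Sketch only: a "magic header" wrapper around the system allocator. */
#include <stdlib.h>
#include <string.h>

#define BLK_MAGIC ((size_t)0x9abacafe)

typedef struct { size_t magic; size_t size; size_t pad; } blk_header; /* 3 * sizeof(size_t) */

static void *wrapped_malloc(size_t size)
{
    blk_header *h = (blk_header *)malloc(sizeof(blk_header) + size);
    if (!h) return NULL;
    h->magic = BLK_MAGIC;
    h->size  = size;
    return h + 1;                      /* hand out the area after the header */
}

static void wrapped_free(void *p)
{
    blk_header *h;
    if (!p) return;
    h = (blk_header *)p - 1;
    if (h->magic == BLK_MAGIC) {       /* one of ours: strip the header */
        h->magic = 0;
        free(h);
    } else {
        free(p);                       /* assume it came from the system allocator */
    }
}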
@ -65,7 +78,23 @@ ACQUIRE_LOCK, RELEASE_LOCK, TRY_LOCK, IS_LOCKED and NULL_LOCK_INITIALIZER.
 #define NEDMALLOCPTRATTR
 #endif

+#ifndef USE_MAGIC_HEADERS
+#define USE_MAGIC_HEADERS 0
+#endif
+
+#ifndef USE_ALLOCATOR
+#define USE_ALLOCATOR 1 /* dlmalloc */
+#endif
+
+#if !USE_ALLOCATOR && !USE_MAGIC_HEADERS
+#error If you are using the system allocator then you MUST use magic headers
+#endif
+
 #ifdef REPLACE_SYSTEM_ALLOCATOR
+#if USE_ALLOCATOR==0
+#error Cannot combine using the system allocator with replacing the system allocator
+#endif
 #ifndef WIN32 /* We have a dedidicated patcher for Windows */
 #define nedmalloc malloc
 #define nedcalloc calloc
 #define nedrealloc realloc
@ -81,16 +110,12 @@ ACQUIRE_LOCK, RELEASE_LOCK, TRY_LOCK, IS_LOCKED and NULL_LOCK_INITIALIZER.
 #ifdef _MSC_VER
 #define nedblksize _msize
 #endif
 #endif
 #endif

-#ifndef _MSC_VER
-#ifndef UNREFERENCED_PARAMETER
-#define UNREFERENCED_PARAMETER(x) x=x
-#endif
-#endif

 #ifndef NO_MALLINFO
-#define NO_MALLINFO 0
+ #define NO_MALLINFO 0
 #endif

 #if !NO_MALLINFO
@ -117,33 +142,36 @@ extern "C" {
 /* These are the global functions */

 /* Gets the usable size of an allocated block. Note this will always be bigger than what was
-asked for due to rounding etc.
+asked for due to rounding etc. Tries to return zero if this is not a nedmalloc block (though
+one could see a segfault up to 6.25% of the time). On Win32 SEH is used to guarantee that a
+segfault never happens.
 */
-EXTSPEC size_t nedblksize(void *mem) THROWSPEC;
+NEDMALLOCEXTSPEC size_t nedblksize(void *mem) THROWSPEC;

-EXTSPEC void nedsetvalue(void *v) THROWSPEC;
+NEDMALLOCEXTSPEC void nedsetvalue(void *v) THROWSPEC;

-EXTSPEC NEDMALLOCPTRATTR void * nedmalloc(size_t size) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void * nedcalloc(size_t no, size_t size) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void * nedrealloc(void *mem, size_t size) THROWSPEC;
-EXTSPEC void nedfree(void *mem) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void * nedmalloc(size_t size) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void * nedcalloc(size_t no, size_t size) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void * nedrealloc(void *mem, size_t size) THROWSPEC;
+NEDMALLOCEXTSPEC void nedfree(void *mem) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC;
 #if !NO_MALLINFO
-EXTSPEC struct mallinfo nedmallinfo(void) THROWSPEC;
+NEDMALLOCEXTSPEC struct mallinfo nedmallinfo(void) THROWSPEC;
 #endif
-EXTSPEC int nedmallopt(int parno, int value) THROWSPEC;
-EXTSPEC int nedmalloc_trim(size_t pad) THROWSPEC;
-EXTSPEC void nedmalloc_stats(void) THROWSPEC;
-EXTSPEC size_t nedmalloc_footprint(void) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC;
+NEDMALLOCEXTSPEC int nedmallopt(int parno, int value) THROWSPEC;
+NEDMALLOCEXTSPEC void* nedmalloc_internals(size_t *granularity, size_t *magic) THROWSPEC;
+NEDMALLOCEXTSPEC int nedmalloc_trim(size_t pad) THROWSPEC;
+NEDMALLOCEXTSPEC void nedmalloc_stats(void) THROWSPEC;
+NEDMALLOCEXTSPEC size_t nedmalloc_footprint(void) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC;

 /* Destroys the system memory pool used by the functions above.
 Useful for when you have nedmalloc in a DLL you're about to unload.
 If you call ANY nedmalloc functions after calling this you will
 get a fatal exception!
 */
-EXTSPEC void neddestroysyspool(void) THROWSPEC;
+NEDMALLOCEXTSPEC void neddestroysyspool() THROWSPEC;

 /* These are the pool functions */
 struct nedpool_t;
@ -156,52 +184,50 @@ will *normally* be accessing the pool concurrently. Setting this to zero means i
 extends on demand, but be careful of this as it can rapidly consume system resources
 where bursts of concurrent threads use a pool at once.
 */
-EXTSPEC NEDMALLOCPTRATTR nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC;

 /* Destroys a memory pool previously created by nedcreatepool().
 */
-EXTSPEC void neddestroypool(nedpool *p) THROWSPEC;
+NEDMALLOCEXTSPEC void neddestroypool(nedpool *p) THROWSPEC;

 /* Sets a value to be associated with a pool. You can retrieve this value by passing
 any memory block allocated from that pool.
 */
-EXTSPEC void nedpsetvalue(nedpool *p, void *v) THROWSPEC;
+NEDMALLOCEXTSPEC void nedpsetvalue(nedpool *p, void *v) THROWSPEC;
 /* Gets a previously set value using nedpsetvalue() or zero if memory is unknown.
 Optionally can also retrieve pool.
 */
-EXTSPEC void *nedgetvalue(nedpool **p, void *mem) THROWSPEC;
+NEDMALLOCEXTSPEC void *nedgetvalue(nedpool **p, void *mem) THROWSPEC;

 /* Trims the thread cache for the calling thread, returning any existing cache
 data to the central pool. Remember to ALWAYS call with zero if you used the
 system pool. Setting disable to non-zero replicates neddisablethreadcache().
 */
-EXTSPEC void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC;
+NEDMALLOCEXTSPEC void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC;

 /* Disables the thread cache for the calling thread, returning any existing cache
 data to the central pool. Remember to ALWAYS call with zero if you used the
 system pool.
 */
-EXTSPEC void neddisablethreadcache(nedpool *p) THROWSPEC;
+NEDMALLOCEXTSPEC void neddisablethreadcache(nedpool *p) THROWSPEC;

-EXTSPEC NEDMALLOCPTRATTR void * nedpmalloc(nedpool *p, size_t size) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC;
-EXTSPEC void nedpfree(nedpool *p, void *mem) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void * nedpmalloc(nedpool *p, size_t size) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC;
+NEDMALLOCEXTSPEC void nedpfree(nedpool *p, void *mem) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC;
 #if !NO_MALLINFO
-EXTSPEC struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC;
+NEDMALLOCEXTSPEC struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC;
 #endif
-EXTSPEC int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC;
-EXTSPEC int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC;
-EXTSPEC void nedpmalloc_stats(nedpool *p) THROWSPEC;
-EXTSPEC size_t nedpmalloc_footprint(nedpool *p) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC;
-EXTSPEC NEDMALLOCPTRATTR void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC;
+NEDMALLOCEXTSPEC int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC;
+NEDMALLOCEXTSPEC int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC;
+NEDMALLOCEXTSPEC void nedpmalloc_stats(nedpool *p) THROWSPEC;
+NEDMALLOCEXTSPEC size_t nedpmalloc_footprint(nedpool *p) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC;
+NEDMALLOCEXTSPEC NEDMALLOCPTRATTR void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC;

 #if defined(__cplusplus)
 }
 #endif

-#undef EXTSPEC
-
 #endif
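A minimal usage sketch of the pool API declared above (not part of the diff; it assumes the defaults this commit sets, USE_ALLOCATOR 1 and USE_MAGIC_HEADERS 1, and that passing zero to nedcreatepool() means "size and extend on demand" as the header comment describes):

/* Sketch only: create a pool, allocate from it, trim the thread cache,
   then tear the pool down. */
#include <string.h>
#include "nedmalloc.h"

int main(void)
{
    nedpool *pool = nedcreatepool(0, 0);   /* 0,0: use defaults, extend on demand */
    char *buf;
    if (!pool) return 1;

    buf = (char *) nedpmalloc(pool, 256);
    if (buf) {
        strcpy(buf, "hello from a nedmalloc pool");
        buf = (char *) nedprealloc(pool, buf, 512);   /* may grow in place */
    }
    nedpfree(pool, buf);

    /* Return this thread's cached blocks to the pool before destroying it. */
    nedtrimthreadcache(pool, 0);
    neddestroypool(pool);
    return 0;
}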
File diff suppressed because it is too large