/* Zone memory management. -*- Mode: ObjC -*-
   Copyright (C) 1997 Free Software Foundation, Inc.

   Written by: Yoo C. Chung <wacko@laplace.snu.ac.kr>
   Date: January 1997

   This file is part of the GNUstep Base Library.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License
   as published by the Free Software Foundation; either version 2 of
   the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */


/* Design goals:

   - Allocation and deallocation should be reasonably efficient.

   - Finding the zone containing a given pointer should be reasonably
   efficient, since objects in Objective-C use that information to
   deallocate themselves. */

/* Actual design:

   - Every memory chunk allocated in a zone is preceded by a pointer to
   the zone.  This makes locating the zone containing the memory chunk
   extremely fast.  However, this creates an additional 4 byte
   overhead for 32 bit machines (8 bytes on 64 bit machines!).

   - The default zone uses objc_malloc() and friends.  We assume that
   they're thread safe and that they return NULL if we're out of
   memory (they currently don't, unfortunately, so this is a FIXME).
   We also need to prepend a zone pointer.  And because of this, we
   need to waste even more space to satisfy alignment requirements.

   - For freeable zones, a small linear buffer is used for
   deallocating and allocating.  Anything that can't go into the
   buffer then uses a more general purpose segregated fit algorithm
   after flushing the buffer.

   - For memory chunks in freeable zones, the pointer to the zone is
   preceded by the size, which also contains other information for
   boundary tags.  This adds 4 bytes for freeable zones, for a total
   of a minimum of 8 byte overhead for every memory chunk in the zone
   (assuming we're on a 32 bit machine).

   - For nonfreeable zones, worst-like fit is used.  This is OK since
   we don't have to worry about memory fragmentation. */
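
/* A rough picture of a chunk in a freeable zone (assuming 4 byte
   size_t and pointers, as on a typical 32 bit machine):

       chunk head ->   +------------------------+
                       | size | PREVUSE | INUSE |   (size_t boundary tag)
                       +------------------------+
                       | NSZone*                |   (owning zone)
                       +------------------------+
       user pointer -> | user data ...          |
                       +------------------------+

   NSZoneFromPointer() only has to step back one zone pointer from the
   user pointer, which is why the lookup is fast. */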

/* Other information:

   - This uses some GCC specific extensions.  But since the library is
   supposed to compile on GCC 2.7.2.1 (patched) or higher, and the
   only other Objective-C compiler I know of (other than NeXT's, which
   is based on GCC as far as I know) is the StepStone compiler (and I
   haven't the foggiest idea why anyone would prefer that to GCC ;),
   it should be OK.

   - We cannot interchangeably use malloc() and friends (or
   objc_malloc() and friends) for memory allocated from zones if we
   want a fast NSZoneFromPointer(), since we would have to search all
   the zones to see if they contained the pointer.  We could
   accomplish this if we abandoned the current scheme of finding zone
   pointers and used a centralized table, which would also probably
   save space, though it would be slower.

   - If a garbage collecting malloc is used for objc_malloc(), then
   that garbage collector must be able to mark from interior pointers,
   since the actual memory returned to the user in the default zone is
   offset from the memory returned from objc_malloc().

   - These functions should be thread safe, but I haven't really
   tested them extensively in multithreaded cases. */

/* Define to turn off assertions. */
#define NDEBUG

#include <gnustep/base/preface.h>
#include <assert.h>
#include <stddef.h>
#include <string.h>
#include <objc/objc-api.h>
#include <Foundation/NSException.h>
#include <Foundation/NSPage.h>
#include <Foundation/NSString.h>
#include <Foundation/NSZone.h>

#define ALIGN 8 /* Alignment.  FIXME: Make this portable. */
#define MINGRAN 256 /* Minimum granularity. */
#define DEFBLOCK 16384 /* Default granularity. */
#define BUFFER 4 /* Buffer size.  FIXME: Find reasonable optimum. */
#define MAX_SEG 16 /* Segregated list size. */
#define ZPTRSZ sizeof(NSZone*) /* Size of zone pointers. */
#define SZSZ sizeof(size_t) /* Size of size_t. */

/* Information bits in size. */
#define INUSE 0x01 /* Current chunk in use. */
#define PREVUSE 0x02 /* Previous chunk in use. */

/* Bits to mask off to get size. */
#define SIZE_BITS (INUSE | PREVUSE)

/* Minimum chunk size for freeable zones. */
#define MINCHUNK roundupto(2*(SZSZ+ZPTRSZ), ALIGN)

/* Size of block headers in freeable zones. */
#define FF_HEAD (roundupto(sizeof(ff_block)+ZPTRSZ+SZSZ, MINCHUNK)-ZPTRSZ-SZSZ)

/* Size of block headers in nonfreeable zones. */
#define NF_HEAD (roundupto(sizeof(nf_block)+ZPTRSZ, ALIGN)-ZPTRSZ)

#define CLTOSZ(n) ((n)*MINCHUNK) /* Converts classes to sizes. */
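
/* For illustration only: on a machine with 4 byte pointers and a
   4 byte size_t (an assumption, not something the code requires),
   these work out to MINCHUNK == 16, FF_HEAD == 8 and NF_HEAD == 12,
   so the smallest freeable chunk carries 8 bytes of header plus
   8 bytes of usable space. */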
|
1997-01-06 21:35:52 +00:00
|
|
|
|
1997-03-03 19:56:37 +00:00
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
typedef struct _ffree_free_link ff_link;
|
|
|
|
typedef struct _nfree_block_struct nf_block;
|
|
|
|
typedef struct _ffree_block_struct ff_block;
|
1997-03-03 19:46:52 +00:00
|
|
|
typedef struct _ffree_zone_struct ffree_zone;
|
|
|
|
typedef struct _nfree_zone_struct nfree_zone;
|
1997-01-06 21:35:52 +00:00
|
|
|
|
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
/* Links for free lists. */
|
|
|
|
struct _ffree_free_link
|
1997-01-06 21:35:52 +00:00
|
|
|
{
|
1997-03-03 19:43:25 +00:00
|
|
|
size_t *prev, *next;
|
|
|
|
};
|
1996-03-22 01:26:22 +00:00
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
/* Header for blocks in nonfreeable zones. */
|
|
|
|
struct _nfree_block_struct
|
1997-01-06 21:35:52 +00:00
|
|
|
{
|
1997-03-03 19:43:25 +00:00
|
|
|
struct _nfree_block_struct *next;
|
|
|
|
size_t size; // Size of block
|
|
|
|
size_t top; // Position of next memory chunk to allocate
|
|
|
|
};
|
1996-03-22 01:26:22 +00:00
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
/* Header for blocks in freeable zones. */
|
|
|
|
struct _ffree_block_struct
|
1997-01-06 21:35:52 +00:00
|
|
|
{
|
1997-03-03 19:43:25 +00:00
|
|
|
struct _ffree_block_struct *next;
|
|
|
|
size_t size;
|
|
|
|
};
|
1996-03-22 01:26:22 +00:00
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
/* NSZone structure for freeable zones. */
|
1997-03-03 19:46:52 +00:00
|
|
|
struct _ffree_zone_struct
|
1997-01-06 21:35:52 +00:00
|
|
|
{
|
1997-03-03 19:43:25 +00:00
|
|
|
NSZone common;
|
1997-03-03 19:56:37 +00:00
|
|
|
objc_mutex_t lock;
|
1997-03-03 19:43:25 +00:00
|
|
|
ff_block *blocks; // Linked list of blocks
|
|
|
|
size_t *segheadlist[MAX_SEG]; // Segregated list, holds heads
|
|
|
|
size_t *segtaillist[MAX_SEG]; // Segregated list, holds tails
|
|
|
|
size_t bufsize; // Buffer size
|
|
|
|
size_t size_buf[BUFFER]; // Buffer holding sizes
|
|
|
|
size_t *ptr_buf[BUFFER]; // Buffer holding pointers to chunks
|
|
|
|
};
|
1996-03-22 01:26:22 +00:00
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
/* NSZone structure for nonfreeable zones. */
|
1997-03-03 19:46:52 +00:00
|
|
|
struct _nfree_zone_struct
|
1997-01-06 21:35:52 +00:00
|
|
|
{
|
1997-03-03 19:43:25 +00:00
|
|
|
NSZone common;
|
1997-03-03 19:56:37 +00:00
|
|
|
objc_mutex_t lock;
|
1997-03-03 19:43:25 +00:00
|
|
|
/* Linked list of blocks in decreasing order of free space,
|
|
|
|
except maybe for the first block. */
|
|
|
|
nf_block *blocks;
|
|
|
|
};
|
1997-01-06 21:35:52 +00:00
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
|
|
|
|
/* Rounds up N to nearest multiple of BASE. */
|
|
|
|
static inline size_t roundupto (size_t n, size_t base);
|
|
|
|
|
1997-03-03 19:56:37 +00:00
|
|
|
/* Default zone functions for default zone. */
|
|
|
|
static void* default_malloc (NSZone *zone, size_t size);
|
|
|
|
static void* default_realloc (NSZone *zone, void *ptr, size_t size);
|
|
|
|
static void default_free (NSZone *zone, void *ptr);
|
|
|
|
static void default_recycle (NSZone *zone);
|
1997-03-03 19:58:17 +00:00
|
|
|
static BOOL default_check (NSZone *zone);
|
|
|
|
static struct NSZoneStats default_stats (NSZone *zone);
|
1997-03-03 19:43:25 +00:00
|
|
|
|
|
|
|
/* Memory management functions for freeable zones. */
|
|
|
|
static void* fmalloc (NSZone *zone, size_t size);
|
|
|
|
static void* frealloc (NSZone *zone, void *ptr, size_t size);
|
|
|
|
static void ffree (NSZone *zone, void *ptr);
|
|
|
|
static void frecycle (NSZone *zone);
|
1997-03-03 19:58:17 +00:00
|
|
|
static BOOL fcheck (NSZone *zone);
|
|
|
|
static struct NSZoneStats fstats (NSZone *zone);
|
1997-03-03 19:43:25 +00:00
|
|
|
|
|
|
|
static inline size_t segindex (size_t size);
|
1997-03-03 19:46:52 +00:00
|
|
|
static void* get_chunk (ffree_zone *zone, size_t size);
|
|
|
|
static void take_chunk (ffree_zone *zone, size_t *chunk);
|
|
|
|
static void put_chunk (ffree_zone *zone, size_t *chunk);
|
|
|
|
static inline void add_buf (ffree_zone *zone, size_t *chunk);
|
|
|
|
static void flush_buf (ffree_zone *zone);
|
1997-03-03 19:43:25 +00:00
|
|
|
|
|
|
|
/* Memory management functions for nonfreeable zones. */
|
|
|
|
static void* nmalloc (NSZone *zone, size_t size);
|
|
|
|
static void nrecycle (NSZone *zone);
|
1997-03-03 19:56:37 +00:00
|
|
|
static void* nrealloc (NSZone *zone, void *ptr, size_t size);
|
|
|
|
static void nfree (NSZone *zone, void *ptr);
|
1997-03-03 19:58:17 +00:00
|
|
|
static BOOL ncheck (NSZone *zone);
|
|
|
|
static struct NSZoneStats nstats (NSZone *zone);
|
1997-03-03 19:43:25 +00:00
|
|
|
|
|
|
|
|
1997-03-03 19:56:37 +00:00
|
|
|
static NSZone default_zone =
|
|
|
|
{
|
|
|
|
default_malloc, default_realloc, default_free, default_recycle,
|
1997-03-03 19:58:17 +00:00
|
|
|
default_check, default_stats, DEFBLOCK, @"default"
|
1997-03-03 19:43:25 +00:00
|
|
|
};
|
1997-01-06 21:35:52 +00:00
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
/* Default zone. Name is hopelessly long so that no one will ever
|
|
|
|
want to use it. ;) */
|
1997-03-03 19:56:37 +00:00
|
|
|
NSZone* __nszone_private_hidden_default_zone = &default_zone;
|
1997-01-06 21:35:52 +00:00
|
|
|
|
1997-03-03 19:56:37 +00:00
|
|
|
|
|
|
|
static inline size_t
|
|
|
|
roundupto (size_t n, size_t base)
|
1997-01-06 21:35:52 +00:00
|
|
|
{
|
1997-03-03 19:56:37 +00:00
|
|
|
size_t a = (n/base)*base;
|
|
|
|
|
|
|
|
return (n-a)? (a+base): n;
|
1997-01-06 21:35:52 +00:00
|
|
|
}
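
/* For example, roundupto(10, 8) yields 16, while roundupto(16, 8)
   yields 16, i.e. rounding an already aligned value is a no-op. */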

static void*
default_malloc (NSZone *zone, size_t size)
{
  void *mem;
  NSZone **zoneptr;

  mem = objc_malloc(ALIGN+size);
  if (mem == NULL)
    [NSException raise: NSMallocException
                 format: @"Default zone has run out of memory"];
  zoneptr = mem+(ALIGN-ZPTRSZ);
  *zoneptr = zone;
  return mem+ALIGN;
}
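
/* The resulting layout of a default zone allocation (assuming the
   8 byte ALIGN and 4 byte pointers of a typical 32 bit machine) is:

       mem          mem+ALIGN-ZPTRSZ     mem+ALIGN
        | padding ... | NSZone*           | user data ...

   so the zone pointer always sits immediately before the pointer
   handed back to the caller, as NSZoneFromPointer() expects. */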

static void*
default_realloc (NSZone *zone, void *ptr, size_t size)
{
  void *mem = ptr-ALIGN;

  if (size == 0)
    {
      objc_free(mem);
      return NULL;
    }
  mem = objc_realloc(mem, size+ALIGN);
  if (mem == NULL)
    [NSException raise: NSMallocException
                 format: @"Default zone has run out of memory"];
  return mem+ALIGN;
}

static void
default_free (NSZone *zone, void *ptr)
{
  objc_free(ptr-ALIGN);
}

static void
default_recycle (NSZone *zone)
{
  /* Recycle the default zone?  Thou hast got to be kiddin'. */
  [NSException raise: NSGenericException
               format: @"Trying to recycle default zone"];
}

static BOOL
default_check (NSZone *zone)
{
  /* We can't check memory managed by objc_malloc(). */
  [NSException raise: NSGenericException format: @"Not implemented"];
  return NO;
}

static struct NSZoneStats
default_stats (NSZone *zone)
{
  struct NSZoneStats dummy;

  /* We can't obtain statistics from the memory managed by objc_malloc(). */
  [NSException raise: NSGenericException format: @"Not implemented"];
  return dummy;
}

/* Search the buffer to see if there are any memory chunks large enough
   to satisfy the request using first fit.  If the memory chunk found
   has a size exactly equal to the one requested, remove it from the
   buffer and return it.  If not, cut off a chunk that does match the
   size and return it.  If there is no chunk large enough in the
   buffer, get a chunk from the general purpose allocator that uses
   segregated fit.  Since a chunk in the buffer is not freed in the
   general purpose allocator, the headers are as if it is still in
   use. */
static void*
fmalloc (NSZone *zone, size_t size)
{
  size_t i = 0;
  size_t chunksize = roundupto(size+SZSZ+ZPTRSZ, MINCHUNK);
  ffree_zone *zptr = (ffree_zone*)zone;
  size_t bufsize;
  size_t *size_buf = zptr->size_buf;
  size_t **ptr_buf = zptr->ptr_buf;
  size_t *chunkhead;

  if (size == 0)
    return NULL;
  objc_mutex_lock(zptr->lock);
  bufsize = zptr->bufsize;
  while ((i < bufsize) && (chunksize > size_buf[i]))
    i++;
  if (i < bufsize)
    /* Use memory chunk in buffer. */
    {
      if (size_buf[i] == chunksize)
        /* Exact fit. */
        {
          zptr->bufsize--;
          bufsize = zptr->bufsize;
          chunkhead = ptr_buf[i];
          size_buf[i] = size_buf[bufsize];
          ptr_buf[i] = ptr_buf[bufsize];

          assert(*chunkhead & INUSE);
          assert((*chunkhead & ~SIZE_BITS)%MINCHUNK == 0);
        }
      else
        /* Break off chunk. */
        {
          NSZone **zoneptr; // Pointer to zone preceding memory chunk

          chunkhead = ptr_buf[i];

          assert(*chunkhead & INUSE);
          assert((*chunkhead & ~SIZE_BITS)%MINCHUNK == 0);
          assert(chunksize < size_buf[i]);

          size_buf[i] -= chunksize;
          ptr_buf[i] = (void*)chunkhead+chunksize;
          *(ptr_buf[i]) = size_buf[i] | PREVUSE | INUSE;
          zoneptr = (NSZone**)(ptr_buf[i]+1);
          *zoneptr = zone;
          *chunkhead = chunksize | (*chunkhead & PREVUSE) | INUSE;
        }
    }
  else
    /* Get memory from segregated fit allocator. */
    {
      flush_buf(zptr);
      chunkhead = get_chunk(zptr, chunksize);
      if (chunkhead == NULL)
        {
          objc_mutex_unlock(zptr->lock);
          if (zone->name != nil)
            [NSException raise: NSMallocException
                         format: @"Zone %s has run out of memory",
                         [zone->name cStringNoCopy]];
          else
            [NSException raise: NSMallocException
                         format: @"Out of memory"];
        }

      assert(*chunkhead & INUSE);
      assert((*chunkhead & ~SIZE_BITS)%MINCHUNK == 0);
    }
  objc_mutex_unlock(zptr->lock);
  return (void*)chunkhead+(SZSZ+ZPTRSZ);
}

/* If PTR == NULL, then it's the same as ordinary memory allocation.
   If a smaller size than it originally had is requested, shrink the
   chunk.  If a larger size is requested, check if there is enough
   space after it.  If there isn't enough space, get a new chunk and
   move it there, releasing the original.  The space before the chunk
   should also be checked, but I'll leave this to a later date. */
static void*
frealloc (NSZone *zone, void *ptr, size_t size)
{
  size_t realsize;
  size_t chunksize = roundupto(size+SZSZ+ZPTRSZ, MINCHUNK);
  ffree_zone *zptr = (ffree_zone*)zone;
  size_t *chunkhead, *slack;
  NSZone **zoneptr; // Zone pointer preceding memory chunk.

  if (size == 0)
    {
      ffree(zone, ptr);
      return NULL;
    }
  if (ptr == NULL)
    return fmalloc(zone, size);
  chunkhead = ptr-(SZSZ+ZPTRSZ);
  objc_mutex_lock(zptr->lock);
  realsize = *chunkhead & ~SIZE_BITS;

  assert(*chunkhead & INUSE);
  assert(realsize%MINCHUNK == 0);

  if (chunksize < realsize)
    /* Make chunk smaller. */
    {
      slack = (void*)chunkhead+chunksize;
      *slack = (realsize-chunksize) | PREVUSE | INUSE;
      zoneptr = (NSZone**)(slack+1);
      *zoneptr = zone;
      add_buf(zptr, slack);
      *chunkhead = chunksize | (*chunkhead & PREVUSE) | INUSE;
    }
  else if (chunksize > realsize)
    {
      size_t nextsize;
      size_t *nextchunk, *farchunk;

      nextchunk = (void*)chunkhead+realsize;
      nextsize = *nextchunk & ~SIZE_BITS;

      assert(nextsize%MINCHUNK == 0);

      if (!(*nextchunk & INUSE) && (nextsize+realsize >= chunksize))
        /* Expand to next chunk. */
        {
          take_chunk(zptr, nextchunk);
          if (nextsize+realsize == chunksize)
            {
              farchunk = (void*)nextchunk+nextsize;
              *farchunk |= PREVUSE;
            }
          else
            {
              slack = (void*)chunkhead+chunksize;
              *slack = ((nextsize+realsize)-chunksize) | PREVUSE;
              put_chunk(zptr, slack);
            }
          *chunkhead = chunksize | (*chunkhead & PREVUSE) | INUSE;
        }
      else
        /* Get new chunk and copy. */
        {
          size_t *newchunk;

          newchunk = get_chunk(zptr, chunksize);
          if (newchunk == NULL)
            {
              objc_mutex_unlock(zptr->lock);
              if (zone->name != nil)
                [NSException raise: NSMallocException
                             format: @"Zone %s has run out of memory",
                             [zone->name cStringNoCopy]];
              else
                [NSException raise: NSMallocException
                             format: @"Out of memory"];
            }
          memcpy((void*)newchunk+SZSZ+ZPTRSZ, (void*)chunkhead+SZSZ+ZPTRSZ,
                 realsize-SZSZ-ZPTRSZ);
          add_buf(zptr, chunkhead);
          chunkhead = newchunk;
        }
      /* FIXME: consider other cases where we can get more memory. */
    }
  objc_mutex_unlock(zptr->lock);
  return (void*)chunkhead+(SZSZ+ZPTRSZ);
}

/* Frees memory chunk by simply adding it to the buffer. */
static void
ffree (NSZone *zone, void *ptr)
{
  objc_mutex_lock(((ffree_zone*)zone)->lock);
  add_buf((ffree_zone*)zone, ptr-(SZSZ+ZPTRSZ));
  objc_mutex_unlock(((ffree_zone*)zone)->lock);
}

/* Recycle the zone.  According to OpenStep, we need to return live
   objects to the default zone, but there is no easy way to return
   them, especially since the default zone may have been customized.
   So not returning memory to the default zone is a feature, not a
   bug (or so I think). */
static void
frecycle (NSZone *zone)
{
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *block = zptr->blocks;
  ff_block *nextblock;

  objc_mutex_deallocate(zptr->lock);
  while (block != NULL)
    {
      nextblock = block->next;
      objc_free(block);
      block = nextblock;
    }
  if (zone->name != nil)
    [zone->name release];
  objc_free(zptr);
}

/* Check integrity of a freeable zone.  Doesn't have to be
   particularly efficient. */
static BOOL
fcheck (NSZone *zone)
{
  size_t i;
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *block;
  size_t *chunk;

  objc_mutex_lock(zptr->lock);
  /* Check integrity of each block the zone owns. */
  block = zptr->blocks;
  while (block != NULL)
    {
      size_t blocksize, pos;
      size_t *nextchunk;

      blocksize = block->size;
      pos = FF_HEAD;
      while (pos < blocksize-(SZSZ+ZPTRSZ))
        {
          size_t chunksize;

          chunk = (void*)block+pos;
          chunksize = *chunk & ~SIZE_BITS;
          nextchunk = (void*)chunk+chunksize;
          if (*chunk & INUSE)
            /* Check whether this is a valid used chunk. */
            {
              NSZone **zoneptr;

              zoneptr = (NSZone**)(chunk+1);
              if ((*zoneptr != zone) || !(*nextchunk & PREVUSE))
                goto inconsistent;
            }
          else
            /* Check whether this is a valid free chunk. */
            {
              size_t *footer;

              footer = nextchunk-1;
              if ((*footer != chunksize) || (*nextchunk & PREVUSE))
                goto inconsistent;
            }
          pos += chunksize;
        }
      chunk = (void*)block+pos;
      /* Check whether the block ends properly. */
      if (((*chunk & ~SIZE_BITS) != 0) || !(*chunk & INUSE))
        goto inconsistent;
      block = block->next;
    }
  /* Check the integrity of the segregated list. */
  for (i = 0; i < MAX_SEG; i++)
    {
      chunk = zptr->segheadlist[i];
      while (chunk != NULL)
        {
          size_t *nextchunk;

          nextchunk = ((ff_link*)(chunk+1))->next;
          /* Isn't this one ugly if statement? */
          if ((*chunk & INUSE)
              || (segindex(*chunk & ~SIZE_BITS) != i)
              || ((nextchunk != NULL)
                  && (chunk != ((ff_link*)(nextchunk+1))->prev))
              || ((nextchunk == NULL) && (chunk != zptr->segtaillist[i])))
            goto inconsistent;
          chunk = nextchunk;
        }
    }
  /* Check the buffer. */
  if (zptr->bufsize >= BUFFER)
    goto inconsistent;
  for (i = 0; i < zptr->bufsize; i++)
    {
      chunk = zptr->ptr_buf[i];
      if ((zptr->size_buf[i] != (*chunk & ~SIZE_BITS)) || !(*chunk & INUSE))
        goto inconsistent;
    }
  objc_mutex_unlock(zptr->lock);
  return YES;

inconsistent: // Jump here if an inconsistency was found.
  objc_mutex_unlock(zptr->lock);
  return NO;
}

/* Obtain statistics about the zone.  Doesn't have to be particularly
   efficient. */
static struct NSZoneStats
fstats (NSZone *zone)
{
  size_t i;
  struct NSZoneStats stats;
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *block;

  stats.bytes_total = 0;
  stats.chunks_used = 0;
  stats.bytes_used = 0;
  stats.chunks_free = 0;
  stats.bytes_free = 0;
  objc_mutex_lock(zptr->lock);
  block = zptr->blocks;
  /* Go through each block. */
  while (block != NULL)
    {
      size_t blocksize;
      size_t *chunk;

      blocksize = block->size;
      stats.bytes_total += blocksize;
      chunk = (void*)block+FF_HEAD;
      while ((void*)chunk < (void*)block+(blocksize-ZPTRSZ-SZSZ))
        {
          size_t chunksize;

          chunksize = *chunk & ~SIZE_BITS;
          if (*chunk & INUSE)
            {
              stats.chunks_used++;
              stats.bytes_used += chunksize;
            }
          else
            {
              stats.chunks_free++;
              stats.bytes_free += chunksize;
            }
          chunk = (void*)chunk+chunksize;
        }
      block = block->next;
    }
  /* Go through buffer. */
  for (i = 0; i < zptr->bufsize; i++)
    {
      stats.chunks_used--;
      stats.chunks_free++;
      stats.bytes_used -= zptr->size_buf[i];
      stats.bytes_free += zptr->size_buf[i];
    }
  objc_mutex_unlock(zptr->lock);
  /* Remove overhead. */
  stats.bytes_used -= (SZSZ+ZPTRSZ)*stats.chunks_used;
  return stats;
}

/* Calculate which segregation class a certain size should be in.
   FIXME: Optimize code and find a better distribution. */
static inline size_t
segindex (size_t size)
{
  assert(size%MINCHUNK == 0);

  if (size < CLTOSZ(8))
    return size/MINCHUNK;
  else if (size < CLTOSZ(16))
    return 7;
  else if (size < CLTOSZ(32))
    return 8;
  else if (size < CLTOSZ(64))
    return 9;
  else if (size < CLTOSZ(128))
    return 10;
  else if (size < CLTOSZ(256))
    return 11;
  else if (size < CLTOSZ(512))
    return 12;
  else if (size < CLTOSZ(1024))
    return 13;
  else if (size < CLTOSZ(2048))
    return 14;
  else
    return 15;
}
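
/* In other words, sizes below 8*MINCHUNK map directly to the class
   size/MINCHUNK, and above that each successive class covers a size
   range roughly twice as wide as the previous one, with class 15
   catching everything of 2048*MINCHUNK and above. */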

/* Look through the segregated list with first fit to find a memory
   chunk.  If one is not found, get more memory. */
static void*
get_chunk (ffree_zone *zone, size_t size)
{
  size_t class = segindex(size);
  size_t *chunk = zone->segheadlist[class];
  NSZone **zoneptr; // Zone pointer preceding memory chunk

  assert(size%MINCHUNK == 0);

  while ((chunk != NULL) && ((*chunk & ~SIZE_BITS) < size))
    chunk = ((ff_link*)(chunk+1))->next;
  if (chunk == NULL)
    /* Get more memory. */
    {
      class++;
      while ((class < MAX_SEG) && (zone->segheadlist[class] == NULL))
        class++;
      if (class == MAX_SEG)
        /* Absolutely no memory in segregated list. */
        {
          size_t blocksize;
          ff_block *block;

          blocksize = roundupto(size+FF_HEAD+SZSZ+ZPTRSZ, zone->common.gran);
          block = objc_malloc(blocksize);
          if (block == NULL)
            return NULL;

          block->size = blocksize;
          block->next = zone->blocks;
          zone->blocks = block;
          chunk = (void*)block+(blocksize-SZSZ-ZPTRSZ);
          if (FF_HEAD+size+SZSZ+ZPTRSZ < blocksize)
            {
              *chunk = INUSE;
              chunk = (void*)block+(FF_HEAD+size);
              *chunk = (blocksize-size-FF_HEAD-SZSZ-ZPTRSZ) | PREVUSE;
              put_chunk(zone, chunk);

              assert((*chunk & ~SIZE_BITS)%MINCHUNK == 0);
            }
          else
            *chunk = PREVUSE | INUSE;
          chunk = (void*)block+FF_HEAD;
        }
      else
        {
          size_t *slack;

          chunk = zone->segheadlist[class];

          assert(class < MAX_SEG);
          assert(!(*chunk & INUSE));
          assert(*chunk & PREVUSE);
          assert(size < (*chunk & ~SIZE_BITS));
          assert((*chunk & ~SIZE_BITS)%MINCHUNK == 0);

          take_chunk(zone, chunk);
          slack = (void*)chunk+size;
          *slack = ((*chunk & ~SIZE_BITS)-size) | PREVUSE;
          put_chunk(zone, slack);
        }
    }
  else
    {
      size_t chunksize = *chunk & ~SIZE_BITS;

      assert(chunksize%MINCHUNK == 0);
      assert(!(*chunk & INUSE));
      assert(*chunk & PREVUSE);
      assert(*(size_t*)((void*)chunk+chunksize) & INUSE);

      take_chunk(zone, chunk);
      if (chunksize > size)
        {
          size_t *slack;

          slack = (void*)chunk+size;
          *slack = (chunksize-size) | PREVUSE;
          put_chunk(zone, slack);
        }
      else
        {
          size_t *nextchunk = (void*)chunk+chunksize;

          assert(!(*nextchunk & PREVUSE));
          assert(chunksize == size);

          *nextchunk |= PREVUSE;
        }
    }
  *chunk = size | PREVUSE | INUSE;
  zoneptr = (NSZone**)(chunk+1);
  *zoneptr = (NSZone*)zone;
  return chunk;
}

/* Take the given chunk out of the free list.  No headers are set. */
static void
take_chunk (ffree_zone *zone, size_t *chunk)
{
  size_t size = *chunk & ~SIZE_BITS;
  size_t class = segindex(size);
  ff_link *otherlink;
  ff_link *links = (ff_link*)(chunk+1);

  assert(size%MINCHUNK == 0);
  assert(!(*chunk & INUSE));
  assert(*chunk & PREVUSE);

  if (links->prev == NULL)
    zone->segheadlist[class] = links->next;
  else
    {
      otherlink = (ff_link*)(links->prev+1);
      otherlink->next = links->next;
    }
  if (links->next == NULL)
    zone->segtaillist[class] = links->prev;
  else
    {
      otherlink = (ff_link*)(links->next+1);
      otherlink->prev = links->prev;
    }
}

/* Add the given chunk to the segregated list.  The header to the
   chunk must be set appropriately, but the tailer is set here. */
static void
put_chunk (ffree_zone *zone, size_t *chunk)
{
  size_t size = *chunk & ~SIZE_BITS;
  size_t class = segindex(size);
  size_t *tailer = (void*)chunk+(size-SZSZ);
  ff_link *links = (ff_link*)(chunk+1);

  assert(size%MINCHUNK == 0);
  assert(!(*chunk & INUSE));
  assert(*chunk & PREVUSE);

  *tailer = size;
  if (zone->segtaillist[class] == NULL)
    {
      assert(zone->segheadlist[class] == NULL);

      zone->segheadlist[class] = zone->segtaillist[class] = chunk;
      links->prev = links->next = NULL;
    }
  else
    {
      ff_link *prevlink = (ff_link*)(zone->segtaillist[class]+1);

      assert(zone->segheadlist[class] != NULL);

      links->next = NULL;
      links->prev = zone->segtaillist[class];
      prevlink->next = chunk;
      zone->segtaillist[class] = chunk;
    }
}

/* Add the given pointer to the buffer.  If the buffer becomes full,
   flush it.  The given pointer must always be one that points to used
   memory (i.e. chunks with headers that declare them as used). */
static inline void
add_buf (ffree_zone *zone, size_t *chunk)
{
  size_t bufsize = zone->bufsize;

  assert(bufsize < BUFFER);
  assert(*chunk & INUSE);
  assert((*chunk & ~SIZE_BITS)%MINCHUNK == 0);

  zone->bufsize++;
  zone->size_buf[bufsize] = *chunk & ~SIZE_BITS;
  zone->ptr_buf[bufsize] = chunk;
  if (bufsize == BUFFER-1)
    flush_buf(zone);
}

/* Flush buffers.  All coalescing is done here. */
static void
flush_buf (ffree_zone *zone)
{
  size_t i, size;
  size_t bufsize = zone->bufsize;
  size_t *chunk, *nextchunk;
  size_t *size_buf = zone->size_buf;
  size_t **ptr_buf = zone->ptr_buf;

  assert(bufsize <= BUFFER);

  for (i = 0; i < bufsize; i++)
    {
      size = size_buf[i];
      chunk = ptr_buf[i];

      assert((*chunk & ~SIZE_BITS) == size);
      assert(*chunk & INUSE);

      nextchunk = (void*)chunk+size;
      if (!(*chunk & PREVUSE))
        /* Coalesce with previous chunk. */
        {
          size_t prevsize = *(chunk-1);

          assert(prevsize%MINCHUNK == 0);

          size += prevsize;
          chunk = (void*)chunk-prevsize;

          assert(!(*chunk & INUSE));
          assert(*chunk & PREVUSE);
          assert((*chunk & ~SIZE_BITS) == prevsize);

          take_chunk(zone, chunk);
        }
      if (!(*nextchunk & INUSE))
        /* Coalesce with next chunk. */
        {
          size_t nextsize = *nextchunk & ~SIZE_BITS;

          assert(nextsize%MINCHUNK == 0);
          assert(*nextchunk & PREVUSE);
          assert(!(*nextchunk & INUSE));
          assert((void*)chunk+size == nextchunk);

          take_chunk(zone, nextchunk);
          size += nextsize;
        }
      *chunk = size | PREVUSE;
      put_chunk(zone, chunk);
      nextchunk = (void*)chunk+size;
      *nextchunk &= ~PREVUSE;

      assert((*chunk & ~SIZE_BITS)%MINCHUNK == 0);
      assert(!(*chunk & INUSE));
      assert(*chunk & PREVUSE);
      assert(*nextchunk & INUSE);
      assert(!(*nextchunk & PREVUSE));
    }
  zone->bufsize = 0;
}

/* If the first block in block list has enough space, use that space.
   Otherwise, sort the block list in decreasing free space order (only
   the first block needs to be put in its appropriate place since
   the rest of the list is already sorted).  Then check if the first
   block has enough space for the request.  If it does, use it.  If it
   doesn't, get more memory from the default zone, since none of the
   other blocks in the block list could have enough memory. */
static void*
nmalloc (NSZone *zone, size_t size)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  size_t top;
  size_t chunksize = roundupto(size+ZPTRSZ, ALIGN);
  NSZone **chunkhead;

  if (size == 0)
    return NULL;
  objc_mutex_lock(zptr->lock);
  top = zptr->blocks->top;
  /* No need to worry about (block == NULL), since a nonfreeable zone
     always starts with a block. */
  if (zptr->blocks->size-top >= chunksize)
    {
      chunkhead = (void*)(zptr->blocks)+top;
      *chunkhead = zone;
      zptr->blocks->top += chunksize;
    }
  else
    {
      size_t freesize = zptr->blocks->size-top;
      nf_block *block, *preblock;

      /* First, get the block list in decreasing free size order. */
      preblock = NULL;
      block = zptr->blocks;
      while ((block->next != NULL)
             && (freesize < block->next->size-block->next->top))
        {
          preblock = block;
          block = block->next;
        }
      if (preblock != NULL)
        {
          preblock->next = zptr->blocks;
          zptr->blocks = zptr->blocks->next;
          preblock->next->next = block;
        }
      if (zptr->blocks->size-zptr->blocks->top < chunksize)
        /* Get new block. */
        {
          size_t blocksize = roundupto(chunksize+NF_HEAD, zone->gran);

          block = objc_malloc(blocksize);
          if (block == NULL)
            {
              objc_mutex_unlock(zptr->lock);
              if (zone->name != nil)
                [NSException raise: NSMallocException
                             format: @"Zone %s has run out of memory",
                             [zone->name cStringNoCopy]];
              else
                [NSException raise: NSMallocException
                             format: @"Out of memory"];
            }
          block->next = zptr->blocks;
          block->size = blocksize;
          block->top = NF_HEAD;
          zptr->blocks = block;
        }
      chunkhead = (void*)(zptr->blocks)+zptr->blocks->top;
      *chunkhead = zone;
      zptr->blocks->top += chunksize;
    }
  objc_mutex_unlock(zptr->lock);
  return chunkhead+1;
}

/* Return the blocks to the default zone, then deallocate mutex, and
   then release zone name if it exists. */
static void
nrecycle (NSZone *zone)
{
  nf_block *nextblock;
  nf_block *block = ((nfree_zone*)zone)->blocks;

  objc_mutex_deallocate(((nfree_zone*)zone)->lock);
  while (block != NULL)
    {
      nextblock = block->next;
      objc_free(block);
      block = nextblock;
    }
  if (zone->name != nil)
    [zone->name release];
  objc_free(zone);
}

static void*
nrealloc (NSZone *zone, void *ptr, size_t size)
{
  if (zone->name != nil)
    [NSException raise: NSGenericException
                 format: @"Trying to reallocate in nonfreeable zone %s",
                 [zone->name cStringNoCopy]];
  else
    [NSException raise: NSGenericException
                 format: @"Trying to reallocate in nonfreeable zone"];
  return NULL; // Useless return
}

static void
nfree (NSZone *zone, void *ptr)
{
  if (zone->name != nil)
    [NSException raise: NSGenericException
                 format: @"Trying to free memory from nonfreeable zone %s",
                 [zone->name cStringNoCopy]];
  else
    [NSException raise: NSGenericException
                 format: @"Trying to free memory from nonfreeable zone"];
}

/* Check integrity of a nonfreeable zone.  Doesn't have to be
   particularly efficient. */
static BOOL
ncheck (NSZone *zone)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  nf_block *block;

  objc_mutex_lock(zptr->lock);
  block = zptr->blocks;
  while (block != NULL)
    {
      if (block->size < block->top)
        {
          objc_mutex_unlock(zptr->lock);
          return NO;
        }
      block = block->next;
    }
  /* FIXME: Do more checking? */
  objc_mutex_unlock(zptr->lock);
  return YES;
}

/* Return statistics for a nonfreeable zone.  Doesn't have to be
   particularly efficient. */
static struct NSZoneStats
nstats (NSZone *zone)
{
  struct NSZoneStats stats;
  nfree_zone *zptr = (nfree_zone*)zone;
  nf_block *block;

  stats.bytes_total = 0;
  stats.chunks_used = 0;
  stats.bytes_used = 0;
  stats.chunks_free = 0;
  stats.bytes_free = 0;
  objc_mutex_lock(zptr->lock);
  block = zptr->blocks;
  while (block != NULL)
    {
      size_t *chunk;

      stats.bytes_total += block->size;
      chunk = (void*)block+NF_HEAD;
      while ((void*)chunk < (void*)block+block->top)
        {
          stats.chunks_used++;
          stats.bytes_used += *chunk;
          chunk = (void*)chunk+(*chunk);
        }
      if (block->size != block->top)
        {
          stats.chunks_free++;
          stats.bytes_free += block->size-block->top;
        }
      block = block->next;
    }
  objc_mutex_unlock(zptr->lock);
  stats.bytes_used -= ZPTRSZ*stats.chunks_used;
  return stats;
}

NSZone*
NSCreateZone (size_t start, size_t gran, BOOL canFree)
{
  size_t i, startsize, granularity;

  if (start > 0)
    startsize = roundupto(start, MINGRAN);
  else
    startsize = MINGRAN;
  if (gran > 0)
    granularity = roundupto(gran, MINGRAN);
  else
    granularity = MINGRAN;
  if (canFree)
    {
      ff_block *block;
      ffree_zone *zone;
      size_t *header, *tailer;
      NSZone **zoneptr;

      zone = objc_malloc(sizeof(ffree_zone));
      if (zone == NULL)
        [NSException raise: NSMallocException
                     format: @"No memory to create zone"];
      zone->common.malloc = fmalloc;
      zone->common.realloc = frealloc;
      zone->common.free = ffree;
      zone->common.recycle = frecycle;
      zone->common.check = fcheck;
      zone->common.stats = fstats;
      zone->common.gran = granularity;
      zone->common.name = nil;
      zone->lock = objc_mutex_allocate();
      for (i = 0; i < MAX_SEG; i++)
        {
          zone->segheadlist[i] = NULL;
          zone->segtaillist[i] = NULL;
        }
      zone->bufsize = 0;
      zone->blocks = objc_malloc(startsize);
      if (zone->blocks == NULL)
        {
          objc_mutex_deallocate(zone->lock);
          objc_free(zone);
          [NSException raise: NSMallocException
                       format: @"No memory to create zone"];
        }
      block = zone->blocks;
      block->next = NULL;
      block->size = startsize;
      header = (void*)block+FF_HEAD;
      *header = (startsize-FF_HEAD-SZSZ-ZPTRSZ) | PREVUSE | INUSE;
      zoneptr = (NSZone**)(header+1);
      *zoneptr = (NSZone*)zone;
      tailer = (void*)block+(startsize-SZSZ-ZPTRSZ);
      *tailer = INUSE | PREVUSE;
      add_buf(zone, header);
      return (NSZone*)zone;
    }
  else
    {
      nf_block *block;
      nfree_zone *zone;

      zone = objc_malloc(sizeof(nfree_zone));
      if (zone == NULL)
        [NSException raise: NSMallocException
                     format: @"No memory to create zone"];
      zone->common.malloc = nmalloc;
      zone->common.realloc = nrealloc;
      zone->common.free = nfree;
      zone->common.recycle = nrecycle;
      zone->common.check = ncheck;
      zone->common.stats = nstats;
      zone->common.gran = granularity;
      zone->common.name = nil;
      zone->lock = objc_mutex_allocate();
      zone->blocks = objc_malloc(startsize);
      if (zone->blocks == NULL)
        {
          objc_mutex_deallocate(zone->lock);
          objc_free(zone);
          [NSException raise: NSMallocException
                       format: @"No memory to create zone"];
        }
      block = zone->blocks;
      block->next = NULL;
      block->size = startsize;
      block->top = NF_HEAD;
      return (NSZone*)zone;
    }
}

inline NSZone*
NSDefaultMallocZone (void)
{
  return __nszone_private_hidden_default_zone;
}

/* Not in OpenStep. */
void
NSSetDefaultMallocZone (NSZone *zone)
{
  __nszone_private_hidden_default_zone = zone;
}

inline NSZone*
NSZoneFromPointer (void *ptr)
{
  return *((NSZone**)ptr-1);
}

inline void*
NSZoneMalloc (NSZone *zone, size_t size)
{
  return (zone->malloc)(zone, size);
}

void*
NSZoneCalloc (NSZone *zone, size_t elems, size_t bytes)
{
  return memset(NSZoneMalloc(zone, elems*bytes), 0, elems*bytes);
}

inline void*
NSZoneRealloc (NSZone *zone, void *ptr, size_t size)
{
  return (zone->realloc)(zone, ptr, size);
}

inline void
NSRecycleZone (NSZone *zone)
{
  (zone->recycle)(zone);
}

inline void
NSZoneFree (NSZone *zone, void *ptr)
{
  (zone->free)(zone, ptr);
}

void
NSSetZoneName (NSZone *zone, NSString *name)
{
  /* FIXME: Not thread safe.  But will it matter? */
  if (zone->name != nil)
    [zone->name release];
  if (name == nil)
    zone->name = nil;
  else
    zone->name = [name copy];
}

inline NSString*
NSZoneName (NSZone *zone)
{
  return zone->name;
}

/* Not in OpenStep. */
inline void
NSZoneRegisterRegion (NSZone *zone, void *low, void *high)
{
  return; // Do nothing in this implementation.
}

/* Not in OpenStep. */
inline void
NSDeregisterZone (NSZone *zone)
{
  return; // Do nothing in this implementation.
}

/* Not in OpenStep. */
void*
NSZoneRegisterChunk (NSZone *zone, void *chunk)
{
  NSZone **zoneptr = chunk;

  *zoneptr = zone;
  return zoneptr+1;
}

/* Not in OpenStep. */
size_t
NSZoneChunkOverhead (void)
{
  return ZPTRSZ;
}

/* Not in OpenStep. */
inline BOOL
NSZoneCheck (NSZone *zone)
{
  return (zone->check)(zone);
}

/* Not in OpenStep. */
inline struct NSZoneStats
NSZoneStats (NSZone *zone)
{
  return (zone->stats)(zone);
}
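
/* A minimal usage sketch of the functions above: create a freeable
   zone with the default sizes, allocate from it, recover the owning
   zone from the pointer, and recycle the whole zone when done.

     NSZone *zone = NSCreateZone(0, 0, YES);
     void *mem = NSZoneMalloc(zone, 100);

     assert(NSZoneFromPointer(mem) == zone);
     NSZoneFree(zone, mem);
     NSRecycleZone(zone);
*/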