2001-12-17 14:31:42 +00:00
|
|
|
/** Zone memory management. -*- Mode: ObjC -*-
|
1998-10-15 05:03:16 +00:00
|
|
|
Copyright (C) 1997,1998 Free Software Foundation, Inc.
|
1997-03-03 19:43:25 +00:00
|
|
|
|
1997-05-03 18:15:44 +00:00
|
|
|
Written by: Yoo C. Chung <wacko@laplace.snu.ac.kr>
|
1997-03-03 19:43:25 +00:00
|
|
|
Date: January 1997
|
1998-10-15 05:03:16 +00:00
|
|
|
Rewrite by: Richard Frith-Macdonald <richard@brainstrom.co.uk>
|
1997-03-03 19:43:25 +00:00
|
|
|
|
1996-05-12 00:56:10 +00:00
|
|
|
This file is part of the GNUstep Base Library.
|
1996-03-22 01:26:22 +00:00
|
|
|
|
|
|
|
This library is free software; you can redistribute it and/or
|
2007-09-14 11:36:11 +00:00
|
|
|
modify it under the terms of the GNU Lesser General Public License
|
2008-06-08 10:38:33 +00:00
|
|
|
as published by the Free Software Foundation; either
|
|
|
|
version 2 of the License, or (at your option) any later version.
|
1997-03-03 19:43:25 +00:00
|
|
|
|
1997-09-01 21:59:51 +00:00
|
|
|
This library is distributed in the hope that it will be useful, but
|
|
|
|
WITHOUT ANY WARRANTY; without even the implied warranty of
|
1996-03-22 01:26:22 +00:00
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
2019-12-09 23:36:00 +00:00
|
|
|
Lesser General Public License for more details.
|
1997-03-03 19:43:25 +00:00
|
|
|
|
2007-09-14 11:36:11 +00:00
|
|
|
You should have received a copy of the GNU Lesser General Public
|
1997-09-01 21:59:51 +00:00
|
|
|
License along with this library; if not, write to the Free Software
|
2024-11-07 13:37:59 +00:00
|
|
|
Foundation, Inc., 31 Milk Street #960789 Boston, MA 02196 USA.
|
2001-12-18 16:54:15 +00:00
|
|
|
|
|
|
|
<title>NSZone class reference</title>
|
|
|
|
$Date$ $Revision$
|
|
|
|
*/
|
1997-03-03 19:43:25 +00:00
|
|
|
|
|
|
|
/* Design goals:
|
|
|
|
|
1997-03-03 19:56:37 +00:00
|
|
|
- Allocation and deallocation should be reasonably efficient.
|
1998-10-15 05:03:16 +00:00
|
|
|
  - We want to catch code that writes outside its permitted area.
|
1997-03-03 19:43:25 +00:00
|
|
|
|
1998-10-15 05:03:16 +00:00
|
|
|
*/
|
1997-03-03 19:43:25 +00:00
|
|
|
|
|
|
|
|
|
|
|
/* Actual design:
|
|
|
|
|
2011-02-19 19:42:42 +00:00
|
|
|
- The default zone uses malloc() and friends. We assume that
|
1997-03-03 19:56:37 +00:00
|
|
|
they're thread safe and that they return NULL if we're out of
|
1998-10-15 05:03:16 +00:00
|
|
|
memory (glibc malloc does this, what about other mallocs? FIXME).
|
2005-02-22 11:22:44 +00:00
|
|
|
|
1998-10-15 05:03:16 +00:00
|
|
|
- The OpenStep spec says that when a zone is recycled, any memory in
|
|
|
|
use is returned to the default zone.
|
|
|
|
Since, in general, we have no control over the system malloc, we can't
|
|
|
|
possibly do this. Instead, we move the recycled zone to a list of
|
|
|
|
'dead' zones, and as soon as all memory used in it is released, we
|
|
|
|
destroy it and remove it from that list. In the meantime, we release
|
|
|
|
any blocks of memory we can (ie those that don't contain unfreed chunks).
|
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
- For freeable zones, a small linear buffer is used for
|
|
|
|
deallocating and allocating. Anything that can't go into the
|
|
|
|
buffer then uses a more general purpose segregated fit algorithm
|
|
|
|
after flushing the buffer.
|
1996-03-22 01:26:22 +00:00
|
|
|
|
1998-10-15 05:03:16 +00:00
|
|
|
- For memory chunks in freeable zones, the pointer to the chunk is
|
|
|
|
preceded by the a chunk header which contains the size of the chunk
|
|
|
|
(plus a couple of flags) and a pointer to the end of the memory
|
|
|
|
requested. This adds 8 bytes for freeable zones, which is usually
|
|
|
|
what we need for alignment purposes anyway (assuming we're on a
|
|
|
|
32 bit machine). The granularity for allocation of chunks is quite
|
|
|
|
large - a chunk must be big enough to hold the chunk header plus a
|
|
|
|
couple of pointers and an unsigned size value.
|
|
|
|
The actual memory allocated will be the size of the chunk header plus
|
|
|
|
the size of memory requested plus one (a guard byte), all rounded up
|
|
|
|
to a multiple of the granularity.
|
1997-03-03 19:43:25 +00:00
|
|
|
|
|
|
|
- For nonfreeable zones, worst-like fit is used. This is OK since
|
|
|
|
we don't have to worry about memory fragmentation. */
|
|
|
|
|
|
|
|
/* Other information:
|
|
|
|
|
1997-03-03 19:56:37 +00:00
|
|
|
- This uses some GCC specific extensions. But since the library is
|
1997-03-03 19:58:17 +00:00
|
|
|
supposed to compile on GCC 2.7.2.1 (patched) or higher, and the
|
|
|
|
only other Objective-C compiler I know of (other than NeXT's, which
|
|
|
|
is based on GCC as far as I know) is the StepStone compiler, which
|
|
|
|
I haven't the foggiest idea why anyone would prefer it to GCC ;),
|
|
|
|
it should be OK.
|
1997-01-06 21:35:52 +00:00
|
|
|
|
1997-03-03 19:43:25 +00:00
|
|
|
- These functions should be thread safe, but I haven't really
|
|
|
|
tested them extensively in multithreaded cases. */
|
|
|
|
|
1997-03-03 19:58:17 +00:00
|
|
|
|
1998-11-19 21:26:27 +00:00
|
|
|
/* Define to turn off NSAssertions. */
|
|
|
|
#define NS_BLOCK_ASSERTIONS 1
|
1997-03-03 19:43:25 +00:00
|
|
|
|
2000-06-22 03:15:27 +00:00
|
|
|
#define IN_NSZONE_M 1
|
1997-03-03 19:56:37 +00:00
|
|
|
|
2010-02-19 08:12:46 +00:00
|
|
|
#import "common.h"
|
1997-03-03 19:43:25 +00:00
|
|
|
#include <stddef.h>
|
2010-02-17 11:47:06 +00:00
|
|
|
#import "Foundation/NSException.h"
|
|
|
|
#import "Foundation/NSLock.h"
|
|
|
|
#import "GSPrivate.h"
|
|
|
|
#import "GSPThread.h"
|
2009-04-15 08:03:19 +00:00
|
|
|
|
2024-07-23 12:06:24 +00:00
|
|
|
/**
|
|
|
|
* Primary structure representing an <code>NSZone</code>. Technically it
|
|
|
|
* consists of a set of function pointers for zone upkeep functions plus some
|
|
|
|
* other things-
|
|
|
|
<example>
|
|
|
|
{
|
|
|
|
// Functions for zone.
|
|
|
|
void *(*malloc)(struct _NSZone *zone, size_t size);
|
|
|
|
void *(*realloc)(struct _NSZone *zone, void *ptr, size_t size);
|
|
|
|
void (*free)(struct _NSZone *zone, void *ptr);
|
|
|
|
void (*recycle)(struct _NSZone *zone);
|
|
|
|
BOOL (*check)(struct _NSZone *zone);
|
|
|
|
BOOL (*lookup)(struct _NSZone *zone, void *ptr);
|
|
|
|
|
|
|
|
// Zone statistics (not always maintained).
|
|
|
|
struct NSZoneStats (*stats)(struct _NSZone *zone);
|
|
|
|
|
|
|
|
size_t gran; // Zone granularity (passed in on initialization)
|
|
|
|
NSString *name; // Name of zone (default is 'nil')
|
|
|
|
NSZone *next; // Pointer used for internal management of multiple zones.
|
|
|
|
}</example>
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct _NSZone
|
|
|
|
{
|
|
|
|
/* Functions for zone. */
|
|
|
|
void *(*malloc)(struct _NSZone *zone, size_t size);
|
|
|
|
void *(*realloc)(struct _NSZone *zone, void *ptr, size_t size);
|
|
|
|
void (*free)(struct _NSZone *zone, void *ptr);
|
|
|
|
void (*recycle)(struct _NSZone *zone);
|
|
|
|
BOOL (*check)(struct _NSZone *zone);
|
|
|
|
BOOL (*lookup)(struct _NSZone *zone, void *ptr);
|
|
|
|
struct NSZoneStats (*stats)(struct _NSZone *zone);
|
|
|
|
|
|
|
|
size_t gran; // Zone granularity
|
|
|
|
__unsafe_unretained NSString *name; // Name of zone (default is 'nil')
|
|
|
|
NSZone *next;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
|
2021-07-28 14:17:47 +00:00
|
|
|
static gs_mutex_t zoneLock = GS_MUTEX_INIT_STATIC;
|
2018-03-26 14:05:01 +00:00
|
|
|
|
2009-04-15 08:03:19 +00:00
|
|
|
/**
|
|
|
|
* Try to get more memory - the normal process has failed.
|
|
|
|
* If we can't do anything, just return a null pointer.
|
|
|
|
* Try to do some logging if possible.
|
|
|
|
*/
|
|
|
|
/* Last-resort handler invoked when normal allocation has failed.
 * Logs the requested size to stderr and returns NULL (no fallback
 * allocator exists).
 * NOTE(review): the 'retry' flag is currently ignored — confirm callers
 * do not rely on a retry attempt being made here. */
void *
GSOutOfMemory(NSUInteger size, BOOL retry)
{
  fprintf(stderr, "GSOutOfMemory ... wanting %"PRIuPTR" bytes.\n", size);
  return 0;
}
|
1996-03-22 01:26:22 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Default zone functions for default zone. */
|
|
|
|
static void* default_malloc (NSZone *zone, size_t size);
|
|
|
|
static void* default_realloc (NSZone *zone, void *ptr, size_t size);
|
|
|
|
static void default_free (NSZone *zone, void *ptr);
|
|
|
|
static void default_recycle (NSZone *zone);
|
|
|
|
static BOOL default_check (NSZone *zone);
|
|
|
|
static BOOL default_lookup (NSZone *zone, void *ptr);
|
|
|
|
static struct NSZoneStats default_stats (NSZone *zone);
|
1998-10-22 10:58:37 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* malloc() wrapper for the default zone.  Raises NSMallocException
 * instead of returning NULL when the C library cannot satisfy the
 * request. */
static void*
default_malloc (NSZone *zone, size_t size)
{
  void	*allocated = malloc(size);

  if (NULL == allocated)
    {
      [NSException raise: NSMallocException
		  format: @"Default zone has run out of memory"];
    }
  return allocated;
}
|
1996-03-22 01:26:22 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* realloc() wrapper for the default zone.  Raises NSMallocException
 * instead of returning NULL on failure (the original block remains
 * valid in that case, but the exception unwinds first). */
static void*
default_realloc (NSZone *zone, void *ptr, size_t size)
{
  void	*grown = realloc(ptr, size);

  if (NULL == grown)
    {
      [NSException raise: NSMallocException
		  format: @"Default zone has run out of memory"];
    }
  return grown;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Return memory to the default zone — a direct free(). */
static void
default_free (NSZone *zone, void *ptr)
{
  free(ptr);
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* The default zone lives for the lifetime of the process; recycling it
 * is always a programming error, so raise unconditionally. */
static void
default_recycle (NSZone *zone)
{
  /* Recycle the default zone? Thou hast got to be kiddin'. */
  [NSException raise: NSGenericException
	      format: @"Trying to recycle default zone"];
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Consistency checking is impossible for memory managed by the system
 * malloc(), so this always raises; the NO return is unreachable but
 * satisfies the signature. */
static BOOL
default_check (NSZone *zone)
{
  /* We can't check memory managed by malloc(). */
  [NSException raise: NSGenericException
	      format: @"No checking for default zone"];
  return NO;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Membership test for the default zone: any pointer not claimed by a
 * custom zone is assumed to belong here, so always answer YES. */
static BOOL
default_lookup (NSZone *zone, void *ptr)
{
  /* Assume all memory is in default zone. */
  return YES;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Statistics are unavailable for malloc()-managed memory; raises, and
 * returns a zeroed struct only to satisfy the signature. */
static struct NSZoneStats
default_stats (NSZone *zone)
{
  struct NSZoneStats dummy = {0,0,0,0,0};

  /* We can't obtain statistics from the memory managed by malloc(). */
  [NSException raise: NSGenericException
	      format: @"No statistics for default zone"];
  return dummy;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
static NSZone default_zone =
|
1998-10-15 05:03:16 +00:00
|
|
|
{
|
2009-04-10 08:25:03 +00:00
|
|
|
default_malloc, default_realloc, default_free, default_recycle,
|
|
|
|
default_check, default_lookup, default_stats, 0, @"default", 0
|
|
|
|
};
|
1998-10-15 05:03:16 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/*
|
|
|
|
* For backward compatibility.
|
|
|
|
*/
|
|
|
|
NSZone *__nszone_private_hidden_default_zone = &default_zone;
|
1998-10-15 05:03:16 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Set (or clear, with nil) the name of a zone.  A nil zone means the
 * default zone.  The name is copied and the previous name released;
 * the swap is performed under zoneLock so concurrent readers never see
 * a released object. */
GS_DECLARE void
NSSetZoneName (NSZone *zone, NSString *name)
{
  if (!zone)
    zone = NSDefaultMallocZone();
  GS_MUTEX_LOCK(zoneLock);
  name = [name copy];
  if (zone->name != nil)
    [zone->name release];
  zone->name = name;
  GS_MUTEX_UNLOCK(zoneLock);
}
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Return the name of the given zone (nil if it was never named).
 * A NULL zone argument is interpreted as the default zone. */
GS_DECLARE NSString*
NSZoneName (NSZone *zone)
{
  NSZone	*z = zone ? zone : NSDefaultMallocZone();

  return z->name;
}
|
2005-02-22 11:22:44 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Alignment */
|
|
|
|
#ifdef ALIGN
|
|
|
|
#undef ALIGN
|
|
|
|
#endif
|
|
|
|
#define ALIGN ((__alignof__(double) < 8) ? 8 : __alignof__(double))
|
|
|
|
#define MINGRAN 256 /* Minimum granularity. */
|
|
|
|
#define DEFBLOCK 16384 /* Default granularity. */
|
|
|
|
#define BUFFER 4 /* Buffer size. FIXME?: Is this a reasonable optimum. */
|
|
|
|
#define MAX_SEG 16 /* Segregated list size. */
|
|
|
|
#define FBSZ sizeof(ff_block)
|
|
|
|
#define NBSZ sizeof(nf_chunk)
|
1997-01-06 21:35:52 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Information bits in size. */
|
|
|
|
#define INUSE 0x01 /* Current chunk in use. */
|
|
|
|
#define PREVUSE 0x02 /* Previous chunk in use. */
|
|
|
|
#define LIVE 0x04
|
1997-01-06 21:35:52 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Bits to mask off to get size. */
|
|
|
|
#define SIZE_BITS (INUSE | PREVUSE | LIVE)
|
1998-10-15 05:03:16 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
#define NF_HEAD sizeof(nf_block)
|
1998-10-15 05:03:16 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
typedef struct _ffree_free_link ff_link;
|
|
|
|
typedef struct _nfree_block_struct nf_block;
|
|
|
|
typedef struct _ffree_block_struct ff_block;
|
|
|
|
typedef struct _ffree_zone_struct ffree_zone;
|
|
|
|
typedef struct _nfree_zone_struct nfree_zone;
|
1997-03-03 19:58:17 +00:00
|
|
|
|
1998-10-15 05:03:16 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Header for blocks in nonfreeable zones. */
|
|
|
|
struct _nfree_block_unpadded
|
1997-03-03 19:58:17 +00:00
|
|
|
{
|
2009-04-10 08:25:03 +00:00
|
|
|
struct _nfree_block_struct *next;
|
|
|
|
size_t size; // Size of block
|
|
|
|
size_t top; // Position of next memory chunk to allocate
|
|
|
|
};
|
|
|
|
#define NFBPAD sizeof(struct _nfree_block_unpadded)
|
1997-03-03 19:58:17 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
struct _nfree_block_struct
|
1998-10-15 05:03:16 +00:00
|
|
|
{
|
2009-04-10 08:25:03 +00:00
|
|
|
struct _nfree_block_struct *next;
|
|
|
|
size_t size; // Size of block
|
|
|
|
size_t top; // Position of next memory chunk to allocate
|
|
|
|
char padding[ALIGN - ((NFBPAD % ALIGN) ? (NFBPAD % ALIGN) : ALIGN)];
|
|
|
|
};
|
2000-02-19 00:40:47 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
struct _ffree_block_unpadded {
|
|
|
|
size_t size;
|
|
|
|
struct _ffree_block_struct *next;
|
|
|
|
};
|
|
|
|
#define FFCPAD sizeof(struct _ffree_block_unpadded)
|
1998-10-15 05:03:16 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Header for blocks and chunks in freeable zones. */
|
|
|
|
struct _ffree_block_struct
|
1997-03-03 19:58:17 +00:00
|
|
|
{
|
2009-04-10 08:25:03 +00:00
|
|
|
size_t size;
|
|
|
|
struct _ffree_block_struct *next;
|
|
|
|
char padding[ALIGN - ((FFCPAD % ALIGN) ? (FFCPAD % ALIGN) : ALIGN)];
|
|
|
|
};
|
1997-03-03 19:58:17 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
struct _ffree_free_link_unpadded
|
|
|
|
{
|
|
|
|
size_t size;
|
|
|
|
ff_link *prev;
|
|
|
|
ff_link *next;
|
|
|
|
size_t back; /* Back link at end of 'dead' block. */
|
|
|
|
};
|
|
|
|
#define FFDPAD sizeof(struct _ffree_free_link_unpadded)
|
1997-03-03 19:58:17 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
struct _ffree_free_link
|
|
|
|
{
|
|
|
|
size_t size;
|
|
|
|
ff_link *prev;
|
|
|
|
ff_link *next;
|
|
|
|
size_t back;
|
|
|
|
char padding[ALIGN - ((FFDPAD % ALIGN) ? (FFDPAD % ALIGN) : ALIGN)];
|
|
|
|
};
|
1997-05-03 17:28:54 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* NSZone structure for freeable zones. */
|
|
|
|
struct _ffree_zone_struct
|
|
|
|
{
|
|
|
|
NSZone common;
|
2021-07-28 14:17:47 +00:00
|
|
|
gs_mutex_t lock;
|
2009-04-10 08:25:03 +00:00
|
|
|
ff_block *blocks; // Linked list of blocks
|
|
|
|
ff_link *segheadlist[MAX_SEG]; // Segregated list, holds heads
|
|
|
|
ff_link *segtaillist[MAX_SEG]; // Segregated list, holds tails
|
|
|
|
size_t bufsize; // Buffer size
|
|
|
|
size_t size_buf[BUFFER]; // Buffer holding sizes
|
|
|
|
ff_block *ptr_buf[BUFFER]; // Buffer holding pointers to chunks
|
|
|
|
};
|
1997-03-03 19:43:25 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Rounds up N to nearest multiple of BASE. */
|
1997-03-03 19:43:25 +00:00
|
|
|
/* Round n up to the nearest multiple of base.  Values that are already
 * an exact multiple (including zero) are returned unchanged. */
static inline size_t
roundupto (size_t n, size_t base)
{
  size_t remainder = n % base;

  return remainder ? (n - remainder) + base : n;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/*
|
|
|
|
* Minimum chunk size for freeable zones.
|
|
|
|
* Need room for basic chunk header, next and prev pointers for
|
|
|
|
* free-list, and a reverse pointer (size_t) to go at the end of the
|
|
|
|
* chunk while it is waiting to be consolidated with other chunks.
|
|
|
|
*/
|
|
|
|
#define MINCHUNK sizeof(ff_link)
|
1997-05-03 17:28:54 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
#define CLTOSZ(n) ((n)*MINCHUNK) /* Converts classes to sizes. */
|
1998-10-15 05:03:16 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
static inline void*
|
|
|
|
chunkToPointer(ff_block *chunk)
|
|
|
|
{
|
|
|
|
return (void*)(&chunk[1]);
|
|
|
|
}
|
1998-10-15 05:03:16 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
static inline ff_block*
|
|
|
|
pointerToChunk(void* ptr)
|
|
|
|
{
|
|
|
|
return &(((ff_block*)ptr)[-1]);
|
|
|
|
}
|
2005-02-22 11:22:44 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Non-zero if the LIVE flag is set: the chunk has been handed to a
 * caller and not yet freed. */
static inline size_t
chunkIsLive(ff_block* ptr)
{
  return ptr->size & LIVE;
}
|
1998-10-15 05:03:16 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Non-zero if the INUSE flag is set in the chunk's size word. */
static inline size_t
chunkIsInUse(ff_block* ptr)
{
  return ptr->size & INUSE;
}
|
1997-03-03 19:43:25 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Non-zero if the chunk physically preceding this one is in use
 * (PREVUSE flag in the size word). */
static inline size_t
chunkIsPrevInUse(ff_block* ptr)
{
  return ptr->size & PREVUSE;
}
|
2005-02-22 11:22:44 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* The chunk's size in bytes, with the INUSE/PREVUSE/LIVE flag bits
 * masked off. */
static inline size_t
chunkSize(ff_block* ptr)
{
  return ptr->size & ~SIZE_BITS;
}
|
2005-02-22 11:22:44 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Clear the LIVE flag; returns the updated size word. */
static inline size_t
chunkClrLive(ff_block* ptr)
{
  return ptr->size &= ~LIVE;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Mark the physically preceding chunk as no longer in use. */
static inline void
chunkClrPrevInUse(ff_block* ptr)
{
  ptr->size &= ~PREVUSE;
}
|
1997-01-06 21:35:52 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Mark this chunk as in use. */
static inline void
chunkSetInUse(ff_block* ptr)
{
  ptr->size |= INUSE;
}
|
2005-02-22 11:22:44 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Set the LIVE flag (chunk handed out to a caller); returns the updated
 * size word. */
static inline size_t
chunkSetLive(ff_block* ptr)
{
  return ptr->size |= LIVE;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Mark the physically preceding chunk as in use. */
static inline void
chunkSetPrevInUse(ff_block* ptr)
{
  ptr->size |= PREVUSE;
}
|
1997-01-06 21:35:52 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Overwrite the whole size word — callers must OR in any flag bits they
 * want preserved. */
static inline void
chunkSetSize(ff_block* ptr, size_t size)
{
  ptr->size = size;
}
|
2005-02-22 11:22:44 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Address of the chunk physically following this one (header address
 * plus this chunk's size). */
static inline ff_block*
chunkNext(ff_block *ptr)
{
  return (ff_block*) ((void*)ptr+chunkSize(ptr));
}
|
2005-02-22 11:22:44 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Write this (free) chunk's size into the word immediately before the
 * next chunk's header, forming the back-link that lets chunkPrev() walk
 * backwards over free chunks during coalescing.  Only valid for chunks
 * that are neither in use nor live. */
static inline void
chunkMakeLink(ff_block *ptr)
{
  NSAssert(!chunkIsInUse(ptr), NSInternalInconsistencyException);
  NSAssert(!chunkIsLive(ptr), NSInternalInconsistencyException);
  (&(chunkNext(ptr)->size))[-1] = chunkSize(ptr);
}
|
2005-02-22 11:22:44 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Split 'ptr' so that it keeps exactly 'size' bytes (marked INUSE, with
 * its original PREVUSE state) and return the free remainder.  The
 * remainder is given a back-link (chunkMakeLink) and a PREVUSE flag,
 * since the chunk before it — ptr — is now in use.
 * Precondition: ptr's current size is a multiple of MINCHUNK and
 * strictly greater than 'size'. */
static inline ff_block*
chunkChop(ff_block *ptr, size_t size)
{
  ff_block *remainder;
  size_t left = chunkSize(ptr)-size;

  NSAssert((chunkSize(ptr) % MINCHUNK) == 0, NSInternalInconsistencyException);
  NSAssert(chunkSize(ptr) > size, NSInternalInconsistencyException);
  remainder = (ff_block*)((void*)ptr+size);
  chunkSetSize(remainder, left | PREVUSE);
  chunkMakeLink(remainder);
  chunkSetSize(ptr, size | chunkIsPrevInUse(ptr) | INUSE);
  return remainder;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Locate the free chunk physically preceding 'ptr', using the back-link
 * word (previous chunk's size) stored just before ptr's header by
 * chunkMakeLink().  Only valid when PREVUSE is clear, i.e. the previous
 * chunk is known to be free. */
static inline ff_block*
chunkPrev(ff_block *ptr)
{
  size_t offset;
  ff_block *prev;

  NSAssert(!chunkIsPrevInUse(ptr), NSInternalInconsistencyException);
  offset = (&(ptr->size))[-1];
  NSAssert(offset > 0 && (offset % MINCHUNK) == 0,
    NSInternalInconsistencyException);
  prev = (ff_block*)((void*)ptr-offset);
  /* Sanity: the back-link must agree with the previous chunk's own
   * header, and that chunk must indeed be free. */
  NSAssert(chunkSize(prev) == offset, NSInternalInconsistencyException);
  NSAssert(!chunkIsInUse(prev), NSInternalInconsistencyException);
  return prev;
}
|
|
|
|
|
|
|
|
/* NSZone structure for nonfreeable zones. */
|
|
|
|
struct _nfree_zone_struct
|
|
|
|
{
|
|
|
|
NSZone common;
|
2021-07-28 14:17:47 +00:00
|
|
|
gs_mutex_t lock;
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Linked list of blocks in decreasing order of free space,
|
|
|
|
except maybe for the first block. */
|
|
|
|
nf_block *blocks;
|
|
|
|
size_t use;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Memory management functions for freeable zones. */
|
|
|
|
static void* fmalloc (NSZone *zone, size_t size);
|
|
|
|
static void* frealloc (NSZone *zone, void *ptr, size_t size);
|
|
|
|
static void ffree (NSZone *zone, void *ptr);
|
|
|
|
static void frecycle (NSZone *zone);
|
|
|
|
static BOOL fcheck (NSZone *zone);
|
|
|
|
static BOOL flookup (NSZone *zone, void *ptr);
|
|
|
|
static struct NSZoneStats fstats (NSZone *zone);
|
|
|
|
|
|
|
|
static inline size_t segindex (size_t size);
|
|
|
|
static ff_block* get_chunk (ffree_zone *zone, size_t size);
|
|
|
|
static void take_chunk (ffree_zone *zone, ff_block *chunk);
|
|
|
|
static void put_chunk (ffree_zone *zone, ff_block *chunk);
|
|
|
|
static inline void add_buf (ffree_zone *zone, ff_block *chunk);
|
|
|
|
static void flush_buf (ffree_zone *zone);
|
|
|
|
|
|
|
|
/* Memory management functions for nonfreeable zones. */
|
|
|
|
static void* nmalloc (NSZone *zone, size_t size);
|
|
|
|
static void nrecycle (NSZone *zone);
|
|
|
|
static void* nrealloc (NSZone *zone, void *ptr, size_t size);
|
|
|
|
static void nfree (NSZone *zone, void *ptr);
|
|
|
|
static BOOL ncheck (NSZone *zone);
|
|
|
|
static BOOL nlookup (NSZone *zone, void *ptr);
|
|
|
|
static struct NSZoneStats nstats (NSZone *zone);
|
|
|
|
|
|
|
|
/* Memory management functions for recycled zones. */
|
|
|
|
static void* rmalloc (NSZone *zone, size_t size);
|
|
|
|
static void rrecycle (NSZone *zone);
|
|
|
|
static void* rrealloc (NSZone *zone, void *ptr, size_t size);
|
|
|
|
static void rffree (NSZone *zone, void *ptr);
|
|
|
|
static void rnfree (NSZone *zone, void *ptr);
|
|
|
|
|
2009-04-10 08:28:21 +00:00
|
|
|
/*
|
|
|
|
* Lists of zones to be used to determine if a pointer is in a zone.
|
|
|
|
*/
|
|
|
|
static NSZone *zone_list = 0;
|
2009-04-10 08:25:03 +00:00
|
|
|
|
|
|
|
/* Unlink 'zone' from the global zone_list (head case and interior case
 * handled separately) and release its memory.  A NULL argument is a
 * no-op.  A zone not found in the list is still freed.
 * NOTE(review): zone_list is not protected here — assumes the caller
 * already holds zoneLock; confirm at call sites. */
static inline void
destroy_zone(NSZone* zone)
{
  if (zone)
    {
      if (zone_list == zone)
        {
          zone_list = zone->next;
        }
      else
        {
          NSZone *ptr = zone_list;

          while (ptr != NULL && ptr->next != zone)
            {
              ptr = ptr->next;
            }
          if (ptr)
            {
              ptr->next = zone->next;
            }
        }
      free((void*)zone);
    }
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Search the buffer to see if there is any memory chunks large enough
|
|
|
|
to satisfy request using first fit. If the memory chunk found has
|
|
|
|
a size exactly equal to the one requested, remove it from the buffer
|
|
|
|
and return it. If not, cut off a chunk that does match the size
|
|
|
|
and return it. If there is no chunk large enough in the buffer,
|
|
|
|
get a chunk from the general purpose allocator that uses segregated
|
|
|
|
fit. Since a chunk in the buffer is not freed in the general purpose
|
|
|
|
allocator, the headers are as if it is still in use. */
|
|
|
|
/* Allocate 'size' bytes from a freeable zone.
 * First scans the small deallocation buffer for a chunk big enough
 * (first fit); an exact match is removed from the buffer, a larger one
 * is chopped.  Otherwise the buffer is flushed to the free lists and a
 * chunk is obtained from the segregated-fit allocator, raising
 * NSMallocException on exhaustion.  The requested size is padded with
 * the chunk header (FBSZ) plus one guard byte, rounded to MINCHUNK. */
static void*
fmalloc (NSZone *zone, size_t size)
{
  size_t i = 0;
  size_t chunksize = roundupto(size+FBSZ+1, MINCHUNK);
  ffree_zone *zptr = (ffree_zone*)zone;
  size_t bufsize;
  size_t *size_buf = zptr->size_buf;
  ff_block **ptr_buf = zptr->ptr_buf;
  ff_block *chunkhead;
  void *result;

  GS_MUTEX_LOCK(zptr->lock);
  bufsize = zptr->bufsize;
  /* First fit scan of the buffer. */
  while ((i < bufsize) && (chunksize > size_buf[i]))
    i++;
  if (i < bufsize)
    /* Use memory chunk in buffer. */
    {
      if (size_buf[i] == chunksize)
	/* Exact fit: take the chunk and close the gap in the buffer by
	 * moving the last entry into slot i. */
	{
	  zptr->bufsize--;
	  bufsize = zptr->bufsize;
	  chunkhead = ptr_buf[i];
	  size_buf[i] = size_buf[bufsize];
	  ptr_buf[i] = ptr_buf[bufsize];

	  NSAssert(chunkIsInUse(chunkhead), NSInternalInconsistencyException);
	  NSAssert((chunkSize(chunkhead) % MINCHUNK) == 0,
	    NSInternalInconsistencyException);
	}
      else
	{
	  /*
	   * Break off chunk leaving remainder marked as in use since it
	   * stays in this buffer rather than on a free-list.
	   */
	  chunkhead = ptr_buf[i];
	  size_buf[i] -= chunksize;
	  ptr_buf[i] = chunkChop(chunkhead, chunksize);
	  chunkSetInUse(ptr_buf[i]);
	}
    }
  else
    /* Get memory from segregated fit allocator. */
    {
      flush_buf(zptr);
      chunkhead = get_chunk(zptr, chunksize);
      if (chunkhead == NULL)
	{
	  /* Unlock before raising so the zone isn't left locked. */
	  GS_MUTEX_UNLOCK(zptr->lock);
	  if (zone->name != nil)
	    [NSException raise: NSMallocException
			format: @"Zone %@ has run out of memory", zone->name];
	  else
	    [NSException raise: NSMallocException
			format: @"Out of memory"];
	}

      NSAssert(chunkIsInUse(chunkhead), NSInternalInconsistencyException);
      NSAssert(chunkIsPrevInUse(chunkNext(chunkhead)),
	NSInternalInconsistencyException);
      NSAssert((chunkSize(chunkhead) % MINCHUNK) == 0,
	NSInternalInconsistencyException);
    }
  /* Record the end of the caller's region and write the guard byte (42)
   * there so ffree() can detect overruns, then mark the chunk live. */
  chunkhead->next = (ff_block*)(chunkToPointer(chunkhead)+size);
  *((char*)chunkhead->next) = (char)42;
  chunkSetLive(chunkhead);
  result = chunkToPointer(chunkhead);
  GS_MUTEX_UNLOCK(zptr->lock);
  return result;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* If PTR == NULL, then it's the same as ordinary memory allocation.
|
|
|
|
If a smaller size than it originally had is requested, shrink the
|
|
|
|
chunk. If a larger size is requested, check if there is enough
|
|
|
|
space after it. If there isn't enough space, get a new chunk and
|
|
|
|
move it there, releasing the original. The space before the chunk
|
|
|
|
should also be checked, but I'll leave this to a later date. */
|
1997-03-03 19:43:25 +00:00
|
|
|
/* Resize a chunk in a freeable zone.
 * NULL ptr degenerates to fmalloc().  Shrinking chops the tail off and
 * buffers it.  Growing first tries to absorb the physically following
 * free chunk; failing that, a fresh chunk is obtained, the data copied,
 * and the old chunk buffered for release.  Raises NSMallocException on
 * exhaustion.  The guard byte and live flag are re-established on the
 * (possibly moved) chunk before returning. */
static void*
frealloc (NSZone *zone, void *ptr, size_t size)
{
  size_t realsize;
  size_t chunksize = roundupto(size+FBSZ+1, MINCHUNK);
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *chunkhead, *slack;
  void *result;

  NSAssert(ptr == NULL || NSZoneFromPointer(ptr) == zone,
    NSInternalInconsistencyException);
  if (ptr == NULL)
    return fmalloc(zone, size);
  chunkhead = pointerToChunk(ptr);
  GS_MUTEX_LOCK(zptr->lock);
  realsize = chunkSize(chunkhead);

  NSAssert(chunkIsInUse(chunkhead), NSInternalInconsistencyException);
  NSAssert((realsize % MINCHUNK) == 0, NSInternalInconsistencyException);

  chunkClrLive(chunkhead);
  if (chunksize < realsize)
    {
      /*
       * Chop tail off existing memory chunk and tell the next chunk
       * after it that it is no longer in use.  Then put it in the
       * buffer to be added to the free list later (we can't add it
       * immediately 'cos we might invalidate the rule that there
       * must not be two adjacent unused chunks).
       */
      slack = chunkChop(chunkhead, chunksize);
      chunkSetInUse(slack);
      add_buf(zptr, slack);
    }
  else if (chunksize > realsize)
    {
      size_t nextsize;
      ff_block *nextchunk, *farchunk;

      nextchunk = chunkNext(chunkhead);
      nextsize = chunkSize(nextchunk);

      NSAssert((nextsize % MINCHUNK) == 0, NSInternalInconsistencyException);

      if (!chunkIsInUse(nextchunk) && (nextsize+realsize >= chunksize))
	/* Expand in place by absorbing the adjacent free chunk. */
	{
	  take_chunk(zptr, nextchunk);
	  if (nextsize+realsize == chunksize)
	    {
	      /* Absorbed it exactly - chunk beyond is now adjacent to
	       * an in-use chunk. */
	      farchunk = chunkNext(nextchunk);
	      chunkSetPrevInUse(farchunk);
	    }
	  else
	    {
	      /* Absorbed too much - return the surplus to a free list. */
	      chunkSetSize(chunkhead, nextsize+realsize);
	      slack = chunkChop(chunkhead, chunksize);
	      put_chunk(zptr, slack);
	    }
	  chunkSetSize(chunkhead, chunksize |
	    chunkIsPrevInUse(chunkhead) | INUSE);
	}
      else
	/* Get new chunk and copy. */
	{
	  ff_block *newchunk;

	  newchunk = get_chunk(zptr, chunksize);
	  if (newchunk == NULL)
	    {
	      GS_MUTEX_UNLOCK(zptr->lock);
	      if (zone->name != nil)
		[NSException raise: NSMallocException
			    format: @"Zone %@ has run out of memory",
		  zone->name];
	      else
		[NSException raise: NSMallocException
			    format: @"Out of memory"];
	    }
	  memcpy((void*)(&newchunk[1]), (void*)(&chunkhead[1]), realsize-FBSZ);
	  add_buf(zptr, chunkhead);
	  chunkhead = newchunk;
	}
    }
  /* Re-establish guard byte (42) at the end of the caller's region and
   * mark the chunk live again. */
  chunkhead->next = (ff_block*)(chunkToPointer(chunkhead)+size);
  *((char*)chunkhead->next) = (char)42;
  chunkSetLive(chunkhead);
  result = chunkToPointer(chunkhead);
  GS_MUTEX_UNLOCK(zptr->lock);
  return result;
}
|
|
|
|
|
|
|
|
/* Frees memory chunk by simply adding it to the buffer.
   The chunk must be live (allocated and not yet freed) and must have
   been allocated from this zone; both conditions are checked. */
static void
ffree (NSZone *zone, void *ptr)
{
  ff_block *chunk;

  /* ptr must belong to this zone. */
  NSAssert(NSZoneFromPointer(ptr) == zone, NSInternalInconsistencyException);
  GS_MUTEX_LOCK(((ffree_zone*)zone)->lock);
  chunk = pointerToChunk(ptr);
  /* A cleared live flag means this chunk was already freed. */
  if (chunkIsLive(chunk) == 0)
    [NSException raise: NSMallocException
		format: @"Attempt to free freed memory"];
  /* The guard byte written just past the user data at allocation time
     must still be 42 - detects writes beyond the permitted area. */
  NSAssert(*((char*)chunk->next) == (char)42, NSInternalInconsistencyException);
  add_buf((ffree_zone*)zone, chunk);
  GS_MUTEX_UNLOCK(((ffree_zone*)zone)->lock);
}
|
|
|
|
|
1998-10-15 05:03:16 +00:00
|
|
|
/* Release every memory block that contains no used chunks (a block is
   wholly free when its single leading chunk is free and spans to the
   block trailer).  Returns YES - after destroying the zone lock - if
   the zone no longer owns any blocks, NO otherwise. */
static BOOL
frecycle1(NSZone *zone)
{
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *block;
  ff_block *nextblock;

  GS_MUTEX_LOCK(zptr->lock);
  /* Coalesce buffered frees first so wholly-free blocks are visible. */
  flush_buf(zptr);
  block = zptr->blocks;
  while (block != NULL)
    {
      ff_block *tmp = &block[1];

      nextblock = block->next;
      /* First chunk free and reaching the block trailer => empty block. */
      if (chunkIsInUse(tmp) == 0 && chunkNext(tmp) == chunkNext(block))
	{
	  /* Unlink the block from the singly-linked blocks list. */
	  if (zptr->blocks == block)
	    zptr->blocks = block->next;
	  else
	    {
	      tmp = zptr->blocks;
	      while (tmp->next != block)
		tmp = tmp->next;
	      tmp->next = block->next;
	    }
	  free((void*)block);
	}
      block = nextblock;
    }
  GS_MUTEX_UNLOCK(zptr->lock);
  if (zptr->blocks == 0)
    {
      GS_MUTEX_DESTROY(zptr->lock);
      return YES;
    }
  return NO;
}
|
|
|
|
|
|
|
|
/* Recycle the zone.  Releases the zone name, then attempts to release
   all storage.  If some chunks are still in use the zone cannot be
   destroyed yet; its function pointers are redirected to the r*
   variants so later frees can complete the destruction. */
static void
frecycle (NSZone *zone)
{
  GS_MUTEX_LOCK(zoneLock);
  if (zone->name != nil)
    {
      NSString *name = zone->name;

      /* Clear before release so no one sees a dangling name. */
      zone->name = nil;
      [name release];
    }
  if (frecycle1(zone) == YES)
    destroy_zone(zone);
  else
    {
      /* Still-live allocations remain; switch to recycled-zone
	 behavior until the last chunk is freed (see rffree). */
      zone->malloc = rmalloc;
      zone->realloc = rrealloc;
      zone->free = rffree;
      zone->recycle = rrecycle;
    }
  GS_MUTEX_UNLOCK(zoneLock);
}
|
|
|
|
|
1997-03-03 19:56:37 +00:00
|
|
|
/* free() implementation installed in a recycled freeable zone: free the
   chunk normally, then destroy the zone if that made it empty. */
static void
rffree (NSZone *zone, void *ptr)
{
  ffree(zone, ptr);
  GS_MUTEX_LOCK(zoneLock);
  if (frecycle1(zone))
    destroy_zone(zone);
  GS_MUTEX_UNLOCK(zoneLock);
}
|
|
|
|
|
1997-03-03 19:58:17 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Check integrity of a freeable zone.  Doesn't have to be
   particularly efficient.  Verifies every block's chunk chain and
   boundary tags, the segregated free lists, and the free buffer.
   Returns YES if consistent, NO on the first inconsistency found. */
static BOOL
fcheck (NSZone *zone)
{
  size_t i;
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *block;

  GS_MUTEX_LOCK(zptr->lock);
  /* Check integrity of each block the zone owns. */
  block = zptr->blocks;
  while (block != NULL)
    {
      ff_block *blockstart = &block[1];
      ff_block *blockend = chunkNext(block);
      ff_block *nextchunk = blockstart;

      /* Block trailer must point back at the block header. */
      if (blockend->next != block)
	goto inconsistent;
      /* The first chunk always has PREVUSE set (no predecessor). */
      if (!chunkIsPrevInUse(blockstart))
	goto inconsistent;

      while (nextchunk < blockend)
	{
	  ff_block *chunk = nextchunk;
	  size_t chunksize;

	  chunksize = chunkSize(chunk);
	  if ((chunksize % ALIGN) != 0)
	    goto inconsistent;
	  nextchunk = chunkNext(chunk);

	  if (chunkIsInUse(chunk))
	    /* Check whether this is a valid used chunk. */
	    {
	      if (!chunkIsPrevInUse(nextchunk))
		goto inconsistent;
	      if (chunkIsLive(chunk))
		{
		  /* Live chunks keep a pointer to the guard byte,
		     which must lie inside the chunk and still be 42. */
		  if (chunk->next < &chunk[1] || chunk->next > nextchunk)
		    goto inconsistent;
		  if (*(char*)chunk->next != (char)42)
		    goto inconsistent;
		}
	    }
	  else
	    /* Check whether this is a valid free chunk. */
	    {
	      /* Free chunks may never be adjacent (coalescing rule). */
	      if (chunkIsPrevInUse(nextchunk))
		goto inconsistent;
	      if (!chunkIsInUse(nextchunk))
		goto inconsistent;
	      if (chunkIsLive(chunk))
		goto inconsistent;
	    }
	  /* Boundary tag: a free predecessor must link forward to us. */
	  if (chunk != blockstart && chunkIsPrevInUse(chunk) == 0)
	    {
	      ff_block *prev = chunkPrev(chunk);

	      if (chunkNext(prev) != chunk)
		goto inconsistent;
	    }
	}
      /* Check whether the block ends properly. */
      if (nextchunk != blockend)
	goto inconsistent;
      if (chunkSize(blockend) != 0)
	goto inconsistent;
      if (chunkIsInUse(blockend) == 0)
	goto inconsistent;

      block = block->next;
    }
  /* Check the integrity of the segregated list. */
  for (i = 0; i < MAX_SEG; i++)
    {
      ff_link *chunk = zptr->segheadlist[i];

      while (chunk != NULL)
	{
	  ff_link *nextchunk;

	  nextchunk = chunk->next;
	  /* Isn't this one ugly if statement? */
	  if (chunkIsInUse((ff_block*)chunk)
	    || (segindex(chunkSize((ff_block*)chunk)) != i)
	    || ((nextchunk != NULL) && (chunk != nextchunk->prev))
	    || ((nextchunk == NULL) && (chunk != zptr->segtaillist[i])))
	    goto inconsistent;
	  chunk = nextchunk;
	}
    }
  /* Check the buffer. */
  if (zptr->bufsize > BUFFER)
    goto inconsistent;
  for (i = 0; i < zptr->bufsize; i++)
    {
      ff_block *chunk = zptr->ptr_buf[i];

      if ((zptr->size_buf[i] != chunkSize(chunk)) || !chunkIsInUse(chunk))
	goto inconsistent;
    }
  GS_MUTEX_UNLOCK(zptr->lock);
  return YES;

inconsistent:	// Jump here if an inconsistency was found.
  GS_MUTEX_UNLOCK(zptr->lock);
  return NO;
}
|
|
|
|
|
1998-10-15 05:03:16 +00:00
|
|
|
/* Report whether ptr lies inside any memory block owned by this
   freeable zone.  Returns YES when found, NO otherwise. */
static BOOL
flookup (NSZone *zone, void *ptr)
{
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *blk;
  BOOL present = NO;

  GS_MUTEX_LOCK(zptr->lock);
  blk = zptr->blocks;
  /* Walk the block list until the pointer falls within a block's
     [header, trailer) span, or the list ends. */
  while (NO == present && blk != NULL)
    {
      if ((void*)blk <= ptr && ptr < (void*)chunkNext(blk))
	{
	  present = YES;
	}
      else
	{
	  blk = blk->next;
	}
    }
  GS_MUTEX_UNLOCK(zptr->lock);
  return present;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Obtain statistics about the zone.  Doesn't have to be particularly
   efficient.  Walks every chunk in every block, then reclassifies
   chunks sitting in the deferred-free buffer (they are marked in-use
   on the heap but are logically free). */
static struct NSZoneStats
fstats (NSZone *zone)
{
  size_t i;
  struct NSZoneStats stats;
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *block;

  stats.bytes_total = 0;
  stats.chunks_used = 0;
  stats.bytes_used = 0;
  stats.chunks_free = 0;
  stats.bytes_free = 0;
  GS_MUTEX_LOCK(zptr->lock);
  block = zptr->blocks;
  /* Go through each block. */
  while (block != NULL)
    {
      ff_block *blockend = chunkNext(block);
      ff_block *chunk = &block[1];

      stats.bytes_total += chunkSize(block);
      while (chunk < blockend)
	{
	  size_t chunksize = chunkSize(chunk);

	  if (chunkIsInUse(chunk))
	    {
	      stats.chunks_used++;
	      stats.bytes_used += chunksize;
	    }
	  else
	    {
	      stats.chunks_free++;
	      stats.bytes_free += chunksize;
	    }
	  chunk = chunkNext(chunk);
	}
      block = block->next;
    }
  /* Go through buffer.  Buffered chunks counted as used above are
     really pending frees - move them to the free totals. */
  for (i = 0; i < zptr->bufsize; i++)
    {
      stats.chunks_used--;
      stats.chunks_free++;
      stats.bytes_used -= zptr->size_buf[i];
      stats.bytes_free += zptr->size_buf[i];
    }
  GS_MUTEX_UNLOCK(zptr->lock);
  /* Remove overhead.  Each used chunk carries an FBSZ header. */
  stats.bytes_used -= FBSZ*stats.chunks_used;
  return stats;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Calculate which segregation class a certain size should be in.
   FIXME: Optimize code and find a more optimum distribution.
   Classes 0-6 hold one exact multiple of MINCHUNK each; classes 7-15
   hold geometrically growing ranges (CLTOSZ doubling per class). */
static inline size_t
segindex (size_t size)
{
  /* Sizes are always rounded to MINCHUNK multiples by the callers. */
  NSAssert(size%MINCHUNK == 0, NSInternalInconsistencyException);

  if (size < CLTOSZ(8))
    return size/MINCHUNK;
  else if (size < CLTOSZ(16))
    return 7;
  else if (size < CLTOSZ(32))
    return 8;
  else if (size < CLTOSZ(64))
    return 9;
  else if (size < CLTOSZ(128))
    return 10;
  else if (size < CLTOSZ(256))
    return 11;
  else if (size < CLTOSZ(512))
    return 12;
  else if (size < CLTOSZ(1024))
    return 13;
  else if (size < CLTOSZ(2048))
    return 14;
  else
    return 15;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Look through the segregated list with first fit to find a memory
   chunk.  If one is not found, get more memory.  Returns NULL only
   when the underlying malloc fails.  The returned chunk is marked in
   use and exactly `size' bytes long (including header). */
static ff_block*
get_chunk (ffree_zone *zone, size_t size)
{
  size_t class = segindex(size);
  ff_block *chunk;
  ff_link *link = zone->segheadlist[class];

  NSAssert(size%MINCHUNK == 0, NSInternalInconsistencyException);

  /* First fit within the exact class. */
  while ((link != NULL) && (chunkSize((ff_block*)link) < size))
    link = link->next;
  if (link == NULL)
    /* Get more memory. */
    {
      /* Try successively larger classes; any chunk there is big enough. */
      class++;
      while ((class < MAX_SEG) && (zone->segheadlist[class] == NULL))
	class++;
      if (class == MAX_SEG)
	/* Absolutely no memory in segregated list. */
	{
	  size_t blocksize;
	  ff_block *block;

	  blocksize = roundupto(size, zone->common.gran);
	  /* 2*FBSZ: room for the block header and the block trailer. */
	  block = malloc(blocksize+2*FBSZ);
	  if (block == NULL)
	    return NULL;

	  /*
	   * Set up the new block header and add to blocks list.
	   */
	  block->size = blocksize+FBSZ;	/* Point to block trailer. */
	  block->next = zone->blocks;
	  zone->blocks = block;
	  /*
	   * Set up the block trailer.
	   */
	  chunk = chunkNext(block);
	  chunk->next = block;		/* Point back to block head. */
	  /*
	   * Now set up block contents.
	   */
	  if (size < blocksize)
	    {
	      /* Split: in-use chunk of `size', free remainder chunk. */
	      chunkSetSize(chunk, INUSE);	/* Tailer size is zero. */
	      chunk = &block[1];
	      chunkSetSize(chunk, size | PREVUSE | INUSE);
	      chunk = chunkNext(chunk);
	      chunkSetSize(chunk, (block->size-FBSZ-size) | PREVUSE);
	      put_chunk(zone, chunk);
	      chunk = &block[1];
	    }
	  else
	    {
	      /* Request consumes the whole block. */
	      chunkSetSize(chunk, PREVUSE | INUSE);
	      chunk = &block[1];
	      chunkSetSize(chunk, size | PREVUSE | INUSE);
	    }
	}
      else
	{
	  ff_block *slack;

	  NSAssert(class < MAX_SEG, NSInternalInconsistencyException);

	  chunk = (ff_block*)zone->segheadlist[class];

	  NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
	  NSAssert(size < chunkSize(chunk), NSInternalInconsistencyException);
	  NSAssert((chunkSize(chunk) % MINCHUNK) == 0,
	    NSInternalInconsistencyException);

	  /* Chop the oversized chunk; return the slack to the lists. */
	  take_chunk(zone, chunk);
	  slack = chunkChop(chunk, size);
	  put_chunk(zone, slack);
	}
    }
  else
    {
      size_t chunksize;

      chunk = (ff_block*)link;
      chunksize = chunkSize(chunk);

      NSAssert((chunksize % MINCHUNK) == 0, NSInternalInconsistencyException);
      NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
      NSAssert(chunkIsPrevInUse(chunk), NSInternalInconsistencyException);
      NSAssert(chunkIsInUse(chunkNext(chunk)),
	NSInternalInconsistencyException);

      take_chunk(zone, chunk);
      if (chunksize > size)
	{
	  ff_block *slack;

	  slack = chunkChop(chunk, size);
	  put_chunk(zone, slack);
	}
      else
	{
	  /* Exact fit: just mark the chunk (and its successor tag). */
	  ff_block *nextchunk = chunkNext(chunk);

	  NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
	  NSAssert(!chunkIsPrevInUse(nextchunk),
	    NSInternalInconsistencyException);
	  NSAssert(chunksize == size, NSInternalInconsistencyException);
	  chunkSetInUse(chunk);
	  chunkSetPrevInUse(nextchunk);
	}
    }
  NSAssert(chunkIsInUse(chunk), NSInternalInconsistencyException);
  NSAssert(chunkIsPrevInUse(chunkNext(chunk)),
    NSInternalInconsistencyException);
  return chunk;
}
|
|
|
|
|
|
|
|
/* Take the given chunk out of the free list.  No headers are set.
   Standard doubly-linked-list unlink, with the segregated head/tail
   arrays standing in for the NULL neighbours. */
static void
take_chunk (ffree_zone *zone, ff_block *chunk)
{
  size_t size = chunkSize(chunk);
  size_t class = segindex(size);
  ff_link *otherlink;
  ff_link *links = (ff_link*)chunk;

  NSAssert((size % MINCHUNK) == 0, NSInternalInconsistencyException);
  NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);

  if (links->prev == NULL)
    zone->segheadlist[class] = links->next;
  else
    {
      otherlink = links->prev;
      otherlink->next = links->next;
    }
  if (links->next == NULL)
    zone->segtaillist[class] = links->prev;
  else
    {
      otherlink = links->next;
      otherlink->prev = links->prev;
    }
}
|
1997-03-03 19:43:25 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/*
 * Add the given chunk to the segregated list.  The header to the
 * chunk must be set appropriately, but the tailer is set here.
 * NB. The chunk must NOT be in use, and the adjacent chunks within
 * its memory block MUST be in use - the memory coalescing done in
 * flush_buf() depends on this rule.
 * Chunks are appended at the tail of their size class.
 */
static void
put_chunk (ffree_zone *zone, ff_block *chunk)
{
  size_t size = chunkSize(chunk);
  size_t class = segindex(size);
  ff_link *links = (ff_link*)chunk;

  NSAssert((chunkSize(chunk) % MINCHUNK) == 0,
    NSInternalInconsistencyException);
  NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
  NSAssert(chunkIsPrevInUse(chunk), NSInternalInconsistencyException);
  NSAssert(chunkIsInUse(chunkNext(chunk)), NSInternalInconsistencyException);

  /* Write the boundary tag (tailer) so chunkPrev() works. */
  chunkMakeLink(chunk);
  if (zone->segtaillist[class] == NULL)
    {
      /* Empty class: chunk becomes both head and tail. */
      NSAssert(zone->segheadlist[class] == NULL,
	NSInternalInconsistencyException);

      zone->segheadlist[class] = zone->segtaillist[class] = links;
      links->prev = links->next = NULL;
    }
  else
    {
      ff_link *prevlink = zone->segtaillist[class];

      NSAssert(zone->segheadlist[class] != NULL,
	NSInternalInconsistencyException);

      links->next = NULL;
      links->prev = prevlink;
      prevlink->next = links;
      zone->segtaillist[class] = links;
    }
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Add the given pointer to the buffer.  If the buffer becomes full,
   flush it.  The given pointer must always be one that points to used
   memory (i.e. chunks with headers that declare them as used).
   Deferring frees this way batches the coalescing work in flush_buf(). */
static inline void
add_buf (ffree_zone *zone, ff_block *chunk)
{
  size_t bufsize = zone->bufsize;

  NSAssert(bufsize < BUFFER, NSInternalInconsistencyException);
  NSAssert(chunkIsInUse(chunk), NSInternalInconsistencyException);
  NSAssert((chunkSize(chunk) % MINCHUNK) == 0,
    NSInternalInconsistencyException);
  NSAssert(chunkSize(chunk) >= MINCHUNK, NSInternalInconsistencyException);

  zone->bufsize++;
  /* Remember the size so flush_buf()/fcheck() can detect corruption. */
  zone->size_buf[bufsize] = chunkSize(chunk);
  zone->ptr_buf[bufsize] = chunk;
  /* Clear the live flag so a double free is caught in ffree(). */
  chunkClrLive(chunk);
  if (bufsize == BUFFER-1)
    flush_buf(zone);
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Flush buffers.  All coalescing is done here.  Each buffered chunk is
   merged with its free neighbours (if any), marked free, and returned
   to the segregated lists.  Caller must hold the zone lock. */
static void
flush_buf (ffree_zone *zone)
{
  size_t i, size;
  size_t bufsize = zone->bufsize;
  ff_block *chunk, *nextchunk;
  size_t *size_buf = zone->size_buf;
  ff_block **ptr_buf = zone->ptr_buf;

  NSAssert(bufsize <= BUFFER, NSInternalInconsistencyException);

  for (i = 0; i < bufsize; i++)
    {
      size = size_buf[i];
      chunk = ptr_buf[i];

      /* Size recorded at add_buf() time must still match the header. */
      NSAssert(chunkSize(chunk) == size, NSInternalInconsistencyException);
      NSAssert(chunkIsInUse(chunk), NSInternalInconsistencyException);

      nextchunk = chunkNext(chunk);
      if (!chunkIsPrevInUse(chunk))
	/* Coalesce with previous chunk. */
	{
	  chunk = chunkPrev(chunk);
	  NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
	  NSAssert(chunkIsPrevInUse(chunk), NSInternalInconsistencyException);
	  size += chunkSize(chunk);
	  take_chunk(zone, chunk);
	}
      if (!chunkIsInUse(nextchunk))
	/* Coalesce with next chunk. */
	{
	  size_t nextsize = chunkSize(nextchunk);

	  NSAssert(chunkIsPrevInUse(nextchunk),
	    NSInternalInconsistencyException);
	  NSAssert((nextsize % MINCHUNK) == 0,
	    NSInternalInconsistencyException);
	  size += nextsize;
	  take_chunk(zone, nextchunk);
	  nextchunk = chunkNext(nextchunk);
	}
      /* Install the merged chunk as free and fix the successor's tag. */
      chunkSetSize(chunk, size | PREVUSE);
      put_chunk(zone, chunk);
      chunkClrPrevInUse(nextchunk);
      NSAssert(chunkNext(chunk) == nextchunk, NSInternalInconsistencyException);
      NSAssert(chunkPrev(nextchunk) == chunk, NSInternalInconsistencyException);
      NSAssert((chunkSize(chunk) % MINCHUNK) == 0,
	NSInternalInconsistencyException);
      NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
      NSAssert(chunkIsPrevInUse(chunk), NSInternalInconsistencyException);
      NSAssert(chunkIsInUse(nextchunk), NSInternalInconsistencyException);
      NSAssert(!chunkIsPrevInUse(nextchunk), NSInternalInconsistencyException);
    }
  zone->bufsize = 0;
}
|
|
|
|
|
|
|
|
/* If the first block in block list has enough space, use that space.
   Otherwise, sort the block list in decreasing free space order (only
   the first block needs to be put in its appropriate place since
   the rest of the list is already sorted).  Then check if the first
   block has enough space for the request.  If it does, use it.  If it
   doesn't, get more memory from the default zone, since none of the
   other blocks in the block list could have enough memory. */
static void*
nmalloc (NSZone *zone, size_t size)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  size_t chunksize = roundupto(size, ALIGN);
  size_t freesize;
  void *chunkhead;
  nf_block *block;
  size_t top;

  GS_MUTEX_LOCK(zptr->lock);
  block = zptr->blocks;
  top = block->top;
  freesize = block->size-top;
  if (freesize >= chunksize)
    {
      /* Fast path: bump-allocate from the head block. */
      chunkhead = (void*)(block)+top;
      block->top += chunksize;
    }
  else
    {
      nf_block *preblock;

      /* First, get the block list in decreasing free size order. */
      preblock = NULL;
      while ((block->next != NULL)
	&& (freesize < block->next->size-block->next->top))
	{
	  preblock = block;
	  block = block->next;
	}
      if (preblock != NULL)
	{
	  /* Re-insert the old head just before `block'. */
	  preblock->next = zptr->blocks;
	  zptr->blocks = zptr->blocks->next;
	  preblock->next->next = block;
	}
      if (zptr->blocks->size-zptr->blocks->top < chunksize)
	/* Get new block. */
	{
	  size_t blocksize = roundupto(chunksize+NF_HEAD, zone->gran);

	  block = malloc(blocksize);
	  if (block == NULL)
	    {
	      /* Unlock before raising so the lock isn't leaked. */
	      GS_MUTEX_UNLOCK(zptr->lock);
	      if (zone->name != nil)
		[NSException raise: NSMallocException
			    format: @"Zone %@ has run out of memory",
			    zone->name];
	      else
		[NSException raise: NSMallocException
			    format: @"Out of memory"];
	    }
	  block->next = zptr->blocks;
	  block->size = blocksize;
	  block->top = NF_HEAD;
	  zptr->blocks = block;
	}
      /* NOTE(review): when no new block was needed, this allocates from
	 `block' (the insertion point of the old head) rather than from
	 the sorted list head checked just above - presumably intentional
	 given the sort invariant, but worth confirming. */
      chunkhead = (void*)block+block->top;
      block->top += chunksize;
    }
  /* Count outstanding allocations so recycling knows when it's safe. */
  zptr->use++;
  GS_MUTEX_UNLOCK(zptr->lock);
  return chunkhead;
}
|
1999-09-28 10:25:42 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Return the blocks to the default zone, then deallocate mutex, and
   then release zone name if it exists.  Blocks are only freed when no
   allocations remain outstanding (use == 0).  Returns YES - after
   destroying the zone lock - when the zone holds no blocks. */
static BOOL
nrecycle1 (NSZone *zone)
{
  nfree_zone *zptr = (nfree_zone*)zone;

  GS_MUTEX_LOCK(zptr->lock);
  if (zptr->use == 0)
    {
      nf_block *nextblock;
      nf_block *block = zptr->blocks;

      while (block != NULL)
	{
	  nextblock = block->next;
	  free(block);
	  block = nextblock;
	}
      zptr->blocks = 0;
    }
  GS_MUTEX_UNLOCK(zptr->lock);
  if (zptr->blocks == 0)
    {
      GS_MUTEX_DESTROY(zptr->lock);
      return YES;
    }
  return NO;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Recycle the zone.  Releases the zone name, then destroys the zone if
   no allocations are outstanding; otherwise redirects the zone's
   function pointers to the recycled-zone variants so the final rnfree
   completes the destruction. */
static void
nrecycle (NSZone *zone)
{
  GS_MUTEX_LOCK(zoneLock);
  if (zone->name != nil)
    {
      NSString *name = zone->name;

      /* Clear before release so no one sees a dangling name. */
      zone->name = nil;
      [name release];
    }
  if (nrecycle1(zone) == YES)
    destroy_zone(zone);
  else
    {
      zone->malloc = rmalloc;
      zone->realloc = rrealloc;
      zone->free = rnfree;
      zone->recycle = rrecycle;
    }
  GS_MUTEX_UNLOCK(zoneLock);
}
|
1999-09-29 14:13:52 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Realloc in a nonfreeable zone: always allocates a fresh chunk and
   copies as much of the old data as can be determined, since chunks in
   this zone carry no size headers.  The old storage is never reused. */
static void*
nrealloc (NSZone *zone, void *ptr, size_t size)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  void *tmp = nmalloc(zone, size);

  if (ptr != 0)
    {
      GS_MUTEX_LOCK(zptr->lock);
      if (tmp)
	{
	  nf_block *block;
	  size_t old = 0;

	  /* Find the block containing ptr to bound the copy. */
	  for (block = zptr->blocks; block != NULL; block = block->next) {
	    if (ptr >= (void*)block && ptr < ((void*)block)+block->size) {
	      old = ((void*)block)+block->size - ptr;
	      break;
	    }
	  }
	  if (old > 0)
	    {
	      /* NOTE(review): `old' is the distance to the block end, an
		 upper bound on the original allocation size - the copy
		 may read bytes beyond the original chunk (but within the
		 block).  Presumably acceptable here; confirm. */
	      if (size < old)
		old = size;
	      memcpy(tmp, ptr, old);
	    }
	}
      /* The old chunk is logically abandoned. */
      zptr->use--;
      GS_MUTEX_UNLOCK(zptr->lock);
    }
  return tmp;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/*
 * The OpenStep spec says we don't release memory - but we have to do
 * some minimal bookkeeping so that, when the zone is recycled, we can
 * determine if all the allocated memory has been freed.  Until it is
 * all freed, we can't actually destroy the zone!
 * So this only decrements the outstanding-allocation counter; ptr is
 * deliberately unused.
 */
static void
nfree (NSZone *zone, void *ptr)
{
  nfree_zone *zptr = (nfree_zone*)zone;

  GS_MUTEX_LOCK(zptr->lock);
  zptr->use--;
  GS_MUTEX_UNLOCK(zptr->lock);
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* free() implementation installed in a recycled nonfreeable zone:
   record the free, and once the last outstanding allocation is gone,
   release the blocks and destroy the zone. */
static void
rnfree (NSZone *zone, void *ptr)
{
  nfree_zone *zptr = (nfree_zone*)zone;

  nfree(zone, ptr);
  /* NOTE(review): `use' is read here without the zone lock held -
     racy under concurrent frees; confirm callers serialise this. */
  if (zptr->use == 0)
    {
      GS_MUTEX_LOCK(zoneLock);
      nrecycle1(zone);
      destroy_zone(zone);
      GS_MUTEX_UNLOCK(zoneLock);
    }
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Check integrity of a nonfreeable zone.  Doesn't have to
   particularly efficient.  A block is consistent when its fill level
   (top) does not exceed its capacity (size). */
static BOOL
ncheck (NSZone *zone)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  nf_block *blk;
  BOOL consistent = YES;

  GS_MUTEX_LOCK(zptr->lock);
  for (blk = zptr->blocks; blk != NULL; blk = blk->next)
    {
      if (blk->size < blk->top)
	{
	  consistent = NO;
	  break;
	}
    }
  /* FIXME: Do more checking? */
  GS_MUTEX_UNLOCK(zptr->lock);
  return consistent;
}
|
2009-01-19 11:00:33 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Report whether ptr lies inside any memory block owned by this
   nonfreeable zone.  Returns YES when found, NO otherwise. */
static BOOL
nlookup (NSZone *zone, void *ptr)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  nf_block *blk;
  BOOL present = NO;

  GS_MUTEX_LOCK(zptr->lock);
  blk = zptr->blocks;
  /* Scan until ptr falls within a block's [start, start+size) span. */
  while (NO == present && blk != NULL)
    {
      if ((void*)blk <= ptr && ptr < ((void*)blk)+blk->size)
	{
	  present = YES;
	}
      else
	{
	  blk = blk->next;
	}
    }
  GS_MUTEX_UNLOCK(zptr->lock);
  return present;
}
|
2009-01-19 11:00:33 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Return statistics for a nonfreeable zone.  Doesn't have to
   particularly efficient. */
static struct NSZoneStats
nstats (NSZone *zone)
{
  struct NSZoneStats stats;
  nfree_zone *zptr = (nfree_zone*)zone;
  nf_block *block;

  stats.bytes_total = 0;
  stats.chunks_used = 0;
  stats.bytes_used = 0;
  stats.chunks_free = 0;
  stats.bytes_free = 0;
  GS_MUTEX_LOCK(zptr->lock);
  block = zptr->blocks;
  while (block != NULL)
    {
      size_t *chunk;

      stats.bytes_total += block->size;
      chunk = (void*)block+NF_HEAD;
      /* NOTE(review): this walk treats the first size_t of each chunk
	 as its length, but nmalloc() does not visibly write such a
	 header - confirm this layout against the allocation path. */
      while ((void*)chunk < (void*)block+block->top)
	{
	  stats.chunks_used++;
	  stats.bytes_used += *chunk;
	  chunk = (void*)chunk+(*chunk);
	}
      /* Remaining space between top and size is one free run. */
      if (block->size != block->top)
	{
	  stats.chunks_free++;
	  stats.bytes_free += block->size-block->top;
	}
      block = block->next;
    }
  GS_MUTEX_UNLOCK(zptr->lock);
  return stats;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
|
|
|
|
/* Allocation entry for a recycled zone: allocating from a zone
   which has already been recycled is a programming error, so we
   always raise.  The return is never reached.  */
static void*
rmalloc (NSZone *zone, size_t size)
{
  [NSException raise: NSMallocException
              format: @"Attempt to malloc memory in recycled zone"];
  return 0;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Recycle entry for a recycled zone: recycling twice is a
   programming error, so we always raise.  */
static void
rrecycle (NSZone *zone)
{
  [NSException raise: NSMallocException
              format: @"Attempt to recycle a recycled zone"];
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Reallocation entry for a recycled zone: reallocating in a zone
   which has already been recycled is a programming error, so we
   always raise.  The return is never reached.  */
static void*
rrealloc (NSZone *zone, void *ptr, size_t size)
{
  [NSException raise: NSMallocException
              format: @"Attempt to realloc memory in recycled zone"];
  return 0;
}
|
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
static void rnfree (NSZone *zone, void *ptr);
|
2009-01-20 10:15:52 +00:00
|
|
|
|
2009-04-10 08:25:03 +00:00
|
|
|
/* Return the zone which owns the memory at ptr, by asking each
   known zone whether the pointer lies inside it.  Returns 0 for a
   null pointer, and falls back to the default zone when no zone
   claims the pointer (e.g. memory from plain malloc).  */
GS_DECLARE NSZone*
NSZoneFromPointer(void *ptr)
{
  NSZone *zone;

  if (ptr == 0) return 0;
  if (zone_list == 0) return &default_zone;

  /*
   * See if we can find the zone in our list of all zones.
   */
  GS_MUTEX_LOCK(zoneLock);
  for (zone = zone_list; zone != 0; zone = zone->next)
    {
      /* Each zone supplies its own lookup function.  */
      if ((zone->lookup)(zone, ptr) == YES)
	{
	  break;
	}
    }
  GS_MUTEX_UNLOCK(zoneLock);
  return (zone == 0) ? &default_zone : zone;
}
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Create a new zone with the given suggested start size and
   granularity (both rounded up to implementation minima; zero means
   use the default).  If canFree is YES a freeable zone is built
   (individual deallocation supported), otherwise a faster
   nonfreeable zone.  Raises NSMallocException if memory for the
   zone cannot be obtained.  The new zone is linked into the global
   zone list so NSZoneFromPointer() can find it.  */
GS_DECLARE NSZone*
NSCreateZone (NSUInteger start, NSUInteger gran, BOOL canFree)
{
  size_t i, startsize, granularity;
  NSZone *newZone;

  /* Round the requested sizes up to usable values.  */
  if (start > 0)
    startsize = roundupto(start, roundupto(MINGRAN, MINCHUNK));
  else
    startsize = roundupto(MINGRAN, MINCHUNK);
  if (gran > 0)
    granularity = roundupto(gran, roundupto(MINGRAN, MINCHUNK));
  else
    granularity = roundupto(MINGRAN, MINCHUNK);
  if (canFree)
    {
      ffree_zone *zone;
      ff_block *block;
      ff_block *chunk;
      ff_block *tailer;

      zone = malloc(sizeof(ffree_zone));
      if (zone == NULL)
        [NSException raise: NSMallocException
                    format: @"No memory to create zone"];
      /* Install the freeable-zone function table.  */
      zone->common.malloc = fmalloc;
      zone->common.realloc = frealloc;
      zone->common.free = ffree;
      zone->common.recycle = frecycle;
      zone->common.check = fcheck;
      zone->common.lookup = flookup;
      zone->common.stats = fstats;
      zone->common.gran = granularity;
      zone->common.name = nil;
      GS_MUTEX_INIT_RECURSIVE(zone->lock);
      for (i = 0; i < MAX_SEG; i++)
        {
          zone->segheadlist[i] = NULL;
          zone->segtaillist[i] = NULL;
        }
      zone->bufsize = 0;
      /* Extra 2*FBSZ covers the block header and trailer.  */
      zone->blocks = malloc(startsize + 2*FBSZ);
      if (zone->blocks == NULL)
        {
          /* Clean up the partially built zone before raising.  */
          GS_MUTEX_DESTROY(zone->lock);
          free(zone);
          [NSException raise: NSMallocException
                      format: @"No memory to create zone"];
        }
      /*
       * Set up block header.
       */
      block = zone->blocks;
      block->next = NULL;		/* Point to next block. */
      block->size = startsize+FBSZ;	/* Point to first chunk. */
      /*
       * Set up block trailer.
       */
      tailer = chunkNext(block);
      chunkSetSize(tailer, PREVUSE|INUSE);
      tailer->next = block;		/* Point back to block start. */
      /*
       * Set up the block as a single chunk and put it in the
       * buffer for quick allocation.
       */
      chunk = &block[1];
      chunkSetSize(chunk, (block->size-FBSZ) | PREVUSE|INUSE);
      add_buf(zone, chunk);

      newZone = (NSZone*)zone;
    }
  else
    {
      nf_block *block;
      nfree_zone *zone;

      zone = malloc(sizeof(nfree_zone));
      if (zone == NULL)
        [NSException raise: NSMallocException
                    format: @"No memory to create zone"];
      /* Install the nonfreeable-zone function table.  */
      zone->common.malloc = nmalloc;
      zone->common.realloc = nrealloc;
      zone->common.free = nfree;
      zone->common.recycle = nrecycle;
      zone->common.check = ncheck;
      zone->common.lookup = nlookup;
      zone->common.stats = nstats;
      zone->common.gran = granularity;
      zone->common.name = nil;
      GS_MUTEX_INIT_RECURSIVE(zone->lock);
      zone->blocks = malloc(startsize);
      zone->use = 0;
      if (zone->blocks == NULL)
        {
          /* Clean up the partially built zone before raising.  */
          GS_MUTEX_DESTROY(zone->lock);
          free(zone);
          [NSException raise: NSMallocException
                      format: @"No memory to create zone"];
        }

      /* A single empty block; allocation starts just past the
         NF_HEAD header bytes.  */
      block = zone->blocks;
      block->next = NULL;
      block->size = startsize;
      block->top = NF_HEAD;
      newZone = (NSZone*)zone;
    }

  /* Publish the new zone on the global list.  */
  GS_MUTEX_LOCK(zoneLock);
  newZone->next = zone_list;
  zone_list = newZone;
  GS_MUTEX_UNLOCK(zoneLock);

  return newZone;
}
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Allocate zeroed memory for elems items of bytes size each from
   the specified zone (default zone if null).  Raises
   NSMallocException if the memory cannot be allocated or if the
   total size would overflow NSUInteger.  */
GS_DECLARE void*
NSZoneCalloc (NSZone *zone, NSUInteger elems, NSUInteger bytes)
{
  void *mem;

  if (0 == zone || NSDefaultMallocZone() == zone)
    {
      /* calloc() itself checks elems*bytes for overflow.  */
      mem = calloc(elems, bytes);
      if (mem != NULL)
	{
	  return mem;
	}
      [NSException raise: NSMallocException
		  format: @"Default zone has run out of memory"];
    }
  /* For a custom zone we compute elems*bytes ourselves, so guard
     against arithmetic overflow which would otherwise allocate a
     too-small buffer and then write past it in memset().  */
  if (bytes > 0 && elems > ((NSUInteger)-1) / bytes)
    {
      [NSException raise: NSMallocException
		  format: @"Allocation size overflows NSUInteger"];
    }
  return memset(NSZoneMalloc(zone, elems*bytes), 0, elems*bytes);
}
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Garbage collection is not supported, so hand out zeroed memory
   from the default zone and ignore the options.  */
GS_DECLARE void*
NSAllocateCollectable(NSUInteger size, NSUInteger options)
{
  NSZone	*defaultZone = NSDefaultMallocZone();

  return NSZoneCalloc(defaultZone, 1, size);
}
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Garbage collection is not supported, so this behaves as a plain
   reallocation (in the zone owning ptr), ignoring the options.  */
GS_DECLARE void*
NSReallocateCollectable(void *ptr, NSUInteger size, NSUInteger options)
{
  return NSZoneRealloc(NULL, ptr, size);
}
|
2009-01-19 11:00:33 +00:00
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Return the default zone used for ordinary memory allocation.  */
GS_DECLARE NSZone*
NSDefaultMallocZone (void)
{
  return &default_zone;
}
|
|
|
|
|
|
|
|
/* Without garbage collection, 'atomic' memory is just ordinary
   memory, so this is the default zone too.  */
NSZone*
GSAtomicMallocZone (void)
{
  return &default_zone;
}
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Allocate size bytes from the specified zone (default zone if
   null) by dispatching through the zone's malloc function.  */
GS_DECLARE void*
NSZoneMalloc (NSZone *zone, NSUInteger size)
{
  if (NULL == zone)
    {
      zone = NSDefaultMallocZone();
    }
  return zone->malloc(zone, size);
}
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Reallocate ptr to size bytes in the specified zone (default zone
   if null) by dispatching through the zone's realloc function.  */
GS_DECLARE void*
NSZoneRealloc (NSZone *zone, void *ptr, NSUInteger size)
{
  if (NULL == zone)
    {
      zone = NSDefaultMallocZone();
    }
  return zone->realloc(zone, ptr, size);
}
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Recycle the specified zone (default zone if null) by dispatching
   through the zone's recycle function.  */
GS_DECLARE void
NSRecycleZone (NSZone *zone)
{
  if (NULL == zone)
    {
      zone = NSDefaultMallocZone();
    }
  zone->recycle(zone);
}
|
|
|
|
|
2021-03-26 15:06:49 +00:00
|
|
|
/* Release memory at ptr back to the specified zone (default zone
   if null) by dispatching through the zone's free function.  */
GS_DECLARE void
NSZoneFree (NSZone *zone, void *ptr)
{
  if (NULL == zone)
    {
      zone = NSDefaultMallocZone();
    }
  zone->free(zone, ptr);
}
|
|
|
|
|
|
|
|
/* Check the integrity of the specified zone (default zone if null)
   by dispatching through the zone's check function.  */
BOOL
NSZoneCheck (NSZone *zone)
{
  if (NULL == zone)
    {
      zone = NSDefaultMallocZone();
    }
  return zone->check(zone);
}
|
|
|
|
|
|
|
|
/* Return usage statistics for the specified zone (default zone if
   null) by dispatching through the zone's stats function.  */
struct NSZoneStats
NSZoneStats (NSZone *zone)
{
  if (NULL == zone)
    {
      zone = NSDefaultMallocZone();
    }
  return zone->stats(zone);
}
|
|
|
|
|
2009-04-15 08:03:19 +00:00
|
|
|
/* Garbage collection is not supported, so no pointer is ever
   collectable.  */
BOOL
GSPrivateIsCollectable(const void *ptr)
{
  return NO;
}
|
|
|
|
|