Apply patch to switch completely to using pthreads

git-svn-id: svn+ssh://svn.gna.org/svn/gnustep/libs/base/trunk@29367 72102866-910b-0410-8b05-ffd578937521
Richard Frith-MacDonald 2010-01-23 17:00:13 +00:00
parent 3110312b36
commit b662140e16
8 changed files with 174 additions and 197 deletions

View file

@ -1,3 +1,14 @@
2010-01-23 Niels Grewe <niels.grewe@halbordnung.de>
* Source/GSFFCallInvocation.m:
* Source/NSZone.m:
* Source/Additions/GSObjCRuntime.m:
* Source/Additions/GCObject.m:
* Source/synchronization.m:
* Source/GSPThread.h:
* Headers/Additions/GNUstepBase/GSObjCRuntime.h:
Updates for switch to using pthreads
2010-01-21 Eric Wasylishen <ewasylishen@gmail.com>
* Headers/Foundation/NSObjCRuntime.h: define CGFLOAT_DEFINED when

View file

@ -656,31 +656,6 @@ GSObjCZone(NSObject *obj);
GS_EXPORT void *
GSAutoreleasedBuffer(unsigned size);
/**
* Allocate a new objc_mutex_t and store it in the location
* pointed to by request. A mutex is only created if the value
* pointed to by request is NULL. This function is thread safe
* in the sense that multiple threads may call this function with the same
* value of request and only one will actually set the mutex.
* It is the user's responsibility that no one else attempts to set
* the mutex pointed to. This function should be
* used with objc_mutex_t variables which were statically initialized
* to NULL like:
* <example>
* void function (void)
* {
* static objc_mutex_t my_lock = NULL;
* if (my_lock == NULL)
* GSAllocateMutexAt(&my_lock);
* objc_mutex_lock(my_lock);
* do_work ();
* objc_mutex_unlock(my_lock);
* }
* </example>
*/
GS_EXPORT void
GSAllocateMutexAt(objc_mutex_t *request);
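The statically initialized pthread mutexes adopted elsewhere in this commit make this helper unnecessary; a minimal sketch of the replacement pattern, reusing the illustrative names (function, do_work, my_lock) from the example above:

#include <pthread.h>

/* Illustrative names from the removed example above.  No lazy
 * allocation step is needed: a statically initialized mutex is
 * ready to lock on first use. */
static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

void function (void)
{
  pthread_mutex_lock(&my_lock);
  do_work ();
  pthread_mutex_unlock(&my_lock);
}
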
/**
* <p>Prints a message to fptr using the format string provided and any
* additional arguments. The format string is interpreted as by

View file

@ -40,6 +40,7 @@
#include "GNUstepBase/GCObject.h"
#include "GNUstepBase/GSCategories.h"
#include <pthread.h>
/*
* The head of a linked list of all garbage collecting objects is a
@ -70,18 +71,25 @@ static BOOL isCollecting = NO;
#ifdef NeXT_RUNTIME
static void *allocationLock = NULL;
#define objc_mutex_allocate() NULL
#define objc_mutex_lock(lock)
#define objc_mutex_unlock(lock)
#define pthread_mutex_lock(lock)
#define pthread_mutex_unlock(lock)
#else
static objc_mutex_t allocationLock = NULL;
static pthread_mutex_t *allocationLock = NULL;
#endif
+ (void) _becomeMultiThreaded: (NSNotification *)aNotification
{
if (allocationLock == 0)
if (allocationLock == NULL)
{
allocationLock = objc_mutex_allocate();
# ifndef NeXT_RUNTIME
allocationLock = malloc(sizeof(pthread_mutex_t));
if (allocationLock == NULL)
{
abort();
}
pthread_mutex_init(allocationLock, NULL);
# endif
}
}
@ -95,7 +103,7 @@ static objc_mutex_t allocationLock = NULL;
if (allocationLock != 0)
{
objc_mutex_lock(allocationLock);
pthread_mutex_lock(allocationLock);
}
o->gc.next = allObjects;
o->gc.previous = allObjects->gc.previous;
@ -104,7 +112,7 @@ static objc_mutex_t allocationLock = NULL;
o->gc.flags.refCount = 1;
if (allocationLock != 0)
{
objc_mutex_unlock(allocationLock);
pthread_mutex_unlock(allocationLock);
}
return o;
@ -145,13 +153,13 @@ static objc_mutex_t allocationLock = NULL;
if (allocationLock != 0)
{
objc_mutex_lock(allocationLock);
pthread_mutex_lock(allocationLock);
}
if (isCollecting == YES)
{
if (allocationLock != 0)
{
objc_mutex_unlock(allocationLock);
pthread_mutex_unlock(allocationLock);
}
return; // Don't allow recursion.
}
@ -211,7 +219,7 @@ static objc_mutex_t allocationLock = NULL;
isCollecting = NO;
if (allocationLock != 0)
{
objc_mutex_unlock(allocationLock);
pthread_mutex_unlock(allocationLock);
}
}
@ -261,7 +269,7 @@ static objc_mutex_t allocationLock = NULL;
if (allocationLock != 0)
{
objc_mutex_lock(allocationLock);
pthread_mutex_lock(allocationLock);
}
// p = anObject->gc.previous;
// n = anObject->gc.next;
@ -273,7 +281,7 @@ static objc_mutex_t allocationLock = NULL;
[n gcSetPreviousObject: p];
if (allocationLock != 0)
{
objc_mutex_unlock(allocationLock);
pthread_mutex_unlock(allocationLock);
}
}
@ -288,7 +296,7 @@ static objc_mutex_t allocationLock = NULL;
if (allocationLock != 0)
{
objc_mutex_lock(allocationLock);
pthread_mutex_lock(allocationLock);
}
o->gc.next = allObjects;
o->gc.previous = allObjects->gc.previous;
@ -297,7 +305,7 @@ static objc_mutex_t allocationLock = NULL;
o->gc.flags.refCount = 1;
if (allocationLock != 0)
{
objc_mutex_unlock(allocationLock);
pthread_mutex_unlock(allocationLock);
}
return o;
}
@ -315,7 +323,7 @@ static objc_mutex_t allocationLock = NULL;
if (allocationLock != 0)
{
objc_mutex_lock(allocationLock);
pthread_mutex_lock(allocationLock);
}
// p = anObject->gc.previous;
// n = anObject->gc.next;
@ -327,7 +335,7 @@ static objc_mutex_t allocationLock = NULL;
[n gcSetPreviousObject: p];
if (allocationLock != 0)
{
objc_mutex_unlock(allocationLock);
pthread_mutex_unlock(allocationLock);
}
[super dealloc];
}
@ -410,7 +418,7 @@ static objc_mutex_t allocationLock = NULL;
{
if (allocationLock != 0)
{
objc_mutex_lock(allocationLock);
pthread_mutex_lock(allocationLock);
}
if (gc.flags.refCount > 0 && gc.flags.refCount-- == 1)
{
@ -419,7 +427,7 @@ static objc_mutex_t allocationLock = NULL;
}
if (allocationLock != 0)
{
objc_mutex_unlock(allocationLock);
pthread_mutex_unlock(allocationLock);
}
}
@ -430,12 +438,12 @@ static objc_mutex_t allocationLock = NULL;
{
if (allocationLock != 0)
{
objc_mutex_lock(allocationLock);
pthread_mutex_lock(allocationLock);
}
gc.flags.refCount++;
if (allocationLock != 0)
{
objc_mutex_unlock(allocationLock);
pthread_mutex_unlock(allocationLock);
}
return self;
}

View file

@ -58,6 +58,10 @@
#include <string.h>
#ifndef NeXT_RUNTIME
#include <pthread.h>
#endif
#ifdef NeXT_Foundation_LIBRARY
@interface NSObject (MissingFromMacOSX)
+ (IMP) methodForSelector: (SEL)aSelector;
@ -67,59 +71,6 @@
#define BDBGPrintf(format, args...) \
do { if (behavior_debug) { fprintf(stderr, (format) , ## args); } } while (0)
static objc_mutex_t local_lock = NULL;
/* This class is intended solely for thread safe / +load safe
initialization of the local lock.
It's a root class so it won't trigger the initialization
of any other class. */
@interface _GSObjCRuntimeInitializer /* Root Class */
{
Class isa;
}
+ (Class)class;
@end
@implementation _GSObjCRuntimeInitializer
+ (void)initialize
{
if (local_lock == NULL)
{
local_lock = objc_mutex_allocate();
}
}
+ (Class)class
{
return self;
}
@end
void
GSAllocateMutexAt(objc_mutex_t *request)
{
if (request == NULL)
{
/* This could be called very early in process
initialization so many things may not have
been set up correctly yet. */
fprintf(stderr,
"Error: GSAllocateMutexAt() called with NULL pointer.\n");
abort();
}
if (local_lock == NULL)
{
/* Initialize in a thread safe way. */
[_GSObjCRuntimeInitializer class];
}
objc_mutex_lock(local_lock);
if (*request == NULL)
{
*request = objc_mutex_allocate();
}
objc_mutex_unlock(local_lock);
}
/**
* This function is used to locate information about the instance
* variable of obj called name. It returns YES if the variable
@ -310,15 +261,10 @@ GSClassList(Class *buffer, unsigned int max, BOOL clearCache)
#else
static Class *cache = 0;
static unsigned cacheClassCount = 0;
static volatile objc_mutex_t cache_lock = NULL;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
unsigned int num;
if (cache_lock == NULL)
{
GSAllocateMutexAt((void*)&cache_lock);
}
objc_mutex_lock(cache_lock);
pthread_mutex_lock(&cache_lock);
if (clearCache)
{
@ -369,7 +315,7 @@ GSClassList(Class *buffer, unsigned int max, BOOL clearCache)
num = (max > cacheClassCount) ? 0 : (cacheClassCount - max);
}
objc_mutex_unlock(cache_lock);
pthread_mutex_unlock(&cache_lock);
#endif
@ -1267,26 +1213,22 @@ gs_find_protocol_named(const char *name)
static GSIMapTable_t protocol_by_name;
static BOOL protocol_by_name_init = NO;
static volatile objc_mutex_t protocol_by_name_lock = NULL;
static pthread_mutex_t protocol_by_name_lock = PTHREAD_MUTEX_INITIALIZER;
/* Not sure about the semantics of inlining
functions with static variables. */
static void
gs_init_protocol_lock(void)
{
if (protocol_by_name_lock == NULL)
{
GSAllocateMutexAt((void *)&protocol_by_name_lock);
objc_mutex_lock(protocol_by_name_lock);
if (protocol_by_name_init == NO)
{
pthread_mutex_lock(&protocol_by_name_lock);
if (protocol_by_name_init == NO)
{
GSIMapInitWithZoneAndCapacity (&protocol_by_name,
NSDefaultMallocZone(),
128);
protocol_by_name_init = YES;
}
objc_mutex_unlock(protocol_by_name_lock);
}
pthread_mutex_unlock(&protocol_by_name_lock);
}
void
@ -1303,7 +1245,7 @@ GSRegisterProtocol(Protocol *proto)
pcl p;
p = (pcl)proto;
objc_mutex_lock(protocol_by_name_lock);
pthread_mutex_lock(&protocol_by_name_lock);
node = GSIMapNodeForKey(&protocol_by_name,
(GSIMapKey) p->protocol_name);
if (node == 0)
@ -1312,7 +1254,7 @@ GSRegisterProtocol(Protocol *proto)
(GSIMapKey) (void *) p->protocol_name,
(GSIMapVal) (void *) p);
}
objc_mutex_unlock(protocol_by_name_lock);
pthread_mutex_unlock(&protocol_by_name_lock);
}
}
@ -1334,7 +1276,7 @@ GSProtocolFromName(const char *name)
}
else
{
objc_mutex_lock(protocol_by_name_lock);
pthread_mutex_lock(&protocol_by_name_lock);
node = GSIMapNodeForKey(&protocol_by_name, (GSIMapKey) name);
if (node)
@ -1353,7 +1295,7 @@ GSProtocolFromName(const char *name)
(GSIMapVal) (void *) p);
}
}
objc_mutex_unlock(protocol_by_name_lock);
pthread_mutex_unlock(&protocol_by_name_lock);
}

View file

@ -32,6 +32,8 @@
#import <callback.h>
#import "callframe.h"
#include <pthread.h>
#import "GSInvocation.h"
#ifndef INLINE
@ -136,7 +138,7 @@ static GSIMapTable_t ff_callback_map;
/* Lock that protects the ff_callback_map */
static objc_mutex_t ff_callback_map_lock = NULL;
static pthread_mutex_t ff_callback_map_lock = PTHREAD_MUTEX_INITIALIZER;
/* Static pre-computed return type info */
@ -477,7 +479,7 @@ static IMP gs_objc_msg_forward (SEL sel)
GSIMapNode node;
// Lock
objc_mutex_lock (ff_callback_map_lock);
pthread_mutex_lock (&ff_callback_map_lock);
node = GSIMapNodeForKey (&ff_callback_map,
(GSIMapKey) ((void *) &returnInfo));
@ -503,7 +505,7 @@ static IMP gs_objc_msg_forward (SEL sel)
(GSIMapVal) forwarding_callback);
}
// Unlock
objc_mutex_unlock (ff_callback_map_lock);
pthread_mutex_unlock (&ff_callback_map_lock);
}
return forwarding_callback;
}
@ -512,8 +514,6 @@ static IMP gs_objc_msg_forward (SEL sel)
{
int index;
ff_callback_map_lock = objc_mutex_allocate ();
for (index = 0; index < STATIC_CALLBACK_LIST_SIZE; ++index)
{
returnTypeInfo[index].type = index;

Source/GSPThread.h (new file, 58 lines)
View file

@ -0,0 +1,58 @@
/* GSPThread.h
Copyright (C) 2010 Free Software Foundation, Inc.
Written by: Niels Grewe <niels.grewe@halbordnung.de>
This file is part of the GNUstep Base Library.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02111 USA.
*/
#ifndef _GSPThread_h_
#define _GSPThread_h_
/*
* Since glibc does not enable Unix98 extensions by default, we need to tell it
* to do so explicitly. Whether that support is switched on by _XOPEN_SOURCE or
* by __USE_UNIX98 depends on whether <features.h> has already been included or
* will be included by pthread.h. Hence both flags need to be set here. This
* shouldn't be a problem with other libcs.
*/
#define _XOPEN_SOURCE 500
#define __USE_UNIX98
#include <pthread.h>
/*
* Macro to initialize recursive mutexes in a portable way. Adopted from
* libobjc2 (lock.h).
*/
# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
# define GS_INIT_RECURSIVE_MUTEX(x) x = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
# elif defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER)
# define GS_INIT_RECURSIVE_MUTEX(x) x = PTHREAD_RECURSIVE_MUTEX_INITIALIZER
# else
# define GS_INIT_RECURSIVE_MUTEX(x) GSPThreadInitRecursiveMutex(&(x))
static inline void GSPThreadInitRecursiveMutex(pthread_mutex_t *x)
{
pthread_mutexattr_t recursiveAttributes;
pthread_mutexattr_init(&recursiveAttributes);
pthread_mutexattr_settype(&recursiveAttributes, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(x, &recursiveAttributes);
pthread_mutexattr_destroy(&recursiveAttributes);
}
# endif // PTHREAD_RECURSIVE_MUTEX_INITIALIZER(_NP)
#endif // _GSPThread_h_
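
A minimal usage sketch for the macro above, mirroring how NSZone.m and synchronization.m in this commit use it (example_lock and the function names are illustrative):

#include "GSPThread.h"

static pthread_mutex_t example_lock;    /* illustrative name */

static void
example_setup (void)
{
  /* Make the lock recursive so the owning thread may relock it. */
  GS_INIT_RECURSIVE_MUTEX(example_lock);
}

static void
example_work (void)
{
  pthread_mutex_lock(&example_lock);
  /* ... critical section ... */
  pthread_mutex_unlock(&example_lock);
}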

View file

@ -96,6 +96,7 @@
#include "Foundation/NSZone.h"
#include "Foundation/NSLock.h"
#include "GSPrivate.h"
#include "GSPThread.h"
/**
* Try to get more memory - the normal process has failed.
@ -555,7 +556,7 @@ struct _ffree_free_link
struct _ffree_zone_struct
{
NSZone common;
objc_mutex_t lock;
pthread_mutex_t lock;
ff_block *blocks; // Linked list of blocks
ff_link *segheadlist[MAX_SEG]; // Segregated list, holds heads
ff_link *segtaillist[MAX_SEG]; // Segregated list, holds tails
@ -710,7 +711,7 @@ chunkPrev(ff_block *ptr)
struct _nfree_zone_struct
{
NSZone common;
objc_mutex_t lock;
pthread_mutex_t lock;
/* Linked list of blocks in decreasing order of free space,
except maybe for the first block. */
nf_block *blocks;
@ -791,7 +792,7 @@ fmalloc (NSZone *zone, size_t size)
ff_block *chunkhead;
void *result;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
bufsize = zptr->bufsize;
while ((i < bufsize) && (chunksize > size_buf[i]))
i++;
@ -830,7 +831,7 @@ fmalloc (NSZone *zone, size_t size)
chunkhead = get_chunk(zptr, chunksize);
if (chunkhead == NULL)
{
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
if (zone->name != nil)
[NSException raise: NSMallocException
format: @"Zone %@ has run out of memory", zone->name];
@ -849,7 +850,7 @@ fmalloc (NSZone *zone, size_t size)
*((char*)chunkhead->next) = (char)42;
chunkSetLive(chunkhead);
result = chunkToPointer(chunkhead);
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return result;
}
@ -873,7 +874,7 @@ frealloc (NSZone *zone, void *ptr, size_t size)
if (ptr == NULL)
return fmalloc(zone, size);
chunkhead = pointerToChunk(ptr);
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
realsize = chunkSize(chunkhead);
NSAssert(chunkIsInUse(chunkhead), NSInternalInconsistencyException);
@ -929,7 +930,7 @@ frealloc (NSZone *zone, void *ptr, size_t size)
newchunk = get_chunk(zptr, chunksize);
if (newchunk == NULL)
{
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
if (zone->name != nil)
[NSException raise: NSMallocException
format: @"Zone %@ has run out of memory",
@ -947,7 +948,7 @@ frealloc (NSZone *zone, void *ptr, size_t size)
*((char*)chunkhead->next) = (char)42;
chunkSetLive(chunkhead);
result = chunkToPointer(chunkhead);
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return result;
}
@ -957,14 +958,14 @@ ffree (NSZone *zone, void *ptr)
{
ff_block *chunk;
NSAssert(NSZoneFromPointer(ptr) == zone, NSInternalInconsistencyException);
objc_mutex_lock(((ffree_zone*)zone)->lock);
pthread_mutex_lock(&(((ffree_zone*)zone)->lock));
chunk = pointerToChunk(ptr);
if (chunkIsLive(chunk) == 0)
[NSException raise: NSMallocException
format: @"Attempt to free freed memory"];
NSAssert(*((char*)chunk->next) == (char)42, NSInternalInconsistencyException);
add_buf((ffree_zone*)zone, chunk);
objc_mutex_unlock(((ffree_zone*)zone)->lock);
pthread_mutex_unlock(&(((ffree_zone*)zone)->lock));
}
static BOOL
@ -974,7 +975,7 @@ frecycle1(NSZone *zone)
ff_block *block;
ff_block *nextblock;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
flush_buf(zptr);
block = zptr->blocks;
while (block != NULL)
@ -996,10 +997,10 @@ frecycle1(NSZone *zone)
}
block = nextblock;
}
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
if (zptr->blocks == 0)
{
objc_mutex_deallocate(zptr->lock);
pthread_mutex_destroy(&(zptr->lock));
return YES;
}
return NO;
@ -1048,7 +1049,7 @@ fcheck (NSZone *zone)
ffree_zone *zptr = (ffree_zone*)zone;
ff_block *block;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
/* Check integrity of each block the zone owns. */
block = zptr->blocks;
while (block != NULL)
@ -1141,11 +1142,11 @@ fcheck (NSZone *zone)
if ((zptr->size_buf[i] != chunkSize(chunk)) || !chunkIsInUse(chunk))
goto inconsistent;
}
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return YES;
inconsistent: // Jump here if an inconsistency was found.
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return NO;
}
@ -1156,7 +1157,7 @@ flookup (NSZone *zone, void *ptr)
ff_block *block;
BOOL found = NO;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
for (block = zptr->blocks; block != NULL; block = block->next)
{
if (ptr >= (void*)block && ptr < (void*)chunkNext(block))
@ -1165,7 +1166,7 @@ flookup (NSZone *zone, void *ptr)
break;
}
}
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return found;
}
@ -1184,7 +1185,7 @@ fstats (NSZone *zone)
stats.bytes_used = 0;
stats.chunks_free = 0;
stats.bytes_free = 0;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
block = zptr->blocks;
/* Go through each block. */
while (block != NULL)
@ -1219,7 +1220,7 @@ fstats (NSZone *zone)
stats.bytes_used -= zptr->size_buf[i];
stats.bytes_free += zptr->size_buf[i];
}
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
/* Remove overhead. */
stats.bytes_used -= FBSZ*stats.chunks_used;
return stats;
@ -1540,7 +1541,7 @@ nmalloc (NSZone *zone, size_t size)
nf_block *block;
size_t top;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
block = zptr->blocks;
top = block->top;
freesize = block->size-top;
@ -1575,7 +1576,7 @@ nmalloc (NSZone *zone, size_t size)
block = objc_malloc(blocksize);
if (block == NULL)
{
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
if (zone->name != nil)
[NSException raise: NSMallocException
format: @"Zone %@ has run out of memory",
@ -1593,7 +1594,7 @@ nmalloc (NSZone *zone, size_t size)
block->top += chunksize;
}
zptr->use++;
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return chunkhead;
}
@ -1604,7 +1605,7 @@ nrecycle1 (NSZone *zone)
{
nfree_zone *zptr = (nfree_zone*)zone;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
if (zptr->use == 0)
{
nf_block *nextblock;
@ -1618,10 +1619,10 @@ nrecycle1 (NSZone *zone)
}
zptr->blocks = 0;
}
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
if (zptr->blocks == 0)
{
objc_mutex_deallocate(zptr->lock);
pthread_mutex_destroy(&(zptr->lock));
return YES;
}
return NO;
@ -1658,7 +1659,7 @@ nrealloc (NSZone *zone, void *ptr, size_t size)
if (ptr != 0)
{
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
if (tmp)
{
nf_block *block;
@ -1678,7 +1679,7 @@ nrealloc (NSZone *zone, void *ptr, size_t size)
}
}
zptr->use--;
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
}
return tmp;
}
@ -1694,9 +1695,9 @@ nfree (NSZone *zone, void *ptr)
{
nfree_zone *zptr = (nfree_zone*)zone;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
zptr->use--;
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
}
static void
@ -1722,19 +1723,19 @@ ncheck (NSZone *zone)
nfree_zone *zptr = (nfree_zone*)zone;
nf_block *block;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
block = zptr->blocks;
while (block != NULL)
{
if (block->size < block->top)
{
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return NO;
}
block = block->next;
}
/* FIXME: Do more checking? */
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return YES;
}
@ -1745,7 +1746,7 @@ nlookup (NSZone *zone, void *ptr)
nf_block *block;
BOOL found = NO;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
for (block = zptr->blocks; block != NULL; block = block->next)
{
if (ptr >= (void*)block && ptr < ((void*)block)+block->size)
@ -1754,7 +1755,7 @@ nlookup (NSZone *zone, void *ptr)
break;
}
}
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return found;
}
@ -1772,7 +1773,7 @@ nstats (NSZone *zone)
stats.bytes_used = 0;
stats.chunks_free = 0;
stats.bytes_free = 0;
objc_mutex_lock(zptr->lock);
pthread_mutex_lock(&(zptr->lock));
block = zptr->blocks;
while (block != NULL)
{
@ -1793,7 +1794,7 @@ nstats (NSZone *zone)
}
block = block->next;
}
objc_mutex_unlock(zptr->lock);
pthread_mutex_unlock(&(zptr->lock));
return stats;
}
@ -1880,7 +1881,7 @@ NSCreateZone (NSUInteger start, NSUInteger gran, BOOL canFree)
zone->common.stats = fstats;
zone->common.gran = granularity;
zone->common.name = nil;
zone->lock = objc_mutex_allocate();
GS_INIT_RECURSIVE_MUTEX(zone->lock);
for (i = 0; i < MAX_SEG; i++)
{
zone->segheadlist[i] = NULL;
@ -1890,7 +1891,7 @@ NSCreateZone (NSUInteger start, NSUInteger gran, BOOL canFree)
zone->blocks = objc_malloc(startsize + 2*FBSZ);
if (zone->blocks == NULL)
{
objc_mutex_deallocate(zone->lock);
pthread_mutex_destroy(&(zone->lock));
objc_free(zone);
[NSException raise: NSMallocException
format: @"No memory to create zone"];
@ -1935,12 +1936,12 @@ NSCreateZone (NSUInteger start, NSUInteger gran, BOOL canFree)
zone->common.stats = nstats;
zone->common.gran = granularity;
zone->common.name = nil;
zone->lock = objc_mutex_allocate();
GS_INIT_RECURSIVE_MUTEX(zone->lock);
zone->blocks = objc_malloc(startsize);
zone->use = 0;
if (zone->blocks == NULL)
{
objc_mutex_deallocate(zone->lock);
pthread_mutex_destroy(&(zone->lock));
objc_free(zone);
[NSException raise: NSMallocException
format: @"No memory to create zone"];

View file

@ -22,17 +22,18 @@
Boston, MA 02111 USA.
*/
#include <stdlib.h>
#include "objc/objc.h"
#include "objc/objc-api.h"
#include "objc/thr.h"
#include "GSPThread.h"
/*
* Node structure...
*/
typedef struct lock_node {
id obj;
objc_mutex_t lock;
pthread_mutex_t lock;
struct lock_node *next;
struct lock_node *prev;
} lock_node_t;
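
For context, objc_sync_enter() and objc_sync_exit() in this file are the runtime entry points behind the @synchronized() directive, with each object getting its own recursive lock node from the list above; a minimal illustrative sketch (update_shared is a hypothetical function):

#import <Foundation/NSObject.h>

void
update_shared (id shared)
{
  /* The compiler emits objc_sync_enter(shared) on entry to this block
   * and objc_sync_exit(shared) on exit, serialising access per object. */
  @synchronized (shared)
    {
      /* ... work with state owned by 'shared' ... */
    }
}
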
@ -48,19 +49,7 @@ typedef enum {
} sync_return_t;
static lock_node_t *lock_list = NULL;
static objc_mutex_t table_lock = NULL;
/**
* Initialize the table lock.
*/
static void
sync_lock_init()
{
if (table_lock == NULL)
{
table_lock = objc_mutex_allocate();
}
}
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
/**
* Find the node in the list.
@ -97,9 +86,6 @@ sync_add_node(id obj)
{
lock_node_t *current = NULL;
// get the lock...
sync_lock_init();
// if the list hasn't been initialized, initialize it.
if (lock_list == NULL)
{
@ -143,7 +129,7 @@ sync_add_node(id obj)
{
// add the object and its lock
current->obj = obj;
current->lock = objc_mutex_allocate();
GS_INIT_RECURSIVE_MUTEX(current->lock);
}
return current;
@ -163,9 +149,7 @@ objc_sync_enter(id obj)
lock_node_t *node = NULL;
int status = 0;
// lock access to the table until we're done....
sync_lock_init();
objc_mutex_lock(table_lock);
pthread_mutex_lock(&table_lock);
node = sync_find_node(obj);
if (node == NULL)
@ -174,15 +158,15 @@ objc_sync_enter(id obj)
if (node == NULL)
{
// unlock the table....
objc_mutex_unlock(table_lock);
pthread_mutex_unlock(&table_lock);
return OBJC_SYNC_NOT_INITIALIZED;
}
}
// unlock the table....
objc_mutex_unlock(table_lock);
pthread_mutex_unlock(&table_lock);
status = objc_mutex_lock(node->lock);
status = pthread_mutex_lock(&(node->lock));
// if the status is more than one, then another thread
// has this section locked, so we abort. A status of -1
@ -209,22 +193,20 @@ objc_sync_exit(id obj)
lock_node_t *node = NULL;
int status = 0;
// lock access to the table until we're done....
sync_lock_init();
objc_mutex_lock(table_lock);
pthread_mutex_lock(&table_lock);
node = sync_find_node(obj);
if (node == NULL)
{
// unlock the table....
objc_mutex_unlock(table_lock);
pthread_mutex_unlock(&table_lock);
return OBJC_SYNC_NOT_INITIALIZED;
}
status = objc_mutex_unlock(node->lock);
status = pthread_mutex_unlock(&(node->lock));
// unlock the table....
objc_mutex_unlock(table_lock);
pthread_mutex_unlock(&table_lock);
// if the status is not zero, then we are not the sole
// owner of this node. Also if -1 is returned, this indicates an error