cleanup changes

git-svn-id: svn+ssh://svn.gna.org/svn/gnustep/libs/base/trunk@39762 72102866-910b-0410-8b05-ffd578937521
This commit is contained in:
Richard Frith-MacDonald 2016-05-14 09:34:01 +00:00
parent 97d38ffc0c
commit 71941dd24d
14 changed files with 198 additions and 746 deletions

View file

@ -1,3 +1,21 @@
2016-05-14 Richard Frith-Macdonald <rfm@gnu.org>
* Headers/Foundation/NSNotification.h:
* Headers/Foundation/NSZone.h:
* Headers/GNUstepBase/GSConfig.h.in:
* Source/GSQuickSort.m:
* Source/GSString.m:
* Source/GSTimSort.m:
* Source/NSAutoreleasePool.m:
* Source/NSConcretePointerFunctions.h:
* Source/NSGarbageCollector.m:
* Source/NSNotificationCenter.m:
* Source/NSObject.m:
* Source/NSThread.m:
* Source/NSZone.m:
Completed(?) removal of GC support.
Plus various minor reorganisations to avoid compiler warnings.
2016-05-13 Richard Frith-Macdonald <rfm@gnu.org>
* Source/Additions/GSMime.m:

View file

@ -70,7 +70,7 @@ DEFINE_BLOCK_TYPE(GSNotificationBlock, void, NSNotification *);
{
#if GS_EXPOSE(NSNotificationCenter)
@private
GS_GC_STRONG void *_table;
void *_table;
#endif
}

View file

@ -308,14 +308,14 @@ enum {
* garbage collected itself.<br />
* In any case the memory returned is zero'ed.
*/
GS_EXPORT GS_GC_STRONG void *
GS_EXPORT void *
NSAllocateCollectable(NSUInteger size, NSUInteger options);
/** Reallocate memory to be of a different size and/or to have different
* options settings. The behavior of options is as for
* the NSAllocateCollectable() function.
*/
GS_EXPORT GS_GC_STRONG void *
GS_EXPORT void *
NSReallocateCollectable(void *ptr, NSUInteger size, NSUInteger options);
#endif

View file

@ -419,32 +419,16 @@ typedef struct {
#include <stdbool.h>
#endif
// Strong has different semantics in GC and ARC modes, so we need to have a
// macro that picks the correct one.
#if __OBJC_GC__
# define GS_GC_STRONG __strong
#else
# define GS_GC_STRONG
#endif
#if !__has_feature(objc_arc)
// NetBSD > 6 defines __weak in cdefs_elf.h
#ifdef __NetBSD__
#undef __weak
#endif
# if !defined(__weak)
# if __OBJC_GC__
# define __weak __attribute__((objc_gc(weak)))
# else
# define __weak
# endif
# define __weak
# endif
# if !defined(__strong)
# if __OBJC_GC__
# define __strong __attribute__((objc_gc(strong)))
# else
# define __strong
# endif
# define __strong
# endif
#endif

View file

@ -31,7 +31,12 @@
#import "Foundation/NSObjCRuntime.h"
#import "GSSorting.h"
/// Swaps the two provided objects.
/**
* Sorts the provided object array's sortRange according to sortDescriptor.
*/
// Quicksort algorithm copied from Wikipedia :-).
#if GS_USE_QUICKSORT
static inline void
SwapObjects(id * o1, id * o2)
{
@ -42,11 +47,6 @@ SwapObjects(id * o1, id * o2)
*o2 = temp;
}
/**
* Sorts the provided object array's sortRange according to sortDescriptor.
*/
// Quicksort algorithm copied from Wikipedia :-).
#if GS_USE_QUICKSORT
static void
_GSQuickSort(id *objects,
NSRange sortRange,

View file

@ -895,7 +895,7 @@ tsbytes(uintptr_t s, char *buf)
maxLength /= 2;
if (maxLength > 1)
{
unichar *buf = (unichar*)buffer;
unichar *buf = (unichar*)(void*)buffer;
if (maxLength <= length)
{

View file

@ -53,157 +53,6 @@
#define GS_MIN_GALLOP 7
#define GS_INITIAL_TEMP_STORAGE 256
static inline void
reverseRange(id *buffer, NSRange r)
{
NSUInteger loc = r.location;
NSUInteger max = (NSMaxRange(r) - 1);
while (loc < max)
{
id temp = buffer[loc];
buffer[loc++] = buffer[max];
buffer[max--] = temp;
}
}
/* In-place binary insertion sorting for small arrays (i.e. those which are
* smaller than GS_MIN_MERGE. We use this to generate minimal runs for timsort.
*/
static void
internalBinarySort(id *buffer,
NSRange r,
NSUInteger start,
id compOrDesc,
GSComparisonType type,
void *context)
{
NSUInteger min = r.location;
NSUInteger max = NSMaxRange(r);
NSCAssert2(NSLocationInRange(start, r),
@"Start index %lu not in range %@",
start, NSStringFromRange(r));
if (min == start)
{
start++;
}
// We assume that everything before start is sorted.
for (; start < max; ++start)
{
NSUInteger left = min;
NSUInteger right = start;
id pivot = buffer[right];
int i = 0;
do
{
NSUInteger midPoint = (left + ((right - left) >> 1));
NSComparisonResult res = GSCompareUsingDescriptorOrComparator(pivot,
buffer[midPoint],
compOrDesc,
type,
context);
if (NSOrderedAscending == res)
{
right = midPoint;
}
else
{
left = midPoint + 1;
}
} while (left < right);
NSCAssert(left == right, @"Binary sort iteration did not end correctly,");
// We make room for the pivot and place it at left.
for (i = start; i > left; --i)
{
buffer[i] = buffer[(i - 1)];
}
buffer[left] = pivot;
}
}
/*
* Count the number of elements in the range that are already ordered.
* If the order is a descending one, reverse it so that all runs are ordered the
* same way.
*/
static inline NSUInteger
countAscendizedRun(id *buf, NSRange r, id descOrComp,
GSComparisonType type, void*context)
{
NSUInteger min = r.location;
NSUInteger runMax = min + 1;
NSUInteger rangeMax = NSMaxRange(r);
if (runMax == rangeMax)
{
return 1;
}
if (NSOrderedDescending == GSCompareUsingDescriptorOrComparator(buf[min],
buf[runMax++], descOrComp, type, context))
{
while ((runMax < rangeMax) && NSOrderedDescending
== GSCompareUsingDescriptorOrComparator(buf[runMax - 1],
buf[runMax], descOrComp, type, context))
{
runMax++;
}
reverseRange(buf, NSMakeRange(min, (runMax - min)));
}
else // ascending or equal
{
while ((runMax < rangeMax) && NSOrderedDescending
!= GSCompareUsingDescriptorOrComparator(buf[runMax - 1],
buf[runMax], descOrComp, type, context))
{
runMax++;
}
}
return (runMax - min);
}
/*
* Calculate a sensible minimum length for the runs, these need to be powers of
* two, or less than, but close to, one, but always at least GS_MIN_MERGE. For
* details on why this is useful, see Python's listsort.txt.
*/
static inline NSUInteger
minimumRunLength(NSUInteger length)
{
NSUInteger r = 0;
while (length >= GS_MIN_MERGE)
{
r |= length & 1;
length >>= 1;
}
return (length + r);
}
/*
* For arrays up to GS_MIN_MERGE, we don't do merging. Instead, we identify
* pre-ordering at the beginning of the range and sort the rest using binary
* sort.
*/
static inline void
miniTimSort(id *buf, NSRange r, id descOrComp, GSComparisonType ty, void *ctx)
{
NSUInteger firstRunLength = countAscendizedRun(buf, r, descOrComp, ty, ctx);
if (r.length == firstRunLength)
{
// In this case, we have already sorted the array here.
return;
}
internalBinarySort(buf, r, (r.location + firstRunLength),
descOrComp, ty, ctx);
}
/*
* Galloping from left searches for an insertion point for key into the
* already sorted buffer and returns the point immediately left of the first
@ -303,9 +152,9 @@ gallopLeft(id key, id *buf, NSRange r, NSUInteger hint, id descOrComp,
*/
offset = MIN(offset, NSMaxRange(r));
if (lastOffset < (NSInteger)r.location)
{
lastOffset = (NSInteger)r.location;
}
{
lastOffset = (NSInteger)r.location;
}
while (lastOffset < offset)
{
NSInteger midPoint = lastOffset + ((offset - lastOffset) >> 1);
@ -448,6 +297,156 @@ NSRange range, NSComparator cmptr)
#if GS_USE_TIMSORT
/* Reverse, in place, the objects in the sub-range r of buffer.
 * Assumes a non-empty range (NSMaxRange(r) - 1 would underflow otherwise).
 */
static inline void
reverseRange(id *buffer, NSRange r)
{
  NSUInteger lo = r.location;
  NSUInteger hi = NSMaxRange(r) - 1;

  for (; lo < hi; lo++, hi--)
    {
      id tmp = buffer[lo];

      buffer[lo] = buffer[hi];
      buffer[hi] = tmp;
    }
}
/* In-place binary insertion sorting for small arrays (i.e. those which are
 * smaller than GS_MIN_MERGE. We use this to generate minimal runs for timsort.
 * The elements of r before index start are assumed to be sorted already; each
 * subsequent element is located among its predecessors by binary search and
 * inserted there.  Equal elements are inserted after existing ones (the
 * search moves right on NSOrderedSame), keeping the sort stable as timsort
 * requires.
 */
static void
internalBinarySort(id *buffer,
  NSRange r,
  NSUInteger start,
  id compOrDesc,
  GSComparisonType type,
  void *context)
{
  NSUInteger min = r.location;
  NSUInteger max = NSMaxRange(r);

  NSCAssert2(NSLocationInRange(start, r),
    @"Start index %lu not in range %@",
    start, NSStringFromRange(r));
  if (min == start)
    {
      /* A one element prefix is trivially sorted. */
      start++;
    }
  // We assume that everything before start is sorted.
  for (; start < max; ++start)
    {
      NSUInteger left = min;
      NSUInteger right = start;
      id pivot = buffer[right];
      /* Was 'int i', which is compared against and assigned from NSUInteger
       * values: a sign-compare mismatch and a truncation hazard for indices
       * beyond INT_MAX.  NSUInteger is the correct type here.
       */
      NSUInteger i;

      do
        {
          NSUInteger midPoint = (left + ((right - left) >> 1));
          NSComparisonResult res = GSCompareUsingDescriptorOrComparator(pivot,
            buffer[midPoint],
            compOrDesc,
            type,
            context);

          if (NSOrderedAscending == res)
            {
              right = midPoint;		// pivot sorts before buffer[midPoint]
            }
          else
            {
              left = midPoint + 1;	// pivot sorts at or after buffer[midPoint]
            }
        } while (left < right);
      NSCAssert(left == right, @"Binary sort iteration did not end correctly,");
      // We make room for the pivot and place it at left.
      for (i = start; i > left; --i)
        {
          buffer[i] = buffer[(i - 1)];
        }
      buffer[left] = pivot;
    }
}
/*
 * Count the number of elements in the range that are already ordered.
 * If the order is a descending one, reverse it so that all runs are ordered the
 * same way.
 * Returns the length (always >= 1) of the run now ascending at the start of
 * the range.  A descending run is only extended while the order is STRICTLY
 * descending: reversing a run containing equal elements would reorder them
 * and break stability.  An ascending run may contain equal elements.
 */
static inline NSUInteger
countAscendizedRun(id *buf, NSRange r, id descOrComp,
  GSComparisonType type, void*context)
{
  NSUInteger min = r.location;
  NSUInteger runMax = min + 1;
  NSUInteger rangeMax = NSMaxRange(r);

  if (runMax == rangeMax)
    {
      /* Single element range: a run of length one. */
      return 1;
    }
  /* NB. runMax is incremented as a side effect of the first comparison,
   * so both branches below start scanning from the third element.
   */
  if (NSOrderedDescending == GSCompareUsingDescriptorOrComparator(buf[min],
    buf[runMax++], descOrComp, type, context))
    {
      /* Strictly descending run: extend it, then reverse it in place. */
      while ((runMax < rangeMax) && NSOrderedDescending
        == GSCompareUsingDescriptorOrComparator(buf[runMax - 1],
        buf[runMax], descOrComp, type, context))
        {
          runMax++;
        }
      reverseRange(buf, NSMakeRange(min, (runMax - min)));
    }
  else // ascending or equal
    {
      while ((runMax < rangeMax) && NSOrderedDescending
        != GSCompareUsingDescriptorOrComparator(buf[runMax - 1],
        buf[runMax], descOrComp, type, context))
        {
          runMax++;
        }
    }
  return (runMax - min);
}
/*
 * Calculate a sensible minimum run length: length itself when it is already
 * below GS_MIN_MERGE, otherwise a value such that length divided by the
 * result is a power of two, or close to (but less than) one.  See Python's
 * listsort.txt for why this choice makes the merges well balanced.
 */
static inline NSUInteger
minimumRunLength(NSUInteger length)
{
  NSUInteger sticky = 0;	/* set to 1 if any bit is shifted out */

  for (; length >= GS_MIN_MERGE; length >>= 1)
    {
      sticky |= (length & 1);
    }
  return length + sticky;
}
/*
 * For arrays up to GS_MIN_MERGE elements we don't do any merging.  Instead we
 * identify any pre-ordered run at the beginning of the range and extend it
 * over the rest of the range using binary insertion sort.
 */
static inline void
miniTimSort(id *buf, NSRange r, id descOrComp, GSComparisonType ty, void *ctx)
{
  NSUInteger sortedLen;

  sortedLen = countAscendizedRun(buf, r, descOrComp, ty, ctx);
  if (sortedLen != r.length)
    {
      /* Insert the remaining elements into the initial sorted run. */
      internalBinarySort(buf, r, (r.location + sortedLen),
        descOrComp, ty, ctx);
    }
  /* Otherwise the whole range was already one run: nothing to do. */
}
/* These macros make calling the cached IMPs easier,
* if we choose to do so later.
*/
@ -754,7 +753,8 @@ descriptorOrComparator: (id)descriptorOrComparator
*/
localMinGallop -= localMinGallop > 1;
minGallop = localMinGallop;
k = gallopRight(*buf2, buf1, NSMakeRange(0,num1), 0, descOrComp, ty, ctx);
k = gallopRight(*buf2, buf1,
NSMakeRange(0,num1), 0, descOrComp, ty, ctx);
winners1 = k;
if (0 != k)
{
@ -1011,7 +1011,6 @@ descriptorOrComparator: (id)descriptorOrComparator
NS_ENDHANDLER
}
- (void) mergeAtIndex: (NSUInteger)i
{
NSRange r1;
@ -1023,7 +1022,8 @@ descriptorOrComparator: (id)descriptorOrComparator
r1 = runStack[i];
r2 = runStack[i+1];
NSDebugMLLog(@"GSTimSort", @"Merging stack location %lu (stack size: %lu, run %@ with %@)", i,
NSDebugMLLog(@"GSTimSort",
@"Merging stack location %lu (stack size: %lu, run %@ with %@)", i,
stackSize, NSStringFromRange(r1), NSStringFromRange(r2));
/* Do some housekeeping on the stack: We combine the two runs
@ -1045,11 +1045,12 @@ descriptorOrComparator: (id)descriptorOrComparator
r1.length = r1.length - (insert - r1.location);
r1.location = insert;
if (r1.length == 0)
{
// The entire run r2 lies after r1, just return.
return;
}
NSDebugMLLog(@"GSTimSort", @"Insertion point for r2 in r1: %lu, r1 for the merge is now %@.",
{
// The entire run r2 lies after r1, just return.
return;
}
NSDebugMLLog(@"GSTimSort",
"Insertion point for r2 in r1: %lu, r1 for the merge is now %@.",
insert, NSStringFromRange(r1));
// Find an insertion point for the last element of r1 into r2. Subtracting the
@ -1064,7 +1065,9 @@ descriptorOrComparator: (id)descriptorOrComparator
return;
}
(r1.length <= r2.length) ? GS_TIMSORT_MERGE_LOW(self, r1, r2) : GS_TIMSORT_MERGE_HIGH(self, r1, r2);
(r1.length <= r2.length)
? GS_TIMSORT_MERGE_LOW(self, r1, r2)
: GS_TIMSORT_MERGE_HIGH(self, r1, r2);
}
/**
@ -1084,8 +1087,6 @@ descriptorOrComparator: (id)descriptorOrComparator
}
}
- (void) dealloc
{
free(runStack);
@ -1093,8 +1094,8 @@ descriptorOrComparator: (id)descriptorOrComparator
[super dealloc];
}
@end
static void
_GSTimSort(id *objects,
NSRange sortRange,

View file

@ -42,157 +42,6 @@
# endif
#endif
#if __OBJC_GC__
@interface GSAutoreleasePool : NSAutoreleasePool @end
@implementation NSAutoreleasePool
static NSAutoreleasePool *pool = nil;
static Class PoolClass;
+ (void) initialize
{
if ([NSGarbageCollector defaultCollector])
{
pool = NSAllocateObject(self, 0, NSDefaultMallocZone());
}
else
{
PoolClass = [GSAutoreleasePool class];
}
return;
}
+ (id) allocWithZone: (NSZone*)zone
{
if (nil == pool)
{
return NSAllocateObject(PoolClass, 0, 0);
}
return pool;
}
+ (id) new
{
if (nil == pool)
{
return [NSAllocateObject(PoolClass, 0, 0) init];
}
return pool;
}
- (id) init
{
return self;
}
- (unsigned) autoreleaseCount
{
return 0;
}
- (unsigned) autoreleaseCountForObject: (id)anObject
{
return 0;
}
+ (unsigned) autoreleaseCountForObject: (id)anObject
{
return 0;
}
+ (id) currentPool
{
return pool;
}
+ (void) addObject: (id)anObj
{
return;
}
- (void) addObject: (id)anObj
{
return;
}
- (void) drain
{
static NSGarbageCollector *collector = nil;
static SEL sel;
static IMP imp;
if (collector == nil)
{
collector = [NSGarbageCollector defaultCollector];
sel = @selector(collectIfNeeded);
imp = [collector methodForSelector: sel];
}
(*imp)(collector, sel);
}
- (id) retain
{
[NSException raise: NSGenericException
format: @"Don't call `-retain' on a NSAutoreleasePool"];
return self;
}
- (oneway void) release
{
return;
}
- (void) dealloc
{
[NSException raise: NSGenericException
format: @"dealloc should not be called in garbage collected mode"];
GSNOSUPERDEALLOC;
return;
}
- (void) emptyPool
{
return;
}
- (id) autorelease
{
[NSException raise: NSGenericException
format: @"Don't call `-autorelease' on a NSAutoreleasePool"];
return self;
}
+ (void) _endThread: (NSThread*)thread
{
return;
}
+ (void) enableRelease: (BOOL)enable
{
return;
}
+ (void) freeCache
{
return;
}
+ (void) setPoolCountThreshhold: (unsigned)c
{
return;
}
+ (void) setPoolNumberThreshhold: (unsigned)c
{
return;
}
@end
#endif
@ -278,11 +127,7 @@ pop_pool_from_cache (struct autorelease_thread_vars *tv)
}
#if __OBJC_GC__
@implementation GSAutoreleasePool
#else
@implementation NSAutoreleasePool
#endif
+ (void) initialize
{

View file

@ -29,16 +29,7 @@
# include <objc/capabilities.h>
#endif
// Define a weak read barrier macro for ARC or GC, depending on which one this
// target supports. If this target doesn't support zeroing weak references,
// then use an unsafe unretained access.
#if __OBJC_GC__
# include <objc/objc-auto.h>
# define WEAK_READ(x) objc_read_weak((id*)x)
# define WEAK_WRITE(addr, x) objc_assign_weak((id)x, (id*)addr)
# define STRONG_WRITE(addr, x) objc_assign_strongCast((id)x, (id*)addr)
# define STRONG_ACQUIRE(x) x
#elif defined(OBJC_CAP_ARC)
#if defined(OBJC_CAP_ARC)
# include <objc/objc-arc.h>
# define ARC_WEAK_READ(x) objc_loadWeak((id*)x)
# define ARC_WEAK_WRITE(addr, x) objc_storeWeak((id*)addr, (id)x)

View file

@ -27,105 +27,6 @@
#import "Foundation/NSGarbageCollector.h"
static NSGarbageCollector *collector = nil;
#if __OBJC_GC__
#include <objc/objc-auto.h>
id CFRetain(id obj)
{
if (collector)
{
return objc_gc_retain(obj);
}
return [obj retain];
}
void CFRelease(id obj)
{
if (collector)
{
objc_gc_release(obj);
}
else
{
[obj release];
}
}
@implementation NSGarbageCollector
+ (id) defaultCollector
{
return collector;
}
+ (void) initialize
{
if (objc_collecting_enabled())
{
collector = [self alloc];
objc_startCollectorThread();
}
}
- (void) collectIfNeeded
{
objc_collect(OBJC_COLLECT_IF_NEEDED | OBJC_FULL_COLLECTION);
}
- (void) collectExhaustively
{
objc_collect(OBJC_EXHAUSTIVE_COLLECTION);
}
- (void) disable
{
objc_gc_disable();
}
- (void) disableCollectorForPointer: (void *)ptr
{
CFRetain(ptr);
}
- (void) enable
{
objc_gc_enable();
}
- (void) enableCollectorForPointer: (void *)ptr
{
CFRelease(ptr);
}
- (id) init
{
if (self != collector)
{
[self release];
self = collector;
}
return self;
}
- (BOOL) isCollecting
{
return NO;
}
- (BOOL) isEnabled
{
return objc_collectingEnabled();
}
- (NSZone*) zone
{
return NSDefaultMallocZone();
}
@end
#else
static unsigned disabled = 0;
@implementation NSGarbageCollector
@ -195,4 +96,3 @@ static unsigned disabled = 0;
}
@end
#endif // __OBJC_GC__

View file

@ -148,23 +148,6 @@ struct NCTbl; /* Notification Center Table structure */
* trivial class instead ... and gets managed by the garbage collector.
*/
#ifdef __OBJC_GC__
@interface GSObservation : NSObject
{
@public
__weak id observer; /* Object to receive message. */
SEL selector; /* Method selector. */
struct Obs *next; /* Next item in linked list. */
struct NCTbl *link; /* Pointer back to chunk table */
}
@end
@implementation GSObservation
@end
#define Observation GSObservation
#else
typedef struct Obs {
id observer; /* Object to receive message. */
SEL selector; /* Method selector. */
@ -173,8 +156,6 @@ typedef struct Obs {
struct NCTbl *link; /* Pointer back to chunk table */
} Observation;
#endif
#define ENDOBS ((Observation*)-1)
static inline NSUInteger doHash(BOOL shouldHash, NSString* key)
@ -214,34 +195,17 @@ static inline BOOL doEqual(BOOL shouldHash, NSString* key1, NSString* key2)
*/
static void listFree(Observation *list);
#ifdef __OBJC_GC__
/* Observations are managed by the GC system because they need to be
* instances of a class in order to implement weak pointer to observer.
*/
#define obsRetain(X)
#define obsFree(X)
#else
/* Observations have retain/release counts managed explicitly by fast
* function calls.
*/
static void obsRetain(Observation *o);
static void obsFree(Observation *o);
#endif
#define GSI_ARRAY_TYPES 0
#define GSI_ARRAY_TYPE Observation*
#ifdef __OBJC_GC__
#define GSI_ARRAY_NO_RELEASE 1
#define GSI_ARRAY_NO_RETAIN 1
#else
#define GSI_ARRAY_RELEASE(A, X) obsFree(X.ext)
#define GSI_ARRAY_RETAIN(A, X) obsRetain(X.ext)
#endif
#include "GNUstepBase/GSIArray.h"
@ -307,22 +271,6 @@ obsNew(NCTable *t, SEL s, id o)
{
Observation *obs;
#if __OBJC_GC__
/* With clang GC, observations are garbage collected and we don't
* use a cache. However, because the reference to the observer must be
* weak, the observation has to be an instance of a class ...
*/
static Class observationClass;
if (0 == observationClass)
{
observationClass = [GSObservation class];
}
obs = NSAllocateObject(observationClass, 0, _zone);
#else
/* Generally, observations are cached and we create a 'new' observation
* by retrieving from the cache or by allocating a block of observations
* in one go. This works nicely to both hide observations from the
@ -360,7 +308,6 @@ obsNew(NCTable *t, SEL s, id o)
obs->link = (void*)t;
obs->retained = 0;
obs->next = 0;
#endif
obs->selector = s;
obs->observer = o;
@ -493,7 +440,6 @@ static inline void unlockNCTable(NCTable* t)
[t->_lock unlock];
}
#ifndef __OBJC_GC__
static void obsFree(Observation *o)
{
NSCAssert(o->retained >= 0, NSInternalInconsistencyException);
@ -510,7 +456,6 @@ static void obsRetain(Observation *o)
{
o->retained++;
}
#endif
static void listFree(Observation *list)
{

View file

@ -65,10 +65,6 @@
#endif
#endif // __GNUC__
#ifdef __OBJC_GC__
#include <objc/objc-auto.h>
#endif
#define IN_NSOBJECT_M 1
#import "GSPrivate.h"
@ -419,27 +415,6 @@ struct obj_layout {
};
typedef struct obj_layout *obj;
#ifdef __OBJC_GC__
/**
* If -base is compiled in GC mode, then we want to still support manual
* reference counting if we are linked with non-GC code.
*/
static BOOL GSDecrementExtraRefCountWasZero(id anObject);
BOOL
NSDecrementExtraRefCountWasZero(id anObject)
{
if (!objc_collecting_enabled())
{
return GSDecrementExtraRefCountWasZero(anObject);
}
return NO;
}
static BOOL GSDecrementExtraRefCountWasZero(id anObject)
#else
/**
* Examines the extra reference count for the object and, if non-zero
* decrements it, otherwise leaves it unchanged.<br />
@ -449,7 +424,6 @@ static BOOL GSDecrementExtraRefCountWasZero(id anObject)
*/
BOOL
NSDecrementExtraRefCountWasZero(id anObject)
#endif
{
if (double_release_check_enabled)
{
@ -523,34 +497,9 @@ NSDecrementExtraRefCountWasZero(id anObject)
inline NSUInteger
NSExtraRefCount(id anObject)
{
#ifdef __OBJC_GC__
if (objc_collecting_enabled())
{
return UINT_MAX-1;
}
#endif
return ((obj)anObject)[-1].retained;
}
#ifdef __OBJC_GC__
/**
* If -base is compiled in GC mode, then we want to still support manual
* reference counting if we are linked with non-GC code.
*/
static void GSIncrementExtraRefCount(id anObject);
inline void NSIncrementExtraRefCount(id anObject)
{
if (!objc_collecting_enabled())
{
GSIncrementExtraRefCount(anObject);
}
}
static void GSIncrementExtraRefCount(id anObject)
#else
/**
* Increments the extra reference count for anObject.<br />
* The GNUstep version raises an exception if the reference count
@ -559,11 +508,7 @@ static void GSIncrementExtraRefCount(id anObject)
*/
inline void
NSIncrementExtraRefCount(id anObject)
#endif
{
#if __OBJC_GC__
return;
#else /* __OBJC_GC__ */
if (allocationLock != 0)
{
#if defined(GSATOMICREAD)
@ -600,7 +545,6 @@ NSIncrementExtraRefCount(id anObject)
}
((obj)anObject)[-1].retained++;
}
#endif /* __OBJC_GC__ */
}
#ifndef NDEBUG
@ -652,36 +596,8 @@ callCXXConstructors(Class aClass, id anObject)
* the start of each object.
*/
#if __OBJC_GC__
static inline id
GSAllocateObject (Class aClass, NSUInteger extraBytes, NSZone *zone);
inline id
NSAllocateObject(Class aClass, NSUInteger extraBytes, NSZone *zone)
{
id new;
if (!objc_collecting_enabled())
{
new = GSAllocateObject(aClass, extraBytes, zone);
}
else
{
new = class_createInstance(aClass, extraBytes);
}
if (0 == cxx_construct)
{
cxx_construct = sel_registerName(".cxx_construct");
cxx_destruct = sel_registerName(".cxx_destruct");
}
return new;
}
inline id
GSAllocateObject (Class aClass, NSUInteger extraBytes, NSZone *zone)
#else
inline id
NSAllocateObject (Class aClass, NSUInteger extraBytes, NSZone *zone)
#endif
{
id new;
int size;
@ -716,22 +632,9 @@ NSAllocateObject (Class aClass, NSUInteger extraBytes, NSZone *zone)
return new;
}
#if __OBJC_GC__
static void GSDeallocateObject(id anObject);
inline void NSDeallocateObject(id anObject)
{
if (!objc_collecting_enabled())
{
GSDeallocateObject(anObject);
}
}
static void GSDeallocateObject(id anObject)
#else
inline void
NSDeallocateObject(id anObject)
#endif
{
Class aClass = object_getClass(anObject);
@ -765,15 +668,8 @@ NSDeallocateObject(id anObject)
BOOL
NSShouldRetainWithZone (NSObject *anObject, NSZone *requestedZone)
{
#if __OBJC_GC__
// If we're running in hybrid mode, we disable all of the clever zone stuff
// for non-GC code, so this is always true if we're compiled for GC, even if
// we're compiled for GC but not using GC.
return YES;
#else
return (!requestedZone || requestedZone == NSDefaultMallocZone()
|| [anObject zone] == requestedZone);
#endif
}
@ -2078,21 +1974,13 @@ static id gs_weak_load(id obj)
*/
- (NSZone*) zone
{
#if __OBJC_GC__
/* MacOS-X 10.5 seems to return the default malloc zone if GC is enabled.
*/
return NSDefaultMallocZone();
#else
return NSZoneFromPointer(self);
#endif
}
#if !__OBJC_GC__
+ (NSZone *) zone
{
return NSDefaultMallocZone();
}
#endif
/**
* Called to encode the instance variables of the receiver to aCoder.<br />

View file

@ -79,10 +79,6 @@
#import "GSPrivate.h"
#import "GSRunLoopCtxt.h"
#if __OBJC_GC__
# include <objc/objc-auto.h>
#endif
#if defined(HAVE_PTHREAD_NP_H)
# include <pthread_np.h>
#endif

View file

@ -218,121 +218,6 @@ NSZoneName (NSZone *zone)
zone = NSDefaultMallocZone();
return zone->name;
}
#if __OBJC_GC__
#include <objc/objc-auto.h>
__strong void *
NSAllocateCollectable(NSUInteger size, NSUInteger options)
{
if (!objc_collecting_enabled())
{
return calloc(1, size);
}
id obj = objc_gc_allocate_collectable(size,
((options & NSScannedOption) == NSScannedOption));
if ((options & NSCollectorDisabledOption) == NSCollectorDisabledOption)
{
obj = objc_gc_retain(obj);
}
return obj;
}
__strong void *
NSReallocateCollectable(void *ptr, NSUInteger size, NSUInteger options)
{
if (!objc_collecting_enabled())
{
return realloc(ptr, size);
}
return objc_gc_reallocate_collectable(ptr, size,
((options & NSScannedOption) == NSScannedOption));
}
id NSMakeCollectable(id obj)
{
if (objc_collecting_enabled())
{
objc_gc_release(obj);
}
return obj;
}
NSZone*
NSCreateZone (NSUInteger start, NSUInteger gran, BOOL canFree)
{
return &default_zone;
}
inline NSZone*
NSDefaultMallocZone (void)
{
return &default_zone;
}
NSZone*
NSZoneFromPointer (void *ptr)
{
return &default_zone;
}
void
NSRecycleZone (NSZone *zone) { }
BOOL
NSZoneCheck (NSZone *zone)
{
return YES;
}
struct
NSZoneStats NSZoneStats (NSZone *zone)
{
struct NSZoneStats stats = { 0 };
return stats;
}
void
GSMakeWeakPointer(Class theClass, const char *iVarName) { }
BOOL
GSAssignZeroingWeakPointer(void **destination, void *source)
{
objc_assign_weak(source, (id*)destination);
return YES;
}
void*
NSZoneMalloc (NSZone *zone, NSUInteger size)
{
return NSZoneCalloc(zone, 1, size);
}
void*
NSZoneCalloc (NSZone *zone, NSUInteger elems, NSUInteger bytes)
{
if (objc_collecting_enabled())
{
// FIXME: Overflow checking
size_t size = elems * bytes;
return objc_gc_allocate_collectable(size, YES);
}
return calloc(elems, bytes);
}
void*
NSZoneRealloc (NSZone *zone, void *ptr, NSUInteger size)
{
if (objc_collecting_enabled())
{
return objc_gc_reallocate_collectable(ptr, size, YES);
}
return realloc(ptr, size);
}
void NSZoneFree (NSZone *zone, void *ptr) { }
#else /* __OBJC_GC__ */
/* Alignment */
#ifdef ALIGN
@ -1929,4 +1814,3 @@ GSPrivateIsCollectable(const void *ptr)
return NO;
}
#endif /* __OBJC_GC__ */