diff --git a/Source/GSPrivate.h b/Source/GSPrivate.h
index fdfe2d8a9..846382b7e 100644
--- a/Source/GSPrivate.h
+++ b/Source/GSPrivate.h
@@ -641,6 +641,7 @@ GSPrivateEncodeBase64(const uint8_t *src, NSUInteger length, uint8_t *dst)
 /* When we don't have a runtime with ARC to support weak references, we
  * use our own version.
  */
+BOOL GSPrivateMarkedWeak(id obj, BOOL mark) GS_ATTRIB_PRIVATE;
 void GSWeakInit() GS_ATTRIB_PRIVATE;
 BOOL objc_delete_weak_refs(id obj);
 #endif
diff --git a/Source/NSObject.m b/Source/NSObject.m
index cb6b2d9aa..ee22da266 100644
--- a/Source/NSObject.m
+++ b/Source/NSObject.m
@@ -208,8 +208,7 @@ extern void GSLogZombie(id o, SEL sel)
 #undef GSATOMICREAD
 #endif
 
-#if defined(__llvm__) || (defined(USE_ATOMIC_BUILTINS) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
-/* Use the GCC atomic operations with recent GCC versions */
+#ifdef OBJC_CAP_ARC
 
 typedef intptr_t volatile *gsatomic_t;
 typedef intptr_t gsrefcount_t;
@@ -217,6 +216,15 @@ extern void GSLogZombie(id o, SEL sel)
 #define GSAtomicIncrement(X) __sync_add_and_fetch(X, 1)
 #define GSAtomicDecrement(X) __sync_sub_and_fetch(X, 1)
 
+#elif (defined(USE_ATOMIC_BUILTINS) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
+/* Use the GCC atomic operations with recent GCC versions */
+
+typedef int32_t volatile *gsatomic_t;
+typedef int32_t gsrefcount_t;
+#define GSATOMICREAD(X) (*(X))
+#define GSAtomicIncrement(X) __sync_add_and_fetch(X, 1)
+#define GSAtomicDecrement(X) __sync_sub_and_fetch(X, 1)
+
 #elif defined(_WIN32)
 
 /* Set up atomic read, increment and decrement for mswindows
@@ -382,7 +390,7 @@ __asm__ __volatile__ (
 
 #include
 
-typedef int gsrefcount_t;  // No atomics, use a simple integer
+typedef int32_t gsrefcount_t;  // No atomics, use a simple integer
 
 /* Having just one allocationLock for all leads to lock contention
  * if there are lots of threads doing lots of retain/release calls.
@@ -415,12 +423,21 @@ __asm__ __volatile__ (
 #endif
 
 #define alignof(type) __builtin_offsetof(struct { const char c; type member; }, member)
 
+#ifndef OBJC_CAP_ARC
+typedef struct {
+  BOOL hadWeakReference: 1;  // set if the instance ever had a weak reference
+} gsinstinfo_t;
+#endif
+
 /*
  * Define a structure to hold information that is held locally
  * (before the start) in each object.
  */
 typedef struct obj_layout_unpadded {
   gsrefcount_t retained;
+#ifndef OBJC_CAP_ARC
+  gsinstinfo_t extra;
+#endif
 } unp;
 #define UNP sizeof(unp)
@@ -441,9 +458,26 @@ __asm__ __volatile__ (
   char padding[__BIGGEST_ALIGNMENT__ - ((UNP % __BIGGEST_ALIGNMENT__)
     ? (UNP % __BIGGEST_ALIGNMENT__) : __BIGGEST_ALIGNMENT__)];
   gsrefcount_t retained;
+#ifndef OBJC_CAP_ARC
+  gsinstinfo_t extra;
+#endif
 };
 typedef struct obj_layout *obj;
 
+#ifndef OBJC_CAP_ARC
+BOOL
+GSPrivateMarkedWeak(id anObject, BOOL mark)
+{
+  BOOL wasMarked = ((obj)anObject)[-1].extra.hadWeakReference;
+
+  if (mark)
+    {
+      ((obj)anObject)[-1].extra.hadWeakReference = YES;
+    }
+  return wasMarked;
+}
+#endif
+
 /*
  * These symbols are provided by newer versions of the GNUstep Objective-C
  * runtime. When linked against an older version, we will use our internal
diff --git a/Source/ObjectiveC2/weak.m b/Source/ObjectiveC2/weak.m
index 41340b6f5..6809fdb54 100644
--- a/Source/ObjectiveC2/weak.m
+++ b/Source/ObjectiveC2/weak.m
@@ -171,6 +171,11 @@
   GSIMapKey     key;
   GSIMapBucket  bucket;
   WeakRef       *ref;
+
+  /* Mark the instance as having had a weak reference at some point.
+   * This information is used when the instance is deallocated.
+   */
+  GSPrivateMarkedWeak(obj, YES);
 
   key.obj = obj;
   bucket = GSIMapBucketForKey(&weakRefs, key);
@@ -254,11 +259,11 @@
   GSIMapBucket  bucket;
   WeakRef       *ref;
 
-  /* FIXME ... for performance we should have marked the object as having
-   * weak references and we should check that in order to avoid the cost
+  /* For performance we should have marked the object as having
+   * weak references and we check that in order to avoid the cost
    * of the map table lookup when it's not needed.
    */
-  if (0)
+  if (NO == GSPrivateMarkedWeak(obj, NO))
    {
      return NO;
    }
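
Below is a minimal, standalone C sketch of the layout trick the patch relies on: a hidden header allocated immediately before the instance carries the extra flag, and the flag is reached by indexing backwards from the object pointer, just as GSPrivateMarkedWeak does with obj_layout above. Because the bit-field lives in a header that already exists for the reference count, marking an object typically costs no additional memory. The names used here (alloc_with_header, marked_weak, instinfo_t, struct layout) are hypothetical stand-ins for illustration, not GNUstep API.

/* sketch.c -- illustrative only; uses the hypothetical names described above. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct {
  unsigned hadWeakReference: 1;   /* set the first time a weak reference is stored */
} instinfo_t;

/* Stand-in for obj_layout: the data kept immediately before each instance. */
typedef struct layout {
  int32_t     retained;           /* reference count */
  instinfo_t  extra;              /* extra per-instance flags */
} *hdr;

/* Allocate header + instance and return a pointer just past the header,
 * mirroring how the inline-refcount scheme hides obj_layout.
 */
static void *
alloc_with_header(size_t size)
{
  hdr h = calloc(1, sizeof(struct layout) + size);

  return (void *)(h + 1);
}

/* Report the previous state of the mark and optionally set it,
 * analogous to GSPrivateMarkedWeak(obj, mark).
 */
static int
marked_weak(void *object, int mark)
{
  int wasMarked = ((hdr)object)[-1].extra.hadWeakReference;

  if (mark)
    {
      ((hdr)object)[-1].extra.hadWeakReference = 1;
    }
  return wasMarked;
}

int
main(void)
{
  void *o = alloc_with_header(16);

  printf("%d\n", marked_weak(o, 1));  /* 0: no weak reference had been stored */
  printf("%d\n", marked_weak(o, 0));  /* 1: the mark set above is now visible */
  free((hdr)o - 1);                   /* free from the start of the header    */
  return 0;
}

This is the property the weak.m change exploits: objc_delete_weak_refs can return NO immediately for objects that were never the target of a stored weak reference, skipping the GSIMapBucketForKey lookup on the common deallocation path.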