rebase:偏移修正,任何一个app生成的二进制文件,在二进制文件内部所有的方法、函数调用,都有一个地址,这个地址是在当前二进制文件中的偏移地址。在APP运行时,每次系统都会随机分配一个ASLR(Address Space Layout Randomization,地址空间布局随机化)地址值,然后再加上偏移值,就是运行时确定的内存地址。
if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
    // src points to stack
    // Allocate a fresh heap copy of the byref structure
    struct Block_byref *copy = (struct Block_byref *)malloc(src->size);
    copy->isa = NULL;
    // byref value 4 is logical refcount of 2: one for caller, one for stack
    copy->flags = src->flags | BLOCK_BYREF_NEEDS_FREE | 4;
    // The heap copy forwards to itself (it lives on the heap)
    copy->forwarding = copy; // patch heap copy to point to itself
    // The stack copy now forwards to the heap copy, so mutations made
    // inside the block are also visible outside it
    src->forwarding = copy;  // patch stack to point to heap copy
    copy->size = src->size;

    if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
        // Trust copy helper to copy everything of interest
        // If more than one field shows up in a byref block this is wrong XXX
        // Block_byref_2 sits immediately after Block_byref in memory,
        // reached by pointer arithmetic on the base struct
        struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
        struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
        // Carry over the keep/destroy helper function pointers
        copy2->byref_keep = src2->byref_keep;
        copy2->byref_destroy = src2->byref_destroy;
// Runtime dispose helper: releases whatever 'object' a Block captured,
// dispatching on the capture kind encoded in 'flags'.
void _Block_object_dispose(const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      // __block-qualified variable: release via _Block_byref_release
      case BLOCK_FIELD_IS_BYREF:
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
        break;
      // Captured Block
      case BLOCK_FIELD_IS_BLOCK:
        _Block_release(object);
        break;
      // Captured object-typed variable
      case BLOCK_FIELD_IS_OBJECT:
        // Hand off to the runtime's registered object-release callout
        _Block_release_object(object);
        break;
      // Byref-caller combinations need no action here
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
        break;
      default:
        break;
    }
}
#----------------------------------------
# 6. Re-sign the app's bundled third-party Frameworks
TARGET_APP_FRAMEWORKS_PATH="$TARGET_APP_PATH/Frameworks"
# Only iterate when the Frameworks directory actually exists
if [ -d "$TARGET_APP_FRAMEWORKS_PATH" ]; then
    for FRAMEWORK in "$TARGET_APP_FRAMEWORKS_PATH/"*
    do
open class NSCondition: NSObject, NSLocking {
    internal var mutex = _MutexPointer.allocate(capacity: 1)
    // Condition variable used together with the mutex above
    internal var cond = _ConditionVariablePointer.allocate(capacity: 1)

    public override init() {
#if os(Windows)
        InitializeSRWLock(mutex)
        InitializeConditionVariable(cond)
#else
        // On POSIX this wraps a pthread mutex and condition variable
        pthread_mutex_init(mutex, nil)
        pthread_cond_init(cond, nil)
#endif
    }

    deinit {
#if os(Windows)
        // SRWLock do not need to be explicitly destroyed
#else
        pthread_mutex_destroy(mutex)
        pthread_cond_destroy(cond)
#endif
        mutex.deinitialize(count: 1)
        cond.deinitialize(count: 1)
        mutex.deallocate()
        cond.deallocate()
    }

    open func lock() {
#if os(Windows)
        AcquireSRWLockExclusive(mutex)
#else
        pthread_mutex_lock(mutex)
#endif
    }

    open func unlock() {
#if os(Windows)
        ReleaseSRWLockExclusive(mutex)
#else
        pthread_mutex_unlock(mutex)
#endif
    }

    // Block the calling thread until the condition is signalled
    open func wait() {
#if os(Windows)
        SleepConditionVariableSRW(cond, mutex, WinSDK.INFINITE, 0)
#else
        pthread_cond_wait(cond, mutex)
#endif
    }
// Begin synchronizing on 'obj'. // Allocates recursive mutex associated with 'obj' if needed. // Returns OBJC_SYNC_SUCCESS once lock is acquired. int objc_sync_enter(id obj) { int result = OBJC_SYNC_SUCCESS; // 1. obj有值 if (obj) { // 2. 生成SyncData类型的data,这是重点,注意参数ACQUIRE SyncData* data = id2data(obj, ACQUIRE); ASSERT(data); // 3. 加互斥锁 data->mutex.lock(); } else { // 4. // @synchronized(nil) does nothing if (DebugNilSync) { _objc_inform("NIL SYNC DEBUG: @synchronized(nil); set a breakpoint on objc_sync_nil to debug"); } objc_sync_nil(); }
// End synchronizing on 'obj'. // Returns OBJC_SYNC_SUCCESS or OBJC_SYNC_NOT_OWNING_THREAD_ERROR int objc_sync_exit(id obj) { int result = OBJC_SYNC_SUCCESS; // 1. 判断是否有obj if (obj) { // 获取data,注意传的值的参数是RELEASE SyncData* data = id2data(obj, RELEASE); if (!data) { result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR; } else { // 有值的情况下,就进行解锁 bool okay = data->mutex.tryUnlock(); if (!okay) { result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR; } } } else { // @synchronized(nil) does nothing } return result; }
// Return the calling thread's SyncCache.
// When absent: returns NULL if 'create' is false, otherwise allocates one.
// Also guarantees at least one free slot in the returned cache's list.
static SyncCache *fetch_cache(bool create)
{
    _objc_pthread_data *data;

    // Per-thread objc data: NULL when it doesn't exist and create is
    // false; allocated on demand when create is true
    data = _objc_fetch_pthread_data(create);
    if (!data) return NULL;

    if (!data->syncCache) {
        if (!create) {
            return NULL;
        } else {
            // First use on this thread: start with room for 4 items
            int count = 4;
            data->syncCache = (SyncCache *)
                calloc(1, sizeof(SyncCache) + count*sizeof(SyncCacheItem));
            data->syncCache->allocated = count;
        }
    }

    // Make sure there's at least one open slot in the list.
    if (data->syncCache->allocated == data->syncCache->used) {
        // Full: double the capacity and reallocate in place
        data->syncCache->allocated *= 2;
        data->syncCache = (SyncCache *)
            realloc(data->syncCache,
                    sizeof(SyncCache)
                    + data->syncCache->allocated * sizeof(SyncCacheItem));
    }
/***********************************************************************
* _objc_fetch_pthread_data
* Fetch objc's pthread data for this thread.
* If the data doesn't exist yet and create is NO, return NULL.
* If the data doesn't exist yet and create is YES, allocate and return it.
**********************************************************************/
_objc_pthread_data *_objc_fetch_pthread_data(bool create)
{
    _objc_pthread_data *data;

    // Thread-local lookup keyed by _objc_pthread_key
    data = (_objc_pthread_data *)tls_get(_objc_pthread_key);
    if (!data && create) {
        // Not present yet: allocate zeroed storage and register it in TLS
        data = (_objc_pthread_data *)
            calloc(1, sizeof(_objc_pthread_data));
        tls_set(_objc_pthread_key, data);
    }
done:
    // Release the lock taken for the slow-path list search above;
    // it is only held around that global-list work
    lockp->unlock();
    if (result) {
        // Only new ACQUIRE should get here.
        // All RELEASE and CHECK and recursive ACQUIRE are
        // handled by the per-thread caches above.
        if (why == RELEASE) {
            // Nothing cached for this object on this thread: return nil.
            // Probably some thread is incorrectly exiting
            // while the object is held by another thread.
            return nil;
        }
        // Sanity checks: only a matching ACQUIRE may reach this point
        if (why != ACQUIRE) _objc_fatal("id2data is buggy");
        if (result->object != object) _objc_fatal("id2data is buggy");

#if SUPPORT_DIRECT_THREAD_KEYS
        // True once the single fast-cache slot was consumed earlier on
        if (!fastCacheOccupied) {
            // Save in fast thread cache: store the SyncData and an
            // initial lockCount of 1 in direct thread-local keys
            tls_set_direct(SYNC_DATA_DIRECT_KEY, result);
            tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)1);
        } else
#endif
        {
            // Save in thread cache (the per-thread SyncCache list)
            if (!cache) cache = fetch_cache(YES);
            cache->list[cache->used].data = result;
            cache->list[cache->used].lockCount = 1;
            cache->used++;
        }
    }

    // Hand the SyncData (or nil) back to the caller
    return result;
}
// Enter a dispatch group: atomically decrement the group's value.
// Must be balanced by a later dispatch_group_leave().
void dispatch_group_enter(dispatch_group_t dg)
{
    // The value is decremented on a 32bits wide atomic so that the carry
    // for the 0 -> -1 transition is not propagated to the upper 32bits.
    uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits,
            DISPATCH_GROUP_VALUE_INTERVAL, acquire);
    uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK;
    if (unlikely(old_value == 0)) {
        // First outstanding enter: retain the group until it empties
        _dispatch_retain(dg); // <rdar://problem/22318411>
    }
    if (unlikely(old_value == DISPATCH_GROUP_VALUE_MAX)) {
        // The enter counter saturated: crash rather than wrap the value
        DISPATCH_CLIENT_CRASH(old_bits,
                "Too many nested calls to dispatch_group_enter()");
    }
}
// Leave a dispatch group: atomically increment the group's value and,
// when this was the last outstanding enter, wake waiters/notifications.
void dispatch_group_leave(dispatch_group_t dg)
{
    // The value is incremented on a 64bits wide atomic so that the carry for
    // the -1 -> 0 transition increments the generation atomically.
    uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state,
            DISPATCH_GROUP_VALUE_INTERVAL, release);
    uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK);

    if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) {
        // This leave completes the current generation; recompute the
        // state we want and install it with a CAS loop
        old_state += DISPATCH_GROUP_VALUE_INTERVAL;
        do {
            new_state = old_state;
            if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) {
                // Still empty: both waiter and notify bits can be cleared
                new_state &= ~DISPATCH_GROUP_HAS_WAITERS;
                new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
            } else {
                // If the group was entered again since the atomic_add above,
                // we can't clear the waiters bit anymore as we don't know for
                // which generation the waiters are for
                new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
            }
            if (old_state == new_state) break;
        } while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state,
                old_state, new_state, &old_state, relaxed)));
        // Wake anything waiting on / notified by this group
        return _dispatch_group_wake(dg, old_state, true);
    }

    if (unlikely(old_value == 0)) {
        // Leave without a matching enter: the counter would go negative
        DISPATCH_CLIENT_CRASH((uintptr_t)old_value,
                "Unbalanced call to dispatch_group_leave()");
    }
}