随笔 利用GCD资源调用实现定时器

dispatch_source_t 资源调用

如下几个方法

dispatch_source_create 创建一个资源调用对象

dispatch_source_set_timer 设置定时器的参数

dispatch_source_set_event_handler 设置定时器的事件处理

dispatch_activate 启动定时器

dispatch_source_cancel 取消定时器

实现方式

// Countdown demo built on a GCD timer source: ticks once per second on
// the main queue, logging the remaining count until it reaches zero,
// at which point the source cancels itself.
- (void)gcd_timer{
    
    __block int remaining = 10;
    
    // Timer-type source whose events are delivered on the main queue.
    dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue());
    
    // Fire immediately, then every second, with zero leeway.
    dispatch_source_set_timer(timer, DISPATCH_TIME_NOW, 1 * NSEC_PER_SEC, 0);
    
    dispatch_source_set_event_handler(timer, ^{
        
        if (remaining > 0) {
            NSLog(@"没有结束 %d",remaining);
        } else {
            // Count exhausted: stop further events.
            dispatch_source_cancel(timer);
            NSLog(@"结束");
        }
        
        remaining --;
    });
    
    // Sources are created inactive; activation starts event delivery.
    dispatch_activate(timer);
}

dispatch_source_create源码:

/*
 * Public API: create an inactive dispatch source of type `dst`.
 * Returns DISPATCH_BAD_INPUT when (dst, handle, mask) is invalid.
 * The caller's queue `dq` is retained as the target queue; when NULL,
 * the default queue is used instead.
 */
dispatch_source_t
dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle,
		unsigned long mask, dispatch_queue_t dq)
{
	dispatch_source_refs_t dr;
	dispatch_source_t ds;

	// Create the per-type refs object; NULL signals invalid input.
	dr = dux_create(dst, handle, mask)._dr;
	if (unlikely(!dr)) {
		return DISPATCH_BAD_INPUT;
	}

	// Allocate the source as an INACTIVE queue object. "Strict" source
	// types get DSF_STRICT (handlers may not change after activation);
	// others stay mutable (DQF_MUTABLE).
	ds = _dispatch_queue_alloc(source,
			dux_type(dr)->dst_strict ? DSF_STRICT : DQF_MUTABLE, 1,
			DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._ds;
	ds->dq_label = "source";
	ds->ds_refs = dr;
	// Weak back-reference from the refs to the owning source object.
	dr->du_owner_wref = _dispatch_ptr2wref(ds);

	// No target queue supplied: fall back to the default queue;
	// otherwise retain the caller's queue for the source's lifetime.
	if (unlikely(!dq)) {
		dq = _dispatch_get_default_queue(true);
	} else {
		_dispatch_retain((dispatch_queue_t _Nonnull)dq);
	}
	ds->do_targetq = dq;
	// Interval timers carry their interval in `handle` and are armed
	// immediately at creation time.
	if (dr->du_is_timer && (dr->du_timer_flags & DISPATCH_TIMER_INTERVAL)) {
		dispatch_source_set_timer(ds, DISPATCH_TIME_NOW, handle, UINT64_MAX);
	}
	_dispatch_object_debug(ds, "%s", __func__);
	return ds;
}

dispatch_source_set_timer源码:

/*
 * Public API: (re)configure a timer source's start time, interval and
 * leeway. The new configuration is staged atomically in
 * dt_pending_config and picked up when the source is next drained;
 * dx_wakeup with MAKE_DIRTY forces that to happen.
 */
void
dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start,
		uint64_t interval, uint64_t leeway)
{
	dispatch_timer_source_refs_t dt = ds->ds_timer_refs;
	dispatch_timer_config_t dtc;

	// Calling this on a non-timer source is a client bug.
	if (unlikely(!dt->du_is_timer)) {
		DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source");
	}

	// Interval timers (DISPATCH_TIMER_INTERVAL) build their config
	// differently from ordinary timers.
	if (dt->du_timer_flags & DISPATCH_TIMER_INTERVAL) {
		dtc = _dispatch_interval_config_create(start, interval, leeway, dt);
	} else {
		dtc = _dispatch_timer_config_create(start, interval, leeway, dt);
	}
	// Sources bound to a fixed clock may not switch to another clock.
	if (_dispatch_timer_flags_to_clock(dt->du_timer_flags) != dtc->dtc_clock &&
			dt->du_filter == DISPATCH_EVFILT_TIMER_WITH_CLOCK) {
		DISPATCH_CLIENT_CRASH(0, "Attempting to modify timer clock");
	}

	_dispatch_source_timer_telemetry(ds, dtc->dtc_clock, &dtc->dtc_timer);
	// Publish the new config; free any previously pending one that was
	// never consumed.
	dtc = os_atomic_xchg2o(dt, dt_pending_config, dtc, release);
	if (dtc) free(dtc);
	// Mark the source dirty so the pending config gets applied.
	dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY);
}

dispatch_source_set_event_handler源码:

/*
 * Public API: install `handler` (a block) as the source's event
 * handler. Thin wrapper over _dispatch_source_set_handler with
 * kind = DS_EVENT_HANDLER and is_block = true.
 */
void
dispatch_source_set_event_handler(dispatch_source_t ds,
		dispatch_block_t handler)
{
	_dispatch_source_set_handler(ds, handler, DS_EVENT_HANDLER, true);
}

/*
 * Install handler `func` of the given `kind` (event / cancel /
 * registration) on source `ds`. If the source is still inactive, the
 * handler is swapped in directly under a transient suspension;
 * otherwise the replacement is funneled through the source's queue.
 */
static void
_dispatch_source_set_handler(dispatch_source_t ds, void *func,
		uintptr_t kind, bool is_block)
{
	dispatch_continuation_t dc;

	// Wrap the handler in a continuation for later invocation.
	dc = _dispatch_source_handler_alloc(ds, func, kind, is_block);

	// Fast path: the source is inactive and could be suspended —
	// replace the handler in place and resume.
	if (_dispatch_lane_try_inactive_suspend(ds)) {
		_dispatch_source_handler_replace(ds, kind, dc);
		return _dispatch_lane_resume(ds, DISPATCH_RESUME);
	}

	// Slow path: the source is already active.
	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds);
	// Strict sources forbid any handler change after activation.
	if (unlikely(dqf & DSF_STRICT)) {
		DISPATCH_CLIENT_CRASH(kind, "Cannot change a handler of this source "
				"after it has been activated");
	}
	// Ignore handlers mutations past cancelation, it's harmless
	if ((dqf & DSF_CANCELED) == 0) {
		_dispatch_ktrace1(DISPATCH_PERF_post_activate_mutation, ds);
		if (kind == DS_REGISTN_HANDLER) {
			_dispatch_bug_deprecated("Setting registration handler after "
					"the source has been activated");
		} else if (func == NULL) {
			_dispatch_bug_deprecated("Clearing handler after "
					"the source has been activated");
		}
	}
	// Defer the actual replacement to the source's queue so it cannot
	// race with a handler invocation in flight.
	dc->dc_data = (void *)kind;
	_dispatch_barrier_trysync_or_async_f(ds, dc,
			_dispatch_source_set_handler_slow, 0);
}

dispatch_activate 源码:

/*
 * Public API: activate an inactive dispatch object.
 * - Global objects are always active: no-op.
 * - Workloops take a dedicated activation path.
 * - Queue-cluster objects (queues, sources, ...) are activated via
 *   _dispatch_lane_resume with the DISPATCH_ACTIVATE operation.
 */
void
dispatch_activate(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_activate, dou);
	if (unlikely(_dispatch_object_is_global(dou))) {
		return;
	}
	if (dx_metatype(dou._do) == _DISPATCH_WORKLOOP_TYPE) {
		return _dispatch_workloop_activate(dou._dwl);
	}
	if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) {
		return _dispatch_lane_resume(dou._dl, DISPATCH_ACTIVATE);
	}
}

/*
 * True when `dou` is a global (never-deallocated) object, identified
 * by the sentinel reference count DISPATCH_OBJECT_GLOBAL_REFCNT.
 */
static inline bool
_dispatch_object_is_global(dispatch_object_t dou)
{
	return dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT;
}

/*
 * Activate a workloop: clear the ACTIVATED bit, and if the workloop
 * was inactive, finish setup (attributes, fallback priority) and wake
 * it up. Re-activating an already-active workloop is a no-op.
 */
void
_dispatch_workloop_activate(dispatch_workloop_t dwl)
{
	// This transitions either:
	// - from INACTIVE to ACTIVATING
	// - or from ACTIVE to ACTIVE
	uint64_t old_state = os_atomic_and_orig2o(dwl, dq_state,
			~DISPATCH_QUEUE_ACTIVATED, relaxed);

	if (likely(_dq_state_is_inactive(old_state))) {
		if (dwl->dwl_attr) {
			// Activation of a workloop with attributes forces us to create
			// the workloop up front and register the attributes with the
			// kernel.
			_dispatch_workloop_activate_attributes(dwl);
		}
		if (!dwl->dq_priority) {
			// No priority was configured: fall back to default QoS.
			dwl->dq_priority =
					_dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT);
		}
		dwl->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
		// Leave the ACTIVATING state, then wake the workloop.
		os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_ACTIVATING, relaxed);
		return _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2);
	}
}


/*
 * Slow-path resume: the inline suspend count in dq_state underflowed,
 * so transfer half of the side suspend count back into dq_state
 * (minus the one suspend this resume consumes), under the sidelock.
 * If the side count is empty or the transfer would overflow, retry as
 * a plain resume.
 */
static void
_dispatch_lane_resume_slow(dispatch_lane_t dq)
{
	uint64_t old_state, new_state, delta;

	_dispatch_queue_sidelock_lock(dq);

	// what we want to transfer
	delta  = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL;
	// but this is a resume so consume a suspend count at the same time
	delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL;
	switch (dq->dq_side_suspend_cnt) {
	case 0:
		// Nothing on the side count: someone else already drained it.
		goto retry;
	case DISPATCH_QUEUE_SUSPEND_HALF:
		// we will transition the side count to 0, so we want to clear this bit
		delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT;
		break;
	}
	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
		// unsigned overflow of the addition can happen because other
		// threads could have touched this value while we were trying to acquire
		// the lock, or because another thread raced us to do the same operation
		// and got to the lock first.
		if (unlikely(os_add_overflow(old_state, delta, &new_state))) {
			os_atomic_rmw_loop_give_up(goto retry);
		}
	});
	dq->dq_side_suspend_cnt -= DISPATCH_QUEUE_SUSPEND_HALF;
	return _dispatch_queue_sidelock_unlock(dq);

retry:
	_dispatch_queue_sidelock_unlock(dq);
	return _dispatch_lane_resume(dq, DISPATCH_RESUME);
}

/*
 * Finalize activation: run the class's dq_activate hook (if any),
 * then complete the ACTIVATING -> active transition via
 * _dispatch_lane_resume(DISPATCH_ACTIVATION_DONE).
 */
DISPATCH_NOINLINE
static void
_dispatch_lane_resume_activate(dispatch_lane_t dq)
{
	if (dx_vtable(dq)->dq_activate) {
		dx_vtable(dq)->dq_activate(dq);
	}

	_dispatch_lane_resume(dq, DISPATCH_ACTIVATION_DONE);
}

/*
 * Core resume/activation state machine for lanes (queues and sources).
 * `op` selects the transition:
 * - DISPATCH_ACTIVATE:        INACTIVE -> ACTIVATED or ACTIVATING
 * - DISPATCH_ACTIVATION_DONE: ACTIVATING -> fully active
 * - plain resume:             decrement the suspend count, possibly
 *                             taking over activation for sources.
 * After the state transition it either finalizes activation, returns
 * (still suspended), or wakes the lane up.
 */
DISPATCH_NOINLINE
void
_dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op)
{
	// covers all suspend and inactive bits, including side suspend bit
	const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK;
	uint64_t pending_barrier_width =
			(dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
	uint64_t set_owner_and_set_full_width_and_in_barrier =
			_dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT |
			DISPATCH_QUEUE_IN_BARRIER;

	// backward compatibility: only dispatch sources can abuse
	// dispatch_resume() to really mean dispatch_activate()
	bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE);
	uint64_t old_state, new_state;

	//
	// Activation is a bit tricky as it needs to finalize before the wakeup.
	//
	// The inactive bits have 4 states:
	// - 11: INACTIVE
	// - 10: ACTIVATED, but not activating yet
	// - 01: ACTIVATING right now
	// - 00: fully active
	//
	// ACTIVATED is only used when the queue is otherwise also suspended.
	// In that case the last resume will take over the activation.
	//
	// The ACTIVATING state is tricky because it may be cleared by sources
	// firing, to avoid priority inversions problems such as rdar://45419440
	// where as soon as the kevent is installed, the source may fire
	// before its activating state was cleared.
	//
	if (op == DISPATCH_ACTIVATE) {
		// relaxed atomic because this doesn't publish anything, this is only
		// about picking the thread that gets to finalize the activation
		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
			if (!_dq_state_is_inactive(old_state)) {
				// object already active or activated
				os_atomic_rmw_loop_give_up(return);
			}
			if (unlikely(_dq_state_suspend_cnt(old_state))) {
				// { sc != 0, i = INACTIVE } -> i = ACTIVATED
				new_state = old_state - DISPATCH_QUEUE_INACTIVE +
						DISPATCH_QUEUE_ACTIVATED;
			} else {
				// { sc = 0, i = INACTIVE } -> i = ACTIVATING
				new_state = old_state - DISPATCH_QUEUE_INACTIVE +
						DISPATCH_QUEUE_ACTIVATING;
			}
		});
	} else if (op == DISPATCH_ACTIVATION_DONE) {
		// release barrier needed to publish the effect of dq_activate()
		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
			if (unlikely(!(old_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK))) {
				os_atomic_rmw_loop_give_up({
					// object activation was already concurrently done
					// due to a concurrent DISPATCH_WAKEUP_CLEAR_ACTIVATING
					// wakeup call.
					//
					// We still need to consume the internal refcounts because
					// the wakeup doesn't take care of these.
					return _dispatch_release_2_tailcall(dq);
				});
			}

			new_state = old_state - DISPATCH_QUEUE_ACTIVATING;
			if (!_dq_state_is_runnable(new_state)) {
				// Out of width or still suspended.
				// For the former, force _dispatch_lane_non_barrier_complete
				// to reconsider whether it has work to do
				new_state |= DISPATCH_QUEUE_DIRTY;
			} else if (_dq_state_drain_locked(new_state)) {
				// still locked by someone else, make drain_try_unlock() fail
				// and reconsider whether it has work to do
				new_state |= DISPATCH_QUEUE_DIRTY;
			} else {
				// clear overrides and force a wakeup
				new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
				new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
			}
		});
		if (unlikely(new_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) {
			DISPATCH_CLIENT_CRASH(dq, "Corrupt activation state");
		}
	} else {
		// Plain resume (DISPATCH_RESUME).
		// release barrier needed to publish the effect of
		// - dispatch_set_target_queue()
		// - dispatch_set_*_handler()
		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
			new_state = old_state;
			if (is_source && (old_state & suspend_bits) ==
					DISPATCH_QUEUE_INACTIVE) {
				// { sc = 0, i = INACTIVE } -> i = ACTIVATING
				new_state -= DISPATCH_QUEUE_INACTIVE;
				new_state += DISPATCH_QUEUE_ACTIVATING;
			} else if (unlikely(os_sub_overflow(old_state,
					DISPATCH_QUEUE_SUSPEND_INTERVAL, &new_state))) {
				// underflow means over-resume or a suspend count transfer
				// to the side count is needed
				os_atomic_rmw_loop_give_up({
					if (!(old_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) {
						goto over_resume;
					}
					return _dispatch_lane_resume_slow(dq);
				});
		//
		// below this, new_state = old_state - DISPATCH_QUEUE_SUSPEND_INTERVAL
		//
			} else if (_dq_state_is_activated(new_state)) {
				// { sc = 1, i = ACTIVATED } -> i = ACTIVATING
				new_state -= DISPATCH_QUEUE_ACTIVATED;
				new_state += DISPATCH_QUEUE_ACTIVATING;
			} else if (!_dq_state_is_runnable(new_state)) {
				// Out of width or still suspended.
				// For the former, force _dispatch_lane_non_barrier_complete
				// to reconsider whether it has work to do
				new_state |= DISPATCH_QUEUE_DIRTY;
			} else if (_dq_state_drain_locked(new_state)) {
				// still locked by someone else, make drain_try_unlock() fail
				// and reconsider whether it has work to do
				new_state |= DISPATCH_QUEUE_DIRTY;
			} else if (!is_source && (_dq_state_has_pending_barrier(new_state) ||
					new_state + pending_barrier_width <
					DISPATCH_QUEUE_WIDTH_FULL_BIT)) {
				// if we can, acquire the full width drain lock
				// and then perform a lock transfer
				//
				// However this is never useful for a source where there are no
				// sync waiters, so never take the lock and do a plain wakeup
				new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
				new_state |= set_owner_and_set_full_width_and_in_barrier;
			} else {
				// clear overrides and force a wakeup
				new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
				new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
			}
		});
	}

	// This thread won the ACTIVATING transition: run the activate hook.
	if (_dq_state_is_activating(new_state)) {
		return _dispatch_lane_resume_activate(dq);
	}

	// Still suspended after this resume: nothing more to do.
	if (_dq_state_is_suspended(new_state)) {
		return;
	}

	if (_dq_state_is_dirty(old_state)) {
		// <rdar://problem/14637483>
		// dependency ordering for dq state changes that were flushed
		// and not acted upon
		os_atomic_thread_fence(dependency);
		dq = os_atomic_force_dependency_on(dq, old_state);
	}
	// Balancing the retain_2 done in suspend() for rdar://8181908
	dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2;
	if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) {
		// We acquired the barrier/drain lock above: complete it on wakeup.
		flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE;
	} else if (!_dq_state_is_runnable(new_state)) {
		if (_dq_state_is_base_wlh(old_state)) {
			_dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
		}
		return _dispatch_release_2(dq);
	}
	dispatch_assert(!_dq_state_received_sync_wait(old_state));
	dispatch_assert(!_dq_state_in_sync_transfer(old_state));
	return dx_wakeup(dq, _dq_state_max_qos(old_state), flags);

over_resume:
	// Resume called more times than suspend: client bug, crash with a
	// message distinguishing the inactive case.
	if (unlikely(_dq_state_is_inactive(old_state))) {
		DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object");
	}
	DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object");
}

dispatch_source_cancel 源码:

/*
 * Public API: asynchronously cancel a source. Sets DSF_CANCELED and,
 * if this call is the one that set it, wakes the source so its cancel
 * handler can run; a repeated cancel just drops the extra retain.
 */
void
dispatch_source_cancel(dispatch_source_t ds)
{
	_dispatch_object_debug(ds, "%s", __func__);
	// Right after we set the cancel flag, someone else
	// could potentially invoke the source, do the cancellation,
	// unregister the source, and deallocate it. We would
	// need to therefore retain/release before setting the bit
	_dispatch_retain_2(ds);

	if (_dispatch_queue_atomic_flags_set_orig(ds, DSF_CANCELED) & DSF_CANCELED){
		// Already canceled: just balance the retain above.
		_dispatch_release_2_tailcall(ds);
	} else {
		// First cancel: mark dirty and wake so cancellation proceeds;
		// the wakeup consumes the retain (CONSUME_2).
		dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2);
	}
}

 

 

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值