dispatch_async Explained

Description

Purpose: submits a block for asynchronous execution on a dispatch queue and returns immediately.

Function prototype:

void dispatch_async(dispatch_queue_t queue, dispatch_block_t block);

Parameters:

  1. queue: the queue on which to submit the block. The system retains the queue until the block has run to completion. This parameter cannot be NULL.
  2. block: the block to submit to the target dispatch queue. This function performs Block_copy and Block_release on behalf of the caller. This parameter cannot be NULL.

This function is the fundamental mechanism for submitting blocks to a dispatch queue. A call to this function always returns immediately after the block has been submitted and never waits for the block to be invoked. The target queue determines whether the block is invoked serially or concurrently with respect to other blocks submitted to that same queue. Independent serial queues are processed concurrently with respect to each other.
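As a minimal sketch of that last point (the queue labels below are arbitrary example names), blocks submitted to one serial queue run in submission order, while two independent serial queues are drained concurrently with respect to each other, so their output may interleave:

#import <Foundation/Foundation.h>

int main(void) {
    @autoreleasepool {
        // Two independent, user-created serial queues.
        dispatch_queue_t q1 = dispatch_queue_create("com.example.serial1", DISPATCH_QUEUE_SERIAL);
        dispatch_queue_t q2 = dispatch_queue_create("com.example.serial2", DISPATCH_QUEUE_SERIAL);

        for (int i = 0; i < 3; i++) {
            // Within q1 (or q2) the blocks run strictly in submission order,
            // but q1 and q2 are processed concurrently with respect to each other.
            dispatch_async(q1, ^{ NSLog(@"q1 block %d", i); });
            dispatch_async(q2, ^{ NSLog(@"q2 block %d", i); });
        }

        // Crude wait so the asynchronous blocks can finish in this toy example.
        [NSThread sleepForTimeInterval:1.0];
    }
    return 0;
}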

The dispatch_async function returns immediately; the block is executed asynchronously in the background at some later point.
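For example, in the snippet below (a global concurrent queue is used purely for illustration) the second NSLog is reached immediately and typically prints before the dispatched block runs:

dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{
    // Runs later, on a worker thread drawn from the global concurrent queue.
    NSLog(@"inside the block");
});
// Reached immediately; usually logs before the block above executes.
NSLog(@"after dispatch_async");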

Source Code Analysis

void dispatch_async(dispatch_queue_t dq, void (^work)(void))
{
    dispatch_async_f(dq, _dispatch_Block_copy(work),
            _dispatch_call_block_and_release);
}
// Copies the block and forwards to the function below
void dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
    // 1. dispatch_continuation_t is the wrapper that will hold the block operation
    dispatch_continuation_t dc;

    // No fastpath/slowpath hint because we simply don't know
    // 2. dq_width == 1 means a serial queue, i.e. the main queue or a user-created serial queue
    if (dq->dq_width == 1) {
        return dispatch_barrier_async_f(dq, ctxt, func);
    }

    // For a global queue or a user-created concurrent queue, execution continues here
    // 3. Each thread keeps a cache (linked list) of dispatch_continuation_t objects; if one is
    //    available, the head of the list is taken and the next node becomes the new cache head
    dc = fastpath(_dispatch_continuation_alloc_cacheonly());
    if (!dc) {
        // 4. No cached dispatch_continuation_t: allocate one on the heap, initialize it,
        //    and call _dispatch_queue_push
        return _dispatch_async_f_slow(dq, ctxt, func);
    }
    // 5. Initialize the dispatch_continuation_t, wrapping the block into it
    dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
    dc->dc_func = func;
    dc->dc_ctxt = ctxt;

    // No fastpath/slowpath hint because we simply don't know
    // 6. If do_targetq exists, the task is handed over to the target queue
    if (dq->do_targetq) {
        return _dispatch_async_f2(dq, dc);
    }

    // 7. Push the dispatch_continuation_t onto the queue's execution list
    _dispatch_queue_push(dq, dc);
}
// Layout of dispatch_continuation_s, the wrapper around a single block operation
struct dispatch_continuation_s {
    const void *do_vtable;
    struct dispatch_continuation_s *volatile do_next;
    dispatch_function_t dc_func;
    void *dc_ctxt;
    dispatch_group_t dc_group;
    void *dc_data[3];
};

From the code above we can see that dispatch_async_f ultimately wraps the block into a dispatch_continuation_s and calls _dispatch_queue_push to append it to the end of the corresponding queue's execution list: a global queue places the continuation on its own execution list, while the main queue and user-created queues place it on the execution list of their do_targetq.
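To make that wrapping idea concrete, here is a deliberately simplified model in plain C — not the real libdispatch code — of pushing function/context pairs onto a FIFO execution list and draining them; all names prefixed demo_ are invented for this sketch:

#include <stdio.h>
#include <stdlib.h>

// Simplified stand-ins for libdispatch types; the real dispatch_continuation_s
// also carries a vtable, a group pointer, and other fields.
typedef void (*demo_function_t)(void *ctxt);

struct demo_continuation {
    struct demo_continuation *do_next;  // singly linked execution list
    demo_function_t dc_func;
    void *dc_ctxt;
};

struct demo_queue {
    struct demo_continuation *head;
    struct demo_continuation *tail;
};

// Rough analogue of _dispatch_queue_push: append the continuation at the tail.
static void demo_queue_push(struct demo_queue *q, demo_function_t func, void *ctxt) {
    struct demo_continuation *dc = calloc(1, sizeof(*dc));
    dc->dc_func = func;
    dc->dc_ctxt = ctxt;
    if (q->tail) q->tail->do_next = dc; else q->head = dc;
    q->tail = dc;
}

// Rough analogue of a serial queue drain: pop continuations and invoke them in FIFO order.
static void demo_queue_drain(struct demo_queue *q) {
    while (q->head) {
        struct demo_continuation *dc = q->head;
        q->head = dc->do_next;
        if (!q->head) q->tail = NULL;
        dc->dc_func(dc->dc_ctxt);
        free(dc);
    }
}

static void demo_print(void *ctxt) { printf("%s\n", (const char *)ctxt); }

int main(void) {
    struct demo_queue q = {0};
    demo_queue_push(&q, demo_print, (void *)"first continuation");
    demo_queue_push(&q, demo_print, (void *)"second continuation");
    demo_queue_drain(&q);  // prints in submission order
    return 0;
}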

dispatch_async(dispatch_get_main_queue(), ^{})

Whether you call dispatch_async (asynchronous) or dispatch_sync (synchronous) determines whether the call is allowed to spawn a child thread, not whether it actually will; the queue determines whether there is any ability to run work concurrently. If child threads are spawned, blocks can run concurrently; if not, they run serially.

So dispatch_async(dispatch_get_main_queue(), ^{}) means the block is submitted asynchronously but executed in order on the main thread; there is no ability to spawn a child thread.
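A typical use, sketched below with hypothetical placeholder work (the URL and the log message stand in for a real download and a real UI update), is to finish heavy work on a background queue and then dispatch_async back to the main queue, where the block is simply appended to the main queue and later run in order on the main thread:

dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{
    // Placeholder for expensive work done off the main thread.
    NSData *data = [NSData dataWithContentsOfURL:[NSURL URLWithString:@"https://example.com"]];

    dispatch_async(dispatch_get_main_queue(), ^{
        // Enqueued on the main queue; runs serially on the main thread once the
        // main run loop gets to it. No child thread is created for this block.
        NSLog(@"loaded %lu bytes; update the UI here", (unsigned long)data.length);
    });
});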
