accept && create socket

//-------------------------------------------Acceptor::OnNewConnections

void Acceptor::OnNewConnections(Socket* acception) {
    int progress = Socket::PROGRESS_INIT;
    do {
        OnNewConnectionsUntilEAGAIN(acception);
        if (acception->Failed()) {
            return;
        }
    } while (acception->MoreReadEvents(&progress));
}
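
The MoreReadEvents(&progress) call at the bottom of the loop is what makes this safe under edge-triggered epoll: it atomically checks whether new read events arrived while we were busy accepting. A standalone sketch of the idea (the real version is a Socket member operating on butil::atomic<int> _nevent; see the _nevent discussion further below):

#include <atomic>

// Try to swap the event counter back to zero, given the count we have already
// handled. A failed CAS means new events arrived in the meantime: *progress is
// updated to the new count and the caller must loop again.
static bool MoreReadEventsSketch(std::atomic<int>* nevent, int* progress) {
    return !nevent->compare_exchange_strong(
        *progress, 0,
        std::memory_order_release, std::memory_order_acquire);
}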

//---------------------------------------Acceptor::OnNewConnectionsUntilEAGAIN
void Acceptor::OnNewConnectionsUntilEAGAIN(Socket* acception) {
    while (1) {
        struct sockaddr_storage in_addr;
        bzero(&in_addr, sizeof(in_addr));
        socklen_t in_len = sizeof(in_addr);

        // in_fd guards the connected fd returned by accept().
        butil::fd_guard in_fd(accept(acception->fd(), (sockaddr*)&in_addr, &in_len));
        if (in_fd < 0) {
        ........ // elided: returns once accept() reports EAGAIN/EWOULDBLOCK
        }

        ........
        SocketId socket_id;
        SocketOptions options;
        options.keytable_pool = am->_keytable_pool;

        // hand the accepted fd over to options.fd
        options.fd = in_fd;
        butil::sockaddr2endpoint(&in_addr, in_len, &options.remote_side);
        options.user = acception->user();
        options.force_ssl = am->_force_ssl;
        options.initial_ssl_ctx = am->_ssl_ctx;
#if BRPC_WITH_RDMA
        if (am->_use_rdma) {
            options.on_edge_triggered_events = rdma::RdmaEndpoint::OnNewDataFromTcp;
        } else {
#else
        {
#endif
            options.on_edge_triggered_events = InputMessenger::OnNewMessages;
        }
        options.use_rdma = am->_use_rdma;
        options.bthread_tag = am->_bthread_tag;

        // The static Socket::Create builds a Socket* and yields its socket_id.
        if (Socket::Create(options, &socket_id) != 0) {
            LOG(ERROR) << "Fail to create Socket";
            continue;
        }
        in_fd.release(); // transfer ownership to socket_id


    }
}
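
The butil::fd_guard used above is a small RAII wrapper: it closes the accepted fd automatically when the guard goes out of scope (e.g. on the `continue` after a failed Socket::Create), and release() transfers ownership to the Socket. A minimal sketch of such a wrapper, assuming nothing beyond the usage shown above:

#include <unistd.h>

// fd_guard-style RAII wrapper (illustrative, not butil's exact code): owns an
// fd, closes it on destruction, release() hands ownership out.
class FdGuard {
public:
    explicit FdGuard(int fd) : _fd(fd) {}
    ~FdGuard() { if (_fd >= 0) ::close(_fd); }
    int release() { const int fd = _fd; _fd = -1; return fd; }
    operator int() const { return _fd; }  // allows `if (in_fd < 0)`
private:
    FdGuard(const FdGuard&);             // non-copyable
    void operator=(const FdGuard&);
    int _fd;
};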

//------------------------------- Socket::Create: creates the Socket instance
// SocketId = 32-bit version + 32-bit slot.
int Socket::Create(const SocketOptions& options, SocketId* id) {
    butil::ResourceId<Socket> slot;
    Socket* const m = butil::get_resource(&slot, Forbidden());
    if (m == NULL) {
        LOG(FATAL) << "Fail to get_resource<Socket>";
        return -1;
    }
    g_vars->nsocket << 1;
    CHECK(NULL == m->_shared_part.load(butil::memory_order_relaxed));
    m->_nevent.store(0, butil::memory_order_relaxed);
    m->_keytable_pool = options.keytable_pool;
    m->_tos = 0;
    m->_remote_side = options.remote_side;
    m->_on_edge_triggered_events = options.on_edge_triggered_events; // OnNewMessages
    m->_user = options.user;
    m->_conn = options.conn;
    m->_app_connect = options.app_connect;
    // nref can be non-zero due to concurrent AddressSocket().
    // _this_id will only be used in destructor/Destroy of referenced
    // slots, which is safe and properly fenced. Although it's better
    // to put the id into SocketUniquePtr.

    // generate the unique socket id
    m->_this_id = MakeSocketId(
            VersionOfVRef(m->_versioned_ref.fetch_add(
                    1, butil::memory_order_release)), slot);

    m->_preferred_index = -1;
    m->_hc_count = 0;
    CHECK(m->_read_buf.empty());
    const int64_t cpuwide_now = butil::cpuwide_time_us();
    m->_last_readtime_us.store(cpuwide_now, butil::memory_order_relaxed);
    m->reset_parsing_context(options.initial_parsing_context);
    m->_correlation_id = 0;
    m->_health_check_interval_s = options.health_check_interval_s;
    m->_is_hc_related_ref_held = false;
    m->_hc_started.store(false, butil::memory_order_relaxed);
    m->_ninprocess.store(1, butil::memory_order_relaxed);
    m->_auth_flag_error.store(0, butil::memory_order_relaxed);
    const int rc2 = bthread_id_create(&m->_auth_id, NULL, NULL);
    if (rc2) {
        LOG(ERROR) << "Fail to create auth_id: " << berror(rc2);
        m->SetFailed(rc2, "Fail to create auth_id: %s", berror(rc2));
        return -1;
    }
    m->_force_ssl = options.force_ssl;
    // Disable SSL check if there is no SSL context
    m->_ssl_state = (options.initial_ssl_ctx == NULL ? SSL_OFF : SSL_UNKNOWN);
    m->_ssl_session = NULL;
    m->_ssl_ctx = options.initial_ssl_ctx;
#if BRPC_WITH_RDMA
    CHECK(m->_rdma_ep == NULL);
    if (options.use_rdma) {
        m->_rdma_ep = new (std::nothrow)rdma::RdmaEndpoint(m);
        if (!m->_rdma_ep) {
            const int saved_errno = errno;
            PLOG(ERROR) << "Fail to create RdmaEndpoint";
            m->SetFailed(saved_errno, "Fail to create RdmaEndpoint: %s",
                         berror(saved_errno));
            return -1;
        }
        m->_rdma_state = RDMA_UNKNOWN;
    } else {
        m->_rdma_state = RDMA_OFF;
    }
#endif
    m->_connection_type_for_progressive_read = CONNECTION_TYPE_UNKNOWN;
    m->_controller_released_socket.store(false, butil::memory_order_relaxed);
    m->_overcrowded = false;
    // May be non-zero for RTMP connections.
    m->_fail_me_at_server_stop = false;
    m->_logoff_flag.store(false, butil::memory_order_relaxed);
    m->_additional_ref_status.store(REF_USING, butil::memory_order_relaxed);
    m->_error_code = 0;
    m->_error_text.clear();
    m->_agent_socket_id.store(INVALID_SOCKET_ID, butil::memory_order_relaxed);
    m->_total_streams_unconsumed_size.store(0, butil::memory_order_relaxed);
    m->_ninflight_app_health_check.store(0, butil::memory_order_relaxed);
    // NOTE: last two params are useless in bthread > r32787
    const int rc = bthread_id_list_init(&m->_id_wait_list, 512, 512);
    if (rc) {
        LOG(ERROR) << "Fail to init _id_wait_list: " << berror(rc);
        m->SetFailed(rc, "Fail to init _id_wait_list: %s", berror(rc));
        return -1;
    }
    m->_last_writetime_us.store(cpuwide_now, butil::memory_order_relaxed);
    m->_unwritten_bytes.store(0, butil::memory_order_relaxed);
    m->_keepalive_options = options.keepalive_options;
    m->_bthread_tag = options.bthread_tag;
    CHECK(NULL == m->_write_head.load(butil::memory_order_relaxed));
    m->_is_write_shutdown = false;
    // Must be last one! Internal fields of this Socket may be access
    // just after calling ResetFileDescriptor.

    // ResetFileDescriptor stores options.fd into m->_fd and registers the fd
    // with the event dispatcher.
    if (m->ResetFileDescriptor(options.fd) != 0) {
        const int saved_errno = errno;
        PLOG(ERROR) << "Fail to ResetFileDescriptor";
        m->SetFailed(saved_errno, "Fail to ResetFileDescriptor: %s",
                     berror(saved_errno));
        return -1;
    }
    *id = m->_this_id; // return the newly created socket id
    return 0;
}
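
The "SocketId = 32-bit version + 32-bit slot" layout noted above is what makes ids safe to hold across recycling: the slot locates the Socket in the resource pool, while the version (taken from _versioned_ref) is bumped when the slot is reused, so resolving a stale id fails instead of touching a recycled Socket. Illustrative packing helpers (assumed names, mirroring the layout only, not brpc's exact functions):

#include <stdint.h>

typedef uint64_t SocketId;

// Pack/unpack the 32-bit version and 32-bit slot described above.
inline SocketId MakeSocketIdSketch(uint32_t version, uint32_t slot) {
    return (static_cast<uint64_t>(version) << 32) | slot;
}
inline uint32_t VersionOfId(SocketId id) { return static_cast<uint32_t>(id >> 32); }
inline uint32_t SlotOfId(SocketId id)    { return static_cast<uint32_t>(id); }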

//---------------------------------------------------------------- Socket::ResetFileDescriptor
int Socket::ResetFileDescriptor(int fd) {
    // Reset message sizes when fd is changed.
    _last_msg_size = 0;
    _avg_msg_size = 0;
    // MUST store `_fd' before adding itself into epoll device to avoid
    // race conditions with the callback function inside epoll

    // store the fd into Socket::_fd
    _fd.store(fd, butil::memory_order_release);
    _reset_fd_real_us = butil::gettimeofday_us();
    if (!ValidFileDescriptor(fd)) {
        return 0;
    }
    // OK to fail, non-socket fd does not support this.
    if (butil::get_local_side(fd, &_local_side) != 0) {
        _local_side = butil::EndPoint();
    }

    // FIXME : close-on-exec should be set by new syscalls or worse: set right
    // after fd-creation syscall. Setting at here has higher probabilities of
    // race condition.
    butil::make_close_on_exec(fd);

    // Make the fd non-blocking.
    if (butil::make_non_blocking(fd) != 0) {
        PLOG(ERROR) << "Fail to set fd=" << fd << " to non-blocking";
        return -1;
    }
    // turn off nagling.
    // OK to fail, namely unix domain socket does not support this.
    butil::make_no_delay(fd);
    if (_tos > 0 &&
        setsockopt(fd, IPPROTO_IP, IP_TOS, &_tos, sizeof(_tos)) != 0) {
        PLOG(ERROR) << "Fail to set tos of fd=" << fd << " to " << _tos;
    }

    if (FLAGS_socket_send_buffer_size > 0) {
        int buff_size = FLAGS_socket_send_buffer_size;
        if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buff_size, sizeof(buff_size)) != 0) {
            PLOG(ERROR) << "Fail to set sndbuf of fd=" << fd << " to "
                        << buff_size;
        }
    }

    if (FLAGS_socket_recv_buffer_size > 0) {
        int buff_size = FLAGS_socket_recv_buffer_size;
        if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &buff_size, sizeof(buff_size)) != 0) {
            PLOG(ERROR) << "Fail to set rcvbuf of fd=" << fd << " to "
                        << buff_size;
        }
    }

    EnableKeepaliveIfNeeded(fd);

    if (_on_edge_triggered_events) {

        // Register the fd with the event dispatcher, passing the socket id
        // along so that incoming events can be mapped back to this Socket.
        if (GetGlobalEventDispatcher(fd, _bthread_tag).AddConsumer(id(), fd) != 0) {
            PLOG(ERROR) << "Fail to add SocketId=" << id()
                        << " into EventDispatcher";
            _fd.store(-1, butil::memory_order_release);
            return -1;
        }
    }
    return 0;
}
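
AddConsumer is where the socket id gets attached to the fd's epoll registration. Conceptually it boils down to an epoll_ctl call that stores the 64-bit id in epoll_event.data.u64, which is exactly what EventDispatcher::Run reads back as e[i].data.u64 below (a sketch, not brpc's exact code):

#include <stdint.h>
#include <sys/epoll.h>

typedef uint64_t SocketId;

// Register fd for edge-triggered input and stash the SocketId in the event
// payload, so epoll_wait() returns it with every event on this fd.
int AddConsumerSketch(int epfd, SocketId socket_id, int fd) {
    epoll_event evt;
    evt.events = EPOLLIN | EPOLLET;
    evt.data.u64 = socket_id;
    return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &evt);
}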

//------- EventDispatcher::Run -> epoll_wait -> event arrives -> Socket::StartInputEvent -> Socket::ProcessEvent


void EventDispatcher::Run() {
    while (!_stop) {
        epoll_event e[32];
#ifdef BRPC_ADDITIONAL_EPOLL
        // Performance downgrades in examples.
        int n = epoll_wait(_epfd, e, ARRAY_SIZE(e), 0);
        if (n == 0) {
            n = epoll_wait(_epfd, e, ARRAY_SIZE(e), -1);
        }
#else
        const int n = epoll_wait(_epfd, e, ARRAY_SIZE(e), -1);
#endif
        if (_stop) {
            // epoll_ctl/epoll_wait should have some sort of memory fencing
            // guaranteeing that we (after epoll_wait) see _stop set before
            // epoll_ctl.
            break;
        }
        if (n < 0) {
            if (EINTR == errno) {
                // We've checked _stop, no wake-up will be missed.
                continue;
            }
            PLOG(FATAL) << "Fail to epoll_wait epfd=" << _epfd;
            break;
        }
        for (int i = 0; i < n; ++i) {
            if (e[i].events & (EPOLLIN | EPOLLERR | EPOLLHUP)
#ifdef BRPC_SOCKET_HAS_EOF
                || (e[i].events & has_epollrdhup)
#endif
                ) {
                // We don't care about the return value.
                Socket::StartInputEvent(e[i].data.u64, e[i].events, // data.u64 is the 64-bit socket id
                                        _consumer_thread_attr);
            }
        }
        for (int i = 0; i < n; ++i) {
            if (e[i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP)) {
                // We don't care about the return value.
                Socket::HandleEpollOut(e[i].data.u64);
            }
        }
    }
}

//--------------------------------------------Socket::StartInputEvent
int Socket::StartInputEvent(SocketId id, uint32_t events,
                            const bthread_attr_t& thread_attr) {
    SocketUniquePtr s;

    // Resolve the socket id back to a Socket*; this is the id that
    // AddConsumer registered with the dispatcher earlier.
    if (Address(id, &s) < 0) {
        return -1;
    }
    if (NULL == s->_on_edge_triggered_events) { // set to OnNewMessages in Socket::Create
        // Callback can be NULL when receiving error epoll events
        // (Added into epoll by `WaitConnected')
        return 0;
    }
    if (s->fd() < 0) { // the accepted fd was stored in Socket::_fd
#if defined(OS_LINUX)
        CHECK(!(events & EPOLLIN)) << "epoll_events=" << events;
#elif defined(OS_MACOSX)
        CHECK((short)events != EVFILT_READ) << "kqueue filter=" << events;
#endif
        return -1;
    }

    // if (events & has_epollrdhup) {
    //     s->_eof = 1;
    // }
    // Passing e[i].events causes complex visibility issues and
    // requires stronger memory fences, since reading the fd returns
    // error as well, we don't pass the events.

/* Events keep arriving on the same fd, so Socket keeps a butil::atomic _nevent
   member to guarantee that at most one bthread processes a given fd at a time.
   When an event arrives, the EventDispatcher increments s->_nevent; only if the
   value before the increment was 0 does it start a bthread to handle the fd's
   data. A non-zero previous value means a bthread is already working on this
   fd, so StartInputEvent returns immediately. */

    if (s->_nevent.fetch_add(1, butil::memory_order_acq_rel) == 0) {
        // According to the stats, above fetch_add is very effective. In
        // server processing 1 million requests per second, this counter
        // is just 1500~1700/s
        g_vars->neventthread << 1;

        bthread_t tid;
        // transfer ownership as well, don't use s anymore!
        Socket* const p = s.release();

        bthread_attr_t attr = thread_attr;
        attr.keytable_pool = p->_keytable_pool;
        attr.tag = bthread_self_tag();
        if (FLAGS_usercode_in_coroutine) {
            ProcessEvent(p);
        } else if (bthread_start_urgent(&tid, &attr, ProcessEvent, p) != 0) { // start a bthread to process it
            LOG(FATAL) << "Fail to start ProcessEvent";
            ProcessEvent(p);
        }
    }
    return 0;
}

//----------------------------------------------Socket::ProcessEvent


void* Socket::ProcessEvent(void* arg) {
    // the enclosed Socket is valid and free to access inside this function.
    SocketUniquePtr s(static_cast<Socket*>(arg)); // the Socket* this event belongs to
    s->_on_edge_triggered_events(s.get()); // OnNewMessages, assigned back in Socket::Create
    return NULL;
}

//--------------------------------------------- Socket* release: OnRecycle()

The Socket* is released through reference counting.

Three functions call OnRecycle to release the Socket*:

Socket::Address(SocketId id, SocketUniquePtr* ptr)

Socket::Dereference()  <---- called from DereferenceSocket

Socket::AddressFailedAsWell(SocketId id, SocketUniquePtr* ptr)

The second caller:


void DereferenceSocket(Socket* s) {
    if (s) {
        s->Dereference();
    }
}
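
All three entry points funnel into the same reference count: the Socket is handed back to the resource pool only when the last reference drops. The core pattern, stripped of brpc's version bookkeeping (illustrative only; the real Dereference also updates the version half of _versioned_ref):

#include <atomic>

struct RefCountedSketch {
    std::atomic<int> nref{1};

    void OnRecycle() { /* return the slot to the resource pool */ }

    void Dereference() {
        // fetch_sub returns the value before the decrement, so 1 -> 0 means
        // this call dropped the last reference and must recycle.
        if (nref.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            OnRecycle();
        }
    }
};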

Actively closing the connection
// Adapt your own nshead-based protocol to use brpc 
class MyNsheadProtocol : public brpc::NsheadService {
public:
    void ProcessNsheadRequest(const brpc::Server&,
                              brpc::Controller* cntl,
                              const brpc::NsheadMessage& request,
                              brpc::NsheadMessage* response, 
                              brpc::NsheadClosure* done) {
        // This object helps you to call done->Run() in RAII style. If you need
        // to process the request asynchronously, pass done_guard.release().
        brpc::ClosureGuard done_guard(done);

        if (cntl->Failed()) {
            // NOTE: You can send back a response containing error information
            // back to client instead of closing the connection.
            cntl->CloseConnection("Close connection due to previous error");
            return;
        }
        *response = request; // Just echo the request to client
    }
};
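
For context, such a service is plugged into the server via ServerOptions::nshead_service (a sketch; 8002 is an arbitrary placeholder port):

// Register the service and start the server.
brpc::Server server;
brpc::ServerOptions options;
options.nshead_service = new MyNsheadProtocol;
if (server.Start(8002, &options) != 0) {
    LOG(ERROR) << "Fail to start server";
    return -1;
}
server.RunUntilAskedToQuit();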

//-------------------------------------------- Controller::CloseConnection


void Controller::CloseConnection(const char* reason_fmt, ...) {
    if (_error_code == 0) {
        _error_code = ECLOSE;
    }
    add_flag(FLAGS_CLOSE_CONNECTION);
    if (!_error_text.empty()) {
        _error_text.push_back(' ');
    }
    ........ // elided: appends the formatted reason to _error_text
}

//--------------------------------- Reading the flag


    // True if CloseConnection() was called.
    bool IsCloseConnection() const { return has_flag(FLAGS_CLOSE_CONNECTION); }

// SendRpcResponse checks this close flag:


// Used by UT, can't be static.
void SendRpcResponse(int64_t correlation_id,
                     Controller* cntl, 
                     MethodStatus* method_status,
                     int64_t received_us,
                     const uint8_t* req,
                     const uint8_t* res) 
{
    ControllerPrivateAccessor accessor(cntl);
    // Span* span = accessor.span();
    // if (span) {
        // span->set_start_send_us(butil::cpuwide_time_us());
    // }
    Socket* sock = accessor.get_sending_socket();

    std::unique_ptr<const uint8_t> recycle_req(req);
    std::unique_ptr<const uint8_t> recycle_res(res);

    std::unique_ptr<Controller, LogErrorTextAndDelete> recycle_cntl(cntl);
    ConcurrencyRemover concurrency_remover(method_status, cntl, received_us);

    ClosureGuard guard(brpc::NewCallback(cntl, &Controller::CallAfterRpcResp, req, res));
    
    StreamId response_stream_id = accessor.response_stream();

    if (cntl->IsCloseConnection()) {
        StreamClose(response_stream_id);
        sock->SetFailed();
        return;
    }
    //bool append_body = false;
    ........
}
