brpc: read && handler

 // Continued from the previous post: ProcessEvent calls OnNewMessages
void InputMessenger::OnNewMessages(Socket* m) {

.......

while (!read_eof) {
    const int64_t received_us = butil::cpuwide_time_us();
    const int64_t base_realtime = butil::gettimeofday_us() - received_us;
    // Calculate bytes to be read.
    size_t once_read = m->_avg_msg_size * 16;
    if (once_read < MIN_ONCE_READ) {
        once_read = MIN_ONCE_READ;
    } else if (once_read > MAX_ONCE_READ) {
        once_read = MAX_ONCE_READ;
    }


    /* If a single DoRead pulls in n messages, the first n-1 are handed to
       ProcessNewMessage -> QueueMessage, which starts n-1 background bthreads
       to process them, while the last message is run in place by the
       InputMessageClosure destructor (RunLastMessage), which calls the
       protocol's process function directly. Because messages are processed
       concurrently, the server cannot guarantee that responses go back in the
       same order as the client issued its requests. */
    const ssize_t nr = m->DoRead(once_read);

........


        if (m->_rdma_state == Socket::RDMA_OFF && messenger->ProcessNewMessage(
                    m, nr, read_eof, received_us, base_realtime, last_msg) < 0) {
            return;
        }

}
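QueueMessage itself is short. The following is a paraphrase of it from src/brpc/input_messenger.cpp (flag details differ across brpc versions): it starts one background bthread per message, and the BTHREAD_NOSIGNAL flag is why the bthread_flush() call we will see later is needed to wake the workers in a batch.

static void QueueMessage(InputMessageBase* to_run_msg,
                         int* num_bthread_created,
                         bthread_keytable_pool_t* keytable_pool) {
    if (!to_run_msg) {
        return;
    }
    // One bthread per message: messages from the same fd run concurrently,
    // which is exactly why response order is not guaranteed.
    bthread_t th;
    bthread_attr_t tmp = BTHREAD_ATTR_NORMAL | BTHREAD_NOSIGNAL;
    tmp.keytable_pool = keytable_pool;
    if (bthread_start_background(&th, &tmp, ProcessInputMessage, to_run_msg) == 0) {
        ++*num_bthread_created;
    } else {
        ProcessInputMessage(to_run_msg);  // fallback: run in the calling thread
    }
}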

//----------------------------------InputMessenger::ProcessNewMessage
int InputMessenger::ProcessNewMessage(
        Socket* m, ssize_t bytes, bool read_eof,
        const uint64_t received_us, const uint64_t base_realtime,
        InputMessageClosure& last_msg) {
    m->AddInputBytes(bytes);

    // Avoid this socket to be closed due to idle_timeout_s
    m->_last_readtime_us.store(received_us, butil::memory_order_relaxed);

    size_t last_size = m->_read_buf.length();
    int num_bthread_created = 0;
    while (1) {
        size_t index = 8888;  // sentinel; CutInputMessage fills in the real handler index
        ParseResult pr = CutInputMessage(m, &index, read_eof);

..........

        m->AddInputMessages(1);
        // Calculate average size of messages
        const size_t cur_size = m->_read_buf.length();
        if (cur_size == 0) {
            // _read_buf is consumed, it's good timing to return blocks
            // cached internally back to TLS, otherwise the memory is not
            // reused until next message arrives which is quite uncertain
            // in situations that most connections are idle.
            m->_read_buf.return_cached_blocks();
        }
        m->_last_msg_size += (last_size - cur_size);
        last_size = cur_size;
        const size_t old_avg = m->_avg_msg_size;
        if (old_avg != 0) {
            m->_avg_msg_size = (old_avg * (MSG_SIZE_WINDOW - 1) + m->_last_msg_size)
            / MSG_SIZE_WINDOW;
        } else {
            m->_avg_msg_size = m->_last_msg_size;
        }
        m->_last_msg_size = 0;
        
        if (pr.message() == NULL) { // the Process() step can be skipped.
            continue;
        }
        pr.message()->_received_us = received_us;
        pr.message()->_base_real_us = base_realtime;
                    
        // This unique_ptr prevents msg from being lost before transferring
        // ownership to last_msg
        DestroyingPtr<InputMessageBase> msg(pr.message());
        QueueMessage(last_msg.release(), &num_bthread_created,
                            m->_keytable_pool);
        if (_handlers[index].process == NULL) {
            LOG(ERROR) << "process of index=" << index << " is NULL";
            continue;
        }
        m->ReAddress(&msg->_socket);
        m->PostponeEOF();
        msg->_process = _handlers[index].process;
        msg->_arg = _handlers[index].arg;

        if (_handlers[index].verify != NULL) {
            ......  // authentication on the first message of a connection
        }
        if (!m->is_read_progressive()) {
            // Transfer ownership to last_msg
            last_msg.reset(msg.release());
        } else {
            QueueMessage(msg.release(), &num_bthread_created,
                         m->_keytable_pool);
            bthread_flush();
            num_bthread_created = 0;
        }

}
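A side note on the windowed average above: _avg_msg_size tracks recent message sizes and absorbs spikes slowly, and OnNewMessages sizes its next read as avg * 16, clamped to [MIN_ONCE_READ, MAX_ONCE_READ]. A tiny standalone demo with made-up numbers (MSG_SIZE_WINDOW here is an assumption mirroring the constant in input_messenger.cpp):

#include <cstddef>
#include <cstdio>

int main() {
    const size_t MSG_SIZE_WINDOW = 10;  // assumption: mirrors input_messenger.cpp
    size_t avg = 0;
    const size_t msgs[] = {100, 100, 4000, 100, 100};  // hypothetical message sizes
    for (size_t i = 0; i < sizeof(msgs) / sizeof(msgs[0]); ++i) {
        const size_t m = msgs[i];
        // Same update rule as ProcessNewMessage: seed with the first size,
        // then blend each new size in with weight 1/MSG_SIZE_WINDOW.
        avg = (avg == 0) ? m : (avg * (MSG_SIZE_WINDOW - 1) + m) / MSG_SIZE_WINDOW;
        printf("msg=%zu avg=%zu\n", m, avg);
    }
    return 0;
}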

//--------------------------InputMessenger::CutInputMessage: .parse is invoked from two call sites, a genuinely confusing design
ParseResult InputMessenger::CutInputMessage(
        Socket* m, size_t* index, bool read_eof) {
    const int preferred = m->preferred_index();
    const int max_index = (int)_max_index.load(butil::memory_order_acquire);

    if (preferred >= 0 && preferred <= max_index
            && _handlers[preferred].parse != NULL) {
        int cur_index = preferred;
        do {

// first call site: the preferred (cached) protocol is tried first
            ParseResult result = _handlers[cur_index].parse(
                &m->_read_buf, m, read_eof, _handlers[cur_index].arg);
            LOG(INFO) << "_handlers[cur_index].parse";  // debug log added for this walkthrough
            if (result.is_ok() ||
                result.error() == PARSE_ERROR_NOT_ENOUGH_DATA) {
                m->set_preferred_index(cur_index);
                *index = cur_index;
                return result;
            } else if (result.error() != PARSE_ERROR_TRY_OTHERS) {

......................
    }
    for (int i = 0; i <= max_index; ++i) {
        if (i == preferred || _handlers[i].parse == NULL) {
            // Don't try preferred handler(already tried) or invalid handler
            continue;
        }

// second call site: fall back to probing every registered protocol
        ParseResult result = _handlers[i].parse(
            &m->_read_buf, m, read_eof, _handlers[i].arg);
        LOG(INFO) << "_handlers[i].parse";  // debug log added for this walkthrough
        if (result.is_ok() ||
..............................
}

_handlers[i].parse          --> enter ParseRpcMessage
_handlers[cur_index].parse  --> enter ParseRpcMessage
enter ProcessRpcRequest
message: "I'm a RPC to connect stream"
_handlers[cur_index].parse  --> enter ParseRpcMessage
_handlers[i].parse          --> enter ParseStreamingMessage
_handlers[cur_index].parse  --> enter ParseStreamingMessage
_handlers[cur_index].parse  --> enter ParseStreamingMessage

........................

From the console log above: ParseRpcMessage is entered twice, then ProcessRpcRequest prints the client message "I'm a RPC to connect stream"; ParseRpcMessage is entered once more after that, even though the client sent only one RPC message, and everything that follows is stream messages from the client. The preferred-index cache in CutInputMessage explains the pattern: the very first read has no preferred protocol, so the probing for-loop reaches ParseRpcMessage (the _handlers[i].parse line), which most likely returned PARSE_ERROR_NOT_ENOUGH_DATA on a partial read, and set_preferred_index() cached it anyway; the next read tries the cached handler (_handlers[cur_index].parse) and succeeds, producing the RPC request. When the first "STRM" frame arrives, the cached ParseRpcMessage returns PARSE_ERROR_TRY_OTHERS, the for-loop probes again, ParseStreamingMessage matches and becomes the new preferred index, so all remaining frames go straight to it.
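For reference, each registered protocol contributes one entry to the _handlers table that both call sites index into. Paraphrased from brpc's src/brpc/input_messenger.h (comments and exact layout are mine):

// CutInputMessage walks this table; preferred_index() caches the last hit.
struct InputMessageHandler {
    // Cuts one message from `source'. Returns a parsed message on success,
    // PARSE_ERROR_NOT_ENOUGH_DATA on a partial read, or
    // PARSE_ERROR_TRY_OTHERS meaning "not my protocol, probe the next one".
    ParseResult (*parse)(butil::IOBuf* source, Socket* socket,
                         bool read_eof, const void* arg);
    // Handles one parsed message, usually in a separate bthread
    // (e.g. ProcessRpcRequest for the baidu_std protocol).
    void (*process)(InputMessageBase* msg);
    // Optional authentication on the first message of a connection.
    bool (*verify)(const InputMessageBase* msg);
    const void* arg;
    const char* name;
};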

//----------------------------------ProcessRpcRequest

ParseRpcMessage returns an RPC packet and ProcessRpcRequest handles it. The most important line of that function is svc->CallMethod, which dispatches to Echo, the method of the StreamingEchoService registered earlier.

void ProcessRpcRequest(InputMessageBase* msg_base) {
    const int64_t start_parse_us = butil::cpuwide_time_us();
    DestroyingPtr<MostCommonMessage> msg(static_cast<MostCommonMessage*>(msg_base));
    SocketUniquePtr socket_guard(msg->ReleaseSocket());
    Socket* socket = socket_guard.get();

    const Server* server = static_cast<const Server*>(msg_base->arg());
    ScopedNonServiceError non_service_error(server);

........

      if (!FLAGS_usercode_in_pthread) {

// CallMethod ends up printing the client message "I'm a RPC to connect stream"
          return svc->CallMethod(method, cntl.release(),
                                 req.release(), res.release(), done);
      }

//------------------------------------------------------------------------------------------------------------------

//------------------------------Echo in StreamingEchoService calls brpc::StreamAccept

virtual void Echo(google::protobuf::RpcController* controller,
                  const example::EchoRequest* request,
                  example::EchoResponse* response,
                  google::protobuf::Closure* done) {
    // This object helps you to call done->Run() in RAII style. If you need
    // to process the request asynchronously, pass done_guard.release().
    brpc::ClosureGuard done_guard(done);
    brpc::Controller* cntl = static_cast<brpc::Controller*>(controller);
    brpc::StreamOptions stream_options;
    stream_options.handler = &_receiver;
    if (brpc::StreamAccept(&_sd, *cntl, &stream_options) != 0) {
        cntl->SetFailed("Fail to accept stream");
        return;
    }
    request->PrintDebugString(); // prints: message: "I'm a RPC to connect stream"
    response->set_message("I am server.");
}
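For context, the client side that drives this whole path looks roughly like the following, condensed from brpc's example/streaming_echo_c++/client.cpp (ConnectStream is my hypothetical wrapper name; channel setup is elided):

#include <brpc/channel.h>
#include <brpc/stream.h>
#include <butil/logging.h>
#include "echo.pb.h"  // example::EchoService, generated from the example proto

int ConnectStream(brpc::Channel& channel) {
    brpc::Controller cntl;
    brpc::StreamId stream;
    if (brpc::StreamCreate(&stream, cntl, NULL) != 0) {
        LOG(ERROR) << "Fail to create stream";
        return -1;
    }
    example::EchoService_Stub stub(&channel);
    example::EchoRequest request;
    example::EchoResponse response;
    request.set_message("I'm a RPC to connect stream");
    stub.Echo(&cntl, &request, &response, NULL);  // the single RPC seen in the log
    if (cntl.Failed()) {
        return -1;
    }
    butil::IOBuf msg;
    msg.append("hello stream");
    return brpc::StreamWrite(stream, msg);  // becomes the "STRM" frames parsed later
}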

//--------------------------------------brpc::StreamAccept calls Stream::Create


int StreamAccept(StreamId* response_stream, Controller &cntl,
                 const StreamOptions* options) {

        ............
    StreamOptions opt;
    if (options != NULL) {
        opt = *options;
    }
    StreamId stream_id;
    if (Stream::Create(opt, cntl._remote_stream_settings, &stream_id) != 0) {
        LOG(ERROR) << "Fail to create stream";
        return -1;
    }
    cntl._response_stream = stream_id;
    *response_stream = stream_id;
    return 0;
}

//------------------Stream::Create calls bthread::execution_queue_start and Socket::Create
int Stream::Create(const StreamOptions &options,
                   const StreamSettings *remote_settings,
                   StreamId *id) {
    Stream* s = new Stream();
    s->_host_socket = NULL;
    s->_fake_socket_weak_ref = NULL;
    s->_connected = false;
    s->_options = options; // options carries the receive-side handler set in Echo (stream_options.handler = &_receiver)

........

// Register Consume as the consumer callback of the execution queue

if (bthread::execution_queue_start(&s->_consumer_queue, &q_opt, Consume, s) != 0) {
    LOG(FATAL) << "Fail to create ExecutionQueue";
    delete s;
    return -1;
}
SocketOptions sock_opt;
sock_opt.conn = s;
SocketId fake_sock_id;
if (Socket::Create(sock_opt, &fake_sock_id) != 0) { // create a "fake" socket that represents this stream
    s->BeforeRecycle(NULL);
    return -1;
}
SocketUniquePtr ptr;
CHECK_EQ(0, Socket::Address(fake_sock_id, &ptr));
s->_fake_socket_weak_ref = ptr.get();
s->_id = fake_sock_id; // the fake socket id becomes the stream id
*id = s->id();         // s->id() shows up again inside mb.flush below
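If the ExecutionQueue pattern is unfamiliar, it is easy to reproduce in isolation. A minimal sketch (Demo_Consume and demo are my hypothetical names, not brpc code):

#include <bthread/execution_queue.h>
#include <butil/logging.h>

static int Demo_Consume(void* meta, bthread::TaskIterator<int>& iter) {
    if (iter.is_queue_stopped()) {
        return 0;
    }
    for (; iter; ++iter) {
        LOG(INFO) << "consumed " << *iter;  // runs in a bthread, serialized per queue
    }
    return 0;
}

int demo() {
    bthread::ExecutionQueueId<int> qid;
    bthread::ExecutionQueueOptions opt;
    if (bthread::execution_queue_start(&qid, &opt, Demo_Consume, NULL) != 0) {
        return -1;
    }
    bthread::execution_queue_execute(qid, 1);  // like OnReceived pushing a frame
    bthread::execution_queue_execute(qid, 2);
    bthread::execution_queue_stop(qid);
    return bthread::execution_queue_join(qid);  // wait until the queue is drained
}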

//--------------------------Stream::Consume calls mb.flush
int Stream::Consume(void *meta, bthread::TaskIterator<butil::IOBuf*>& iter) {
    Stream* s = (Stream*)meta;
    s->StopIdleTimer();
    if (iter.is_queue_stopped()) { ..... }
    DEFINE_SMALL_ARRAY(butil::IOBuf*, buf_list, s->_options.messages_in_batch, 256);
    MessageBatcher mb(buf_list, s->_options.messages_in_batch, s);
    bool has_timeout_task = false;
    for (; iter; ++iter) { // iterate over the received buffers
        butil::IOBuf* t = *iter; // take one buffered frame
        if (t == TIMEOUT_TASK) {
            has_timeout_task = true; // remember that a timeout task was queued
        } else {
            if (s->_parse_rpc_response) {
                s->_parse_rpc_response = false;
                s->HandleRpcResponse(t);
            } else {
                mb.push(t); // batch the received buffer into mb
            }
        }
    }
    if (s->_options.handler != NULL) {
        if (has_timeout_task && mb.total_length() == 0) { // idle timeout fires only when no data was batched
            s->_options.handler->on_idle_timeout(s->id());
        }
    }
    mb.flush(); // hand the batched buffers to the user handler
    ......

//----------------------------------mb.flush calls the real handler: on_received_messages

void flush() {
    if (_size > 0 && _s->_options.handler != NULL) {
        _s->_options.handler->on_received_messages(
                _s->id(), _storage, _size); // where does _s->id() come from? It was assigned in Stream::Create
    }
    for (size_t i = 0; i < _size; ++i) {
        delete _storage[i]; // delete each buffer once processed
    }
    _size = 0;
}
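on_received_messages is user code: the _receiver installed in Echo is an instance of a StreamInputHandler like the one below, adapted from brpc's example/streaming_echo_c++/server.cpp:

#include <sstream>
#include <brpc/stream.h>
#include <butil/logging.h>

class StreamReceiver : public brpc::StreamInputHandler {
public:
    virtual int on_received_messages(brpc::StreamId id,
                                     butil::IOBuf *const messages[],
                                     size_t size) {
        // `messages' and `size' are exactly the _storage/_size handed over by mb.flush.
        std::ostringstream os;
        for (size_t i = 0; i < size; ++i) {
            os << "msg[" << i << "]=" << *messages[i];
        }
        LOG(INFO) << "Received from Stream=" << id << ": " << os.str();
        return 0;
    }
    virtual void on_idle_timeout(brpc::StreamId id) {
        LOG(INFO) << "Stream=" << id << " has no data transmission for a while";
    }
    virtual void on_closed(brpc::StreamId id) {
        LOG(INFO) << "Stream=" << id << " is closed";
    }
};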

//-----------------The consumer queue has been created. So how do received frames actually get pushed into it?


int Stream::OnReceived(const StreamFrameMeta& fm, butil::IOBuf* buf, Socket* sock) {
    if (_host_socket == NULL) {
        if (SetHostSocket(sock) != 0) {
            return -1;
        }
    }
    switch (fm.frame_type()) {
    case FRAME_TYPE_FEEDBACK:
        SetRemoteConsumed(fm.feedback().consumed_size());
        CHECK(buf->empty());
        break;
    case FRAME_TYPE_DATA:
        if (_pending_buf != NULL) {
            _pending_buf->append(*buf);
            buf->clear();
        } else {
            _pending_buf = new butil::IOBuf;
            _pending_buf->swap(*buf);
        }
        if (!fm.has_continuation()) {
            butil::IOBuf *tmp = _pending_buf;
            _pending_buf = NULL;
            int rc = bthread::execution_queue_execute(_consumer_queue, tmp); // push into the consumer queue; Consume picks it up
            if (rc != 0) {
                CHECK(false) << "Fail to push into channel";
                delete tmp;
                Close(rc, "Fail to push into channel");
            }
        }
        break;
    case FRAME_TYPE_RST:
        RPC_VLOG << "stream=" << id() << " received rs
        Close(ECONNRESET, "Received RST frame");
        break;
    case FRAME_TYPE_CLOSE:
        RPC_VLOG << "stream=" << id() << " received cl
        // TODO:: See the comments in Consume
        Close(0, "Received CLOSE frame");
        break;
    case FRAME_TYPE_UNKNOWN:
        RPC_VLOG << "Received unknown frame";
        return -1;
    }
    return 0;
}

//--------------------ParseStreamingMessage calls OnReceived
ParseResult ParseStreamingMessage(butil::IOBuf* source,
                            Socket* socket, bool /*read_eof*/, const void* /*arg*/) {
                                
    char header_buf[12];
    const size_t n = source->copy_to(header_buf, sizeof(header_buf));
    if (n >= 4) {
        void* dummy = header_buf;
        if (*(const uint32_t*)dummy != *(const uint32_t*)"STRM") {
            return MakeParseError(PARSE_ERROR_TRY_OTHERS);
        }
    } else {
        if (memcmp(header_buf, "STRM", n) != 0) {
            return MakeParseError(PARSE_ERROR_TRY_OTHERS);
        }
    }
    if (n < sizeof(header_buf)) {
        return MakeParseError(PARSE_ERROR_NOT_ENOUGH_DATA);
    }
    uint32_t body_size;
    uint32_t meta_size;
    butil::RawUnpacker(header_buf + 4).unpack32(body_size).unpack32(meta_size);
    if (body_size > FLAGS_max_body_size) {
        return MakeParseError(PARSE_ERROR_TOO_BIG_DATA);
    } else if (source->length() < sizeof(header_buf) + body_size) {
        return MakeParseError(PARSE_ERROR_NOT_ENOUGH_DATA);
    }
    if (BAIDU_UNLIKELY(meta_size > body_size)) {
        LOG(ERROR) << "meta_size=" << meta_size << " is bigger than body_size="
                   << body_size;
        // Pop the message
        source->pop_front(sizeof(header_buf) + body_size);
        return MakeParseError(PARSE_ERROR_TRY_OTHERS);
    }
    source->pop_front(sizeof(header_buf));
    butil::IOBuf meta_buf;
    source->cutn(&meta_buf, meta_size);
    butil::IOBuf payload;
    source->cutn(&payload, body_size - meta_size);

    do {
        StreamFrameMeta fm;
        if (!ParsePbFromIOBuf(&fm, meta_buf)) {
            LOG(WARNING) << "Fail to Parse StreamFrameMeta from " << *socket;
            break;
        }
        SocketUniquePtr ptr;
        if (Socket::Address((SocketId)fm.stream_id(), &ptr) != 0) {
            RPC_VLOG_IF(fm.frame_type() != FRAME_TYPE_RST
                            && fm.frame_type() != FRAME_TYPE_CLOSE
                            && fm.frame_type() != FRAME_TYPE_FEEDBACK)
                   << "Fail to find stream=" << fm.stream_id();
            // It's normal that the stream is closed before receiving feedback frames from peer.
            // In this case, RST frame should not be sent to peer, otherwise on-fly data can be lost.
            if (fm.has_source_stream_id() && fm.frame_type() != FRAME_TYPE_FEEDBACK) {
                SendStreamRst(socket, fm.source_stream_id());
            }
            break;
        }
        meta_buf.clear();  // to reduce memory resident
        ((Stream*)ptr->conn())->OnReceived(fm, &payload, socket);
    } while (0);

    // Hack input messenger
    return MakeMessage(NULL); // OnReceived already consumed the frame; nothing is handed back to InputMessenger
}
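Putting the parser together, the on-wire layout of a streaming frame is as follows (derived from the code above, not copied from brpc docs):

+--------+-----------+-----------+-----------------+------------------------+
| "STRM" | body_size | meta_size | StreamFrameMeta | payload                |
| 4 bytes| 4 bytes   | 4 bytes   | meta_size bytes | body_size - meta_size  |
+--------+-----------+-----------+-----------------+------------------------+

fm.stream_id() inside the meta is exactly the fake SocketId created in Stream::Create, which is how the Socket::Address call near the end of the parser locates the Stream object and routes the payload into its consumer queue.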
