References:
The SSE protocol
"SSE in depth: using HTTP for server-to-client data push"
In a nutshell
SSE can be understood as follows: after the server and client establish a connection, both sides keep it open, but only the server pushes data to the client; once the push is finished the connection is closed, and the exchange itself is stateless.
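For orientation, a server that speaks SSE replies with Content-Type: text/event-stream and then keeps writing messages on the open connection. Each message is a handful of id/event/data/retry lines terminated by a blank line, which is exactly what the client code further down splits on ("\n\n"); the field values here are placeholders:

HTTP/1.1 200 OK
Content-Type: text/event-stream

id:1
event:message
data:hello
retry:10000

id:2
event:message
data:world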
Below is an SSE implementation based on libhv.
SSE server
int Handler::sse(const HttpContextPtr& ctx) {
    // push an SSEvent(message) every 10s
    hv::setInterval(10000, [ctx](hv::TimerID timerID) {
        static int ncount = 0;
        if (ctx->writer->isConnected()) {
            char szTime[DATETIME_FMT_BUFLEN] = {0};
            datetime_t now = datetime_now();
            datetime_fmt(&now, szTime);
            ctx->writer->SSEvent(szTime);
            // register this connection with the SSE manager so subscribed data can be pushed to it
            static LONGLONG ids = 0;
            SSEMgr::instance().add(::InterlockedIncrement64(&ids), ctx);
            if (++ncount >= 10) {
                //hv::killTimer(timerID);
                ctx->writer->close();
                ncount = 0;
            }
        } else {
            hv::killTimer(timerID);
        }
    });
    return HTTP_STATUS_UNFINISHED;
}
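The handler returns HTTP_STATUS_UNFINISHED so the connection stays open while the timer keeps writing events. A minimal sketch of wiring it into a libhv server might look like the following; it assumes Handler::sse is declared as a static method in some visible header (hypothetical here) and uses port 12900 to match the test demo further down:

#include "hv/HttpServer.h"
// #include "Handler.h"   // hypothetical header declaring: static int sse(const HttpContextPtr&);

int main() {
    HttpService router;
    router.GET("/sse", Handler::sse);   // same path the client demo requests

    hv::HttpServer server;
    server.service = &router;           // attach the routing table
    server.port = 12900;
    server.run();                       // blocks and serves until stopped
    return 0;
}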
SSE client
typedef std::function<void(const std::string& sid, const std::string& sevent, const std::string& sdata, const unsigned int retry_ms)> sse_msg_cb;

HV_INLINE int sse(http_method method, const char* url, const sse_msg_cb& msg_cb, const http_body& body = NoBody, const http_headers& headers = DefaultHeaders, const unsigned int timeout_s = -1) {
    hv::HttpClient cli;
    HttpRequest req;
    HttpResponse resp;
    req.url = url;
    req.method = method;
    req.timeout = timeout_s;    // -1 by default: never time out
    if (&body != &NoBody) {
        req.body = body;
    }
    if (&headers != &DefaultHeaders) {
        req.headers = headers;
    }
    bool bstream = false;
    req.http_cb = [msg_cb, &bstream](HttpMessage* resp, http_parser_state state, const char* data, size_t size) {
        if (state == HP_HEADERS_COMPLETE) {
            if (resp->headers["Content-Type"] == "text/event-stream") {
                bstream = true;
                return 0;
            }
        }
        else if (state == HP_BODY) {
            /* a binary body should validate data here */
            // printf("%s", std::string(data, size).c_str());
            resp->body.append(data, size);
            if (!bstream) return 0;
            /* extract complete messages, each terminated by "\n\n" */
            size_t ifind = std::string::npos;
            while ((ifind = resp->body.find("\n\n")) != std::string::npos) {
                std::string msg = resp->body.substr(0, ifind + 2);
                resp->body.erase(0, ifind + 2);
                /* parse one message; multiple data lines are not handled for now
                   id:xxx\n
                   event:xxx\n
                   data:xxx\n
                   data:xxx\n
                   data:xxx\n
                   retry:10000\n
                */
                auto kvs = hv::splitKV(msg, '\n', ':');
                if (msg_cb && (kvs.count("id") || kvs.count("event") || kvs.count("data") || kvs.count("retry")))
                    msg_cb(kvs.count("id") ? kvs["id"] : "", kvs.count("event") ? kvs["event"] : "", kvs.count("data") ? kvs["data"] : "",
                           kvs.count("retry") ? atoi(kvs["retry"].c_str()) : 0);
            }
        }
        return 0;
    };
    return cli.send(&req, &resp);
}
Test demo
sse(HTTP_GET, "http://127.0.0.1:12900/sse",
    [](const std::string& sid, const std::string& sevent,
       const std::string& sdata, const unsigned int retry_ms) {
        printf("id:%s\r\nevent:%s\r\ndata:%s\r\nretry:%u\r\n\r\n",
               sid.c_str(), sevent.c_str(), sdata.c_str(), retry_ms);
    });
Project practice
- SSE connection management and data pushing (subscriptions can be managed via request parameters); a short usage sketch follows the SSEMgr.cpp listing below.
SSEMgr.h
#pragma once
#include <unordered_map>
#include "hv/HttpContext.h"
#include <mutex>

class SSEMgr
{
public:
    static SSEMgr& instance();
    void add(LONGLONG id, HttpContextPtr ctx);
    void broadcast(const std::string& data);
    bool empty();
private:
    SSEMgr();
    void on_ctx_close(LONGLONG id);
private:
    std::mutex m_mtx;
    std::unordered_map<LONGLONG, HttpContextPtr> m_id_ctxs;
};
SSEMgr.cpp
#include "stdafx.h"
#include "SSEMgr.h"
#include "GameStatusMgr.h"
SSEMgr& SSEMgr::instance()
{
static SSEMgr sInstance;
return sInstance;
}
SSEMgr::SSEMgr()
{
}
void SSEMgr::add(LONGLONG id, HttpContextPtr ctx)
{
XLOGI("SSEMgr add:%I64d", id);
ctx->userdata = (void*)id;
ctx->writer->onclose = std::bind(&SSEMgr::on_ctx_close, &SSEMgr::instance(), id);
if (ctx->writer->isConnected())
{
std::lock_guard<std::mutex> lk(m_mtx);
m_id_ctxs[id] = ctx;
}
//开始刷新数据
GameStatusMgr::instance();
}
void SSEMgr::broadcast(const std::string& data)
{
std::unordered_map<LONGLONG, HttpContextPtr> id_ctxs;
{
std::lock_guard<std::mutex> lk(m_mtx);
id_ctxs = m_id_ctxs;
}
for (auto &it:id_ctxs)
{
if (it.second && it.second->writer->isConnected())
it.second->writer->SSEvent(data);
else if (it.second) //on_ctx_close 可能触发多次,但是不影响
on_ctx_close((LONGLONG)it.second->userdata);
}
}
bool SSEMgr::empty()
{
std::lock_guard<std::mutex> lk(m_mtx);
return m_id_ctxs.empty();
}
void SSEMgr::on_ctx_close(LONGLONG id)
{
XLOGI("SSEMgr erase:%I64d", id);
std::lock_guard<std::mutex> lk(m_mtx);
m_id_ctxs.erase(id);
}
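As a usage sketch, any component that produces new state (for example the GameStatusMgr referenced above) can hand a serialized snapshot to SSEMgr::broadcast, which forwards it to every live connection through writer->SSEvent. The helper name push_status_to_sse_clients below is made up for illustration; only SSEMgr itself comes from the listing above.

#include "SSEMgr.h"

// Hypothetical producer-side helper: push one serialized status snapshot to
// every SSE client currently registered with SSEMgr.
static void push_status_to_sse_clients(const std::string& json_status)
{
    if (SSEMgr::instance().empty())
        return;                               // no subscribers, skip the work
    // broadcast() copies the registered contexts under the lock, then calls
    // writer->SSEvent(json_status) on each connection that is still alive.
    SSEMgr::instance().broadcast(json_status);
}

Subscription management via request parameters, as mentioned in the bullet above, can be layered on top of this by storing a topic string next to each HttpContextPtr in add() and filtering on it inside broadcast().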