Four libraries are used below. The first is glog, Google's open-source logging library; detailed usage lives in the doc directory of its source tree. The second is protobuf, a multi-language serialization/deserialization library. The third is Kafka, a distributed message queue. The fourth is librdkafka; Kafka itself ships no C++ interface, so this third-party C++ client is used.
glog download link
https://github.com/google/glog/archive/master.zip
protobuf
https://codeload.github.com/protocolbuffers/protobuf/zip/master
kafka
http://mirrors.shu.edu.cn/apache/kafka/2.1.0/kafka_2.11-2.1.0.tgz
librdkafka
https://codeload.github.com/edenhill/librdkafka/zip/master
During testing the Kafka message queue turned out to be thread-safe: the sender ran 100 threads concurrently, each pushing messages into the queue in a loop. Throughput topped out at roughly 500 messages per second, most likely a limit of the local test environment rather than of Kafka itself.
One pitfall up front: librdkafka's poll() blocks indefinitely when you pass -1 as the timeout; pass 0 for a non-blocking call.
The compile commands are pasted directly below:
g++ receiver_Main_test.cpp receiver.cpp addressbook.pb.cc -o receive_test -std=c++11 -lglog -lpthread -lrdkafka++ -lrdkafka -lstdc++ -lrt `pkg-config --cflags --libs protobuf`
g++ sender_Main_test.cpp sender.cpp addressbook.pb.cc -o sender_test -std=c++11 -lglog -lpthread -lrdkafka++ -lrdkafka -lstdc++ -lrt `pkg-config --cflags --libs protobuf`
A CMakeLists.txt will follow later; I'm still learning CMake.
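The compile commands link addressbook.pb.cc, which protoc generates from a .proto schema that isn't listed in this post. Judging from the accessors the code uses (package pt, a Person with name/id/email, and an AddressBook with repeated people), a minimal sketch of the schema could look like the following; it is an assumption, not the original file. Generate the C++ sources with protoc --cpp_out=. addressbook.proto.
syntax = "proto3";
package pt;

// Minimal schema matching the accessors used in the sender/receiver code below.
message Person {
  string name = 1;
  int32 id = 2;
  string email = 3;
}

message AddressBook {
  repeated Person people = 1;
}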
First start the ZooKeeper service. Kafka depends on ZooKeeper; everything here runs as a local single-node test environment.
Then start the Kafka service. Both services run in the foreground and each occupies its own terminal; to run one in the background, append an & to its command. The commands are shown below.
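Assuming the kafka_2.11-2.1.0.tgz archive linked above has been unpacked and the commands are run from its top-level directory, the bundled scripts look like this. The topic name first and the two partitions are chosen to match the test code further down, which produces to and consumes from partition 1:
bin/zookeeper-server-start.sh config/zookeeper.properties
bin/kafka-server-start.sh config/server.properties
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 2 --topic first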
Start the receiver.
Start the sender.
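The two binaries come from the g++ commands above; run them in separate terminals, receiver first:
./receive_test
./sender_test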
At this point the receiver prints the consumed records (screenshot omitted; only part of the output was captured, and a record with personal information was added during testing).
The glog log file at this point (screenshot omitted).
sender
Files: sender.cpp sender.h sender_Main_test.cpp
sender.h
#ifndef SENDER
#define SENDER
#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <csignal>
#include <cstring>
#include <glog/logging.h>
#ifdef _MSC_VER
#include "../win32/wingetopt.h"
#elif _AIX
#include <unistd.h>
#else
#include <getopt.h>
#endif
#include "addressbook.pb.h"
#include "rdkafkacpp.h"
class ExampleEventCb : public RdKafka::EventCb {
public:
void event_cb(RdKafka::Event &event);
};
class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
public:
void dr_cb(RdKafka::Message &message);
};
bool sender(pt::AddressBook fam, int32_t partition, std::string brokers, std::string topic_str);
#endif // !SENDER
sender.cpp
#include "sender.h"
static bool run = true;
void ExampleEventCb::event_cb(RdKafka::Event &event) {
switch (event.type())
{
case RdKafka::Event::EVENT_ERROR:
if (event.fatal()) {
std::cerr << "FATAL ";
run = false;
}
std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
case RdKafka::Event::EVENT_STATS:
std::cerr << "\"STATS\": " << event.str() << std::endl;
break;
case RdKafka::Event::EVENT_LOG:
fprintf(stderr, "LOG-%i-%s: %s\n",
event.severity(), event.fac().c_str(), event.str().c_str());
break;
default:
std::cerr << "EVENT " << event.type() <<
" (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
}
}
void ExampleDeliveryReportCb::dr_cb(RdKafka::Message &message) {
}
bool sender(pt::AddressBook fam, int32_t partition, std::string brokers, std::string topic_str)
{
std::string buf;
fam.SerializeToString(&buf);
static std::string errstr;
static std::string debug;
static RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
static RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
conf->set("metadata.broker.list", brokers, errstr);
static ExampleEventCb ex_event_cb;
static ExampleDeliveryReportCb ex_dr_cb;
conf->set("event_cb", &ex_event_cb, errstr);
conf->set("dr_cb", &ex_dr_cb, errstr);
conf->set("default_topic_conf", tconf, errstr);
if (topic_str.empty())
return false;
static RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
if (!producer)
{
std::cerr << "Failed to create producer: " << errstr << std::endl;
return false;
}
if (!debug.empty()) {
if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
return false;
}
}
RdKafka::ErrorCode resp = producer->produce(topic_str, partition, RdKafka::Producer::RK_MSG_COPY, const_cast<char *>(buf.c_str()), buf.size(), NULL, 0, 0, NULL);
if (resp != RdKafka::ERR_NO_ERROR) {
std::cerr << "Produce failed: " << RdKafka::err2str(resp) << std::endl;
return false;
}
// poll(-1) blocks until an event (e.g. the delivery report) is served; pass 0 for a non-blocking poll.
producer->poll(-1);
return true;
}
sender_Main_test.cpp
#include <iostream>
#include <vector> // std::vector
#include <string> // std::string
#include <future> // std::future
#include "Thread_Pool.h"
#include "sender.h"
int main()
{
Thread_Pool pool(4);
for (int i = 0; i < 8; ++i) {
pool.tash_queue_push_back([i] {
pt::AddressBook fam{};
pt::Person *per;
per = fam.add_people();
per->set_name("test");
per->set_id(i);
per->set_email("HaoSirdada");
while (true)
{
sender(fam, 1, "localhost:9092", "first");
}
});
}
while (1) {};   // keep main alive so the pool threads keep producing (busy-wait)
}
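Thread_Pool.h is included above but not listed in this post. The sketch below is an assumption rather than the original implementation; it only matches the call site (a constructor taking the worker count and tash_queue_push_back enqueuing a void() callable):
// Thread_Pool.h -- minimal sketch, not the original file.
#ifndef THREAD_POOL
#define THREAD_POOL
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <vector>
class Thread_Pool {
public:
    explicit Thread_Pool(std::size_t n) {
        for (std::size_t i = 0; i < n; ++i) {
            workers_.emplace_back([this] {
                for (;;) {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(mtx_);
                        cv_.wait(lock, [this] { return stop_ || !tasks_.empty(); });
                        if (stop_ && tasks_.empty()) return;
                        task = std::move(tasks_.front());
                        tasks_.pop();
                    }
                    task();                 // run the task outside the lock
                }
            });
        }
    }
    // Name kept as-is to match the call in sender_Main_test.cpp.
    void tash_queue_push_back(std::function<void()> task) {
        {
            std::lock_guard<std::mutex> lock(mtx_);
            tasks_.push(std::move(task));
        }
        cv_.notify_one();
    }
    ~Thread_Pool() {
        {
            std::lock_guard<std::mutex> lock(mtx_);
            stop_ = true;
        }
        cv_.notify_all();
        for (auto &w : workers_) w.join();
    }
private:
    std::vector<std::thread> workers_;
    std::queue<std::function<void()>> tasks_;
    std::mutex mtx_;
    std::condition_variable cv_;
    bool stop_ = false;
};
#endif // !THREAD_POOL
Note that each task queued in main loops forever, so with a pool of 4 workers only the first 4 of the 8 queued tasks ever run; the rest stay in the queue.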
receiver
Receiver files: receiver.cpp receiver.h receiver_Main_test.cpp
receiver.h
#ifndef RECEIVER
#define RECEIVER
#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <csignal>
#include <cstring>
#include <glog/logging.h>
#ifdef _MSC_VER
#include "../win32/wingetopt.h"
#elif _AIX
#include <unistd.h>
#else
#include <getopt.h>
#endif
#include "addressbook.pb.h"
#include "rdkafkacpp.h"
bool write_in_log(pt::AddressBook fam);
void msg_consume(RdKafka::Message* message, void* opaque);
bool receive(int32_t partition, std::string brokers, std::string topic_str, int64_t start_offset);
class ExampleConsumeCb : public RdKafka::ConsumeCb {
public:
void consume_cb(RdKafka::Message &msg, void *opaque);
};
class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
public:
void dr_cb(RdKafka::Message &message);
};
class ExampleEventCb : public RdKafka::EventCb {
public:
void event_cb(RdKafka::Event &event);
};
#endif // !RECEIVER
receiver.cpp
#include "receiver.h"
static bool run = true;
static bool exit_eof = false;
void ExampleConsumeCb::consume_cb(RdKafka::Message &msg, void *opaque) {
msg_consume(&msg, opaque);
}
void ExampleDeliveryReportCb::dr_cb(RdKafka::Message &message) {
}
void ExampleEventCb::event_cb(RdKafka::Event &event) {
switch (event.type())
{
case RdKafka::Event::EVENT_ERROR:
if (event.fatal()) {
std::cerr << "FATAL ";
run = false;
}
std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
case RdKafka::Event::EVENT_STATS:
std::cerr << "\"STATS\": " << event.str() << std::endl;
break;
case RdKafka::Event::EVENT_LOG:
fprintf(stderr, "LOG-%i-%s: %s\n",
event.severity(), event.fac().c_str(), event.str().c_str());
break;
default:
std::cerr << "EVENT " << event.type() <<
" (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
}
}
bool write_in_log(pt::AddressBook fam)
{
FLAGS_log_dir = "/log";        // glog does not create this directory; it must already exist and be writable
FLAGS_logtostderr = 0;
FLAGS_stderrthreshold = 0;
LOG(INFO) << fam.people(0).name() << " " << fam.people(0).id() << " " << fam.people(0).email();
return true;
}
void msg_consume(RdKafka::Message* message, void* opaque) {
pt::AddressBook fam;
switch (message->err()) {
case RdKafka::ERR__TIMED_OUT:
break;
case RdKafka::ERR_NO_ERROR:
/* Real message */
std::cout << "Read msg at offset " << message->offset() << std::endl;
if (message->key()) {
std::cout << "Key: " << *message->key() << std::endl;
}
// Use an explicit length: the serialized payload is not NUL-terminated and may contain embedded zero bytes.
fam.ParseFromString(std::string(static_cast<const char *>(message->payload()), message->len()));
if (fam.people_size() > 0) {
write_in_log(fam);
std::cout << fam.people(0).name() << " " << fam.people(0).id() << " " << fam.people(0).email() << std::endl;
}
break;
case RdKafka::ERR__PARTITION_EOF:
if (exit_eof) {
run = false;
}
break;
case RdKafka::ERR__UNKNOWN_TOPIC:
case RdKafka::ERR__UNKNOWN_PARTITION:
std::cerr << "Consume failed: " << message->errstr() << std::endl;
run = false;
break;
default:
std::cerr << "Consume failed: " << message->errstr() << std::endl;
run = false;
}
}
bool receive(int32_t partition, std::string brokers, std::string topic_str, int64_t start_offset)
{
google::InitGoogleLogging("receive");
int use_ccb = 0;
std::string errstr;
RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
conf->set("metadata.broker.list", brokers, errstr);
ExampleEventCb ex_event_cb;
conf->set("event_cb", &ex_event_cb, errstr);
conf->set("enable.partition.eof", "true", errstr);
RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
if (!consumer) {
std::cerr << "Failed to create consumer: " << errstr << std::endl;
exit(1);
}
RdKafka::Topic *topic = RdKafka::Topic::create(consumer, topic_str,
tconf, errstr);
if (!topic) {
std::cerr << "Failed to create topic: " << errstr << std::endl;
exit(1);
}
RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
if (resp != RdKafka::ERR_NO_ERROR) {
std::cerr << "Failed to start consumer: " <<
RdKafka::err2str(resp) << std::endl;
exit(1);
}
ExampleConsumeCb ex_consume_cb;
while (run) {
if (use_ccb) {
consumer->consume_callback(topic, partition, 1000,
&ex_consume_cb, &use_ccb);
}
else {
RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
msg_consume(msg, NULL);
delete msg;
}
consumer->poll(0);
}
consumer->stop(topic, partition);
consumer->poll(1000);
delete topic;
delete consumer;
return true;
}
receiver_Main_test.cpp
#include <iostream>
#include "receiver.h"
int main()
{
receive(1, std::string("localhost:9092"), std::string("first"), RdKafka::Topic::OFFSET_END);
}