C++ Classes Wrapping librdkafka

I won't introduce Kafka itself here; see the introduction to librdkafka, the high-performance C/C++ Kafka client. Below is how to build librdkafka:

Download: https://github.com/edenhill/librdkafka (get the zip package and extract it).

In the extracted tree, find the win32 directory, which contains the Visual Studio projects.

You cannot build yet, because OpenSSL is missing; it has to be built first.

Download ActivePerl from http://www.activestate.com/activeperl/. Mind the 64-bit vs. 32-bit choice: if you are building 64-bit, pick the 64-bit installer.

After downloading, just run the installer; recent versions add the PATH entry automatically, so nothing needs to be set by hand.
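You can confirm that Perl is on the PATH by opening a fresh command prompt and checking the version:

> perl -v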

 

Download OpenSSL:

http://www.openssl.org/

I used version openssl-1.0.2l, extracted into the kafka directory on drive F.

 

Build steps:

(You can follow INSTALL.W64 in the OpenSSL directory.)

Open a command prompt (the Visual Studio developer command prompt; use the x64 tools prompt for a 64-bit build) and change into the OpenSSL source directory:

> cd /d F:\kafka\openssl-1.0.2l

Then run, in order:

> perl Configure VC-WIN64A
> ms\do_win64a
> nmake -f ms\ntdll.mak
> cd out32dll
> ..\ms\test

After the build, the out32dll directory holds the test executables plus libeay32.dll, libeay32.lib, ssleay32.dll, and ssleay32.lib; the header files are under inc32.

Copy these files over to the librdkafka tree: the DLLs go into librdkafka's output directory; the .lib files and headers can live anywhere, as long as the project settings point at them.

 

Back in the librdkafka solution (opened with VS2013):

Add "F:\kafka\openssl-1.0.2l\inc32" to Additional Include Directories,

and "F:\kafka\openssl-1.0.2l\out32dll" to Additional Library Directories.

Start building. A few issues will come up:

1. The linker reports libeay32MT.lib and similar as missing. This is because the OpenSSL we just built does not use those names; simply strip the MT suffix from the library names under Linker → Input (see the example after this list).

2. u32 and size_t may show up as unrecognized types; just substitute the underlying type. For example, u32 is defined as typedef unsigned u32, so use unsigned directly.

3. Change C/C++ → Code Generation → Runtime Library to the static multithreaded runtime: /MTd for Debug and /MT for Release.
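To make fix 1 concrete: after stripping the suffixes, the OpenSSL entries under Linker → Input → Additional Dependencies should match the names our build actually produced. Only the two OpenSSL entries are shown here; the rest of the line varies per project:

libeay32.lib;ssleay32.lib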

Then do a full build. One project will be skipped; don't worry about it, it isn't needed in practice.

 

With that done, we can start writing the wrapper classes:

Producer:

#pragma once

#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <csignal>
#include <cstring>
#include <list>
#include "rdkafkacpp.h"
#include <vector>
#include <fstream>
using std::string;
using std::list;
using std::cout;
using std::endl;
using std::vector;
using std::fstream;
class KafkaProducerCallBack : public RdKafka::DeliveryReportCb {
public:
	void dr_cb(RdKafka::Message &message) {
		std::cout << "Message delivery for (" << message.len() << " bytes): " <<
			message.errstr() << std::endl;
		if (message.key())
			std::cout << "Key: " << *(message.key()) << ";" << std::endl;
	}
};
class KafkaProducerEventCallBack : public RdKafka::EventCb {
public:
	void event_cb(RdKafka::Event &event) {
		switch (event.type())
		{
		case RdKafka::Event::EVENT_ERROR:
			std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
				event.str() << std::endl;
			if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
				std::cerr << "All brokers are down" << std::endl;
			break;
		case RdKafka::Event::EVENT_STATS:
			std::cerr << "\"STATS\": " << event.str() << std::endl;
			break;
		case RdKafka::Event::EVENT_LOG:
			fprintf(stderr, "LOG-%i-%s: %s\n",
				event.severity(), event.fac().c_str(), event.str().c_str());
			break;
		default:
			std::cerr << "EVENT " << event.type() <<
				" (" << RdKafka::err2str(event.err()) << "): " <<
				event.str() << std::endl;
			break;
		}
	}
};
class KafkaProducerClient
{
public:
	KafkaProducerClient(const string &brokers, const string &topics, int nPartition = 0);
	virtual ~KafkaProducerClient();
	bool Init();
	void Send(const string &msg);
	void Stop();
private:
	RdKafka::Producer *m_pProducer = nullptr;
	RdKafka::Topic *m_pTopic = nullptr;
	KafkaProducerCallBack m_producerCallBack;
	KafkaProducerEventCallBack m_producerEventCallBack;
	std::string m_strTopics;
	std::string m_strBroker;
	bool m_bRun = false;
	int m_nPartition = 0;
};

 

 

#include "KafkaProducerClient.h"

 
KafkaProducerClient::KafkaProducerClient(const string &brokers, const string &topics, int nPartition /*= 0*/) 
: m_bRun(true), m_strTopics(topics), m_strBroker(brokers), m_nPartition(nPartition)
{
 
}
 
KafkaProducerClient::~KafkaProducerClient()
{
	Stop();
}
 
bool KafkaProducerClient::Init()
{
	string errstr = "";
	RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
	RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
	/* set the broker list */
	if (conf->set("metadata.broker.list", m_strBroker, errstr) != RdKafka::Conf::CONF_OK){
		std::cerr << "RdKafka conf set brokerlist failed: " << errstr.c_str() << endl;
	}
	conf->set("dr_cb", &m_producerCallBack, errstr);
	conf->set("event_cb", &m_producerEventCallBack, errstr);
	m_pProducer = RdKafka::Producer::create(conf, errstr);
	delete conf; /* create() copies the configuration, so it can be freed now */
	if (!m_pProducer) {
		std::cerr << "Failed to create producer: " << errstr << std::endl;
		return false;
	}
 
	m_pTopic = RdKafka::Topic::create(m_pProducer, m_strTopics,
		tconf, errstr);
	delete tconf;
	if (!m_pTopic) {
		std::cerr << "Failed to create topic: " << errstr << std::endl;
		return false;
	}
	return true;
}
 
void KafkaProducerClient::Send(const string &msg)
{
	if (!m_bRun)
		return;
	RdKafka::ErrorCode resp = m_pProducer->produce(m_pTopic, m_nPartition,
												   RdKafka::Producer::RK_MSG_COPY /* copy payload */,
												   const_cast<char *>(msg.c_str()), msg.size(),
												   NULL, NULL);
	if (resp != RdKafka::ERR_NO_ERROR)
		std::cerr << "Produce failed: " << RdKafka::err2str(resp) << std::endl;
	else
		std::cerr << "Produced message (" << msg.size() << " bytes)" << std::endl;
 
	m_pProducer->poll(0);
}
 
void KafkaProducerClient::Stop()
{
	m_bRun = false;
	delete m_pTopic;
	m_pTopic = nullptr;
	delete m_pProducer;
	m_pProducer = nullptr;
}
 

 

Usage:

KafkaProducerClient *KafkaprClient_ = new KafkaProducerClient("localhost:9092", "test", 0);
KafkaprClient_->Init();
KafkaprClient_->Send("小刚dddddddd");
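Note that produce() is asynchronous: messages queued by Send() may still be in flight when Stop() deletes the producer. A flush helper you could add to the wrapper is sketched below (Flush() is not part of the class above, so this is an addition of mine, but outq_len() and poll() are standard RdKafka::Handle methods):

// Sketch: drain pending deliveries before destroying the producer.
// Declare "void Flush(int timeout_ms);" in KafkaProducerClient first.
void KafkaProducerClient::Flush(int timeout_ms)
{
	while (m_pProducer && m_pProducer->outq_len() > 0)
		m_pProducer->poll(timeout_ms); // serves dr_cb delivery reports
}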

 

 

 

 

Consumer:

 

#ifndef KAFKACONSUMER_H

#define KAFKACONSUMER_H
#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <csignal>
#include <cstring>
#include <list>
#include "rdkafkacpp.h"
#include <vector>
#include <fstream>
using std::string;
using std::list;
using std::cout;
using std::endl;
using std::vector;
using std::fstream;
class KafkaConsumerClient{
public:
    KafkaConsumerClient(const std::string& brokers, const std::string& topics, const std::string& groupid, int32_t nPartition = 0, int64_t offset = 0);
    virtual ~KafkaConsumerClient();
	// initialize the consumer
    bool Init();
	// start fetching messages (blocks until Stop() is called)
    void Start(int timeout_ms);
	// stop the fetch loop
	void Stop();
private:
    void Msg(RdKafka::Message *msg, void *opt);
private:
    std::string m_strBrokers;
    std::string m_strTopics;
    std::string m_strGroupid;
    int64_t m_nLastOffset = 0;
    RdKafka::Consumer *m_pKafkaConsumer = nullptr;
    RdKafka::Topic    *m_pTopic         = nullptr;
    int64_t           m_nCurrentOffset  = RdKafka::Topic::OFFSET_BEGINNING;
    int32_t           m_nPartition      = 0;
	bool m_bRun = false;
};
#endif // KAFKACONSUMER_H

 

 

#include "KafkaConsumerClient.h"

KafkaConsumerClient::KafkaConsumerClient(const std::string& brokers, const std::string& topics, const std::string& groupid, int32_t nPartition /*= 0*/, int64_t offset /*= 0*/)
:m_strBrokers(brokers),
m_strTopics(topics),
m_strGroupid(groupid),
m_nPartition(nPartition),
m_nCurrentOffset(offset)
{
}
KafkaConsumerClient::~KafkaConsumerClient()
{
	Stop();
}
bool KafkaConsumerClient::Init(){
	RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    if(!conf){
		std::cerr << "RdKafka create global conf failed" <<endl;
        return false;
    }
    std::string errstr;
    /* set the broker list */
	if (conf->set("metadata.broker.list", m_strBrokers, errstr) != RdKafka::Conf::CONF_OK){
		std::cerr << "RdKafka conf set brokerlist failed: " << errstr.c_str() << endl;
    }
    /* set the consumer group */
    if (conf->set("group.id", m_strGroupid, errstr) != RdKafka::Conf::CONF_OK){
		std::cerr << "RdKafka conf set group.id failed: " << errstr.c_str() << endl;
    }
    std::string strfetch_num = "10240000";
    /* maximum number of bytes fetched from a single partition per request */
    if(conf->set("max.partition.fetch.bytes", strfetch_num, errstr) != RdKafka::Conf::CONF_OK){
		std::cerr << "RdKafka conf set max.partition.fetch.bytes failed: " << errstr.c_str() << endl;
    }
    /* create the Kafka consumer instance */
    m_pKafkaConsumer = RdKafka::Consumer::create(conf, errstr);
    delete conf;
    if(!m_pKafkaConsumer){
        std::cerr << "failed to create consumer: " << errstr.c_str() << endl;
        return false;
    }
    /* create the topic configuration */
    RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
    if(!tconf){
		std::cerr << "RdKafka create topic conf failed"<<endl;
        return false;
    }
    if(tconf->set("auto.offset.reset", "smallest", errstr) != RdKafka::Conf::CONF_OK){;
		std::cerr << "RdKafka conf set auto.offset.reset failed:" << errstr.c_str() << endl;
    }
    m_pTopic = RdKafka::Topic::create(m_pKafkaConsumer, m_strTopics, tconf, errstr);
    delete tconf;
    if(!m_pTopic){
		std::cerr << "RdKafka create topic failed: " << errstr.c_str() << endl;
        return false;
    }
    /* start consuming at the offset passed to the constructor */
    RdKafka::ErrorCode resp = m_pKafkaConsumer->start(m_pTopic, m_nPartition, m_nCurrentOffset);
    if (resp != RdKafka::ERR_NO_ERROR){
		std::cerr << "failed to start consumer: " << RdKafka::err2str(resp) << endl;
        return false;
    }
    return true;
}
void KafkaConsumerClient::Msg(RdKafka::Message *message, void *opt){
    switch(message->err()){
        case RdKafka::ERR__TIMED_OUT:
            break;
        case RdKafka::ERR_NO_ERROR:
            /* print the payload; it is not NUL-terminated, hence %.*s */
            printf("%.*s\n",
                static_cast<int>(message->len()),
				static_cast<const char *>(message->payload()));
            m_nLastOffset = message->offset();
            break;
        case RdKafka::ERR__PARTITION_EOF:
			cout << "Reached the end of the queue, offset: " << m_nLastOffset << endl;
            break;
        case RdKafka::ERR__UNKNOWN_TOPIC:
        case RdKafka::ERR__UNKNOWN_PARTITION:
			cout << "Consume failed: " << message->errstr()<<endl;
			Stop();
            break;
        default:
			cout << "Consume failed: " << message->errstr() << endl;
			Stop();
            break;
    }
}
void KafkaConsumerClient::Start(int timeout_ms){
    RdKafka::Message *msg = nullptr;
	m_bRun = true;
	while (m_bRun){
        msg = m_pKafkaConsumer->consume(m_pTopic, m_nPartition, timeout_ms);
        Msg(msg, nullptr);
        m_pKafkaConsumer->poll(0);
        delete msg;
    }
    m_pKafkaConsumer->stop(m_pTopic, m_nPartition);
    if(m_pTopic){
        delete m_pTopic;
        m_pTopic = nullptr;
    }
    if(m_pKafkaConsumer){
        delete m_pKafkaConsumer;
        m_pKafkaConsumer = nullptr;
    }
    /* wait for all rdkafka handles to be destroyed */
    RdKafka::wait_destroyed(5000);
}
void KafkaConsumerClient::Stop()
{
	m_bRun = false;
}

 

 

Usage:

KafkaConsumerClient *KafkaConsumerClient_ = new KafkaConsumerClient("localhost:9092", "test", "1", 0);
if (!KafkaConsumerClient_->Init()){
    fprintf(stderr, "kafka server initialize error\n");
}
else{
    KafkaConsumerClient_->Start(1000);
}
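Start() blocks in its consume loop, so put it on a worker thread if the caller needs to keep going; Stop() only flips m_bRun, so it can be called from another thread (strictly speaking, m_bRun should then be std::atomic<bool> to guarantee cross-thread visibility). A minimal sketch:

#include <thread>

std::thread worker([=] { KafkaConsumerClient_->Start(1000); });
// ... application work ...
KafkaConsumerClient_->Stop();  // the loop exits after the next consume() timeout
worker.join();
delete KafkaConsumerClient_;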

 

 

Of course, you first need a running Kafka service, with the cluster and topic already created.

For details, see this very thorough post: http://www.cnblogs.com/alvingofast/p/kafka_deployment_on_windows.html
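For reference, creating the test topic used above looks roughly like this with a Kafka distribution of that era, run from the Kafka install directory (the --zookeeper flag applies to pre-1.0 Kafka; newer releases use --bootstrap-server instead):

> bin\windows\kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test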

 

Finally, everything from this post (including the JRE and ZooKeeper needed to stand up the Kafka service; librdkafka and 64-bit OpenSSL come prebuilt) can be downloaded from my Baidu cloud drive:

Link: http://pan.baidu.com/s/1nviKgS5

Password: 6tu5

 

 

 

 

 

 
