// Start the Kafka broker
./bin/kafka-server-start.sh -daemon config/server.properties
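// (Optional check, not in the original notes) ZooKeeper, here assumed to be the s202:2181 instance used
// below, must already be running; once the broker is started, jps should list a "Kafka" process.
$>jps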
//create the topic
$>kafka-topics.sh --zookeeper s202:2181 --topic calllog --create --replication-factor 3 --partitions 4
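// (Optional) verify the topic layout; --describe should show 4 partitions, each with replication factor 3.
$>kafka-topics.sh --zookeeper s202:2181 --topic calllog --describe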
//list the topics
$>kafka-topics.sh --zookeeper s202:2181 --list
//start a console consumer on the calllog topic, for testing.
$>kafka-console-consumer.sh --zookeeper s201:2181 --topic calllog
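// (Optional test, assumes a broker listening on s201:9092) a console producer can feed test messages
// into calllog; anything typed here should appear in the console consumer above.
$>kafka-console-producer.sh --broker-list s201:9092 --topic calllog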
//configure Flume (exec source -> memory channel -> Kafka sink)
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /home/airib/work/log.log

# Describe the sink
#a1.sinks.k1.type = logger
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.topic = test
a1.sinks.k1.brokerList = localhost:9092
a1.sinks.k1.requiredAcks = 1
a1.sinks.k1.batchSize = 20

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
//Flume pushes data into Kafka (save the agent config above as conf/dir-exec.conf, then start the Flume agent)
bin/flume-ng agent -c conf -f conf/dir-exec.conf -n a1
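// (Quick end-to-end test, not in the original notes) append a line to the tailed file; it should show up
// in the console consumer, provided the sink's topic and brokerList point at the topic and brokers being consumed.
$>echo "hello flume to kafka" >> /home/airib/work/log.log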
//start Hadoop, then read data from Kafka and store it in HBase
//start HBase
start-hbase.sh
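// (Assumed prerequisite) before data from Kafka can be stored, a target table must exist in HBase; the
// table name 'calllog' and column family 'f1' below are illustrative choices, not taken from the original notes.
$>hbase shell
hbase> create 'calllog', 'f1'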