08 [Online Log Analysis] Flume Agent (aggregation node) sink to Kafka cluster

This post shows how to collect data with Apache Flume and deliver it to Apache Kafka: configure the Flume agent to connect to the Kafka cluster, create the Kafka topic and set its properties, then start the Flume agent so that log data is pushed into Kafka for real-time processing.


1. Create the logtopic topic
[root@sht-sgmhadoopdn-01 kafka]# bin/kafka-topics.sh --create --zookeeper 172.16.101.58:2181,172.16.101.59:2181,172.16.101.60:2181/kafka --replication-factor 3 --partitions 1 --topic logtopic
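
To confirm the topic was created with the expected layout, describe it against the same ZooKeeper chroot; the output should show 1 partition with 3 replicas:

[root@sht-sgmhadoopdn-01 kafka]# bin/kafka-topics.sh --describe --zookeeper 172.16.101.58:2181,172.16.101.59:2181,172.16.101.60:2181/kafka --topic logtopic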


2. Create avro_memory_kafka.properties (Kafka sink)
[root@sht-sgmhadoopcm-01 ~]# cd /tmp/flume-ng/conf
[root@sht-sgmhadoopcm-01 conf]# cp avro_memory_hdfs.properties avro_memory_kafka.properties
[root@sht-sgmhadoopcm-01 conf]# vi avro_memory_kafka.properties
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = avro
a1.sources.r1.bind = 172.16.101.54
a1.sources.r1.port = 4545


# Describe the sink
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.kafka.topic = logtopic
a1.sinks.k1.kafka.bootstrap.servers = 172.16.101.58:9092,172.16.101.59:9092,172.16.101.60:9092
a1.sinks.k1.kafka.flumeBatchSize = 6000
a1.sinks.k1.kafka.producer.acks = 1
a1.sinks.k1.kafka.producer.linger.ms = 1
a1.sinks.k1.kafka.producer.compression.type = snappy


# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.keep-alive = 90
a1.channels.c1.capacity = 2000000
a1.channels.c1.transactionCapacity = 6000


# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
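
Two notes on the sizing above: kafka.flumeBatchSize (6000) equals the channel's transactionCapacity, so one sink transaction drains exactly one producer batch, while capacity (2,000,000) is the in-memory cushion for bursts while the sink catches up. Per the Flume documentation, properties under the kafka.producer. prefix are passed straight through to the Kafka producer, so if durability matters more than latency the acknowledgement level can be raised above acks = 1; an optional tweak:

# optional: wait for all in-sync replicas to acknowledge each batch
a1.sinks.k1.kafka.producer.acks = -1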


3. Start the flume-ng agent (aggregation node) in the background and check nohup.out
[root@sht-sgmhadoopcm-01 ~]# source /etc/profile
[root@sht-sgmhadoopcm-01 ~]# cd /tmp/flume-ng/
[root@sht-sgmhadoopcm-01 flume-ng]# nohup  flume-ng agent -c conf -f /tmp/flume-ng/conf/avro_memory_kafka.properties  -n a1 -Dflume.root.logger=INFO,console &
[1] 4971
[root@sht-sgmhadoopcm-01 flume-ng]# nohup: ignoring input and appending output to `nohup.out'

[root@sht-sgmhadoopcm-01 flume-ng]# cat nohup.out
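
Before enabling the collector nodes, the avro source can be smoke-tested with Flume's built-in avro-client; a minimal sketch, assuming a throwaway test file (the path /tmp/test.log is illustrative):

[root@sht-sgmhadoopcm-01 flume-ng]# echo "hello kafka sink" > /tmp/test.log
[root@sht-sgmhadoopcm-01 flume-ng]# flume-ng avro-client -H 172.16.101.54 -p 4545 -F /tmp/test.log

Each line of the file is delivered as one event and should land in logtopic via the memory channel and Kafka sink.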


4. Check that the three log-collection (collector) nodes are started
[hdfs@flume-agent-01 flume-ng]$ . ~/.bash_profile
[hdfs@flume-agent-02 flume-ng]$ . ~/.bash_profile
[hdfs@flume-agent-03 flume-ng]$ . ~/.bash_profile


[hdfs@flume-agent-01 flume-ng]$ nohup  flume-ng agent -c /tmp/flume-ng/conf -f /tmp/flume-ng/conf/exec_memory_avro.properties -n a1 -Dflume.root.logger=INFO,console &
[hdfs@flume-agent-02 flume-ng]$ nohup  flume-ng agent -c /tmp/flume-ng/conf -f /tmp/flume-ng/conf/exec_memory_avro.properties -n a1 -Dflume.root.logger=INFO,console &
[hdfs@flume-agent-03 flume-ng]$ nohup  flume-ng agent -c /tmp/flume-ng/conf -f /tmp/flume-ng/conf/exec_memory_avro.properties -n a1 -Dflume.root.logger=INFO,console &
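
A quick way to confirm all three agents stayed up after the nohup launch (run on each host):

[hdfs@flume-agent-01 flume-ng]$ ps -ef | grep flume | grep -v grep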


5. Open the Kafka Manager monitoring UI
http://172.16.101.55:9999
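
If Kafka Manager is not reachable, a console consumer gives the same end-to-end confirmation; a sketch, assuming Kafka 0.10+ (older releases may need --zookeeper 172.16.101.58:2181/kafka or --new-consumer instead of --bootstrap-server):

[root@sht-sgmhadoopdn-01 kafka]# bin/kafka-console-consumer.sh --bootstrap-server 172.16.101.58:9092 --topic logtopic --from-beginning

New log lines appended on the collector nodes should scroll by here as they are collected.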
