tar -zxf /opt/software/apache-flume-1.10.1-bin.tar.gz -C /opt/module/
mv /opt/module/apache-flume-1.10.1-bin/ /opt/module/flume
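After the rename, /opt/module/flume is the Flume home used throughout this section; a quick listing should show the usual bin, conf, and lib directories:

ls /opt/module/flume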
Edit Flume's log configuration so that logs go to a dedicated directory and, for easier debugging, also to the console:

vim /opt/module/flume/conf/log4j2.xml

The two changes are the LOG_DIR property and the extra Console appender reference on the Root logger:

<Property name="LOG_DIR">/opt/module/flume/log</Property>
<AppenderRef ref="Console" />

The complete log4j2.xml after the changes:
<?xml version="1.0" encoding="UTF-8"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements. See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License. You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<Configuration status="ERROR">
  <Properties>
    <Property name="LOG_DIR">/opt/module/flume/log</Property>
  </Properties>
  <Appenders>
    <Console name="Console" target="SYSTEM_ERR">
      <PatternLayout pattern="%d (%t) [%p - %l] %m%n" />
    </Console>
    <RollingFile name="LogFile" fileName="${LOG_DIR}/flume.log" filePattern="${LOG_DIR}/archive/flume.log.%d{yyyyMMdd}-%i">
      <PatternLayout pattern="%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %equals{%x}{[]}{} - %m%n" />
      <Policies>
        <!-- Roll every night at midnight or when the file reaches 100MB -->
        <SizeBasedTriggeringPolicy size="100 MB"/>
        <CronTriggeringPolicy schedule="0 0 0 * * ?"/>
      </Policies>
      <DefaultRolloverStrategy min="1" max="20">
        <Delete basePath="${LOG_DIR}/archive">
          <!-- Nested conditions: the inner condition is only evaluated on files for which the outer conditions are true. -->
          <IfFileName glob="flume.log.*">
            <!-- Only allow 1 GB of files to accumulate -->
            <IfAccumulatedFileSize exceeds="1 GB"/>
          </IfFileName>
        </Delete>
      </DefaultRolloverStrategy>
    </RollingFile>
  </Appenders>
  <Loggers>
    <Logger name="org.apache.flume.lifecycle" level="info"/>
    <Logger name="org.jboss" level="WARN"/>
    <Logger name="org.apache.avro.ipc.netty.NettyTransceiver" level="WARN"/>
    <Logger name="org.apache.hadoop" level="INFO"/>
    <Logger name="org.apache.hadoop.hive" level="ERROR"/>
    <Root level="INFO">
      <AppenderRef ref="LogFile" />
      <AppenderRef ref="Console" />
    </Root>
  </Loggers>
</Configuration>
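With this configuration, Flume writes its runtime log to /opt/module/flume/log/flume.log, rolls it nightly or at 100 MB, and prunes the archive once it exceeds roughly 1 GB. Once an agent has run, the output can be inspected at the paths taken from the config above:

ls -lh /opt/module/flume/log/flume.log
ls -lh /opt/module/flume/log/archive/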
Configure Flume
On the hadoop102 node, create file_to_kafka.conf in Flume's job directory:

mkdir /opt/module/flume/job
vim /opt/module/flume/job/file_to_kafka.conf
# Define the components (the Kafka channel delivers events to Kafka directly, so no sink is configured)
a1.sources = r1
a1.channels = c1
# Configure the source
a1.sources.r1.type = TAILDIR
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /opt/module/applog/log/app.*
a1.sources.r1.positionFile = /opt/module/flume/taildir_position.json
# Configure the channel
a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c1.kafka.bootstrap.servers = hadoop102:9092,hadoop103:9092
a1.channels.c1.kafka.topic = topic_log
a1.channels.c1.parseAsFlumeEvent = false
# Bind the source to the channel
a1.sources.r1.channels = c1
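Before scripting the agents, the pipeline can be smoke-tested by hand. The Kafka home below (/opt/module/kafka) is an assumption about your layout; adjust the path to your installation:

# Terminal 1: consume from the target topic (assumed Kafka home: /opt/module/kafka)
/opt/module/kafka/bin/kafka-console-consumer.sh --bootstrap-server hadoop102:9092 --topic topic_log
# Terminal 2: start the agent in the foreground
/opt/module/flume/bin/flume-ng agent -n a1 -c /opt/module/flume/conf/ -f /opt/module/flume/job/file_to_kafka.conf
# Terminal 3: append a line matching the TAILDIR glob (create the directory first if no log generator has run)
mkdir -p /opt/module/applog/log
echo '{"smoke":"test"}' >> /opt/module/applog/log/app.test.log

Because parseAsFlumeEvent = false, the consumer should print the raw line itself rather than a serialized Flume event.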
Next, write a start/stop script for the collection agents on both nodes:

vim fl.sh
#!/bin/bash
# Validate the argument: only start and stop are accepted
if [[ "$1" != "start" && "$1" != "stop" ]]; then
    echo "Usage: $0 {start|stop}"
    exit 1
fi

case $1 in
"start")
    # Start the Flume collection agent on hadoop102 and hadoop103
    for i in hadoop102 hadoop103
    do
        echo " --------Starting Flume collection on $i--------"
        ssh $i "nohup /opt/module/flume/bin/flume-ng agent -n a1 -c /opt/module/flume/conf/ -f /opt/module/flume/job/file_to_kafka.conf >/dev/null 2>&1 &"
        # Note: this only checks that the ssh invocation succeeded;
        # the agent keeps running in the background on the remote host
        if [ $? -ne 0 ]; then
            echo "Failed to start Flume on $i"
        fi
    done
;;
"stop")
    # Stop the Flume collection agent on hadoop102 and hadoop103
    for i in hadoop102 hadoop103
    do
        echo " --------Stopping Flume collection on $i--------"
        # xargs joins multiple PIDs onto one line so a single kill handles them all
        pid=$(ssh $i "ps -ef | grep file_to_kafka | grep -v grep | awk '{print \$2}' | xargs")
        if [ -n "$pid" ]; then
            ssh $i "kill $pid"
            if [ $? -ne 0 ]; then
                echo "Failed to stop Flume on $i, trying to force-kill"
                ssh $i "kill -9 $pid"
            fi
        else
            echo "No running Flume collection task on $i"
        fi
    done
;;
esac
chmod 777 fl.sh
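Usage, from any node that can ssh to hadoop102 and hadoop103 without a password:

./fl.sh start
# Verify: the process should show up under the conf file name the script greps for
ssh hadoop102 "ps -ef | grep file_to_kafka | grep -v grep"
./fl.sh stop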