1. xsync cluster distribution script
Refer to this blog post for the installation steps:
https://blog.youkuaiyun.com/yhblog/article/details/84066218
#!/bin/bash
# 1. Get the number of input arguments; exit immediately if there are none
pcount=$#
if ((pcount==0)); then
    echo no args;
    exit;
fi
# 2. Get the file name
p1=$1
fname=`basename $p1`
echo fname=$fname
# 3. Resolve the parent directory to an absolute path
pdir=`cd -P $(dirname $p1); pwd`
echo pdir=$pdir
# 4. Get the current user name
user=`whoami`
# 5. Loop over the target hosts and rsync the file or directory to each
for ((host=102; host<105; host++)); do
    echo ----------------hadoop$host------------------
    rsync -av $pdir/$fname $user@hadoop$host:$pdir
done
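A minimal usage example, assuming the script is saved as xsync and made executable; the path below is only illustrative:
chmod +x xsync
./xsync /opt/module/hadoop-2.7.2/etc/hadoop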
2. ZooKeeper cluster start/stop script
#!/bin/bash
# Start, stop, or check ZooKeeper on every node in the cluster
case $1 in
"start"){
    for i in hadoop102 hadoop103 hadoop104
    do
        ssh $i "/opt/module/zookeeper-3.4.10/bin/zkServer.sh start"
    done
};;
"stop"){
    for i in hadoop102 hadoop103 hadoop104
    do
        ssh $i "/opt/module/zookeeper-3.4.10/bin/zkServer.sh stop"
    done
};;
"status"){
    for i in hadoop102 hadoop103 hadoop104
    do
        ssh $i "/opt/module/zookeeper-3.4.10/bin/zkServer.sh status"
    done
};;
esac
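For a quick check, assuming the script above is saved as zk.sh and made executable:
./zk.sh start
./zk.sh status
./zk.sh stop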
3. Log generation script
#!/bin/bash
# Start the log-collector jar in the background on each node;
# the script's two arguments are passed straight through to the jar
for i in hadoop102 hadoop103
do
    echo ---------generating logs on $i----------
    ssh $i "java -jar /opt/module/log-collector-1.0-SNAPSHOT-jar-with-dependencies.jar $1 $2 >/dev/null 2>&1 &"
done
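Invocation sketch, assuming the script is saved as lg.sh; the two argument values below are placeholders whose meaning is defined by the log-collector jar itself:
./lg.sh 300 1000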
4. zkmanager script (ZooKeeper start/stop)
Invoked as: ./zkmanager.sh start (or stop)
#! /bin/bash
# Pass the first argument (start/stop) through to zkServer.sh on every node
for host in hdp-1 hdp-2 hdp-3
do
    echo "${host}:${1}ing...."
    ssh $host "source /etc/profile;/root/apps/zookeeper-3.4.6/bin/zkServer.sh $1"
done
# Wait a moment, then report the status of each node
sleep 2
for host in hdp-1 hdp-2 hdp-3
do
    ssh $host "source /etc/profile;/root/apps/zookeeper-3.4.6/bin/zkServer.sh status"
done
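For example:
./zkmanager.sh start
./zkmanager.sh stop
Note that the status loop at the end runs regardless of which argument is passed, so the script prints each node's status after both start and stop.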
5. Kafka cluster start script
Invoked as: ./start-kafka-cluster.sh
#!/bin/bash
brokers="hdp-1 hdp-2 hdp-3"
KAFKA_HOME="/root/apps/kafka_2.12-2.2.0"
KAFKA_NAME="kafka_2.12-2.2.0"
echo "Starting Kafka ..."
for broker in $brokers
do
    echo "INFO : Starting ${KAFKA_NAME} on ${broker} ..."
    ssh ${broker} -C "source /etc/profile; sh ${KAFKA_HOME}/bin/kafka-server-start.sh -daemon ${KAFKA_HOME}/config/server.properties"
done
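A minimal sanity check that each broker actually came up, assuming the JDK's jps is available on every node (a running broker shows up as the Kafka process):
for broker in hdp-1 hdp-2 hdp-3
do
    ssh $broker "source /etc/profile; jps | grep Kafka"
done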
6. Kafka cluster stop script
Invoked as: ./stop-kafka-cluster.sh
#!/bin/bash
brokers="hdp-1 hdp-2 hdp-3"
KAFKA_HOME="/root/apps/kafka_2.12-2.2.0"
KAFKA_NAME="kafka_2.12-2.2.0"
echo "Stopping Kafka ..."
for broker in $brokers
do
    echo "INFO : Shutting down ${KAFKA_NAME} on ${broker} ..."
    ssh ${broker} "source /etc/profile;bash ${KAFKA_HOME}/bin/kafka-server-stop.sh"
done
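The two Kafka scripts can also be folded into a single script that follows the same case ... esac pattern as the ZooKeeper script above; a sketch (the file name kafka.sh is just a suggestion):
#!/bin/bash
brokers="hdp-1 hdp-2 hdp-3"
KAFKA_HOME="/root/apps/kafka_2.12-2.2.0"
case $1 in
"start"){
    for broker in $brokers
    do
        ssh ${broker} "source /etc/profile; ${KAFKA_HOME}/bin/kafka-server-start.sh -daemon ${KAFKA_HOME}/config/server.properties"
    done
};;
"stop"){
    for broker in $brokers
    do
        ssh ${broker} "source /etc/profile; ${KAFKA_HOME}/bin/kafka-server-stop.sh"
    done
};;
esac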
7. Elasticsearch start/stop script
In root's home directory: vi es-cluster.sh
Start: ./es-cluster.sh start
Stop: ./es-cluster.sh stop
#!/bin/bash
case $1 in
"start")
{
    for i in hdp-1 hdp-2 hdp-3
    do
        echo "==========Starting ES on $i=========="
        # Elasticsearch refuses to run as root, so switch to the zpark user first
        ssh $i "su zpark<<!
cd /root/apps/elasticsearch-6.3.1
./bin/elasticsearch -d
!"
    done
};;
"stop")
{
    for i in hdp-1 hdp-2 hdp-3
    do
        echo "========Stopping ES on $i==============="
        ssh $i "ps -ef|grep elasticsearch-6.3.1 |grep -v grep|awk '{print \$2}'|xargs kill" >/dev/null 2>&1
    done
};;
esac
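Once started, the cluster can be verified over the HTTP API; this assumes Elasticsearch is listening on its default port 9200:
curl http://hdp-1:9200/_cat/health?v
curl http://hdp-1:9200/_cat/nodes?v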
8. Logger start/stop script (the logger in the real-time project)
In applog/gmall under root's home directory: vi logger-cluster.sh
Start/stop: ./logger-cluster.sh start or ./logger-cluster.sh stop
#!/bin/bash
JAVA_BIN=/root/apps/jdk1.8.0_201/bin/java
APPNAME=gmall-logger-0.0.1-SNAPSHOT.jar
# SERVER_PORT is declared here but not passed to the jar below
SERVER_PORT=8080
case $1 in
"start")
{
    for i in hdp-1 hdp-2 hdp-3
    do
        echo "========Starting logger service on $i==============="
        ssh $i "$JAVA_BIN -Xms32m -Xmx64m -jar /root/applog/gmall/$APPNAME >/dev/null 2>&1 &"
    done
};;
"stop")
{
    for i in hdp-1 hdp-2 hdp-3
    do
        echo "========Stopping logger service on $i==============="
        ssh $i "ps -ef|grep $APPNAME |grep -v grep|awk '{print \$2}'|xargs kill" >/dev/null 2>&1
    done
};;
esac
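A quick way to confirm the logger processes are actually running on each node:
for i in hdp-1 hdp-2 hdp-3
do
    ssh $i "ps -ef | grep gmall-logger | grep -v grep"
done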