智警杯 & Big Data Skills Competition Finals: All-Purpose Multi-Node Environment Setup Script

Script execution steps:

A : master, B : slave1, C : slave2, D : slave3
Big data script steps:

On master:

Step 1:

mkdir /opt/soft

Then upload all the installation packages into it (see the sketch below).
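A minimal sketch of the upload, run from whichever machine holds the packages (the source path is hypothetical; adjust it to wherever your packages actually live):

# Hypothetical local path; copy everything into /opt/soft on master:
scp /path/to/packages/* root@master:/opt/soft/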

0.sh > switch yum to the competition repo URL

cd /etc/yum.repos.d/
rm -rf  *
wget http://172.16.47.240/bigdata/repofile/bigdata.repo
yum clean all
yum makecache
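Optionally confirm the switch took effect before moving on:

yum repolist enabled   # the bigdata repo should appear in the list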

1.sh > change 3 lines (adapt the hostname and the addresses below to your own nodes)

systemctl stop firewalld
systemctl disable firewalld   # keep the firewall off across reboots

hostnamectl set-hostname master

echo '192.168.223.129 master master.root
192.168.223.130 slave1 slave1.root
192.168.223.131 slave2 slave2.root
192.168.223.132 slave3 slave3.root'>>/etc/hosts

echo 'NETWORK=yes
HOSTNAME=master'>/etc/sysconfig/network
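The four addresses above are examples; 1.sh expects you to substitute the real IPs of your own VMs. Once the slaves are up, a quick resolution check (assumes all nodes are reachable on the same network):

for h in master slave1 slave2 slave3; do ping -c 1 $h; done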


timedatectl set-timezone Asia/Shanghai

ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo "TZ='Asia/Shanghai'; export TZ;">> /etc/profile
source /etc/profile

# Offline alternative if the repo does not carry ntp:
# cd /opt/soft
# rpm -ivh autogen-libopts-5.18-5.el7.x86_64.rpm
# rpm -ivh ntpdate-4.2.6p5-18.el7.centos.x86_64.rpm
# rpm -ivh ntp-4.2.6p5-18.el7.centos.x86_64.rpm
yum install -y ntp
echo 'SYNC_HWCLOCK=yes'>>/etc/sysconfig/ntpd   # also sync the hardware clock

# Comment out the default upstream servers, then serve the local clock
# at stratum 10 so the slaves can sync from master:
sed -i 's/^server/#&/'  /etc/ntp.conf
echo 'server  127.127.0.1
fudge   127.127.0.1   stratum 10'>>/etc/ntp.conf
systemctl restart ntpd.service
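With master serving its local clock, each slave can now sync from it. A minimal sketch, assuming ntpdate is available on the slaves (from yum or the rpm packages in /opt/soft):

# Run on each slave, not on master:
ntpdate master
# Optionally resync every 30 minutes from root's crontab:
echo '*/30 * * * * /usr/sbin/ntpdate master' >> /var/spool/cron/root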


ssh-keygen -R slave1   # clear any stale known_hosts entries
ssh-keygen -R slave2
ssh-keygen -R slave3
/usr/sbin/sshd         # start sshd if it is not already running
ssh-keygen -t dsa -P '' -f /root/.ssh/id_dsa
cat /root/.ssh/id_dsa.pub >> /root/.ssh/authorized_keys
ssh-copy-id -i /root/.ssh/id_dsa.pub slave1
ssh-copy-id -i /root/.ssh/id_dsa.pub slave2
ssh-copy-id -i /root/.ssh/id_dsa.pub slave3
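ssh-copy-id asks for each slave's root password once; afterwards logins should be password-free. A quick check:

# Each command should print the slave's hostname without prompting:
for h in slave1 slave2 slave3; do ssh $h hostname; done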



3.sh > start ZooKeeper and check its status

source /etc/profile
/usr/zookeeper/zookeeper-3.4.10/bin/zkServer.sh start
/usr/zookeeper/zookeeper-3.4.10/bin/zkServer.sh status
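The ZooKeeper install and configuration script (presumably a 2.sh) is not shown in this section; 3.sh assumes zoo.cfg and each node's myid already exist, and it must run on every quorum member, not just master. Once all members are started, a sketch of the expected check (assuming the same install path on the slaves):

# One node should report "Mode: leader", the rest "Mode: follower":
for h in slave1 slave2 slave3; do
    ssh $h /usr/zookeeper/zookeeper-3.4.10/bin/zkServer.sh status
done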



4h.sh > unpack and configure Hadoop

mkdir -p /usr/hadoop
tar -zxvf /opt/soft/hadoop-2.7.3.tar.gz -C /usr/hadoop
cd /usr/hadoop/hadoop-2.7.3/etc/hadoop
echo 'export JAVA_HOME=/usr/java/jdk1.8.0_171'>>hadoop-env.sh
echo '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
    <name>fs.default.name</name>
    <value>hdfs://master:9000</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/hadoop/hadoop-2.7.3/hdfs/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
</property>
<property>
    <name>fs.checkpoint.period</name>
    <value>60</value>
</property>
<property>
    <name>fs.checkpoint.size</name>
    <value>67108864</value>
</property>
</configuration>'>core-site.xml
echo '<?xml version="1.0"?>
<configuration>
<property>
    <name>yarn.resourcemanager.address</name>
    <value>master:18040</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>master:18030</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>master:18088</value>
</property>
<property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>master:18025</value>
</property>
<property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>master:18141</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<!-- Site specific YARN configuration properties -->
</configuration>'>yarn-site.xml
echo 'slave1
slave2
slave3'>slaves
echo 'master'>master   # informational only; 2.x reads the secondary NameNode address from hdfs-site.xml
echo '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
    <name>dfs.replication</name>
    <value>2</value>
</property>
<property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/usr/hadoop/hadoop-2.7.3/hdfs/name</value>
    <final>true</final>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/usr/hadoop/hadoop-2.7.3/hdfs/data</value>
    <final>true</final>
</property>
<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>master:9001</value>
</property>
<property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
</property>
<property>
    <name>dfs.permissions</name>
    <value>false</value>
</property>
</configuration>'>hdfs-site.xml
cp mapred-site.xml.template mapred-site.xml
echo '<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
</configuration>'>mapred-site.xml
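The numbering jumps from 4h.sh to 6h.sh, but before formatting HDFS the configured Hadoop tree must exist on every slave and HADOOP_HOME must be on PATH. A minimal sketch of that missing distribution step (assumes JAVA_HOME was already set by the earlier, unshown JDK script):

for h in slave1 slave2 slave3; do
    scp -r /usr/hadoop root@$h:/usr/
done
echo 'export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin'>>/etc/profile
source /etc/profile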



6h.sh > format HDFS and start the cluster

hadoop namenode -format   # run once only; reformatting wipes HDFS metadata
/usr/hadoop/hadoop-2.7.3/sbin/start-all.sh
# or start the layers separately:
# /usr/hadoop/hadoop-2.7.3/sbin/start-dfs.sh
# /usr/hadoop/hadoop-2.7.3/sbin/start-yarn.sh
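A quick health check after startup; the web UIs should also answer at master:50070 (HDFS) and master:18088 (YARN, per yarn-site.xml above):

jps                                                  # master: NameNode, SecondaryNameNode, ResourceManager
for h in slave1 slave2 slave3; do ssh $h jps; done   # each slave: DataNode, NodeManager
hdfs dfsadmin -report                                # all three DataNodes should be listed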




7hi.sh > unpack Hive and set its environment

mkdir -p /usr/hive
tar -zxvf /opt/soft/apache-hive-2.1.1-bin.tar.gz -C /usr/hive/
echo 'export HIVE_HOME=/usr/hive/apache-hive-2.1.1-bin
export PATH=$PATH:$HIVE_HOME/bin'>>/etc/profile
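A quick smoke test of the Hive environment (the metastore configuration that normally follows is not part of this section):

source /etc/profile
hive --version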