# Configure a static IP on the NIC (repeat on every node with its own address).
vi /etc/sysconfig/network-scripts/ifcfg-ens33
# "static no" was garbled — the actual ifcfg keys are:
BOOTPROTO=static
ONBOOT=yes
# One IPADDR per machine; replace "自己" with your own subnet octet:
#   master  -> 192.168.自己.100
#   slave01 -> 192.168.自己.101
#   slave02 -> 192.168.自己.102
#   slave03 -> 192.168.自己.103
IPADDR=192.168.自己.100
# Fixed typo: GATWAY -> GATEWAY (a misspelled key is silently ignored,
# leaving the host with no default route).
GATEWAY=192.168.自己.1
service network restart
# Map cluster hostnames to IPs so the nodes can reach each other by name
# (replace "自己" with your own subnet octet, matching the ifcfg addresses).
vi /etc/hosts
192.168.自己.100 master
192.168.自己.101 slave01
192.168.自己.102 slave02
192.168.自己.103 slave03
# Set THIS machine's hostname; use slave01/slave02/slave03 on the other
# nodes respectively (takes effect after re-login or reboot).
vi /etc/hostname
master
# Push the shared hosts file to every slave so name resolution is
# identical cluster-wide. /etc/hosts is a regular file, so the
# recursive -r flag was unnecessary and has been dropped.
for host in slave01 slave02 slave03; do
  scp /etc/hosts "${host}:/etc/hosts"
done
# Generate an RSA key pair non-interactively (empty passphrase, default
# path) and install the public key on each slave for passwordless SSH.
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
# Fixed: "ssh-copy-id -i root@slaveXX" makes -i treat the user@host
# argument as the identity-file path. -i must be followed by the public
# key file (or be omitted to use the default).
for host in slave01 slave02 slave03; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub "root@${host}"
done
# core-site.xml: the value to use is "master" — presumably replacing
# localhost in the fs.defaultFS URI; confirm against the existing file.
vi core-site.xml
master
# hdfs-site.xml: the value "2" is presumably dfs.replication (number of
# HDFS block replicas) — confirm against the existing file.
vi hdfs-site.xml
2
# 追加 = "append" — add the property block below to hdfs-site.xml:
追加
<!-- SecondaryNameNode HTTP service address and port -->
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>master:50090</value>
</property>
# mapred-site.xml: append the two JobHistory-server properties below so
# finished-job information is served from the master node.
vi mapred-site.xml
# 追加 = "append":
追加
<!-- MapReduce JobHistory server IPC address -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<!-- MapReduce JobHistory server web UI address -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
# yarn-site.xml: 改localhost为master = "change localhost to master"
# (presumably in the ResourceManager hostname value — confirm in file).
vi yarn-site.xml
改localhost为master
# Worker list: the hosts that run DataNode/NodeManager, one per line.
# NOTE(review): the file is named "slaves" in Hadoop 2.x; Hadoop 3.x
# renamed it to "workers" — confirm which Hadoop version is installed.
vi /opt/hadoop/etc/hadoop/slaves
slave01
slave02
slave03
# Distribute the JDK, the Hadoop installation, the hadoop-record data
# directory and the environment script from the master to every slave.
# Same scp invocations as before, grouped per destination host.
for host in slave01 slave02 slave03; do
  scp -r /opt/jdk "${host}:/opt/jdk"
  scp -r /opt/hadoop "${host}:/opt/hadoop"
  scp -r /opt/hadoop-record "${host}:/opt/hadoop-record"
  scp -r /etc/profile.d/hadoop-eco.sh "${host}:/etc/profile.d/hadoop-eco.sh"
done
# On EVERY node (master and each slave — 各从机): load the Hadoop
# environment and disable the firewall so the daemons can reach each
# other on their service ports.
source /etc/profile.d/hadoop-eco.sh
systemctl stop firewalld.service
systemctl disable firewalld.service

# On the MASTER only — the original notes placed these under the
# "each slave" heading, but the NameNode is formatted exactly once, on
# the master. Re-formatting an existing cluster wipes HDFS metadata and
# desynchronizes the DataNodes' cluster ID.
hdfs namenode -format
# Start HDFS + YARN across the whole cluster (reads the slaves file and
# launches the remote daemons over the passwordless SSH set up above).
start-all.sh