1. Set the hostnames
# hostnamectl set-hostname master      (on the master node)
# hostnamectl set-hostname slave1      (on slave1)
# hostnamectl set-hostname slave2      (on slave2)
# reboot     reboot so the new hostname takes effect
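A quick way to confirm the new name took effect (a small verification sketch, not in the original notes):
# hostname               should print master / slave1 / slave2
# hostnamectl status     shows the static hostname that was just set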
2. Host mapping
# vi /etc/hosts     add the IP address and hostname of every node
192.168.118.135 master
192.168.118.128 slave1
192.168.118.129 slave2
# cat /etc/hosts     verify the entries
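Optionally confirm the mapping resolves from every node (a minimal check; run it on each machine):
# ping -c 3 master
# ping -c 3 slave1
# ping -c 3 slave2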
3. Passwordless SSH (the steps below must be run on master, slave1 and slave2)
# ssh-keygen -t rsa     generate a key pair; -t selects the key type (rsa is the common choice, dsa is also possible)
# cd ~/.ssh
# ls     should show id_rsa.pub (public key) and id_rsa (private key)
# cat id_rsa.pub >> authorized_keys
# scp id_rsa.pub slave1:~/     copy the public key to the other nodes (likewise to master and slave2, from every machine)
# cat ~/id_rsa.pub >> ~/.ssh/authorized_keys     on the receiving node, append the transferred public key to authorized_keys
# cat authorized_keys     check that the public keys of all nodes have been added
# ssh slave1     verify the passwordless login (likewise ssh master, ssh slave2)
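If the login still asks for a password, the .ssh permissions are a common cause; ssh-copy-id (shipped with the stock openssh-clients package) can also replace the manual scp/cat steps. A hedged alternative sketch:
# chmod 700 ~/.ssh
# chmod 600 ~/.ssh/authorized_keys
# ssh-copy-id master      appends the local public key to the target's authorized_keys in one step
# ssh-copy-id slave1
# ssh-copy-id slave2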
4. Install the JDK
# tar -xf jdk-8u191-linux-x64.tar -C /opt     extract into /opt
# cd /opt
# mv jdk1.8.0_191 jdk     rename the extracted directory
# vi /etc/profile     add the environment variables
Add:
export JAVA_HOME=/opt/jdk
export PATH=$JAVA_HOME/bin:$JAVA_HOME/lib:$PATH
The Hadoop environment variables can be added at the same time (then use the combined PATH line below instead of the one above):
Add:
export HADOOP_HOME=/opt/hadoop2.7.1
export PATH=$JAVA_HOME/bin:$JAVA_HOME/lib:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
# source /etc/profile     reload the profile
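A quick sanity check that the JDK is on the PATH (verification sketch, not in the original notes):
# java -version       should report java version "1.8.0_191"
# echo $JAVA_HOME     should print /opt/jdk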
5. Install Hadoop
# tar -xf hadoop-2.7.1.tar -C /opt     extract into /opt
# mv /opt/hadoop-2.7.1 /opt/hadoop2.7.1     rename the extracted directory
# vi /etc/profile     add the environment variables (if not already done in step 4)
# source /etc/profile   or   . /etc/profile     reload the environment variables
#echo $PATH
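At this point the hadoop command should resolve; a quick check (assuming the HADOOP_HOME/PATH lines above have been sourced):
# hadoop version     should print Hadoop 2.7.1
# which hadoop       should point at /opt/hadoop2.7.1/bin/hadoop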
# cd /opt/hadoop2.7.1
# ls
#mkdir hdfs
#cd hdfs
# mkdir data name
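The same directory layout can also be created in one command from anywhere (equivalent alternative):
# mkdir -p /opt/hadoop2.7.1/hdfs/name /opt/hadoop2.7.1/hdfs/data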
# cd /opt/hadoop2.7.1/etc/hadoop
5.1 Edit the configuration files
# vi hadoop-env.sh
Change: export JAVA_HOME=/opt/jdk
# vi mapred-env.sh
Change: export JAVA_HOME=/opt/jdk
# vi yarn-env.sh
Same change as above
# vi slaves
Add:
master
slave1
slave2      (delete the existing default entry, normally localhost)
The following four files all get their properties added inside the <configuration> element
# vi core-site.xml
Add:
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/hadoop2.7.1/tmp</value>
</property>
<property>
<name>hadoop.proxyuser.hadoop.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.hadoop.groups</name>
<value>*</value>
</property>
# vi hdfs-site.xml
Add:
<property>
<name>dfs.namenode.http-address</name>
<value>master:50070</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>master:50090</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///opt/hadoop2.7.1/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///opt/hadoop2.7.1/hdfs/data</value>
</property>
<property>
<name>dfs.namenode.checkpoint.dir</name>
<value>file:///opt/hadoop2.7.1/hdfs/name</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.stream-buffer-size</name>
<value>131072</value>
</property>
# mv mapred-site.xml.template mapred-site.xml     rename (only the .template file exists by default)
#vi mapred-site.xml
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
# vi yarn-site.xml
<property>
<name>yarn.resourcemanager.address</name>
<value>master:18040</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:18030</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>master:18088</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:18025</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:18141</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
Create the tmp directory under /opt/hadoop2.7.1 (it is referenced by hadoop.tmp.dir in core-site.xml)
# mkdir /opt/hadoop2.7.1/tmp
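Before distributing the files it is worth confirming that Hadoop parses the edited XML (a minimal check using the bundled hdfs getconf tool):
# hdfs getconf -confKey fs.defaultFS     should print hdfs://master:9000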
#scp -r /opt/* slave1:/opt/
#scp -r /opt/* slave2:/opt/     distribute the jdk and hadoop directories to the slave nodes
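The environment variables were only added to /etc/profile on master; a common follow-up (an assumption, not part of the original notes) is to copy the profile to the slaves as well and source it there:
# scp /etc/profile slave1:/etc/profile
# scp /etc/profile slave2:/etc/profile
then run source /etc/profile on each slave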
# systemctl stop firewalld     stop the firewall (on all three nodes)
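To keep the firewall from coming back after a reboot, it is common (not in the original notes) to disable it as well, on all three nodes:
# systemctl disable firewalld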
# hadoop namenode -format     format the NameNode (run once, on master; hdfs namenode -format is the newer equivalent)
# start-all.sh     start the Hadoop cluster (on master)
# jps     check the running processes (4 on master, 3 on each slave node)
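A few extra health checks (a sketch; the web ports match the values configured above, and the exact jps count depends on whether master is also listed in the slaves file):
# hdfs dfsadmin -report     should list all live DataNodes
NameNode web UI:        http://master:50070   (dfs.namenode.http-address)
ResourceManager web UI: http://master:18088   (yarn.resourcemanager.webapp.address)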