1. Set up passwordless SSH login from the master to every node, including itself.
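A minimal sketch of the key exchange, assuming the cluster runs as root (the hadoop.proxyuser.root settings below suggest so) and the hosts resolve as node1-node5:
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa                               # key pair with no passphrase
for h in node1 node2 node3 node4 node5; do ssh-copy-id root@$h; done   # push the public key to every node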
2. Set the Hadoop environment variables (in /etc/profile, as in step 10):
export HADOOP_HOME=/opt/op/hadoop-2.7.4
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$HADOOP_HOME/bin
3. In hadoop-env.sh, set JAVA_HOME explicitly (daemons launched over SSH do not inherit the shell's value):
export JAVA_HOME=/opt/op/jdk1.8.0_144
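A quick sanity check for steps 2 and 3 (assuming the variables were added to /etc/profile):
source /etc/profile
hadoop version     # should report Hadoop 2.7.4
echo $JAVA_HOME    # should print /opt/op/jdk1.8.0_144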
4. Configure core-site.xml:
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>/data/hadoop</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://node5:9000</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
<description>
If "true", enable permission checking in HDFS.
If "false", permission checking is turned off,
but all other behavior is unchanged.
Switching from one parameter value to the other does not change the mode,
owner or group of files or directories.
</description>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
</configuration>
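Since hadoop.tmp.dir points at /data/hadoop, it is worth pre-creating the directory on every node (Hadoop can usually create it itself, but doing it up front avoids permission surprises):
mkdir -p /data/hadoop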
5. Configure hdfs-site.xml:
<configuration>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>node5:50090</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/data/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/data/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
<description>
If "true", enable permission checking in HDFS.
If "false", permission checking is turned off,
but all other behavior is unchanged.
Switching from one parameter value to the other does not change the mode,
owner or group of files or directories.
</description>
</property>
</configuration>
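The NameNode and DataNode directories deserve the same treatment on the relevant nodes:
mkdir -p /data/hadoop/dfs/name /data/hadoop/dfs/data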
6. Configure mapred-site.xml:
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
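Two notes here. Hadoop 2.7.4 ships only a template for this file, so copy it first:
cp $HADOOP_HOME/etc/hadoop/mapred-site.xml.template $HADOOP_HOME/etc/hadoop/mapred-site.xml
Also, for MapReduce jobs to actually run, YARN needs at least the shuffle service and the ResourceManager host set in yarn-site.xml; a minimal sketch:
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>node5</value>
</property>
</configuration>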
7. Configure the slaves file (etc/hadoop/slaves), one hostname per line:
node5
node1
node2
node3
node4
8. I did not set up a masters file; Hadoop 2.x does not use one (the SecondaryNameNode host comes from dfs.namenode.secondary.http-address, set above).
9. Copy the whole Hadoop directory from node5 (the master) to every slave, e.g. for node1:
scp -r /opt/op/hadoop-2.7.4 node1:/opt/op/
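Or, covering all four slaves in one go:
for h in node1 node2 node3 node4; do scp -r /opt/op/hadoop-2.7.4 $h:/opt/op/; done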
10. Set the environment variables on each slave:
export HADOOP_HOME=/opt/op/hadoop-2.7.4
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$HADOOP_HOME/bin
Then apply them:
source /etc/profile
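Before the first start, format HDFS once on the master (a standard first-run step; rerunning it later destroys the NameNode metadata):
hdfs namenode -format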
11. On the master (node5), run ./start-all.sh from $HADOOP_HOME/sbin. (The script is deprecated in Hadoop 2.x; start-dfs.sh followed by start-yarn.sh is the documented equivalent.)
12. On the master, run jps and check that the following processes are present (DataNode and NodeManager appear here too because node5 is listed in slaves):
9236 ResourceManager
8788 NameNode
9732 Jps
8920 DataNode
9083 SecondaryNameNode
9341 NodeManager
13. On each slave, run jps and check that the following processes are present:
3190 DataNode
3432 Jps
3246 NodeManager
14. Check the web UIs to confirm everything is up.
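With the configuration above, the relevant pages are (ports are the stock Hadoop 2.7 defaults):
http://node5:50070    # HDFS NameNode UI
http://node5:8088     # YARN ResourceManager UI
http://node5:50090    # SecondaryNameNode UI, matching hdfs-site.xml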