Create a hadoop user
sudo useradd -m hadoop -s /bin/bash
sudo passwd hadoop
# Give the hadoop user sudo privileges to make deployment easier
sudo visudo
## Allow root to run any commands anywhere
root ALL=(ALL) ALL
hadoop ALL=(ALL) ALL    # add this line, around line 90
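A quick optional check that the new account and its sudo rights work as expected:
su - hadoop      # switch to the hadoop user
id               # should list the hadoop uid/gid
sudo whoami      # should print root after entering the hadoop password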
Install SSH and configure passwordless SSH login
# CentOS comes with the SSH client and server preinstalled; verify with:
rpm -qa | grep ssh
# If the SSH client or server is missing, install them with:
sudo yum install openssh-clients
sudo yum install openssh-server
ssh localhost    # type yes at the first login prompt
# Configure passwordless login
exit             # leave the ssh localhost session from above
cd ~/.ssh/       # if this directory does not exist, run ssh localhost once first
ssh-keygen -t rsa                     # just press Enter at every prompt
cat id_rsa.pub >> authorized_keys     # authorize the key
chmod 600 ./authorized_keys           # fix the file permissions
# Or configure passwordless login in one step with:
ssh-copy-id localhost
# ssh localhost now logs in without asking for a password
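If the login still prompts for a password, the usual cause is over-permissive directory permissions, which sshd rejects:
ssh localhost 'echo ok'    # should print ok without a password prompt
chmod 700 ~/.ssh           # ~/.ssh must not be group/world writable
ls -l ~/.ssh               # id_rsa and authorized_keys should both be 600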
Install Hadoop
# Get the tarball, copy it to the VM, and extract it the usual way ===> /usr/local/hadoop
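A minimal sketch of that extraction step, assuming the tarball hadoop-2.7.2.tar.gz sits in the hadoop user's home directory (adjust the file name to your version):
sudo tar -zxf ~/hadoop-2.7.2.tar.gz -C /usr/local     # extract to /usr/local/hadoop-2.7.2
sudo mv /usr/local/hadoop-2.7.2 /usr/local/hadoop     # rename to the path used below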
cd /usr/local
sudo chown -R hadoop ./hadoop    # change the owner to the hadoop user
# Check the version
cd /usr/local/hadoop
./bin/hadoop version
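# If the path and permissions are correct, this prints the version banner, e.g. a first line such as "Hadoop 2.7.2"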
Configure environment variables
# Set the following environment variables
vi ~/.bashrc
# Hadoop Environment Variables
export HADOOP_HOME=/usr/local/hadoop    # must point at the actual install directory
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_OPTS="-Djava.library.path=${HADOOP_HOME}/lib/native/"
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
source ~/.bashrc    # apply the changes
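A quick check that the variables took effect in the current shell:
echo $HADOOP_HOME    # should print /usr/local/hadoop
which hadoop         # should resolve to $HADOOP_HOME/bin/hadoop
hadoop version       # now works from any directory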
Pseudo-distributed configuration
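The configuration files in this and the next section live in $HADOOP_HOME/etc/hadoop/; edit them in place, for example:
cd /usr/local/hadoop/etc/hadoop
vi core-site.xml
vi hdfs-site.xml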
<!-- core-site.xml -->
<configuration>
    <!-- Directory where Hadoop stores its runtime files -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/hadoop/data</value>
        <description>A base for other temporary directories.</description>
    </property>
    <!-- Address of the HDFS NameNode -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
    </property>
</configuration>
<!-- hdfs-site.xml -->
<configuration>
    <!-- Number of HDFS replicas; the default is 3 -->
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/local/hadoop/data/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/local/hadoop/data/dfs/data</value>
    </property>
</configuration>
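Once the environment variables above are loaded, the effective values can be double-checked with hdfs getconf:
hdfs getconf -confKey fs.defaultFS        # hdfs://localhost:9000
hdfs getconf -confKey dfs.replication     # 1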
Configure YARN
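Hadoop 2.7.x ships only a template for mapred-site.xml, so copy it before editing:
cd /usr/local/hadoop/etc/hadoop
cp mapred-site.xml.template mapred-site.xml
vi mapred-site.xml
vi yarn-site.xml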
<!-- mapred-site.xml -->
<configuration>
    <!-- Run MapReduce on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
<!-- yarn-site.xml -->
<configuration>
    <!-- How reducers fetch data -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
Start the services
# Format the NameNode. Run this only once; running it again will cause errors
./bin/hdfs namenode -format
./sbin/start-dfs.sh
# Start YARN
./sbin/start-yarn.sh
# Start the history server so that job details can be viewed in the web UI
./sbin/mr-jobhistory-daemon.sh start historyserver
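Verify that all daemons are up with jps; with the configuration above the list should include:
jps
# NameNode, DataNode, SecondaryNameNode    (from start-dfs.sh)
# ResourceManager, NodeManager             (from start-yarn.sh)
# JobHistoryServer                         (from mr-jobhistory-daemon.sh)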
Access the web UIs
HDFS: http://localhost:50070
YARN: http://localhost:8088
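As a final smoke test, create a home directory in HDFS and run one of the bundled example jobs (the jar name assumes version 2.7.2; adjust it to your release). The job should then show up on the YARN UI at http://localhost:8088.
cd /usr/local/hadoop
./bin/hdfs dfs -mkdir -p /user/hadoop
./bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.2.jar pi 2 10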