前言:
上一篇博客HDFS伪分布式部署基于管理员root部署成功,但是实际生产中使用 Hadoop 用户操作及配置
官网参考:http://hadoop.apache.org/docs/r2.8.4/hadoop-project-dist/hadoop-common/SingleCluster.html
1、检查hadoop用户是否存在
2、创建hadoop用户
3、停止hadoop服务及权限
4、配置hadoop用户SSH互信
# 设置hadoop用户SSH互信(新用户需要重新设置ssh)
# 其他用户启动hadoop,authorized_keys必须授权0600
5、修改HDFS三大进程对外服务IP
#读写都是通过NameNode名称节点访问IP
#修改DataNode数据节点访问IP
#修改SecondaryNameNode第二名称节点访问IP
6、删除root用户的DFS文件及DFS磁盘格式化
7、hadoop用户启动
上一篇博客HDFS伪分布式部署基于管理员root部署成功,但是实际生产中使用 Hadoop 用户操作及配置
官网参考:http://hadoop.apache.org/docs/r2.8.4/hadoop-project-dist/hadoop-common/SingleCluster.html
1、检查hadoop用户是否存在
-
[root@hadoop001 ~]# id hadoop
- uid=516(hadoop) gid=516(hadoop) groups=516(hadoop)
2、创建hadoop用户
-
[root@hadoop001 ~]# useradd hadoop
-
[root@hadoop001 ~]# passwd hadoop
-
Changing password for user hadoop.
- New password:
3、停止hadoop服务及权限
-
[root@hadoop001 ~]# kill -9 $(pgrep -f hadoop)
- [root@hadoop001 ~]# jps
- 15803 Jps
-
[root@hadoop001 software]# pwd
/opt/software - [root@hadoop001 software]# chown -R hadoop:hadoop hadoop-2.8.1
4、配置hadoop用户SSH互信
# 设置hadoop用户SSH互信(新用户需要重新设置ssh)
# 其他用户启动hadoop,authorized_keys必须授权0600
-
[root@hadoop001 .ssh]# su - hadoop
-
[hadoop@hadoop001 ~]$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
-
Generating public/private rsa key pair.
-
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
-
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
-
The key fingerprint is:
-
68:5e:33:dd:2a:d2:01:e7:4e:4e:7b:4a:72:7d:67:83 hadoop@hadoop001
-
The key's randomart image is:
+--[ RSA 2048]----+
| |
| |
| . . |
| = . . |
| o S . . |
| o B * . . |
| + O + E + |
| = + . o . |
| . |
+-----------------+
[hadoop@hadoop001 ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@hadoop001 ~]$ chmod 0600 ~/.ssh/authorized_keys
#检查通过hadoop用户互信
[hadoop@hadoop001 ~]$ chmod 0600 ~/.ssh/authorized_keys
[hadoop@hadoop001 ~]$ ssh -p2222 localhost date --ssh端口为2222
Fri May 18 17:32:59 EDT 2018 -
5、修改HDFS三大进程对外服务IP
#读写都是通过NameNode名称节点访问IP
-
[root@hadoop001 hadoop]# pwd
-
/opt/software/hadoop-2.8.1/etc/hadoop
-
-
[root@hadoop001 hadoop]# vim core-site.xml
-
--添加<property>,localhost修改为IP
- <configuration>
-
<property>
-
<name>fs.defaultFS</name>
-
<value>hdfs://localhost:9000</value>
- </property>
- </configuration>
#修改DataNode数据节点访问IP
-
[root@hadoop001 hadoop]# pwd
-
/opt/software/hadoop-2.8.1/etc/hadoop
-
-
[root@hadoop001 hadoop]# vim slaves
- localhost --localhost修改为IP,多DN逗号分隔ip
#修改SecondaryNameNode第二名称节点访问IP
-
[root@hadoop001 hadoop]# vim hdfs-site.xml
-
-- 添加<property>
-
<configuration>
-
<property>
-
<name>dfs.namenode.secondary.http-address</name>
-
<value>192.168.0.129:50090</value>
-
</property>
-
-
<property>
-
<name>dfs.namenode.secondary.https-address</name>
-
<value>192.168.0.129:50091</value>
-
</property>
-
<property>
-
<name>dfs.replication</name>
-
<value>1</value>
-
</property>
- </configuration>
6、删除root用户的DFS文件及DFS磁盘格式化
-
[root@hadoop001 tmp]# rm -rf /tmp/hadoop-* /tmp/hsperfdata-*
-
[root@hadoop001 tmp]# su - hadoop
- [hadoop@hadoop001 hadoop-2.8.1]$ hdfs namenode -format
7、hadoop用户启动
-
[hadoop@hadoop001 sbin]$ pwd
-
/opt/software/hadoop-2.8.1/sbin
-
[hadoop@hadoop001 sbin]$ ./start-dfs.sh -- 第一次启动输入密码
-
Starting namenodes on [hadoop001]
-
hadoop001: starting namenode, logging to /opt/software/hadoop-2.8.1/logs/hadoop-hadoop-namenode-hadoop001.out
-
192.168.0.129: starting datanode, logging to /opt/software/hadoop-2.8.1/logs/hadoop-hadoop-datanode-hadoop001.out
-
Starting secondary namenodes [hadoop001]
- hadoop001: starting secondarynamenode, logging to /opt/software/hadoop-2.8.1/logs/hadoop-hadoop-secondarynamenode-hadoop001.out
来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/31441024/viewspace-2154712/,如需转载,请注明出处,否则将追究法律责任。
转载于:http://blog.itpub.net/31441024/viewspace-2154712/