Hadoop 3.1.1 Installation Notes

After extracting Hadoop 3.1.1, change into the hadoop-3.1.1 directory:

bin: executable binaries

etc: configuration files

lib: libraries Hadoop depends on

Configure the Hadoop environment variables (here in /etc/profile):

export HADOOP_HOME=/export/servers/hadoop-3.1.1
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

Then reload the profile so the variables take effect:

source /etc/profile
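
A quick sanity check that the variables took effect (the expected output below assumes the install path used throughout this article):

hadoop version       # should report Hadoop 3.1.1
echo $HADOOP_HOME    # should print /export/servers/hadoop-3.1.1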

 

The files we need to configure live under etc/hadoop:

capacity-scheduler.xml            httpfs-log4j.properties     mapred-site.xml
configuration.xsl                 httpfs-signature.secret     shellprofile.d
container-executor.cfg            httpfs-site.xml             ssl-client.xml.example
core-site.xml                     kms-acls.xml                ssl-server.xml.example
hadoop-env.cmd                    kms-env.sh                  user_ec_policies.xml.template
hadoop-env.sh                     kms-log4j.properties        workers
hadoop-metrics2.properties        kms-site.xml                yarn-env.cmd
hadoop-policy.xml                 log4j.properties            yarn-env.sh
hadoop-user-functions.sh.example  mapred-env.cmd              yarnservice-log4j.properties
hdfs-site.xml                     mapred-env.sh               yarn-site.xml
httpfs-env.sh                     mapred-queues.xml.template

I am still at the learning stage, so I configure six files in total. According to a senior classmate, if Java connection errors and exceptions start showing up in your Hadoop logs, the fix they use is to redo the entire Hadoop configuration from scratch: when the early preparation is sloppy, all kinds of errors come flooding in.

The HDFS cluster consists of a NameNode (master) and DataNodes (workers).

The YARN cluster consists of a ResourceManager (master) and NodeManagers (workers).

Why format the HDFS NameNode: formatting creates the directories that will hold the NameNode metadata (the fsimage and the edit log).

Tip: the command to format the HDFS NameNode is

hdfs namenode -format
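
After a successful format, the NameNode metadata directory configured in hdfs-site.xml below should contain a fresh fsimage; a quick way to check:

ls /export/servers/hadoop-3.1.1/datas/namenode/namenodedatas/current
# expect VERSION, seen_txid and an fsimage_... file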

 

Start the cluster:

# Logs into every worker and starts the relevant daemons; the daemons can also be started by hand, but there is no need
start-dfs.sh
start-yarn.sh
mapred --daemon start historyserver
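
To confirm everything came up, jps on each node should show the expected daemons. On this layout, master is also listed in the workers file:

jps
# on master, expect NameNode, SecondaryNameNode, ResourceManager,
# JobHistoryServer, plus DataNode and NodeManager
# on slave1/slave2, expect DataNode and NodeManager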

 

core-site.xml

<configuration>
	<!-- File system type: the distributed file system -->
	<!-- This says the NameNode listens on host master, port 8020 -->
<property>
	<name>fs.defaultFS</name>
	<value>hdfs://master:8020</value>
</property>

<!-- Directory for temporary files -->
<property>
	<name>hadoop.tmp.dir</name>
	<value>/export/servers/hadoop-3.1.1/datas/tmp</value>
</property>
<!-- Buffer size; in real deployments, tune it to the server's capacity -->
<property>
	<name>io.file.buffer.size</name>
	<value>8192</value>
</property>
<!-- Enable the HDFS trash: deleted data can be recovered within this interval, given in minutes -->
<property>
	<name>fs.trash.interval</name>
	<value>10080</value>
</property>
</configuration>
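
With fs.defaultFS set, plain hadoop fs commands already target this cluster, and with the trash enabled a delete is recoverable. A minimal check (the file name is just an example; the trash path depends on which user runs the command):

hadoop fs -ls /                            # talks to hdfs://master:8020
hadoop fs -touchz /demo.txt
hadoop fs -rm /demo.txt                    # moved into the trash, not deleted
hadoop fs -ls /user/root/.Trash/Current    # the file stays here for 10080 minutes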

hadoop-env.sh

export JAVA_HOME=/export/servers/jdk1.8.0_152
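
One Hadoop 3.x note: if the start scripts are run as root, they refuse to launch the daemons unless the daemon users are declared. In that case (and only then, an assumption about your setup) these lines are commonly added to hadoop-env.sh as well:

export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root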

hdfs-site.xml

<configuration>
<property>
	<!-- Where the NameNode stores its metadata -->
	<!-- The metadata is the most critical data in HDFS -->
	<name>dfs.namenode.name.dir</name>
	<value>file:///export/servers/hadoop-3.1.1/datas/namenode/namenodedatas</value>
</property>

<!-- Block size -->
<!-- Large files are split into blocks of this size: 134217728 bytes = 128 MB -->
<property>
	<name>dfs.blocksize</name>
	<value>134217728</value>
</property>

<!-- Number of RPC handler threads on the NameNode -->
<property>
	<name>dfs.namenode.handler.count</name>
	<value>10</value>
</property>

<!-- DataNode data directory -->
<property>
	<name>dfs.datanode.data.dir</name>
	<value>file:///export/servers/hadoop-3.1.1/datas/datanode/datanodeDatas</value>
</property>

<!-- Host and port of the NameNode web UI -->
<!-- Hadoop 3.x defaults to 9870; this setup keeps the classic 50070 -->
<property>
	<name>dfs.namenode.http-address</name>
	<value>master:50070</value>
</property>

<!-- Number of replicas per block -->
<!-- Why set it? To avoid losing data when a single machine goes down -->
<!-- An HDFS file is split into blocks and each block gets 3 identical copies, so one dead machine does not hurt data integrity -->
<property>
	<name>dfs.replication</name>
	<value>3</value>
</property>

<!-- HDFS permission checking -->
<!-- false turns it off (convenient while learning) -->
<property>
	<name>dfs.permissions.enabled</name>
	<value>false</value>
</property>

<!-- Checkpoint edits directory for the SecondaryNameNode -->
<property>
	<name>dfs.namenode.checkpoint.edits.dir</name>
	<value>file:///export/servers/hadoop-3.1.1/datas/dfs/nn/snn/edits</value>
</property>

<!-- SecondaryNameNode web UI address -->
<property>
	<name>dfs.namenode.secondary.http-address</name>
	<value>master.hadoop.com:50090</value>
</property>

<!-- Where the NameNode edit log is stored (this is metadata, not the daemon logs) -->
<property>
	<name>dfs.namenode.edits.dir</name>
	<value>file:///export/servers/hadoop-3.1.1/datas/dfs/nn/edits</value>
</property>

<!-- Checkpoint image directory used by the SecondaryNameNode -->
<property>
	<name>dfs.namenode.checkpoint.dir</name>
	<value>file:///export/servers/hadoop-3.1.1/datas/dfs/snn/name</value>
</property>

</configuration>
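
To verify these values are actually picked up, hdfs getconf reads the effective configuration:

hdfs getconf -confKey dfs.blocksize      # 134217728
hdfs getconf -confKey dfs.replication    # 3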


mapred-site.xml

<configuration>
	<!-- Run MapReduce jobs on the YARN cluster -->
<property>
	<name>mapreduce.framework.name</name>
	<value>yarn</value>
</property>

<!-- Memory for each map task: 1024 MB -->
<property>
	<name>mapreduce.map.memory.mb</name>
	<value>1024</value>
</property>

<!-- JVM heap for map tasks; must fit inside mapreduce.map.memory.mb -->
<property>
	<name>mapreduce.map.java.opts</name>
	<value>-Xmx512M</value>
</property>

<!-- Map-side sort buffer size, in MB -->
<property>
	<name>mapreduce.task.io.sort.mb</name>
	<value>256</value>
</property>

<!-- Number of streams merged at once when sorting spills -->
<property>
	<name>mapreduce.task.io.sort.factor</name>
	<value>100</value>
</property>

<!-- Parallel copiers during the reduce-side shuffle -->
<property>
	<name>mapreduce.reduce.shuffle.parallelcopies</name>
	<value>25</value>
</property>

<!-- JobHistory server RPC and web UI addresses -->
<property>
	<name>mapreduce.jobhistory.address</name>
	<value>master.hadoop.com:10020</value>
</property>

<property>
	<name>mapreduce.jobhistory.webapp.address</name>
	<value>master.hadoop.com:19888</value>
</property>

<!-- Working and final directories for job history files -->
<property>
	<name>mapreduce.jobhistory.intermediate-done-dir</name>
	<value>/export/servers/hadoop-3.1.1/datas/jobhistory/intermediateDoneDatas</value>
</property>

<property>
	<name>mapreduce.jobhistory.done-dir</name>
	<value>/export/servers/hadoop-3.1.1/datas/jobhistory/DoneDatas</value>
</property>

<!-- Point the MR ApplicationMaster and tasks at this Hadoop install (required on Hadoop 3.x) -->
<property>
	<name>yarn.app.mapreduce.am.env</name>
	<value>HADOOP_MAPRED_HOME=/export/servers/hadoop-3.1.1</value>
</property>

<property>
	<name>mapreduce.map.env</name>
	<value>HADOOP_MAPRED_HOME=/export/servers/hadoop-3.1.1</value>
</property>

<!-- Reduce tasks need the same hint, or they may fail to find the MR classes -->
<property>
	<name>mapreduce.reduce.env</name>
	<value>HADOOP_MAPRED_HOME=/export/servers/hadoop-3.1.1</value>
</property>

</configuration>
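
With YARN as the execution framework, the bundled examples jar gives a quick end-to-end test (the jar version matches this install):

hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.1.jar pi 2 4
# a successful pi estimate proves HDFS, YARN and the history server all cooperate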

yarn-site.xml

<configuration>
	
<!-- Note: dfs.namenode.handler.count is an HDFS setting and has no effect in yarn-site.xml; it belongs in hdfs-site.xml, which already sets it above -->
<property>
	<name>dfs.namenode.handler.count</name>
	<value>100</value>
</property>

<!-- Aggregate container logs after an application finishes -->
<property>
	<name>yarn.log-aggregation-enable</name>
	<value>true</value>
</property>


<!-- ResourceManager RPC, scheduler, tracker, admin and web UI addresses -->
<property>
	<name>yarn.resourcemanager.address</name>
	<value>master:8032</value>
</property>

<property>
	<name>yarn.resourcemanager.scheduler.address</name>
	<value>master:8030</value>
</property>

<property>
	<name>yarn.resourcemanager.resource-tracker.address</name>
	<value>master:8031</value>
</property>

<property>
	<name>yarn.resourcemanager.admin.address</name>
	<value>master:8033</value>
</property>

<property>
	<name>yarn.resourcemanager.webapp.address</name>
	<value>master:8088</value>
</property>

<property>
	<name>yarn.resourcemanager.hostname</name>
	<value>master</value>
</property>
<!-- Smallest and largest container YARN will allocate, in MB -->
<property>
	<name>yarn.scheduler.minimum-allocation-mb</name>
	<value>1024</value>
</property>

<property>
	<name>yarn.scheduler.maximum-allocation-mb</name>
	<value>2048</value>
</property>

<!-- Virtual-to-physical memory ratio allowed for containers -->
<property>
	<name>yarn.nodemanager.vmem-pmem-ratio</name>
	<value>2.1</value>
</property>


<!-- Disable the virtual-memory check; otherwise containers get killed when memory is tight -->
<property>
	<name>yarn.nodemanager.vmem-check-enabled</name>
	<value>false</value>
</property>

<!-- Memory available to containers on each NodeManager, in MB -->
<property>
	<name>yarn.nodemanager.resource.memory-mb</name>
	<value>1024</value>
</property>

<!-- Hardware auto-detection only applies while resource.memory-mb keeps its -1 default, so the explicit value above wins -->
<property>
	<name>yarn.nodemanager.resource.detect-hardware-capabilities</name>
	<value>true</value>
</property>

<!-- NodeManager local working directories -->
<property>
	<name>yarn.nodemanager.local-dirs</name>
	<value>file:///export/servers/hadoop-3.1.1/datas/nodemanager/nodemanagerDatas</value>
</property>

<!-- NodeManager container log directories -->
<property>
	<name>yarn.nodemanager.log-dirs</name>
	<value>file:///export/servers/hadoop-3.1.1/datas/nodemanager/nodemanagerLogs</value>
</property>

<!-- How long the NodeManager keeps logs locally (seconds); only used when log aggregation is off -->
<property>
	<name>yarn.nodemanager.log.retain-seconds</name>
	<value>10800</value>
</property>

<!-- Where aggregated application logs are stored -->
<property>
	<name>yarn.nodemanager.remote-app-log-dir</name>
	<value>/export/servers/hadoop-3.1.1/datas/remoteAppLog/remoteAppLogs</value>
</property>

<property>
	<name>yarn.nodemanager.remote-app-log-dir-suffix</name>
	<value>logs</value>
</property>

<!-- Auxiliary shuffle service that MapReduce requires -->
<property>
	<name>yarn.nodemanager.aux-services</name>
	<value>mapreduce_shuffle</value>
</property>

<!-- Keep aggregated logs for 18144000 s (210 days); check for expired logs once a day -->
<property>
	<name>yarn.log-aggregation.retain-seconds</name>
	<value>18144000</value>
</property>

<property>
	<name>yarn.log-aggregation.retain-check-interval-seconds</name>
	<value>86400</value>
</property>
<!-- The MR ApplicationMaster for a YARN job normally wants about 1.5 GB; if the VM does not have that much memory, lower this value or jobs will fail -->
<property>
	<name>yarn.app.mapreduce.am.resource.mb</name>
	<value>300</value>
</property>

</configuration>
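
Once the ResourceManager and the NodeManagers are up, this shows whether all three nodes have registered (the web UI lives at http://master:8088, as configured above):

yarn node -list
# expect master, slave1 and slave2 in RUNNING state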

workers

master
slave1
slave2
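
All of these config files must be identical on every node. Assuming passwordless SSH to slave1 and slave2 is already in place, one way to sync the whole config directory:

for host in slave1 slave2; do
    scp -r /export/servers/hadoop-3.1.1/etc/hadoop $host:/export/servers/hadoop-3.1.1/etc/
done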

 

Create the data and temporary directories:

mkdir -p /export/servers/hadoop-3.1.1/datas/tmp
mkdir -p /export/servers/hadoop-3.1.1/datas/dfs/nn/snn/edits
mkdir -p /export/servers/hadoop-3.1.1/datas/namenode/namenodedatas
mkdir -p /export/servers/hadoop-3.1.1/datas/datanode/datanodeDatas
mkdir -p /export/servers/hadoop-3.1.1/datas/dfs/nn/edits
mkdir -p /export/servers/hadoop-3.1.1/datas/dfs/snn/name
mkdir -p /export/servers/hadoop-3.1.1/datas/jobhistory/intermediateDoneDatas
mkdir -p /export/servers/hadoop-3.1.1/datas/jobhistory/DoneDatas
mkdir -p /export/servers/hadoop-3.1.1/datas/nodemanager/nodemanagerDatas
mkdir -p /export/servers/hadoop-3.1.1/datas/nodemanager/nodemanagerLogs
mkdir -p /export/servers/hadoop-3.1.1/datas/remoteAppLog/remoteAppLogs
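
The DataNode and NodeManager paths must exist on slave1 and slave2 as well; again assuming passwordless SSH, a short loop covers it:

for host in slave1 slave2; do
    ssh $host mkdir -p \
        /export/servers/hadoop-3.1.1/datas/tmp \
        /export/servers/hadoop-3.1.1/datas/datanode/datanodeDatas \
        /export/servers/hadoop-3.1.1/datas/nodemanager/nodemanagerDatas \
        /export/servers/hadoop-3.1.1/datas/nodemanager/nodemanagerLogs
done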

 
