Linux虚拟机一键安装Hadoop伪分布式脚本

此脚本最初仅供个人使用,如需使用请根据自身环境修改。下文讲解需要修改的内容,完整脚本如下:

#!/bin/bash

# Script location:   /opt/modules/
# Tarball location:  /opt/modules/hadoop-3.3.1.tar.gz
# Install directory: /opt/installs
# Version installed: hadoop-3.3.1


# Directory the script was launched from (kept for reference).
CRTDIR=$(pwd)

# Absolute path of the Hadoop tarball to install.
HadoopTargz="/opt/modules/hadoop-3.3.1.tar.gz"

printf '%s\n' "$HadoopTargz"

# 检查原先是否已配置hadoop环境变量
# Remove any previously configured Hadoop entries from the profile so the
# fresh exports appended later are not duplicated.
# Arguments: $1 - profile file to clean (optional, defaults to /etc/profile)
# Fix: the original captured grep -n output into a variable; with more than
# one matching line that variable holds several newline-separated numbers and
# "sed -i \"${n}d\"" becomes malformed. A pattern-delete removes every match.
checkExist(){
    local profile="${1:-/etc/profile}"

    if grep -q "export HADOOP_HOME=" "$profile" 2>/dev/null; then
        echo "HADOOP_HOME已配置,删除内容"
        sed -i '/export HADOOP_HOME=/d' "$profile"
    fi

    if grep -q 'export PATH=.*\$HADOOP_HOME' "$profile" 2>/dev/null; then
        echo "PATH-HADOOP路径已配置,删除内容"
        sed -i '/export PATH=.*\$HADOOP_HOME/d' "$profile"
    fi
}
# 查询是否有hadoop-3.3.1.tar.gz
# Install Hadoop if the tarball exists.
if [ -e "$HadoopTargz" ]; then

    echo "— — 存在hadoop压缩包 — —"

    # Remove leftovers from a previous run BEFORE extracting; otherwise the
    # later 'mv' would nest the new tree inside the old directory.
    # Bug fix: the original tested and removed "/opt/install/..." (missing
    # the trailing "s"), so the cleanup never matched the real install
    # directory /opt/installs and stale installs survived.
    if [ -e "/opt/installs/hadoop" ]; then
        echo "存在/opt/installs/hadoop,删除..."
        rm -rf /opt/installs/hadoop
    fi
    if [ -e "/opt/installs/hadoop-3.3.1" ]; then
        echo "存在/opt/installs/hadoop-3.3.1,删除..."
        rm -rf /opt/installs/hadoop-3.3.1
    fi

    echo "正在解压hadoop压缩包..."
    tar -zxvf "$HadoopTargz" -C /opt/installs

    # Rename the extracted directory to the version-independent name.
    mv /opt/installs/hadoop-3.3.1 /opt/installs/hadoop

    # Strip any stale HADOOP_HOME/PATH entries before appending fresh ones.
    checkExist
    echo "---------------------------------"
    echo "正在配置hadoop环境..."
    sed -i '$a export HADOOP_HOME=/opt/installs/hadoop' /etc/profile
    sed -i '$a export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin' /etc/profile
    echo "---------------------------------"
    echo "HADOOP环境配置已完成..."
    echo "---------------------------------"
    echo "正在重新加载配置文件..."
    echo "---------------------------------"
    # NOTE: this only affects the shell running this script; an interactive
    # user still has to re-login or source /etc/profile themselves.
    source /etc/profile
    echo "配置版本信息如下:"
    hadoop version
    echo "HADOOP安装完成"
else
    # Bug fix: message said /opt but the script actually looks in /opt/modules.
    echo "未检测到安装包,请将安装包放到/opt/modules目录下"
fi

# Start writing the pseudo-distributed configuration files.
# If you installed to a custom path, adjust every path below, e.g.
# /opt/installs/hadoop/etc/hadoop/hadoop-env.sh -> /your/path/hadoop/etc/hadoop/hadoop-env.sh
# This script assumes the VM's IP is 192.168.99.101 — change it for your host.


## hadoop-env.sh — run-all-daemons-as-root users plus JAVA_HOME.
# Fix: the original appended unconditionally, so re-running the script kept
# duplicating these export lines. Guard on a marker line so the append is
# idempotent. The delimiter is quoted ('EOF') since nothing here should expand.
HADOOP_ENV_FILE=/opt/installs/hadoop/etc/hadoop/hadoop-env.sh
if ! grep -q '^export HDFS_NAMENODE_USER=root' "$HADOOP_ENV_FILE" 2>/dev/null; then
cat << 'EOF' >> "$HADOOP_ENV_FILE"
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root

export JAVA_HOME=/opt/installs/jdk
EOF
fi

## core-site.xml — overwritten (>) on every run, so this step is idempotent.
# Configures: NameNode RPC endpoint (fs.defaultFS, port 9820 per the Hadoop
# 3.x comment below), the base tmp dir, root proxy-user grants, the static
# web UI user, and disabled permission checking.
# NOTE(review): dfs.permissions.enabled is an HDFS property and would
# conventionally live in hdfs-site.xml; presumably it still takes effect here
# because HDFS loads core-site.xml too — verify on your cluster.
# The heredoc body is written verbatim to the generated file (including its
# XML comments) — do not edit casually.
cat << EOF > /opt/installs/hadoop/etc/hadoop/core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <!-- 设置namenode节点 -->
  <!-- 注意: hadoop1.x时代默认端⼝9000 hadoop2.x时代默认端⼝8020 hadoop3.x时 代默认端⼝ 9820 -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://192.168.99.101:9820</value>
  </property>
  
  <!-- hdfs的基础路径,被其他属性所依赖的⼀个基础路径 -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/installs/hadoop/tmp</value>
  </property>

  <property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
</property>
<property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
</property>
<property>
    <name>hadoop.http.staticuser.user</name>
    <value>root</value>
</property>
<!-- 不开启权限检查 -->
<property>
   <name>dfs.permissions.enabled</name>
   <value>false</value>
</property>

</configuration>
EOF

## hdfs-site.xml — overwritten (>) on every run, so this step is idempotent.
# Configures: replication factor 1 (single node), SecondaryNameNode HTTP
# address (:9868), NameNode web UI address (:9870), and WebHDFS enabled.
# The heredoc body is written verbatim to the generated file.
cat << EOF > /opt/installs/hadoop/etc/hadoop/hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <!--备份数量-->
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <!--secondarynamenode守护进程的http地址:主机名和端⼝号。参考守护进程布局 -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>192.168.99.101:9868</value>
    </property>
    <!-- namenode守护进程的http地址:主机名和端⼝号。参考守护进程布局 -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>192.168.99.101:9870</value>
    </property>

    <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
    </property>

</configuration>
EOF

## yarn-site.xml — overwritten (>) on every run, so this step is idempotent.
# Configures: the mapreduce_shuffle aux service, the ResourceManager host,
# an explicit application classpath rooted at /opt/installs/hadoop, disabled
# virtual/physical memory checks (common for small VMs), and log aggregation
# with a 7-day (604800 s) retention.
# The heredoc body is written verbatim to the generated file.
cat << EOF > /opt/installs/hadoop/etc/hadoop/yarn-site.xml
<?xml version="1.0"?>
<configuration>
  <!-- mapreduce计算服务方法 -->
  <property>
      <name>yarn.nodemanager.aux-services</name>
      <value>mapreduce_shuffle</value>
  </property>

  <!-- 配置resourcemanager的主机ip -->
  <property>
      <name>yarn.resourcemanager.hostname</name>
      <value>192.168.99.101</value>
  </property>

  <property>
		  <name>yarn.application.classpath</name>
		  <value>
				/opt/installs/hadoop/etc/hadoop:/opt/installs/hadoop/share/hadoop/common/*:/opt/installs/hadoop/share/hadoop/common/lib/*:/opt/installs/hadoop/share/hadoop/hdfs/*:/opt/installs/hadoop/share/hadoop/hdfs/lib/*:/opt/installs/hadoop/share/hadoop/mapreduce/*:/opt/installs/hadoop/share/hadoop/mapreduce/lib/*:/opt/installs/hadoop/share/hadoop/yarn/*:/opt/installs/hadoop/share/hadoop/yarn/lib/*
		  </value>
    </property>	

	<property>
      <name>yarn.nodemanager.vmem-check-enabled</name>
      <value>false</value>
    </property>

   <property>
      <name>yarn.nodemanager.pmem-check-enabled</name>
      <value>false</value>
   </property>
   <!-- 开启日志聚合 -->
<property>
  <name>yarn.log-aggregation-enable</name>
  <value>true</value>
</property>

<!-- 设置日志保存时间 7 天 -->
<property>
  <name>yarn.log-aggregation.retain-seconds</name>
  <value>604800</value>
</property>
</configuration>
EOF

## mapred-site.xml — overwritten (>) on every run, so this step is idempotent.
# Configures: YARN as the MapReduce framework, and the JobHistory server
# RPC (:10020) and web UI (:19888) addresses.
# The heredoc body is written verbatim to the generated file.
cat << EOF > /opt/installs/hadoop/etc/hadoop/mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- 历史服务器端地址 -->
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>192.168.99.101:10020</value>
</property>

<!-- 历史服务器 web 端地址 -->
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>192.168.99.101:19888</value>
</property>
</configuration>
EOF

## workers — single-entry worker list: this host itself (pseudo-distributed).
cat << EOF >/opt/installs/hadoop/etc/hadoop/workers
192.168.99.101
EOF


# Initialize (format) the NameNode. Guard on the hdfs binary actually being
# resolvable so a failed install above does not fall through to a confusing
# "command not found" followed by a success banner.
if command -v hdfs >/dev/null 2>&1; then
    hdfs namenode -format

    # Announce success (fixed broken-English message "has successed").
    echo "install hadoop has succeeded"
else
    echo "hdfs command not found; skipping namenode format" >&2
fi

# Only affects this script's own shell — an interactive user must re-login
# or run 'source /etc/profile' themselves to pick up HADOOP_HOME/PATH.
source /etc/profile

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值