Hadoop 3.1.4 fully distributed (HA) cluster: minimal configuration files

  • core-site.xml
    <configuration>
        <property>
          <name>fs.defaultFS</name>
          <value>hdfs://nn</value>
        </property>
        <property>
          <name>hadoop.tmp.dir</name>
          <value>/pro/hadoop/tmp</value>
        </property>
         <property>
           <name>ha.zookeeper.quorum</name>
           <value>ha1:2181,ha2:2181,ha3:2181</value>
         </property>
    </configuration>
  • hdfs-site.xml

    <configuration>
        <property>
            <name>dfs.replication</name>
            <value>3</value>
        </property>
        <property>
          <name>dfs.nameservices</name>
          <value>nn</value>
        </property>
        <property>
          <name>dfs.ha.namenodes.nn</name>
          <value>nn1,nn2</value>
        </property>
        <property>
          <name>dfs.namenode.rpc-address.nn.nn1</name>
          <value>ha1:8020</value>
        </property>
        <property>
          <name>dfs.namenode.rpc-address.nn.nn2</name>
          <value>ha2:8020</value>
        </property>
        <property>
          <name>dfs.namenode.http-address.nn.nn1</name>
          <value>ha1:9870</value>
        </property>
        <property>
          <name>dfs.namenode.http-address.nn.nn2</name>
          <value>ha2:9870</value>
        </property>
        <property>
          <name>dfs.namenode.shared.edits.dir</name>
          <value>qjournal://ha1:8485;ha2:8485;ha3:8485/nn</value>
        </property>
        <property>
          <name>dfs.client.failover.proxy.provider.nn</name>
          <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
        <property>
          <name>dfs.ha.fencing.methods</name>
          <value>sshfence</value>
        </property>
        <property>
          <name>dfs.ha.fencing.ssh.private-key-files</name>
          <value>/root/.ssh/id_rsa</value>
        </property>
        <property>
          <name>dfs.ha.fencing.ssh.connect-timeout</name>
          <value>30000</value>
        </property>
        <property>
          <name>dfs.journalnode.edits.dir</name>
          <value>/usr/local/hadoop/journaldata</value>
        </property>
        <property>
          <name>dfs.ha.nn.not-become-active-in-safemode</name>
          <value>true</value>
        </property>
         <property>
           <name>dfs.ha.automatic-failover.enabled</name>
           <value>true</value>
         </property>
    </configuration>
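    Once both NameNodes are up, their HA state can be checked from any node. A minimal sketch, using the nameservice "nn" and the NameNode ids nn1/nn2 defined above:

    # Query the active/standby state of each NameNode
    hdfs haadmin -getServiceState nn1
    hdfs haadmin -getServiceState nn2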
  • mapred-site.xml
    <configuration>
        <property>
            <name>mapreduce.framework.name</name>
            <value>yarn</value>
        </property>
        <property>
            <name>mapreduce.application.classpath</name>
            <value>$HADOOP_HOME/share/hadoop/mapreduce/*:$HADOOP_HOME/share/hadoop/mapreduce/lib/*</value>
        </property>
        <property>
            <name>yarn.app.mapreduce.am.env</name>
            <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
        </property>
        <property>
            <name>mapreduce.map.env</name>
            <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
        </property>
        <property>
            <name>mapreduce.reduce.env</name>
            <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
        </property>
    </configuration>
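    After the cluster is running, the bundled examples jar gives a quick end-to-end check of MapReduce on YARN. A minimal sketch, assuming HADOOP_HOME=/pro/hadoop-3.1.4 as set in /etc/profile below:

    # Submit the example pi job (2 map tasks, 10 samples each) to YARN
    hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.4.jar pi 2 10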
     
  • workers
    ha1
    ha2
    ha3
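    The configuration files must be identical on every host listed here. A minimal distribution sketch, assuming Hadoop is installed at /pro/hadoop-3.1.4 on each node and passwordless root SSH is already in place (it is also required by sshfence):

    # Push the finished config directory from ha1 to the other nodes
    for host in ha2 ha3; do
        rsync -av /pro/hadoop-3.1.4/etc/hadoop/ ${host}:/pro/hadoop-3.1.4/etc/hadoop/
    done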
  • yarn-site.xml
    <configuration>
        <property>
            <name>yarn.nodemanager.aux-services</name>
            <value>mapreduce_shuffle</value>
        </property>
        <property>
            <name>yarn.nodemanager.env-whitelist</name>
            <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
        </property>
        <property>
          <name>yarn.resourcemanager.ha.enabled</name>
          <value>true</value>
        </property>
        <property>
          <name>yarn.resourcemanager.cluster-id</name>
          <value>cluster1</value>
        </property>
        <property>
          <name>yarn.resourcemanager.ha.rm-ids</name>
          <value>rm1,rm2</value>
        </property>
        <property>
          <name>yarn.resourcemanager.hostname.rm1</name>
          <value>ha1</value>
        </property>
        <property>
          <name>yarn.resourcemanager.hostname.rm2</name>
          <value>ha2</value>
        </property>
        <property>
          <name>yarn.resourcemanager.webapp.address.rm1</name>
          <value>ha1:8088</value>
        </property>
        <property>
          <name>yarn.resourcemanager.webapp.address.rm2</name>
          <value>ha2:8088</value>
        </property>
        <property>
           <name>yarn.log-aggregation-enable</name>
           <value>true</value>
        </property>
        <property>
          <name>hadoop.zk.address</name>
          <value>ha1:2181,ha2:2181,ha3:2181</value>
        </property>
    </configuration>
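    With ResourceManager HA enabled, the state of rm1 and rm2 can be checked once YARN is running. A minimal sketch using the rm-ids defined above:

    # Query the active/standby state of each ResourceManager
    yarn rmadmin -getServiceState rm1
    yarn rmadmin -getServiceState rm2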

  • hadoop-env.sh
    export HDFS_NAMENODE_USER=root   # set the run-as user for each Hadoop daemon role
    export HDFS_SECONDARYNAMENODE_USER=root
    export HDFS_DATANODE_USER=root
    export HDFS_JOURNALNODE_USER=root
    export HDFS_ZKFC_USER=root
    export YARN_NODEMANAGER_USER=root
    export YARN_RESOURCEMANAGER_USER=root
    export JAVA_HOME=/pro/jdk1.8.0_211

  • zoo.cfg (replace the entire file with the following)

    # The number of milliseconds of each tick
    tickTime=2000
    # The number of ticks that the initial 
    # synchronization phase can take
    initLimit=10
    # The number of ticks that can pass between 
    # sending a request and getting an acknowledgement
    syncLimit=5
    # the directory where the snapshot is stored.
    # do not use /tmp for storage, /tmp here is just 
    # example sakes.
    #dataDir=/pro/zookeeper
    # the port at which the clients will connect
    #clientPort=2181
    # the maximum number of client connections.
    # increase this if you need to handle more clients
    #maxClientCnxns=60
    #
    # Be sure to read the maintenance section of the 
    # administrator guide before turning on autopurge.
    #
    # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
    #
    # The number of snapshots to retain in dataDir
    #autopurge.snapRetainCount=3
    # Purge task interval in hours
    # Set to "0" to disable auto purge feature
    #autopurge.purgeInterval=1

    ## Metrics Providers
    #
    # https://prometheus.io Metrics Exporter
    #metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
    #metricsProvider.httpPort=7000
    #metricsProvider.exportJvmInfo=true
    dataDir=/pro/zookeeper/data
    dataLogDir=/pro/zookeeper/logs
    clientPort=2181
    server.1=ha1:2888:3888
    server.2=ha2:2888:3888
    server.3=ha3:2888:3888
    Note: create the dataDir/dataLogDir directories and a myid file on each node; see the sketch below.
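    A minimal sketch of those steps, run on every ZooKeeper node (the myid value must match that host's server.N line: 1 on ha1, 2 on ha2, 3 on ha3):

    mkdir -p /pro/zookeeper/data /pro/zookeeper/logs
    echo 1 > /pro/zookeeper/data/myid    # use 2 on ha2, 3 on ha3
    zkServer.sh start                    # verify with: zkServer.sh status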

  • At first startup, start the JournalNodes before formatting the NameNode: hdfs --daemon start journalnode (run on ha1, ha2 and ha3); a full first-start sequence is sketched below
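    A minimal sketch of a typical first-time start sequence for this layout (ZooKeeper must already be running on ha1-ha3; the format commands are run only once):

    # On ha1, ha2 and ha3: start the JournalNodes
    hdfs --daemon start journalnode
    # On ha1 only: format and start the first NameNode
    hdfs namenode -format
    hdfs --daemon start namenode
    # On ha2 only: sync metadata from ha1 and start the standby NameNode
    hdfs namenode -bootstrapStandby
    hdfs --daemon start namenode
    # On ha1 only: initialize the HA state in ZooKeeper
    hdfs zkfc -formatZK
    # On ha1: start the remaining HDFS daemons and YARN
    start-dfs.sh
    start-yarn.sh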

  • /etc/profile
     

    export JAVA_HOME=/pro/jdk1.8.0_211
    export PATH=$JAVA_HOME/bin:$PATH

    export HADOOP_HOME=/pro/hadoop-3.1.4
    export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH

    export ZOOKEEPER_HOME=/pro/zookeeper
    export PATH=$ZOOKEEPER_HOME/bin:$PATH
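    After editing /etc/profile, reload it and confirm the binaries resolve; a quick sketch:

    source /etc/profile
    hadoop version       # should report 3.1.4
    which zkServer.sh    # should resolve under /pro/zookeeper/bin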

  • If Hive is used (hive-site.xml)
    <?xml version="1.0" encoding="UTF-8" standalone="no"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <configuration>
        <property>
            <name>javax.jdo.option.ConnectionURL</name>
            <value>jdbc:mysql://hadoop0:3306/hive?useSSL=false</value>
        </property>
        <property>
            <name>javax.jdo.option.ConnectionDriverName</name>
            <value>com.mysql.jdbc.Driver</value>
        </property>
        <property>
            <name>javax.jdo.option.ConnectionUserName</name>
            <value>root</value>
        </property>
        <property>
            <name>javax.jdo.option.ConnectionPassword</name>
            <value>123456</value>
        </property>
        <property>
            <name>hive.metastore.schema.verification</name>
            <value>false</value>
        </property>
    </configuration>
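    Before starting Hive against this metastore, the MySQL JDBC driver must be on Hive's classpath and the schema initialized. A minimal sketch, assuming HIVE_HOME is set; the connector jar name is only an example and may differ:

    # Copy the MySQL JDBC driver into Hive's lib directory (jar name is illustrative)
    cp mysql-connector-java-5.1.49.jar $HIVE_HOME/lib/
    # Initialize the metastore schema in the "hive" database on hadoop0
    schematool -dbType mysql -initSchema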
