Installation Environment
Ubuntu 20.04 x64
Component versions:
hadoop_version: 3.2.1
spark_version: 3.1.1
zeppelin_version: 0.8.0
Installation Steps
Install Docker (on Ubuntu the Docker engine package is docker.io, not docker):
sudo apt-get install docker.io
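To confirm the daemon is installed and running before moving on (hello-world is a throwaway official test image):
sudo docker --version
sudo systemctl status docker
sudo docker run --rm hello-world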
Install docker-compose:
sudo apt-get install docker-compose
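Checking the installed version is a quick sanity test; the Ubuntu-packaged docker-compose (1.25 on 20.04) understands the version "3" compose format used below:
docker-compose --version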
Configure docker-compose.yml
vim docker-compose.yml
version : "3"
services :
namenode :
image : bde2020/hadoop- namenode: 2.0.0- hadoop3.2.1- java8
container_name : namenode
restart : always
ports :
- 9870: 9870
- 9000: 9000
volumes :
- hadoop_namenode: /hadoop/dfs/name
environment :
- CLUSTER_NAME=test
env_file :
- ./hadoop.env
datanode :
image : bde2020/hadoop- datanode: 2.0.0- hadoop3.2.1- java8
container_name : datanode1
restart : always
volumes :
- hadoop_datanode: /hadoop/dfs/data
ports :
- 9864: 9864
environment :
SERVICE_PRECONDITION : "namenode:9870"
env_file :
- ./hadoop.env
datanode2 :
image : bde2020/hadoop- datanode: 2.0.0- hadoop3.2.1- java8
container_name : datanode2
restart : always
volumes :
- hadoop_datanode2: /hadoop/dfs/data
ports :
- 9863: 9864
environment :
SERVICE_PRECONDITION : "namenode:9870"
env_file :
- ./hadoop.env
resourcemanager :
image : bde2020/hadoop- resourcemanager: 2.0.0- hadoop3.2.1- java8
container_name : resourcemanager
restart : always
environment :
SERVICE_PRECONDITION : "namenode:9000 namenode:9870 datanode:9864 datanode2:9864"
ports :
- 8088: 8088
expose :
- "8032"
- "8031"
- "8030"
env_file :
- ./hadoop.env
nodemanager1 :
image : bde2020/hadoop- nodemanager: 2.0.0- hadoop3.2.1- java8
container_name : nodemanager
restart : always
environment :
SERVICE_PRECONDITION : "namenode:9000 namenode:9870 datanode:9864 datanode2:9864 resourcemanager:8088"
ports :
- 8042: 8042
env_file :
- ./hadoop.env
historyserver :
image : bde2020/hadoop- historyserver: 2.0.0- hadoop3.2.1- java8
container_name : historyserver
restart : always
environment :
SERVICE_PRECONDITION : "namenode:9000 namenode:9870 datanode:9864 datanode2:9864 resourcemanager:8088"
volumes :
- hadoop_historyserver: /hadoop/yarn/timeline
ports :
- 8188: 8188
env_file :
- ./hadoop.env
spark-master :
image : bde2020/spark- master: 3.1.1- hadoop3.2
container_name : spark- master
ports :
- 8080: 8080
- 7077: 7077
env_file :
- ./hadoop.env
spark-worker1 :
image : bde2020/spark- worker: 3.1.1- hadoop3.2
environment :
- "SPARK_MASTER=spark://spark-master:7077"
env_file :
- ./hadoop.env
spark-worker2 :
image : bde2020/spark- worker: 3.1.1- hadoop3.2
environment :
- "SPARK_MASTER=spark://spark-master:7077"
env_file :
- ./hadoop.env
  zeppelin:
    image: bde2020/zeppelin:0.8.0-hadoop-2.8.0-spark-2.3.1
    ports:
      - 80:8080
    volumes:
      - ./notebook:/opt/zeppelin/notebook
    environment:
      SPARK_MASTER: "spark://spark-master:7077"
      MASTER: "spark://spark-master:7077"
    env_file:
      - ./hadoop.env
volumes:
  hadoop_namenode:
  hadoop_datanode:
  hadoop_datanode2:
  hadoop_historyserver:
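Before bringing the cluster up, the file can be sanity-checked; docker-compose prints the fully resolved configuration, or an error pointing at the offending line:
sudo docker-compose config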
Configure hadoop.env
vim hadoop.env
CORE_CONF_fs_defaultFS=hdfs://namenode:9000
CORE_CONF_hadoop_http_staticuser_user=root
CORE_CONF_hadoop_proxyuser_hue_hosts=*
CORE_CONF_hadoop_proxyuser_hue_groups=*
CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec
HDFS_CONF_dfs_webhdfs_enabled=true
HDFS_CONF_dfs_permissions_enabled=false
HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
YARN_CONF_yarn_log___aggregation___enable=true
YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
YARN_CONF_yarn_resourcemanager_recovery_enabled=true
YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=8192
YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=4
YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
YARN_CONF_yarn_resourcemanager_resource___tracker_address=resourcemanager:8031
YARN_CONF_yarn_timeline___service_enabled=true
YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
YARN_CONF_yarn_timeline___service_hostname=historyserver
YARN_CONF_mapreduce_map_output_compress=true
YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
MAPRED_CONF_mapreduce_framework_name=yarn
MAPRED_CONF_mapred_child_java_opts=-Xmx4096m
MAPRED_CONF_mapreduce_map_memory_mb=4096
MAPRED_CONF_mapreduce_reduce_memory_mb=8192
MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m
MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m
MAPRED_CONF_yarn_app_mapreduce_am_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
MAPRED_CONF_mapreduce_map_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
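A note on the naming scheme: the prefix (CORE_CONF, HDFS_CONF, YARN_CONF, MAPRED_CONF) selects the target file (core-site.xml, hdfs-site.xml, yarn-site.xml, mapred-site.xml), and in the rest of the variable name the bde2020 entrypoint appears to translate "___" to "-", "__" to "_", and a single "_" to ".". For example, YARN_CONF_yarn_log___aggregation___enable=true ends up as yarn.log-aggregation-enable=true in yarn-site.xml, and HDFS_CONF_dfs_permissions_enabled=false becomes dfs.permissions.enabled=false in hdfs-site.xml.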
Start the cluster
sudo docker-compose up -d
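The first run pulls several images and takes a while; services guarded by SERVICE_PRECONDITION wait until their dependencies respond before starting. Startup of an individual service can be followed with, for example:
sudo docker-compose logs -f namenode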
Check the running containers
sudo docker ps -a
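For a more compact view showing just names, state, and port mappings (--format takes a standard docker ps Go template):
sudo docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"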
To get a shell inside the namenode or a datanode:
sudo docker exec -it <container_name> /bin/bash
For example, enter the namenode and check the HDFS report (it should list both datanodes as live):
sudo docker exec -it namenode /bin/bash
hdfs dfsadmin -report
Service web UIs and ports
namenode: http://<ip_address>:9870
datanode1: http://<ip_address>:9864
datanode2: http://<ip_address>:9863
resource manager: http://<ip_address>:8088
history server: http://<ip_address>:8188
nodemanager: http://<ip_address>:8042
spark master: http://<ip_address>:8080
zeppelin: http://<ip_address>:80
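Since hadoop.env enables WebHDFS (dfs.webhdfs.enabled=true), the namenode can also be smoke-tested over plain HTTP from the host; LISTSTATUS on the root is a harmless read-only call:
curl -s "http://<ip_address>:9870/webhdfs/v1/?op=LISTSTATUS"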
Common Hadoop cluster commands
hdfs dfs -mkdir /abc                          # create directory /abc
hdfs dfs -ls /                                # list the root directory
hdfs dfs -ls -R /                             # list recursively
hdfs dfs -put /etc/hosts /abc/hosts           # upload a local file to HDFS
hdfs dfs -appendToFile /etc/hosts /abc/hosts  # append local file contents to an HDFS file
hdfs dfs -checksum /abc/hosts                 # show the file checksum
hdfs dfs -du -h /                             # disk usage, human-readable
hdfs dfs -get /abc/hosts ./hosts              # download a file from HDFS
hdfs dfs -cat /abc/hosts                      # print file contents
hdfs dfs -tail /abc/hosts                     # print the last 1KB of the file
hdfs dfs -mv /abc/hosts /abc/xyz              # move/rename within HDFS
hdfs dfs -cp /abc/xyz /abc/hosts              # copy within HDFS
hdfs dfs -find / -name xyz                    # find files by name
hdfs dfs -rmdir /abc                          # remove an empty directory
hdfs dfs -rm /abc/hosts                       # remove a file
hdfs dfs -rm -r /abc                          # remove a directory recursively
hdfs dfs -df                                  # filesystem capacity and usage
hdfs dfsadmin -report                         # cluster-wide datanode report
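To verify the Spark side of the cluster end to end, a job can be submitted from inside the spark-master container. This sketch assumes the bde2020 Spark images keep Spark under /spark and ship the standard examples jar (paths may differ between image versions):
sudo docker exec -it spark-master /bin/bash
/spark/bin/spark-submit \
  --master spark://spark-master:7077 \
  --class org.apache.spark.examples.SparkPi \
  /spark/examples/jars/spark-examples_*.jar 100
While the job runs, it should appear as a running application in the spark master web UI on port 8080.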