zkServer启动
xcall.sh zkServer.sh start
hdfs启动
start-dfs.sh
http://hadoop11:9870
安全模式命令
hdfs dfsadmin -safemode enter/get/leave
yarn启动
start-yarn.sh
http://hadoop11:8088
yarn的history服务
mr-jobhistory-daemon.sh start historyserver---对应进程名字JobHistoryServer
hive启动
方式1: hive
方式2: hiveserver2 &
beeline
beeline>!connect jdbc:hive2://hadoop10:10000
hive的metastore服务启动
hive --service metastore >/dev/null 2>&1 &
spark启动
spark-submit --master yarn xxx.py
spark-submit的运行端口
http://hadoop11:4040
spark的history服务启动
/opt/installs/spark/sbin/start-history-server.sh ---对应进程名字HistoryServer
http://hadoop11:18080
hbase启动
start-hbase.sh
http://hadoop11:16010
hbase-manager图形化界面启动
cd /opt/installs/hbase-manager-2.0.8-hbase-2.x/
java -jar hbase-manager-admin-2.0.8.jar
http://hadoop11:9527/login
phoenix启动
/opt/installs/phoenix/bin/sqlline.py hadoop11:2181
mysql服务启动
systemctl start mysqld.service
systemctl stop mysqld.service
systemctl restart mysqld.service
设置服务开机自启动
systemctl enable mysqld.service
systemctl disable mysqld.service
防火墙服务启动
systemctl start firewalld
systemctl stop firewalld
systemctl status firewalld
dolphinscheduler启动
cd /opt/installs/dolphinscheduler_installed/bin
[root@hadoop10 bin]# ./start-all.sh
单独启动一个worker-server
[root@hadoop10 bin]# ./dolphinscheduler-daemon.sh stop worker-server
[root@hadoop10 bin]# ./dolphinscheduler-daemon.sh start worker-server
http://hadoop10:12345/dolphinscheduler
用户名:admin 密码: dolphinscheduler123
datax-web启动
cd /opt/installs/datax-web-2.1.2/
[root@hadoop10 datax-web-2.1.2]# ./bin/start-all.sh
[root@hadoop10 datax-web-2.1.2]# ./bin/stop-all.sh
http://hadoop10:9527/index.html
用户名:admin 密码:123456
flume启动
cd /opt/installs/flume1.9/
bin/flume-ng agent --conf conf --name a1 --conf-file xxx.conf -Dflume.root.logger=INFO,console
CDH服务启动
启动httpd服务
systemctl start httpd.service
http://hadoop01/cm6/6.2.1/redhat7/yum/RPMS/x86_64/
启动server服务进程
systemctl start cloudera-scm-server
systemctl status cloudera-scm-server
查看端口
yum install net-tools
netstat -an | grep 7180
http://hadoop01:7180/cmf/login
账号密码均为admin
kafka启动
xcall.sh kafka-server-start.sh -daemon /opt/installs/kafka/config/server.properties
xcall.sh kafka-server-stop.sh
启动kafka的consumer消费者(topica为消息队列的名字)
bin/kafka-console-consumer.sh --bootstrap-server hadoop11:9092 --topic topica
启动kafka的producer生产者(topica为消息队列的名字)
kafka-console-producer.sh --bootstrap-server hadoop11:9092,hadoop12:9092,hadoop13:9092 --topic topica
kafka eagle启动
cd /opt/installs/eagle/bin/
ke.sh start
http://hadoop11:8048/ke
用户名:admin 密码:123456 token=keadmin
Flink启动&运行
1、会话模式部署
启动hadoop(hdfs+yarn)
执行脚本命令向YARN集群申请资源,开启一个YARN会话,启动Flink集群。
yarn-session.sh -nm test
可以通过yarn-session.sh -h 查看相关参数
在新的窗口运行flink任务
flink run -c com.demo.day1.Demo1_WordCount /opt/flinkjob/flink-test-1.0-SNAPSHOT.jar
2、应用模式部署
上传flink作业jar包到HDFS上(flink的lib和plugins需预先上传到hdfs://hdfs-cluster/flink-dist)
hdfs dfs -put /opt/flinkjob/flink-test-1.0-SNAPSHOT.jar /my-flinkjars
提交flink作业
flink run-application \
-t yarn-application \
-Dyarn.provided.lib.dirs="hdfs://hdfs-cluster/flink-dist" \
-c com.demo.day1.Demo1_WordCount \
hdfs://hdfs-cluster/my-flinkjars/flink-test-1.0-SNAPSHOT.jar
dinky平台启动
cd /opt/installs/dinky
sh auto.sh start
doris启动
伪分布:
/opt/installs/doris/fe/bin/start_fe.sh --daemon
/opt/installs/doris/be/bin/start_be.sh --daemon