#!/bin/sh
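# Submit the GxDBMysqlApplication Spark job to YARN. The spark-sql-kafka
# package below suggests a Kafka-backed Structured Streaming job; clearing
# the checkpoint directory first makes it restart with fresh state.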
hdfs dfs -rm -r /tmp/checkpoint/gx_db_mysql
CONF_DIR=/home/hadoop/gx_bigdata/gx_db/conf/mysql
APP_CONF=application.conf

# G1 GC and GC-logging flags shared by the driver and executors (JDK 8
# syntax; the Print* flags were removed in JDK 9+). Defined once so the
# two extraJavaOptions settings below stay in sync.
GC_OPTS="-XX:+UseG1GC -XX:+PrintFlagsFinal -XX:+PrintReferenceGC -verbose:gc -XX:NewRatio=6 -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+HeapDumpOnOutOfMemoryError -XX:+PrintAdaptiveSizePolicy -XX:+UnlockDiagnosticVMOptions -XX:+G1SummarizeConcMark -XX:InitiatingHeapOccupancyPercent=35 -XX:ConcGCThreads=8"
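# yarn-cluster submit: waitAppCompletion=false returns control to this shell
# right after submission, and maxAppAttempts=1 disables YARN-level restarts.
# 3 executors x 3 cores = 9 cores total, so spark.sql.shuffle.partitions=27
# gives 3 shuffle tasks per core.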
/usr/local/service/spark/bin/spark-submit \
--class com.gx.GxDBMysqlApplication \
--name 'com.gx.GxDBMysqlApplication' \
--master yarn \
--deploy-mode cluster \
--driver-memory 2g \
--driver-cores 1 \
--executor-memory 2g \
--executor-cores 3 \
--num-executors 3 \
--packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.2 \
--conf spark.yarn.maxAppAttempts=1 \
--conf spark.yarn.submit.waitAppCompletion=false \
--conf spark.dynamicAllocation.enabled=false \
--conf spark.sql.shuffle.partitions=27 \
--conf spark.eventLog.enabled=true \
--conf spark.shuffle.file.buffer=16k \
--conf spark.reducer.maxSizeInFlight=32m \
--conf spark.shuffle.io.maxRetries=5 \
--conf "spark.driver.extraJavaOptions=-XX:+UseG1GC -XX:+PrintFlagsFinal -XX:+PrintReferenceGC -verbose:gc -XX:NewRatio=6 -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+HeapDumpOnOutOfMemoryError -XX:+PrintAdaptiveSizePolicy -XX:+UnlockDiagnosticVMOptions -XX:+G1SummarizeConcMark -XX:InitiatingHeapOccupancyPercent=35 -XX:ConcGCThreads=8" \
--conf "spark.executor.extraJavaOptions=-XX:+UseG1GC -XX:+PrintFlagsFinal -XX:+PrintReferenceGC -verbose:gc -XX:NewRatio=6 -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+HeapDumpOnOutOfMemoryError -XX:+PrintAdaptiveSizePolicy -XX:+UnlockDiagnosticVMOptions -XX:+G1SummarizeConcMark -XX:InitiatingHeapOccupancyPercent=35 -XX:ConcGCThreads=8" \
--conf "spark.memory.fraction=0.8" \
--files "$CONF_DIR/$APP_CONF,$CONF_DIR/log4j.properties,$CONF_DIR/metrics.properties" \
target/gx_hbase-1.0.jar
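
# Because waitAppCompletion=false, the submit call above does not block;
# a running instance can be checked afterwards with, e.g.:
#   yarn application -list -appStates RUNNING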