BaseAppV2

public abstract class BaseAppV2 {

    /**
     * Job-specific processing logic, supplied by each concrete application.
     *
     * @param env           the configured stream execution environment
     * @param sourceStreams one Kafka-backed source stream per topic, keyed by topic name
     * @param isLocalOrTest the runtime environment flag ("local" / "test" / "produce")
     */
    public abstract void run(StreamExecutionEnvironment env,
                             Map<String, DataStream<String>> sourceStreams,
                             String isLocalOrTest
                           );

    /**
     * Builds the Flink execution environment, configures checkpointing and the
     * environment-specific state backend, creates one Kafka source stream per
     * topic, delegates topology construction to {@link #run}, and executes the job.
     *
     * @param parallelism   default operator parallelism
     * @param ck            checkpoint directory suffix appended to the environment's base path
     * @param groupId       Kafka consumer group id
     * @param isLocalOrTest runtime environment flag ("local" / "test" / "produce")
     * @param topics        Kafka topics to consume, one source stream each
     * @throws IOException              if state-backend initialization fails
     * @throws IllegalArgumentException if {@code isLocalOrTest} is not a known environment
     * @throws IllegalStateException    if job execution fails
     */
    public void init(int parallelism, String ck, String groupId, String isLocalOrTest, String... topics) throws IOException {
        System.getProperties().setProperty(CommonConstant.HADOOP_USER_NAME, CommonConstant.HDFS_USER);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(parallelism);
        // 1. Enable checkpointing every 5 minutes with at-least-once semantics.
        env.enableCheckpointing(5 * 60 * 1000, CheckpointingMode.AT_LEAST_ONCE);
        // 2. Restart strategy: up to 3 attempts, 3 seconds apart.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(3)));
        // 3. Fine-tune checkpoint behaviour.
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // 4. Abort any checkpoint that takes longer than 10 minutes.
        checkpointConfig.setCheckpointTimeout(1000 * 60 * 10);
        // 5. Require at least 4 minutes between the end of one checkpoint and the start of the next.
        checkpointConfig.setMinPauseBetweenCheckpoints(1000 * 60 * 4);
        // 6. Allow only one checkpoint in flight at a time.
        checkpointConfig.setMaxConcurrentCheckpoints(1);
        // 7. Retain externalized checkpoints on cancellation so the job can be restored.
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // 8. Pick the checkpoint storage path per environment; "local" keeps the default backend.
        if (CommonConstant.TEST.equalsIgnoreCase(isLocalOrTest)) {
            env.setStateBackend(new FsStateBackend(CommonConstant.CHECKPOINT_TEST_POSITION + ck));
        } else if (CommonConstant.PRODUCE.equalsIgnoreCase(isLocalOrTest)) {
            env.setStateBackend(new FsStateBackend(CommonConstant.CHECKPOINT_PROD_POSITION + ck));
        }

        // Create one Kafka source per topic, choosing the source implementation by environment.
        final Map<String, DataStream<String>> sourceStreams = new HashMap<>();
        if (CommonConstant.LOCAL.equalsIgnoreCase(isLocalOrTest)) {
            for (String topic : topics) {
                sourceStreams.put(topic, env.addSource(new MyLocalKafkaSource(groupId, topic)));
            }
        } else if (CommonConstant.TEST.equalsIgnoreCase(isLocalOrTest)
                || CommonConstant.PRODUCE.equalsIgnoreCase(isLocalOrTest)) {
            for (String topic : topics) {
                sourceStreams.put(topic, env.addSource(MyKafkaUtil.getFlinkKafkaSource(groupId, topic, isLocalOrTest)));
            }
        } else {
            // BUG FIX: the original only printed a message here and then executed a job with
            // no sources. Fail fast so the misconfiguration is visible to the caller.
            throw new IllegalArgumentException(
                    "Unknown environment '" + isLocalOrTest + "'; expected local, test or produce.");
        }

        // Hand over to the concrete application to build its topology.
        run(env, sourceStreams, isLocalOrTest);

        try {
            env.execute();
        } catch (Exception e) {
            // BUG FIX: the original swallowed execution failures via printStackTrace(),
            // leaving callers unaware the job died. Propagate with the original cause.
            throw new IllegalStateException("Flink job execution failed", e);
        }
    }
}

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值