ElasticJob + ShardingJDBC Hello World

This post walks through a small example project that combines ElasticJob and ShardingJDBC: ElasticJob handles job scheduling, while ShardingJDBC provides database read/write splitting plus sharding across databases and tables. The code below covers the job scheduling configuration, the connection-pool setup, and the sharding strategies.

ElasticJob: a simple job scheduled through a ZooKeeper registry center
----------------------------------------------------------------------------
import com.dangdang.ddframe.job.api.ShardingContext;
import com.dangdang.ddframe.job.api.simple.SimpleJob;
import com.dangdang.ddframe.job.config.JobCoreConfiguration;
import com.dangdang.ddframe.job.config.simple.SimpleJobConfiguration;
import com.dangdang.ddframe.job.lite.api.JobScheduler;
import com.dangdang.ddframe.job.lite.config.LiteJobConfiguration;
import com.dangdang.ddframe.job.reg.base.CoordinatorRegistryCenter;
import com.dangdang.ddframe.job.reg.zookeeper.ZookeeperConfiguration;
import com.dangdang.ddframe.job.reg.zookeeper.ZookeeperRegistryCenter;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

public class MySimpleJob implements SimpleJob {
    @Override
    public void execute(ShardingContext shardingContext) {

        // Uncomment to make the job fail and observe failover behaviour:
        //throw new RuntimeException("failover test");
        try {
            // Simulate a 5-second workload.
            TimeUnit.SECONDS.sleep(5);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }

        System.out.println("Task executing, shardingItem=" + shardingContext.getShardingItem());

    }
    public static void main(String[] args) throws IOException {
        CoordinatorRegistryCenter registryCenter = createRegistryCenter();
        // Register and start demoSimpleJob1; the second job can be enabled for comparison.
        new JobScheduler(registryCenter, createJobConfiguration1()).init();
        //new JobScheduler(registryCenter, createJobConfiguration2()).init();
        // Block so the JVM stays alive and the scheduler keeps firing.
        System.in.read();

    }
    private static CoordinatorRegistryCenter createRegistryCenter() {
        // Connect to a local ZooKeeper and isolate this demo under the "elastic-job-demo" namespace.
        CoordinatorRegistryCenter regCenter = new ZookeeperRegistryCenter(new ZookeeperConfiguration("127.0.0.1:2181", "elastic-job-demo"));
        regCenter.init();
        return regCenter;
    }
    private static LiteJobConfiguration createJobConfiguration1() {
        // Core config: fire every 15 seconds, 10 sharding items, failover enabled.
        JobCoreConfiguration simpleCoreConfig = JobCoreConfiguration.newBuilder("demoSimpleJob1", "0/15 * * * * ?", 10).failover(true).build();
        // SIMPLE job type config bound to the job implementation class.
        SimpleJobConfiguration simpleJobConfig = new SimpleJobConfiguration(simpleCoreConfig, MySimpleJob.class.getCanonicalName());
        // Lite job root config; overwrite(true) pushes this local config over whatever is already in the registry center.
        LiteJobConfiguration simpleJobRootConfig = LiteJobConfiguration.newBuilder(simpleJobConfig).overwrite(true).build();
        return simpleJobRootConfig;
    }
    private static LiteJobConfiguration createJobConfiguration2() {
        // Second job: fires every 5 seconds, 10 sharding items, no failover, no overwrite.
        JobCoreConfiguration simpleCoreConfig = JobCoreConfiguration.newBuilder("demoSimpleJob2", "0/5 * * * * ?", 10).build();
        // SIMPLE job type config bound to the same job implementation class.
        SimpleJobConfiguration simpleJobConfig = new SimpleJobConfiguration(simpleCoreConfig, MySimpleJob.class.getCanonicalName());
        // Lite job root config with default settings.
        LiteJobConfiguration simpleJobRootConfig = LiteJobConfiguration.newBuilder(simpleJobConfig).build();
        return simpleJobRootConfig;
    }
}
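
The job above ignores the ShardingContext it receives, so the 10 sharding items of demoSimpleJob1 all do identical work. In practice each item picks its own slice of the data. A minimal sketch of that pattern follows; the class name ShardedOrderJob and the modulo-over-order_id slicing are illustrative assumptions, not part of the original post.

import com.dangdang.ddframe.job.api.ShardingContext;
import com.dangdang.ddframe.job.api.simple.SimpleJob;

public class ShardedOrderJob implements SimpleJob {
    @Override
    public void execute(ShardingContext context) {
        // Items 0..shardingTotalCount-1 are spread across the running instances;
        // with a single instance, all 10 items of demoSimpleJob1 land on this process.
        int item = context.getShardingItem();
        int total = context.getShardingTotalCount();
        // Typical pattern: each item owns a disjoint slice of the data, e.g. by modulo.
        String slice = "WHERE MOD(order_id, " + total + ") = " + item;
        System.out.printf("job=%s item=%d/%d slice: %s%n", context.getJobName(), item, total, slice);
    }
}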

 

ShardingJDBC: read/write splitting plus sharding across two databases and two tables
--------------------------------------------------------------------------------------------------------------------------------------------------------------------

package shardingjdbc;

import com.alibaba.druid.pool.DruidDataSource;
import com.google.common.collect.Lists;
import org.apache.shardingsphere.api.config.masterslave.LoadBalanceStrategyConfiguration;
import org.apache.shardingsphere.api.config.masterslave.MasterSlaveRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.KeyGeneratorConfiguration;
import org.apache.shardingsphere.api.config.sharding.ShardingRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.TableRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.strategy.StandardShardingStrategyConfiguration;
import org.apache.shardingsphere.core.constant.properties.ShardingPropertiesConstant;
import org.apache.shardingsphere.core.strategy.keygen.SnowflakeShardingKeyGenerator;
import org.apache.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.*;

public class ShardingJdbcConfig {


    public static void main(String[] args) throws Exception {
        SnowflakeShardingKeyGenerator keyGenerator = new SnowflakeShardingKeyGenerator();
        String insertSql = "insert into svc_order(`order_id`,`user_id`,`order_name`) values (?,?,?)";
        String selectSql = "SELECT * FROM svc_order WHERE user_id = ? and order_id > ?";

        // Build the sharding data source once and reuse it for every iteration.
        DataSource dataSource = new ShardingJdbcConfig().buildDataSource();
        for (int i = 0; i < 3; i++) {
            try (Connection connection = dataSource.getConnection()) {
                // Write: routed to the master of the database chosen by user_id,
                // and to the physical table chosen by order_id.
                try (PreparedStatement insert = connection.prepareStatement(insertSql)) {
                    insert.setObject(1, keyGenerator.generateKey());
                    insert.setObject(2, 52);
                    insert.setObject(3, "order-" + i);
                    insert.executeUpdate();
                }
                // Read: routed to one of the slaves (round-robin) of the matching database.
                try (PreparedStatement select = connection.prepareStatement(selectSql)) {
                    select.setObject(1, 52);
                    select.setObject(2, 401401034783588353L);
                    select.executeQuery();
                }
            }
        }
    }

    DataSource buildDataSource() throws SQLException {
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();

        // Table rules: each TableRuleConfiguration carries its own database-sharding and table-sharding strategy.
        shardingRuleConfig.getTableRuleConfigs().add(getOrderTableRuleConfiguration());
        shardingRuleConfig.getTableRuleConfigs().add(getOrderItemTableRuleConfiguration());

        // Bind svc_order and svc_order_item so joins between them stay on the same shard.
        shardingRuleConfig.getBindingTableGroups().add("svc_order, svc_order_item");
        //shardingRuleConfig.getBroadcastTables().add("t_config");
        // Master-slave (read/write splitting) rules for the logical data sources ds_0 and ds_1.
        shardingRuleConfig.setMasterSlaveRuleConfigs(getMasterSlaveRuleConfigurations());
        // Log the actual SQL that gets routed to each physical data source.
        Properties properties = new Properties();
        properties.setProperty(ShardingPropertiesConstant.SQL_SHOW.getKey(), "true");
        return ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, properties);
    }

    private static KeyGeneratorConfiguration getKeyGeneratorConfiguration() {
        // Snowflake generator for order_id (not wired in; the setKeyGeneratorConfig call below is commented out).
        return new KeyGeneratorConfiguration("SNOWFLAKE", "order_id");
    }

    TableRuleConfiguration getOrderTableRuleConfiguration() {
        // Logical table svc_order spans 4 physical tables: ds_0/ds_1 x svc_order_0/svc_order_1.
        TableRuleConfiguration result = new TableRuleConfiguration("svc_order", "ds_${0..1}.svc_order_${0..1}");
        // Database chosen by user_id, table chosen by order_id (see the algorithm sketch after the class).
        result.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("user_id", new PreciseModuloShardingDatabaseAlgorithm()));
        result.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("order_id", new PreciseModuloShardingTableAlgorithm()));
        //result.setKeyGeneratorConfig(getKeyGeneratorConfiguration());
        return result;
    }

    TableRuleConfiguration getOrderItemTableRuleConfiguration() {
        TableRuleConfiguration result = new TableRuleConfiguration("svc_order_item", "ds_${0..1}.svc_order_item_${0..1}");
        result.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("user_id", new PreciseModuloShardingDatabaseAlgorithm()));
        result.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("order_item_id", new PreciseModuloShardingTableAlgorithm()));
        return result;
    }

    /**
     * One master, multiple slaves per logical data source:
     * each rule names a single master data source and several slave data sources,
     * with round-robin load balancing across the slaves for reads.
     * @return the master-slave rules backing the logical data sources ds_0 and ds_1
     */
    List<MasterSlaveRuleConfiguration> getMasterSlaveRuleConfigurations() {
        MasterSlaveRuleConfiguration masterSlaveRuleConfig1 = new MasterSlaveRuleConfiguration("ds_0", "ds_master_0", Arrays.asList("ds_master_0_slave_0", "ds_master_0_slave_1"),new LoadBalanceStrategyConfiguration("ROUND_ROBIN"));
        MasterSlaveRuleConfiguration masterSlaveRuleConfig2 = new MasterSlaveRuleConfiguration("ds_1", "ds_master_1", Arrays.asList("ds_master_1_slave_0", "ds_master_1_slave_1"),new LoadBalanceStrategyConfiguration("ROUND_ROBIN"));
        return Lists.newArrayList(masterSlaveRuleConfig1, masterSlaveRuleConfig2);
    }

    Map<String, DataSource> createDataSourceMap() {
        // Six physical data sources: two masters, each with two slaves.
        final Map<String, DataSource> result = new HashMap<>();
        result.put("ds_master_0", createDataSource("ds_master_0"));
        result.put("ds_master_0_slave_0", createDataSource("ds_master_0_slave_0"));
        result.put("ds_master_0_slave_1", createDataSource("ds_master_0_slave_1"));

        result.put("ds_master_1", createDataSource("ds_master_1"));
        result.put("ds_master_1_slave_0", createDataSource("ds_master_1_slave_0"));
        result.put("ds_master_1_slave_1", createDataSource("ds_master_1_slave_1"));
        return result;
    }


    private static DataSource createDataSource(final String dataSourceName) {
        // Each physical data source maps to a MySQL schema of the same name; replace the credentials with your own.
        DruidDataSource result = new DruidDataSource();
        String url = String.format("jdbc:mysql://127.0.0.1:3306/%s?useUnicode=true&serverTimezone=UTC&characterEncoding=utf-8&useSSL=false", dataSourceName);
        result.setUrl(url);
        result.setUsername("xxxx");
        result.setPassword("yyyy");
        return result;
    }


}
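
The two sharding algorithm classes referenced above, PreciseModuloShardingDatabaseAlgorithm and PreciseModuloShardingTableAlgorithm, are not shown in the original post. A minimal sketch of what they could look like follows, assuming user_id is an integer column, order_id / order_item_id are BIGINT columns, and both algorithms simply take the value modulo 2 to pick the suffix; the exact implementation in the original project may differ.

// --- PreciseModuloShardingDatabaseAlgorithm.java ---
package shardingjdbc;

import java.util.Collection;

import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

// Picks ds_0 or ds_1 from user_id % 2.
public class PreciseModuloShardingDatabaseAlgorithm implements PreciseShardingAlgorithm<Integer> {
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Integer> shardingValue) {
        int suffix = shardingValue.getValue() % 2;
        for (String target : availableTargetNames) {        // ds_0, ds_1
            if (target.endsWith(String.valueOf(suffix))) {
                return target;
            }
        }
        throw new UnsupportedOperationException("No database target for user_id " + shardingValue.getValue());
    }
}

// --- PreciseModuloShardingTableAlgorithm.java ---
package shardingjdbc;

import java.util.Collection;

import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

// Picks svc_order_0/_1 (or svc_order_item_0/_1) from the key value % 2.
public class PreciseModuloShardingTableAlgorithm implements PreciseShardingAlgorithm<Long> {
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) {
        long suffix = shardingValue.getValue() % 2;
        for (String target : availableTargetNames) {        // e.g. svc_order_0, svc_order_1
            if (target.endsWith(String.valueOf(suffix))) {
                return target;
            }
        }
        throw new UnsupportedOperationException("No table target for key " + shardingValue.getValue());
    }
}

With sql.show enabled, the console log should confirm the routing: the INSERT goes to a master data source, while the SELECT is served by one of that database's slaves picked round-robin.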