<dependency> <groupId>org.apache.shardingsphere</groupId> <artifactId>shardingsphere-jdbc</artifactId> <version>5.5.1</version> </dependency>
# https://shardingsphere.apache.org/index_zh.html
mode:
  # Run mode. Options: Memory, Standalone, Cluster — Standalone is used here.
  type: Standalone
dataSources:
  # Default (un-sharded) data source — holds the single tables registered under !SINGLE below.
  ds:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://127.0.0.1:3307/big_market?useUnicode=true&characterEncoding=utf8&autoReconnect=true&zeroDateTimeBehavior=convertToNull&serverTimezone=UTC&useSSL=true
    username: root
    password: 123456
    connectionTimeoutMilliseconds: 30000
    idleTimeoutMilliseconds: 60000
    maxLifetimeMilliseconds: 1800000
    maxPoolSize: 15
    minPoolSize: 5
  # Shard 0 of the sharded pair (big_market_01).
  ds_0:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://127.0.0.1:3307/big_market_01?useUnicode=true&characterEncoding=utf8&autoReconnect=true&zeroDateTimeBehavior=convertToNull&serverTimezone=UTC&useSSL=true
    username: root
    password: 123456
    connectionTimeoutMilliseconds: 30000
    idleTimeoutMilliseconds: 60000
    maxLifetimeMilliseconds: 1800000
    maxPoolSize: 15
    minPoolSize: 5
  # Shard 1 of the sharded pair (big_market_02).
  ds_1:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://127.0.0.1:3307/big_market_02?useUnicode=true&characterEncoding=utf8&autoReconnect=true&zeroDateTimeBehavior=convertToNull&serverTimezone=UTC&useSSL=true
    username: root
    password: 123456
    connectionTimeoutMilliseconds: 30000
    idleTimeoutMilliseconds: 60000
    maxLifetimeMilliseconds: 1800000
    maxPoolSize: 15
    minPoolSize: 5
rules:
- !SHARDING
  # Database-level routing.
  defaultDatabaseStrategy:
    standard:
      shardingColumn: user_id
      shardingAlgorithmName: database_inline
  # Table-level routing.
  tables:
    raffle_activity_account:
      actualDataNodes: ds_$->{0..1}.raffle_activity_account
    raffle_activity_account_month:
      actualDataNodes: ds_$->{0..1}.raffle_activity_account_month
    raffle_activity_account_day:
      actualDataNodes: ds_$->{0..1}.raffle_activity_account_day
    task:
      actualDataNodes: ds_$->{0..1}.task
      # Custom (HINT) database strategy: the target database must be set
      # manually on each use via HintManager.
      databaseStrategy:
        hint:
          shardingAlgorithmName: hint_class_based
    # Activity-claim order table (chances won via sign-in etc.).
    raffle_activity_order:
      actualDataNodes: ds_$->{0..1}.raffle_activity_order_00$->{0..3}
      tableStrategy:
        standard:
          shardingColumn: user_id
          shardingAlgorithmName: raffle_activity_order_inline
    # Activity-participation order table.
    user_raffle_order:
      actualDataNodes: ds_$->{0..1}.user_raffle_order_00$->{0..3}
      tableStrategy:
        standard:
          shardingColumn: user_id
          shardingAlgorithmName: user_raffle_order_inline
    # User award record table.
    user_award_record:
      actualDataNodes: ds_$->{0..1}.user_award_record_00$->{0..3}
      tableStrategy:
        standard:
          shardingColumn: user_id
          shardingAlgorithmName: user_award_record_inline
  # Routing algorithms.
  shardingAlgorithms:
    # Database routing: 2 is the number of databases; the database index is
    # hash(user_id) mod 2.
    database_inline:
      type: INLINE
      props:
        algorithm-expression: ds_$->{Math.abs(user_id.hashCode()) % 2}
    # Table routing: 4 is the number of tables per database; (4 - 1) forms a
    # binary mask (011). The high bits of the hash are XOR-folded in before
    # masking; a plain `Math.abs(user_id.hashCode()) % N` expression is not
    # recommended for table routing.
    raffle_activity_order_inline:
      type: INLINE
      props:
        algorithm-expression: raffle_activity_order_00$->{(user_id.hashCode() ^ (user_id.hashCode()) >>> 16) & (4 - 1)}
    user_raffle_order_inline:
      type: INLINE
      props:
        algorithm-expression: user_raffle_order_00$->{(user_id.hashCode() ^ (user_id.hashCode()) >>> 16) & (4 - 1)}
    user_award_record_inline:
      type: INLINE
      props:
        algorithm-expression: user_award_record_00$->{(user_id.hashCode() ^ (user_id.hashCode()) >>> 16) & (4 - 1)}
    # HINT-based algorithm backed by a custom class; used by the `task` table.
    hint_class_based:
      type: CLASS_BASED
      props:
        strategy: HINT
        algorithmClassName: com.yw.infrastructure.hint.MyDatasourceRoutingAlgorithm
# Tables that are NOT sharded must all be loaded as single tables.
- !SINGLE
  tables:
    - "ds.*"
  # Load all single tables from the default data source.
props:
  # Whether to print SQL in the log. Printing SQL helps developers locate
  # problems quickly; output contains the logic SQL, actual SQL, and the SQL
  # parse result. Logged under topic ShardingSphere-SQL at INFO. Default: false.
  sql-show: true
  # Whether to print simple-style SQL in the log. Default: false.
  sql-simple: true
  # Size of the task-processing thread pool. Each ShardingSphereDataSource
  # uses an independent pool; data sources in the same JVM do not share one.
  executor-size: 20
  # Maximum connections a single query may use per database instance. Default: 1.
  max-connections-size-per-query: 1
  # Whether to check sharding metadata structural consistency on startup/update.
  check-table-metadata-enabled: false
  # Whether to check for duplicate tables on startup/update. Default: false.
  check-duplicate-table-enabled: false
package com.yw.trigger.job;
import com.alibaba.fastjson2.JSON;
import com.xxl.job.core.handler.annotation.XxlJob;
import com.yw.domain.award.model.entity.TaskEntity;
import com.yw.domain.task.service.ITaskService;
import com.yw.types.exception.AppException;
import lombok.extern.slf4j.Slf4j;
import org.apache.shardingsphere.infra.hint.HintManager;
import org.redisson.api.RLock;
import org.redisson.api.RedissonClient;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.List;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
/**
* 扫描库表对失败的发奖任务进行重新发送mq
*
* @author: yuanwen
* @since: 2024/11/18
*/
/**
 * XXL-Job handlers that scan the sharded task tables for failed award-sending
 * tasks and re-publish them to MQ. One job per database shard; each job takes
 * a distributed lock so that multiple deployed instances do not pick up and
 * resend the same tasks.
 */
@Service
@Slf4j
public class ScanSendAwardTaskJob {

    /** Shard suffix of the first database (routes to ds_0). */
    private static final String DB_1 = "0";
    /** Shard suffix of the second database (routes to ds_1). */
    private static final String DB_2 = "1";

    @Resource
    private ITaskService taskService;
    @Resource
    private ThreadPoolExecutor threadPoolExecutor;
    @Resource
    private RedissonClient redissonClient;

    // Scheduling is delegated to XXL-Job. The equivalent Spring cron would be:
    // @Scheduled(cron = "0/5 * * * * ?")  — every 5 seconds
    // (fields: second minute hour day month day-of-week)

    /** Scans shard ds_0 for failed award tasks and resends them. */
    @XxlJob(value = "ScanSendAwardTask_DB1")
    public void scanSendAwardTaskDB1() {
        scanAndResend(DB_1, "big-market-SendMessageTaskJob_DB1");
    }

    /** Scans shard ds_1 for failed award tasks and resends them. */
    @XxlJob(value = "ScanSendAwardTask_DB2")
    public void scanSendAwardTaskDB2() {
        scanAndResend(DB_2, "big-market-SendMessageTaskJob_DB2");
    }

    /**
     * Common scan-and-resend routine for one database shard.
     *
     * @param db       shard suffix ("0" or "1") appended to "ds_" for HINT routing
     * @param lockName name of the Redisson distributed lock guarding this shard's scan
     * @throws AppException if acquiring the lock or submitting the scan fails
     */
    private void scanAndResend(String db, String lockName) {
        // Distributed lock: prevents multiple deployed instances from scanning
        // the same shard concurrently and double-sending the same task.
        RLock lock = redissonClient.getLock(lockName);
        boolean locked = false;
        try {
            locked = lock.tryLock(3, TimeUnit.SECONDS);
            if (!locked) return;
            // NOTE(review): the lock is released as soon as this async task is
            // submitted, so it only serializes job *submission*, not the scan
            // itself — confirm this is the intended guarantee.
            threadPoolExecutor.execute(() -> {
                // Pin the HINT datasource for this thread, then query the
                // failed-task list from that shard.
                HintManager hintManager = taskService.scanDataSource(db);
                List<TaskEntity> failTasks = taskService.queryFailTaskList();
                log.info("扫描发送奖品任务列表:{}", JSON.toJSONString(failTasks));
                // Fan the resends out across the pool to speed them up.
                failTasks.forEach(task ->
                        threadPoolExecutor.execute(() -> {
                            // Re-pin the shard in the worker thread. (Per the
                            // project notes, HintManager is backed by
                            // TransmittableThreadLocal, so child pool threads
                            // inherit the value — this explicit set keeps the
                            // routing correct regardless.)
                            hintManager.setDatabaseShardingValue("ds_" + db);
                            try {
                                // Resend the award message, then mark the task completed.
                                taskService.sendTaskMessage(task);
                                taskService.updateTaskSendMessageCompleted(task.getUserId(), task.getMessageId());
                                log.info("补偿发送奖品任务成功:{}", JSON.toJSONString(task));
                            } catch (Exception e) {
                                // Mark the task failed so a later scan retries it.
                                log.error("发送奖品任务失败:task:{},message:{}", JSON.toJSONString(task), e.getMessage());
                                taskService.updateTaskSendMessageFailed(task.getUserId(), task.getMessageId());
                            }
                        })
                );
                // Clear this scan's sharding hint.
                // NOTE(review): worker tasks submitted above may still be running
                // when this closes — confirm close() does not clear their hint.
                hintManager.close();
            });
        } catch (InterruptedException e) {
            // Restore the interrupt flag before surfacing the failure.
            Thread.currentThread().interrupt();
            log.error("扫描发送奖品任务失败:{}", e.getMessage());
            throw new AppException("扫描发送奖品任务失败", e);
        } catch (Exception e) {
            log.error("扫描发送奖品任务失败:{}", e.getMessage());
            throw new AppException("扫描发送奖品任务失败", e);
        } finally {
            // Only unlock when this thread actually holds the lock; calling
            // unlock() after a failed tryLock throws IllegalMonitorStateException.
            if (locked && lock.isHeldByCurrentThread()) {
                lock.unlock();
            }
        }
    }
}
/**
 * Obtains a HINT routing manager and pins the target database shard for the
 * current thread.
 *
 * @param hit shard suffix ("0" or "1"); the datasource name becomes "ds_" + hit
 * @return the HintManager holding the sharding value; caller must close() it
 */
public HintManager scanDataSource(String hit) {
    final HintManager manager = HintManager.getInstance();
    final String dataSourceName = "ds_" + hit;
    manager.setDatabaseShardingValue(dataSourceName);
    return manager;
}
这里的 HintManager.getInstance() 源码内部涉及 TransmittableThreadLocal;本场景中由线程池异步执行任务,且 task 表通过自定义 Hint 方式切换数据源。
TransmittableThreadLocal 的详情可参考文章:《ThreadLocal 跨线程池传递数据解决方案》