Achieving second-level response times over billions of waybill records, while also keeping storage and deletion performant, requires combining architecture design, data sharding, caching strategy, and query optimization. Below is a complete solution based on the Java ecosystem:
1. Data Storage Architecture
Database and table sharding
// Database/table sharding with Apache ShardingSphere (5.2.x API)
@Configuration
public class ShardingConfig {

    @Bean
    public DataSource dataSource() throws SQLException {
        // Hash-shard by waybill ID: 16 databases x 16 tables each = 256 physical tables
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();

        // Database sharding algorithm: inline Groovy expression over order_id's hash
        Properties dbProps = new Properties();
        dbProps.setProperty("algorithm-expression", "ds_${Math.abs(order_id.hashCode()) % 16}");
        shardingRuleConfig.getShardingAlgorithms().put("database_inline",
                new AlgorithmConfiguration("INLINE", dbProps));

        // Table sharding algorithm: the next 4 bits of the hash pick the table
        Properties tableProps = new Properties();
        tableProps.setProperty("algorithm-expression",
                "t_order_${Math.abs(order_id.hashCode()).intdiv(16) % 16}");
        shardingRuleConfig.getShardingAlgorithms().put("table_inline",
                new AlgorithmConfiguration("INLINE", tableProps));

        // Rule for the waybill table: actual data nodes are ds_0..ds_15, t_order_0..t_order_15
        ShardingTableRuleConfiguration orderTableRuleConfig = new ShardingTableRuleConfiguration(
                "t_order", "ds_${0..15}.t_order_${0..15}");
        orderTableRuleConfig.setDatabaseShardingStrategy(new StandardShardingStrategyConfiguration(
                "order_id", "database_inline"));
        orderTableRuleConfig.setTableShardingStrategy(new StandardShardingStrategyConfiguration(
                "order_id", "table_inline"));
        shardingRuleConfig.getTables().add(orderTableRuleConfig);

        return ShardingSphereDataSourceFactory.createDataSource(createDataSourceMap(),
                Collections.singleton(shardingRuleConfig), new Properties());
    }

    private Map<String, DataSource> createDataSourceMap() {
        // Create the 16 underlying data sources
        Map<String, DataSource> result = new HashMap<>();
        for (int i = 0; i < 16; i++) {
            result.put("ds_" + i, createDataSource("ds_" + i));
        }
        return result;
    }

    private DataSource createDataSource(String name) {
        // HikariCP connection pool per shard
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:mysql://localhost:3306/" + name);
        config.setUsername("root");
        config.setPassword("password");
        config.setMaximumPoolSize(20);
        return new HikariDataSource(config);
    }
}
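One point worth calling out about this design: only queries that carry the shard key (order_id) can be routed to a single physical table; anything else fans out to all 256 tables, which is why complex conditions are offloaded to Elasticsearch in section 5. A minimal usage sketch against the DataSource above (JdbcTemplate and the sample lookup method are illustrative, not part of the original design):

import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.Map;

public class OrderLookupExample {
    // With the inline rule above, this statement is rewritten and routed to
    // exactly one ds_X.t_order_Y; without order_id it would hit all 256 tables.
    public Map<String, Object> lookup(DataSource shardingDataSource, String orderId) {
        JdbcTemplate jdbc = new JdbcTemplate(shardingDataSource);
        return jdbc.queryForMap("SELECT * FROM t_order WHERE order_id = ?", orderId);
    }
}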
2. Multi-Level Caching
Local hot-key cache + distributed cache
// Caffeine as the local (L1) cache, Redis as the distributed (L2) cache
@Configuration
@EnableCaching
public class CacheConfig {

    @Bean
    public CacheManager cacheManager(RedisConnectionFactory redisConnectionFactory) {
        // Local cache (Caffeine): short TTL, bounded size, absorbs hot-key traffic
        CaffeineCacheManager caffeineCacheManager = new CaffeineCacheManager();
        caffeineCacheManager.setCaffeine(Caffeine.newBuilder()
                .expireAfterWrite(10, TimeUnit.MINUTES)
                .maximumSize(10_000));

        // Redis cache: longer TTL, shared across all service instances
        RedisCacheConfiguration redisCacheConfig = RedisCacheConfiguration.defaultCacheConfig()
                .entryTtl(Duration.ofHours(1))
                .disableCachingNullValues()
                .serializeValuesWith(RedisSerializationContext.SerializationPair
                        .fromSerializer(new GenericJackson2JsonRedisSerializer()));

        // Composite manager: note that CompositeCacheManager resolves a cache name
        // against the FIRST manager that can serve it - it is a fallback chain,
        // not a true tiered read-through; see the layered-lookup sketch after this class.
        return new CompositeCacheManager(
                caffeineCacheManager,
                RedisCacheManager.builder(redisConnectionFactory)
                        .cacheDefaults(redisCacheConfig)
                        .build()
        );
    }

    // Redisson client for distributed locking
    @Bean
    public RedissonClient redissonClient() {
        Config config = new Config();
        config.useClusterServers()
                .addNodeAddress("redis://127.0.0.1:7000", "redis://127.0.0.1:7001")
                .setScanInterval(2000);
        return Redisson.create(config);
    }
}
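Because CompositeCacheManager is a fallback chain rather than a layered cache, a true L1-then-L2 read path (check Caffeine, then Redis, then backfill both) has to be wired by hand. Below is a minimal hand-rolled sketch, assuming a Caffeine Cache and a RedisTemplate are available as beans; the class, key prefix, and TTL are illustrative assumptions:

import com.github.benmanes.caffeine.cache.Cache;
import org.springframework.data.redis.core.RedisTemplate;
import java.time.Duration;
import java.util.function.Function;

public class TwoLevelOrderCache {
    private final Cache<String, OrderDTO> local;          // L1: Caffeine
    private final RedisTemplate<String, OrderDTO> redis;  // L2: Redis

    public TwoLevelOrderCache(Cache<String, OrderDTO> local,
                              RedisTemplate<String, OrderDTO> redis) {
        this.local = local;
        this.redis = redis;
    }

    // Read path: L1 -> L2 -> loader (DB), backfilling each level on the way out.
    public OrderDTO get(String orderId, Function<String, OrderDTO> dbLoader) {
        OrderDTO order = local.getIfPresent(orderId);
        if (order != null) {
            return order;
        }
        order = redis.opsForValue().get("order:" + orderId);
        if (order == null) {
            order = dbLoader.apply(orderId);
            if (order != null) {
                redis.opsForValue().set("order:" + orderId, order, Duration.ofHours(1));
            }
        }
        if (order != null) {
            local.put(orderId, order);
        }
        return order;
    }
}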
3. Query Optimization
Index design + asynchronous processing
// Waybill query service
@Service
public class OrderQueryServiceImpl implements OrderQueryService {

    @Autowired
    private OrderMapper orderMapper;
    @Autowired
    private CacheManager cacheManager;
    @Autowired
    private RedissonClient redissonClient;

    @Override
    @Cacheable(value = "orders", key = "#orderId")
    public OrderDTO getOrderById(String orderId) {
        // Spring only invokes this method on a cache miss (local cache, then Redis,
        // have already been consulted). Take a distributed lock so that a hot key
        // that just expired does not stampede the database.
        RLock lock = redissonClient.getLock("order:lock:" + orderId);
        try {
            lock.lock(5, TimeUnit.SECONDS);
            // Double-check: another instance may have rebuilt the entry while we waited
            Cache cache = cacheManager.getCache("orders");
            OrderDTO cachedOrder = cache.get(orderId, OrderDTO.class);
            if (cachedOrder != null) {
                return cachedOrder;
            }
            // Rebuild from the database; @Cacheable writes the returned value back
            return orderMapper.selectById(orderId);
        } finally {
            if (lock.isHeldByCurrentThread()) {
                lock.unlock();
            }
        }
    }

    // Batch query: one pass over the cache, then a single batched DB round trip
    @Override
    public Map<String, OrderDTO> batchGetOrders(List<String> orderIds) {
        Map<String, OrderDTO> result = new HashMap<>();
        List<String> uncachedIds = new ArrayList<>();

        // 1. Resolve as many IDs as possible from the cache
        Cache cache = cacheManager.getCache("orders");
        for (String orderId : orderIds) {
            OrderDTO order = cache.get(orderId, OrderDTO.class);
            if (order != null) {
                result.put(orderId, order);
            } else {
                uncachedIds.add(orderId);
            }
        }

        // 2. Fetch the misses in one batch and backfill the cache
        if (!uncachedIds.isEmpty()) {
            List<OrderDTO> dbOrders = orderMapper.selectBatchIds(uncachedIds);
            for (OrderDTO order : dbOrders) {
                result.put(order.getOrderId(), order);
                cache.put(order.getOrderId(), order);
            }
        }
        return result;
    }
}
4. Deletion Optimization
Soft delete + asynchronous cleanup
// Waybill deletion service
@Service
public class OrderDeleteServiceImpl implements OrderDeleteService {

    @Autowired
    private OrderMapper orderMapper;
    @Autowired
    private CacheManager cacheManager;
    @Autowired
    private RedissonClient redissonClient;
    @Autowired
    private ThreadPoolTaskExecutor taskExecutor;

    @Override
    @Transactional
    public void deleteOrder(String orderId) {
        // 1. Mark the record deleted (soft delete) - the only work on the
        //    synchronous path, so the caller returns quickly
        orderMapper.logicalDelete(orderId);

        // 2. Archiving and physical removal run asynchronously, outside this transaction
        taskExecutor.execute(() -> {
            // Distributed lock so the cleanup runs exactly once across instances
            RLock lock = redissonClient.getLock("order:delete:lock:" + orderId);
            try {
                lock.lock(10, TimeUnit.SECONDS);
                // Evict from cache
                Cache cache = cacheManager.getCache("orders");
                cache.evict(orderId);
                // Archive to the history table, then remove from the primary table
                orderMapper.archiveOrder(orderId);
                orderMapper.physicalDelete(orderId);
            } finally {
                if (lock.isHeldByCurrentThread()) {
                    lock.unlock();
                }
            }
        });
    }
}
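The custom mapper methods used above (logicalDelete, archiveOrder, physicalDelete) and the taskExecutor bean are never defined in the article; a plausible sketch follows. The SQL, the deleted flag column, the t_order_history table, and the pool sizes are all assumptions (the interface extends MyBatis-Plus BaseMapper, which also supplies the selectById/selectBatchIds calls used in section 3):

import org.apache.ibatis.annotations.*;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;

@Mapper
public interface OrderMapper extends BaseMapper<OrderDTO> {

    // Soft delete: flip a status flag so reads can filter it out immediately
    @Update("UPDATE t_order SET deleted = 1 WHERE order_id = #{orderId}")
    int logicalDelete(@Param("orderId") String orderId);

    // Copy the row into the history table (assumes an identical schema)
    @Insert("INSERT INTO t_order_history SELECT * FROM t_order WHERE order_id = #{orderId}")
    int archiveOrder(@Param("orderId") String orderId);

    // Final physical removal from the primary table
    @Delete("DELETE FROM t_order WHERE order_id = #{orderId}")
    int physicalDelete(@Param("orderId") String orderId);
}

@Configuration
class AsyncConfig {
    // Bounded pool for cleanup tasks; sizes must be tuned under real load
    @Bean
    public ThreadPoolTaskExecutor taskExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(8);
        executor.setMaxPoolSize(16);
        executor.setQueueCapacity(1000);
        executor.setThreadNamePrefix("order-cleanup-");
        executor.initialize();
        return executor;
    }
}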
5. Complex Queries at Scale
Elasticsearch for multi-condition search
// Complex/multi-condition queries go to Elasticsearch instead of the shards
@Service
public class OrderSearchServiceImpl implements OrderSearchService {

    @Autowired
    private RestHighLevelClient esClient;

    @Override
    public List<OrderDTO> searchOrders(OrderSearchCondition condition) {
        SearchRequest searchRequest = new SearchRequest("order_index");
        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();

        // Build the bool query from the search condition
        BoolQueryBuilder boolQuery = QueryBuilders.boolQuery();
        if (StringUtils.isNotBlank(condition.getKeyword())) {
            boolQuery.must(QueryBuilders.multiMatchQuery(condition.getKeyword(),
                    "order_no", "receiver_name", "receiver_address"));
        }
        if (condition.getStartTime() != null) {
            boolQuery.filter(QueryBuilders.rangeQuery("create_time")
                    .gte(condition.getStartTime()));
        }
        // ...other conditions

        // Note: from/size paging degrades sharply on deep pages; prefer
        // search_after for anything beyond the first few thousand hits
        sourceBuilder.query(boolQuery)
                .from((condition.getPage() - 1) * condition.getSize())
                .size(condition.getSize())
                .sort("create_time", SortOrder.DESC);
        searchRequest.source(sourceBuilder);

        try {
            SearchResponse response = esClient.search(searchRequest, RequestOptions.DEFAULT);
            return Arrays.stream(response.getHits().getHits())
                    .map(hit -> JSON.parseObject(hit.getSourceAsString(), OrderDTO.class))
                    .collect(Collectors.toList());
        } catch (IOException e) {
            throw new RuntimeException("Elasticsearch query failed", e);
        }
    }
}
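The order_index mapping is assumed throughout this section. A sketch of creating it with the same RestHighLevelClient follows; the field types are assumptions inferred from the query above (keyword fields for exact matches, analyzed text for the address, date for create_time):

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;

public class OrderIndexInitializer {
    // One-off index creation; in production this is usually managed via index templates
    public void createOrderIndex(RestHighLevelClient esClient) throws IOException {
        CreateIndexRequest request = new CreateIndexRequest("order_index");
        request.mapping(
                "{\n" +
                "  \"properties\": {\n" +
                "    \"order_id\":         { \"type\": \"keyword\" },\n" +
                "    \"order_no\":         { \"type\": \"keyword\" },\n" +
                "    \"receiver_name\":    { \"type\": \"keyword\" },\n" +
                "    \"receiver_address\": { \"type\": \"text\" },\n" +
                "    \"create_time\":      { \"type\": \"date\" }\n" +
                "  }\n" +
                "}", XContentType.JSON);
        esClient.indices().create(request, RequestOptions.DEFAULT);
    }
}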
6. Data Synchronization
Canal-based incremental sync from MySQL to ES
// Canal client: tails the MySQL binlog and replays changes into Elasticsearch
@Component
public class CanalClient implements InitializingBean {

    @Autowired
    private RestHighLevelClient esClient;

    @Override
    public void afterPropertiesSet() throws Exception {
        // Single-node connector; use newClusterConnector with a ZooKeeper address for HA
        CanalConnector connector = CanalConnectors.newSingleConnector(
                new InetSocketAddress("canal-server", 11111), "destination", "", "");
        Thread worker = new Thread(() -> {
            connector.connect();
            connector.subscribe(".*\\..*");
            while (true) {
                // Pull a batch without auto-ack so failures can be rolled back and retried
                Message message = connector.getWithoutAck(100);
                long batchId = message.getId();
                if (batchId == -1 || message.getEntries().isEmpty()) {
                    // Nothing to consume; back off briefly instead of busy-spinning
                    try {
                        Thread.sleep(500);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        return;
                    }
                    continue;
                }
                try {
                    for (CanalEntry.Entry entry : message.getEntries()) {
                        if (entry.getEntryType() == CanalEntry.EntryType.ROWDATA) {
                            processEntry(entry);
                        }
                    }
                    connector.ack(batchId);
                } catch (Exception e) {
                    connector.rollback(batchId);
                }
            }
        });
        worker.setDaemon(true);
        worker.start();
    }

    private void processEntry(CanalEntry.Entry entry) {
        CanalEntry.RowChange rowChange;
        try {
            rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
        } catch (Exception e) {
            throw new RuntimeException("Failed to parse binlog entry", e);
        }
        if (rowChange.getEventType() == CanalEntry.EventType.DELETE) {
            // Deletes: remove the document from ES
            for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
                String orderId = getColumnValue(rowData.getBeforeColumnsList(), "order_id");
                deleteFromES(orderId);
            }
        } else if (rowChange.getEventType() == CanalEntry.EventType.INSERT ||
                rowChange.getEventType() == CanalEntry.EventType.UPDATE) {
            // Inserts and updates: (re)index the document
            for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
                OrderDTO order = convertToOrder(rowData.getAfterColumnsList());
                indexToES(order);
            }
        }
    }

    private void indexToES(OrderDTO order) {
        IndexRequest request = new IndexRequest("order_index")
                .id(order.getOrderId())
                .source(JSON.toJSONString(order), XContentType.JSON);
        try {
            esClient.index(request, RequestOptions.DEFAULT);
        } catch (IOException e) {
            throw new RuntimeException("Failed to index document in ES", e);
        }
    }

    private void deleteFromES(String orderId) {
        DeleteRequest request = new DeleteRequest("order_index").id(orderId);
        try {
            esClient.delete(request, RequestOptions.DEFAULT);
        } catch (IOException e) {
            throw new RuntimeException("Failed to delete document from ES", e);
        }
    }
}
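The getColumnValue and convertToOrder helpers are referenced above but never defined; a plausible sketch is below. The column-to-field mapping is an assumption: only order_id appears elsewhere in the article, and the other fields simply mirror the ES mapping from section 5.

import com.alibaba.otter.canal.protocol.CanalEntry;
import java.util.List;

public class CanalRowMapper {

    // Linear scan over the row's columns; binlog rows are small, so this is fine
    public static String getColumnValue(List<CanalEntry.Column> columns, String name) {
        for (CanalEntry.Column column : columns) {
            if (column.getName().equals(name)) {
                return column.getValue();
            }
        }
        return null;
    }

    // Assemble an OrderDTO from the after-image of the row; field names assumed
    public static OrderDTO convertToOrder(List<CanalEntry.Column> columns) {
        OrderDTO order = new OrderDTO();
        order.setOrderId(getColumnValue(columns, "order_id"));
        order.setOrderNo(getColumnValue(columns, "order_no"));
        order.setReceiverName(getColumnValue(columns, "receiver_name"));
        order.setReceiverAddress(getColumnValue(columns, "receiver_address"));
        return order;
    }
}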
7. Additional Performance Optimizations
- Hot/cold data separation (routing sketch after this list):
○ Last 3 months in MySQL (hot data)
○ 3-12 months in TiDB (warm data)
○ Older than 1 year archived to HBase (cold data)
- Read/write splitting:
// Read/write splitting with ShardingSphere (4.x master-slave API;
// 5.x renamed this feature to "readwrite-splitting")
ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
MasterSlaveRuleConfiguration masterSlaveRuleConfig = new MasterSlaveRuleConfiguration(
        "ds_master_slave", "ds_master", Arrays.asList("ds_slave_0", "ds_slave_1"));
shardingRuleConfig.getMasterSlaveRuleConfigs().add(masterSlaveRuleConfig);
- Connection pool tuning:
// HikariCP tuning
HikariConfig config = new HikariConfig();
config.setMaximumPoolSize(50);       // size to the measured load
config.setMinimumIdle(10);
config.setIdleTimeout(600000);       // 10 minutes
config.setConnectionTimeout(30000);  // 30 seconds
config.setMaxLifetime(1800000);      // 30 minutes
- JVM tuning:
-Xms4g -Xmx4g -XX:+UseG1GC -XX:MaxGCPauseMillis=200
-XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent
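The hot/cold split in the first bullet implies a routing layer that picks a store by record age. A minimal sketch, assuming each tier is exposed behind a common repository interface (all names here are illustrative, not from the original article):

import java.time.LocalDateTime;

public class TieredOrderRepository {
    private final OrderRepository mysqlRepo;  // hot: last 3 months
    private final OrderRepository tidbRepo;   // warm: 3-12 months
    private final OrderRepository hbaseRepo;  // cold: older than 1 year

    public TieredOrderRepository(OrderRepository mysqlRepo,
                                 OrderRepository tidbRepo,
                                 OrderRepository hbaseRepo) {
        this.mysqlRepo = mysqlRepo;
        this.tidbRepo = tidbRepo;
        this.hbaseRepo = hbaseRepo;
    }

    // Route by the record's creation time; callers must be able to supply it
    // (e.g. encoded in the waybill ID) to avoid querying every tier.
    public OrderDTO findByIdAndCreateTime(String orderId, LocalDateTime createTime) {
        LocalDateTime now = LocalDateTime.now();
        if (createTime.isAfter(now.minusMonths(3))) {
            return mysqlRepo.findById(orderId);
        } else if (createTime.isAfter(now.minusMonths(12))) {
            return tidbRepo.findById(orderId);
        }
        return hbaseRepo.findById(orderId);
    }

    // Hypothetical common interface over the three storage tiers
    public interface OrderRepository {
        OrderDTO findById(String orderId);
    }
}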
Summary
This solution achieves second-level response over billions of waybill records through:
- Sharding: horizontal splits keep per-table volume and pressure low
- Multi-level caching: local + distributed caches cut database reads
- Read/write splitting: higher query throughput
- Search engine: Elasticsearch handles complex multi-condition queries
- Asynchronous processing: non-critical work is taken off the hot path
- Hot/cold separation: storage media matched to data temperature
- Soft delete + async cleanup: deletes never block the main flow
In practice, the design must be adapted to the actual business scenario and data profile, and backed by load testing and monitoring.