Architecture Pattern Implementation Examples: Separation and Consolidation in Practice

Overview

This document provides concrete implementations of the "separation" (分) and "consolidation" (合) principles in architecture design, including code snippets, configuration samples, and real-world application scenarios.

Implementing the "Separation" Design Patterns

1. Layered Architecture Example

Layered architecture with Spring Boot
// Presentation layer - Controller
@RestController
@RequestMapping("/api/users")
public class UserController {
    @Autowired
    private UserService userService;
    
    @GetMapping("/{id}")
    public ResponseEntity<UserDTO> getUser(@PathVariable Long id) {
        UserDTO user = userService.findById(id);
        return ResponseEntity.ok(user);
    }
}

// Business layer - Service
@Service
public class UserService {
    @Autowired
    private UserRepository userRepository;
    @Autowired
    private UserMapper userMapper;
    
    public UserDTO findById(Long id) {
        User user = userRepository.findById(id)
            .orElseThrow(() -> new UserNotFoundException(id));
        return userMapper.toDTO(user);
    }
}

// Data access layer - Repository
@Repository
public interface UserRepository extends JpaRepository<User, Long> {
    Optional<User> findByEmail(String email);
    List<User> findByStatusAndCreatedAtBetween(
        UserStatus status, LocalDateTime start, LocalDateTime end
    );
}

// Domain model
@Entity
@Table(name = "users")
public class User {
    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Long id;
    
    @Column(unique = true)
    private String email;
    
    @Enumerated(EnumType.STRING)
    private UserStatus status;
    
    private LocalDateTime createdAt;
    private LocalDateTime updatedAt;
    
    // Business logic method
    public void activate() {
        if (this.status != UserStatus.PENDING) {
            throw new IllegalStateException("User cannot be activated");
        }
        this.status = UserStatus.ACTIVE;
        this.updatedAt = LocalDateTime.now();
    }
}
Layered configuration example
# application-layered.yml
server:
  port: 8080

# Data source configuration
spring:
  datasource:
    master:
      url: jdbc:mysql://master.db:3306/app
      username: ${DB_USERNAME}
      password: ${DB_PASSWORD}
    slave:
      url: jdbc:mysql://slave.db:3306/app
      username: ${DB_USERNAME}
      password: ${DB_PASSWORD}
  
  # Cache configuration
  redis:
    cluster:
      nodes:
        - redis1:6379
        - redis2:6379
        - redis3:6379
    timeout: 2000ms

# Tiered cache configuration
cache:
  layers:
    l1: # local cache
      type: caffeine
      maximum-size: 10000
      expire-after-write: 5m
    l2: # distributed cache
      type: redis
      ttl: 30m
    l3: # database query cache
      type: mysql
      ttl: 1h

# Thread pool configuration (separated per layer)
thread-pool:
  controller:
    core-size: 20
    max-size: 100
    queue-capacity: 1000
  service:
    core-size: 30
    max-size: 150
    queue-capacity: 2000
  repository:
    core-size: 40
    max-size: 200
    queue-capacity: 3000
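The thread-pool block above is a custom property tree, not a Spring Boot built-in; below is a minimal sketch (bean and property names are illustrative, not part of the original) of how it could be materialized as one executor per layer:

// Hypothetical wiring for the per-layer pools defined above
@Configuration
public class LayerExecutorConfig {
    
    @Bean("serviceExecutor")
    public ExecutorService serviceExecutor(
            @Value("${thread-pool.service.core-size}") int coreSize,
            @Value("${thread-pool.service.max-size}") int maxSize,
            @Value("${thread-pool.service.queue-capacity}") int queueCapacity) {
        // A dedicated pool per layer keeps a slow repository from
        // exhausting the controller layer's threads
        return new ThreadPoolExecutor(
            coreSize, maxSize,
            60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(queueCapacity));
    }
}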

2. Microservices Architecture Example

Service decomposition strategy
// User service - deployed independently
@SpringBootApplication
@EnableDiscoveryClient
public class UserServiceApplication {
    public static void main(String[] args) {
        SpringApplication.run(UserServiceApplication.class, args);
    }
}

@RestController
@RequestMapping("/users")
public class UserServiceController {
    
    @Autowired
    private UserApplicationService userService;
    
    @PostMapping
    public ResponseEntity<UserDTO> createUser(@RequestBody CreateUserRequest request) {
        UserDTO user = userService.createUser(request);
        return ResponseEntity.status(HttpStatus.CREATED).body(user);
    }
}

// Order service - deployed independently
@SpringBootApplication
@EnableDiscoveryClient
public class OrderServiceApplication {
    public static void main(String[] args) {
        SpringApplication.run(OrderServiceApplication.class, args);
    }
}

@RestController
@RequestMapping("/orders")
public class OrderServiceController {
    
    @Autowired
    private OrderApplicationService orderService;
    
    @PostMapping
    public ResponseEntity<OrderDTO> createOrder(@RequestBody CreateOrderRequest request) {
        OrderDTO order = orderService.createOrder(request);
        return ResponseEntity.status(HttpStatus.CREATED).body(order);
    }
}
Inter-service communication - Feign clients
// Feign client for the user service
@FeignClient(name = "user-service", fallback = UserServiceFallback.class)
public interface UserServiceClient {
    
    @GetMapping("/users/{id}")
    ResponseEntity<UserDTO> getUser(@PathVariable("id") Long userId);
    
    @PostMapping("/users/{id}/credit")
    ResponseEntity<Void> deductCredit(@PathVariable("id") Long userId, 
                                     @RequestBody DeductCreditRequest request);
}

// The order service calling the user service
@Service
public class OrderService {
    
    @Autowired
    private UserServiceClient userServiceClient;
    
    @Autowired
    private OrderRepository orderRepository;
    
    @Transactional
    public OrderDTO createOrder(CreateOrderRequest request) {
        // NOTE: the local transaction does not cover the remote calls below;
        // cross-service consistency is handled by the Saga example that follows
        // 1. Validate the user
        UserDTO user = userServiceClient.getUser(request.getUserId()).getBody();
        if (user == null) {
            throw new BusinessException("User not found");
        }
        
        // 2. Deduct the user's credit
        userServiceClient.deductCredit(request.getUserId(), 
            new DeductCreditRequest(request.getTotalAmount()));
        
        // 3. Create the order
        Order order = Order.create(request);
        orderRepository.save(order);
        
        return OrderMapper.toDTO(order);
    }
}
Distributed transactions - the Saga pattern
// Saga orchestrator
@Component
public class OrderSagaOrchestrator {
    
    @Autowired
    private UserServiceClient userServiceClient;
    
    @Autowired
    private InventoryServiceClient inventoryServiceClient;
    
    @Autowired
    private PaymentServiceClient paymentServiceClient;
    
    @EventListener
    public void handleOrderCreated(OrderCreatedEvent event) {
        SagaTransaction saga = SagaTransaction.builder()
            .withStep("reserveInventory", this::reserveInventory)
            .withStep("deductUserCredit", this::deductUserCredit)
            .withStep("processPayment", this::processPayment)
            .withCompensation("cancelPayment", this::cancelPayment)
            .withCompensation("restoreUserCredit", this::restoreUserCredit)
            .withCompensation("releaseInventory", this::releaseInventory)
            .build();
            
        saga.execute(event.getOrderId());
    }
    
    private boolean reserveInventory(Long orderId) {
        return inventoryServiceClient.reserveInventory(orderId).isSuccess();
    }
    
    private boolean deductUserCredit(Long orderId) {
        // Schematic: assumes an order-scoped overload on the client; the
        // Feign interface shown earlier takes a userId plus a request body
        return userServiceClient.deductCredit(orderId).isSuccess();
    }
    
    private void cancelPayment(Long orderId) {
        paymentServiceClient.refund(orderId);
    }
}
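SagaTransaction above is not a library class; the following is a minimal sketch of what such a helper could look like, assuming (as in the example above) that compensations are registered in reverse step order:

// Illustrative sketch: run steps in order and, on the first failure,
// run the compensations for the already-completed steps in reverse
public class SagaTransaction {
    
    private final List<Function<Long, Boolean>> steps = new ArrayList<>();
    private final List<Consumer<Long>> compensations = new ArrayList<>();
    
    public static SagaTransaction builder() {
        return new SagaTransaction();
    }
    
    public SagaTransaction withStep(String name, Function<Long, Boolean> step) {
        steps.add(step);
        return this;
    }
    
    public SagaTransaction withCompensation(String name, Consumer<Long> compensation) {
        compensations.add(compensation);
        return this;
    }
    
    public SagaTransaction build() {
        return this;
    }
    
    public void execute(Long orderId) {
        int completed = 0;
        for (Function<Long, Boolean> step : steps) {
            if (!step.apply(orderId)) {
                break;
            }
            completed++;
        }
        if (completed < steps.size()) {
            // Compensations were registered in reverse step order, so the
            // compensation for step i sits at index (steps.size() - 1 - i)
            for (int i = completed - 1; i >= 0; i--) {
                compensations.get(steps.size() - 1 - i).accept(orderId);
            }
        }
    }
}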

3. Database and Table Sharding Example

ShardingSphere configuration
# sharding-sphere.yml
spring:
  shardingsphere:
    datasource:
      names: ds0,ds1,ds2,ds3
      ds0:
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        jdbc-url: jdbc:mysql://db0:3306/order_0
        username: ${DB_USERNAME}
        password: ${DB_PASSWORD}
      ds1:
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        jdbc-url: jdbc:mysql://db1:3306/order_1
        username: ${DB_USERNAME}
        password: ${DB_PASSWORD}
      ds2:
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        jdbc-url: jdbc:mysql://db2:3306/order_2
        username: ${DB_USERNAME}
        password: ${DB_PASSWORD}
      ds3:
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        jdbc-url: jdbc:mysql://db3:3306/order_3
        username: ${DB_USERNAME}
        password: ${DB_PASSWORD}
    
    sharding:
      tables:
        t_order:
          actual-data-nodes: ds$->{0..3}.t_order_$->{0..7}
          # A database strategy is required to route across the four data
          # sources; the modulo split below is one illustrative choice
          database-strategy:
            inline:
              sharding-column: order_id
              algorithm-expression: ds$->{order_id % 4}
          table-strategy:
            inline:
              sharding-column: order_id
              algorithm-expression: t_order_$->{order_id % 8}
          key-generator:
            column: order_id
            type: SNOWFLAKE
            props:
              worker-id: 1
        t_order_item:
          actual-data-nodes: ds$->{0..3}.t_order_item_$->{0..7}
          database-strategy:
            inline:
              sharding-column: order_id
              algorithm-expression: ds$->{order_id % 4}
          table-strategy:
            inline:
              sharding-column: order_id
              algorithm-expression: t_order_item_$->{order_id % 8}
      binding-tables:
        - t_order,t_order_item
      broadcast-tables:
        - t_config
        - t_dictionary
      - t_dictionary
Shard routing implementation
// Sharding strategy implementation
@Component
public class UserShardingStrategy implements StandardShardingAlgorithm<Long> {
    
    @Override
    public String doSharding(Collection<String> availableTargetNames, 
                           PreciseShardingValue<Long> shardingValue) {
        Long userId = shardingValue.getValue();
        
        // Compute the shard index from the user ID
        int shardIndex = (int) (userId % 4);
        String targetDataSource = "ds" + shardIndex;
        
        if (availableTargetNames.contains(targetDataSource)) {
            return targetDataSource;
        }
        
        throw new UnsupportedOperationException("No available datasource found");
    }
    
    @Override
    public Collection<String> doSharding(Collection<String> availableTargetNames,
                                       RangeShardingValue<Long> shardingValue) {
        Range<Long> valueRange = shardingValue.getValueRange();
        
        // A range query must fan out to every shard the range touches
        Set<String> result = new HashSet<>();
        for (long userId = valueRange.lowerEndpoint(); 
             userId <= valueRange.upperEndpoint(); userId++) {
            result.add("ds" + (int) (userId % 4));
            // With 4 shards, any range spanning 4+ IDs covers them all;
            // bail out early instead of walking a potentially huge range
            if (result.size() == 4) {
                break;
            }
        }
        
        return result;
    }
}

// Sharded query service
@Service
public class ShardedOrderService {
    
    @Autowired
    private OrderRepository orderRepository;
    
    public PageResult<OrderDTO> queryOrders(OrderQueryRequest request) {
        // 1. Parse the query conditions and determine the target shards
        Set<String> targetShards = determineTargetShards(request);
        
        // 2. Query each shard in parallel
        List<CompletableFuture<List<Order>>> futures = targetShards.stream()
            .map(shard -> CompletableFuture.supplyAsync(() -> 
                queryShard(shard, request), ExecutorHolder.getQueryExecutor()))
            .collect(Collectors.toList());
        
        // 3. Merge the results
        List<Order> allOrders = futures.stream()
            .map(CompletableFuture::join)
            .flatMap(List::stream)
            .sorted(Comparator.comparing(Order::getCreatedAt).reversed())
            .collect(Collectors.toList());
        
        // 4. Paginate the merged result (note: merge-then-paginate gets
        //    expensive for deep pages across many shards)
        return PageResult.of(allOrders, request.getPageNo(), request.getPageSize());
    }
}

4. Multi-Level Cache Example

Local cache + Redis cache
// Multi-level cache manager
@Component
public class MultiLevelCacheManager {
    
    private final LoadingCache<String, Object> localCache;
    private final RedisTemplate<String, Object> redisTemplate;
    
    public MultiLevelCacheManager(RedisTemplate<String, Object> redisTemplate) {
        this.redisTemplate = redisTemplate;
        this.localCache = Caffeine.newBuilder()
            .maximumSize(10000)
            .expireAfterWrite(5, TimeUnit.MINUTES)
            .recordStats()
            .build(this::loadFromRedis);
    }
    
    public <T> T get(String key, Class<T> type) {
        // 1. Look up the local cache
        Object value = localCache.getIfPresent(key);
        if (value != null) {
            return type.cast(value);
        }
        
        // 2. Fall back to Redis
        value = redisTemplate.opsForValue().get(key);
        if (value != null) {
            localCache.put(key, value);
            return type.cast(value);
        }
        
        return null;
    }
    
    public void put(String key, Object value, long timeout, TimeUnit unit) {
        // 1. Write to Redis
        redisTemplate.opsForValue().set(key, value, timeout, unit);
        
        // 2. Populate the local cache
        localCache.put(key, value);
    }
    
    public void evict(String key) {
        // 1. Invalidate the local cache
        localCache.invalidate(key);
        
        // 2. Delete the Redis entry
        redisTemplate.delete(key);
        
        // NOTE: in a multi-instance deployment the other nodes' local caches
        // also need invalidating, e.g. via a Redis pub/sub broadcast
    }
    
    private Object loadFromRedis(String key) {
        return redisTemplate.opsForValue().get(key);
    }
}

// Annotation-driven cache handling
@Aspect
@Component
public class CacheAspect {
    
    @Autowired
    private MultiLevelCacheManager cacheManager;
    
    @Around("@annotation(multiLevelCache)")
    public Object handleCache(ProceedingJoinPoint point, MultiLevelCache multiLevelCache) throws Throwable {
        String key = generateKey(point, multiLevelCache.key());
        
        // Look up the cache first
        Object value = cacheManager.get(key, multiLevelCache.returnType());
        if (value != null) {
            return value;
        }
        
        // Cache miss: execute the target method
        value = point.proceed();
        
        // Populate the cache with the result
        if (value != null) {
            cacheManager.put(key, value, multiLevelCache.timeout(), multiLevelCache.unit());
        }
        
        return value;
    }
    
    private String generateKey(ProceedingJoinPoint point, String keyExpression) {
        // Stub: a real implementation would evaluate the SpEL expression
        // against the method arguments to build the cache key
        return keyExpression;
    }
}
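The @MultiLevelCache annotation consumed by the aspect is not defined in the original; a minimal sketch of what it might look like:

// Illustrative definition of the custom caching annotation
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface MultiLevelCache {
    String key();                               // SpEL expression for the cache key
    Class<?> returnType() default Object.class; // type the cached value is cast to
    long timeout() default 30;                  // TTL magnitude
    TimeUnit unit() default TimeUnit.MINUTES;   // TTL unit
}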

Implementing the "Consolidation" Design Patterns

1. Batch Processing Example

Batch message processor
// Batch processor
@Slf4j
@Component
public class BatchMessageProcessor {
    
    @Autowired
    private MessageRepository messageRepository;
    
    @Autowired
    private NotificationService notificationService;
    
    private final BlockingQueue<Message> messageQueue = new LinkedBlockingQueue<>(10000);
    private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);
    
    @PostConstruct
    public void init() {
        // Scheduled batch-processing task
        scheduler.scheduleWithFixedDelay(this::processBatch, 
            0, 100, TimeUnit.MILLISECONDS);
        
        // Scheduled cleanup task
        scheduler.scheduleWithFixedDelay(this::cleanup, 
            0, 1, TimeUnit.MINUTES);
    }
    
    public void addMessage(Message message) {
        try {
            messageQueue.offer(message, 1, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("Failed to add message", e);
        }
    }
    
    private void processBatch() {
        List<Message> batch = new ArrayList<>();
        
        // Drain up to 100 queued messages into a batch
        messageQueue.drainTo(batch, 100);
        if (batch.isEmpty()) {
            return;
        }
        
        try {
            // Process the whole batch at once
            processMessages(batch);
        } catch (Exception e) {
            log.error("Failed to process batch", e);
            // Re-queue the failed messages (beware: a poison message will
            // retry forever; cap attempts or route to a dead-letter queue)
            batch.forEach(this::addMessage);
        }
    }
    
    // NOTE: @Transactional is ignored here: Spring's proxy-based AOP does not
    // apply to private methods or self-invoked calls; in real code move this
    // method into a separate @Transactional bean
    @Transactional
    public void processMessages(List<Message> messages) {
        // 1. Validate the data
        List<Message> validMessages = messages.stream()
            .filter(this::validateMessage)
            .collect(Collectors.toList());
        
        // 2. Bulk-insert into the database
        if (!validMessages.isEmpty()) {
            messageRepository.batchInsert(validMessages);
        }
        
        // 3. Send notifications in bulk
        List<Notification> notifications = validMessages.stream()
            .map(this::createNotification)
            .collect(Collectors.toList());
        
        if (!notifications.isEmpty()) {
            notificationService.batchSend(notifications);
        }
        
        // 4. Update statistics in bulk
        updateStatistics(validMessages);
    }
    
    private void cleanup() {
        // Purge expired messages
        messageRepository.deleteExpiredMessages();
    }
}

// Batch database operations
@Repository
public class MessageRepository {
    
    @Autowired
    private SqlSessionTemplate sqlSessionTemplate;
    
    public void batchInsert(List<Message> messages) {
        SqlSession session = sqlSessionTemplate.getSqlSessionFactory()
            .openSession(ExecutorType.BATCH, false);
        
        try {
            MessageMapper mapper = session.getMapper(MessageMapper.class);
            
            for (int i = 0; i < messages.size(); i++) {
                mapper.insert(messages.get(i));
                
                // Flush once per 100 records ((i + 1) so the very first
                // insert does not trigger an immediate flush)
                if ((i + 1) % 100 == 0) {
                    session.flushStatements();
                }
            }
            
            session.commit();
        } catch (Exception e) {
            session.rollback();
            throw new RuntimeException("Batch insert failed", e);
        } finally {
            session.close();
        }
    }
}
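One caveat worth knowing: with MySQL Connector/J, JDBC batching like the above only becomes a true multi-row insert when the driver is told to rewrite batches; a commonly used (driver-version dependent) datasource setting:

# MySQL Connector/J: rewrite JDBC batches into multi-value INSERT statements
spring:
  datasource:
    url: jdbc:mysql://db:3306/app?rewriteBatchedStatements=true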
Batch API call optimization
// Batch API client
@Component
public class BatchApiClient {
    
    private final RestTemplate restTemplate;
    private final ExecutorService executorService;
    
    public BatchApiClient() {
        this.restTemplate = new RestTemplate();
        this.executorService = Executors.newFixedThreadPool(10);
    }
    
    // Fetch user details in bulk
    public List<UserDTO> batchGetUsers(List<Long> userIds) {
        if (userIds.isEmpty()) {
            return Collections.emptyList();
        }
        
        // Split the ID list into fixed-size batches
        List<List<Long>> batches = Lists.partition(userIds, 100);
        
        // Process the batches concurrently
        List<CompletableFuture<List<UserDTO>>> futures = batches.stream()
            .map(batch -> CompletableFuture.supplyAsync(
                () -> getUsersBatch(batch), executorService))
            .collect(Collectors.toList());
        
        // Merge the per-batch results
        return futures.stream()
            .map(CompletableFuture::join)
            .flatMap(List::stream)
            .collect(Collectors.toList());
    }
    
    private List<UserDTO> getUsersBatch(List<Long> userIds) {
        // Batches are capped at 100 IDs above, keeping the query string well
        // under typical URL length limits; larger payloads belong in a POST body
        String url = "http://user-service/api/users/batch?ids=" + 
                    userIds.stream().map(String::valueOf).collect(Collectors.joining(","));
        
        ResponseEntity<List<UserDTO>> response = restTemplate.exchange(
            url, HttpMethod.GET, null, new ParameterizedTypeReference<List<UserDTO>>() {});
        
        return response.getBody();
    }
    
    // Batch update operation
    public void batchUpdateUsers(List<UserUpdateRequest> updates) {
        List<List<UserUpdateRequest>> batches = Lists.partition(updates, 50);
        
        batches.parallelStream().forEach(batch -> {
            String url = "http://user-service/api/users/batch";
            restTemplate.put(url, batch);
        });
    }
}

2. Event-Driven Architecture Example

Event bus with Spring Cloud Stream
# application-event.yml
spring:
  cloud:
    stream:
      bindings:
        order-output:
          destination: order-events
          content-type: application/json
        user-output:
          destination: user-events
          content-type: application/json
        inventory-input:
          destination: order-events
          group: inventory-service
          consumer:
            max-attempts: 3
            back-off-initial-interval: 1000
        notification-input:
          destination: user-events
          group: notification-service
        analytics-input:
          destination: order-events
          group: analytics-service
          consumer:
            concurrency: 5
      
      kafka:
        binder:
          brokers: kafka1:9092,kafka2:9092,kafka3:9092
          configuration:
            acks: all
            retries: 3
            batch.size: 16384
            linger.ms: 10
Publishing and consuming events
// Event publisher
@Component
public class EventPublisher {
    
    @Autowired
    private StreamBridge streamBridge;
    
    @Autowired
    private FailedEventRepository failedEventRepository;
    
    public void publishOrderCreated(Order order) {
        OrderCreatedEvent event = OrderCreatedEvent.builder()
            .orderId(order.getId())
            .userId(order.getUserId())
            .totalAmount(order.getTotalAmount())
            .items(order.getItems())
            .timestamp(Instant.now())
            .build();
        
        boolean sent = streamBridge.send("order-output", event);
        
        if (!sent) {
            // Handle the send failure
            handleSendFailure(event);
        }
    }
    
    public void publishUserRegistered(User user) {
        UserRegisteredEvent event = UserRegisteredEvent.builder()
            .userId(user.getId())
            .email(user.getEmail())
            .registrationTime(user.getCreatedAt())
            .build();
        
        streamBridge.send("user-output", event);
    }
    
    private void handleSendFailure(Event event) {
        // Record the failed event for a later retry
        failedEventRepository.save(new FailedEvent(event));
    }
}

// Event consumer
@Slf4j
@Component
public class InventoryEventHandler {
    
    @Autowired
    private InventoryService inventoryService;
    
    @Autowired
    private StreamBridge streamBridge;
    
    @StreamListener("inventory-input")
    public void handleOrderCreated(OrderCreatedEvent event) {
        log.info("Handling order created event: {}", event.getOrderId());
        
        try {
            // 1. Reserve the inventory
            inventoryService.reserveInventory(
                event.getOrderId(), 
                event.getItems()
            );
            
            // 2. Publish an inventory-reserved event
            publishInventoryReservedEvent(event);
            
        } catch (InsufficientInventoryException e) {
            // 3. Publish an insufficient-inventory event
            publishInventoryInsufficientEvent(event, e);
            
            // 4. Rethrow so the binder's retry handling kicks in
            throw new InventoryReservationException(event.getOrderId(), e);
        }
    }
    
    private void publishInventoryReservedEvent(OrderCreatedEvent event) {
        InventoryReservedEvent reservedEvent = InventoryReservedEvent.builder()
            .orderId(event.getOrderId())
            .items(event.getItems())
            .timestamp(Instant.now())
            .build();
        
        streamBridge.send("inventory-reserved-output", reservedEvent);
    }
}

// Event store for event sourcing
@Entity
@Table(name = "domain_events")
public class DomainEvent {
    
    @Id
    private String eventId;
    
    @Column(nullable = false)
    private String aggregateId;
    
    @Column(nullable = false)
    private String eventType;
    
    @Column(columnDefinition = "TEXT")
    private String eventData;
    
    @Column(nullable = false)
    private Instant createdAt;
    
    @Version
    private Long version;
}

@Repository
public interface DomainEventRepository extends JpaRepository<DomainEvent, String> {
    
    List<DomainEvent> findByAggregateIdOrderByCreatedAt(String aggregateId);
    
    List<DomainEvent> findByEventTypeAndCreatedAtBetween(
        String eventType, Instant start, Instant end
    );
}
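The natural companion to this event store is replay: rebuilding an aggregate from its ordered event stream. A minimal sketch; OrderAggregate and its apply overloads are assumptions for illustration, not part of the original:

// Illustrative sketch: reconstruct state by replaying stored events in order
@Service
public class EventReplayService {
    
    @Autowired
    private DomainEventRepository eventRepository;
    
    @Autowired
    private ObjectMapper objectMapper;
    
    public OrderAggregate replay(String orderId) throws JsonProcessingException {
        OrderAggregate aggregate = new OrderAggregate(orderId);
        
        // Events come back ordered by creation time (repository method above),
        // so applying them in sequence reproduces the current state
        for (DomainEvent event : eventRepository.findByAggregateIdOrderByCreatedAt(orderId)) {
            switch (event.getEventType()) {
                case "OrderCreated":
                    aggregate.apply(objectMapper.readValue(
                        event.getEventData(), OrderCreatedEvent.class));
                    break;
                case "InventoryReserved":
                    aggregate.apply(objectMapper.readValue(
                        event.getEventData(), InventoryReservedEvent.class));
                    break;
                default:
                    break; // unknown event types are skipped rather than failing replay
            }
        }
        return aggregate;
    }
}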

3. Shared Services Architecture Example

Unified authentication service
// Unified authentication service
@RestController
@RequestMapping("/auth")
public class AuthenticationController {
    
    @Autowired
    private AuthenticationService authService;
    
    @PostMapping("/login")
    public ResponseEntity<AuthResponse> login(@RequestBody LoginRequest request) {
        AuthResult result = authService.authenticate(
            request.getUsername(), 
            request.getPassword()
        );
        
        if (result.isSuccess()) {
            return ResponseEntity.ok(AuthResponse.builder()
                .token(result.getToken())
                .refreshToken(result.getRefreshToken())
                .expiresIn(result.getExpiresIn())
                .userInfo(result.getUserInfo())
                .build());
        }
        
        throw new AuthenticationException("Invalid credentials");
    }
    
    @PostMapping("/validate")
    public ResponseEntity<ValidationResponse> validateToken(
            @RequestHeader("Authorization") String token) {
        
        ValidationResult result = authService.validateToken(
            token.replace("Bearer ", "")
        );
        
        return ResponseEntity.ok(ValidationResponse.builder()
            .valid(result.isValid())
            .userId(result.getUserId())
            .authorities(result.getAuthorities())
            .build());
    }
    
    @PostMapping("/refresh")
    public ResponseEntity<AuthResponse> refreshToken(
            @RequestBody RefreshTokenRequest request) {
        
        AuthResult result = authService.refreshToken(request.getRefreshToken());
        
        return ResponseEntity.ok(AuthResponse.builder()
            .token(result.getToken())
            .refreshToken(result.getRefreshToken())
            .expiresIn(result.getExpiresIn())
            .build());
    }
}

// Authentication client
@Slf4j
@Component
public class AuthenticationClient {
    
    @Autowired
    private RestTemplate restTemplate;
    
    @Value("${auth.service.url}")
    private String authServiceUrl;
    
    public UserInfo validateToken(String token) {
        try {
            HttpHeaders headers = new HttpHeaders();
            headers.set("Authorization", "Bearer " + token);
            
            HttpEntity<Void> entity = new HttpEntity<>(headers);
            
            ResponseEntity<ValidationResponse> response = restTemplate.exchange(
                authServiceUrl + "/auth/validate",
                HttpMethod.POST,
                entity,
                ValidationResponse.class
            );
            
            ValidationResponse validation = response.getBody();
            if (validation != null && validation.isValid()) {
                return UserInfo.builder()
                    .userId(validation.getUserId())
                    .authorities(validation.getAuthorities())
                    .build();
            }
        } catch (Exception e) {
            log.error("Token validation failed", e);
        }
        
        throw new InvalidTokenException("Invalid or expired token");
    }
}

// Authentication interceptor
@Component
public class AuthenticationInterceptor implements HandlerInterceptor {
    
    @Autowired
    private AuthenticationClient authClient;
    
    @Override
    public boolean preHandle(HttpServletRequest request, 
                           HttpServletResponse response, 
                           Object handler) throws Exception {
        
        String token = extractToken(request);
        if (token == null) {
            response.setStatus(HttpStatus.UNAUTHORIZED.value());
            return false;
        }
        
        try {
            UserInfo userInfo = authClient.validateToken(token);
            request.setAttribute("currentUser", userInfo);
            return true;
        } catch (InvalidTokenException e) {
            response.setStatus(HttpStatus.UNAUTHORIZED.value());
            return false;
        }
    }
    
    private String extractToken(HttpServletRequest request) {
        String bearerToken = request.getHeader("Authorization");
        if (bearerToken != null && bearerToken.startsWith("Bearer ")) {
            return bearerToken.substring(7);
        }
        return null;
    }
}
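The interceptor only takes effect once it is registered with Spring MVC, which the original omits; a minimal sketch (the path patterns are illustrative):

// Without this registration the interceptor bean above is never invoked
@Configuration
public class WebMvcConfig implements WebMvcConfigurer {
    
    @Autowired
    private AuthenticationInterceptor authenticationInterceptor;
    
    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        registry.addInterceptor(authenticationInterceptor)
            .addPathPatterns("/api/**")        // protect business endpoints
            .excludePathPatterns("/auth/**");  // keep login/refresh reachable
    }
}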
Unified configuration center
// Configuration management service
@RestController
@RequestMapping("/config")
public class ConfigurationController {
    
    @Autowired
    private ConfigurationService configService;
    
    @Autowired
    private ApplicationEventPublisher eventPublisher;
    
    @GetMapping("/{application}/{profile}")
    public ResponseEntity<Configuration> getConfiguration(
            @PathVariable String application,
            @PathVariable String profile,
            @RequestParam(required = false) String label) {
        
        Configuration config = configService.getConfiguration(application, profile, label);
        return ResponseEntity.ok(config);
    }
    
    @PostMapping("/{application}/{profile}")
    public ResponseEntity<Configuration> updateConfiguration(
            @PathVariable String application,
            @PathVariable String profile,
            @RequestBody ConfigurationUpdateRequest request) {
        
        Configuration updated = configService.updateConfiguration(
            application, profile, request
        );
        
        // Publish a configuration-changed event
        eventPublisher.publishEvent(new ConfigurationChangedEvent(application, profile));
        
        return ResponseEntity.ok(updated);
    }
    
    @GetMapping("/{application}/{profile}/refresh")
    public ResponseEntity<Void> refreshConfiguration(
            @PathVariable String application,
            @PathVariable String profile) {
        
        configService.refreshConfiguration(application, profile);
        return ResponseEntity.ok().build();
    }
}

// Configuration client (the original's @ConfigurationProperties annotation
// bound nothing here, since values are pulled via @Value; it is dropped)
@Slf4j
@Component
public class ConfigurationClient {
    
    private final RestTemplate restTemplate = new RestTemplate();
    private final Map<String, Object> localCache = new ConcurrentHashMap<>();
    
    @Value("${config.server.url}")
    private String configServerUrl;
    
    @Value("${spring.application.name}")
    private String applicationName;
    
    @Value("${spring.profiles.active}")
    private String activeProfile;
    
    @PostConstruct
    public void init() {
        // Load the configuration at startup
        refreshConfiguration();
        
        // Refresh the configuration on a schedule
        Executors.newSingleThreadScheduledExecutor().scheduleWithFixedDelay(
            this::refreshConfiguration, 30, 30, TimeUnit.SECONDS
        );
    }
    
    public <T> T getProperty(String key, Class<T> targetType) {
        Object value = localCache.get(key);
        if (value != null) {
            return convertValue(value, targetType);
        }
        
        // Cache miss: fetch from the configuration center
        return fetchFromConfigServer(key, targetType);
    }
    
    private void refreshConfiguration() {
        try {
            String url = String.format("%s/config/%s/%s", 
                configServerUrl, applicationName, activeProfile);
            
            ResponseEntity<Map> response = restTemplate.getForEntity(url, Map.class);
            Map<String, Object> config = response.getBody();
            
            if (config != null) {
                localCache.clear();
                localCache.putAll(config);
                log.info("Configuration refreshed successfully");
            }
        } catch (Exception e) {
            log.error("Failed to refresh configuration", e);
        }
    }
}

4. Data Lake Example

Data ingestion service
// Data ingestion service
@Slf4j
@Service
public class DataIngestionService {
    
    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;
    
    @Autowired
    private MinioClient minioClient;
    
    @Autowired
    private SparkSession sparkSession;
    
    @Autowired
    private ObjectMapper objectMapper;
    
    // Batch data ingestion
    public void ingestBatchData(String sourcePath, String destinationBucket) {
        try {
            // 1. Read the source data
            Dataset<Row> rawData = sparkSession.read()
                .option("header", "true")
                .option("inferSchema", "true")
                .csv(sourcePath);
            
            // 2. Validate and clean the data
            Dataset<Row> cleanedData = rawData.filter(row -> 
                validateRow(row)
            ).map(this::cleanRow, RowEncoder.apply(getSchema()));
            
            // 3. Write to date-partitioned storage
            String partitionPath = String.format("s3a://%s/raw/%s/", 
                destinationBucket, getCurrentDatePartition());
            
            cleanedData.write()
                .mode(SaveMode.Append)
                .partitionBy("year", "month", "day")
                .parquet(partitionPath);
            
            // 4. Record ingestion metadata
            recordIngestionMetadata(sourcePath, destinationBucket, 
                cleanedData.count());
            
        } catch (Exception e) {
            log.error("Batch ingestion failed", e);
            throw new DataIngestionException("Failed to ingest data", e);
        }
    }
    
    // Real-time data ingestion
    @KafkaListener(topics = "real-time-events", groupId = "data-ingestion")
    public void ingestRealTimeData(ConsumerRecord<String, Object> record) {
        try {
            // 1. Parse the event
            DataEvent event = objectMapper.convertValue(record.value(), DataEvent.class);
            
            // 2. Validate the event
            if (!validateEvent(event)) {
                log.warn("Invalid event received: {}", event);
                return;
            }
            
            // 3. Build the data-lake target path
            String path = String.format("s3a://data-lake/streaming/%s/%s/", 
                event.getEventType(), getCurrentHourPartition());
            
            // 4. Write a Parquet file
            writeToParquet(event, path);
            
            // 5. Update the real-time index
            updateRealTimeIndex(event);
            
        } catch (Exception e) {
            log.error("Real-time ingestion failed", e);
            // Forward to the dead-letter queue
            kafkaTemplate.send("dlq-data-ingestion", record.value());
        }
    }
}

// Data processing service
@Slf4j
@Service
public class DataProcessingService {
    
    @Autowired
    private SparkSession sparkSession;
    
    // Batch processing job
    public void runBatchProcessingJob() {
        try {
            // 1. Read the raw data
            Dataset<Row> rawData = sparkSession.read()
                .parquet("s3a://data-lake/raw/*");
            
            // 2. Transform the data
            Dataset<Row> processedData = rawData
                .withColumn("processed_date", current_date())
                .withColumn("data_quality_score", calculateQualityScore(col("data")))
                .filter(col("data_quality_score").gt(0.8));
            
            // 3. Aggregate the data
            Dataset<Row> aggregatedData = processedData
                .groupBy("category", "date")
                .agg(
                    count("*").alias("record_count"),
                    avg("value").alias("avg_value"),
                    max("timestamp").alias("latest_timestamp")
                );
            
            // 4. Persist the processed output
            aggregatedData.write()
                .mode(SaveMode.Overwrite)
                .partitionBy("date")
                .parquet("s3a://data-lake/processed/aggregated/");
            
            // 5. Update the data catalog
            updateDataCatalog("processed_data", aggregatedData.schema());
            
        } catch (Exception e) {
            log.error("Batch processing failed", e);
            throw new DataProcessingException("Failed to process batch data", e);
        }
    }
    
    // Stream processing job
    public void runStreamProcessingJob() {
        try {
            // 1. Create the streaming DataFrame
            Dataset<Row> streamingDF = sparkSession.readStream()
                .format("kafka")
                .option("kafka.bootstrap.servers", "kafka:9092")
                .option("subscribe", "real-time-events")
                .load();
            
            // 2. Parse and transform the records
            Dataset<Row> parsedDF = streamingDF
                .selectExpr("CAST(value AS STRING)")
                .select(from_json(col("value"), getEventSchema()).alias("event"))
                .select("event.*");
            
            // 3. Windowed real-time aggregation
            Dataset<Row> aggregatedStream = parsedDF
                .withWatermark("timestamp", "10 minutes")
                .groupBy(
                    window(col("timestamp"), "5 minutes", "1 minute"),
                    col("eventType")
                )
                .agg(
                    count("*").alias("event_count"),
                    approx_count_distinct("userId").alias("unique_users")
                );
            
            // 4. Write out the results
            StreamingQuery query = aggregatedStream.writeStream()
                .outputMode("append")
                .format("parquet")
                .option("path", "s3a://data-lake/streaming/processed/")
                .option("checkpointLocation", "s3a://data-lake/checkpoints/streaming/")
                .trigger(Trigger.ProcessingTime("1 minute"))
                .start();
            
            query.awaitTermination();
            
        } catch (Exception e) {
            log.error("Stream processing failed", e);
            throw new DataProcessingException("Failed to process stream data", e);
        }
    }
}

Performance Optimization Best Practices

1. Optimization Strategies for "Separation" Scenarios

// Connection pool tuning
@Configuration
public class ConnectionPoolConfig {
    
    @Bean
    public DataSource dataSource() {
        HikariConfig config = new HikariConfig();
        
        // Size the pool relative to the shard count
        config.setMaximumPoolSize(20); // 20 connections per shard
        config.setMinimumIdle(5);
        config.setConnectionTimeout(30000);
        config.setIdleTimeout(600000);
        config.setMaxLifetime(1800000);
        
        // Connection validation: HikariCP has no testWhileIdle/testOnBorrow
        // knobs (those are DBCP/Tomcat settings); with a JDBC4 driver the
        // test query can even be omitted
        config.setConnectionTestQuery("SELECT 1");
        config.setValidationTimeout(5000);
        
        return new HikariDataSource(config);
    }
}

// Thread pool tuning
@Configuration
public class ThreadPoolConfig {
    
    @Bean("queryThreadPool")
    public ExecutorService queryThreadPool() {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
            20, // core pool size
            100, // maximum pool size
            60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(1000),
            new ThreadFactoryBuilder()
                .setNameFormat("query-thread-%d")
                .setDaemon(true)
                .build(),
            new ThreadPoolExecutor.CallerRunsPolicy()
        );
        
        return executor;
    }
    
    @Bean("batchThreadPool")
    public ExecutorService batchThreadPool() {
        return new ThreadPoolExecutor(
            10,
            50,
            120L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(5000),
            new ThreadFactoryBuilder()
                .setNameFormat("batch-thread-%d")
                .setDaemon(true)
                .build()
        );
    }
}

2. Optimization Strategies for "Consolidation" Scenarios

// Batch size tuning
@Component
public class BatchOptimizer {
    
    // Compute the batch size dynamically
    public int calculateOptimalBatchSize(int availableMemory, int recordSize) {
        // Reserve 30% of memory as a buffer
        int maxBatchSize = (int) ((availableMemory * 0.7) / recordSize);
        
        // Clamp to a sensible range
        return Math.min(Math.max(maxBatchSize, 100), 10000);
    }
    
    // Adaptive batch processing
    public void adaptiveBatchProcessing(List<DataRecord> records) {
        int recordSize = estimateRecordSize(records.get(0));
        int availableMemory = getAvailableMemory();
        int batchSize = calculateOptimalBatchSize(availableMemory, recordSize);
        
        List<List<DataRecord>> batches = Lists.partition(records, batchSize);
        
        batches.parallelStream().forEach(batch -> {
            processBatchWithTimeout(batch, calculateTimeout(batch.size()));
        });
    }
    
    private long calculateTimeout(int batchSize) {
        // Base timeout plus a per-record processing budget
        return 5000 + (batchSize * 10L); // 5 s base + 10 ms per record
    }
}
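estimateRecordSize and getAvailableMemory are left undefined above; one plausible sketch using the JVM Runtime (the record-size heuristic is an assumption, not a precise measurement):

// Illustrative helpers for BatchOptimizer
private int getAvailableMemory() {
    Runtime rt = Runtime.getRuntime();
    // Obtainable headroom = max heap minus memory currently in use
    long used = rt.totalMemory() - rt.freeMemory();
    return (int) Math.min(Integer.MAX_VALUE, rt.maxMemory() - used);
}

private int estimateRecordSize(DataRecord record) {
    // Rough proxy: serialized size stands in for the in-memory footprint
    return record.toString().getBytes(StandardCharsets.UTF_8).length;
}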

// Cache warm-up strategy
@Slf4j
@Component
public class CacheWarmupStrategy {
    
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;
    
    @EventListener(ApplicationReadyEvent.class)
    public void warmupCache() {
        // 1. Identify the hot keys
        List<String> hotKeys = identifyHotKeys();
        
        // 2. Load the hot data in parallel
        hotKeys.parallelStream().forEach(this::loadAndCache);
        
        log.info("Cache warmup completed for {} keys", hotKeys.size());
    }
    
    // Access-pattern-aware cache loading
    public void smartCacheLoading() {
        // Predict from the observed access pattern
        AccessPattern pattern = analyzeAccessPattern();
        
        if (pattern.isSequential()) {
            // 顺序加载
            preloadSequentialData(pattern);
        } else if (pattern.isRandom()) {
            // 随机加载热门数据
            preloadPopularData(pattern);
        } else if (pattern.isTemporal()) {
            // 时间局部性加载
            preloadTemporalData(pattern);
        }
    }
}

3. Monitoring and Metrics

// Performance metrics collection
@Aspect
@Component
public class PerformanceMetricsCollector {
    
    private final MeterRegistry meterRegistry;
    private final Counter requestCounter;
    private final Timer responseTimer;
    
    @Autowired
    private DataSource dataSource;
    
    public PerformanceMetricsCollector(MeterRegistry meterRegistry) {
        this.meterRegistry = meterRegistry;
        
        this.requestCounter = Counter.builder("api.requests.total")
            .description("Total number of API requests")
            .register(meterRegistry);
            
        this.responseTimer = Timer.builder("api.response.time")
            .description("API response time")
            .register(meterRegistry);
            
        // Micrometer's Gauge.builder takes the state object and value
        // function up front, not at register() time
        Gauge.builder("db.connections.active", this,
                PerformanceMetricsCollector::getActiveConnectionCount)
            .description("Number of active database connections")
            .register(meterRegistry);
    }
    
    @Around("@within(org.springframework.web.bind.annotation.RestController)")
    public Object measureApiPerformance(ProceedingJoinPoint joinPoint) throws Throwable {
        requestCounter.increment();
        
        return responseTimer.recordCallable(() -> {
            try {
                return joinPoint.proceed();
            } catch (Throwable t) {
                throw new RuntimeException(t);
            }
        });
    }
    
    private double getActiveConnectionCount() {
        // Current number of active connections in the Hikari pool
        return ((HikariDataSource) dataSource).getHikariPoolMXBean().getActiveConnections();
    }
}

// Architecture health check
@Component
public class ArchitectureHealthIndicator implements HealthIndicator {
    
    @Autowired
    private DataSource dataSource;
    
    @Autowired
    private RedisTemplate<String, String> redisTemplate;
    
    @Override
    public Health health() {
        Health.Builder builder = Health.up();
        
        try {
            // Check database connectivity
            checkDatabaseHealth(builder);
            
            // Check Redis connectivity
            checkRedisHealth(builder);
            
            // Check inter-service dependencies
            checkServiceDependencies(builder);
            
            // Check shard health
            checkShardingHealth(builder);
            
        } catch (Exception e) {
            builder.down().withDetail("error", e.getMessage());
        }
        
        return builder.build();
    }
    
    private void checkDatabaseHealth(Health.Builder builder) {
        try (Connection connection = dataSource.getConnection();
             PreparedStatement stmt = connection.prepareStatement("SELECT 1");
             ResultSet rs = stmt.executeQuery()) {
            
            if (rs.next()) {
                builder.withDetail("database", "UP");
            } else {
                builder.down().withDetail("database", "DOWN");
            }
        } catch (SQLException e) {
            builder.down().withDetail("database", "ERROR: " + e.getMessage());
        }
    }
}

These examples show how the "separation" and "consolidation" principles are applied in real projects; the concrete code and configuration should make the best practices behind these patterns easier to grasp.
