<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:context="http://www.springframework.org/schema/context"
       xmlns:aop="http://www.springframework.org/schema/aop"
       xmlns:tx="http://www.springframework.org/schema/tx"
       xsi:schemaLocation="
        http://www.springframework.org/schema/beans
        http://www.springframework.org/schema/beans/spring-beans.xsd
        http://www.springframework.org/schema/context
        http://www.springframework.org/schema/context/spring-context.xsd
        http://www.springframework.org/schema/aop
        http://www.springframework.org/schema/aop/spring-aop.xsd
        http://www.springframework.org/schema/tx
        http://www.springframework.org/schema/tx/spring-tx.xsd">

    <!-- Externalized configuration (jdbc_url, jdbc_username, jdbc_password, ...) -->
    <context:property-placeholder location="classpath*:config.properties"/>

    <!-- Enable @AspectJ support; CGLIB class proxies so non-interface beans can be advised -->
    <aop:aspectj-autoproxy proxy-target-class="true"/>

    <!-- Component scanning; @Controller beans belong to the web (MVC) context and are excluded here -->
    <context:component-scan base-package="com.dji.pa">
        <context:exclude-filter type="annotation" expression="org.springframework.stereotype.Controller"/>
    </context:component-scan>

    <!-- Connection-pool TEMPLATE. scope="prototype" is required: ClusterDataSource asks the
         container for a fresh DruidDataSource per physical server and rewrites its URL.
         NOTE(review): Spring does not call destroy-method on prototype beans, so pool
        shutdown is the responsibility of whoever obtains the instances. -->
    <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close" scope="prototype">
        <property name="url" value="${jdbc_url}"/>
        <property name="username" value="${jdbc_username}"/>
        <property name="password" value="${jdbc_password}"/>
        <property name="initialSize" value="10"/>
        <property name="minIdle" value="10"/>
        <property name="maxActive" value="20"/>
        <!-- Max time (ms) to wait for a connection from the pool -->
        <property name="maxWait" value="2000"/>
        <!-- Interval (ms) between eviction runs that close idle connections -->
        <property name="timeBetweenEvictionRunsMillis" value="60000"/>
        <!-- Minimum time (ms) a connection may sit idle before it becomes evictable -->
        <property name="minEvictableIdleTimeMillis" value="30000"/>
        <property name="testWhileIdle" value="true"/>
        <property name="testOnBorrow" value="false"/>
        <property name="testOnReturn" value="false"/>
        <!-- PSCache: recommended true for Oracle; for MySQL, and for heavily sharded setups, false is preferable -->
        <property name="poolPreparedStatements" value="true"/>
        <property name="maxPoolPreparedStatementPerConnectionSize" value="20"/>
        <!-- Druid monitoring/statistics filter -->
        <property name="filters" value="stat"/>
    </bean>

    <!-- MyBatis session factory bound to the read/write-splitting cluster data source -->
    <bean id="sqlSessionFactory" class="org.mybatis.spring.SqlSessionFactoryBean">
        <property name="dataSource" ref="clusterDataSource"/>
        <property name="mapperLocations" value="classpath*:com/dji/pa/dao/mapper/*Mapper.xml"/>
    </bean>

    <!-- Mapper interfaces under this package are discovered and registered automatically -->
    <bean class="org.mybatis.spring.mapper.MapperScannerConfigurer">
        <property name="basePackage" value="com.dji.pa.dao.mapper"/>
        <property name="sqlSessionFactoryBeanName" value="sqlSessionFactory"/>
    </bean>

    <!-- Transaction manager on top of the cluster data source -->
    <bean id="transactionManager" class="org.springframework.jdbc.datasource.DataSourceTransactionManager">
        <property name="dataSource" ref="clusterDataSource"/>
    </bean>

    <!-- Declarative transaction attributes, matched by method-name prefix.
         DataSourceProcessor later inspects these rules: read-only ones are routed to read servers. -->
    <tx:advice id="txAdvice" transaction-manager="transactionManager">
        <tx:attributes>
            <!-- Read-only query methods -->
            <tx:method name="get*" propagation="NOT_SUPPORTED" isolation="DEFAULT" read-only="true"/>
            <tx:method name="find*" propagation="NOT_SUPPORTED" isolation="DEFAULT" read-only="true"/>
            <tx:method name="query*" propagation="NOT_SUPPORTED" isolation="DEFAULT" read-only="true"/>
            <tx:method name="count*" propagation="NOT_SUPPORTED" isolation="DEFAULT" read-only="true"/>
            <!-- Mutating methods -->
            <tx:method name="update*" propagation="REQUIRED" isolation="DEFAULT" read-only="false"/>
            <tx:method name="add*" propagation="REQUIRED" isolation="DEFAULT" read-only="false"/>
            <tx:method name="insert*" propagation="REQUIRED" isolation="DEFAULT" read-only="false"/>
            <tx:method name="del*" propagation="REQUIRED" isolation="DEFAULT" read-only="false"/>
        </tx:attributes>
    </tx:advice>

    <aop:config expose-proxy="true">
        <!-- Transactions (and routing) apply to the service implementation layer only -->
        <aop:pointcut id="txPointCut" expression="execution(* com.dji.pa.service.imp.*.*(..))"/>
        <aop:advisor advice-ref="txAdvice" pointcut-ref="txPointCut"/>
        <!-- order = Integer.MIN_VALUE: the routing aspect must run before the transaction advisor -->
        <aop:aspect order="-2147483648" ref="dataSourceProcessor">
            <aop:around pointcut-ref="txPointCut" method="determineReadOrWriteDB"/>
        </aop:aspect>
    </aop:config>

    <!-- Cluster data source: failover retry + read/write splitting -->
    <bean id="clusterDataSource" class="com.dji.cluster.db.dataSouce.ClusterDataSource">
        <!-- Bean name of the prototype pool used as a template for every server -->
        <property name="templetDataSource" value="dataSource"/>
        <property name="processor" ref="dataSourceProcessor"/>
        <!-- Write servers -->
        <property name="writeDBList">
            <list>
                <bean class="com.dji.cluster.db.dataSouce.Server">
                    <property name="ipAndPort" value="127.0.0.1:3306"/>
                    <property name="weight" value="2"/>
                    <property name="serverType" value="2"/>
                </bean>
            </list>
        </property>
        <!-- Read servers -->
        <property name="readDBList">
            <list>
                <bean class="com.dji.cluster.db.dataSouce.Server">
                    <property name="ipAndPort" value="127.0.0.1:3307"/>
                    <property name="weight" value="2"/>
                    <property name="serverType" value="1"/>
                </bean>
                <bean class="com.dji.cluster.db.dataSouce.Server">
                    <property name="ipAndPort" value="127.0.0.1:3308"/>
                    <property name="weight" value="1"/>
                    <property name="serverType" value="1"/>
                </bean>
            </list>
        </property>
    </bean>

    <bean id="dataSourceProcessor" class="com.dji.cluster.db.dataSouce.DataSourceProcessor">
        <!-- When the write server dies, may a read server take over writes? -->
        <property name="enableAsWriteWhenWriteDown" value="true"/>
        <!-- When every read server dies, may queries fall back to the write server? -->
        <property name="enableAsReadWhenAllReadDown" value="true"/>
        <property name="clusterDataSource" ref="clusterDataSource"/>
        <!-- Retry budget after a connectivity failure -->
        <property name="failedTimeToTryAgain" value="2"/>
    </bean>
</beans>
package com.dji.cluster.db.dataSouce;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.jdbc.datasource.AbstractDataSource;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
import com.alibaba.druid.pool.DruidDataSource;
/**
 * Cluster proxy data source: fronts one DruidDataSource per physical server and
 * routes {@code getConnection()} through {@link DataSourceProcessor}
 * (read/write splitting plus failover).
 *
 * @author xinlin.wang
 */
public class ClusterDataSource extends AbstractDataSource implements InitializingBean {

    private static final Logger log = LoggerFactory.getLogger(ClusterDataSource.class);

    /** Read servers; mutated at runtime on failover, hence copy-on-write. */
    private CopyOnWriteArrayList<Server> readDBList;

    /** Write servers; mutated at runtime on failover, hence copy-on-write. */
    private CopyOnWriteArrayList<Server> writeDBList;

    /** Bean name of the prototype DruidDataSource used as a template for every server. */
    private String templetDataSource;

    /** Server chosen for the current thread (set by DataSourceProcessor.getMatchServer). */
    private ThreadLocal<Server> holder = new ThreadLocal<Server>();

    /** Routing / failover decision processor. */
    private DataSourceProcessor processor;

    @Autowired
    private ApplicationContext applicationContext;

    /**
     * Builds one connection pool per configured server. Runs exactly once during
     * container startup, so no synchronization is needed here.
     *
     * Fixes over the previous revision:
     * - the "at least one server" validation used to reject the perfectly valid
     *   read-empty/write-present configuration; it now fires only when BOTH lists
     *   are empty;
     * - a missing (null) list no longer causes a NullPointerException;
     * - with a single-role configuration the first server of the configured role
     *   is shared with the other role (single-database mode) instead of being
     *   dropped from its own list.
     */
    public void afterPropertiesSet() throws Exception {
        if (!StringUtils.hasText(templetDataSource)) {
            log.error("未指定模板库,多数据源将根据URL参数进行初始化和此模板进行配置");
            System.exit(-1);
        }
        if (readDBList == null) {
            readDBList = new CopyOnWriteArrayList<Server>();
        }
        if (writeDBList == null) {
            writeDBList = new CopyOnWriteArrayList<Server>();
        }
        if (readDBList.isEmpty() && writeDBList.isEmpty()) {
            log.error("读库和写库至少要指定一个");
            System.exit(-1);
        }
        // Create a dedicated pool for every configured server.
        for (Server item : readDBList) {
            item.setDataSource(createDataSource(item.getIpAndPort()));
        }
        for (Server item : writeDBList) {
            item.setDataSource(createDataSource(item.getIpAndPort()));
        }
        // Single-role deployments: share the first server with the missing role,
        // effectively degrading to a single-database setup.
        if (writeDBList.isEmpty()) {
            writeDBList.add(readDBList.get(0));
        } else if (readDBList.isEmpty()) {
            readDBList.add(writeDBList.get(0));
        }
    }

    /**
     * Clones the prototype template pool and points it at the given server.
     * Exits the JVM on failure, matching the fail-fast startup policy above.
     *
     * @param ipAndPort "host:port" of the target database server
     * @return an initialized pool for that server (never null: failure exits the JVM)
     */
    private DruidDataSource createDataSource(String ipAndPort) {
        DruidDataSource dataSource = null;
        try {
            // Prototype scope: each call yields a fresh, independent pool instance.
            dataSource = applicationContext.getBean(templetDataSource, DruidDataSource.class);
            String url = dataSource.getUrl();
            dataSource.restart(); // required, otherwise the new URL is not picked up
            // literal replace (the old replaceAll treated "host:port" as a regex,
            // where '.' matches any character)
            url = url.replace(CommonUtils.getIPAndPort(url), ipAndPort);
            dataSource.setUrl(url);
            dataSource.init();
            log.debug("创建数据连接池:" + url);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
            System.exit(-1);
        }
        return dataSource;
    }

    public List<Server> getReadDBList() {
        return readDBList;
    }

    public ThreadLocal<Server> getHolder() {
        return holder;
    }

    public void setHolder(ThreadLocal<Server> holder) {
        this.holder = holder;
    }

    public void setTempletDataSource(String templetDataSource) {
        this.templetDataSource = templetDataSource;
    }

    /** Delegates to the server the processor selects for the current thread. */
    public Connection getConnection() throws SQLException {
        return processor.getMatchServer().getDataSource().getConnection();
    }

    public Connection getConnection(String username, String password) throws SQLException {
        return processor.getMatchServer().getDataSource().getConnection(username, password);
    }

    public void setProcessor(DataSourceProcessor processor) {
        this.processor = processor;
    }

    public void setApplicationContext(ApplicationContext applicationContext) {
        this.applicationContext = applicationContext;
    }

    public List<Server> getWriteDBList() {
        return writeDBList;
    }

    public void setWriteDBList(CopyOnWriteArrayList<Server> writeDBList) {
        this.writeDBList = writeDBList;
    }

    public void setReadDBList(CopyOnWriteArrayList<Server> readDBList) {
        this.readDBList = readDBList;
    }
}
package com.dji.cluster.db.dataSouce;
import java.util.List;
public class CommonUtils {

    /**
     * Extracts "host:port" from a JDBC URL.
     *
     * Supports MySQL/Cobar style URLs ("jdbc:mysql://host:port/db?...") and
     * Oracle thin style URLs ("jdbc:oracle:thin:@host:port:sid" or
     * "...thin:@//host:port/service"). The previous revision cut fixed character
     * ranges (13..27 / 18..33) out of the URL, which only worked for one exact
     * host/port length; the address is now located structurally.
     *
     * @param templetDriverUrl JDBC URL of the template data source, may be null
     * @return "host:port" (or just "host" when no port is present), null for null input
     * @throws IllegalArgumentException for unsupported or malformed URLs
     */
    public static String getIPAndPort(String templetDriverUrl) {
        if (templetDriverUrl == null) {
            return null;
        }
        if (templetDriverUrl.startsWith("jdbc:mysql:") || templetDriverUrl.startsWith("jdbc:cobar:")
                || templetDriverUrl.startsWith("jdbc:log4jdbc:mysql:")) {
            int marker = templetDriverUrl.indexOf("//");
            if (marker < 0) {
                throw new IllegalArgumentException("不支持的数据库类型");
            }
            return hostAndPort(templetDriverUrl, marker + 2);
        } else if (templetDriverUrl.startsWith("jdbc:oracle:")
                || templetDriverUrl.startsWith("jdbc:log4jdbc:oracle:")) {
            int at = templetDriverUrl.indexOf('@');
            if (at < 0) {
                throw new IllegalArgumentException("不支持的数据库类型");
            }
            int start = at + 1;
            if (templetDriverUrl.startsWith("//", start)) {
                start += 2; // "thin:@//host:port/service" form
            }
            return hostAndPort(templetDriverUrl, start);
        } else {
            throw new IllegalArgumentException("不支持的数据库类型");
        }
    }

    /**
     * Reads the host token starting at {@code start} and, when a colon follows,
     * appends the digits of the port; stops at '/', '?' or a non-digit.
     */
    private static String hostAndPort(String url, int start) {
        int end = start;
        while (end < url.length() && url.charAt(end) != ':' && url.charAt(end) != '/'
                && url.charAt(end) != '?') {
            end++;
        }
        if (end < url.length() && url.charAt(end) == ':') {
            end++;
            while (end < url.length() && Character.isDigit(url.charAt(end))) {
                end++;
            }
        }
        return url.substring(start, end);
    }

    /**
     * Weighted random selection — not implemented; always returns null.
     * TODO(review): implement or remove; no caller in this module uses it.
     */
    public static Server getServerByWeightedRandomAlgorithm(List<Server> readDBServerList) {
        return null;
    }

    /**
     * Smooth weighted round-robin selection: each round every server's
     * currentWeight grows by its effectiveWeight, the largest currentWeight
     * wins, and the winner is debited by the sum of all effective weights.
     * Effective weights ramp back up towards the configured weight, easing a
     * recovered server back into rotation.
     *
     * NOTE(review): per the original author's note, this mutates shared Server
     * state and should be called under external synchronization; concurrent
     * calls may perturb the rotation order.
     *
     * @param readDBList candidate read servers
     * @return the selected server, or null when the list is empty
     */
    public static Server getServerByoptimizationOfWeightedOrder(List<Server> readDBList) {
        Server server = null;
        Server best = null;
        int total = 0;
        for (int i = 0, len = readDBList.size(); i < len; i++) {
            server = readDBList.get(i);
            server.setCurrentWeight(server.getCurrentWeight() + server.getEffectiveWeight());
            total += server.getEffectiveWeight();
            if (server.getEffectiveWeight() < server.getWeight()) {
                server.setEffectiveWeight(server.getEffectiveWeight() + 1);
            }
            if (best == null || server.getCurrentWeight() > best.getCurrentWeight()) {
                best = server;
            }
        }
        if (best == null) {
            return null;
        }
        best.setCurrentWeight(best.getCurrentWeight() - total);
        return best;
    }

    /**
     * Heuristic connectivity-failure detection based on the exception text.
     *
     * @param exceptionMsg stringified exception/cause, may be null
     * @return true when the text indicates a lost database connection
     */
    public static boolean isCommunicationsException(String exceptionMsg) {
        return exceptionMsg != null
                && (exceptionMsg.contains("CannotGetJdbcConnectionException")
                        || exceptionMsg.contains("CommunicationsException"));
    }
}
package com.dji.cluster.db.dataSouce;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Vector;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.aspectj.lang.ProceedingJoinPoint;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.config.BeanPostProcessor;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.interceptor.NameMatchTransactionAttributeSource;
import org.springframework.transaction.interceptor.RuleBasedTransactionAttribute;
import org.springframework.transaction.interceptor.TransactionAttribute;
import org.springframework.util.PatternMatchUtils;
import org.springframework.util.ReflectionUtils;
public class DataSourceProcessor implements BeanPostProcessor, InitializingBean {

    private static final Logger log = LoggerFactory.getLogger(DataSourceProcessor.class);

    /** Method-name pattern -> TRUE when that pattern is forced to a read server. */
    private Map<String, Boolean> readMethodMap = new HashMap<String, Boolean>();
    /** Servers currently down; ServerWatcher probes this list and restores recovered servers. */
    private List<Server> filaDataSouceList;
    private ClusterDataSource clusterDataSource;
    /** When true, read-only methods always go to a read server (NOT_SUPPORTED propagation). */
    private boolean forceChoiceReadWhenWrite = false;
    /** Allow promoting a read server to write duty when a write server dies. */
    private boolean enableAsWriteWhenWriteDown = false;
    /** Allow querying a write server when every read server is down. */
    private boolean enableAsReadWhenAllReadDown = false;
    /** Retry budget after a connectivity failure. */
    private int failedTimeToTryAgain = 2;
    private ServerWatcher watcher;
    /** Shared with ServerWatcher; its object monitor doubles as the wait/notify channel. */
    private Lock lock;

    /**
     * Starts the recovery watcher as a daemon thread.
     */
    public void afterPropertiesSet() throws Exception {
        filaDataSouceList = new Vector<Server>();
        watcher = new ServerWatcher(filaDataSouceList, clusterDataSource, lock = new ReentrantLock());
        Thread watcherThread = new Thread(watcher, "cluster-db-server-watcher");
        watcherThread.setDaemon(true);
        watcherThread.start();
    }

    /**
     * Step 1: inspect the tx:advice name map, remember which method-name
     * patterns are read-only, and rewrite their propagation: NOT_SUPPORTED
     * (suspend any surrounding write transaction, forcing the read server) when
     * forceChoiceReadWhenWrite is set, otherwise SUPPORTS (so reads can
     * participate in a surrounding write transaction).
     */
    @SuppressWarnings("unchecked")
    public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
        if (!(bean instanceof NameMatchTransactionAttributeSource)) {
            return bean;
        }
        try {
            NameMatchTransactionAttributeSource nmtas = (NameMatchTransactionAttributeSource) bean;
            // The name map is private; reflect into it to read and adjust the rules.
            Field nameMapField = ReflectionUtils.findField(NameMatchTransactionAttributeSource.class, "nameMap");
            nameMapField.setAccessible(true);
            Map<String, TransactionAttribute> nameMap = (Map<String, TransactionAttribute>) nameMapField.get(nmtas);
            for (Entry<String, TransactionAttribute> entry : nameMap.entrySet()) {
                RuleBasedTransactionAttribute attr = (RuleBasedTransactionAttribute) entry.getValue();
                if (!attr.isReadOnly()) {
                    // only read-only rules are routed to read servers
                    continue;
                }
                Boolean isForceChoiceRead = Boolean.FALSE;
                if (forceChoiceReadWhenWrite) {
                    // NOT_SUPPORTED suspends any surrounding (write) transaction
                    attr.setPropagationBehavior(Propagation.NOT_SUPPORTED.value());
                    isForceChoiceRead = Boolean.TRUE;
                } else {
                    // SUPPORTS lets the read participate in a surrounding write transaction
                    attr.setPropagationBehavior(Propagation.SUPPORTS.value());
                }
                log.debug(entry.getKey() + " , " + isForceChoiceRead);
                readMethodMap.put(entry.getKey(), isForceChoiceRead);
            }
        } catch (Exception e) {
            throw new RuntimeException("选择器:处理读写事务异常", e);
        }
        return bean;
    }

    public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
        return bean;
    }

    /**
     * Step 2: around advice over the service layer. Marks the thread read or
     * write, then executes the join point with a bounded retry loop;
     * connectivity failures trigger a server switch before the next attempt.
     *
     * Fix over the previous revision: when the retry budget is exhausted the
     * last failure is rethrown instead of silently returning null.
     *
     * @param pjp the intercepted service-layer invocation
     * @return the join point's return value
     * @throws Throwable the last failure when all retries are exhausted
     */
    public Object determineReadOrWriteDB(ProceedingJoinPoint pjp) throws Throwable {
        String methodName = pjp.getSignature().getName();
        int time = 0;
        Throwable lastError = null;
        try {
            if (isChoiceReadDB(methodName)) {
                ReadWriteDecision.markRead(); // reads get the failover/retry treatment below
                while (time <= failedTimeToTryAgain) {
                    try {
                        return pjp.proceed();
                    } catch (Throwable e) {
                        lastError = e;
                        time++;
                        switchDBIfReadError(pjp, e);
                    }
                }
            } else {
                while (time <= failedTimeToTryAgain) {
                    try {
                        ReadWriteDecision.markWrite();
                        return pjp.proceed();
                    } catch (Throwable e) {
                        lastError = e;
                        time++;
                        // whether writes may fail over to a read server is governed by
                        // enableAsWriteWhenWriteDown (never acceptable for sharded
                        // high-concurrency setups; fine for mutual-backup pairs)
                        switchDBIfWriteError(pjp, e);
                    }
                }
            }
        } finally {
            ReadWriteDecision.reset(); // must run after the retry loops
        }
        // retry budget exhausted: propagate the last failure instead of returning null
        throw lastError;
    }

    /**
     * Step 2.1: decide read vs write for a method name. Precedence:
     * forced-read pattern > already-in-write-flow > read pattern > default write.
     */
    private boolean isChoiceReadDB(String methodName) {
        String bestNameMatch = null;
        for (String mappedName : this.readMethodMap.keySet()) {
            if (PatternMatchUtils.simpleMatch(mappedName, methodName)) {
                bestNameMatch = mappedName;
                break;
            }
        }
        Boolean isForceChoiceRead = readMethodMap.get(bestNameMatch);
        // forced read: always use a read server (equals(), not ==, for boxed safety)
        if (Boolean.TRUE.equals(isForceChoiceRead)) {
            return true;
        }
        // the thread already chose a write server: stay on it
        if (ReadWriteDecision.isChoiceWrite()) {
            return false;
        }
        // matched a (non-forced) read-only pattern: use a read server
        if (isForceChoiceRead != null) {
            return true;
        }
        // no pattern matched: default to the write server
        return false;
    }

    /**
     * Textual form of the failure cause; falls back to the throwable itself when
     * no cause is set (the old code NPEd on getCause() == null).
     */
    private static String causeText(Throwable error) {
        Throwable cause = error.getCause();
        return String.valueOf(cause != null ? cause : error);
    }

    /**
     * Step 2.2: on a read failure caused by lost connectivity, remove the dead
     * server from the read rotation, optionally fall back to a write server when
     * no read server is left, and hand the dead server to the watcher.
     * Called concurrently by every request that hits the failure, hence the locking.
     */
    private void switchDBIfReadError(ProceedingJoinPoint pjp, Throwable error) {
        if (CommonUtils.isCommunicationsException(causeText(error))) {
            Server failServer = clusterDataSource.getHolder().get();
            String ipAndPort = failServer.getIpAndPort();
            synchronized (lock) {
                List<Server> readServerList = clusterDataSource.getReadDBList();
                for (int index = 0; index < readServerList.size(); index++) {
                    Server candidate = readServerList.get(index);
                    if (candidate.getIpAndPort().equals(ipAndPort)) {
                        try {
                            if (readServerList.contains(failServer)) {
                                // remove() returning true means THIS thread won the race
                                // to handle the failure; concurrent threads see false
                                if (readServerList.remove(candidate)) {
                                    log.warn("选择器:数据库down机,移除读机器" + ipAndPort);
                                    if (readServerList.size() == 0 && enableAsReadWhenAllReadDown) {
                                        if (clusterDataSource.getWriteDBList().size() > 0) {
                                            log.warn("选择器:警告,读服务器列表为空,将尝试在写服务器进行查询"
                                                    + clusterDataSource.getWriteDBList().get(0).getServerType());
                                            // last resort: query the write server (acceptable
                                            // for low traffic only, never for high concurrency)
                                            readServerList.add(clusterDataSource.getWriteDBList().get(0));
                                        } else {
                                            log.debug("尝试在写服务器做读操作...写服务器不可用");
                                        }
                                    }
                                    if (!filaDataSouceList.contains(failServer)) {
                                        filaDataSouceList.add(failServer);
                                    }
                                }
                            }
                            break;
                        } catch (Exception e) {
                            log.error(e.getMessage(), e);
                        } finally {
                            lock.notify(); // wake the watcher to start probing the dead server
                        }
                    }
                }
            }
            log.warn("选择器:重试新的读服务器");
        }
    }

    /**
     * On a write failure caused by lost connectivity: remove the dead write
     * server, optionally promote a read server to write duty, and hand the dead
     * server to the watcher.
     *
     * @param pjp   the failing invocation (unused, kept for signature stability)
     * @param error the failure raised by the invocation
     */
    private void switchDBIfWriteError(ProceedingJoinPoint pjp, Throwable error) {
        log.warn("选择器:重试新的写服务器");
        if (CommonUtils.isCommunicationsException(causeText(error))) {
            Server failServer = clusterDataSource.getHolder().get();
            String ipAndPort = failServer.getIpAndPort();
            Server server = null;
            try {
                synchronized (lock) {
                    List<Server> readServerList = clusterDataSource.getReadDBList();
                    List<Server> writeServerList = clusterDataSource.getWriteDBList();
                    // remove() returning true means this thread won the race; others no-op
                    if (writeServerList.remove(failServer)) {
                        if (enableAsWriteWhenWriteDown) { // read servers may stand in for writes
                            if (readServerList.size() == 1) {
                                writeServerList.add(readServerList.get(0));
                            } else if (readServerList.size() > 1) {
                                // several read servers: promote the first to write duty
                                server = readServerList.get(0);
                                writeServerList.add(server);
                                if (readServerList.contains(server)) {
                                    readServerList.remove(server);
                                }
                                log.warn("选择器:由于写服务器" + failServer.getIpAndPort() + "down机,读服务器" + server.getIpAndPort()
                                        + "切换为主写服务器");
                            } else {
                                log.warn("选择器:警告,当前已经没有可用的数据库服务器,读写操作均不可用");
                            }
                            // the dead machine must also leave the read rotation
                            for (Server candidate : readServerList) {
                                if (candidate.getIpAndPort().equals(ipAndPort)) {
                                    clusterDataSource.getReadDBList().remove(candidate);
                                    log.warn("选择器:数据库down机,移除读机器" + ipAndPort);
                                    break;
                                }
                            }
                        }
                        if (!filaDataSouceList.contains(failServer)) {
                            filaDataSouceList.add(failServer);
                        }
                        lock.notify(); // wake the watcher
                    }
                }
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Picks the server for the current thread based on the read/write marker,
     * records it in the holder (so failure handling knows who failed) and
     * returns it.
     *
     * @throws IllegalStateException when read routing finds no available server
     *         (previously this NPEd on the following debug line)
     */
    public Server getMatchServer() {
        Server result;
        if (ReadWriteDecision.isChoiceWrite()) {
            result = clusterDataSource.getWriteDBList().get(0);
            log.debug("选择器:写库" + result.getIpAndPort());
        } else if (ReadWriteDecision.isChoiceRead()) {
            List<Server> readDBList = clusterDataSource.getReadDBList();
            result = CommonUtils.getServerByoptimizationOfWeightedOrder(readDBList);
            if (result == null) {
                // null means every read server is down (and the write fallback,
                // if enabled, is down too)
                throw new IllegalStateException("选择器:没有可用的读服务器");
            }
            log.debug("选择器:读库" + result.getIpAndPort());
        } else { // no marker set: treat as a write
            result = clusterDataSource.getWriteDBList().get(0);
            log.debug("选择器:写库,不符合标准的方法" + result.getIpAndPort());
        }
        clusterDataSource.getHolder().set(result);
        return result;
    }

    public void setForceChoiceReadWhenWrite(boolean forceChoiceReadWhenWrite) {
        this.forceChoiceReadWhenWrite = forceChoiceReadWhenWrite;
    }

    public void setFailedTimeToTryAgain(int failedTimeToTryAgain) {
        this.failedTimeToTryAgain = failedTimeToTryAgain;
    }

    public void setClusterDataSource(ClusterDataSource clusterDataSource) {
        this.clusterDataSource = clusterDataSource;
    }

    public List<Server> getFilaDataSouceList() {
        return filaDataSouceList;
    }

    public void setEnableAsWriteWhenWriteDown(boolean enableAsWriteWhenWriteDown) {
        this.enableAsWriteWhenWriteDown = enableAsWriteWhenWriteDown;
    }

    public void setEnableAsReadWhenAllReadDown(boolean enableAsReadWhenAllReadDown) {
        this.enableAsReadWhenAllReadDown = enableAsReadWhenAllReadDown;
    }
}
package com.dji.cluster.db.dataSouce;
/**
 * Per-thread marker recording whether the current invocation should hit a
 * read or a write database.
 *
 * @author xinlin.wang
 */
public class ReadWriteDecision {

    public enum DataSourceType {
        write, read;
    }

    private static final ThreadLocal<DataSourceType> holder = new ThreadLocal<DataSourceType>();

    /** Route the current thread to a write server. */
    public static void markWrite() {
        holder.set(DataSourceType.write);
    }

    /** Route the current thread to a read server. */
    public static void markRead() {
        holder.set(DataSourceType.read);
    }

    /**
     * Clears the marker. Uses remove() rather than set(null) so pooled threads
     * do not retain a stale ThreadLocal map entry (memory-leak hygiene).
     */
    public static void reset() {
        holder.remove();
    }

    /** @return true when no choice has been recorded for this thread */
    public static boolean isChoiceNone() {
        return null == holder.get();
    }

    /** @return true when this thread is marked for a write server */
    public static boolean isChoiceWrite() {
        return DataSourceType.write == holder.get();
    }

    /** @return true when this thread is marked for a read server */
    public static boolean isChoiceRead() {
        return DataSourceType.read == holder.get();
    }
}
package com.dji.cluster.db.dataSouce;
import com.alibaba.druid.pool.DruidDataSource;
/**
 * One physical database server in the cluster: its address, its dedicated
 * connection pool, its role (read/write) and the weights driving the smooth
 * weighted round-robin selection in CommonUtils.
 */
public class Server {

    /** serverType value marking a read server. */
    public static final int READ = 1;
    /** serverType value marking a write server. */
    public static final int WRITE = 2;

    // "host:port" of the database server
    private String ipAndPort;
    // dedicated connection pool backing this server
    private DruidDataSource dataSource;
    // configured weight (ignored for write servers)
    private int weight;
    // weight currently in effect; ramps back up towards weight after recovery
    private int effectiveWeight;
    // running weight used by the weighted round-robin selection
    private int currentWeight;
    // role of the server: 1 = read, 2 = write (see READ / WRITE)
    private int serverType;

    public String getIpAndPort() {
        return this.ipAndPort;
    }

    public void setIpAndPort(String ipAndPort) {
        this.ipAndPort = ipAndPort;
    }

    public DruidDataSource getDataSource() {
        return this.dataSource;
    }

    public void setDataSource(DruidDataSource dataSource) {
        this.dataSource = dataSource;
    }

    public int getWeight() {
        return this.weight;
    }

    public void setWeight(int weight) {
        this.weight = weight;
    }

    public int getEffectiveWeight() {
        return this.effectiveWeight;
    }

    public void setEffectiveWeight(int effectiveWeight) {
        this.effectiveWeight = effectiveWeight;
    }

    public int getCurrentWeight() {
        return this.currentWeight;
    }

    public void setCurrentWeight(int currentWeight) {
        this.currentWeight = currentWeight;
    }

    public int getServerType() {
        return this.serverType;
    }

    public void setServerType(int serverType) {
        this.serverType = serverType;
    }
}
package com.dji.cluster.db.dataSouce;
import java.util.List;
import java.util.concurrent.locks.Lock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ServerWatcher implements Runnable {

    private static final Logger log = LoggerFactory.getLogger(ServerWatcher.class);

    /** Pause between probe rounds while at least one server is down (ms). */
    private static final long PROBE_INTERVAL_MS = 2000L;

    /** Servers currently down; shared with DataSourceProcessor. */
    private List<Server> failServerList = null;
    private ClusterDataSource cds;
    /** Its object monitor is the wait/notify channel with DataSourceProcessor. */
    private Lock lock;

    public ServerWatcher(List<Server> failServerList, ClusterDataSource cds, Lock lock) {
        this.lock = lock;
        this.failServerList = failServerList;
        this.cds = cds;
    }

    /**
     * Daemon loop: waits while no server is down; otherwise probes each failed
     * server and, on recovery, moves it back into its read or write rotation.
     *
     * Fixes over the previous revision: probe connections are closed (they used
     * to leak one per probe), removals during index iteration no longer skip
     * elements, a pause between probe rounds prevents a busy spin, probe
     * failures are logged instead of silently swallowed, and interruption
     * stops the watcher cleanly.
     */
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            if (failServerList == null || failServerList.size() == 0) {
                log.debug("监控:暂无异常服务器....");
                synchronized (lock) {
                    try {
                        lock.notify();
                        lock.wait();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        return;
                    }
                }
            } else {
                for (int index = 0; index < failServerList.size(); index++) {
                    Server failServer = failServerList.get(index);
                    try {
                        log.debug("监控:连接失败服务器总计 " + failServerList.size() + "台,当前正在测试:" + failServer.getIpAndPort());
                        // probe first, take the lock only once the server is back,
                        // so normal failover is not blocked by slow probes
                        if (isBackOnline(failServer)) {
                            synchronized (lock) {
                                log.debug("监控:" + failServer.getIpAndPort() + "恢复正常");
                                if (failServerList.remove(failServer)) {
                                    index--; // list shrank; do not skip the next entry
                                }
                                restore(failServer);
                            }
                        }
                    } catch (Exception e) {
                        // probe failed: server is still down, keep it in the list
                        log.debug("监控:" + failServer.getIpAndPort() + "仍不可用", e);
                    }
                }
                try {
                    Thread.sleep(PROBE_INTERVAL_MS); // throttle probe rounds
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }
    }

    /** Probes the server with a validity check; the test connection is always returned to the pool. */
    private boolean isBackOnline(Server failServer) throws Exception {
        java.sql.Connection connection = failServer.getDataSource().getConnection();
        try {
            return connection.isValid(2000);
        } finally {
            connection.close();
        }
    }

    /** Re-inserts a recovered server into its original role list; caller holds the lock monitor. */
    private void restore(Server failServer) {
        if (failServer.getServerType() == Server.READ) {
            // A write server may have been drafted in as a read fallback while all
            // read servers were down; evict it now that a real read server is back.
            List<Server> readList = cds.getReadDBList();
            for (int item = 0; item < readList.size(); item++) {
                if (readList.get(item).getServerType() == Server.WRITE) {
                    readList.remove(item);
                    item--; // compensate for the removal
                }
            }
            readList.add(failServer);
            log.debug("将" + failServer.getIpAndPort() + "恢复到读序列");
        } else if (failServer.getServerType() == Server.WRITE) {
            log.debug("将" + failServer.getIpAndPort() + "恢复到写序列");
            List<Server> writeList = cds.getWriteDBList();
            writeList.add(failServer);
            // A read server may have been promoted to write duty; demote it back.
            for (int item = 0; item < writeList.size(); item++) {
                if (writeList.get(item).getServerType() != Server.WRITE) { // reserved for read-write roles
                    cds.getReadDBList().add(writeList.remove(item));
                    item--; // compensate for the removal
                }
            }
        }
    }
}
读写分离(database read/write splitting)
最新推荐文章于 2025-01-07 15:22:38 发布