The previous post analyzed the connection-creating thread; this one looks at the connection-destroying thread.
1. Two ways to execute the logic encapsulated in destroyTask
There are two cases, depending on whether the user set a destroyScheduler before initializing the connection pool:
protected void createAndStartDestroyThread() {
    // Create the task that actually destroys connections
    destroyTask = new DestroyTask();

    // If a scheduled executor (destroyScheduler) was configured before the pool was initialized, use it
    if (destroyScheduler != null) {
        long period = timeBetweenEvictionRunsMillis;
        if (period <= 0) {
            period = 1000;
        }
        // Run the destroy task at a fixed rate of timeBetweenEvictionRunsMillis
        destroySchedulerFuture = destroyScheduler.scheduleAtFixedRate(destroyTask, period, period,
                TimeUnit.MILLISECONDS);
        initedLatch.countDown();
        return;
    }

    // Otherwise, create a dedicated DestroyConnectionThread
    String threadName = "Druid-ConnectionPool-Destroy-" + System.identityHashCode(this);
    destroyConnectionThread = new DestroyConnectionThread(threadName);
    destroyConnectionThread.start();
}
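For the first case to apply, the scheduler must be supplied before init() is called. Below is a minimal sketch of what that could look like; the single-threaded executor and the placeholder JDBC settings are my own choices for illustration, and setDestroyScheduler is the setter inherited from DruidAbstractDataSource:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import com.alibaba.druid.pool.DruidDataSource;

public class DestroySchedulerDemo {
    public static void main(String[] args) throws Exception {
        // A single-threaded scheduler is usually enough for the destroy task
        ScheduledExecutorService destroyScheduler = Executors.newSingleThreadScheduledExecutor();

        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setUrl("jdbc:mysql://localhost:3306/test"); // placeholder URL
        dataSource.setUsername("root");                        // placeholder credentials
        dataSource.setPassword("root");

        // Must be set before init(); otherwise the pool falls back to DestroyConnectionThread
        dataSource.setDestroyScheduler(destroyScheduler);

        dataSource.init();
    }
}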
Let's look at the second case first: the logic inside the DestroyConnectionThread it creates:
public void run() {
    initedLatch.countDown();

    for (;;) {
        // Connections are removed from the front of the pool
        try {
            if (closed || closing) {
                break;
            }

            if (timeBetweenEvictionRunsMillis > 0) {
                Thread.sleep(timeBetweenEvictionRunsMillis);
            } else {
                Thread.sleep(1000); // fall back to a 1-second interval when not configured
            }

            if (Thread.interrupted()) {
                break;
            }

            destroyTask.run();
        } catch (InterruptedException e) {
            break;
        }
    }
}
As you can see, this thread simply sleeps between rounds and then runs the logic encapsulated in destroyTask, while the first approach runs the same destroyTask on the scheduled executor. In both cases the interval is timeBetweenEvictionRunsMillis, which defaults to 60000 ms (one minute) when not set:
protected volatile long timeBetweenEvictionRunsMillis = DEFAULT_TIME_BETWEEN_EVICTION_RUNS_MILLIS;
public static final long DEFAULT_TIME_BETWEEN_EVICTION_RUNS_MILLIS = 60 * 1000L;
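A quick sketch of shortening that cycle via the setter (the 30-second value here is purely illustrative):

import com.alibaba.druid.pool.DruidDataSource;

public class EvictionIntervalDemo {
    public static void main(String[] args) {
        DruidDataSource dataSource = new DruidDataSource();
        // Run the destroy/shrink cycle every 30 seconds instead of the default 60 seconds
        dataSource.setTimeBetweenEvictionRunsMillis(30_000L);
    }
}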
2. The logic encapsulated in destroyTask
As shown below, the core logic is encapsulated in the shrink method; removeAbandoned is also invoked when the remove-abandoned feature is enabled.
public DestroyTask() {
}

@Override
public void run() {
    shrink(true, keepAlive);

    if (isRemoveAbandoned()) {
        removeAbandoned();
    }
}
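shrink is covered in detail below; removeAbandoned only runs when the remove-abandoned feature is switched on. A hedged configuration sketch of that feature (the timeout value is illustrative; these are the standard setters on DruidAbstractDataSource):

import com.alibaba.druid.pool.DruidDataSource;

public class RemoveAbandonedDemo {
    public static void main(String[] args) {
        DruidDataSource dataSource = new DruidDataSource();
        // Reclaim connections that were borrowed but never closed by the caller
        dataSource.setRemoveAbandoned(true);
        // Treat a connection as abandoned after it has been held for 180 seconds
        dataSource.setRemoveAbandonedTimeout(180);
        // Log the stack trace of the code that borrowed the abandoned connection
        dataSource.setLogAbandoned(true);
    }
}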
Because the destroy task runs on its own thread rather than the main thread, and the checks below read and update shared state of the DruidDataSource (fields such as poolingCount and the connections array), the lock must be acquired first.
If some of the parameters are hard to follow while reading the source, it helps to look them up first or consult the official Druid documentation to get their meanings straight, and then come back to the code. This reference may help: DruidDataSource配置 - DruidDataSource配置属性列表 - 《Alibaba Druid v1.0 使用手册》 - 书栈网 · BookStack
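For orientation, here is a sketch of the parameters the shrink logic below depends on. The values are only illustrative, not recommendations, and keepAliveBetweenTimeMillis/maxEvictableIdleTimeMillis require a reasonably recent Druid version:

import com.alibaba.druid.pool.DruidDataSource;

public class ShrinkParamsDemo {
    public static DruidDataSource build() {
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setMinIdle(5);                            // connections kept even when idle
        dataSource.setMaxActive(20);                         // upper bound of the pool
        dataSource.setMinEvictableIdleTimeMillis(300_000L);  // idle time before a connection becomes evictable
        dataSource.setMaxEvictableIdleTimeMillis(900_000L);  // idle time after which it is evicted even within minIdle
        dataSource.setKeepAlive(true);                       // validate long-idle connections instead of dropping them
        dataSource.setKeepAliveBetweenTimeMillis(120_000L);  // idle time that triggers a keep-alive check
        dataSource.setPhyTimeoutMillis(-1L);                 // physical connection lifetime limit; disabled when <= 0
        return dataSource;
    }
}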
public void shrink(boolean checkTime, boolean keepAlive) {
    try {
        lock.lockInterruptibly();
    } catch (InterruptedException e) {
        return;
    }

    boolean needFill = false;
    int evictCount = 0;
    int keepAliveCount = 0;
    int fatalErrorIncrement = fatalErrorCount - fatalErrorCountLastShrink;
    fatalErrorCountLastShrink = fatalErrorCount;

    try {
        // Do nothing until the pool has been initialized
        if (!inited) {
            return;
        }

        // Pool size minus minIdle: the number of connections eligible for eviction
        final int checkCount = poolingCount - minIdle;
        final long currentTimeMillis = System.currentTimeMillis();
        // Iterate over all connections currently in the pool
        for (int i = 0; i < poolingCount; ++i) {
            DruidConnectionHolder connection = connections[i];

            // If a fatal error occurred after this connection was established,
            // move it to keepAliveConnections so that it gets validated
            if ((onFatalError || fatalErrorIncrement > 0) && (lastFatalErrorTimeMillis > connection.connectTimeMillis)) {
                keepAliveConnections[keepAliveCount++] = connection;
                continue;
            }

            // Time-based checks
            if (checkTime) {
                // If the caller configured phyTimeoutMillis (> 0)
                if (phyTimeoutMillis > 0) {
                    long phyConnectTimeMillis = currentTimeMillis - connection.connectTimeMillis;
                    // Evict connections whose physical lifetime exceeds phyTimeoutMillis
                    if (phyConnectTimeMillis > phyTimeoutMillis) {
                        evictConnections[evictCount++] = connection;
                        continue;
                    }
                }

                // How long this connection has been idle
                long idleMillis = currentTimeMillis - connection.lastActiveTimeMillis;

                // If the idle time is below both minEvictableIdleTimeMillis and keepAliveBetweenTimeMillis,
                // the remaining (more recently used) connections cannot qualify either, so exit the loop
                if (idleMillis < minEvictableIdleTimeMillis
                        && idleMillis < keepAliveBetweenTimeMillis) {
                    break;
                }

                if (idleMillis >= minEvictableIdleTimeMillis) {
                    if (checkTime && i < checkCount) {
                        // Within the evictable quota (pool size above minIdle): evict
                        evictConnections[evictCount++] = connection;
                        continue;
                    } else if (idleMillis > maxEvictableIdleTimeMillis) {
                        // Idle beyond maxEvictableIdleTimeMillis: evict even inside the minIdle quota
                        evictConnections[evictCount++] = connection;
                        continue;
                    }
                }

                if (keepAlive && idleMillis >= keepAliveBetweenTimeMillis) {
                    // Idle long enough for a keep-alive check: schedule validation
                    keepAliveConnections[keepAliveCount++] = connection;
                }
            } else {
                // When not checking time, simply evict the first checkCount connections
                if (i < checkCount) {
                    evictConnections[evictCount++] = connection;
                } else {
                    break;
                }
            }
        }
        // Remove the evicted and keep-alive connections from the head of the connections array
        int removeCount = evictCount + keepAliveCount;
        if (removeCount > 0) {
            System.arraycopy(connections, removeCount, connections, 0, poolingCount - removeCount);
            Arrays.fill(connections, poolingCount - removeCount, poolingCount, null);
            poolingCount -= removeCount;
        }
        keepAliveCheckCount += keepAliveCount;

        // Decide whether the pool needs to be refilled up to minIdle
        if (keepAlive && poolingCount + activeCount < minIdle) {
            needFill = true;
        }
    } finally {
        lock.unlock();
    }
    // Physically close the evicted connections and clear the evictConnections array
    if (evictCount > 0) {
        for (int i = 0; i < evictCount; ++i) {
            DruidConnectionHolder item = evictConnections[i];
            Connection connection = item.getConnection();
            JdbcUtils.close(connection);
            destroyCountUpdater.incrementAndGet(this);
        }
        Arrays.fill(evictConnections, null);
    }

    if (keepAliveCount > 0) {
        // keep order
        for (int i = keepAliveCount - 1; i >= 0; --i) {
            DruidConnectionHolder holer = keepAliveConnections[i];
            Connection connection = holer.getConnection();
            holer.incrementKeepAliveCheckCount();

            boolean validate = false;
            try {
                // Validate that the connection is still alive
                this.validateConnection(connection);
                validate = true;
            } catch (Throwable error) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("keepAliveErr", error);
                }
                // skip
            }

            boolean discard = !validate;
            if (validate) {
                // If validation succeeded, put the connection back into the connections array
                holer.lastKeepTimeMillis = System.currentTimeMillis();
                boolean putOk = put(holer, 0L, true);
                if (!putOk) {
                    discard = true;
                }
            }

            // If validation failed (or the put failed), close and discard the connection
            if (discard) {
                try {
                    connection.close();
                } catch (Exception e) {
                    // skip
                }

                lock.lock();
                try {
                    discardCount++;

                    if (activeCount + poolingCount <= minIdle) {
                        // If the pool has dropped below minIdle, signal the create thread
                        emptySignal();
                    }
                } finally {
                    lock.unlock();
                }
            }
        }
        this.getDataSourceStat().addKeepAliveCheckCount(keepAliveCount);
        Arrays.fill(keepAliveConnections, null);
    }
    if (needFill) {
        // The pool needs more connections to reach minIdle
        lock.lock();
        try {
            int fillCount = minIdle - (activeCount + poolingCount + createTaskCount);
            for (int i = 0; i < fillCount; ++i) {
                emptySignal();
            }
        } finally {
            lock.unlock();
        }
    } else if (onFatalError || fatalErrorIncrement > 0) {
        // After a fatal error, also signal connection creation
        lock.lock();
        try {
            emptySignal();
        } finally {
            lock.unlock();
        }
    }
}
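To recap the time-based branch of shrink, the standalone sketch below reproduces the per-connection decision. The enum and method names are purely illustrative choices of mine, the fatal-error and phyTimeoutMillis pre-checks are omitted, and the thresholds mirror the fields used above:

public class ShrinkDecisionSketch {
    enum Action { STOP_SCAN, EVICT, KEEP_ALIVE_CHECK, LEAVE_IN_POOL }

    /**
     * Mirrors the checkTime branch of shrink: connections[0] is the longest-idle connection,
     * so the scan can stop as soon as a connection is "fresh" on both thresholds.
     */
    static Action decide(long idleMillis,
                         int index,
                         int checkCount,
                         long minEvictableIdleTimeMillis,
                         long maxEvictableIdleTimeMillis,
                         long keepAliveBetweenTimeMillis,
                         boolean keepAlive) {
        if (idleMillis < minEvictableIdleTimeMillis && idleMillis < keepAliveBetweenTimeMillis) {
            return Action.STOP_SCAN; // later connections are even fresher
        }
        if (idleMillis >= minEvictableIdleTimeMillis) {
            if (index < checkCount) {
                return Action.EVICT; // pool still holds more than minIdle connections
            }
            if (idleMillis > maxEvictableIdleTimeMillis) {
                return Action.EVICT; // too old even to count toward minIdle
            }
        }
        if (keepAlive && idleMillis >= keepAliveBetweenTimeMillis) {
            return Action.KEEP_ALIVE_CHECK; // validate instead of evicting
        }
        return Action.LEAVE_IN_POOL;
    }

    public static void main(String[] args) {
        // Example: 8 pooled connections, minIdle = 5, so checkCount = 3
        System.out.println(decide(400_000L, 1, 3, 300_000L, 900_000L, 120_000L, true)); // EVICT
        System.out.println(decide(400_000L, 4, 3, 300_000L, 900_000L, 120_000L, true)); // KEEP_ALIVE_CHECK
        System.out.println(decide(60_000L, 5, 3, 300_000L, 900_000L, 120_000L, true));  // STOP_SCAN
    }
}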