public static class StopHookThread extends Thread
{
    @Override
    public void run()
    {
        // Flush everything still buffered in memory to the database.
        persistentAlldata();
    }
}
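// A minimal registration sketch (an assumption; the excerpt does not show
// where the hook is installed). Given its name, StopHookThread is presumably
// registered as a JVM shutdown hook so buffered rows are flushed before the
// process exits:
//
//     Runtime.getRuntime().addShutdownHook(new StopHookThread());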
private static void persistentAlldata()
{
    // Drain the buffer batch by batch until nothing is left.
    TableName_Field[] tnfs = null;
    while (true)
    {
        tnfs = getTNFFromlList();
        if (tnfs == null || tnfs.length == 0)
        {
            break;
        }
        batchInsertToDB(tnfs);
    }
}
private static TableName_Field[] getTNFFromlList()
{
    TableName_Field[] subList = null;
    synchronized (object)
    {
        // If the buffer holds more entries than one batch may insert,
        // take only one batch's worth. For example, if each batch is
        // limited to 100 entries but 10000 are buffered, only 100 are
        // taken per call.
        int maxEachBatch = Tools.getMaxeachBatch();
        if (count >= maxEachBatch)
        {
            TableName_Field[] allsql = new TableName_Field[sqlList.length];
            subList = new TableName_Field[maxEachBatch];
            // Entries taken out for this batch.
            System.arraycopy(sqlList, 0, subList, 0, maxEachBatch);
            // Entries not yet taken out: shift them to the front of the
            // new buffer, starting at offset maxEachBatch.
            System.arraycopy(sqlList, maxEachBatch, allsql, 0, count - maxEachBatch);
            sqlList = allsql;
            count = count - maxEachBatch;
        }
        else if (count > 0) // Fewer than one batch left: take everything.
        {
            subList = new TableName_Field[count];
            System.arraycopy(sqlList, 0, subList, 0, count);
            sqlList = new TableName_Field[INITSIZE];
            count = 0;
        }
    }
    return subList;
}
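// Worked example of the batching above (illustrative numbers only):
// with count = 250 and maxEachBatch = 100, the first call returns
// entries [0, 100), shifts [100, 250) to the front, and leaves
// count = 150; the next call returns another 100 and leaves 50;
// the third call takes the "count > 0" branch and returns the last 50,
// after which persistentAlldata() sees an empty result and stops.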
private static void batchInsertToDB(TableName_Field[] tnfs)
{
    try
    {
        logger.debug("Batch SQL insert into db is ready, size = "
                + tnfs.length);
        OperateDB.batchInsert(tnfs);
        logger.debug("Batch SQL insert into db succeeded, size = "
                + tnfs.length);
    }
    catch (Exception e)
    {
        // The database insert failed: dump the rows to the log so the
        // data is not silently lost.
        StringBuilder sb = null;
        for (TableName_Field tnf : tnfs)
        {
            sb = new StringBuilder(500);
            sb.append("insert into DB failed, tablename = ").append(
                    tnf.getTableName());
            for (EsbTableField etf : tnf.getList())
            {
                sb.append("\n").append("field name:")
                        .append(etf.getFieldName()).append(" value:")
                        .append(etf.getFieldValue());
            }
            logger.error(sb.toString());
        }
        logger.error("batchInsertToDB failed: " + e.getMessage(), e);
    }
}
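// For context, a hypothetical producer side (an assumption: the enqueue
// method is not part of this excerpt). Writers would append under the same
// "object" lock so getTNFFromlList() always sees a consistent
// sqlList/count pair:
//
//     public static void addTNF(TableName_Field tnf)
//     {
//         synchronized (object)
//         {
//             if (count == sqlList.length)
//             {
//                 // Buffer full: grow it before appending.
//                 TableName_Field[] bigger =
//                         new TableName_Field[sqlList.length * 2];
//                 System.arraycopy(sqlList, 0, bigger, 0, count);
//                 sqlList = bigger;
//             }
//             sqlList[count++] = tnf;
//         }
//     }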