Without further ado, here is the code. I'm a beginner, so corrections for anything imperfect are welcome.
/**
 * Back up data
 * @param backup the backup request
 * @return a result map with a "flag" entry ("success" or "failure") and an error "msg" on failure
 */
public Map<String, Object> doDataBackup(BackupDTO backup) {
    String schedule = backup.getSchedule();
    String config = backup.getConfig();
    BackupConfigDTO cfg = (BackupConfigDTO) JSONObject.toBean(JSONObject.fromObject(config), BackupConfigDTO.class);
    String target = cfg.getTarget();
    String[] tables = null;
    String database = null;
    String backupSource = backup.getBackupSource();
    if (StringUtils.contains(backupSource, ":")) {
        // "database:table1,table2" backs up only the listed tables
        String[] tempArr = backupSource.split(":");
        database = tempArr[0];
        tables = tempArr[1].split(",");
    } else {
        // No table list given: back up every table in the database
        database = backupSource;
        try {
            List<String> tnames = new ArrayList<String>();
            List<Map<String, Object>> tablelist = getTables(cfg);
            for (Map<String, Object> map : tablelist) {
                tnames.add(map.get("tableName").toString());
            }
            tables = tnames.toArray(new String[tnames.size()]);
        } catch (Exception e) {
            logger.debug(e);
            return UtilMisc.toMap("flag", "failure", "msg", e.getMessage());
        }
    }
    if (tables.length < 1) {
        return UtilMisc.toMap("flag", "failure", "msg", "Database " + database + " has no tables to back up");
    }
    String backupLocation = backup.getBackupLocation();
    String backupName = backup.getBackupName();
    String destFileDir = FileUtil.normalize(backupLocation);
    String dirStr = FileUtil.normalize(destFileDir + "/" + backupName + ".sql");
    File dirFile = new File(dirStr);
    if (!dirFile.isAbsolute()) {
        return UtilMisc.toMap("flag", "failure", "msg", "The backup file location must be an absolute path");
    }
    if ("immediate".equals(schedule)) {
        DataSource ds = getDataSource(cfg);
        JdbcTemplate jdbcTemplate = new JdbcTemplate(ds);
        boolean isZip = backup.getNeedZip().intValue() == 1;
        boolean isCover = backup.getCover().intValue() == 1;
        if (isCover && dirFile.exists()) {
            dirFile.delete();
        }
        if (cfg.isIncludeCreateDB()) {
            try {
                writeFile(dirFile, "CREATE DATABASE `" + database + "`;\n\n");
            } catch (IOException e) {
                logger.debug(e);
                return UtilMisc.toMap("flag", "failure", "msg", "An error occurred during backup");
            }
        }
        if (cfg.isIncludeUseDB()) {
            try {
                writeFile(dirFile, "USE `" + database + "`;\n\n");
            } catch (IOException e) {
                logger.debug(e);
                return UtilMisc.toMap("flag", "failure", "msg", "An error occurred during backup");
            }
        }
        // Back up structure only
        if (BackupConst.BACKUP_TARGET_STRUCTURE_ONLY.equals(target)) {
            for (String table : tables) {
                String structure = null;
                try {
                    structure = getTableStructure(jdbcTemplate, table, cfg);
                } catch (Exception e1) {
                    logger.debug(e1);
                    return UtilMisc.toMap("flag", "failure", "msg", "Failed to read the table structure");
                }
                try {
                    writeFile(dirFile, structure);
                } catch (IOException e) {
                    logger.debug(e);
                    return UtilMisc.toMap("flag", "failure", "msg", "Failed to write the table structure to the file");
                }
            }
        }
        // Back up data only
        else if (BackupConst.BACKUP_TARGET_DATA_ONLY.equals(target)) {
            for (String table : tables) {
                try {
                    writeData(jdbcTemplate, table, cfg, dirFile);
                } catch (IOException e) {
                    logger.debug(e);
                    return UtilMisc.toMap("flag", "failure", "msg", "Failed to write the table data to the file");
                }
            }
        }
        // Back up structure and data
        else if (BackupConst.BACKUP_TARGET_STRUCTURE_AND_DATA.equals(target)) {
            for (String table : tables) {
                String structure = null;
                try {
                    structure = getTableStructure(jdbcTemplate, table, cfg);
                } catch (Exception e1) {
                    logger.debug(e1);
                    return UtilMisc.toMap("flag", "failure", "msg", "Failed to read the table structure");
                }
                try {
                    writeFile(dirFile, structure);
                } catch (IOException e) {
                    logger.debug(e);
                    return UtilMisc.toMap("flag", "failure", "msg", "Failed to write the table structure to the file");
                }
                try {
                    writeData(jdbcTemplate, table, cfg, dirFile);
                } catch (IOException e) {
                    logger.debug(e);
                    return UtilMisc.toMap("flag", "failure", "msg", "Failed to write the table data to the file");
                }
            }
        }
    }
    // Both the immediate path and the scheduled path report success at this point
    return UtilMisc.toMap("flag", "success");
}
/**
 * Write a table's data to the backup file
 * @param jdbcTemplate
 * @param table
 * @param cfg
 * @param dirFile
 * @throws IOException
 */
private void writeData(JdbcTemplate jdbcTemplate, String table,
        BackupConfigDTO cfg, File dirFile) throws IOException {
    boolean batchInsert = cfg.isBatchInsert();
    boolean lockInsert = cfg.isLockInsert();
    // Build the shared "insert into `table`(`col1`,`col2`,...) values " prefix
    StringBuilder insertStrBuff = new StringBuilder("insert into `" + table + "`(");
    String sql = "DESCRIBE `" + table + "`";
    List<String> fields = new ArrayList<String>();
    List<Map<String, Object>> result = jdbcTemplate.queryForList(sql);
    for (Map<String, Object> map : result) {
        fields.add(map.get("Field").toString());
    }
    for (String field : fields) {
        insertStrBuff.append("`").append(field).append("`,");
    }
    insertStrBuff.deleteCharAt(insertStrBuff.length() - 1);
    insertStrBuff.append(")").append(" values ");
    String insertStr = insertStrBuff.toString();
    writeFile(dirFile, "/*Data for the table `" + table + "` */\n\n");
    if (fields.size() > 0) {
        // Page through the table 1000 rows at a time; compute the number of pages
        int rcdCount = jdbcTemplate.queryForObject("SELECT COUNT(*) FROM `" + table + "`", Integer.class);
        int cycleNum = Math.max(1, (rcdCount + 999) / 1000);
        if (lockInsert) {
            writeFile(dirFile, "LOCK TABLES `" + table + "` WRITE;\n\n");
        }
        if (batchInsert) {
            writeFile(dirFile, insertStr);
        }
        // Write the pages one after another
        for (int i = 0; i < cycleNum; i++) {
            StringBuilder content = new StringBuilder();
            int recordStart = i * 1000;
            String sqlTemplate = "select * from `" + table + "` limit " + recordStart + ",1000";
            List<Map<String, Object>> records = jdbcTemplate.queryForList(sqlTemplate);
            StringBuilder valuesBuff = new StringBuilder();
            // Iterate over the query result
            for (Map<String, Object> map : records) {
                // Render one row as (v1,v2,...)
                StringBuilder rowValues = new StringBuilder("(");
                for (String field : fields) {
                    Object value = map.get(field);
                    String fieldValue;
                    if (value instanceof byte[]) {
                        // Binary columns (e.g. BLOB) are written as hex literals
                        fieldValue = getHexString((byte[]) value);
                    } else if (value != null) {
                        // Escape backslashes first, then quotes and line breaks
                        String escaped = StringUtils.replace(value.toString(), "\\", "\\\\");
                        escaped = StringUtils.replace(escaped, "'", "\\'");
                        escaped = StringUtils.replace(escaped, "\"", "\\\"");
                        escaped = StringUtils.replace(escaped, "\n", "\\n");
                        escaped = StringUtils.replace(escaped, "\r", "\\r");
                        fieldValue = "'" + escaped + "'";
                    } else {
                        fieldValue = "NULL";
                    }
                    rowValues.append(fieldValue).append(",");
                }
                rowValues.deleteCharAt(rowValues.length() - 1);
                rowValues.append(")");
                if (batchInsert) {
                    valuesBuff.append(rowValues).append(",");
                } else {
                    content.append(insertStr).append(rowValues).append(";\r\n");
                }
            }
            if (batchInsert && valuesBuff.length() > 0) {
                if (i < cycleNum - 1) {
                    // Keep the trailing comma: the next page continues the same INSERT
                    content = valuesBuff;
                } else {
                    content = valuesBuff.deleteCharAt(valuesBuff.length() - 1);
                }
            }
            writeFile(dirFile, content.toString());
        }
        if (batchInsert) {
            writeFile(dirFile, ";\n\n");
        } else {
            writeFile(dirFile, "\r\n");
        }
        if (lockInsert) {
            writeFile(dirFile, "UNLOCK TABLES;\n\n");
        }
    }
}
private String getHexString(byte[] bs) {
    // Render bytes as a MySQL hex literal, two digits per byte (e.g. 0x0d0a)
    StringBuilder result = new StringBuilder("0x");
    for (byte b : bs) {
        result.append(String.format("%02x", b));
    }
    return result.toString();
}
/**
 * Get the CREATE statement for a table
 * @param jdbcTemplate
 * @param tableName
 * @param config
 * @return
 */
public String getTableStructure(JdbcTemplate jdbcTemplate, String tableName, BackupConfigDTO config) throws Exception {
    String structure = null;
    String remark = "/*Table structure for table `" + tableName + "` */";
    String sql = "SHOW CREATE TABLE `" + tableName + "`";
    List<Map<String, Object>> result = jdbcTemplate.queryForList(sql);
    for (Map<String, Object> map : result) {
        // SHOW CREATE TABLE returns one row; the statement is in the "Create Table" column
        structure = map.get("Create Table").toString();
    }
    StringBuilder strbuff = new StringBuilder();
    strbuff.append(remark).append("\n\n");
    if (config.isIncludeDel()) {
        strbuff.append("DROP TABLE IF EXISTS `").append(tableName).append("`;").append("\n\n");
    }
    strbuff.append(structure).append(";\n\n");
    return strbuff.toString();
}
/**
 * Append content to the backup file
 * @param destFile
 * @param structure
 * @throws IOException
 */
private void writeFile(File destFile, String structure) throws IOException {
    // Open in append mode (the file is created if it does not exist); try-with-resources
    // closes the writer and the underlying stream in the correct order.
    try (BufferedWriter out = new BufferedWriter(
            new OutputStreamWriter(new FileOutputStream(destFile, true), "UTF-8"))) {
        out.write(structure);
    }
}
While dumping table data I ran into a problem worth sharing: MySQL's LONGBLOB type. A LONGBLOB column can store up to 4 GB of data, which may be text or binary content such as an image. When the column type is LONGBLOB, the value comes back from the query as a byte[] array. Converting it to a String and writing it to the file works when the value is text, but when it is an image the SQL statement written to the target file is garbage.
So how do you handle that? Note the getHexString(byte[] bs) method in the code above: my approach is to convert the column value into a hexadecimal string. The INSERT statement written to the SQL file then looks like this:
insert into `act_ge_bytearray`(`ID_`,`REV_`,`NAME_`,`DEPLOYMENT_ID_`,`BYTES_`,`GENERATED_`) values ('37111','1','review.form---','37110',0x0d0a3c0a3c2f7461626c653e0d0a,'0');
This statement executes correctly in SQLyog, and the LONGBLOB column keeps its original value. Note that it differs from the SQL file that SQLyog itself exports.
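As a quick sanity check of the conversion (a minimal sketch; getHexString is the method shown above, and the bytes are the first four bytes of the example statement):

byte[] bytes = {0x0d, 0x0a, 0x3c, 0x0a};
// Each byte is padded to two hex digits, so MySQL reads the literal back as exactly these bytes
String hex = getHexString(bytes); // "0x0d0a3c0a"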
The code above was later optimized: the writeData method was rewritten because its performance was poor. A table with more than 100,000 rows made the system sluggish while system resources sat underused. The fix was to make it multi-threaded: for a table with more than 100,000 rows, several threads query different row ranges at the same time and write SQL to the file concurrently, which makes backing up tables with millions of rows considerably less painful. A rough sketch of the idea follows.
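This is a minimal sketch of that multi-threaded variant, not the original optimized code: it splits the table into 1000-row pages, renders each page on a worker thread, and serializes the file appends through a lock. dumpRange(...) is a hypothetical helper that renders one LIMIT page of rows as INSERT text, along the lines of the row-rendering loop in writeData; the java.util.concurrent imports are required.

private void writeDataConcurrently(final JdbcTemplate jdbcTemplate, final String table,
        final List<String> fields, final File dirFile, int rcdCount) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    final Object fileLock = new Object();
    List<Future<?>> futures = new ArrayList<Future<?>>();
    for (int start = 0; start < rcdCount; start += 1000) {
        final int recordStart = start;
        futures.add(pool.submit(new Runnable() {
            public void run() {
                // dumpRange is hypothetical: render rows recordStart..recordStart+999 as INSERTs
                String chunk = dumpRange(jdbcTemplate, table, fields, recordStart, 1000);
                synchronized (fileLock) { // serialize appends so pages don't interleave
                    try {
                        writeFile(dirFile, chunk);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }));
    }
    try {
        for (Future<?> f : futures) {
            f.get(); // surfaces any worker failure
        }
    } finally {
        pool.shutdown();
    }
}

Because pages may finish out of order, each chunk has to consist of complete INSERT statements rather than a shared multi-row VALUES list.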
The overall approach to dumping a database from Java is straightforward: get a connection, query the table information, and write the file. Querying that information relies on a few common MySQL statements (a JdbcTemplate sketch follows the list):
1. SELECT * FROM information_schema.TABLES WHERE table_schema = 'databaseName'; — queries the details of everything in a database, covering both tables and views. If you only need the names, SHOW TABLES is faster, but it does not return the other details.
2. Triggers and events work the same way, via information_schema.TRIGGERS and information_schema.EVENTS.
3. Getting the CREATE statement for a table, view, trigger, or event (if I remember correctly): SHOW CREATE TABLE `tableName`, and likewise SHOW CREATE VIEW / TRIGGER / EVENT.
4. SHOW { PROCEDURE | FUNCTION } STATUS lists stored procedures and functions.
5. DESCRIBE tableName describes the table's basic column information.
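For illustration, a minimal sketch of issuing these metadata queries through JdbcTemplate ('mydb' and 'mytable' are placeholder names):

// List tables and views in a schema via information_schema
List<Map<String, Object>> tables = jdbcTemplate.queryForList(
        "SELECT table_name AS name, table_type AS type"
        + " FROM information_schema.TABLES WHERE table_schema = ?", "mydb");
for (Map<String, Object> row : tables) {
    System.out.println(row.get("name") + " (" + row.get("type") + ")");
}
// Fetch one table's CREATE statement; the result column is literally named "Create Table"
Map<String, Object> create = jdbcTemplate.queryForMap("SHOW CREATE TABLE `mytable`");
System.out.println(create.get("Create Table"));
// Column metadata, as used above to build the INSERT field list
for (Map<String, Object> col : jdbcTemplate.queryForList("DESCRIBE `mytable`")) {
    System.out.println(col.get("Field") + " " + col.get("Type"));
}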