场景:一个sql需要关联4、5张表,查询出来的数据达到两百多万,这个时候,很容易造成 CPU 100% 使用率,慢查询,服务器内存溢出等问题。
解决方案:1:使用索引。2:把这个sql查询出来的数据,尽可能的按照某种条件分批次查询出来。
既然是分批次查询出来,那么就需要解决如何在原有文件的基础上,在末尾动态的追加数据
下边有一个工具可以使用。
1:工具类:缓冲区用的是:BufferedWriter
参数解释
List exportData:参数类型:List<LinkedHashMap<String, String>> exportData = new ArrayList<LinkedHashMap<String, String>>();存储的是要导出的数据集合
LinkedHashMap map, 表头,这个你看见工具类会发现是表头和数据对应的关键
String outPutPath, csv文件要导出的位置
String fileName,文件名字但不包含.csv,工具类里边有不用写
String exportName:文件名字,是用来判断生成是否生成过csv文件
在工具类中1.2是关键,true是关键中的关键,没有他是不会追加的,我用的是append方法进行末尾追加
/**
 * Appends export rows to an existing CSV file, or creates a new CSV file
 * (with a header row) when no file named {@code exportName} exists yet in
 * {@code outPutPath}. Intended for writing a large query result in batches:
 * the first batch creates the file, later batches append to it.
 *
 * @param exportData rows to export; each element is a bean/map whose
 *                   properties are read via BeanUtils using the keys of {@code map}
 * @param map        column definition: key = property name on a row,
 *                   value = column header text
 * @param outPutPath directory the CSV file is written to (created if missing)
 * @param fileName   prefix used when a new file must be created (".csv" is appended)
 * @param exportName file name (including ".csv") used to detect whether a
 *                   previous batch already created the file
 * @return the name of the file written to, or {@code null} if writing failed
 */
public static String createCSVFileMore(
List exportData, LinkedHashMap map, String outPutPath, String fileName,String exportName) {
File csvFile = null;
try {
File dir = new File(outPutPath);
if (!dir.exists()) {
dir.mkdirs();
}
// listFiles() returns null when the path is not a readable directory;
// the original code NPE'd on that case.
File[] listFiles = dir.listFiles();
boolean appendToExisting = false;
if (listFiles != null) {
for (File f : listFiles) {
if (f.isFile() && f.getName().equals(exportName)) {
appendToExisting = true;
break;
}
}
}
if (appendToExisting) {
// A previous batch already created the file: append rows only.
csvFile = new File(outPutPath, exportName);
// 如果生产文件乱码,windows下用gbk,linux用UTF-8
try (BufferedWriter writer = new BufferedWriter(
new OutputStreamWriter(new FileOutputStream(csvFile, true), "UTF-8"), 1024)) {
// The previous batch did not end with a newline, so start one here.
// BUG FIX: the original also appended a BOM ('\ufeff') at this point,
// which corrupts the file — a UTF-8 BOM is only valid at byte 0.
writer.newLine();
writeRows(writer, exportData, map);
writer.flush();
}
} else {
// First batch (file absent or folder empty): create the file with
// a BOM and a header row. Branches were previously duplicated.
csvFile = File.createTempFile(fileName, ".csv", dir);
// UTF-8 BOM so spreadsheet tools (e.g. Excel) detect the encoding
// and read the "," separator correctly.
try (BufferedWriter writer = new BufferedWriter(
new OutputStreamWriter(new FileOutputStream(csvFile), "UTF-8"), 1024)) {
writer.write('\ufeff');
writeHeader(writer, map);
writer.newLine();
writeRows(writer, exportData, map);
writer.flush();
}
}
} catch (Exception e) {
e.printStackTrace();
}
// Null-safe: the original dereferenced csvFile unconditionally and could
// throw NPE after a failure inside the try block.
return csvFile != null ? csvFile.getName() : null;
}

/** Writes the header row: the values of {@code map}, comma separated. */
private static void writeHeader(BufferedWriter writer, LinkedHashMap map) throws IOException {
for (Iterator it = map.entrySet().iterator(); it.hasNext();) {
java.util.Map.Entry entry = (java.util.Map.Entry) it.next();
Object header = entry.getValue();
writer.write(header != null ? header.toString() : "");
if (it.hasNext()) {
writer.write(",");
}
}
}

/**
 * Writes one CSV line per element of {@code exportData}; each cell is the
 * row property named by the corresponding key of {@code map}. No trailing
 * newline is written after the last row, so a later batch can append
 * seamlessly.
 */
private static void writeRows(BufferedWriter writer, List exportData, LinkedHashMap map) throws Exception {
for (Iterator rows = exportData.iterator(); rows.hasNext();) {
Object row = rows.next();
for (Iterator cols = map.entrySet().iterator(); cols.hasNext();) {
java.util.Map.Entry entry = (java.util.Map.Entry) cols.next();
String value = BeanUtils.getProperty(row, (String) entry.getKey());
// Null-safe cell: the original append branch printed the literal
// "null" while the create branch threw NPE; write "" consistently.
writer.write(value != null ? value : "");
if (cols.hasNext()) {
writer.write(",");
}
}
if (rows.hasNext()) {
writer.newLine();
}
}
}