This example uses Spring Boot to implement chunked upload of large files and automatic merging of the chunks back into the original file once the upload finishes.
The frontend code, the backend code, and the SQL are all included below.
Frontend code
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Chunked Upload</title>
<script src="https://cdn.bootcdn.net/ajax/libs/spark-md5/3.0.2/spark-md5.min.js"></script>
</head>
<body>
Chunked upload
<form enctype="multipart/form-data">
<input type="file" name="fileInput" id="fileInput">
<input type="button" value="计算文件MD5" onclick="calculateFileMD5()">
<input type="button" value="上传" onclick="uploadFile()">
<input type="button" value="检测文件完整性" onclick="checkFile()">
</form>
<p>
File MD5:
<span id="fileMd5"></span>
</p>
<p>
Upload result:
<span id="uploadResult"></span>
</p>
<p>
File integrity check:
<span id="checkFileRes"></span>
</p>
<script>
// size of each chunk
var chunkSize = 1 * 1024 * 1024; // 1 MB
var uploadResult = document.getElementById("uploadResult")
var fileMd5Span = document.getElementById("fileMd5")
var checkFileRes = document.getElementById("checkFileRes")
var fileMd5;
function calculateFileMD5(){
var fileInput = document.getElementById('fileInput');
var file = fileInput.files[0];
getFileMd5(file).then((md5) => {
console.info(md5)
fileMd5=md5;
fileMd5Span.innerHTML=md5;
})
}
function uploadFile() {
var fileInput = document.getElementById('fileInput');
var file = fileInput.files[0];
console.log(file);
if (!file) return;
// if (!fileMd5) return;
// slice the file into chunks
let fileArr = sliceFile(file);
// keep the original file name
let fileName = file.name;
fileArr.forEach((e, i) => {
// build the multipart form data for this chunk
let data = new FormData();
data.append("fileName", fileName)
data.append("chunks", fileArr.length)
data.append("chunkSize", chunkSize)
data.append("chunk", i)
data.append("md5", fileMd5)
data.append("file", new File([e],fileName));
upload(data);
})
}
/**
* Compute the MD5 of the whole file
*/
function getFileMd5(file) {
return new Promise((resolve, reject) => {
let fileReader = new FileReader()
fileReader.onload = function (event) {
let fileMd5 = SparkMD5.ArrayBuffer.hash(event.target.result)
resolve(fileMd5)
}
fileReader.onerror = reject
fileReader.readAsArrayBuffer(file)
})
}
function upload(data) {
var xhr = new XMLHttpRequest();
// called when the request finishes
xhr.onload = function () {
if (xhr.status === 200) {
uploadResult.append('Uploaded chunk: ' + data.get("chunk") + '\t');
}
}
xhr.onerror = function () {
uploadResult.innerHTML = 'Upload failed';
}
// send the request
xhr.open('POST', 'http://127.0.0.1:9088/sliceUpload', true);
xhr.send(data);
}
function checkFile() {
var xhr = new XMLHttpRequest();
// called when the request finishes
xhr.onload = function () {
if (xhr.status === 200) {
checkFileRes.innerHTML = 'Integrity check succeeded: ' + xhr.responseText;
}
}
xhr.onerror = function () {
checkFileRes.innerHTML = 'Integrity check failed';
}
// send the request
xhr.open('POST', 'http://127.0.0.1:9088/checkFile', true);
let data = new FormData();
data.append("md5", fileMd5)
xhr.send(data);
}
function sliceFile(file) {
const chunks = [];
let start = 0;
let end;
while (start < file.size) {
end = Math.min(start + chunkSize, file.size);
chunks.push(file.slice(start, end));
start = end;
}
return chunks;
}
</script>
</body>
</html>
Database tables
CREATE TABLE `gz_upload_file` (
`id` bigint(20) NOT NULL,
`state` int(1) DEFAULT '0' COMMENT 'upload state: 0 - in progress; 1 - uploaded locally; 2 - uploaded to FTP',
`file_name` varchar(50) DEFAULT NULL COMMENT 'file name',
`file_path` varchar(255) DEFAULT NULL COMMENT 'local upload path',
`chunks` int(10) DEFAULT NULL COMMENT 'total number of chunks',
`chunk_size` bigint(20) DEFAULT NULL COMMENT 'chunk size',
`md5` varchar(50) DEFAULT NULL COMMENT 'file MD5',
`created_date` datetime DEFAULT NULL,
`deleted` varchar(1) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='file upload task table';
CREATE TABLE `gz_file_slice` (
`id` bigint(20) DEFAULT NULL,
`md5` varchar(50) DEFAULT NULL COMMENT 'file MD5',
`state` int(1) DEFAULT NULL COMMENT 'chunk upload state: 0 - not finished; 1 - finished',
`chunk` int(10) DEFAULT NULL COMMENT 'chunk index',
`size` bigint(20) DEFAULT NULL COMMENT 'size of this chunk',
`bytes` longblob COMMENT 'chunk bytes',
`created_date` datetime DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='file chunk storage table';
Java code
Two ways of merging the uploaded chunks into the target file are implemented here:
- writing the merged file to local storage
- uploading the merged file to an FTP server (a minimal stand-in for the FTP connection helper used below is sketched right after the Maven dependency)
Maven dependency
<!-- https://mvnrepository.com/artifact/commons-net/commons-net -->
<dependency>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
<version>3.9.0</version>
</dependency>
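The FileUploadService below also injects an FTPPoolService, which is not included in the original post. As a rough, hedged stand-in (not the original implementation), the sketch below simply opens and closes one FTPClient per call instead of pooling; the class name matches the injection point, but the ftp.* property keys and the borrowObject/returnObject behavior are assumptions.
package com.za.edu.service;
import org.apache.commons.net.ftp.FTPClient;
import org.apache.commons.net.ftp.FTPReply;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import java.io.IOException;
@Service
public class FTPPoolService {
    // connection settings; the property names are illustrative assumptions
    @Value("${ftp.host:localhost}")
    private String host;
    @Value("${ftp.port:21}")
    private int port;
    @Value("${ftp.username:anonymous}")
    private String username;
    @Value("${ftp.password:}")
    private String password;
    /** Open a connection and log in; the caller must give the client back via returnObject. */
    public FTPClient borrowObject() {
        try {
            FTPClient client = new FTPClient();
            client.connect(host, port);
            if (!FTPReply.isPositiveCompletion(client.getReplyCode())) {
                client.disconnect();
                throw new IOException("FTP server refused the connection");
            }
            if (!client.login(username, password)) {
                client.disconnect();
                throw new IOException("FTP login failed");
            }
            return client;
        } catch (IOException e) {
            throw new IllegalStateException("could not connect to the FTP server", e);
        }
    }
    /** Log out and disconnect, ignoring errors on close. */
    public void returnObject(FTPClient client) {
        if (client == null) {
            return;
        }
        try {
            client.logout();
        } catch (IOException ignored) {
            // best effort
        } finally {
            try {
                client.disconnect();
            } catch (IOException ignored) {
                // best effort
            }
        }
    }
}
The FileUploadService itself: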
package com.za.edu.service;
import cn.hutool.core.util.CharsetUtil;
import com.alibaba.fastjson.JSON;
import com.za.edu.base.BaseBean;
import com.za.edu.bean.FileSlice;
import com.za.edu.bean.UploadFile;
import com.za.edu.dto.FileUploadDTO;
import com.za.edu.enums.UploadStatus;
import com.za.edu.exception.BusinessException;
import com.za.edu.mapper.FileSliceMapper;
import com.za.edu.mapper.UploadFileMapper;
import com.za.edu.sandbox.TenantContext;
import com.za.edu.utils.ExceptionUtil;
import com.za.edu.utils.FileUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.net.ftp.FTP;
import org.apache.commons.net.ftp.FTPClient;
import org.apache.commons.net.ftp.FTPFile;
import org.springframework.beans.BeanUtils;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Resource;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
@Slf4j
@Service
public class FileUploadService {
// global map that records which chunk indexes have been uploaded, keyed by md5 + "_" + chunk
private static final ConcurrentHashMap<String, Integer> uploadChunksMap = new ConcurrentHashMap<>();
@Resource
private UploadFileMapper uploadFileMapper;
@Resource
private FileSliceMapper fileSliceMapper;
@Resource
FTPPoolService ftpPoolService;
@Transactional
public void sliceUpload(FileUploadDTO uploadDTO) throws IOException {
ExceptionUtil.isNull(uploadDTO.getFile(), "file must not be empty");
if(uploadDTO.getChunk() == 0){
//create the upload task record when the first chunk arrives
UploadFile uploadFile = uploadFileMapper.queryByMd5(uploadDTO.getMd5());
if(uploadFile == null){
uploadFile = new UploadFile();
BeanUtils.copyProperties(uploadDTO, uploadFile);
uploadFile.setId(UploadFile.idGenerator.nextId());
uploadFile.setFileName(uploadDTO.getFileName());
uploadFile.setFilePath(uploadDTO.getFilePath());
uploadFileMapper.add(uploadFile);
}
}
//persist this chunk
FileSlice fileSlice = new FileSlice();
fileSlice.setId(UploadFile.idGenerator.nextId());
fileSlice.setMd5(uploadDTO.getMd5());
fileSlice.setState(BaseBean.TRUE);
fileSlice.setSize(uploadDTO.getSize());
fileSlice.setChunk(uploadDTO.getChunk());
fileSlice.setBytes(uploadDTO.getFile().getBytes());
fileSliceMapper.add(fileSlice);
//record this chunk index in the global map
uploadChunksMap.put(uploadDTO.getMd5()+"_"+fileSlice.getChunk(), fileSlice.getChunk());
//once more than 90% of the chunks have arrived, start checking whether the whole upload is complete
if(fileSlice.getChunk() > (uploadDTO.getChunks()*0.9)){
//intern the md5 so that concurrent requests for the same file lock on the same object
synchronized (uploadDTO.getMd5().intern()){
//only count the chunks that belong to this file
List<String> chunks = uploadChunksMap.keySet().stream()
.filter(key -> key.startsWith(uploadDTO.getMd5() + "_"))
.collect(Collectors.toList());
System.out.println("Uploaded " + chunks.size() + " chunks of this file so far");
if(chunks.size() == uploadDTO.getChunks()){
System.out.println("All " + chunks.size() + " chunks uploaded, merging");
new Thread(() -> {
List<FileSlice> fileSlices = fileSliceMapper.queryByMd5(uploadDTO.getMd5());
//make sure the chunks are written back in order
fileSlices.sort(Comparator.comparing(FileSlice::getChunk));
/** merge the chunks into a local file **/
File mergeFile = new File("G:\\Downloads\\"+uploadDTO.getFileName());
try(RandomAccessFile randomAccessFileWriter = new RandomAccessFile(mergeFile, "rw")){
for (FileSlice chunkFile : fileSlices) {
//write byte by byte
// for(byte b : chunkFile.getBytes()){
// randomAccessFileWriter.write(b);
// }
//write the whole chunk in one call
randomAccessFileWriter.write(chunkFile.getBytes(), 0, chunkFile.getBytes().length);
}
}catch (Exception err){
log.error("", err);
}
/** alternative: upload the chunks to an FTP server **/
// FTPClient ftpClient = ftpPoolService.borrowObject();
// String fName = uploadDTO.getFileName();
// try {
// final String ffName = new String(fName.getBytes("GBK"), StandardCharsets.UTF_8);
// // use passive mode for the data connection
// ftpClient.enterLocalPassiveMode();
// // transfer as a binary stream
// ftpClient.setFileType(FTP.BINARY_FILE_TYPE);
// ftpClient.setControlEncoding(CharsetUtil.GBK);
// // make sure the remote working directory exists
// String uploadPath = "ftp";
// //switch to the working directory
// if (!ftpClient.changeWorkingDirectory(uploadPath)) {
// ftpClient.makeDirectory(uploadPath);
// ftpClient.changeWorkingDirectory(uploadPath);
// }
// for (FileSlice chunkFile : fileSlices) {
// System.out.println("==============-----------------------------==================");
// if(chunkFile.getChunk() == 0){
// ftpClient.storeFile(ffName, new ByteArrayInputStream(chunkFile.getBytes(), 0, chunkFile.getBytes().length));
// Thread.sleep(200);
// }else{
// ftpClient.appendFile(ffName, new ByteArrayInputStream(chunkFile.getBytes(), 0, chunkFile.getBytes().length));
// Thread.sleep(200);
// }
// System.out.println("已完成了向FTP服务器上传第"+chunkFile.getChunk()+"分片数据");
// }
//
// }catch (Exception err){
// log.error("", err);
// throw new BusinessException(err.getMessage());
// }
//clean up the stored chunks and the in-memory chunk markers
fileSliceMapper.deleteByMd5(uploadDTO.getMd5());
uploadChunksMap.keySet().removeIf(key -> key.startsWith(uploadDTO.getMd5() + "_"));
}).start();
}
}
}
}
}
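The controller and the FileUploadDTO that the page talks to are not included in the original post. The sketch below is a reconstruction based on the FormData keys sent by the page (fileName, chunks, chunkSize, chunk, md5, file), the URLs it calls (/sliceUpload and /checkFile on port 9088), and the getters used in FileUploadService; treat the field types, the @CrossOrigin setting, and the checkFile stub as assumptions rather than the original code.
package com.za.edu.dto;
import lombok.Data;
import org.springframework.web.multipart.MultipartFile;
/** Field names mirror the FormData keys; filePath and size are inferred from the service and are not sent by the page. */
@Data
public class FileUploadDTO {
    private String fileName;
    private String filePath;
    private Integer chunks;     // total number of chunks
    private Long chunkSize;     // size of a full chunk
    private Integer chunk;      // index of the current chunk
    private Long size;          // payload size of the current chunk
    private String md5;
    private MultipartFile file; // the chunk itself
}
A matching controller could look like this:
package com.za.edu.controller;
import com.za.edu.dto.FileUploadDTO;
import com.za.edu.service.FileUploadService;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Resource;
import java.io.IOException;
@RestController
@CrossOrigin // the demo page is served from a different origin than port 9088
public class FileUploadController {
    @Resource
    private FileUploadService fileUploadService;
    /** Receives one chunk; Spring binds the multipart form fields onto FileUploadDTO. */
    @PostMapping("/sliceUpload")
    public String sliceUpload(FileUploadDTO uploadDTO) throws IOException {
        if (uploadDTO.getSize() == null && uploadDTO.getFile() != null) {
            // the page does not send "size", so derive it from the uploaded chunk
            uploadDTO.setSize(uploadDTO.getFile().getSize());
        }
        fileUploadService.sliceUpload(uploadDTO);
        return "ok";
    }
    /** Integrity-check stub; the real check logic is not shown in the original post. */
    @PostMapping("/checkFile")
    public String checkFile(@RequestParam("md5") String md5) {
        return "received md5 " + md5;
    }
}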