给大家分享一下 Vue 3 结合本地存储(localStorage)实现断点续传的思路,以下是前端和 Java 后端代码。
前端代码:
<template>
<!-- Resumable-upload demo UI: file picker, progress bar, and transfer controls. -->
<div class="upload-container">
<!-- Selecting a file triggers hashing and chunking via handleFileChange. -->
<input type="file" @change="handleFileChange" />
<!-- Progress bar stays hidden until the first chunk has been uploaded. -->
<el-progress :percentage="uploadProgress" v-if="uploadProgress > 0"></el-progress>
<el-button @click="handleUpload">开始上传</el-button>
<el-button @click="pauseUpload">暂停上传</el-button>
<el-button @click="resumeUpload">继续上传</el-button>
</div>
</template>
<script setup>
import { ref } from "vue";
import SparkMD5 from "spark-md5";
const CHUNK_SIZE = 1 * 1024; // 2MB 切片大小
const file = ref(null);
const uploadProgress = ref(0);
const uploading = ref(false);
const paused = ref(false);
const chunks = ref([]);
const fileHash = ref("");
// File-picker change handler: remember the selection and start hashing it.
const handleFileChange = (event) => {
  const [selected] = event.target.files;
  file.value = selected;
  if (file.value) {
    calculateHash();
  }
};
// Compute an MD5 fingerprint of the whole file, then build the chunk list.
// Returns a promise that settles only after hashing (and chunking) finishes;
// the original `async` version resolved immediately, before the FileReader
// callback ran, so callers could never reliably await completion.
// NOTE(review): this reads the entire file into memory at once — fine for a
// demo, but large files should be hashed incrementally chunk by chunk.
const calculateHash = () => {
  return new Promise((resolve, reject) => {
    const spark = new SparkMD5.ArrayBuffer();
    const reader = new FileReader();
    reader.onload = (e) => {
      spark.append(e.target.result);
      fileHash.value = spark.end();
      createFileChunks();
      resolve(fileHash.value);
    };
    // Surface read failures instead of hanging forever.
    reader.onerror = () => reject(reader.error);
    reader.readAsArrayBuffer(file.value);
  });
};
// Slice the selected file into fixed-size pieces. Each piece carries its
// index and an index-qualified hash ("<fileHash>-<index>") so the server
// can store and later reorder the chunks.
const createFileChunks = () => {
  const total = file.value.size;
  const pieces = [];
  for (let offset = 0; offset < total; offset += CHUNK_SIZE) {
    const index = pieces.length;
    pieces.push({
      chunk: file.value.slice(offset, offset + CHUNK_SIZE),
      index,
      hash: `${fileHash.value}-${index}`,
    });
  }
  chunks.value = pieces;
};
// Upload one chunk. Progress is written to localStorage only after the
// server confirms success; on any failure the transfer is paused so the
// loop in handleUpload stops instead of silently skipping chunks (the
// original recorded progress even for HTTP 4xx/5xx responses, because
// fetch() resolves on those, and kept uploading after network errors).
const uploadChunk = async (chunk) => {
  const formData = new FormData();
  formData.append("chunk", chunk.chunk);
  formData.append("hash", chunk.hash);
  formData.append("fileHash", fileHash.value);
  formData.append("filename", file.value.name);
  try {
    const response = await fetch("/api/upload/chunk", {
      method: "POST",
      body: formData,
    });
    // fetch() only rejects on network errors — treat non-2xx as failure too.
    if (!response.ok) {
      throw new Error(`chunk upload failed with status ${response.status}`);
    }
    // 保存上传进度 (persist resume point for this file hash)
    localStorage.setItem(
      fileHash.value,
      JSON.stringify({
        uploadedChunks: chunk.index + 1,
        filename: file.value.name,
      })
    );
    updateProgress(chunk.index + 1);
  } catch (error) {
    console.error("上传失败:", error);
    // Stop the upload loop; the user can resume from the saved offset.
    paused.value = true;
    uploading.value = false;
  }
};
// Refresh the percentage driving the <el-progress> bar.
const updateProgress = (uploadedChunks) => {
  const total = chunks.value.length;
  uploadProgress.value = Math.round((100 * uploadedChunks) / total);
};
// Start (or resume) the upload: skip chunks already recorded in
// localStorage, send the rest sequentially, then ask the server to merge.
const handleUpload = async () => {
  if (!file.value) return;
  // Guard against clicking "start" before hashing/chunking has finished;
  // the original would fall through and call merge with nothing uploaded.
  if (chunks.value.length === 0) return;
  uploading.value = true;
  paused.value = false;
  // Resume from the last chunk index persisted for this file hash, if any.
  const uploadInfo = localStorage.getItem(fileHash.value);
  let startIndex = 0;
  if (uploadInfo) {
    const { uploadedChunks } = JSON.parse(uploadInfo);
    startIndex = uploadedChunks;
    updateProgress(startIndex);
  }
  // Upload remaining chunks one at a time; `paused` may be flipped by the
  // pause button (or by a failed chunk) between iterations.
  for (let i = startIndex; i < chunks.value.length; i++) {
    if (paused.value) break;
    await uploadChunk(chunks.value[i]);
  }
  if (!paused.value) {
    try {
      // 通知服务器所有切片上传完成 (ask the server to assemble the file)
      await fetch("/api/upload/merge", {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          fileHash: fileHash.value,
          filename: file.value.name,
        }),
      });
      // Only forget the resume point once merge has been requested.
      localStorage.removeItem(fileHash.value);
    } catch (error) {
      console.error("合并失败:", error);
    } finally {
      // Originally `uploading` stayed true forever if merge threw.
      uploading.value = false;
    }
  }
};
// Pause: the upload loop in handleUpload checks `paused` before each chunk.
const pauseUpload = () => {
  uploading.value = false;
  paused.value = true;
};
// Resume: re-enter handleUpload, which picks up from localStorage.
const resumeUpload = () => {
  handleUpload();
};
</script>
<style scoped>
.upload-container {
padding: 20px;
}
</style>
下面是 Java 后端代码:
@RestController
@RequestMapping("/api/upload")
public class UploadController {
private static final String UPLOAD_DIR = "uploads";
private static final String TEMP_DIR = "uploads_temp";
@GetMapping("/check")
public ResponseEntity<?> checkChunk(@RequestParam String fileHash) {
try {
File chunkDir = new File(TEMP_DIR, fileHash);
if (!chunkDir.exists()) {
return ResponseEntity.ok(Map.of("uploaded", new ArrayList<>()));
}
String[] uploadedChunks = chunkDir.list();
return ResponseEntity.ok(Map.of("uploaded", uploadedChunks != null ? Arrays.asList(uploadedChunks) : new ArrayList<>()));
} catch (Exception e) {
return ResponseEntity.status(500).body(Map.of("error", e.getMessage()));
}
}
@PostMapping("/chunk")
public ResponseEntity<?> uploadChunk(
@RequestParam("chunk") MultipartFile chunk,
@RequestParam("hash") String hash,
@RequestParam("fileHash") String fileHash,
@RequestParam("filename") String filename) {
try {
// 创建文件夹
File chunkDir = new File(TEMP_DIR, fileHash);
if (!chunkDir.exists()) {
chunkDir.mkdirs();
}
// 保存切片文件
File destFile = new File(chunkDir, hash);
chunk.transferTo(destFile);
return ResponseEntity.ok(Map.of("message", "chunk uploaded"));
} catch (Exception e) {
return ResponseEntity.status(500).body(Map.of("error", e.getMessage()));
}
}
@PostMapping("/merge")
public ResponseEntity<?> mergeChunks(@RequestBody Map<String, String> params) {
String filename = params.get("filename");
String fileHash = params.get("fileHash");
try {
File chunkDir = new File(TEMP_DIR, fileHash);
if (!chunkDir.exists()) {
return ResponseEntity.badRequest().body(Map.of("error", "No chunks found"));
}
File[] chunks = chunkDir.listFiles();
if (chunks == null || chunks.length == 0) {
return ResponseEntity.badRequest().body(Map.of("error", "No chunks found"));
}
// 按照切片索引排序
Arrays.sort(chunks, (f1, f2) -> {
String i1 = f1.getName().split("-")[1];
String i2 = f2.getName().split("-")[1];
return Integer.parseInt(i1) - Integer.parseInt(i2);
});
// 创建最终文件
File uploadDir = new File(UPLOAD_DIR);
if (!uploadDir.exists()) {
uploadDir.mkdirs();
}
File destFile = new File(uploadDir, filename);
// 合并文件
try (FileChannel outChannel = new FileOutputStream(destFile).getChannel()) {
for (File chunk : chunks) {
try (FileChannel inChannel = new FileInputStream(chunk).getChannel()) {
inChannel.transferTo(0, inChannel.size(), outChannel);
}
}
}
// 清理临时文件
for (File chunk : chunks) {
chunk.delete();
}
chunkDir.delete();
return ResponseEntity.ok(Map.of("message", "file merged"));
} catch (Exception e) {
return ResponseEntity.status(500).body(Map.of("error", e.getMessage()));
}
}
}
以上就是功能实现