## Large file upload boils down to the following steps
1. Initialize the multipart upload
2. Upload the chunks (the front end slices the file and uploads the chunks concurrently; see the sketch right after this list)
3. Merge the chunks into the final file
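The slicing itself normally happens in the browser, but the arithmetic is the same everywhere. Here is a minimal Java sketch of the chunk math, assuming a hypothetical local file `bigfile.zip` and an arbitrary 5 MB chunk size (S3's minimum for all but the last part):

```java
import java.io.File;

public class ChunkMathDemo {
    public static void main(String[] args) {
        File file = new File("bigfile.zip");   // hypothetical input file
        long chunkSize = 5 * 1024 * 1024L;     // 5 MB per chunk
        // Ceiling division: the last chunk is allowed to be smaller
        long totalChunks = (file.length() + chunkSize - 1) / chunkSize;
        for (int part = 1; part <= totalChunks; part++) {   // part numbers are 1-based
            long start = (long) (part - 1) * chunkSize;
            long end = Math.min(start + chunkSize, file.length());
            System.out.printf("part %d: bytes [%d, %d)%n", part, start, end);
        }
    }
}
```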
## No more preamble, straight to the code; copy it and adapt as needed
Dependencies (I use Aliyun OSS here; pull in whatever matches your own storage):
```xml
<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-s3</artifactId>
    <version>1.12.261</version>
</dependency>
<!-- Aliyun OSS -->
<dependency>
    <groupId>com.aliyun.oss</groupId>
    <artifactId>aliyun-sdk-oss</artifactId>
    <version>3.10.2</version>
</dependency>
```
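The classes below also rely on Spring Web, Spring Data Redis, Lombok, Hutool, fastjson, and commons-lang3, which the original list leaves implicit. Indicative coordinates (the versions are examples, not requirements):

```xml
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
</dependency>
<dependency>
    <groupId>cn.hutool</groupId>
    <artifactId>hutool-all</artifactId>
    <version>5.8.11</version>
</dependency>
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>fastjson</artifactId>
    <version>1.2.83</version>
</dependency>
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-lang3</artifactId>
    <version>3.12.0</version>
</dependency>
```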
Initialize the AmazonS3 client:
```java
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import lombok.RequiredArgsConstructor;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
@RequiredArgsConstructor
@EnableConfigurationProperties(OssProperties.class)
public class OssAutoConfiguration {

    @Bean
    @ConditionalOnMissingBean
    public AmazonS3 ossClient(OssProperties ossProperties) {
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(ossProperties.getMaxConnections());
        AwsClientBuilder.EndpointConfiguration endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(
                ossProperties.getEndpoint(), ossProperties.getRegion());
        AWSCredentials awsCredentials = new BasicAWSCredentials(ossProperties.getAccessKey(),
                ossProperties.getSecretKey());
        AWSCredentialsProvider awsCredentialsProvider = new AWSStaticCredentialsProvider(awsCredentials);
        // Chunked encoding is disabled for OSS compatibility; path-style vs virtual-hosted-style is driven by oss.pathStyleAccess
        return AmazonS3Client.builder().withEndpointConfiguration(endpointConfiguration)
                .withClientConfiguration(clientConfiguration).withCredentials(awsCredentialsProvider)
                .disableChunkedEncoding().withPathStyleAccessEnabled(ossProperties.getPathStyleAccess()).build();
    }
}
```
OssProperties:
```java
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * OSS configuration properties.
 * Registered via @EnableConfigurationProperties on OssAutoConfiguration,
 * so no @Configuration annotation is needed here.
 */
@Data
@ConfigurationProperties(prefix = "oss")
public class OssProperties {

    /**
     * Object storage service endpoint
     */
    private String endpoint;

    /**
     * Region
     */
    private String region;

    /**
     * true: path-style access, {http://endpoint/bucketname}; the style nginx reverse
     * proxies and stock S3 support by default.
     * false: virtual-hosted-style access, {http://bucketname.endpoint}; required by
     * Aliyun OSS and similar providers. Only the URL layout differs.
     */
    private Boolean pathStyleAccess = true;

    /**
     * Access key
     */
    private String accessKey;

    /**
     * Secret key
     */
    private String secretKey;

    /**
     * Maximum number of connections, default: 1000
     */
    private Integer maxConnections = 1000;

    /**
     * Bucket name
     */
    private String bucketName;

    /**
     * Public OSS domain used to build download URLs
     */
    private String ossUrl;
}
```
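For reference, a matching `application.yml` under the `oss` prefix might look like the following; every value is a placeholder you must replace:

```yaml
oss:
  endpoint: oss-cn-hangzhou.aliyuncs.com     # placeholder endpoint
  region: oss-cn-hangzhou                    # placeholder region
  path-style-access: false                   # Aliyun OSS needs virtual-hosted-style
  access-key: <your-access-key>
  secret-key: <your-secret-key>
  bucket-name: <your-bucket>
  oss-url: <your-bucket>.oss-cn-hangzhou.aliyuncs.com
```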
Request parameter class FileChunkInfo:
```java
import java.io.Serializable;
import lombok.Data;
import org.springframework.web.multipart.MultipartFile;

@Data
public class FileChunkInfo implements Serializable {

    private static final long serialVersionUID = 2353726406791217168L;

    // Unique upload id returned when the multipart upload is initialized
    private String fileUploadId;

    // Number of the current chunk (1-based)
    private Integer currentChunkNumber;

    // Chunk size in bytes
    private Integer chunkSize;

    // MD5 of the whole file
    private String fileMD5;

    // The chunk payload
    private MultipartFile file;

    // Total number of chunks
    private Integer totalChunks;

    // Original file name
    private String originalFileName;

    // Object key the file is uploaded to
    private String fileUploadPath;
}
```
ResultEntity:
```java
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

@Data
@AllArgsConstructor
@NoArgsConstructor
public class ResultEntity<T> {

    private Integer code;
    private String message;
    private T data;

    public static <T> ResultEntity<T> fail(Integer code, String msg, T t) {
        return new ResultEntity<>(code, msg, t);
    }

    public static <T> ResultEntity<T> fail(T t) {
        return ResultEntity.fail(501, "failed", t);
    }

    public static <T> ResultEntity<T> success(Integer code, String msg, T t) {
        return new ResultEntity<>(code, msg, t);
    }

    public static <T> ResultEntity<T> success(T t) {
        return ResultEntity.success(200, "success!", t);
    }

    public static <T> ResultEntity<T> success() {
        return ResultEntity.success(200, "success!", null);
    }
}
```
Response class BigFileResp:
```java
import com.amazonaws.services.s3.model.PartETag;
import java.io.Serializable;
import java.util.List;
import lombok.Data;

@Data
public class BigFileResp implements Serializable {

    private static final long serialVersionUID = 3679861506816410985L;

    // Unique upload id returned when the multipart upload is initialized
    private String fileUploadId;

    // Number of the current chunk
    private Integer currentChunkNumber;

    // ETag returned for the uploaded chunk
    private PartETag partETag;

    // Numbers of the chunks that have already been uploaded
    private List<Integer> finishChunks;

    // Object key the file is uploaded to
    private String fileUploadPath;

    // MD5 of the whole file
    private String fileMD5;

    // Download URL of the merged file
    private String url;

    // File name
    private String fileName;

    // Database id of the OSS record
    private String ossId;
}
```
Controller:
```java
import cn.hutool.core.util.IdUtil;
import cn.hutool.core.util.StrUtil;
import com.alibaba.fastjson.JSONObject;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.*;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Map;
import lombok.RequiredArgsConstructor;
import org.apache.commons.lang3.time.DateFormatUtils;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * File upload
 */
@RestController
@RequestMapping("file")
@RequiredArgsConstructor
public class UploadController {

    private final AmazonS3 amazonS3;
    private final OssProperties ossProperties;
    private final RedisTemplate<String, String> redisTemplate;

    /**
     * Initialize the multipart upload
     */
    @PostMapping("/init")
    public ResultEntity<BigFileResp> uploadInit(FileChunkInfo fileInfo) {
        // Build the object key for this upload
        String path = getPath(fileInfo);
        // Ask S3/OSS for an upload id
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(ossProperties.getBucketName(), path);
        InitiateMultipartUploadResult multipartUploadResult = amazonS3.initiateMultipartUpload(initRequest);
        String uploadId = multipartUploadResult.getUploadId();
        BigFileResp response = new BigFileResp();
        response.setFileUploadId(uploadId);
        response.setFileUploadPath(path);
        return ResultEntity.success(response);
    }

    /**
     * Upload one chunk
     */
    @PostMapping("/part")
    public ResultEntity<BigFileResp> uploadPart(FileChunkInfo fileInfo) throws Exception {
        UploadPartRequest request = new UploadPartRequest()
                .withBucketName(ossProperties.getBucketName())
                .withKey(fileInfo.getFileUploadPath())
                .withUploadId(fileInfo.getFileUploadId())
                .withPartNumber(fileInfo.getCurrentChunkNumber())
                .withInputStream(fileInfo.getFile().getInputStream())
                // Use the actual size of this chunk: the last chunk is usually smaller than chunkSize
                .withPartSize(fileInfo.getFile().getSize());
        // Upload the chunk
        UploadPartResult uploadPartResult = amazonS3.uploadPart(request);
        PartETag partETag = uploadPartResult.getPartETag();
        String fileUploadId = fileInfo.getFileUploadId();
        String etagString = JSONObject.toJSONString(partETag);
        // Record the finished chunk in a Redis hash; the hash key must be a String
        // because the template is RedisTemplate<String, String>
        redisTemplate.opsForHash().put(fileUploadId, String.valueOf(fileInfo.getCurrentChunkNumber()), etagString);
        BigFileResp response = new BigFileResp();
        response.setFileUploadId(fileInfo.getFileUploadId());
        response.setPartETag(partETag);
        return ResultEntity.success(response);
    }

    /**
     * Merge the chunks
     */
    @PostMapping("/merge")
    public ResultEntity<BigFileResp> merge(FileChunkInfo fileInfo) {
        // Collect the ETags of all uploaded chunks
        Map<Object, Object> map = redisTemplate.opsForHash().entries(fileInfo.getFileUploadId());
        List<PartETag> etagList = new ArrayList<>();
        for (Map.Entry<Object, Object> entry : map.entrySet()) {
            // PartETag has no default constructor, so rebuild it from the stored JSON
            // ("partNumber" and "eTag" are the property names fastjson derives from its getters)
            JSONObject json = JSONObject.parseObject((String) entry.getValue());
            etagList.add(new PartETag(json.getIntValue("partNumber"), json.getString("eTag")));
        }
        // S3 expects the part list in ascending part-number order
        etagList.sort(Comparator.comparingInt(PartETag::getPartNumber));
        // Complete the multipart upload
        CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(
                ossProperties.getBucketName(),
                fileInfo.getFileUploadPath(),
                fileInfo.getFileUploadId(),
                etagList);
        amazonS3.completeMultipartUpload(request);
        // Drop the bookkeeping hash
        redisTemplate.delete(fileInfo.getFileUploadId());
        String url = "https://" + ossProperties.getOssUrl() + "/" + fileInfo.getFileUploadPath();
        BigFileResp response = new BigFileResp();
        response.setUrl(url);
        return ResultEntity.success(response);
    }

    private String getPath(FileChunkInfo fileInfo) {
        String uuid = IdUtil.fastSimpleUUID();
        // Keep only the extension; the original code appended the whole file name twice
        String suffix = StrUtil.subAfter(fileInfo.getOriginalFileName(), ".", true);
        String format = DateFormatUtils.format(new Date(), "yyyy/MM/dd/");
        return format + uuid + "." + suffix;
    }
}
```
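To show how the three endpoints chain together, here is a minimal Java client sketch using Spring's RestTemplate. The host `localhost:8080`, the file name `bigfile.zip`, and the 5 MB chunk size are all assumptions; the form field names mirror FileChunkInfo above. A real front end would do the same thing in the browser, firing the chunk requests concurrently.

```java
import com.alibaba.fastjson.JSONObject;
import java.io.RandomAccessFile;
import org.springframework.core.io.ByteArrayResource;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestTemplate;

public class UploadClientDemo {

    private static final String BASE = "http://localhost:8080/file"; // assumed host and port

    public static void main(String[] args) throws Exception {
        RestTemplate rest = new RestTemplate();
        String filePath = "bigfile.zip";      // hypothetical local file
        long chunkSize = 5 * 1024 * 1024L;    // 5 MB per chunk

        // 1. init: obtain the upload id and the generated object key
        MultiValueMap<String, Object> initForm = new LinkedMultiValueMap<>();
        initForm.add("originalFileName", "bigfile.zip");
        JSONObject init = JSONObject.parseObject(
                rest.postForObject(BASE + "/init", initForm, String.class)).getJSONObject("data");
        String uploadId = init.getString("fileUploadId");
        String uploadPath = init.getString("fileUploadPath");

        // 2. upload every chunk (sequential here; a browser client would run these concurrently)
        try (RandomAccessFile raf = new RandomAccessFile(filePath, "r")) {
            long totalChunks = (raf.length() + chunkSize - 1) / chunkSize;
            for (int part = 1; part <= totalChunks; part++) {
                final String chunkName = "chunk-" + part;
                byte[] buf = new byte[(int) Math.min(chunkSize, raf.length() - (long) (part - 1) * chunkSize)];
                raf.seek((long) (part - 1) * chunkSize);
                raf.readFully(buf);
                MultiValueMap<String, Object> form = new LinkedMultiValueMap<>();
                form.add("fileUploadId", uploadId);
                form.add("fileUploadPath", uploadPath);
                form.add("currentChunkNumber", String.valueOf(part));
                // ByteArrayResource needs a file name for the multipart boundary
                form.add("file", new ByteArrayResource(buf) {
                    @Override
                    public String getFilename() {
                        return chunkName;
                    }
                });
                rest.postForObject(BASE + "/part", form, String.class);
            }
        }

        // 3. merge: the response carries the final download URL
        MultiValueMap<String, Object> mergeForm = new LinkedMultiValueMap<>();
        mergeForm.add("fileUploadId", uploadId);
        mergeForm.add("fileUploadPath", uploadPath);
        System.out.println(rest.postForObject(BASE + "/merge", mergeForm, String.class));
    }
}
```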
## Final notes
On resumable uploads: every finished chunk is already recorded in Redis during the upload phase, so after a network failure you only need to query which chunks are still missing, upload those, and then merge. A sketch of such a query endpoint follows.
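A minimal sketch of that endpoint, assuming it is added to the UploadController above (it reuses the controller's redisTemplate field and additionally needs `java.util.Set` and `java.util.stream.Collectors` imports); the `/finished` path is my own naming, not from the original:

```java
/**
 * Return the chunk numbers already uploaded for a given upload id,
 * read from the same Redis hash that uploadPart() writes to. The
 * front end uploads only the missing chunks, then calls /merge.
 */
@PostMapping("/finished")
public ResultEntity<BigFileResp> finishedChunks(FileChunkInfo fileInfo) {
    Set<Object> keys = redisTemplate.opsForHash().keys(fileInfo.getFileUploadId());
    List<Integer> finished = keys.stream()
            .map(k -> Integer.valueOf((String) k))   // hash keys were stored as strings
            .sorted()
            .collect(Collectors.toList());
    BigFileResp response = new BigFileResp();
    response.setFileUploadId(fileInfo.getFileUploadId());
    response.setFinishChunks(finished);
    return ResultEntity.success(response);
}
```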
On instant upload ("秒传"): store each file's MD5 in the database; when an upload starts, look the MD5 up first, and if it already exists simply return the stored record, as sketched below.
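A hedged sketch of that MD5 check: `OssFile`, `ossFileMapper`, and `selectByMd5` are hypothetical stand-ins for whatever entity and persistence layer you use, and the `/check` path is my own naming:

```java
/**
 * Instant-upload check: if the MD5 is already known, return the stored
 * URL so the client can skip the upload entirely.
 */
@PostMapping("/check")
public ResultEntity<BigFileResp> checkMd5(FileChunkInfo fileInfo) {
    // ossFileMapper.selectByMd5 is a hypothetical DAO call
    OssFile existing = ossFileMapper.selectByMd5(fileInfo.getFileMD5());
    BigFileResp response = new BigFileResp();
    response.setFileMD5(fileInfo.getFileMD5());
    if (existing != null) {
        response.setUrl(existing.getUrl());   // hit: hand back the stored URL
        response.setOssId(existing.getId());  // and the database id
    }
    return ResultEntity.success(response);
}
```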
Implement the features above along these lines and you are set. Hope this helps!