Notes on Chunked (Multipart) Upload of Large Files with AmazonS3

This post explains in detail how to upload large files to Alibaba Cloud OSS through the S3-compatible API, covering file chunking, concurrent requests, upload initialization, part upload, and merging, as well as how to implement resumable upload and instant upload. Java code samples are provided.


## Large-file upload comes down to the following steps

1. Initialize the upload

2. Upload the chunks (the front end slices the file and sends the chunk requests concurrently)

3. Merge the chunks (a client-side sketch of the whole flow appears after the controller code below)

## No more talk, straight to the code; copy it and use it directly

Dependencies (I am using Alibaba Cloud storage here; pull in whichever SDK matches your own provider):

```xml
<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-s3</artifactId>
    <version>1.12.261</version>
</dependency>

<!-- Alibaba Cloud OSS SDK -->
<dependency>
    <groupId>com.aliyun.oss</groupId>
    <artifactId>aliyun-sdk-oss</artifactId>
    <version>3.10.2</version>
</dependency>
```

Initialize the AmazonS3 client:

```java
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import lombok.RequiredArgsConstructor;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
@RequiredArgsConstructor
@EnableConfigurationProperties(OssProperties.class)
public class OssAutoConfiguration {

    @Bean
    @ConditionalOnMissingBean
    public AmazonS3 ossClient(OssProperties ossProperties) {
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(ossProperties.getMaxConnections());
        AwsClientBuilder.EndpointConfiguration endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(
                ossProperties.getEndpoint(), ossProperties.getRegion());
        AWSCredentials awsCredentials = new BasicAWSCredentials(ossProperties.getAccessKey(),
                ossProperties.getSecretKey());
        AWSCredentialsProvider awsCredentialsProvider = new AWSStaticCredentialsProvider(awsCredentials);
        // disableChunkedEncoding is needed by some S3-compatible providers such as Aliyun OSS
        return AmazonS3Client.builder().withEndpointConfiguration(endpointConfiguration)
                .withClientConfiguration(clientConfiguration).withCredentials(awsCredentialsProvider)
                .disableChunkedEncoding().withPathStyleAccessEnabled(ossProperties.getPathStyleAccess()).build();
    }
}
```

OssProperties:

```java
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * OSS configuration properties.
 * Registered via @EnableConfigurationProperties(OssProperties.class) above,
 * so no extra @Configuration annotation is needed here.
 */
@Data
@ConfigurationProperties(prefix = "oss")
public class OssProperties {
    /**
     * Object storage service endpoint
     */
    private String endpoint;

    /**
     * Region
     */
    private String region;

    /**
     * true: path-style; nginx reverse proxies and vanilla S3 support path-style by default {http://endpoint/bucketname}
     * false: virtual-hosted-style; Aliyun and similar providers require virtual-hosted-style {http://bucketname.endpoint}
     * Only the URL layout differs.
     */
    private Boolean pathStyleAccess = true;

    /**
     * Access key
     */
    private String accessKey;

    /**
     * Secret key
     */
    private String secretKey;

    /**
     * Maximum number of HTTP connections, default: 1000
     */
    private Integer maxConnections = 1000;

    /**
     * Bucket name
     */
    private String bucketName;

    /**
     * Public OSS domain used to build file URLs
     */
    private String ossUrl;
}
```
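
For reference, a matching application.yml might look like the following. All values are placeholders; Spring's relaxed binding maps access-key to accessKey, and so on:

```yaml
oss:
  endpoint: oss-cn-hangzhou.aliyuncs.com      # placeholder endpoint
  region: cn-hangzhou                         # placeholder region
  access-key: your-access-key
  secret-key: your-secret-key
  bucket-name: your-bucket
  path-style-access: false                    # virtual-hosted-style for Aliyun OSS
  oss-url: your-bucket.oss-cn-hangzhou.aliyuncs.com
```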

Request parameter FileChunkInfo:

```java
import java.io.Serializable;
import lombok.Data;
import org.springframework.web.multipart.MultipartFile;

@Data
public class FileChunkInfo implements Serializable {
    private static final long serialVersionUID = 2353726406791217168L;

    // Upload id assigned to this file when the multipart upload is initialized
    private String fileUploadId;

    // Number of the current chunk
    private Integer currentChunkNumber;

    // Chunk size in bytes
    private Integer chunkSize;

    // MD5 of the whole file
    private String fileMD5;

    // The chunk payload
    private MultipartFile file;

    // Total number of chunks
    private Integer totalChunks;

    // Original file name
    private String originalFileName;

    // Object key (upload path) of the file
    private String fileUploadPath;
}
```

ResultEntity:

```java
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

@Data
@AllArgsConstructor
@NoArgsConstructor
public class ResultEntity<T> {
    private Integer code;

    private String message;

    private T data;

    public static <T> ResultEntity<T> fail(Integer code, String msg, T t) {
        return new ResultEntity<T>(code, msg, t);
    }

    public static <T> ResultEntity<T> fail(T t) {
        return ResultEntity.fail(501, "failed", t);
    }

    public static <T> ResultEntity<T> success(Integer code, String msg, T t) {
        return new ResultEntity<T>(code, msg, t);
    }

    public static <T> ResultEntity<T> success(T t) {
        return ResultEntity.success(200, "success!", t);
    }

    public static <T> ResultEntity<T> success() {
        return ResultEntity.success(200, "success!", null);
    }
}
```

Response parameter BigFileResp:

```java
import java.io.Serializable;
import java.util.List;
import com.amazonaws.services.s3.model.PartETag;
import lombok.Data;

@Data
public class BigFileResp implements Serializable {
    private static final long serialVersionUID = 3679861506816410985L;

    // Upload id assigned to this file when the multipart upload is initialized
    private String fileUploadId;

    // Number of the current chunk
    private Integer currentChunkNumber;

    // ETag returned for the uploaded part
    private PartETag partETag;

    // Part numbers that have already been uploaded
    private List<Integer> finishChunks;

    // Object key (upload path) of the file
    private String fileUploadPath;

    // MD5 of the whole file
    private String fileMD5;

    // Public URL of the file
    private String url;

    // File name
    private String fileName;

    // Database id of the OSS record
    private String ossId;
}
```

Controller:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Map;

import com.alibaba.fastjson.JSONObject;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;

import cn.hutool.core.util.IdUtil;
import lombok.RequiredArgsConstructor;
import org.apache.commons.lang3.time.DateFormatUtils;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * File upload
 */
@RestController
@RequestMapping("file")
@RequiredArgsConstructor
public class UploadController {

    private final AmazonS3 amazonS3;

    private final OssProperties ossProperties;

    private final RedisTemplate<String, String> redisTemplate;

    /**
     * Initialize the multipart upload
     */
    @PostMapping("/init")
    public ResultEntity<BigFileResp> uploadInit(FileChunkInfo fileInfo) {
        // Build the object key (upload path) however you like
        String path = getPath(fileInfo);

        // Initialize the multipart upload and obtain the uploadId
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(ossProperties.getBucketName(), path);
        InitiateMultipartUploadResult multipartUploadResult = amazonS3.initiateMultipartUpload(initRequest);
        String uploadId = multipartUploadResult.getUploadId();
        BigFileResp response = new BigFileResp();
        response.setFileUploadId(uploadId);
        response.setFileUploadPath(path);
        return ResultEntity.success(response);
    }

    /**
     * Upload one part
     */
    @PostMapping("/part")
    public ResultEntity<BigFileResp> uploadPart(FileChunkInfo fileInfo) throws Exception {
        UploadPartRequest request = new UploadPartRequest()
                .withBucketName(ossProperties.getBucketName())
                .withKey(fileInfo.getFileUploadPath())
                .withUploadId(fileInfo.getFileUploadId())
                .withPartNumber(fileInfo.getCurrentChunkNumber())
                .withInputStream(fileInfo.getFile().getInputStream())
                // Use the actual size of this chunk; the last chunk is
                // usually smaller than the configured chunk size
                .withPartSize(fileInfo.getFile().getSize());

        // Upload the part
        UploadPartResult uploadPartResult = amazonS3.uploadPart(request);
        PartETag partETag = uploadPartResult.getPartETag();
        String fileUploadId = fileInfo.getFileUploadId();
        String etagString = JSONObject.toJSONString(partETag);

        // Record the uploaded part in a Redis hash keyed by uploadId;
        // store the part number as a String so it works with the String serializer
        redisTemplate.opsForHash().put(fileUploadId, String.valueOf(fileInfo.getCurrentChunkNumber()), etagString);

        BigFileResp response = new BigFileResp();
        response.setFileUploadId(fileInfo.getFileUploadId());
        response.setPartETag(partETag);
        return ResultEntity.success(response);
    }

    /**
     * Merge the parts
     */
    @PostMapping("/merge")
    public ResultEntity<BigFileResp> merge(FileChunkInfo fileInfo) {
        // Collect the ETags of the uploaded parts from Redis
        Map<Object, Object> map = redisTemplate.opsForHash().entries(fileInfo.getFileUploadId());
        List<PartETag> etagList = new ArrayList<>();
        for (Map.Entry<Object, Object> entry : map.entrySet()) {
            String value = (String) entry.getValue();
            PartETag partETag = JSONObject.parseObject(value, PartETag.class);
            etagList.add(partETag);
        }
        // S3 requires parts in ascending part-number order, and Redis hash
        // entries come back unordered, so sort before completing
        etagList.sort(Comparator.comparingInt(PartETag::getPartNumber));

        // Complete (merge) the multipart upload
        CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(
                ossProperties.getBucketName(),
                fileInfo.getFileUploadPath(),
                fileInfo.getFileUploadId(),
                etagList);
        amazonS3.completeMultipartUpload(request);

        // Clean up the cached part info
        redisTemplate.delete(fileInfo.getFileUploadId());

        String url = "https://" + ossProperties.getOssUrl() + "/" + fileInfo.getFileUploadPath();
        BigFileResp response = new BigFileResp();
        response.setUrl(url);
        return ResultEntity.success(response);
    }

    private String getPath(FileChunkInfo fileInfo) {
        String uuid = IdUtil.fastSimpleUUID();
        String fileName = fileInfo.getOriginalFileName();
        String datePrefix = DateFormatUtils.format(new Date(), "yyyy/MM/dd/");
        // e.g. 2024/05/01/<uuid><originalFileName>
        return datePrefix + uuid + fileName;
    }
}
```
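
To make the three-step flow concrete, here is a minimal client-side sketch in Java. It is an illustration only: the base URL, chunk size, file path, and the RestTemplate multipart form layout are my own assumptions, not part of the service above, and a real front end would typically do this in JavaScript with File.slice() and concurrent requests.

```java
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Map;

import org.springframework.core.io.ByteArrayResource;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestTemplate;

public class ChunkUploadClient {

    private static final String BASE_URL = "http://localhost:8080/file"; // assumed host and port
    private static final int CHUNK_SIZE = 5 * 1024 * 1024;               // 5 MB, the S3 minimum part size

    public static void main(String[] args) throws IOException {
        RestTemplate rest = new RestTemplate();
        String filePath = "/tmp/big-file.zip"; // placeholder local file
        String fileName = "big-file.zip";

        try (RandomAccessFile raf = new RandomAccessFile(filePath, "r")) {
            long length = raf.length();
            int totalChunks = (int) ((length + CHUNK_SIZE - 1) / CHUNK_SIZE);

            // Step 1: initialize and obtain the uploadId and object key
            MultiValueMap<String, Object> initForm = new LinkedMultiValueMap<>();
            initForm.add("originalFileName", fileName);
            Map<?, ?> initResp = rest.postForObject(BASE_URL + "/init", initForm, Map.class);
            Map<?, ?> initData = (Map<?, ?>) initResp.get("data");
            String uploadId = (String) initData.get("fileUploadId");
            String uploadPath = (String) initData.get("fileUploadPath");

            // Step 2: upload each chunk (sequential here; a real client
            // would typically send several chunks concurrently)
            for (int i = 1; i <= totalChunks; i++) {
                long offset = (long) (i - 1) * CHUNK_SIZE;
                int size = (int) Math.min(CHUNK_SIZE, length - offset);
                byte[] buf = new byte[size];
                raf.seek(offset);
                raf.readFully(buf);

                MultiValueMap<String, Object> form = new LinkedMultiValueMap<>();
                form.add("file", new ByteArrayResource(buf) {
                    @Override
                    public String getFilename() { return fileName; } // needed for multipart encoding
                });
                form.add("fileUploadId", uploadId);
                form.add("fileUploadPath", uploadPath);
                form.add("currentChunkNumber", String.valueOf(i));
                form.add("chunkSize", String.valueOf(size));
                rest.postForObject(BASE_URL + "/part", form, Map.class);
            }

            // Step 3: merge the parts into the final object
            MultiValueMap<String, Object> mergeForm = new LinkedMultiValueMap<>();
            mergeForm.add("fileUploadId", uploadId);
            mergeForm.add("fileUploadPath", uploadPath);
            Map<?, ?> mergeResp = rest.postForObject(BASE_URL + "/merge", mergeForm, Map.class);
            System.out.println("merge result: " + mergeResp);
        }
    }
}
```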

## Closing notes

On resumable upload: because every uploaded part is already recorded in Redis during chunk upload, resuming after a network failure only requires querying which parts are still missing, uploading those, and then merging the file.
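
Since the Redis hash already holds the finished part numbers, a resume endpoint only needs to return them so the client can skip those chunks. A minimal sketch, to be added to UploadController above; the endpoint name and response shape are my own choice (extra imports: java.util.Set, java.util.stream.Collectors):

```java
    /**
     * List the parts already uploaded for this uploadId so the client can
     * resume by sending only the missing chunks.
     */
    @PostMapping("/uploadedChunks")
    public ResultEntity<BigFileResp> uploadedChunks(FileChunkInfo fileInfo) {
        // Hash keys are the part numbers stored during uploadPart
        Set<Object> keys = redisTemplate.opsForHash().keys(fileInfo.getFileUploadId());
        List<Integer> finished = keys.stream()
                .map(k -> Integer.valueOf(k.toString()))
                .sorted()
                .collect(Collectors.toList());
        BigFileResp response = new BigFileResp();
        response.setFileUploadId(fileInfo.getFileUploadId());
        response.setFinishChunks(finished);
        return ResultEntity.success(response);
    }
```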

On instant upload: store each file's MD5 in the database; when a file is uploaded, look up the MD5 first and, if a record already exists, return it directly.
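
A sketch of that MD5 check, assuming a hypothetical ossFileService whose findByMd5 looks up a table that records (md5, url, ossId) after each successful merge; neither exists in the code above:

```java
    /**
     * Instant upload check: if a file with the same MD5 was uploaded
     * before, return its stored record and skip the upload entirely.
     * ossFileService.findByMd5(...) is a hypothetical database lookup.
     */
    @PostMapping("/check")
    public ResultEntity<BigFileResp> checkMd5(FileChunkInfo fileInfo) {
        BigFileResp existing = ossFileService.findByMd5(fileInfo.getFileMD5());
        if (existing != null) {
            // Hit: the client can use the returned URL directly
            return ResultEntity.success(existing);
        }
        // Miss: the client proceeds with init/part/merge as usual
        return ResultEntity.success(null);
    }
```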

Both features are straightforward to implement on top of the code above. I hope this helps!
