基于SpringBoot實現(xiàn)大文件分塊上傳功能
1.分塊上傳使用場景
大文件加速上傳:當文件大小超過100MB時,使用分片上傳可實現(xiàn)并行上傳多個Part以加快上傳速度。
網(wǎng)絡環(huán)境較差:網(wǎng)絡環(huán)境較差時,建議使用分片上傳。當出現(xiàn)上傳失敗的時候,您僅需重傳失敗的Part。
文件大小不確定:可以在需要上傳的文件大小還不確定的情況下開始上傳,這種場景在視頻監(jiān)控等行業(yè)應用中比較常見。
2.實現(xiàn)原理
實現(xiàn)原理其實很簡單,核心就是客戶端把大文件按照一定規(guī)則進行拆分,比如20MB為一個小塊,分解成一個一個的文件塊,然后把這些文件塊單獨上傳到服務端,等到所有的文件塊都上傳完畢之后,客戶端再通知服務端進行文件合并的操作,合并完成之后整個任務結束。
3.代碼工程
實驗目的
實現(xiàn)大文件分塊上傳
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <!-- Child module of the springboot-demo aggregator; Spring Boot version is inherited from the parent. -->
    <parent>
        <artifactId>springboot-demo</artifactId>
        <groupId>com.et</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>
    <artifactId>file</artifactId>
    <properties>
        <!-- Compile for Java 8 source/target. -->
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>
    <dependencies>
        <!-- Embedded web server + Spring MVC for the upload endpoints. -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-autoconfigure</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <!-- Apache HttpClient + multipart support, used by the upload test client. -->
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpclient</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpmime</artifactId>
        </dependency>
        <!-- Lombok: @Slf4j and fluent setters on the bean classes. -->
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
        </dependency>
        <!-- Hutool: FileUtil.del(...) in the test helper. -->
        <dependency>
            <groupId>cn.hutool</groupId>
            <artifactId>hutool-core</artifactId>
            <version>5.8.15</version>
        </dependency>
    </dependencies>
</project>
controller
package com.et.controller;
import com.et.bean.Chunk;
import com.et.bean.FileInfo;
import com.et.service.ChunkService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpHeaders;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import java.util.List;
@RestController
@RequestMapping("file")
public class ChunkController {

    @Autowired
    private ChunkService chunkService;

    /**
     * Accepts a single uploaded chunk and hands it to the service layer.
     *
     * @param chunk chunk payload bound from the multipart form fields
     * @return 200 OK with a fixed success message
     */
    @PostMapping(value = "chunk")
    public ResponseEntity<String> chunk(Chunk chunk) {
        chunkService.chunk(chunk);
        return ResponseEntity.ok("File Chunk Upload Success");
    }

    /**
     * Triggers the merge of all previously uploaded chunks of a file.
     *
     * @param filename name of the file whose chunks should be merged
     * @return empty 200 OK on success
     */
    @GetMapping(value = "merge")
    public ResponseEntity<Void> merge(@RequestParam("filename") String filename) {
        chunkService.merge(filename);
        return ResponseEntity.ok().build();
    }

    /**
     * Lists every fully uploaded file.
     *
     * @return metadata of all completed uploads
     */
    @GetMapping("/files")
    public ResponseEntity<List<FileInfo>> list() {
        List<FileInfo> files = chunkService.list();
        return ResponseEntity.ok(files);
    }

    /**
     * Downloads one completed file as an attachment.
     *
     * @param filename name of the file to download (".+" keeps the extension in the path variable)
     * @return the file content with a Content-Disposition attachment header
     */
    @GetMapping("/files/{filename:.+}")
    public ResponseEntity<Resource> getFile(@PathVariable("filename") String filename) {
        String disposition = "attachment; filename=\"" + filename + "\"";
        Resource body = chunkService.getFile(filename);
        return ResponseEntity.ok()
                .header(HttpHeaders.CONTENT_DISPOSITION, disposition)
                .body(body);
    }
}
config
package com.et.config;
import com.et.service.FileClient;
import com.et.service.impl.LocalFileSystemClient;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;
@Configuration
public class FileClientConfig {

    /** Storage backend selector, e.g. "local-file"; defaults to the local file system. */
    @Value("${file.client.type:local-file}")
    private String fileClientType;

    // Registry of supported storage backends. A plain static initializer replaces the
    // original double-brace initialization, which created an anonymous HashMap subclass
    // (an extra class plus a hidden enclosing-instance reference) for no benefit.
    private static final Map<String, Supplier<FileClient>> FILE_CLIENT_SUPPLY = new HashMap<>();

    static {
        FILE_CLIENT_SUPPLY.put("local-file", LocalFileSystemClient::new);
        // FILE_CLIENT_SUPPLY.put("aws-s3", AWSFileClient::new);
    }

    /**
     * Exposes the configured {@link FileClient} as a Spring bean.
     *
     * @return a freshly constructed client for the configured type
     * @throws IllegalStateException if {@code file.client.type} names an unknown backend
     *         (the original code threw a bare NullPointerException here)
     */
    @Bean
    public FileClient fileClient() {
        Supplier<FileClient> supplier = FILE_CLIENT_SUPPLY.get(fileClientType);
        if (supplier == null) {
            throw new IllegalStateException("Unsupported file client type [" + fileClientType
                    + "]; supported types: " + FILE_CLIENT_SUPPLY.keySet());
        }
        return supplier.get();
    }
}
service
package com.et.service;
import com.et.bean.Chunk;
import com.et.bean.FileInfo;
import org.springframework.core.io.Resource;
import java.util.List;
/**
 * Service contract for chunked ("multipart") file upload:
 * receive individual chunks, merge them, and serve completed files.
 */
public interface ChunkService {

    /** Stores one uploaded chunk; duplicate chunk numbers for a file are skipped. */
    void chunk(Chunk chunk);

    /** Merges all uploaded chunks of {@code filename} into the final file. */
    void merge(String filename);

    /** Returns metadata of all fully uploaded (merged) files. */
    List<FileInfo> list();

    /** Returns the content of a completed file as a Spring {@link Resource}. */
    Resource getFile(String filename);
}
package com.et.service.impl;
import com.et.bean.Chunk;
import com.et.bean.ChunkProcess;
import com.et.bean.FileInfo;
import com.et.service.ChunkService;
import com.et.service.FileClient;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.Resource;
import org.springframework.stereotype.Service;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;
@Service
@Slf4j
public class ChunkServiceImpl implements ChunkService {

    // In-flight upload bookkeeping, keyed by file name. In-memory only: progress is
    // lost on restart and not shared across instances.
    private static final Map<String, ChunkProcess> CHUNK_PROCESS_STORAGE = new ConcurrentHashMap<>();
    // Completed (merged) files.
    private static final List<FileInfo> FILE_STORAGE = new CopyOnWriteArrayList<>();

    @Autowired
    private FileClient fileClient;

    /**
     * Stores one uploaded chunk. Initializes the upload task on the first chunk of a
     * file and skips chunks whose chunk number was already recorded.
     *
     * @param chunk the uploaded chunk (filename, chunk number, content)
     * @throws RuntimeException if a completed file with the same name already exists
     */
    @Override
    public void chunk(Chunk chunk) {
        String filename = chunk.getFilename();
        boolean match = FILE_STORAGE.stream().anyMatch(fileInfo -> fileInfo.getFileName().equals(filename));
        if (match) {
            throw new RuntimeException("File [ " + filename + " ] already exist");
        }
        // computeIfAbsent closes the check-then-act race the original containsKey/put
        // sequence had when two chunks of a new file arrived concurrently: initTask
        // is now invoked exactly once per file.
        ChunkProcess chunkProcess = CHUNK_PROCESS_STORAGE.computeIfAbsent(filename, name -> {
            String newUploadId = fileClient.initTask(name);
            return new ChunkProcess().setFilename(name).setUploadId(newUploadId);
        });
        String uploadId = chunkProcess.getUploadId();
        List<ChunkProcess.ChunkPart> chunkList = chunkProcess.getChunkList();
        if (chunkList == null) {
            // Guard: the original dereferenced getChunkList() unconditionally and would
            // NPE if ChunkProcess does not pre-initialize the list.
            chunkList = new CopyOnWriteArrayList<>();
            chunkProcess.setChunkList(chunkList);
        }
        boolean alreadyUploaded = chunkList.stream()
                .anyMatch(chunkPart -> chunkPart.getChunkNumber() == chunk.getChunkNumber());
        if (alreadyUploaded) {
            log.info("file【{}】chunk【{}】upload,jump", chunk.getFilename(), chunk.getChunkNumber());
            return;
        }
        String chunkId = fileClient.chunk(chunk, uploadId);
        chunkList.add(new ChunkProcess.ChunkPart(chunkId, chunk.getChunkNumber()));
    }

    /**
     * Merges all uploaded chunks of {@code filename}, records the completed file and
     * discards the in-flight bookkeeping.
     *
     * @throws RuntimeException if no chunks were uploaded for the file (the original
     *         passed null into fileClient.merge and failed with an opaque NPE)
     */
    @Override
    public void merge(String filename) {
        ChunkProcess chunkProcess = CHUNK_PROCESS_STORAGE.get(filename);
        if (chunkProcess == null) {
            throw new RuntimeException("File [ " + filename + " ] has no uploaded chunks to merge");
        }
        fileClient.merge(chunkProcess);
        // SimpleDateFormat is not thread-safe, so a fresh local instance is used per call.
        SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        String currentTime = simpleDateFormat.format(new Date());
        FILE_STORAGE.add(new FileInfo().setUploadTime(currentTime).setFileName(filename));
        CHUNK_PROCESS_STORAGE.remove(filename);
    }

    /** Returns the live list of completed files (callers must not mutate it). */
    @Override
    public List<FileInfo> list() {
        return FILE_STORAGE;
    }

    /** Delegates download of a completed file to the storage backend. */
    @Override
    public Resource getFile(String filename) {
        return fileClient.getFile(filename);
    }
}
package com.et.service.impl;
import com.et.bean.FileInfo;
import com.et.service.FileUploadService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.Resource;
import org.springframework.stereotype.Service;
import org.springframework.util.FileCopyUtils;
import org.springframework.web.multipart.MultipartFile;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
@Service
@Slf4j
public class FileUploadServiceImpl implements FileUploadService {

    /** Target directory for uploads; defaults to /data/upload/. */
    @Value("${upload.path:/data/upload/}")
    private String filePath;

    // Completed uploads. In-memory registry only: contents are lost on restart.
    private static final List<FileInfo> FILE_STORAGE = new CopyOnWriteArrayList<>();

    /**
     * Stores each uploaded file under the configured directory.
     *
     * @param files multipart files from the client
     * @throws RuntimeException if a file with the same name already exists, the name is
     *         missing or escapes the upload directory, or the copy fails
     */
    @Override
    public void upload(MultipartFile[] files) {
        // SimpleDateFormat is not thread-safe; a per-call local instance is safe here.
        SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        for (MultipartFile file : files) {
            String fileName = file.getOriginalFilename();
            boolean match = FILE_STORAGE.stream().anyMatch(fileInfo -> fileInfo.getFileName().equals(fileName));
            if (match) {
                throw new RuntimeException("File [ " + fileName + " ] already exist");
            }
            Path target = resolveSafely(fileName);
            String currentTime = simpleDateFormat.format(new Date());
            try (InputStream in = file.getInputStream();
                 OutputStream out = Files.newOutputStream(target)) {
                FileCopyUtils.copy(in, out);
            } catch (IOException e) {
                log.error("File [{}] upload failed", fileName, e);
                throw new RuntimeException(e);
            }
            FileInfo fileInfo = new FileInfo().setFileName(fileName).setUploadTime(currentTime);
            FILE_STORAGE.add(fileInfo);
        }
    }

    /**
     * Resolves {@code fileName} inside the upload directory, creating the directory if
     * needed. Rejects null/empty names and path-traversal names such as "../../etc/x":
     * the original code concatenated the client-controlled original filename straight
     * onto the base path, allowing writes/reads outside the upload directory.
     */
    private Path resolveSafely(String fileName) {
        if (fileName == null || fileName.isEmpty()) {
            throw new RuntimeException("File name must not be empty");
        }
        Path base = Paths.get(filePath).toAbsolutePath().normalize();
        try {
            Files.createDirectories(base);
        } catch (IOException e) {
            throw new RuntimeException("Cannot create upload directory [" + base + "]", e);
        }
        Path target = base.resolve(fileName).normalize();
        if (!target.startsWith(base)) {
            throw new RuntimeException("Illegal file name [ " + fileName + " ]");
        }
        return target;
    }

    /** Returns the live list of completed uploads (callers must not mutate it). */
    @Override
    public List<FileInfo> list() {
        return FILE_STORAGE;
    }

    /**
     * Serves a previously uploaded file.
     *
     * @throws RuntimeException if the file was never uploaded or its name is illegal
     */
    @Override
    public Resource getFile(String fileName) {
        FILE_STORAGE.stream()
                .filter(info -> info.getFileName().equals(fileName))
                .findFirst()
                .orElseThrow(() -> new RuntimeException("File [ " + fileName + " ] not exist"));
        File file = resolveSafely(fileName).toFile();
        return new FileSystemResource(file);
    }
}
以上只是一些關鍵代碼,所有代碼請參見下面代碼倉庫
代碼倉庫
4.測試
啟動Spring Boot應用
編寫測試類
@Test
public void testUpload() throws Exception {
    // Scratch directory for the temporary chunk files produced below.
    String chunkFileFolder = "D:/tmp/";
    java.io.File file = new java.io.File("D:/SoftWare/oss-browser-win32-ia32.zip");
    long contentLength = file.length();
    // Chunk size: 20 MB per part.
    long partSize = 20 * 1024 * 1024;
    // The last chunk may be smaller than 20 MB; ceil gives the total chunk count.
    long chunkFileNum = (long) Math.ceil(contentLength * 1.0 / partSize);
    RestTemplate restTemplate = new RestTemplate();
    try (RandomAccessFile raf_read = new RandomAccessFile(file, "r")) {
        // 1 KB copy buffer; raf_read's position advances across loop iterations,
        // so each chunk continues where the previous one stopped.
        byte[] b = new byte[1024];
        for (int i = 1; i <= chunkFileNum; i++) {
            // Temporary file holding chunk number i (file name is just the index).
            java.io.File chunkFile = new java.io.File(chunkFileFolder + i);
            // Copy up to partSize bytes from the source into the chunk file.
            try (RandomAccessFile raf_write = new RandomAccessFile(chunkFile, "rw")) {
                int len;
                while ((len = raf_read.read(b)) != -1) {
                    raf_write.write(b, 0, len);
                    // Stop once this chunk reached the part size; the final
                    // chunk ends earlier when read() hits EOF (-1).
                    if (chunkFile.length() >= partSize) {
                        break;
                    }
                }
                // Upload this chunk as a multipart/form-data POST. The extra form
                // fields mirror the server-side Chunk bean's properties.
                MultiValueMap<String, Object> body = new LinkedMultiValueMap<>();
                body.add("file", new FileSystemResource(chunkFile));
                body.add("chunkNumber", i);
                body.add("chunkSize", partSize);
                body.add("currentChunkSize", chunkFile.length());
                body.add("totalSize", contentLength);
                body.add("filename", file.getName());
                body.add("totalChunks", chunkFileNum);
                HttpHeaders headers = new HttpHeaders();
                headers.setContentType(MediaType.MULTIPART_FORM_DATA);
                HttpEntity<MultiValueMap<String, Object>> requestEntity = new HttpEntity<>(body, headers);
                String serverUrl = "http://localhost:8080/file/chunk";
                ResponseEntity<String> response = restTemplate.postForEntity(serverUrl, requestEntity, String.class);
                System.out.println("Response code: " + response.getStatusCode() + " Response body: " + response.getBody());
            } finally {
                // Delete the temporary chunk file whether or not the upload succeeded (Hutool).
                FileUtil.del(chunkFile);
            }
        }
    }
    // All chunks uploaded: ask the server to merge them into the final file.
    String mergeUrl = "http://localhost:8080/file/merge?filename=" + file.getName();
    ResponseEntity<String> response = restTemplate.getForEntity(mergeUrl, String.class);
    System.out.println("Response code: " + response.getStatusCode() + " Response body: " + response.getBody());
}
運行測試類,日志如下

以上就是基于SpringBoot實現(xiàn)大文件分塊上傳功能的詳細內容,更多關于SpringBoot大文件分塊上傳的資料請關注腳本之家其它相關文章!
相關文章
Springboot自動配置原理及DataSource的應用方式
這篇文章主要介紹了Springboot自動配置原理及DataSource的應用方式,具有很好的參考價值,希望對大家有所幫助,如有錯誤或未考慮完全的地方,望不吝賜教2024-07-07
Win10系統(tǒng)下配置Java環(huán)境變量
今天給大家?guī)淼氖顷P于Java的相關知識,文章圍繞著Win10系統(tǒng)下配置Java環(huán)境變量展開,文中有非常詳細的介紹及圖文示例,需要的朋友可以參考下2021-06-06
使用IDEA創(chuàng)建Java Web項目并部署訪問的圖文教程
本文通過圖文并茂的形式給大家介紹了使用IDEA創(chuàng)建Java Web項目并部署訪問的教程,非常不錯,具有一定的參考借鑒價值,需要的朋友可以參考下2018-08-08

