解锁Windows 10安卓生态:无需升级的跨平台革命
2026/4/20 16:50:18
作为内蒙古IT行业软件公司项目负责人,针对公司产品部门的文件传输需求,我提出以下专业解决方案:
// NOTE(review): the lines below are article content whose code samples were flattened onto
// single lines by extraction: a Vue uploader component (JavaScript), a JSP chunk-upload /
// merge handler, Java service classes (whitespace stripped — identifiers fused like
// "publicclassEncryptionService", will not compile as pasted), and a Spring application.yml
// fragment. Original bytes kept intact; only reviewer comments are inserted between lines.
localStorage+Redis双缓存(支持IE8/刷新/关闭后恢复)vue-uploader(兼容Vue2/Vue3)+spark-md5(文件哈希计算)localStorage+Redis双缓存(兼容IE8)webkitdirectory属性),生成层级元数据import SparkMD5 from 'spark-md5'; import { uploadChunk, checkResume, mergeChunks } from '@/api/fileUpload'; export default { data() { return { fileList: [], uploading: false, progress: 0, status: '', chunkSize: 1024 * 1024 * 1024, // 1G/片(可配置) redisClient: null // 连接Redis(用于断点续传) }; }, mounted() { // 初始化Redis连接(生产环境通过配置中心获取) this.redisClient = new Redis({ host: 'your-redis-host', port: 6379, password: 'your-redis-password' }); }, methods: { async selectFile() { const input = document.createElement('input'); input.type = 'file'; input.webkitdirectory = true; // 支持文件夹上传(Chrome/Firefox) input.multiple = true; input.addEventListener('change', async (e) => { const files = Array.from(e.target.files); this.fileList = files.map(file => ({ name: file.name, size: file.size, status: 'pending', hash: await this.calculateFileHash(file) })); this.startUpload(); }); input.click(); }, // 计算文件哈希(用于断点续传校验) async calculateFileHash(file) { return new Promise((resolve) => { const chunks = Math.ceil(file.size / this.chunkSize); const spark = new SparkMD5.ArrayBuffer(); const reader = new FileReader(); let currentChunk = 0; reader.onload = (e) => { spark.append(e.target.result); currentChunk++; if (currentChunk < chunks) { loadNext(); } else { resolve(spark.end()); } }; const loadNext = () => { const start = currentChunk * this.chunkSize; const end = Math.min(start + this.chunkSize, file.size); reader.readAsArrayBuffer(file.slice(start, end)); }; loadNext(); }); }, // 开始上传(支持断点续传) async startUpload() { this.uploading = true; this.status = ''; for (const file of this.fileList) { const hash = file.hash; const res = await checkResume(hash); // 查询Redis进度 if (res.progress > 0) { this.fileList = this.fileList.map(f => f.hash === hash ? 
// NOTE(review): in selectFile() above, `await this.calculateFileHash(file)` sits inside a
// NON-async arrow passed to Array.map — that is a syntax error (or yields Promises as the
// "hash" field). The mapping callback would need to be async + Promise.all. TODO confirm.
// NOTE(review): the component builds a Redis client in the BROWSER with a hard-coded
// password — credentials are exposed to every client; progress tracking belongs server-side.
// NOTE(review): chunkSize is 1 GiB per slice; reading 1 GiB ArrayBuffers via FileReader
// will exhaust memory in most browsers — typical chunk sizes are 1–10 MiB. TODO confirm.
{...f, status: 'resuming'} : f ); await this.resumeUpload(file, res.progress); } else { await this.uploadFile(file); } } this.uploading = false; this.status = 'success'; }, // 检查断点续传状态 async checkResume(hash) { const progress = await this.redisClient.get(`upload:${hash}:progress`); return { progress: progress ? parseInt(progress) : 0 }; }, // 分片上传(支持断点) async uploadFile(file) { const totalChunks = Math.ceil(file.size / this.chunkSize); const hash = file.hash; for (let i = 0; i < totalChunks; i++) { const start = i * this.chunkSize; const end = Math.min(start + this.chunkSize, file.size); const chunk = file.slice(start, end); const formData = new FormData(); formData.append('file', chunk); formData.append('hash', hash); formData.append('chunk', i); formData.append('total', totalChunks); try { await uploadChunk(formData); // 调用后端分片上传接口 const progress = Math.round(((i + 1) / totalChunks) * 100); this.fileList = this.fileList.map(f => f.hash === hash ? {...f, status: 'uploading', progress} : f ); await this.redisClient.set(`upload:${hash}:progress`, progress); // 更新Redis进度 } catch (err) { this.fileList = this.fileList.map(f => f.hash === hash ? {...f, status: 'failed'} : f ); throw new Error(`上传失败:${err.message}`); } } // 合并分片(后端自动触发) await mergeChunks(hash, totalChunks); this.fileList = this.fileList.map(f => f.hash === hash ? 
// NOTE(review): also in startUpload() — the code calls the imported `checkResume` from
// '@/api/fileUpload' but a same-named method `checkResume` hitting Redis directly is defined
// below and appears dead; presumably one of the two was meant to be removed.
{...f, status: 'success'} : f ); await this.redisClient.del(`upload:${hash}:progress`); // 清除进度缓存 }, // 格式化文件大小 formatSize(size) { if (size >= 1024 * 1024 * 1024) { return `${(size / (1024 * 1024 * 1024)).toFixed(2)}GB`; } else if (size >= 1024 * 1024) { return `${(size / (1024 * 1024)).toFixed(2)}MB`; } return `${(size / 1024).toFixed(2)}KB`; } } };webkitdirectory属性获取文件夹结构,后端递归创建目录(支持Linux/Windows路径)XMLHttpRequest替代fetch,降级处理分片上传(单文件≤2G)localStorage+Redis双缓存记录进度,浏览器重启后可恢复<%@ page import="com.panshi.util.OSSUtil" %> <%@ page import="org.springframework.data.redis.core.RedisTemplate" %> <% response.setContentType("application/json;charset=UTF-8"); String action = request.getParameter("action"); RedisTemplate redisTemplate = (RedisTemplate) application.getAttribute("redisTemplate"); if ("uploadChunk".equals(action)) { MultipartHttpServletRequest multipartRequest = (MultipartHttpServletRequest) request; MultipartFile chunk = multipartRequest.getFile("file"); String hash = multipartRequest.getParameter("hash"); int chunkNumber = Integer.parseInt(multipartRequest.getParameter("chunk")); int totalChunks = Integer.parseInt(multipartRequest.getParameter("total")); // 校验分片哈希 String chunkHash = DigestUtils.md5Hex(chunk.getBytes()); if (!chunkHash.equals(hash)) { response.getWriter().write("{\"code\":400,\"msg\":\"分片哈希校验失败\"}"); return; } // 存储分片到临时目录(格式:{hash}/{chunkNumber}) String tempDir = application.getRealPath("/temp/" + hash); File tempDirFile = new File(tempDir); if (!tempDirFile.exists()) { tempDirFile.mkdirs(); } chunk.transferTo(new File(tempDirFile, String.valueOf(chunkNumber))); // 更新Redis进度 redisTemplate.opsForValue().increment("upload:" + hash + ":progress", 1); response.getWriter().write("{\"code\":200,\"msg\":\"分片上传成功\"}"); } else if ("mergeChunks".equals(action)) { String hash = request.getParameter("hash"); int totalChunks = Integer.parseInt(request.getParameter("totalChunks")); // 检查所有分片是否上传完成 String tempDir = application.getRealPath("/temp/" + hash); File 
// NOTE(review): the JSP above compares each chunk's MD5 (DigestUtils.md5Hex(chunk.getBytes()))
// against the WHOLE-FILE hash ("hash" request param), so the check fails for every
// multi-chunk upload — a per-chunk hash parameter is needed. Also, "hash" is user-supplied
// and concatenated into a filesystem path ("/temp/" + hash) without sanitization —
// potential path traversal; validate it as a hex digest before use.
// NOTE(review): in the EncryptionService below, AES-GCM is initialized with a FIXED all-zero
// 12-byte IV (new byte[12]) — nonce reuse under one key breaks GCM confidentiality and
// integrity; a fresh random IV per encryption is required.
tempDirFile = new File(tempDir); if (!tempDirFile.exists()) { response.getWriter().write("{\"code\":400,\"msg\":\"分片目录不存在\"}"); return; } File[] chunks = tempDirFile.listFiles(); if (chunks == null || chunks.length != totalChunks) { response.getWriter().write("{\"code\":400,\"msg\":\"分片缺失\"}"); return; } // 合并分片到OSS(私有云) OSSUtil ossUtil = new OSSUtil(); String objectKey = "uploads/" + hash + ".dat"; // OSS存储路径 ossUtil.uploadFile(tempDirFile, objectKey); // 删除临时分片 for (File chunk : chunks) { chunk.delete(); } tempDirFile.delete(); // 清除Redis进度 redisTemplate.delete("upload:" + hash + ":progress"); response.getWriter().write("{\"code\":200,\"msg\":\"合并成功\"}"); } %>@ServicepublicclassEncryptionService{@Value("${encryption.algorithm:SM4}")privateStringalgorithm;// 可配置SM4/AES// SM4加密(国密)publicbyte[]sm4Encrypt(byte[]data,Stringkey)throwsException{SM4sm4=newSM4();sm4.setKey(key.getBytes(StandardCharsets.UTF_8),SM4.ENCRYPT_MODE);returnsm4.doFinal(data);}// AES加密(国际标准)publicbyte[]aesEncrypt(byte[]data,Stringkey)throwsException{Ciphercipher=Cipher.getInstance("AES/GCM/NoPadding");SecretKeySpeckeySpec=newSecretKeySpec(key.getBytes(StandardCharsets.UTF_8),"AES");GCMParameterSpecgcmParameterSpec=newGCMParameterSpec(128,newbyte[12]);cipher.init(Cipher.ENCRYPT_MODE,keySpec,gcmParameterSpec);byte[]iv=cipher.getIV();byte[]encrypted=cipher.doFinal(data);returnBytes.concat(iv,encrypted);// IV+密文}}// OSS上传工具类(私有云适配)@ComponentpublicclassOSSUtil{@Value("${oss.endpoint}")privateStringendpoint;@Value("${oss.accessKeyId}")privateStringaccessKeyId;@Value("${oss.accessKeySecret}")privateStringaccessKeySecret;@Value("${oss.bucketName}")privateStringbucketName;publicvoiduploadFile(FilelocalFile,StringobjectKey)throwsException{// 初始化OSS客户端(私有云配置)ClientBuilderConfigurationconfig=newClientBuilderConfiguration();OSSossClient=newOSSClientBuilder().build(endpoint,accessKeyId,accessKeySecret,config);// 上传文件ossClient.putObject(bucketName,objectKey,newFileInputStream(localFile));ossClient.shutdown();}}# 
# NOTE(review): OSSUtil.uploadFile above passes a DIRECTORY (tempDirFile) where putObject
# expects a single file's stream, and the FileInputStream is never closed — resource leak.
# The yml fragment below also had its whitespace stripped by extraction; keys/values are
# fused and it is not valid YAML as pasted.
application.ymlspring:datasource:dynamic:primary:mysql# 默认数据库datasource:mysql:url:jdbc:mysql://${mysql.host}:${mysql.port}/${mysql.db}?useSSL=falseusername:${mysql.user}password:${mysql.password}sqlserver:url:jdbc:sqlserver://${sqlserver.host}:${sqlserver.port};databaseName=${sqlserver.db}username:${sqlserver.user}password:${sqlserver.password}oracle:url:jdbc:oracle:thin:@${oracle.host}:${oracle.port}:${oracle.sid}username:${oracle.user}password:${oracle.password}dm:# 达梦数据库url:jdbc:dm://${dm.host}:${dm.port}/${dm.db}username:${dm.user}password:${dm.password}kingbase:# 人大金仓url:jdbc:kingbase://${kingbase.host}:${kingbase.port}/${kingbase.db}username:${kingbase.user}password:${kingbase.password}panshi-file-transfer仓库application.yml中的数据库/OSS/Redis连接信息mvn clean package -DskipTests(生成panshi-file-transfer-1.0.0.war)webapps目录(systemctl start tomcat)考虑到贵司200+项目/年的规模,我建议采用以下授权模式:
买断授权:98万元一次性买断,包含:
资质文件:我司可提供:
实施支持:
磐石大文件传输平台V1.0(登记号:2024SR000000)结语:本方案深度适配公司现有业务流程,兼顾安全性、稳定性与扩展性,源码买断模式可大幅降低研发成本。我们承诺60天内完成公司所有项目的集成验证,7×24小时技术支持保障系统稳定运行。期待与公司携手,共同打造行业领先的大文件传输解决方案!
导入到Eclipse:点击查看教程
导入到IDEA:点击查看教程
springboot统一配置:点击查看教程
NOSQL示例不需要任何配置,可以直接访问测试
选择对应的数据表脚本,这里以SQL为例
up6/upload/年/月/日/guid/filename
支持离线保存文件进度,在关闭浏览器、刷新浏览器后进度不丢失,仍然能够继续上传
支持上传文件夹并保留层级结构,同样支持进度信息离线保存,刷新页面,关闭页面,重启系统不丢失上传进度。
点击下载完整示例