This article walks through how to implement chunked (multipart) file upload with Node.js. The steps are detailed and easy to follow, and should serve as a practical reference; let's take a look.
Uploading a large file takes a long time, and the upload can fail partway through. Solving this requires the front end and the back end to cooperate.
The solution breaks down into the following steps:
Split the file into chunks, so each request takes less time and a failed request can be re-sent on its own instead of restarting the whole upload
Notify the server to merge the chunks once they have all been uploaded
Limit the number of concurrent requests to avoid running the browser out of memory
When a request fails because of the network or any other reason, re-send it
In JavaScript, the File object is a subclass of Blob, and Blob exposes an important method, slice(), which returns the bytes in a given range as a new Blob. That is all we need to split a binary file into chunks.
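As a minimal sketch isolating just that step (the createChunks name is illustrative; 50 KB matches the chunk size used later in this article), chunking is simply a loop over slice():

// Cut a File (or any Blob) into fixed-size chunks
function createChunks(file, chunkSize = 1024 * 50) { // 50 KB per chunk
  const chunks = []
  for (let cur = 0; cur < file.size; cur += chunkSize) {
    // slice(start, end) returns a Blob for that byte range; end is clamped to file.size
    chunks.push(file.slice(cur, cur + chunkSize))
  }
  return chunks
}

The complete page below does the same chunking and then uploads every chunk to the server: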
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Document</title>
  <script src="https://cdn.bootcdn.net/ajax/libs/axios/0.24.0/axios.min.js"></script>
</head>
<body>
  <input type="file" multiple="multiple" id="fileInput" />
  <button onclick="SliceUpload()">Upload</button>
  <script>
    async function SliceUpload() {
      const file = document.getElementById('fileInput').files[0]
      if (!file) return

      // Split the file into chunks
      let size = 1024 * 50; // 50 KB per chunk
      let fileChunks = [];
      let index = 0; // chunk index
      for (let cur = 0; cur < file.size; cur += size) {
        fileChunks.push({
          hash: index++,
          chunk: file.slice(cur, cur + size),
        });
      }

      // Upload every chunk
      const uploadList = fileChunks.map((item) => {
        let formData = new FormData();
        formData.append("filename", file.name);
        formData.append("hash", item.hash);
        formData.append("chunk", item.chunk);
        return axios({
          method: "post",
          url: "/upload",
          data: formData,
        });
      });
      await Promise.all(uploadList);

      // All chunks uploaded; ask the server to merge them
      await axios({
        method: "get",
        url: "/merge",
        params: {
          filename: file.name,
        },
      });
      console.log("Upload complete");
    }
  </script>
</body>
</html>
If the file is large, slicing it this way produces a lot of chunks, and the browser will fire a large number of requests in a short time, which can exhaust memory. So the number of concurrent requests has to be limited.
Here we use Promise.race() to cap how many requests are in flight at once and avoid running the browser out of memory.
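Before wiring it into the uploader, here is a minimal, generic sketch of that pattern, assuming each task is a function that returns a promise (the asyncPool name and its parameters are illustrative, not part of the original article):

// Run at most `limit` promise-returning tasks at a time
async function asyncPool(limit, tasks) {
  const pool = []        // promises currently in flight
  const results = []
  for (const task of tasks) {
    const p = task().finally(() => {
      // drop the settled promise from the pool to free a slot
      pool.splice(pool.indexOf(p), 1)
    })
    pool.push(p)
    results.push(p)
    if (pool.length >= limit) {
      // wait until at least one in-flight request settles
      await Promise.race(pool)
    }
  }
  // Note: each task should handle its own rejection, otherwise a single
  // failure will reject Promise.race / Promise.all here
  return Promise.all(results)
}

// Usage sketch: await asyncPool(3, fileChunks.map(item => () => uploadChunk(item)))

Applied to the chunk uploader, the same idea looks like this: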
// With concurrency control added
async function SliceUpload() {
  const file = document.getElementById('fileInput').files[0]
  if (!file) return

  // Split the file into chunks
  let size = 1024 * 50; // 50 KB per chunk
  let fileChunks = [];
  let index = 0; // chunk index
  for (let cur = 0; cur < file.size; cur += size) {
    fileChunks.push({
      hash: index++,
      chunk: file.slice(cur, cur + size),
    });
  }

  let pool = []; // concurrency pool
  let max = 3;   // maximum concurrency
  for (let i = 0; i < fileChunks.length; i++) {
    let item = fileChunks[i];
    let formData = new FormData();
    formData.append("filename", file.name);
    formData.append("hash", item.hash);
    formData.append("chunk", item.chunk);

    // Upload the chunk
    let task = axios({
      method: "post",
      url: "/upload",
      data: formData,
    });
    task.then(() => {
      // Remove the finished request from the pool
      let index = pool.findIndex((t) => t === task);
      pool.splice(index, 1);
    });

    // Put the request into the pool; once the pool is full,
    // wait for the fastest request in it to settle
    pool.push(task);
    if (pool.length === max) {
      await Promise.race(pool);
    }
  }

  // All chunks uploaded; ask the server to merge them
  await axios({
    method: "get",
    url: "/merge",
    params: {
      filename: file.name,
    },
  });
  console.log("Upload complete");
}
The last step is retrying failed requests. Chunks whose upload fails are collected in a failure list; once every chunk of the current round has settled, the failed ones are uploaded again, and the merge request is only sent when nothing is left to upload:

function SliceUpload() {
  const file = document.getElementById('fileInput').files[0]
  if (!file) return

  // Split the file into chunks
  let size = 1024 * 50; // chunk size
  let fileChunks = [];
  let index = 0; // chunk index
  for (let cur = 0; cur < file.size; cur += size) {
    fileChunks.push({
      hash: index++,
      chunk: file.slice(cur, cur + size),
    });
  }

  const uploadFileChunks = async function (list) {
    if (list.length === 0) {
      // All chunks uploaded; ask the server to merge them
      await axios({
        method: 'get',
        url: '/merge',
        params: {
          filename: file.name
        }
      });
      console.log('Upload complete')
      return
    }

    let pool = []      // concurrency pool
    let max = 3        // maximum concurrency
    let finish = 0     // number of settled requests
    let failList = []  // chunks that failed in this round

    for (let i = 0; i < list.length; i++) {
      let item = list[i]
      let formData = new FormData()
      formData.append('filename', file.name)
      formData.append('hash', item.hash)
      formData.append('chunk', item.chunk)

      let task = axios({
        method: 'post',
        url: '/upload',
        data: formData
      }).catch(() => {
        // Remember the failed chunk so it can be retried in the next round
        failList.push(item)
      }).finally(() => {
        // Remove the settled request from the pool, whether it succeeded or failed
        let index = pool.findIndex((t) => t === task)
        pool.splice(index, 1)
        finish++
        // When every chunk of this round has settled, retry the failed ones
        if (finish === list.length) {
          uploadFileChunks(failList)
        }
      })

      pool.push(task)
      if (pool.length === max) {
        await Promise.race(pool)
      }
    }
  }

  uploadFileChunks(fileChunks)
}
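One caveat: the recursion above keeps retrying failed chunks indefinitely. If an upper bound on attempts per chunk is preferred, one option (a sketch only; the withRetry helper is not part of the original article) is to wrap each chunk request in a small retry helper:

// Retry a promise-returning function up to `retries` times
async function withRetry(fn, retries = 3) {
  let lastError
  for (let attempt = 0; attempt < retries; attempt++) {
    try {
      return await fn()   // stop as soon as one attempt succeeds
    } catch (err) {
      lastError = err     // remember the failure and try again
    }
  }
  throw lastError         // all attempts failed
}

// Usage sketch inside the upload loop:
// let task = withRetry(() => axios({ method: 'post', url: '/upload', data: formData }))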
On the server side, Express receives each chunk with multiparty, writes it into a temporary directory named after the file, and concatenates the chunks in index order when the /merge endpoint is called:

const express = require('express')
const multiparty = require('multiparty')
const fs = require('fs')
const path = require('path')
const { Buffer } = require('buffer')

// Directory for merged files
const STATIC_FILES = path.join(__dirname, './static/files')
// Temporary directory for uploaded chunks
const STATIC_TEMPORARY = path.join(__dirname, './static/temporary')

const server = express()
// Static file hosting
server.use(express.static(path.join(__dirname, './dist')))

// Endpoint that receives a single chunk
server.post('/upload', (req, res) => {
  const form = new multiparty.Form();
  form.parse(req, function (err, fields, files) {
    if (err) return res.status(500).send('Failed to parse the upload')
    let filename = fields.filename[0]
    let hash = fields.hash[0]
    let chunk = files.chunk[0]
    // Each file gets its own temporary directory; chunks are stored under their index
    let dir = `${STATIC_TEMPORARY}/${filename}`
    try {
      if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true })
      const buffer = fs.readFileSync(chunk.path)
      const ws = fs.createWriteStream(`${dir}/${hash}`)
      ws.end(buffer)
      res.send(`${filename}-${hash} Section uploaded successfully`)
    } catch (error) {
      console.error(error)
      res.status(500).send(`${filename}-${hash} Section uploading failed`)
    }
  })
})

// Endpoint that merges the chunks
server.get('/merge', async (req, res) => {
  const { filename } = req.query
  try {
    let len = 0
    // Chunks are named 0..n-1, so read them back in index order
    const bufferList = fs.readdirSync(`${STATIC_TEMPORARY}/${filename}`).map((hash, index) => {
      const buffer = fs.readFileSync(`${STATIC_TEMPORARY}/${filename}/${index}`)
      len += buffer.length
      return buffer
    });
    // Merge the chunks into a single buffer and write it out
    const buffer = Buffer.concat(bufferList, len);
    if (!fs.existsSync(STATIC_FILES)) fs.mkdirSync(STATIC_FILES, { recursive: true })
    const ws = fs.createWriteStream(`${STATIC_FILES}/${filename}`)
    ws.end(buffer);
    res.send('Section merge completed');
  } catch (error) {
    console.error(error);
    res.status(500).send('Section merge failed');
  }
})

server.listen(3000, _ => {
  console.log('http://localhost:3000/')
})
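The temporary chunk directory is never cleaned up in the code above. If that matters, one possible variant of the tail of the /merge handler (a sketch only; fs.rmSync with the recursive option needs Node.js 14.14 or later) is:

// Write the merged buffer, then delete the chunk directory once the file is on disk
const ws = fs.createWriteStream(`${STATIC_FILES}/${filename}`)
ws.end(buffer, () => {
  fs.rmSync(`${STATIC_TEMPORARY}/${filename}`, { recursive: true, force: true })
  res.send('Section merge completed')
})

Assuming the server code is saved as server.js and the front-end page is built into ./dist, running node server.js and opening http://localhost:3000/ exercises the full flow.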
That wraps up "How to implement chunked upload in Node.js". Thanks for reading! Hopefully you now have a working picture of chunked uploads; for more articles like this one, follow the Yisu Cloud (億速云) industry news channel.