diff --git a/dist/filepond.esm.js b/dist/filepond.esm.js
index 4fce130..4d37a48 100644
--- a/dist/filepond.esm.js
+++ b/dist/filepond.esm.js
@@ -1864,6 +1864,7 @@ const defaultOptions = {
     chunkForce: [false, Type.BOOLEAN], // Force use of chunk uploads even for files smaller than chunk size
     chunkSize: [5000000, Type.INT], // Size of chunks (5MB default)
     chunkRetryDelays: [[500, 1000, 3000], Type.ARRAY], // Amount of times to retry upload of a chunk when it fails
+    chunkParallelize: [false, Type.BOOLEAN], // Enable uploads of chunks in parallel
 
     // The server api end points to use for uploading (see docs)
     server: [null, Type.SERVER_API],
@@ -2768,7 +2769,9 @@ const processFileChunked = (
 ) => {
     // all chunks
     const chunks = [];
-    const { chunkTransferId, chunkServer, chunkSize, chunkRetryDelays } = options;
+    const { chunkTransferId, chunkServer, chunkSize, chunkRetryDelays, chunkParallelize } = options;
 
     // default state
     const state = {
@@ -2873,6 +2876,7 @@ const processFileChunked = (
             timeout: null,
         };
     }
+    // in parallel mode the last chunk is held back until all other chunks have completed
+    const lastChunk = chunkParallelize ? chunks.pop() : null;
 
     const completeProcessingChunks = () => load(state.serverId);
@@ -2908,17 +2912,91 @@ const processFileChunked = (
         // send request object
         const requestUrl = buildURL(apiUrl, chunkServer.url, state.serverId);
 
+        if (!chunkParallelize) {
+            const headers =
+                typeof chunkServer.headers === 'function'
+                    ? chunkServer.headers(chunk)
+                    : {
+                          ...chunkServer.headers,
+                          'Content-Type': 'application/offset+octet-stream',
+                          'Upload-Offset': chunk.offset,
+                          'Upload-Length': file.size,
+                          'Upload-Name': file.name,
+                      };
+
+            const request = (chunk.request = sendRequest(ondata(chunk.data), requestUrl, {
+                ...chunkServer,
+                headers,
+            }));
+
+            request.onload = () => {
+                // done!
+                chunk.status = ChunkStatus.COMPLETE;
+
+                // remove request reference
+                chunk.request = null;
+
+                // start processing more chunks
+                processChunks();
+            };
+
+            request.onprogress = (lengthComputable, loaded, total) => {
+                chunk.progress = lengthComputable ? loaded : null;
+                updateTotalProgress();
+            };
+
+            request.onerror = xhr => {
+                chunk.status = ChunkStatus.ERROR;
+                chunk.request = null;
+                chunk.error = onerror(xhr.response) || xhr.statusText;
+                if (!retryProcessChunk(chunk)) {
+                    error(
+                        createResponse(
+                            'error',
+                            xhr.status,
+                            onerror(xhr.response) || xhr.statusText,
+                            xhr.getAllResponseHeaders()
+                        )
+                    );
+                }
+            };
+
+            request.ontimeout = xhr => {
+                chunk.status = ChunkStatus.ERROR;
+                chunk.request = null;
+                if (!retryProcessChunk(chunk)) {
+                    createTimeoutResponse(error)(xhr);
+                }
+            };
+
+            request.onabort = () => {
+                chunk.status = ChunkStatus.QUEUED;
+                chunk.request = null;
+                abort();
+            };
+        } else {
+            for (const chunk of chunks) {
+                processChunkRequest(chunk);
+            }
+        }
+    };
+
+    const processChunkRequest = (chunk, isLastChunk = false) => {
         const headers =
             typeof chunkServer.headers === 'function'
                 ? chunkServer.headers(chunk)
                 : {
                       ...chunkServer.headers,
                       'Content-Type': 'application/offset+octet-stream',
-                      'Upload-Offset': chunk.offset,
-                      'Upload-Length': file.size,
+                      'Upload-Index': chunk.index,
+                      'Upload-Chunks-Number': chunks.length + 1,
                       'Upload-Name': file.name,
                   };
 
+        // send request object
+        const requestUrl = buildURL(apiUrl, chunkServer.url, state.serverId);
         const request = (chunk.request = sendRequest(ondata(chunk.data), requestUrl, {
             ...chunkServer,
             headers,
@@ -2927,12 +3005,20 @@ const processFileChunked = (
         request.onload = () => {
             // done!
             chunk.status = ChunkStatus.COMPLETE;
-            // remove request reference
             chunk.request = null;
 
-            // start processing more chunks
-            processChunks();
+            // once every parallel chunk has completed, send the held-back last chunk;
+            // its completion marks the end of the transfer
+            if (
+                chunks.length === chunks.filter(c => c.status === ChunkStatus.COMPLETE).length &&
+                !isLastChunk
+            ) {
+                processChunkRequest(lastChunk, true);
+            }
+            if (isLastChunk) {
+                completeProcessingChunks();
+            }
         };
 
         request.onprogress = (lengthComputable, loaded, total) => {
@@ -4831,6 +4917,7 @@ const actions = (dispatch, query, state) => ({
                     chunkForce: options.chunkForce,
                     chunkSize: options.chunkSize,
                     chunkRetryDelays: options.chunkRetryDelays,
+                    chunkParallelize: options.chunkParallelize,
                 }),
                 {
                     allowMinimumUploadDuration: query('GET_ALLOW_MINIMUM_UPLOAD_DURATION'),
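For reference, this is roughly how the new mode would be enabled from userland. A minimal sketch: `chunkParallelize` is the option introduced by this patch; the other options are existing FilePond settings.

```js
import * as FilePond from 'filepond';

// create a pond on an existing file input
const pond = FilePond.create(document.querySelector('input[type="file"]'));

pond.setOptions({
    server: '/api',         // existing option: upload endpoint
    chunkUploads: true,     // existing option: enable chunked uploads
    chunkSize: 5000000,     // existing option: 5 MB chunks (the default)
    chunkParallelize: true, // new option: send chunks concurrently
});
```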
diff --git a/dist/filepond.js b/dist/filepond.js
index ce829df..dd3508b 100644
--- a/dist/filepond.js
+++ b/dist/filepond.js
@@ -3790,6 +3790,7 @@
         chunkForce: [false, Type.BOOLEAN], // Force use of chunk uploads even for files smaller than chunk size
         chunkSize: [5000000, Type.INT], // Size of chunks (5MB default)
         chunkRetryDelays: [[500, 1000, 3000], Type.ARRAY], // Amount of times to retry upload of a chunk when it fails
+        chunkParallelize: [false, Type.BOOLEAN], // Enable uploads of chunks in parallel
 
         // The server api end points to use for uploading (see docs)
         server: [null, Type.SERVER_API],
@@ -4840,10 +4841,13 @@
     ) {
         // all chunks
         var chunks = [];
         var chunkTransferId = options.chunkTransferId,
             chunkServer = options.chunkServer,
             chunkSize = options.chunkSize,
-            chunkRetryDelays = options.chunkRetryDelays;
+            chunkRetryDelays = options.chunkRetryDelays,
+            chunkParallelize = options.chunkParallelize;
 
         // default state
         var state = {
@@ -4963,6 +4967,7 @@
                 timeout: null,
             };
         }
+        // in parallel mode the last chunk is held back until all other chunks have completed
+        var lastChunk = chunkParallelize ? chunks.pop() : null;
 
         var completeProcessingChunks = function completeProcessingChunks() {
             return load(state.serverId);
@@ -5013,16 +5018,96 @@
             // send request object
             var requestUrl = buildURL(apiUrl, chunkServer.url, state.serverId);
 
+            if (!chunkParallelize) {
+                var headers =
+                    typeof chunkServer.headers === 'function'
+                        ? chunkServer.headers(chunk)
+                        : Object.assign({}, chunkServer.headers, {
+                              'Content-Type': 'application/offset+octet-stream',
+                              'Upload-Offset': chunk.offset,
+                              'Upload-Length': file.size,
+                              'Upload-Name': file.name,
+                          });
+
+                var request = (chunk.request = sendRequest(
+                    ondata(chunk.data),
+                    requestUrl,
+                    Object.assign({}, chunkServer, {
+                        headers: headers,
+                    })
+                ));
+
+                request.onload = function() {
+                    // done!
+                    chunk.status = ChunkStatus.COMPLETE;
+
+                    // remove request reference
+                    chunk.request = null;
+
+                    // start processing more chunks
+                    processChunks();
+                };
+
+                request.onprogress = function(lengthComputable, loaded, total) {
+                    chunk.progress = lengthComputable ? loaded : null;
+                    updateTotalProgress();
+                };
+
+                request.onerror = function(xhr) {
+                    chunk.status = ChunkStatus.ERROR;
+                    chunk.request = null;
+                    chunk.error = onerror(xhr.response) || xhr.statusText;
+                    if (!retryProcessChunk(chunk)) {
+                        error(
+                            createResponse(
+                                'error',
+                                xhr.status,
+                                onerror(xhr.response) || xhr.statusText,
+                                xhr.getAllResponseHeaders()
+                            )
+                        );
+                    }
+                };
+
+                request.ontimeout = function(xhr) {
+                    chunk.status = ChunkStatus.ERROR;
+                    chunk.request = null;
+                    if (!retryProcessChunk(chunk)) {
+                        createTimeoutResponse(error)(xhr);
+                    }
+                };
+
+                request.onabort = function() {
+                    chunk.status = ChunkStatus.QUEUED;
+                    chunk.request = null;
+                    abort();
+                };
+            } else {
+                for (var _i = 0, _chunks = chunks; _i < _chunks.length; _i++) {
+                    var _chunk = _chunks[_i];
+                    processChunkRequest(_chunk);
+                }
+            }
+        };
+
+        var processChunkRequest = function processChunkRequest(chunk) {
+            var isLastChunk =
+                arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
+
             var headers =
                 typeof chunkServer.headers === 'function'
                     ? chunkServer.headers(chunk)
                     : Object.assign({}, chunkServer.headers, {
                           'Content-Type': 'application/offset+octet-stream',
-                          'Upload-Offset': chunk.offset,
-                          'Upload-Length': file.size,
+                          'Upload-Index': chunk.index,
+                          'Upload-Chunks-Number': chunks.length + 1,
                           'Upload-Name': file.name,
                       });
 
+            // send request object
+            var requestUrl = buildURL(apiUrl, chunkServer.url, state.serverId);
             var request = (chunk.request = sendRequest(
                 ondata(chunk.data),
                 requestUrl,
@@ -5034,12 +5119,23 @@
             request.onload = function() {
                 // done!
                 chunk.status = ChunkStatus.COMPLETE;
-                // remove request reference
                 chunk.request = null;
 
-                // start processing more chunks
-                processChunks();
+                // once every parallel chunk has completed, send the held-back last chunk;
+                // its completion marks the end of the transfer
+                if (
+                    chunks.length ===
+                        chunks.filter(function(c) {
+                            return c.status === ChunkStatus.COMPLETE;
+                        }).length &&
+                    !isLastChunk
+                ) {
+                    processChunkRequest(lastChunk, true);
+                }
+                if (isLastChunk) {
+                    completeProcessingChunks();
+                }
             };
 
             request.onprogress = function(lengthComputable, loaded, total) {
@@ -7223,6 +7319,7 @@
                         chunkForce: options.chunkForce,
                         chunkSize: options.chunkSize,
                         chunkRetryDelays: options.chunkRetryDelays,
+                        chunkParallelize: options.chunkParallelize,
                     }
                 ),
diff --git a/src/js/app/actions.js b/src/js/app/actions.js
index 10aa99f..24c9765 100644
--- a/src/js/app/actions.js
+++ b/src/js/app/actions.js
@@ -852,6 +852,7 @@ export const actions = (dispatch, query, state) => ({
                 chunkForce: options.chunkForce,
                 chunkSize: options.chunkSize,
                 chunkRetryDelays: options.chunkRetryDelays,
+                chunkParallelize: options.chunkParallelize
             }),
             {
                 allowMinimumUploadDuration: query('GET_ALLOW_MINIMUM_UPLOAD_DURATION'),
diff --git a/src/js/app/options.js b/src/js/app/options.js
index b32430f..2a77889 100644
--- a/src/js/app/options.js
+++ b/src/js/app/options.js
@@ -95,6 +95,7 @@ export const defaultOptions = {
     chunkForce: [false, Type.BOOLEAN], // Force use of chunk uploads even for files smaller than chunk size
     chunkSize: [5000000, Type.INT], // Size of chunks (5MB default)
     chunkRetryDelays: [[500, 1000, 3000], Type.ARRAY], // Amount of times to retry upload of a chunk when it fails
+    chunkParallelize: [false, Type.BOOLEAN], // Enable uploads of chunks in parallel
 
     // The server api end points to use for uploading (see docs)
     server: [null, Type.SERVER_API],
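With `chunkParallelize` enabled, the chunk requests carry `Upload-Index` and `Upload-Chunks-Number` instead of `Upload-Offset`/`Upload-Length`, so the server must buffer parts by index rather than append at a byte offset. A hypothetical Express handler sketching that contract (the route, storage layout, and finalize step are assumptions, not part of this patch; it assumes FilePond's default PATCH chunk requests):

```js
const express = require('express');
const fs = require('fs');
const path = require('path');

const app = express();
const UPLOAD_DIR = '/tmp/uploads'; // assumption: local scratch space

app.patch(
    '/api/:transferId',
    express.raw({ type: 'application/offset+octet-stream', limit: '10mb' }),
    (req, res) => {
        const index = Number(req.get('Upload-Index'));
        const total = Number(req.get('Upload-Chunks-Number'));

        // store each part under its index
        const dir = path.join(UPLOAD_DIR, req.params.transferId);
        fs.mkdirSync(dir, { recursive: true });
        fs.writeFileSync(path.join(dir, String(index)), req.body);

        // the client holds back the last chunk until all others completed,
        // so its arrival (index === total - 1) means the file is complete
        if (index === total - 1) {
            const out = fs.createWriteStream(path.join(dir, req.get('Upload-Name')));
            for (let i = 0; i < total; i++) {
                out.write(fs.readFileSync(path.join(dir, String(i))));
            }
            out.end();
        }

        res.sendStatus(204);
    }
);
```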
diff --git a/src/js/app/utils/processFileChunked.js b/src/js/app/utils/processFileChunked.js
index b3c4316..016b9b2 100644
--- a/src/js/app/utils/processFileChunked.js
+++ b/src/js/app/utils/processFileChunked.js
@@ -1,9 +1,9 @@
-import { sendRequest } from '../../utils/sendRequest';
-import { createResponse } from '../../utils/createResponse';
-import { createTimeoutResponse } from '../../utils/createDefaultResponse';
-import { isObject } from '../../utils/isObject';
-import { buildURL } from './buildURL';
-import { ChunkStatus } from '../enum/ChunkStatus';
+import {sendRequest} from '../../utils/sendRequest';
+import {createResponse} from '../../utils/createResponse';
+import {createTimeoutResponse} from '../../utils/createDefaultResponse';
+import {isObject} from '../../utils/isObject';
+import {buildURL} from './buildURL';
+import {ChunkStatus} from '../enum/ChunkStatus';
 
 /*
 function signature:
@@ -19,8 +19,10 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
 
     // all chunks
     const chunks = [];
-    const { chunkTransferId, chunkServer, chunkSize, chunkRetryDelays } = options;
-
+    const {chunkTransferId, chunkServer, chunkSize, chunkRetryDelays, chunkParallelize} = options;
+
     // default state
     const state = {
         serverId: chunkTransferId,
@@ -36,10 +38,10 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
     const requestTransferId = cb => {
         const formData = new FormData();
-
+
         // add metadata under same name
         if (isObject(metadata)) formData.append(name, JSON.stringify(metadata));
-
+
         const headers = typeof action.headers === 'function' ? action.headers(file, metadata) : {
             ...action.headers,
             'Upload-Length': file.size
@@ -70,11 +72,11 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
     const requestTransferOffset = cb => {
         const requestUrl = buildURL(apiUrl, chunkServer.url, state.serverId);
-
+
         const headers = typeof action.headers === 'function' ? action.headers(state.serverId) : {
             ...action.headers
         };
-
+
         const requestParams = {
             headers,
             method: 'HEAD'
@@ -96,9 +98,9 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
         request.ontimeout = createTimeoutResponse(error);
     }
 
-    // create chunks
+    // create chunks
     const lastChunkIndex = Math.floor(file.size / chunkSize);
-    for (let i = 0; i <= lastChunkIndex; i++) {
+    for (let i = 0; i <= lastChunkIndex; i++) {
         const offset = i * chunkSize;
         const data = file.slice(offset, offset + chunkSize, 'application/offset+octet-stream');
         chunks[i] = {
@@ -115,6 +117,7 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
             timeout: null
         }
     }
+    // in parallel mode the last chunk is held back until all other chunks have completed
+    const lastChunk = chunkParallelize ? chunks.pop() : null;
 
     const completeProcessingChunks = () => load(state.serverId);
@@ -124,7 +127,7 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
 
         // processing is paused, wait here
         if (state.aborted) return;
-
+
         // get next chunk to process
         chunk = chunk || chunks.find(canProcessChunk);
 
@@ -138,7 +141,7 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
 
         // no chunk to handle
        return;
-    };
+    }
 
     // now processing this chunk
     chunk.status = ChunkStatus.PROCESSING;
@@ -151,29 +154,110 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
 
         // send request object
         const requestUrl = buildURL(apiUrl, chunkServer.url, state.serverId);
 
+        if (!chunkParallelize) {
+
+            const headers = typeof chunkServer.headers === 'function' ? chunkServer.headers(chunk) : {
+                ...chunkServer.headers,
+                'Content-Type': 'application/offset+octet-stream',
+                'Upload-Offset': chunk.offset,
+                'Upload-Length': file.size,
+                'Upload-Name': file.name
+            };
+
+            const request = chunk.request = sendRequest(ondata(chunk.data), requestUrl, {
+                ...chunkServer,
+                headers
+            });
+
+            request.onload = () => {
+
+                // done!
+                chunk.status = ChunkStatus.COMPLETE;
+
+                // remove request reference
+                chunk.request = null;
+
+                // start processing more chunks
+                processChunks();
+            };
+
+            request.onprogress = (lengthComputable, loaded, total) => {
+                chunk.progress = lengthComputable ? loaded : null;
+                updateTotalProgress();
+            };
+
+            request.onerror = (xhr) => {
+                chunk.status = ChunkStatus.ERROR;
+                chunk.request = null;
+                chunk.error = onerror(xhr.response) || xhr.statusText;
+                if (!retryProcessChunk(chunk)) {
+                    error(
+                        createResponse(
+                            'error',
+                            xhr.status,
+                            onerror(xhr.response) || xhr.statusText,
+                            xhr.getAllResponseHeaders()
+                        )
+                    );
+                }
+            };
+
+            request.ontimeout = (xhr) => {
+                chunk.status = ChunkStatus.ERROR;
+                chunk.request = null;
+                if (!retryProcessChunk(chunk)) {
+                    createTimeoutResponse(error)(xhr);
+                }
+            };
+
+            request.onabort = () => {
+                chunk.status = ChunkStatus.QUEUED;
+                chunk.request = null;
+                abort();
+            };
+
+        } else {
+
+            for (const chunk of chunks) {
+                processChunkRequest(chunk);
+            }
+
+        }
+    }
+
+    const processChunkRequest = (chunk, isLastChunk = false) => {
         const headers = typeof chunkServer.headers === 'function' ? chunkServer.headers(chunk) : {
             ...chunkServer.headers,
             'Content-Type': 'application/offset+octet-stream',
-            'Upload-Offset': chunk.offset,
-            'Upload-Length': file.size,
+            'Upload-Index': chunk.index,
+            'Upload-Chunks-Number': chunks.length + 1,
             'Upload-Name': file.name
         };
 
+        // send request object
+        const requestUrl = buildURL(apiUrl, chunkServer.url, state.serverId);
         const request = chunk.request = sendRequest(ondata(chunk.data), requestUrl, {
             ...chunkServer,
             headers
         });
 
         request.onload = () => {
-            // done!
             chunk.status = ChunkStatus.COMPLETE;
-            // remove request reference
             chunk.request = null;
 
-            // start processing more chunks
-            processChunks();
+            // once every parallel chunk has completed, send the held-back last chunk;
+            // its completion marks the end of the transfer
+            if (chunks.length === chunks.filter(c => c.status === ChunkStatus.COMPLETE).length && !isLastChunk) {
+                processChunkRequest(lastChunk, true);
+            }
+            if (isLastChunk) {
+                completeProcessingChunks();
+            }
         };
 
         request.onprogress = (lengthComputable, loaded, total) => {
@@ -210,7 +294,6 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
             chunk.request = null;
             abort();
         };
-
     }
 
     const retryProcessChunk = (chunk) => {
@@ -224,7 +307,7 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
             chunk.timeout = setTimeout(() => {
                 processChunk(chunk);
             }, chunk.retries.shift());
-
+
             // we're going to retry
             return true;
         }
@@ -253,7 +336,7 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
         if (totalProcessing >= 1) return;
         processChunk();
     };
-
+
     const abortChunks = () => {
         chunks.forEach(chunk => {
             clearTimeout(chunk.timeout);
@@ -277,8 +360,7 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
             state.serverId = serverId;
             processChunks();
         })
-    }
-    else {
+    } else {
         requestTransferOffset(offset => {
 
             // stop here if aborted, might have happened in between request and callback
@@ -287,20 +369,20 @@ export const processFileChunked = (apiUrl, action, name, file, metadata, load, e
             // mark chunks with lower offset as complete
             chunks.filter(chunk => chunk.offset < offset)
                 .forEach(chunk => {
-                        chunk.status = ChunkStatus.COMPLETE;
-                        chunk.progress = chunk.size;
-                    }
-                );
+                    chunk.status = ChunkStatus.COMPLETE;
+                    chunk.progress = chunk.size;
+                });
 
             // continue processing
             processChunks();
         })
     }
-
+
     return {
         abort: () => {
             state.aborted = true;
             abortChunks();
         }
-    }
+    }
 };
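Stripped of FilePond's request plumbing, the completion protocol the new `onload` handler implements is: upload every chunk except the last in parallel, then send the last chunk once the rest have completed, so the server can treat that final request as the commit signal. A plain sketch of that ordering (no FilePond internals; `sendChunk` stands in for the XHR wrapper and is assumed to return a promise):

```js
const uploadParallelized = async (chunks, sendChunk) => {
    // hold the last chunk back; it doubles as the "transfer done" marker
    const lastChunk = chunks.pop();

    // all other chunks go out concurrently
    await Promise.all(chunks.map(chunk => sendChunk(chunk)));

    // only now send the final chunk; the server finalizes on receipt
    await sendChunk(lastChunk);
};
```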