
moved hashing to file_utils

diminator 2015-07-23 13:03:30 +02:00
parent 1a1f14e04c
commit 0cc48a73e4
2 changed files with 61 additions and 44 deletions


@@ -2,7 +2,6 @@
import React from 'react/addons';
import Raven from 'raven-js';
import SparkMD5 from 'spark-md5';
import { getCookie } from '../../utils/fetch_api_utils';
import { getLangText } from '../../utils/lang_utils';
@@ -485,8 +484,6 @@ var ReactS3FineUploader = React.createClass({
        this.state.uploader.addFiles(files);
        let oldFiles = this.state.filesToUpload;
        let oldAndNewFiles = this.state.uploader.getUploads();
        // Compute the hash of the file instead of uploading... needs UX design!
        // this.computeHashOfFile(0);
        // Add fineuploader specific information to new files
        for(let i = 0; i < oldAndNewFiles.length; i++) {
            for(let j = 0; j < files.length; j++) {
@@ -544,48 +541,7 @@ var ReactS3FineUploader = React.createClass({
        });
        this.setState(newState);
    },
    makeTextFile(text) {
        let data = new Blob([text], {type: 'text/plain'});
        return window.URL.createObjectURL(data);
    },

    computeHashOfFile(fileId) {
        let blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice,
            file = this.state.uploader.getFile(fileId),
            chunkSize = 2097152, // Read in chunks of 2MB
            chunks = Math.ceil(file.size / chunkSize),
            currentChunk = 0,
            spark = new SparkMD5.ArrayBuffer(),
            fileReader = new FileReader();

        let startTime = new Date();

        fileReader.onload = function (e) {
            //console.log('read chunk nr', currentChunk + 1, 'of', chunks);
            spark.append(e.target.result); // Append array buffer
            currentChunk++;

            if (currentChunk < chunks) {
                loadNext();
            } else {
                let fileHash = spark.end();
                console.info('computed hash %s (took %d s)',
                    fileHash,
                    Math.round(((new Date() - startTime) / 1000) % 60)); // Compute hash
                console.log(this.makeTextFile(fileHash));
            }
        }.bind(this);

        fileReader.onerror = function () {
            console.warn('oops, something went wrong.');
        };

        function loadNext() {
            var start = currentChunk * chunkSize,
                end = ((start + chunkSize) >= file.size) ? file.size : start + chunkSize;

            fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
        }

        loadNext();
    },
    render() {
        return (

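With hashing moved out, the component drops its own SparkMD5 import. If the commented-out this.computeHashOfFile(0) call is ever re-enabled, the component would presumably import the helper the same way it already imports its other utilities and hand it the File object resolved from fineuploader — a sketch under those assumptions, not part of this diff:

import { computeHashOfFile } from '../../utils/file_utils';

// Hypothetical call site inside the component: the new utility takes a
// File object directly, so the component resolves the fineuploader id first.
computeHashOfFile(this.state.uploader.getFile(0));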
js/utils/file_utils.js Normal file

@@ -0,0 +1,61 @@
'use strict';

import SparkMD5 from 'spark-md5';

/**
 * Takes a string, creates a text file and returns the URL
 *
 * @param  {string} text regular javascript string
 * @return {string} object URL pointing at the created text file
 */
export function makeTextFile(text) {
    let data = new Blob([text], {type: 'text/plain'});
    return window.URL.createObjectURL(data);
}
/**
 * Takes a File object and computes its MD5 hash in 2MB chunks.
 * FileReader is asynchronous, so the hash is only available inside the
 * onload callback, where it is logged together with the object URL of a
 * text file containing the hash; nothing is returned to the caller.
 *
 * @param {File} file javascript File object
 */
export function computeHashOfFile(file) {
    let blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice,
        chunkSize = 2097152, // Read in chunks of 2MB
        chunks = Math.ceil(file.size / chunkSize),
        currentChunk = 0,
        spark = new SparkMD5.ArrayBuffer(),
        fileReader = new FileReader();

    let startTime = new Date();

    fileReader.onload = function (e) {
        spark.append(e.target.result); // Append this chunk's ArrayBuffer to the running hash
        currentChunk++;

        if (currentChunk < chunks) {
            loadNext();
        } else {
            let fileHash = spark.end(); // Finalize the incremental MD5
            console.info('computed hash %s (took %d s)',
                fileHash,
                Math.round(((new Date() - startTime) / 1000) % 60));

            // makeTextFile is a module-level function here, not a component
            // method, so it must be called directly rather than via `this`
            let hashFile = makeTextFile(fileHash);
            console.info('hash: ', hashFile);
        }
    };

    fileReader.onerror = function () {
        console.warn('oops, something went wrong.');
    };

    function loadNext() {
        var start = currentChunk * chunkSize,
            end = ((start + chunkSize) >= file.size) ? file.size : start + chunkSize;

        fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
    }

    loadNext();
}
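For context, a minimal usage sketch (not part of the commit): wiring computeHashOfFile to a plain file input. The element id 'file-input' and the import path are assumptions for illustration; in the repo the caller would be the uploader component.

'use strict';

import { computeHashOfFile } from './utils/file_utils';

// Hypothetical: hash the first file the user selects. 'file-input' is an
// assumed <input type="file"> element on the page.
document.getElementById('file-input').addEventListener('change', function(event) {
    let file = event.target.files[0];
    if (file) {
        // Starts the chunked read; the MD5 hash and the text-file URL are
        // logged from the onload callback once the last chunk is appended.
        computeHashOfFile(file);
    }
});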