jszip.js
/*!
JSZip v3.2.1 - A JavaScript class for generating and reading zip files
<http://stuartk.com/jszip>
JSZip uses the library pako released under the MIT license :
https://github.com/nodeca/pako/blob/master/LICENSE
*/
if (!isArray) {
chr1 = input.charCodeAt(i++);
chr2 = i < len ? input.charCodeAt(i++) : 0;
chr3 = i < len ? input.charCodeAt(i++) : 0;
} else {
chr1 = input[i++];
chr2 = i < len ? input[i++] : 0;
chr3 = i < len ? input[i++] : 0;
}
output.push(_keyStr.charAt(enc1) + _keyStr.charAt(enc2) +
_keyStr.charAt(enc3) + _keyStr.charAt(enc4));
}
return output.join("");
};
enc1 = _keyStr.indexOf(input.charAt(i++));
enc2 = _keyStr.indexOf(input.charAt(i++));
enc3 = _keyStr.indexOf(input.charAt(i++));
enc4 = _keyStr.indexOf(input.charAt(i++));
return output;
};
},{"./support":30,"./utils":32}],2:[function(require,module,exports){
'use strict';
/**
* Represent a compressed object, with everything needed to decompress it.
* @constructor
* @param {number} compressedSize the size of the data compressed.
* @param {number} uncompressedSize the size of the data after decompression.
* @param {number} crc32 the crc32 of the decompressed file.
* @param {object} compression the type of compression, see lib/compressions.js.
* @param {String|ArrayBuffer|Uint8Array|Buffer} data the compressed data.
*/
function CompressedObject(compressedSize, uncompressedSize, crc32, compression,
data) {
this.compressedSize = compressedSize;
this.uncompressedSize = uncompressedSize;
this.crc32 = crc32;
this.compression = compression;
this.compressedContent = data;
}
CompressedObject.prototype = {
/**
* Create a worker to get the uncompressed content.
* @return {GenericWorker} the worker.
*/
    getContentWorker : function () {
        var worker = new DataWorker(external.Promise.resolve(this.compressedContent))
            .pipe(this.compression.uncompressWorker())
            .pipe(new DataLengthProbe("data_length"));
        return worker;
    }
};
/**
 * Chain the given worker with other workers to compress the content with the
 * given compression.
* @param {GenericWorker} uncompressedWorker the worker to pipe.
* @param {Object} compression the compression object.
* @param {Object} compressionOptions the options to use when compressing.
* @return {GenericWorker} the new worker compressing the content.
*/
CompressedObject.createWorkerFrom = function (uncompressedWorker, compression,
compressionOptions) {
return uncompressedWorker
.pipe(new Crc32Probe())
.pipe(new DataLengthProbe("uncompressedSize"))
.pipe(compression.compressWorker(compressionOptions))
.pipe(new DataLengthProbe("compressedSize"))
.withStreamInfo("compression", compression);
};
module.exports = CompressedObject;
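// Illustrative sketch (not part of the library source): how the chaining described
// above could be used to compress the content of one entry. `someSourceWorker`,
// `deflate` and `opts` are hypothetical placeholders.
//
//   var compressing = CompressedObject.createWorkerFrom(someSourceWorker, deflate, opts);
//   // once the chain has run, the worker's streamInfo exposes "crc32",
//   // "uncompressedSize", "compressedSize" and "compression".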
},{"./external":6,"./stream/Crc32Probe":25,"./stream/DataLengthProbe":26,"./
stream/DataWorker":27}],3:[function(require,module,exports){
'use strict';
exports.STORE = {
magic: "\x00\x00",
compressWorker : function (compressionOptions) {
return new GenericWorker("STORE compression");
},
uncompressWorker : function () {
return new GenericWorker("STORE decompression");
}
};
exports.DEFLATE = require('./flate');
},{"./flate":7,"./stream/GenericWorker":28}],4:[function(require,module,exports){
'use strict';
/**
* The following functions come from pako, from pako/lib/zlib/crc32.js
* released under the MIT license, see pako https://github.com/nodeca/pako/
*/
return table;
}
/**
* Compute the crc32 of a string.
 * This is almost the same as the function crc32, but for strings. Using the
 * same function for the two use cases leads to horrible performance.
* @param {Number} crc the starting value of the crc.
* @param {String} str the string to use.
* @param {Number} len the length of the string.
* @param {Number} pos the starting position for the crc32 computation.
* @return {Number} the computed crc32.
*/
function crc32str(crc, str, len, pos) {
var t = crcTable, end = pos + len;
if(isArray) {
return crc32(crc|0, input, input.length, 0);
} else {
return crc32str(crc|0, input, input.length, 0);
}
};
},{"./utils":32}],5:[function(require,module,exports){
'use strict';
exports.base64 = false;
exports.binary = false;
exports.dir = false;
exports.createFolders = true;
exports.date = null;
exports.compression = null;
exports.compressionOptions = null;
exports.comment = null;
exports.unixPermissions = null;
exports.dosPermissions = null;
},{}],6:[function(require,module,exports){
/* global Promise */
'use strict';
/**
* Let the user use/change some implementations.
*/
module.exports = {
Promise: ES6Promise
};
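// Illustrative sketch (not part of the library source): swapping the Promise
// implementation, which is what this module allows. `MyPromise` is a hypothetical
// ES6-compatible Promise implementation.
//
//   var JSZip = require("jszip");
//   JSZip.external.Promise = MyPromise;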
},{"lie":37}],7:[function(require,module,exports){
'use strict';
var pako = require("pako");
var utils = require("./utils");
var GenericWorker = require("./stream/GenericWorker");
var USE_TYPEDARRAY = (typeof Uint8Array !== 'undefined') && (typeof Uint16Array !== 'undefined') && (typeof Uint32Array !== 'undefined');
var ARRAY_TYPE = USE_TYPEDARRAY ? "uint8array" : "array";
/**
* Create a worker that uses pako to inflate/deflate.
* @constructor
 * @param {String} action the name of the pako function to call : either "Deflate" or "Inflate".
* @param {Object} options the options to use when (de)compressing.
*/
function FlateWorker(action, options) {
GenericWorker.call(this, "FlateWorker/" + action);
this._pako = null;
this._pakoAction = action;
this._pakoOptions = options;
    // the `meta` object from the last chunk received
    // this allows this worker to pass around metadata
this.meta = {};
}
utils.inherits(FlateWorker, GenericWorker);
/**
* @see GenericWorker.processChunk
*/
FlateWorker.prototype.processChunk = function (chunk) {
this.meta = chunk.meta;
if (this._pako === null) {
this._createPako();
}
this._pako.push(utils.transformTo(ARRAY_TYPE, chunk.data), false);
};
/**
* @see GenericWorker.flush
*/
FlateWorker.prototype.flush = function () {
GenericWorker.prototype.flush.call(this);
if (this._pako === null) {
this._createPako();
}
this._pako.push([], true);
};
/**
* @see GenericWorker.cleanUp
*/
FlateWorker.prototype.cleanUp = function () {
GenericWorker.prototype.cleanUp.call(this);
this._pako = null;
};
/**
* Create the _pako object.
* TODO: lazy-loading this object isn't the best solution but it's the
* quickest. The best solution is to lazy-load the worker list. See also the
* issue #446.
*/
FlateWorker.prototype._createPako = function () {
this._pako = new pako[this._pakoAction]({
raw: true,
level: this._pakoOptions.level || -1 // default compression
});
var self = this;
this._pako.onData = function(data) {
self.push({
data : data,
meta : self.meta
});
};
};
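// Illustrative sketch (not part of the library source): the worker above drives
// pako in raw mode (no zlib wrapper), roughly equivalent to:
//
//   var deflator = new pako.Deflate({raw: true, level: -1});
//   deflator.onData = function (data) { /* collect compressed chunks */ };
//   deflator.push(someUint8Array, false); // intermediate chunk
//   deflator.push([], true);              // flush and finish
//
// `someUint8Array` is a hypothetical input chunk.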
},{"./stream/GenericWorker":28,"./utils":32,"pako":38}],8:
[function(require,module,exports){
'use strict';
/**
* Transform an integer into a string in hexadecimal.
* @private
* @param {number} dec the number to convert.
* @param {number} bytes the number of bytes to generate.
* @returns {string} the result.
*/
var decToHex = function(dec, bytes) {
var hex = "", i;
for (i = 0; i < bytes; i++) {
hex += String.fromCharCode(dec & 0xff);
dec = dec >>> 8;
}
return hex;
};
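// Example (added note): decToHex writes the integer little-endian, one character
// per byte. decToHex(0x0403, 2) === "\x03\x04", which is how the "PK\x03\x04"
// local file header signature is laid out on disk.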
/**
* Generate the UNIX part of the external file attributes.
* @param {Object} unixPermissions the unix permissions or null.
* @param {Boolean} isDir true if the entry is a directory, false otherwise.
* @return {Number} a 32 bit integer.
*
 * adapted from http://unix.stackexchange.com/questions/14705/the-zip-formats-external-file-attribute :
 *
 * TTTTsstrwxrwxrwx0000000000ADVSHR
 * ^^^^____________________________ file type, see zipinfo.c (UNX_*)
 *     ^^^_________________________ setuid, setgid, sticky
 *        ^^^^^^^^^________________ permissions
 *                 ^^^^^^^^^^______ not used ?
 *                           ^^^^^^ DOS attribute bits : Archive, Directory, Volume label, System file, Hidden, Read only
*/
var generateUnixExternalFileAttr = function (unixPermissions, isDir) {
/**
* Generate the DOS part of the external file attributes.
* @param {Object} dosPermissions the dos permissions or null.
* @param {Boolean} isDir true if the entry is a directory, false otherwise.
* @return {Number} a 32 bit integer.
*
* Bit 0 Read-Only
* Bit 1 Hidden
* Bit 2 System
* Bit 3 Volume Label
* Bit 4 Directory
* Bit 5 Archive
*/
var generateDosExternalFileAttr = function (dosPermissions, isDir) {
/**
* Generate the various parts used in the construction of the final zip file.
 * @param {Object} streamInfo the hash with information about the compressed file.
 * @param {Boolean} streamedContent is the content streamed ?
 * @param {Boolean} streamingEnded is the stream finished ?
 * @param {number} offset the current offset from the start of the zip file.
 * @param {String} platform let's pretend we are this platform (change platform dependent fields)
* @param {Function} encodeFileName the function to encode the file name / comment.
* @return {Object} the zip parts.
*/
var generateZipParts = function(streamInfo, streamedContent, streamingEnded, offset, platform, encodeFileName) {
var file = streamInfo['file'],
compression = streamInfo['compression'],
useCustomEncoding = encodeFileName !== utf8.utf8encode,
encodedFileName = utils.transformTo("string", encodeFileName(file.name)),
utfEncodedFileName = utils.transformTo("string", utf8.utf8encode(file.name)),
comment = file.comment,
encodedComment = utils.transformTo("string", encodeFileName(comment)),
utfEncodedComment = utils.transformTo("string", utf8.utf8encode(comment)),
useUTF8ForFileName = utfEncodedFileName.length !== file.name.length,
useUTF8ForComment = utfEncodedComment.length !== comment.length,
dosTime,
dosDate,
extraFields = "",
unicodePathExtraField = "",
unicodeCommentExtraField = "",
dir = file.dir,
date = file.date;
var dataInfo = {
crc32 : 0,
compressedSize : 0,
uncompressedSize : 0
};
var bitflag = 0;
if (streamedContent) {
// Bit 3: the sizes/crc32 are set to zero in the local header.
// The correct values are put in the data descriptor immediately
// following the compressed data.
bitflag |= 0x0008;
}
if (!useCustomEncoding && (useUTF8ForFileName || useUTF8ForComment)) {
// Bit 11: Language encoding flag (EFS).
bitflag |= 0x0800;
}
var extFileAttr = 0;
var versionMadeBy = 0;
if (dir) {
// dos or unix, we set the dos dir flag
extFileAttr |= 0x00010;
}
if(platform === "UNIX") {
versionMadeBy = 0x031E; // UNIX, version 3.0
extFileAttr |= generateUnixExternalFileAttr(file.unixPermissions, dir);
} else { // DOS or other, fallback to DOS
versionMadeBy = 0x0014; // DOS, version 2.0
extFileAttr |= generateDosExternalFileAttr(file.dosPermissions, dir);
}
// date
// @see http://www.delorie.com/djgpp/doc/rbinter/it/52/13.html
// @see http://www.delorie.com/djgpp/doc/rbinter/it/65/16.html
// @see http://www.delorie.com/djgpp/doc/rbinter/it/66/16.html
dosTime = date.getUTCHours();
dosTime = dosTime << 6;
dosTime = dosTime | date.getUTCMinutes();
dosTime = dosTime << 5;
dosTime = dosTime | date.getUTCSeconds() / 2;
dosDate = date.getUTCFullYear() - 1980;
dosDate = dosDate << 4;
dosDate = dosDate | (date.getUTCMonth() + 1);
dosDate = dosDate << 5;
dosDate = dosDate | date.getUTCDate();
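    // Worked example (added note): for 2019-06-15 10:30:20 UTC the fields above
    // pack to dosTime = (10 << 11) | (30 << 5) | (20 / 2) = 0x53CA and
    // dosDate = ((2019 - 1980) << 9) | (6 << 5) | 15 = 0x4ECF.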
if (useUTF8ForFileName) {
// set the unicode path extra field. unzip needs at least one extra
// field to correctly handle unicode path, so using the path is as good
// as any other information. This could improve the situation with
// other archive managers too.
// This field is usually used without the utf8 flag, with a non
// unicode path in the header (winrar, winzip). This helps (a bit)
// with the messy Windows' default compressed folders feature but
// breaks on p7zip which doesn't seek the unicode path extra field.
// So for now, UTF-8 everywhere !
unicodePathExtraField =
// Version
decToHex(1, 1) +
// NameCRC32
decToHex(crc32(encodedFileName), 4) +
// UnicodeName
utfEncodedFileName;
extraFields +=
// Info-ZIP Unicode Path Extra Field
"\x75\x70" +
// size
decToHex(unicodePathExtraField.length, 2) +
// content
unicodePathExtraField;
}
if(useUTF8ForComment) {
unicodeCommentExtraField =
// Version
decToHex(1, 1) +
// CommentCRC32
decToHex(crc32(encodedComment), 4) +
// UnicodeComment
utfEncodedComment;
extraFields +=
// Info-ZIP Unicode Comment Extra Field
"\x75\x63" +
// size
decToHex(unicodeCommentExtraField.length, 2) +
// content
unicodeCommentExtraField;
}
return {
fileRecord: fileRecord,
dirRecord: dirRecord
};
};
/**
* Generate the EOCD record.
* @param {Number} entriesCount the number of entries in the zip file.
* @param {Number} centralDirLength the length (in bytes) of the central dir.
* @param {Number} localDirLength the length (in bytes) of the local dir.
* @param {String} comment the zip file comment as a binary string.
* @param {Function} encodeFileName the function to encode the comment.
* @return {String} the EOCD record.
*/
var generateCentralDirectoryEnd = function (entriesCount, centralDirLength,
localDirLength, comment, encodeFileName) {
var dirEnd = "";
var encodedComment = utils.transformTo("string", encodeFileName(comment));
return dirEnd;
};
/**
* Generate data descriptors for a file entry.
 * @param {Object} streamInfo the hash generated by a worker, containing information
 * on the file entry.
* @return {String} the data descriptors.
*/
var generateDataDescriptors = function (streamInfo) {
var descriptor = "";
descriptor = signature.DATA_DESCRIPTOR +
// crc-32 4 bytes
decToHex(streamInfo['crc32'], 4) +
// compressed size 4 bytes
decToHex(streamInfo['compressedSize'], 4) +
// uncompressed size 4 bytes
decToHex(streamInfo['uncompressedSize'], 4);
return descriptor;
};
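// Added note: the descriptor built above is the 4-byte "PK\x07\x08" signature
// followed by three little-endian 32-bit values (crc-32, compressed size,
// uncompressed size), i.e. 16 bytes in total.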
/**
* A worker to concatenate other workers to create a zip file.
* @param {Boolean} streamFiles `true` to stream the content of the files,
* `false` to accumulate it.
* @param {String} comment the comment to use.
* @param {String} platform the platform to use, "UNIX" or "DOS".
* @param {Function} encodeFileName the function to encode file names and comments.
*/
function ZipFileWorker(streamFiles, comment, platform, encodeFileName) {
GenericWorker.call(this, "ZipFileWorker");
// The number of bytes written so far. This doesn't count accumulated chunks.
this.bytesWritten = 0;
// The comment of the zip file
this.zipComment = comment;
// The platform "generating" the zip file.
this.zipPlatform = platform;
// the function to encode file names and comments.
this.encodeFileName = encodeFileName;
// Should we stream the content of the files ?
this.streamFiles = streamFiles;
// If `streamFiles` is false, we will need to accumulate the content of the
// files to calculate sizes / crc32 (and write them *before* the content).
// This boolean indicates if we are accumulating chunks (it will change a lot
// during the lifetime of this worker).
this.accumulate = false;
// The buffer receiving chunks when accumulating content.
this.contentBuffer = [];
// The list of generated directory records.
this.dirRecords = [];
// The offset (in bytes) from the beginning of the zip file for the current source.
this.currentSourceOffset = 0;
// The total number of entries in this zip file.
this.entriesCount = 0;
// the name of the file currently being added, null when handling the end of the zip file.
// Used for the emitted metadata.
this.currentFile = null;
this._sources = [];
}
utils.inherits(ZipFileWorker, GenericWorker);
/**
* @see GenericWorker.push
*/
ZipFileWorker.prototype.push = function (chunk) {
    var currentFilePercent = chunk.meta.percent || 0;
    var entriesCount = this.entriesCount;
    var remainingFiles = this._sources.length;
    if(this.accumulate) {
        this.contentBuffer.push(chunk);
    } else {
        this.bytesWritten += chunk.data.length;
        GenericWorker.prototype.push.call(this, {
            data : chunk.data,
            meta : {
                currentFile : this.currentFile,
                percent : entriesCount ? (currentFilePercent + 100 * (entriesCount - remainingFiles - 1)) / entriesCount : 100
            }
}
});
}
};
/**
 * The worker started a new source (another worker).
* @param {Object} streamInfo the streamInfo object from the new source.
*/
ZipFileWorker.prototype.openedSource = function (streamInfo) {
this.currentSourceOffset = this.bytesWritten;
this.currentFile = streamInfo['file'].name;
/**
 * The worker finished a source (another worker).
* @param {Object} streamInfo the streamInfo object from the finished source.
*/
ZipFileWorker.prototype.closedSource = function (streamInfo) {
this.accumulate = false;
var streamedContent = this.streamFiles && !streamInfo['file'].dir;
var record = generateZipParts(streamInfo, streamedContent, true,
this.currentSourceOffset, this.zipPlatform, this.encodeFileName);
this.dirRecords.push(record.dirRecord);
if(streamedContent) {
// after the streamed file, we put data descriptors
this.push({
data : generateDataDescriptors(streamInfo),
meta : {percent:100}
});
} else {
// the content wasn't streamed, we need to push everything now
// first the file record, then the content
this.push({
data : record.fileRecord,
meta : {percent:0}
});
while(this.contentBuffer.length) {
this.push(this.contentBuffer.shift());
}
}
this.currentFile = null;
};
/**
* @see GenericWorker.flush
*/
ZipFileWorker.prototype.flush = function () {
var localDirLength = this.bytesWritten;
for(var i = 0; i < this.dirRecords.length; i++) {
this.push({
data : this.dirRecords[i],
meta : {percent:100}
});
}
    var centralDirLength = this.bytesWritten - localDirLength;
    var dirEnd = generateCentralDirectoryEnd(this.dirRecords.length, centralDirLength, localDirLength, this.zipComment, this.encodeFileName);
    this.push({
        data : dirEnd,
        meta : {percent:100}
    });
};
/**
* Prepare the next source to be read.
*/
ZipFileWorker.prototype.prepareNextSource = function () {
this.previous = this._sources.shift();
this.openedSource(this.previous.streamInfo);
if (this.isPaused) {
this.previous.pause();
} else {
this.previous.resume();
}
};
/**
* @see GenericWorker.registerPrevious
*/
ZipFileWorker.prototype.registerPrevious = function (previous) {
this._sources.push(previous);
var self = this;
/**
* @see GenericWorker.resume
*/
ZipFileWorker.prototype.resume = function () {
if(!GenericWorker.prototype.resume.call(this)) {
return false;
}
/**
* @see GenericWorker.error
*/
ZipFileWorker.prototype.error = function (e) {
var sources = this._sources;
if(!GenericWorker.prototype.error.call(this, e)) {
return false;
}
for(var i = 0; i < sources.length; i++) {
try {
sources[i].error(e);
} catch(e) {
// the `error` exploded, nothing to do
}
}
return true;
};
/**
* @see GenericWorker.lock
*/
ZipFileWorker.prototype.lock = function () {
GenericWorker.prototype.lock.call(this);
var sources = this._sources;
for(var i = 0; i < sources.length; i++) {
sources[i].lock();
}
};
module.exports = ZipFileWorker;
},{"../crc32":4,"../signature":23,"../stream/GenericWorker":28,"../utf8":31,"../
utils":32}],9:[function(require,module,exports){
'use strict';
/**
* Find the compression to use.
 * @param {String} fileCompression the compression defined at the file level, if any.
* @param {String} zipCompression the compression defined at the load() level.
* @return {Object} the compression object to use.
*/
var getCompression = function (fileCompression, zipCompression) {
/**
* Create a worker to generate a zip file.
* @param {JSZip} zip the JSZip instance at the right root level.
* @param {Object} options to generate the zip file.
* @param {String} comment the comment to use.
*/
exports.generateWorker = function (zip, options, comment) {
file._compressWorker(compression, compressionOptions)
.withStreamInfo("file", {
name : relativePath,
dir : dir,
date : date,
comment : file.comment || "",
unixPermissions : file.unixPermissions,
dosPermissions : file.dosPermissions
})
.pipe(zipFileWorker);
});
zipFileWorker.entriesCount = entriesCount;
} catch (e) {
zipFileWorker.error(e);
}
return zipFileWorker;
};
},{"../compressions":3,"./ZipFileWorker":8}],10:[function(require,module,exports){
'use strict';
/**
 * Representation of a zip file in js
* @constructor
*/
function JSZip() {
// if this constructor is used without `new`, it adds `new` before itself:
if(!(this instanceof JSZip)) {
return new JSZip();
}
    if(arguments.length) {
        throw new Error("The constructor with parameters has been removed in JSZip 3.0, please check the upgrade guide.");
    }
this.comment = null;
JSZip.external = require("./external");
module.exports = JSZip;
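// Illustrative sketch (not part of the library source): typical use of the API
// defined in this module and in ./object (module 15 below).
//
//   var zip = new JSZip();
//   zip.file("hello.txt", "Hello world\n");
//   zip.folder("images");
//   zip.generateInternalStream({type: "uint8array"})
//      .accumulate()
//      .then(function (content) {
//          // `content` is the generated zip file as a Uint8Array
//      });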
},{"./defaults":5,"./external":6,"./load":11,"./object":15,"./support":30}],11:
[function(require,module,exports){
'use strict';
var utils = require('./utils');
var external = require("./external");
var utf8 = require('./utf8');
var ZipEntries = require('./zipEntries');
var Crc32Probe = require('./stream/Crc32Probe');
var nodejsUtils = require("./nodejsUtils");
/**
* Check the CRC32 of an entry.
* @param {ZipEntry} zipEntry the zip entry to check.
* @return {Promise} the result.
*/
function checkEntryCRC32(zipEntry) {
return new external.Promise(function (resolve, reject) {
        var worker = zipEntry.decompressed.getContentWorker().pipe(new Crc32Probe());
worker.on("error", function (e) {
reject(e);
})
.on("end", function () {
if (worker.streamInfo.crc32 !== zipEntry.decompressed.crc32) {
reject(new Error("Corrupted zip : CRC32 mismatch"));
} else {
resolve();
}
})
.resume();
});
}
return zip;
});
};
},{"./external":6,"./nodejsUtils":14,"./stream/Crc32Probe":25,"./utf8":31,"./
utils":32,"./zipEntries":33}],12:[function(require,module,exports){
"use strict";
/**
 * A worker that uses a nodejs stream as source.
* @constructor
* @param {String} filename the name of the file entry for this stream.
* @param {Readable} stream the nodejs stream.
*/
function NodejsStreamInputAdapter(filename, stream) {
GenericWorker.call(this, "Nodejs stream input adapter for " + filename);
this._upstreamEnded = false;
this._bindStream(stream);
}
utils.inherits(NodejsStreamInputAdapter, GenericWorker);
/**
* Prepare the stream and bind the callbacks on it.
* Do this ASAP on node 0.10 ! A lazy binding doesn't always work.
* @param {Stream} stream the nodejs stream to use.
*/
NodejsStreamInputAdapter.prototype._bindStream = function (stream) {
var self = this;
this._stream = stream;
stream.pause();
stream
.on("data", function (chunk) {
self.push({
data: chunk,
meta : {
percent : 0
}
});
})
.on("error", function (e) {
if(self.isPaused) {
self.generatedError = e;
} else {
self.error(e);
}
})
.on("end", function () {
if(self.isPaused) {
self._upstreamEnded = true;
} else {
self.end();
}
});
};
NodejsStreamInputAdapter.prototype.pause = function () {
if(!GenericWorker.prototype.pause.call(this)) {
return false;
}
this._stream.pause();
return true;
};
NodejsStreamInputAdapter.prototype.resume = function () {
if(!GenericWorker.prototype.resume.call(this)) {
return false;
}
if(this._upstreamEnded) {
this.end();
} else {
this._stream.resume();
}
return true;
};
module.exports = NodejsStreamInputAdapter;
},{"../stream/GenericWorker":28,"../utils":32}],13:
[function(require,module,exports){
'use strict';
/**
* A nodejs stream using a worker as source.
* @see the SourceWrapper in http://nodejs.org/api/stream.html
* @constructor
* @param {StreamHelper} helper the helper wrapping the worker
* @param {Object} options the nodejs stream options
* @param {Function} updateCb the update callback.
*/
function NodejsStreamOutputAdapter(helper, options, updateCb) {
Readable.call(this, options);
this._helper = helper;
NodejsStreamOutputAdapter.prototype._read = function() {
this._helper.resume();
};
module.exports = NodejsStreamOutputAdapter;
},{"../utils":32,"readable-stream":16}],14:[function(require,module,exports){
'use strict';
module.exports = {
/**
* True if this is running in Nodejs, will be undefined in a browser.
 * In a browser, browserify won't include this file and the whole module
 * will be resolved to an empty object.
*/
isNode : typeof Buffer !== "undefined",
/**
* Create a new nodejs Buffer from an existing content.
* @param {Object} data the data to pass to the constructor.
* @param {String} encoding the encoding to use.
* @return {Buffer} a new Buffer.
*/
newBufferFrom: function(data, encoding) {
if (Buffer.from && Buffer.from !== Uint8Array.from) {
return Buffer.from(data, encoding);
} else {
if (typeof data === "number") {
// Safeguard for old Node.js versions. On newer versions,
// Buffer.from(number) / Buffer(number, encoding) already throw.
throw new Error("The \"data\" argument must not be a number");
}
return new Buffer(data, encoding);
}
},
/**
* Create a new nodejs Buffer with the specified size.
* @param {Integer} size the size of the buffer.
* @return {Buffer} a new Buffer.
*/
allocBuffer: function (size) {
if (Buffer.alloc) {
return Buffer.alloc(size);
} else {
var buf = new Buffer(size);
buf.fill(0);
return buf;
}
},
/**
* Find out if an object is a Buffer.
* @param {Object} b the object to test.
* @return {Boolean} true if the object is a Buffer, false otherwise.
*/
    isBuffer : function(b){
        return Buffer.isBuffer(b);
    }
};
},{}],15:[function(require,module,exports){
'use strict';
var utf8 = require('./utf8');
var utils = require('./utils');
var GenericWorker = require('./stream/GenericWorker');
var StreamHelper = require('./stream/StreamHelper');
var defaults = require('./defaults');
var CompressedObject = require('./compressedObject');
var ZipObject = require('./zipObject');
var generate = require("./generate");
var nodejsUtils = require("./nodejsUtils");
var NodejsStreamInputAdapter = require("./nodejs/NodejsStreamInputAdapter");
/**
* Add a file in the current folder.
* @private
* @param {string} name the name of the file
* @param {String|ArrayBuffer|Uint8Array|Buffer} data the data of the file
* @param {Object} originalOptions the options of the file
* @return {Object} the new file.
*/
var fileAdd = function(name, data, originalOptions) {
// be sure sub folders exist
var dataType = utils.getTypeOf(data),
parent;
/*
* Correct options.
*/
if (o.dir) {
name = forceTrailingSlash(name);
}
if (o.createFolders && (parent = parentFolder(name))) {
folderAdd.call(this, parent, true);
}
    var isUnicodeString = dataType === "string" && o.binary === false && o.base64 === false;
if (!originalOptions || typeof originalOptions.binary === "undefined") {
o.binary = !isUnicodeString;
}
/*
* Convert content to fit.
*/
return external.Promise.resolve(zipObjectContent)
.then(function () {
return object;
});
};
/**
* Find the parent folder of the path.
* @private
* @param {string} path the path to use
* @return {string} the parent folder, or ""
*/
var parentFolder = function (path) {
if (path.slice(-1) === '/') {
path = path.substring(0, path.length - 1);
}
var lastSlash = path.lastIndexOf('/');
return (lastSlash > 0) ? path.substring(0, lastSlash) : "";
};
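// Examples (added note): parentFolder("a/b/c.txt") === "a/b",
// parentFolder("a/b/") === "a", parentFolder("file.txt") === "".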
/**
* Returns the path with a slash at the end.
* @private
* @param {String} path the path to check.
* @return {String} the path with a trailing slash.
*/
var forceTrailingSlash = function(path) {
// Check the name ends with a /
if (path.slice(-1) !== "/") {
path += "/"; // IE doesn't like substr(-1)
}
return path;
};
/**
* Add a (sub) folder in the current folder.
* @private
* @param {string} name the folder's name
* @param {boolean=} [createFolders] If true, automatically create sub
* folders. Defaults to false.
* @return {Object} the new folder.
*/
var folderAdd = function(name, createFolders) {
    createFolders = (typeof createFolders !== 'undefined') ? createFolders : defaults.createFolders;
name = forceTrailingSlash(name);
/**
* Cross-window, cross-Node-context regular expression detection
* @param {Object} object Anything
* @return {Boolean} true if the object is a regular expression,
* false otherwise
*/
function isRegExp(object) {
return Object.prototype.toString.call(object) === "[object RegExp]";
}
/**
* Call a callback function for each entry at this folder level.
* @param {Function} cb the callback function:
* function (relativePath, file) {...}
* It takes 2 arguments : the relative path and the file.
*/
forEach: function(cb) {
var filename, relativePath, file;
for (filename in this.files) {
if (!this.files.hasOwnProperty(filename)) {
continue;
}
file = this.files[filename];
relativePath = filename.slice(this.root.length, filename.length);
            if (relativePath && filename.slice(0, this.root.length) === this.root) { // the file is in the current root
                cb(relativePath, file); // TODO reverse the parameters ? need to be clean AND consistent with the filter search fn...
            }
}
},
/**
* Filter nested files/folders with the specified function.
* @param {Function} search the predicate to use :
* function (relativePath, file) {...}
* It takes 2 arguments : the relative path and the file.
* @return {Array} An array of matching elements.
*/
filter: function(search) {
var result = [];
this.forEach(function (relativePath, entry) {
if (search(relativePath, entry)) { // the file matches the function
result.push(entry);
}
});
return result;
},
/**
* Add a file to the zip file, or search a file.
 * @param {string|RegExp} name The name of the file to add (if data is defined),
 * the name of the file to find (if no data) or a regex to match files.
 * @param {String|ArrayBuffer|Uint8Array|Buffer} data The file data, either raw or base64 encoded
 * @param {Object} o File options
 * @return {JSZip|Object|Array} this JSZip object (when adding a file),
 * a file (when searching by string) or an array of files (when searching by regex).
*/
file: function(name, data, o) {
if (arguments.length === 1) {
if (isRegExp(name)) {
var regexp = name;
return this.filter(function(relativePath, file) {
return !file.dir && regexp.test(relativePath);
});
}
else { // text
var obj = this.files[this.root + name];
if (obj && !obj.dir) {
return obj;
} else {
return null;
}
}
}
else { // more than one argument : we have data !
name = this.root + name;
fileAdd.call(this, name, data, o);
}
return this;
},
/**
* Add a directory to the zip file, or search.
 * @param {String|RegExp} arg The name of the directory to add, or a regex to search folders.
 * @return {JSZip} an object with the new directory as the root, or an array containing matching folders.
*/
folder: function(arg) {
if (!arg) {
return this;
}
if (isRegExp(arg)) {
return this.filter(function(relativePath, file) {
return file.dir && arg.test(relativePath);
});
}
        // else, name is a new folder
        var name = this.root + arg;
        var newFolder = folderAdd.call(this, name);
        // Allow chaining by returning a new object with this folder as the root
        var ret = this.clone();
        ret.root = newFolder.name;
return ret;
},
/**
* Delete a file, or a directory and all sub-files, from the zip
* @param {string} name the name of the file to delete
* @return {JSZip} this JSZip object
*/
remove: function(name) {
name = this.root + name;
var file = this.files[name];
if (!file) {
// Look for any folders
if (name.slice(-1) !== "/") {
name += "/";
}
file = this.files[name];
}
return this;
},
/**
* Generate the complete zip file
 * @param {Object} options the options to generate the zip file :
 * - compression, "STORE" by default.
 * - type, "base64" by default. Values are : string, base64, uint8array, arraybuffer, blob.
* @return {String|Uint8Array|ArrayBuffer|Buffer|Blob} the zip file
*/
generate: function(options) {
throw new Error("This method has been removed in JSZip 3.0, please check
the upgrade guide.");
},
/**
* Generate the complete zip file as an internal stream.
 * @param {Object} options the options to generate the zip file :
 * - compression, "STORE" by default.
 * - type, "base64" by default. Values are : string, base64, uint8array, arraybuffer, blob.
* @return {StreamHelper} the streamed zip file.
*/
generateInternalStream: function(options) {
var worker, opts = {};
try {
opts = utils.extend(options || {}, {
streamFiles: false,
compression: "STORE",
compressionOptions : null,
type: "",
platform: "DOS",
comment: null,
mimeType: 'application/zip',
encodeFileName: utf8.utf8encode
});
opts.type = opts.type.toLowerCase();
opts.compression = opts.compression.toUpperCase();
if (!opts.type) {
throw new Error("No output type specified.");
}
utils.checkSupport(opts.type);
},{"./compressedObject":2,"./defaults":5,"./generate":9,"./nodejs/
NodejsStreamInputAdapter":12,"./nodejsUtils":14,"./stream/GenericWorker":28,"./
stream/StreamHelper":29,"./utf8":31,"./utils":32,"./zipObject":35}],16:
[function(require,module,exports){
/*
* This file is used by module bundlers (browserify/webpack/etc) when
* including a stream implementation. We use "readable-stream" to get a
* consistent behavior between nodejs versions but bundlers often have a shim
* for "stream". Using this shim greatly improve the compatibility and greatly
* reduce the final size of the bundle (only one stream implementation, not
* two).
*/
module.exports = require("stream");
},{"stream":undefined}],17:[function(require,module,exports){
'use strict';
var DataReader = require('./DataReader');
var utils = require('../utils');
function ArrayReader(data) {
DataReader.call(this, data);
for(var i = 0; i < this.data.length; i++) {
data[i] = data[i] & 0xFF;
}
}
utils.inherits(ArrayReader, DataReader);
/**
* @see DataReader.byteAt
*/
ArrayReader.prototype.byteAt = function(i) {
return this.data[this.zero + i];
};
/**
* @see DataReader.lastIndexOfSignature
*/
ArrayReader.prototype.lastIndexOfSignature = function(sig) {
var sig0 = sig.charCodeAt(0),
sig1 = sig.charCodeAt(1),
sig2 = sig.charCodeAt(2),
sig3 = sig.charCodeAt(3);
for (var i = this.length - 4; i >= 0; --i) {
if (this.data[i] === sig0 && this.data[i + 1] === sig1 && this.data[i + 2]
=== sig2 && this.data[i + 3] === sig3) {
return i - this.zero;
}
}
return -1;
};
/**
* @see DataReader.readAndCheckSignature
*/
ArrayReader.prototype.readAndCheckSignature = function (sig) {
var sig0 = sig.charCodeAt(0),
sig1 = sig.charCodeAt(1),
sig2 = sig.charCodeAt(2),
sig3 = sig.charCodeAt(3),
data = this.readData(4);
return sig0 === data[0] && sig1 === data[1] && sig2 === data[2] && sig3 ===
data[3];
};
/**
* @see DataReader.readData
*/
ArrayReader.prototype.readData = function(size) {
this.checkOffset(size);
if(size === 0) {
return [];
}
var result = this.data.slice(this.zero + this.index, this.zero + this.index +
size);
this.index += size;
return result;
};
module.exports = ArrayReader;
},{"../utils":32,"./DataReader":18}],18:[function(require,module,exports){
'use strict';
var utils = require('../utils');
function DataReader(data) {
this.data = data; // type : see implementation
this.length = data.length;
this.index = 0;
this.zero = 0;
}
DataReader.prototype = {
/**
* Check that the offset will not go too far.
 * @param {number} offset the additional offset to check.
* @throws {Error} an Error if the offset is out of bounds.
*/
checkOffset: function(offset) {
this.checkIndex(this.index + offset);
},
/**
* Check that the specified index will not be too far.
 * @param {number} newIndex the index to check.
* @throws {Error} an Error if the index is out of bounds.
*/
checkIndex: function(newIndex) {
if (this.length < this.zero + newIndex || newIndex < 0) {
throw new Error("End of data reached (data length = " + this.length +
", asked index = " + (newIndex) + "). Corrupted zip ?");
}
},
/**
* Change the index.
* @param {number} newIndex The new index.
* @throws {Error} if the new index is out of the data.
*/
setIndex: function(newIndex) {
this.checkIndex(newIndex);
this.index = newIndex;
},
/**
* Skip the next n bytes.
* @param {number} n the number of bytes to skip.
* @throws {Error} if the new index is out of the data.
*/
skip: function(n) {
this.setIndex(this.index + n);
},
/**
* Get the byte at the specified index.
* @param {number} i the index to use.
* @return {number} a byte.
*/
byteAt: function(i) {
// see implementations
},
/**
* Get the next number with a given byte size.
* @param {number} size the number of bytes to read.
* @return {number} the corresponding number.
*/
readInt: function(size) {
var result = 0,
i;
this.checkOffset(size);
for (i = this.index + size - 1; i >= this.index; i--) {
result = (result << 8) + this.byteAt(i);
}
this.index += size;
return result;
},
/**
* Get the next string with a given byte size.
* @param {number} size the number of bytes to read.
* @return {string} the corresponding string.
*/
readString: function(size) {
return utils.transformTo("string", this.readData(size));
},
/**
* Get raw data without conversion, <size> bytes.
* @param {number} size the number of bytes to read.
* @return {Object} the raw data, implementation specific.
*/
readData: function(size) {
// see implementations
},
/**
 * Find the last occurrence of a zip signature (4 bytes).
 * @param {string} sig the signature to find.
 * @return {number} the index of the last occurrence, -1 if not found.
*/
lastIndexOfSignature: function(sig) {
// see implementations
},
/**
* Read the signature (4 bytes) at the current position and compare it with
sig.
* @param {string} sig the expected signature
* @return {boolean} true if the signature matches, false otherwise.
*/
readAndCheckSignature: function(sig) {
// see implementations
},
/**
* Get the next date.
* @return {Date} the date.
*/
readDate: function() {
var dostime = this.readInt(4);
return new Date(Date.UTC(
((dostime >> 25) & 0x7f) + 1980, // year
((dostime >> 21) & 0x0f) - 1, // month
(dostime >> 16) & 0x1f, // day
(dostime >> 11) & 0x1f, // hour
(dostime >> 5) & 0x3f, // minute
(dostime & 0x1f) << 1)); // second
}
};
module.exports = DataReader;
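// Worked example (added note): readDate() reverses the DOS packing used when
// writing entries. For the 32-bit value 0x4ECF53CA (dosDate 0x4ECF, dosTime
// 0x53CA) it yields Date.UTC(2019, 5, 15, 10, 30, 20), i.e. 2019-06-15 10:30:20 UTC.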
},{"../utils":32}],19:[function(require,module,exports){
'use strict';
var Uint8ArrayReader = require('./Uint8ArrayReader');
var utils = require('../utils');
function NodeBufferReader(data) {
Uint8ArrayReader.call(this, data);
}
utils.inherits(NodeBufferReader, Uint8ArrayReader);
/**
* @see DataReader.readData
*/
NodeBufferReader.prototype.readData = function(size) {
this.checkOffset(size);
var result = this.data.slice(this.zero + this.index, this.zero + this.index +
size);
this.index += size;
return result;
};
module.exports = NodeBufferReader;
},{"../utils":32,"./Uint8ArrayReader":21}],20:[function(require,module,exports){
'use strict';
var DataReader = require('./DataReader');
var utils = require('../utils');
function StringReader(data) {
DataReader.call(this, data);
}
utils.inherits(StringReader, DataReader);
/**
* @see DataReader.byteAt
*/
StringReader.prototype.byteAt = function(i) {
return this.data.charCodeAt(this.zero + i);
};
/**
* @see DataReader.lastIndexOfSignature
*/
StringReader.prototype.lastIndexOfSignature = function(sig) {
return this.data.lastIndexOf(sig) - this.zero;
};
/**
* @see DataReader.readAndCheckSignature
*/
StringReader.prototype.readAndCheckSignature = function (sig) {
var data = this.readData(4);
return sig === data;
};
/**
* @see DataReader.readData
*/
StringReader.prototype.readData = function(size) {
this.checkOffset(size);
// this will work because the constructor applied the "& 0xff" mask.
var result = this.data.slice(this.zero + this.index, this.zero + this.index +
size);
this.index += size;
return result;
};
module.exports = StringReader;
},{"../utils":32,"./DataReader":18}],21:[function(require,module,exports){
'use strict';
var ArrayReader = require('./ArrayReader');
var utils = require('../utils');
function Uint8ArrayReader(data) {
ArrayReader.call(this, data);
}
utils.inherits(Uint8ArrayReader, ArrayReader);
/**
* @see DataReader.readData
*/
Uint8ArrayReader.prototype.readData = function(size) {
this.checkOffset(size);
if(size === 0) {
        // in IE10, when using subarray(idx, idx), we get the array [0x00] instead of [].
return new Uint8Array(0);
}
var result = this.data.subarray(this.zero + this.index, this.zero + this.index
+ size);
this.index += size;
return result;
};
module.exports = Uint8ArrayReader;
},{"../utils":32,"./ArrayReader":17}],22:[function(require,module,exports){
'use strict';
var utils = require('../utils');
var support = require('../support');
var ArrayReader = require('./ArrayReader');
var StringReader = require('./StringReader');
var NodeBufferReader = require('./NodeBufferReader');
var Uint8ArrayReader = require('./Uint8ArrayReader');
/**
* Create a reader adapted to the data.
* @param {String|ArrayBuffer|Uint8Array|Buffer} data the data to read.
* @return {DataReader} the data reader.
*/
module.exports = function (data) {
var type = utils.getTypeOf(data);
utils.checkSupport(type);
if (type === "string" && !support.uint8array) {
return new StringReader(data);
}
if (type === "nodebuffer") {
return new NodeBufferReader(data);
}
if (support.uint8array) {
return new Uint8ArrayReader(utils.transformTo("uint8array", data));
}
return new ArrayReader(utils.transformTo("array", data));
};
},{"../support":30,"../utils":32,"./ArrayReader":17,"./NodeBufferReader":19,"./
StringReader":20,"./Uint8ArrayReader":21}],23:[function(require,module,exports){
'use strict';
exports.LOCAL_FILE_HEADER = "PK\x03\x04";
exports.CENTRAL_FILE_HEADER = "PK\x01\x02";
exports.CENTRAL_DIRECTORY_END = "PK\x05\x06";
exports.ZIP64_CENTRAL_DIRECTORY_LOCATOR = "PK\x06\x07";
exports.ZIP64_CENTRAL_DIRECTORY_END = "PK\x06\x06";
exports.DATA_DESCRIPTOR = "PK\x07\x08";
},{}],24:[function(require,module,exports){
'use strict';
/**
 * A worker which converts chunks to a specified type.
* @constructor
* @param {String} destType the destination type.
*/
function ConvertWorker(destType) {
GenericWorker.call(this, "ConvertWorker to " + destType);
this.destType = destType;
}
utils.inherits(ConvertWorker, GenericWorker);
/**
* @see GenericWorker.processChunk
*/
ConvertWorker.prototype.processChunk = function (chunk) {
this.push({
data : utils.transformTo(this.destType, chunk.data),
meta : chunk.meta
});
};
module.exports = ConvertWorker;
},{"../utils":32,"./GenericWorker":28}],25:[function(require,module,exports){
'use strict';
/**
 * A worker which calculates the crc32 of the data flowing through.
* @constructor
*/
function Crc32Probe() {
GenericWorker.call(this, "Crc32Probe");
this.withStreamInfo("crc32", 0);
}
utils.inherits(Crc32Probe, GenericWorker);
/**
* @see GenericWorker.processChunk
*/
Crc32Probe.prototype.processChunk = function (chunk) {
this.streamInfo.crc32 = crc32(chunk.data, this.streamInfo.crc32 || 0);
this.push(chunk);
};
module.exports = Crc32Probe;
},{"../crc32":4,"../utils":32,"./GenericWorker":28}],26:
[function(require,module,exports){
'use strict';
/**
 * A worker which calculates the total length of the data flowing through.
* @constructor
* @param {String} propName the name used to expose the length
*/
function DataLengthProbe(propName) {
GenericWorker.call(this, "DataLengthProbe for " + propName);
this.propName = propName;
this.withStreamInfo(propName, 0);
}
utils.inherits(DataLengthProbe, GenericWorker);
/**
* @see GenericWorker.processChunk
*/
DataLengthProbe.prototype.processChunk = function (chunk) {
if(chunk) {
var length = this.streamInfo[this.propName] || 0;
this.streamInfo[this.propName] = length + chunk.data.length;
}
GenericWorker.prototype.processChunk.call(this, chunk);
};
module.exports = DataLengthProbe;
},{"../utils":32,"./GenericWorker":28}],27:[function(require,module,exports){
'use strict';
/**
* A worker that reads a content and emits chunks.
* @constructor
* @param {Promise} dataP the promise of the data to split
*/
function DataWorker(dataP) {
GenericWorker.call(this, "DataWorker");
var self = this;
this.dataIsReady = false;
this.index = 0;
this.max = 0;
this.data = null;
this.type = "";
this._tickScheduled = false;
dataP.then(function (data) {
self.dataIsReady = true;
self.data = data;
self.max = data && data.length || 0;
self.type = utils.getTypeOf(data);
if(!self.isPaused) {
self._tickAndRepeat();
}
}, function (e) {
self.error(e);
});
}
utils.inherits(DataWorker, GenericWorker);
/**
* @see GenericWorker.cleanUp
*/
DataWorker.prototype.cleanUp = function () {
GenericWorker.prototype.cleanUp.call(this);
this.data = null;
};
/**
* @see GenericWorker.resume
*/
DataWorker.prototype.resume = function () {
if(!GenericWorker.prototype.resume.call(this)) {
return false;
}
/**
 * Trigger a tick and schedule another call to this function.
*/
DataWorker.prototype._tickAndRepeat = function() {
this._tickScheduled = false;
if(this.isPaused || this.isFinished) {
return;
}
this._tick();
if(!this.isFinished) {
utils.delay(this._tickAndRepeat, [], this);
this._tickScheduled = true;
}
};
/**
* Read and push a chunk.
*/
DataWorker.prototype._tick = function() {
if(this.isPaused || this.isFinished) {
return false;
}
module.exports = DataWorker;
},{"../utils":32,"./GenericWorker":28}],28:[function(require,module,exports){
'use strict';
/**
* A worker that does nothing but passing chunks to the next one. This is like
* a nodejs stream but with some differences. On the good side :
* - it works on IE 6-9 without any issue / polyfill
 * - it weighs less than the full dependencies bundled with browserify
* - it forwards errors (no need to declare an error handler EVERYWHERE)
*
* A chunk is an object with 2 attributes : `meta` and `data`. The former is an
* object containing anything (`percent` for example), see each worker for more
* details. The latter is the real data (String, Uint8Array, etc).
*
* @constructor
* @param {String} name the name of the stream (mainly used for debugging purposes)
*/
function GenericWorker(name) {
// the name of the worker
this.name = name || "default";
// an object containing metadata about the workers chain
this.streamInfo = {};
// an error which happened when the worker was paused
this.generatedError = null;
    // an object containing metadata to be merged by this worker into the general metadata
this.extraStreamInfo = {};
// true if the stream is paused (and should not do anything), false otherwise
this.isPaused = true;
// true if the stream is finished (and should not do anything), false otherwise
this.isFinished = false;
    // true if the stream is locked to prevent further structure updates (pipe), false otherwise
this.isLocked = false;
// the event listeners
this._listeners = {
'data':[],
'end':[],
'error':[]
};
// the previous worker, if any
this.previous = null;
}
GenericWorker.prototype = {
/**
* Push a chunk to the next workers.
* @param {Object} chunk the chunk to push
*/
push : function (chunk) {
this.emit("data", chunk);
},
/**
* End the stream.
* @return {Boolean} true if this call ended the worker, false otherwise.
*/
end : function () {
if (this.isFinished) {
return false;
}
this.flush();
try {
this.emit("end");
this.cleanUp();
this.isFinished = true;
} catch (e) {
this.emit("error", e);
}
return true;
},
/**
* End the stream with an error.
* @param {Error} e the error which caused the premature end.
* @return {Boolean} true if this call ended the worker with an error, false
otherwise.
*/
error : function (e) {
if (this.isFinished) {
return false;
}
if(this.isPaused) {
this.generatedError = e;
} else {
this.isFinished = true;
this.emit("error", e);
this.cleanUp();
}
return true;
},
/**
* Add a callback on an event.
* @param {String} name the name of the event (data, end, error)
* @param {Function} listener the function to call when the event is triggered
* @return {GenericWorker} the current object for chainability
*/
on : function (name, listener) {
this._listeners[name].push(listener);
return this;
},
/**
* Clean any references when a worker is ending.
*/
cleanUp : function () {
this.streamInfo = this.generatedError = this.extraStreamInfo = null;
this._listeners = [];
},
/**
* Trigger an event. This will call registered callback with the provided arg.
* @param {String} name the name of the event (data, end, error)
* @param {Object} arg the argument to call the callback with.
*/
emit : function (name, arg) {
if (this._listeners[name]) {
for(var i = 0; i < this._listeners[name].length; i++) {
this._listeners[name][i].call(this, arg);
}
}
},
/**
* Chain a worker with an other.
* @param {Worker} next the worker receiving events from the current one.
* @return {worker} the next worker for chainability
*/
pipe : function (next) {
return next.registerPrevious(this);
},
/**
* Same as `pipe` in the other direction.
* Using an API with `pipe(next)` is very easy.
* Implementing the API with the point of view of the next one registering
* a source is easier, see the ZipFileWorker.
* @param {Worker} previous the previous worker, sending events to this one
* @return {Worker} the current worker for chainability
*/
registerPrevious : function (previous) {
if (this.isLocked) {
throw new Error("The stream '" + this + "' has already been used.");
}
if(this.previous) {
this.previous.pause();
}
        return this;
},
/**
* Resume a paused stream.
* @return {Boolean} true if this call resumed the worker, false otherwise.
*/
    resume : function () {
        if(!this.isPaused || this.isFinished) {
            return false;
        }
        this.isPaused = false;
        // if true, the worker tried to resume but failed
        var withError = false;
        if(this.generatedError) {
            this.error(this.generatedError);
            withError = true;
        }
        return !withError;
    },
/**
* Flush any remaining bytes as the stream is ending.
*/
flush : function () {},
/**
* Process a chunk. This is usually the method overridden.
* @param {Object} chunk the chunk to process.
*/
processChunk : function(chunk) {
this.push(chunk);
},
/**
* Add a key/value to be added in the workers chain streamInfo once activated.
* @param {String} key the key to use
* @param {Object} value the associated value
* @return {Worker} the current worker for chainability
*/
withStreamInfo : function (key, value) {
this.extraStreamInfo[key] = value;
this.mergeStreamInfo();
return this;
},
/**
* Merge this worker's streamInfo into the chain's streamInfo.
*/
mergeStreamInfo : function () {
for(var key in this.extraStreamInfo) {
if (!this.extraStreamInfo.hasOwnProperty(key)) {
continue;
}
this.streamInfo[key] = this.extraStreamInfo[key];
}
},
/**
* Lock the stream to prevent further updates on the workers chain.
* After calling this method, all calls to pipe will fail.
*/
lock: function () {
if (this.isLocked) {
throw new Error("The stream '" + this + "' has already been used.");
}
this.isLocked = true;
if (this.previous) {
this.previous.lock();
}
},
/**
*
* Pretty print the workers chain.
*/
toString : function () {
var me = "Worker " + this.name;
if (this.previous) {
return this.previous + " -> " + me;
} else {
return me;
}
}
};
module.exports = GenericWorker;
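// Illustrative sketch (not part of the library source): the minimal worker
// protocol described above. Chunks are {data, meta} objects; workers are chained
// with pipe() and events propagate along the chain.
//
//   var w = new GenericWorker("example");
//   w.on("data", function (chunk) { console.log(chunk.meta.percent, chunk.data); });
//   w.resume();
//   w.push({data: "abc", meta: {percent: 100}});
//   w.end();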
},{}],29:[function(require,module,exports){
'use strict';
/**
* Apply the final transformation of the data. If the user wants a Blob for
 * example, it's easier to work with a Uint8Array and finally do the
* ArrayBuffer/Blob conversion.
* @param {String} type the name of the final type
* @param {String|Uint8Array|Buffer} content the content to transform
* @param {String} mimeType the mime type of the content, if applicable.
* @return {String|Uint8Array|ArrayBuffer|Buffer|Blob} the content in the right
format.
*/
function transformZipOutput(type, content, mimeType) {
switch(type) {
case "blob" :
return utils.newBlob(utils.transformTo("arraybuffer", content),
mimeType);
case "base64" :
return base64.encode(content);
default :
return utils.transformTo(type, content);
}
}
/**
* Concatenate an array of data of the given type.
* @param {String} type the type of the data in the given array.
* @param {Array} dataArray the array containing the data chunks to concatenate
* @return {String|Uint8Array|Buffer} the concatenated data
* @throws Error if the asked type is unsupported
*/
function concat (type, dataArray) {
var i, index = 0, res = null, totalLength = 0;
for(i = 0; i < dataArray.length; i++) {
totalLength += dataArray[i].length;
}
switch(type) {
case "string":
return dataArray.join("");
case "array":
return Array.prototype.concat.apply([], dataArray);
case "uint8array":
res = new Uint8Array(totalLength);
for(i = 0; i < dataArray.length; i++) {
res.set(dataArray[i], index);
index += dataArray[i].length;
}
return res;
case "nodebuffer":
return Buffer.concat(dataArray);
default:
throw new Error("concat : unsupported type '" + type + "'");
}
}
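// Examples (added note): concat("string", ["ab", "cd"]) === "abcd";
// concat("uint8array", [Uint8Array.of(1, 2), Uint8Array.of(3)]) gives
// Uint8Array [1, 2, 3].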
/**
 * Listen to a StreamHelper, accumulate its content and concatenate it into a
* complete block.
* @param {StreamHelper} helper the helper to use.
* @param {Function} updateCallback a callback called on each update. Called
* with one arg :
* - the metadata linked to the update received.
* @return Promise the promise for the accumulation.
*/
function accumulate(helper, updateCallback) {
return new external.Promise(function (resolve, reject){
var dataArray = [];
var chunkType = helper._internalType,
resultType = helper._outputType,
mimeType = helper._mimeType;
helper
.on('data', function (data, meta) {
dataArray.push(data);
if(updateCallback) {
updateCallback(meta);
}
})
.on('error', function(err) {
dataArray = [];
reject(err);
})
.on('end', function (){
try {
var result = transformZipOutput(resultType, concat(chunkType,
dataArray), mimeType);
resolve(result);
} catch (e) {
reject(e);
}
dataArray = [];
})
.resume();
});
}
/**
 * A helper to easily use workers outside of JSZip.
 * @constructor
 * @param {Worker} worker the worker to wrap
 * @param {String} outputType the type of data expected by the user
* @param {String} mimeType the mime type of the content, if applicable.
*/
function StreamHelper(worker, outputType, mimeType) {
var internalType = outputType;
switch(outputType) {
case "blob":
case "arraybuffer":
internalType = "uint8array";
break;
case "base64":
internalType = "string";
break;
}
try {
// the type used internally
this._internalType = internalType;
// the type used to output results
this._outputType = outputType;
// the mime type
this._mimeType = mimeType;
utils.checkSupport(internalType);
this._worker = worker.pipe(new ConvertWorker(internalType));
// the last workers can be rewired without issues but we need to
// prevent any updates on previous workers.
worker.lock();
} catch(e) {
this._worker = new GenericWorker("error");
this._worker.error(e);
}
}
StreamHelper.prototype = {
/**
 * Listen to a StreamHelper, accumulate its content and concatenate it into a
* complete block.
* @param {Function} updateCb the update callback.
* @return Promise the promise for the accumulation.
*/
accumulate : function (updateCb) {
return accumulate(this, updateCb);
},
/**
* Add a listener on an event triggered on a stream.
* @param {String} evt the name of the event
* @param {Function} fn the listener
* @return {StreamHelper} the current helper.
*/
on : function (evt, fn) {
var self = this;
module.exports = StreamHelper;
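// Illustrative sketch (not part of the library source): consuming a StreamHelper
// returned by generateInternalStream, using the accumulate() helper above.
// `zip` is a hypothetical JSZip instance.
//
//   zip.generateInternalStream({type: "blob"})
//      .accumulate(function (meta) { console.log(meta.percent.toFixed(2) + "%"); })
//      .then(function (blob) { /* use the generated Blob */ });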
},{"../base64":1,"../external":6,"../nodejs/NodejsStreamOutputAdapter":13,"../
support":30,"../utils":32,"./ConvertWorker":24,"./GenericWorker":28}],30:
[function(require,module,exports){
'use strict';
exports.base64 = true;
exports.array = true;
exports.string = true;
exports.arraybuffer = typeof ArrayBuffer !== "undefined" && typeof Uint8Array !==
"undefined";
exports.nodebuffer = typeof Buffer !== "undefined";
// contains true if JSZip can read/generate Uint8Array, false otherwise.
exports.uint8array = typeof Uint8Array !== "undefined";
try {
exports.nodestream = !!require('readable-stream').Readable;
} catch(e) {
exports.nodestream = false;
}
},{"readable-stream":16}],31:[function(require,module,exports){
'use strict';
/**
* The following functions come from pako, from pako/lib/utils/strings
* released under the MIT license, see pako https://github.com/nodeca/pako/
*/
// allocate buffer
if (support.uint8array) {
buf = new Uint8Array(buf_len);
} else {
buf = new Array(buf_len);
}
// convert
for (i=0, m_pos = 0; i < buf_len; m_pos++) {
c = str.charCodeAt(m_pos);
if ((c & 0xfc00) === 0xd800 && (m_pos+1 < str_len)) {
c2 = str.charCodeAt(m_pos+1);
if ((c2 & 0xfc00) === 0xdc00) {
c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
m_pos++;
}
}
if (c < 0x80) {
/* one byte */
buf[i++] = c;
} else if (c < 0x800) {
/* two bytes */
buf[i++] = 0xC0 | (c >>> 6);
buf[i++] = 0x80 | (c & 0x3f);
} else if (c < 0x10000) {
/* three bytes */
buf[i++] = 0xE0 | (c >>> 12);
buf[i++] = 0x80 | (c >>> 6 & 0x3f);
buf[i++] = 0x80 | (c & 0x3f);
} else {
/* four bytes */
buf[i++] = 0xf0 | (c >>> 18);
buf[i++] = 0x80 | (c >>> 12 & 0x3f);
buf[i++] = 0x80 | (c >>> 6 & 0x3f);
buf[i++] = 0x80 | (c & 0x3f);
}
}
return buf;
};
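// Worked example (added note): following the branches above, "é" (U+00E9) encodes
// to the two bytes [0xC3, 0xA9] and "€" (U+20AC) to the three bytes
// [0xE2, 0x82, 0xAC]; astral characters take the four-byte branch.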
c_len = _utf8len[c];
// skip 5 & 6 byte codes
if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len-1; continue; }
if (c < 0x10000) {
utf16buf[out++] = c;
} else {
c -= 0x10000;
utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);
utf16buf[out++] = 0xdc00 | (c & 0x3ff);
}
}
// shrinkBuf(utf16buf, out)
if (utf16buf.length !== out) {
if(utf16buf.subarray) {
utf16buf = utf16buf.subarray(0, out);
} else {
utf16buf.length = out;
}
}
/**
* Transform a javascript string into an array (typed if possible) of bytes,
* UTF-8 encoded.
* @param {String} str the string to encode
* @return {Array|Uint8Array|Buffer} the UTF-8 encoded string.
*/
exports.utf8encode = function utf8encode(str) {
if (support.nodebuffer) {
return nodejsUtils.newBufferFrom(str, "utf-8");
}
return string2buf(str);
};
/**
* Transform a bytes array (or a representation) representing an UTF-8 encoded
* string into a javascript string.
 * @param {Array|Uint8Array|Buffer} buf the data to decode
* @return {String} the decoded string.
*/
exports.utf8decode = function utf8decode(buf) {
if (support.nodebuffer) {
return utils.transformTo("nodebuffer", buf).toString("utf-8");
}
return buf2string(buf);
};
/**
* A worker to decode utf8 encoded binary chunks into string chunks.
* @constructor
*/
function Utf8DecodeWorker() {
GenericWorker.call(this, "utf-8 decode");
// the last bytes if a chunk didn't end with a complete codepoint.
this.leftOver = null;
}
utils.inherits(Utf8DecodeWorker, GenericWorker);
/**
* @see GenericWorker.processChunk
*/
Utf8DecodeWorker.prototype.processChunk = function (chunk) {
this.push({
data : exports.utf8decode(usableData),
meta : chunk.meta
});
};
/**
* @see GenericWorker.flush
*/
Utf8DecodeWorker.prototype.flush = function () {
if(this.leftOver && this.leftOver.length) {
this.push({
data : exports.utf8decode(this.leftOver),
meta : {}
});
this.leftOver = null;
}
};
exports.Utf8DecodeWorker = Utf8DecodeWorker;
/**
 * A worker to encode string chunks into utf8 encoded binary chunks.
* @constructor
*/
function Utf8EncodeWorker() {
GenericWorker.call(this, "utf-8 encode");
}
utils.inherits(Utf8EncodeWorker, GenericWorker);
/**
* @see GenericWorker.processChunk
*/
Utf8EncodeWorker.prototype.processChunk = function (chunk) {
this.push({
data : exports.utf8encode(chunk.data),
meta : chunk.meta
});
};
exports.Utf8EncodeWorker = Utf8EncodeWorker;
},{"./nodejsUtils":14,"./stream/GenericWorker":28,"./support":30,"./utils":32}],32:
[function(require,module,exports){
'use strict';
/**
* Convert a string that passes as a "binary string": it should represent a byte
* array but may contain char codes > 255. Keep only the lowest byte of each
* char code and return the resulting byte array.
* @param {String} str the string to transform.
* @return {Array|Uint8Array} the string in a binary format.
*/
function string2binary(str) {
var result = null;
if (support.uint8array) {
result = new Uint8Array(str.length);
} else {
result = new Array(str.length);
}
return stringToArrayLike(str, result);
}
/**
* Create a new blob with the given content and the given type.
* @param {String|ArrayBuffer} part the content to put in the blob. DO NOT use
* a Uint8Array because the stock Android 4 browser won't accept it (it
* will be silently converted to a string, "[object Uint8Array]").
*
* Use only ONE part to build the blob to avoid a memory leak in IE11 / Edge:
* when a large number of arrays is used to create the Blob, the memory
* consumed can be nearly 100 times the original data size.
*
* @param {String} type the mime type of the blob.
* @return {Blob} the created blob.
*/
exports.newBlob = function(part, type) {
exports.checkSupport("blob");
try {
// Blob constructor
return new Blob([part], {
type: type
});
}
catch (e) {
try {
// deprecated, browser only, old way
var Builder = self.BlobBuilder || self.WebKitBlobBuilder ||
self.MozBlobBuilder || self.MSBlobBuilder;
var builder = new Builder();
builder.append(part);
return builder.getBlob(type);
}
catch (e) {
            // nothing worked, there is no way to build a Blob in this environment
throw new Error("Bug : can't construct the Blob.");
}
}
};
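/**
 * Usage sketch (assuming this module is loaded as `utils`, with `zipData`
 * being any supported binary input):
 *
 * ```javascript
 * var buffer = utils.transformTo("arraybuffer", zipData); // not a Uint8Array, see above
 * var blob = utils.newBlob(buffer, "application/zip");
 * // e.g. URL.createObjectURL(blob) to offer the archive as a download
 * ```
 */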
/**
* The identity function.
* @param {Object} input the input.
* @return {Object} the same input.
*/
function identity(input) {
return input;
}
/**
* Fill in an array with a string.
* @param {String} str the string to use.
* @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to fill in (will be mutated).
* @return {Array|ArrayBuffer|Uint8Array|Buffer} the updated array.
*/
function stringToArrayLike(str, array) {
for (var i = 0; i < str.length; ++i) {
array[i] = str.charCodeAt(i) & 0xFF;
}
return array;
}
/**
* A helper for the function arrayLikeToString.
* It contains static information and functions that
* can be optimized by the browser JIT compiler.
*/
var arrayToStringHelper = {
/**
* Transform an array of int into a string, chunk by chunk.
* See the performances notes on arrayLikeToString.
* @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform.
* @param {String} type the type of the array.
* @param {Integer} chunk the chunk size.
* @return {String} the resulting string.
* @throws Error if the chunk is too big for the stack.
*/
stringifyByChunk: function(array, type, chunk) {
var result = [], k = 0, len = array.length;
// shortcut
if (len <= chunk) {
return String.fromCharCode.apply(null, array);
}
while (k < len) {
if (type === "array" || type === "nodebuffer") {
result.push(String.fromCharCode.apply(null, array.slice(k,
Math.min(k + chunk, len))));
}
else {
result.push(String.fromCharCode.apply(null, array.subarray(k,
Math.min(k + chunk, len))));
}
k += chunk;
}
return result.join("");
},
/**
* Call String.fromCharCode on every item in the array.
* This is the naive implementation, which generates a lot of intermediate
* strings. It should only be used when everything else fails.
* @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform.
* @return {String} the result.
*/
stringifyByChar: function(array){
var resultStr = "";
for(var i = 0; i < array.length; i++) {
resultStr += String.fromCharCode(array[i]);
}
return resultStr;
},
applyCanBeUsed : {
/**
* true if the browser allows String.fromCharCode to be called on a Uint8Array
*/
uint8array : (function () {
try {
return support.uint8array && String.fromCharCode.apply(null, new
Uint8Array(1)).length === 1;
} catch (e) {
return false;
}
})(),
/**
* true if the browser allows String.fromCharCode to be called on a nodejs Buffer.
*/
nodebuffer : (function () {
try {
return support.nodebuffer && String.fromCharCode.apply(null,
nodejsUtils.allocBuffer(1)).length === 1;
} catch (e) {
return false;
}
})()
}
};
/**
* Transform an array-like object to a string.
* @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform.
* @return {String} the result.
*/
function arrayLikeToString(array) {
// Performances notes :
// --------------------
// String.fromCharCode.apply(null, array) is the fastest,
// see http://jsperf.com/converting-a-uint8array-to-a-string/2
// but the stack is limited (and we can get huge arrays !).
//
// result += String.fromCharCode(array[i]); generates too many strings !
//
// This code is inspired by http://jsperf.com/arraybuffer-to-string-apply-performance/2
// TODO : we now have workers that split the work. Do we still need that ?
var chunk = 65536,
type = exports.getTypeOf(array),
canUseApply = true;
if (type === "uint8array") {
canUseApply = arrayToStringHelper.applyCanBeUsed.uint8array;
} else if (type === "nodebuffer") {
canUseApply = arrayToStringHelper.applyCanBeUsed.nodebuffer;
}
    if (canUseApply) {
        while (chunk > 1) {
            try {
                return arrayToStringHelper.stringifyByChunk(array, type, chunk);
            } catch (e) {
                chunk = Math.floor(chunk / 2);
            }
        }
    }
    // no apply or chunk error : fall back to the slow, char by char conversion
    return arrayToStringHelper.stringifyByChar(array);
}
exports.applyFromCharCode = arrayLikeToString;
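/**
 * Usage sketch (assuming this module is loaded as `utils`): the exported alias
 * converts any array-like of byte values back into a string, picking the
 * fastest safe strategy.
 *
 * ```javascript
 * utils.applyFromCharCode(new Uint8Array([80, 75, 3, 4])); // "PK\u0003\u0004"
 * ```
 */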
/**
* Copy the data from an array-like to an other array-like.
* @param {Array|ArrayBuffer|Uint8Array|Buffer} arrayFrom the origin array.
* @param {Array|ArrayBuffer|Uint8Array|Buffer} arrayTo the destination array which will be mutated.
* @return {Array|ArrayBuffer|Uint8Array|Buffer} the updated destination array.
*/
function arrayLikeToArrayLike(arrayFrom, arrayTo) {
for (var i = 0; i < arrayFrom.length; i++) {
arrayTo[i] = arrayFrom[i];
}
return arrayTo;
}
// string to ?
transform["string"] = {
"string": identity,
"array": function(input) {
return stringToArrayLike(input, new Array(input.length));
},
"arraybuffer": function(input) {
return transform["string"]["uint8array"](input).buffer;
},
"uint8array": function(input) {
return stringToArrayLike(input, new Uint8Array(input.length));
},
"nodebuffer": function(input) {
return stringToArrayLike(input, nodejsUtils.allocBuffer(input.length));
}
};
// array to ?
transform["array"] = {
"string": arrayLikeToString,
"array": identity,
"arraybuffer": function(input) {
return (new Uint8Array(input)).buffer;
},
"uint8array": function(input) {
return new Uint8Array(input);
},
"nodebuffer": function(input) {
return nodejsUtils.newBufferFrom(input);
}
};
// arraybuffer to ?
transform["arraybuffer"] = {
"string": function(input) {
return arrayLikeToString(new Uint8Array(input));
},
"array": function(input) {
        return arrayLikeToArrayLike(new Uint8Array(input), new Array(input.byteLength));
},
"arraybuffer": identity,
"uint8array": function(input) {
return new Uint8Array(input);
},
"nodebuffer": function(input) {
return nodejsUtils.newBufferFrom(new Uint8Array(input));
}
};
// uint8array to ?
transform["uint8array"] = {
"string": arrayLikeToString,
"array": function(input) {
return arrayLikeToArrayLike(input, new Array(input.length));
},
"arraybuffer": function(input) {
return input.buffer;
},
"uint8array": identity,
"nodebuffer": function(input) {
return nodejsUtils.newBufferFrom(input);
}
};
// nodebuffer to ?
transform["nodebuffer"] = {
"string": arrayLikeToString,
"array": function(input) {
return arrayLikeToArrayLike(input, new Array(input.length));
},
"arraybuffer": function(input) {
return transform["nodebuffer"]["uint8array"](input).buffer;
},
"uint8array": function(input) {
return arrayLikeToArrayLike(input, new Uint8Array(input.length));
},
"nodebuffer": identity
};
/**
* Transform an input into any type.
* The supported output types are : string, array, uint8array, arraybuffer, nodebuffer.
* If no output type is specified, the unmodified input will be returned.
* @param {String} outputType the output type.
* @param {String|Array|ArrayBuffer|Uint8Array|Buffer} input the input to convert.
* @return {String|Array|ArrayBuffer|Uint8Array|Buffer} the converted input.
* @throws {Error} an Error if the browser doesn't support the requested output type.
*/
exports.transformTo = function(outputType, input) {
if (!input) {
// undefined, null, etc
// an empty string won't harm.
input = "";
}
if (!outputType) {
return input;
}
exports.checkSupport(outputType);
var inputType = exports.getTypeOf(input);
var result = transform[inputType][outputType](input);
return result;
};
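/**
 * Usage sketch (assuming this module is loaded as `utils`):
 *
 * ```javascript
 * var bytes = utils.transformTo("uint8array", "PK\x03\x04"); // Uint8Array [80, 75, 3, 4]
 * var str = utils.transformTo("string", bytes);              // "PK\u0003\u0004" again
 * ```
 */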
/**
* Return the type of the input.
* The type will be in a format valid for JSZip.utils.transformTo : string, array, uint8array, arraybuffer.
* @param {Object} input the input to identify.
* @return {String} the (lowercase) type of the input.
*/
exports.getTypeOf = function(input) {
if (typeof input === "string") {
return "string";
}
if (Object.prototype.toString.call(input) === "[object Array]") {
return "array";
}
if (support.nodebuffer && nodejsUtils.isBuffer(input)) {
return "nodebuffer";
}
if (support.uint8array && input instanceof Uint8Array) {
return "uint8array";
}
if (support.arraybuffer && input instanceof ArrayBuffer) {
return "arraybuffer";
}
};
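/**
 * Example values (sketch, assuming the module is loaded as `utils`):
 *
 * ```javascript
 * utils.getTypeOf("zip");              // "string"
 * utils.getTypeOf([80, 75]);           // "array"
 * utils.getTypeOf(new Uint8Array(2));  // "uint8array"
 * utils.getTypeOf(new ArrayBuffer(2)); // "arraybuffer"
 * // anything else (a Blob, a number, ...) yields undefined
 * ```
 */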
/**
* Throw an exception if the type is not supported.
* @param {String} type the type to check.
* @throws {Error} an Error if the browser doesn't support the requested type.
*/
exports.checkSupport = function(type) {
var supported = support[type.toLowerCase()];
if (!supported) {
throw new Error(type + " is not supported by this platform");
}
};
exports.MAX_VALUE_16BITS = 65535;
exports.MAX_VALUE_32BITS = -1; // well, "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" is parsed as -1
/**
* Prettify a string read as binary.
* @param {string} str the string to prettify.
* @return {string} a pretty string.
*/
exports.pretty = function(str) {
var res = '',
code, i;
for (i = 0; i < (str || "").length; i++) {
code = str.charCodeAt(i);
res += '\\x' + (code < 16 ? "0" : "") + code.toString(16).toUpperCase();
}
return res;
};
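/**
 * Example: `utils.pretty("PK\x03\x04")` returns the text "\x50\x4B\x03\x04",
 * which is how unexpected signatures are reported in error messages.
 */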
/**
* Defer the call of a function.
* @param {Function} callback the function to call asynchronously.
* @param {Array} args the arguments to give to the callback.
* @param {Object} self the value of `this` when calling the callback (optional).
*/
exports.delay = function(callback, args, self) {
setImmediate(function () {
callback.apply(self || null, args || []);
});
};
/**
* Extends a prototype with another, without calling a constructor with
* side effects. Inspired by nodejs' `utils.inherits`
* @param {Function} ctor the constructor to augment
* @param {Function} superCtor the parent constructor to use
*/
exports.inherits = function (ctor, superCtor) {
var Obj = function() {};
Obj.prototype = superCtor.prototype;
ctor.prototype = new Obj();
};
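/**
 * Usage sketch (assuming the module is loaded as `utils`): link two prototypes
 * without running the parent constructor.
 *
 * ```javascript
 * function Animal(name) { this.name = name; }
 * Animal.prototype.speak = function () { return this.name + " makes a sound"; };
 *
 * function Dog(name) { Animal.call(this, name); }
 * utils.inherits(Dog, Animal);
 *
 * new Dog("Rex").speak(); // "Rex makes a sound"
 * ```
 */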
/**
* Merge the objects passed as parameters into a new one.
* @private
* @param {...Object} var_args All objects to merge.
* @return {Object} a new object with the data of the others.
*/
exports.extend = function() {
var result = {}, i, attr;
    for (i = 0; i < arguments.length; i++) { // arguments is not enumerable in some browsers
        for (attr in arguments[i]) {
            if (arguments[i].hasOwnProperty(attr) && typeof result[attr] === "undefined") {
result[attr] = arguments[i][attr];
}
}
}
return result;
};
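/**
 * Usage sketch (assuming the module is loaded as `utils`): note that earlier
 * arguments win, because an attribute is only copied while it is still
 * undefined on the result.
 *
 * ```javascript
 * utils.extend({ level: 9 }, { level: 1, comment: "hi" });
 * // { level: 9, comment: "hi" }
 * ```
 */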
/**
* Transform arbitrary content into a Promise.
* @param {String} name a name for the content being processed.
* @param {Object} inputData the content to process.
* @param {Boolean} isBinary true if the content is not a unicode string
* @param {Boolean} isOptimizedBinaryString true if the string content only has one byte per character.
* @param {Boolean} isBase64 true if the string content is encoded with base64.
* @return {Promise} a promise in a format usable by JSZip.
*/
exports.prepareContent = function(name, inputData, isBinary,
isOptimizedBinaryString, isBase64) {
reader.onload = function(e) {
resolve(e.target.result);
};
reader.onerror = function(e) {
reject(e.target.error);
};
reader.readAsArrayBuffer(data);
});
} else {
return data;
}
});
return promise.then(function(data) {
var dataType = exports.getTypeOf(data);
if (!dataType) {
return external.Promise.reject(
new Error("Can't read the data of '" + name + "'. Is it " +
"in a supported JavaScript type (String, Blob,
ArrayBuffer, etc) ?")
);
}
        // special case : it's way easier to work with Uint8Array than with ArrayBuffer
if (dataType === "arraybuffer") {
data = exports.transformTo("uint8array", data);
} else if (dataType === "string") {
if (isBase64) {
data = base64.decode(data);
}
else if (isBinary) {
                // optimizedBinaryString === true means that the file has already been filtered with a 0xFF mask
if (isOptimizedBinaryString !== true) {
// this is a string, not in a base64 format.
// Be sure that this is a correct "binary string"
data = string2binary(data);
}
}
}
return data;
});
};
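/**
 * Usage sketch (illustrative only, internal API, assuming the module is loaded
 * as `utils`):
 *
 * ```javascript
 * utils.prepareContent("hello.txt", "aGVsbG8=", true, false, true)
 *     .then(function (data) {
 *         // data is the decoded binary content of "hello"
 *     });
 * ```
 */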
},{"./base64":1,"./external":6,"./nodejsUtils":14,"./support":30,"set-immediate-
shim":54}],33:[function(require,module,exports){
'use strict';
var readerFor = require('./reader/readerFor');
var utils = require('./utils');
var sig = require('./signature');
var ZipEntry = require('./zipEntry');
var utf8 = require('./utf8');
var support = require('./support');
// class ZipEntries {{{
/**
* All the entries in the zip file.
* @constructor
* @param {Object} loadOptions Options for loading the stream.
*/
function ZipEntries(loadOptions) {
this.files = [];
this.loadOptions = loadOptions;
}
ZipEntries.prototype = {
/**
* Check that the reader is on the specified signature.
* @param {string} expectedSignature the expected signature.
* @throws {Error} if it is another signature.
*/
checkSignature: function(expectedSignature) {
if (!this.reader.readAndCheckSignature(expectedSignature)) {
this.reader.index -= 4;
var signature = this.reader.readString(4);
throw new Error("Corrupted zip or bug: unexpected signature " + "(" +
utils.pretty(signature) + ", expected " + utils.pretty(expectedSignature) + ")");
}
},
/**
* Check if the given signature is at the given index.
* @param {number} askedIndex the index to check.
* @param {string} expectedSignature the signature to expect.
* @return {boolean} true if the signature is here, false otherwise.
*/
isSignature: function(askedIndex, expectedSignature) {
var currentIndex = this.reader.index;
this.reader.setIndex(askedIndex);
var signature = this.reader.readString(4);
var result = signature === expectedSignature;
this.reader.setIndex(currentIndex);
return result;
},
/**
* Read the end of the central directory.
*/
readBlockEndOfCentral: function() {
this.diskNumber = this.reader.readInt(2);
this.diskWithCentralDirStart = this.reader.readInt(2);
this.centralDirRecordsOnThisDisk = this.reader.readInt(2);
this.centralDirRecords = this.reader.readInt(2);
this.centralDirSize = this.reader.readInt(4);
this.centralDirOffset = this.reader.readInt(4);
this.zipCommentLength = this.reader.readInt(2);
        // warning : the encoding depends on the system locale.
        // On a linux machine with LANG=en_US.utf8, this field is utf8 encoded.
        // On a windows machine, this field is encoded with the localized windows code page.
var zipComment = this.reader.readData(this.zipCommentLength);
var decodeParamType = support.uint8array ? "uint8array" : "array";
// To get consistent behavior with the generation part, we will assume that
// this is utf8 encoded unless specified otherwise.
var decodeContent = utils.transformTo(decodeParamType, zipComment);
this.zipComment = this.loadOptions.decodeFileName(decodeContent);
},
/**
* Read the end of the Zip 64 central directory.
* Not merged with the method readEndOfCentral :
* The end of central can coexist with its Zip64 brother,
* I don't want to read the wrong number of bytes !
*/
readBlockZip64EndOfCentral: function() {
this.zip64EndOfCentralSize = this.reader.readInt(8);
this.reader.skip(4);
// this.versionMadeBy = this.reader.readString(2);
// this.versionNeeded = this.reader.readInt(2);
this.diskNumber = this.reader.readInt(4);
this.diskWithCentralDirStart = this.reader.readInt(4);
this.centralDirRecordsOnThisDisk = this.reader.readInt(8);
this.centralDirRecords = this.reader.readInt(8);
this.centralDirSize = this.reader.readInt(8);
this.centralDirOffset = this.reader.readInt(8);
this.zip64ExtensibleData = {};
var extraDataSize = this.zip64EndOfCentralSize - 44,
index = 0,
extraFieldId,
extraFieldLength,
extraFieldValue;
while (index < extraDataSize) {
extraFieldId = this.reader.readInt(2);
extraFieldLength = this.reader.readInt(4);
extraFieldValue = this.reader.readData(extraFieldLength);
this.zip64ExtensibleData[extraFieldId] = {
id: extraFieldId,
length: extraFieldLength,
value: extraFieldValue
};
}
},
/**
* Read the end of the Zip 64 central directory locator.
*/
readBlockZip64EndOfCentralLocator: function() {
this.diskWithZip64CentralDirStart = this.reader.readInt(4);
this.relativeOffsetEndOfZip64CentralDir = this.reader.readInt(8);
this.disksCount = this.reader.readInt(4);
if (this.disksCount > 1) {
throw new Error("Multi-volumes zip are not supported");
}
},
/**
* Read the local files, based on the offset read in the central part.
*/
readLocalFiles: function() {
var i, file;
for (i = 0; i < this.files.length; i++) {
file = this.files[i];
this.reader.setIndex(file.localHeaderOffset);
this.checkSignature(sig.LOCAL_FILE_HEADER);
file.readLocalPart(this.reader);
file.handleUTF8();
file.processAttributes();
}
},
/**
* Read the central directory.
*/
readCentralDir: function() {
var file;
this.reader.setIndex(this.centralDirOffset);
while (this.reader.readAndCheckSignature(sig.CENTRAL_FILE_HEADER)) {
file = new ZipEntry({
zip64: this.zip64
}, this.loadOptions);
file.readCentralPart(this.reader);
this.files.push(file);
}
if (isGarbage) {
throw new Error("Can't find end of central directory : is this a
zip file ? " +
"If it is, see
https://stuk.github.io/jszip/documentation/howto/read_zip.html");
} else {
throw new Error("Corrupted zip: can't find end of central
directory");
}
}
this.reader.setIndex(offset);
var endOfCentralDirOffset = offset;
this.checkSignature(sig.CENTRAL_DIRECTORY_END);
this.readBlockEndOfCentral();
        /*
        Warning : the zip64 extension is supported, but ONLY if the 64bits integer read from
        the zip file can fit into a 32bits integer. This cannot be solved : JavaScript represents
        all numbers as 64-bit double precision IEEE 754 floating point numbers.
        So, we have 53bits for integers and bitwise operations treat everything as 32bits.
        see https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Operators/Bitwise_Operators
        and http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf section 8.5
        */
if (extraBytes > 0) {
            // console.warn(extraBytes, "extra bytes at beginning or within zipfile");
            if (this.isSignature(endOfCentralDirOffset, sig.CENTRAL_FILE_HEADER)) {
                // The offsets seem wrong, but we have something at the specified offset.
                // So… we keep it.
            } else {
                // the offset is wrong, update the "zero" of the reader
                // this happens if data has been prepended (crx files for example)
                this.reader.zero = extraBytes;
            }
        } else if (extraBytes < 0) {
            throw new Error("Corrupted zip: missing " + Math.abs(extraBytes) + " bytes.");
}
},
prepareReader: function(data) {
this.reader = readerFor(data);
},
/**
* Read a zip file and create ZipEntries.
* @param {String|ArrayBuffer|Uint8Array|Buffer} data the binary string representing a zip file.
*/
load: function(data) {
this.prepareReader(data);
this.readEndOfCentral();
this.readCentralDir();
this.readLocalFiles();
}
};
// }}} end of ZipEntries
module.exports = ZipEntries;
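/**
 * Usage sketch (simplified view of how JSZip#loadAsync drives this class):
 *
 * ```javascript
 * var entries = new ZipEntries({
 *     decodeFileName: utf8.utf8decode // or a custom decoder from the load options
 * });
 * entries.load(binaryData); // String/ArrayBuffer/Uint8Array/Buffer
 * // entries.files now holds one ZipEntry per file stored in the archive
 * ```
 */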
},{"./reader/readerFor":22,"./signature":23,"./support":30,"./utf8":31,"./
utils":32,"./zipEntry":34}],34:[function(require,module,exports){
'use strict';
var readerFor = require('./reader/readerFor');
var utils = require('./utils');
var CompressedObject = require('./compressedObject');
var crc32fn = require('./crc32');
var utf8 = require('./utf8');
var compressions = require('./compressions');
var support = require('./support');
/**
* Find a compression registered in JSZip.
* @param {string} compressionMethod the method magic to find.
* @return {Object|null} the JSZip compression object, null if none found.
*/
var findCompression = function(compressionMethod) {
for (var method in compressions) {
if (!compressions.hasOwnProperty(method)) {
continue;
}
if (compressions[method].magic === compressionMethod) {
return compressions[method];
}
}
return null;
};
compression = findCompression(this.compressionMethod);
if (compression === null) { // no compression found
throw new Error("Corrupted zip : compression " +
utils.pretty(this.compressionMethod) + " unknown (inner file : " +
utils.transformTo("string", this.fileName) + ")");
}
this.decompressed = new CompressedObject(this.compressedSize,
this.uncompressedSize, this.crc32, compression,
reader.readData(this.compressedSize));
},
/**
* Read the central part of a zip file and add the info in this object.
* @param {DataReader} reader the reader to use.
*/
readCentralPart: function(reader) {
this.versionMadeBy = reader.readInt(2);
reader.skip(2);
// this.versionNeeded = reader.readInt(2);
this.bitFlag = reader.readInt(2);
this.compressionMethod = reader.readString(2);
this.date = reader.readDate();
this.crc32 = reader.readInt(4);
this.compressedSize = reader.readInt(4);
this.uncompressedSize = reader.readInt(4);
var fileNameLength = reader.readInt(2);
this.extraFieldsLength = reader.readInt(2);
this.fileCommentLength = reader.readInt(2);
this.diskNumberStart = reader.readInt(2);
this.internalFileAttributes = reader.readInt(2);
this.externalFileAttributes = reader.readInt(4);
this.localHeaderOffset = reader.readInt(4);
if (this.isEncrypted()) {
throw new Error("Encrypted zip are not supported");
}
/**
* Parse the external file attributes and get the unix/dos permissions.
*/
processAttributes: function () {
this.unixPermissions = null;
this.dosPermissions = null;
var madeBy = this.versionMadeBy >> 8;
/**
* Parse the ZIP64 extra field and merge the info in the current ZipEntry.
* @param {DataReader} reader the reader to use.
*/
parseZIP64ExtraField: function(reader) {
if (!this.extraFields[0x0001]) {
return;
}
        // I really hope that these 64bits integer can fit in 32 bits integer, because js
        // won't let us have more.
if (this.uncompressedSize === utils.MAX_VALUE_32BITS) {
this.uncompressedSize = extraReader.readInt(8);
}
if (this.compressedSize === utils.MAX_VALUE_32BITS) {
this.compressedSize = extraReader.readInt(8);
}
if (this.localHeaderOffset === utils.MAX_VALUE_32BITS) {
this.localHeaderOffset = extraReader.readInt(8);
}
if (this.diskNumberStart === utils.MAX_VALUE_32BITS) {
this.diskNumberStart = extraReader.readInt(4);
}
},
/**
* Read the extra fields (ID + length + value) of a zip file.
* @param {DataReader} reader the reader to use.
*/
readExtraFields: function(reader) {
var end = reader.index + this.extraFieldsLength,
extraFieldId,
extraFieldLength,
extraFieldValue;
if (!this.extraFields) {
this.extraFields = {};
}
this.extraFields[extraFieldId] = {
id: extraFieldId,
length: extraFieldLength,
value: extraFieldValue
};
}
},
/**
* Apply an UTF8 transformation if needed.
*/
handleUTF8: function() {
var decodeParamType = support.uint8array ? "uint8array" : "array";
if (this.useUTF8()) {
this.fileNameStr = utf8.utf8decode(this.fileName);
this.fileCommentStr = utf8.utf8decode(this.fileComment);
} else {
var upath = this.findExtraFieldUnicodePath();
if (upath !== null) {
this.fileNameStr = upath;
} else {
// ASCII text or unsupported code page
var fileNameByteArray = utils.transformTo(decodeParamType,
this.fileName);
this.fileNameStr =
this.loadOptions.decodeFileName(fileNameByteArray);
}
/**
* Find the unicode path declared in the extra field, if any.
* @return {String} the unicode path, null otherwise.
*/
findExtraFieldUnicodePath: function() {
var upathField = this.extraFields[0x7075];
if (upathField) {
var extraReader = readerFor(upathField.value);
// wrong version
if (extraReader.readInt(1) !== 1) {
return null;
}
/**
* Find the unicode comment declared in the extra field, if any.
* @return {String} the unicode comment, null otherwise.
*/
findExtraFieldUnicodeComment: function() {
var ucommentField = this.extraFields[0x6375];
if (ucommentField) {
var extraReader = readerFor(ucommentField.value);
// wrong version
if (extraReader.readInt(1) !== 1) {
return null;
}
},{"./compressedObject":2,"./compressions":3,"./crc32":4,"./reader/
readerFor":22,"./support":30,"./utf8":31,"./utils":32}],35:
[function(require,module,exports){
'use strict';
/**
* A simple object representing a file in the zip file.
* @constructor
* @param {string} name the name of the file
* @param {String|ArrayBuffer|Uint8Array|Buffer} data the data
* @param {Object} options the options of the file
*/
var ZipObject = function(name, data, options) {
this.name = name;
this.dir = options.dir;
this.date = options.date;
this.comment = options.comment;
this.unixPermissions = options.unixPermissions;
this.dosPermissions = options.dosPermissions;
this._data = data;
this._dataBinary = options.binary;
// keep only the compression
this.options = {
compression : options.compression,
compressionOptions : options.compressionOptions
};
};
ZipObject.prototype = {
/**
* Create an internal stream for the content of this object.
* @param {String} type the type of each chunk.
* @return StreamHelper the stream.
*/
internalStream: function (type) {
var result = null, outputType = "string";
try {
if (!type) {
throw new Error("No output type specified.");
}
outputType = type.toLowerCase();
var askUnicodeString = outputType === "string" || outputType ===
"text";
if (outputType === "binarystring" || outputType === "text") {
outputType = "string";
}
result = this._decompressWorker();
/**
* Prepare the content in the asked type.
* @param {String} type the type of the result.
* @param {Function} onUpdate a function to call on each internal update.
* @return Promise the promise of the result.
*/
async: function (type, onUpdate) {
return this.internalStream(type).accumulate(onUpdate);
},
/**
* Prepare the content as a nodejs stream.
* @param {String} type the type of each chunk.
* @param {Function} onUpdate a function to call on each internal update.
* @return Stream the stream.
*/
nodeStream: function (type, onUpdate) {
return this.internalStream(type || "nodebuffer").toNodejsStream(onUpdate);
},
/**
* Return a worker for the compressed content.
* @private
* @param {Object} compression the compression object to use.
* @param {Object} compressionOptions the options to use when compressing.
* @return Worker the worker.
*/
_compressWorker: function (compression, compressionOptions) {
if (
this._data instanceof CompressedObject &&
this._data.compression.magic === compression.magic
) {
return this._data.getCompressedWorker();
} else {
var result = this._decompressWorker();
if(!this._dataBinary) {
result = result.pipe(new utf8.Utf8EncodeWorker());
}
return CompressedObject.createWorkerFrom(result, compression,
compressionOptions);
}
},
/**
* Return a worker for the decompressed content.
* @private
* @return Worker the worker.
*/
_decompressWorker : function () {
if (this._data instanceof CompressedObject) {
return this._data.getContentWorker();
} else if (this._data instanceof GenericWorker) {
return this._data;
} else {
return new DataWorker(this._data);
}
}
};
},{"./compressedObject":2,"./stream/DataWorker":27,"./stream/GenericWorker":28,"./
stream/StreamHelper":29,"./utf8":31}],36:[function(require,module,exports){
(function (global){
'use strict';
var Mutation = global.MutationObserver || global.WebKitMutationObserver;
var scheduleDrain;
{
if (Mutation) {
var called = 0;
var observer = new Mutation(nextTick);
var element = global.document.createTextNode('');
observer.observe(element, {
characterData: true
});
scheduleDrain = function () {
element.data = (called = ++called % 2);
};
} else if (!global.setImmediate && typeof global.MessageChannel !== 'undefined')
{
var channel = new global.MessageChannel();
channel.port1.onmessage = nextTick;
scheduleDrain = function () {
channel.port2.postMessage(0);
};
    } else if ('document' in global && 'onreadystatechange' in global.document.createElement('script')) {
        scheduleDrain = function () {
            // Create a <script> element; its readystatechange event fires
            // asynchronously once it is inserted into the document.
            var scriptEl = global.document.createElement('script');
            scriptEl.onreadystatechange = function () {
                nextTick();
                scriptEl.onreadystatechange = null;
                scriptEl.parentNode.removeChild(scriptEl);
                scriptEl = null;
            };
            global.document.documentElement.appendChild(scriptEl);
        };
} else {
scheduleDrain = function () {
setTimeout(nextTick, 0);
};
}
}
var draining;
var queue = [];
//named nextTick for less confusing stack traces
function nextTick() {
draining = true;
var i, oldQueue;
var len = queue.length;
while (len) {
oldQueue = queue;
queue = [];
i = -1;
while (++i < len) {
oldQueue[i]();
}
len = queue.length;
}
draining = false;
}
module.exports = immediate;
function immediate(task) {
if (queue.push(task) === 1 && !draining) {
scheduleDrain();
}
}
module.exports = Promise;
function Promise(resolver) {
if (typeof resolver !== 'function') {
throw new TypeError('resolver must be a function');
}
this.state = PENDING;
this.queue = [];
this.outcome = void 0;
if (resolver !== INTERNAL) {
safelyResolveThenable(this, resolver);
}
}
function resolve(value) {
function yes () {
return value;
}
return p.resolve(callback()).then(yes);
}
function reject(reason) {
function no () {
throw reason;
}
return p.resolve(callback()).then(no);
}
};
Promise.prototype["catch"] = function (onRejected) {
return this.then(null, onRejected);
};
Promise.prototype.then = function (onFulfilled, onRejected) {
if (typeof onFulfilled !== 'function' && this.state === FULFILLED ||
typeof onRejected !== 'function' && this.state === REJECTED) {
return this;
}
var promise = new this.constructor(INTERNAL);
if (this.state !== PENDING) {
var resolver = this.state === FULFILLED ? onFulfilled : onRejected;
unwrap(promise, resolver, this.outcome);
} else {
this.queue.push(new QueueItem(promise, onFulfilled, onRejected));
}
return promise;
};
function QueueItem(promise, onFulfilled, onRejected) {
this.promise = promise;
if (typeof onFulfilled === 'function') {
this.onFulfilled = onFulfilled;
this.callFulfilled = this.otherCallFulfilled;
}
if (typeof onRejected === 'function') {
this.onRejected = onRejected;
this.callRejected = this.otherCallRejected;
}
}
QueueItem.prototype.callFulfilled = function (value) {
handlers.resolve(this.promise, value);
};
QueueItem.prototype.otherCallFulfilled = function (value) {
unwrap(this.promise, this.onFulfilled, value);
};
QueueItem.prototype.callRejected = function (value) {
handlers.reject(this.promise, value);
};
QueueItem.prototype.otherCallRejected = function (value) {
unwrap(this.promise, this.onRejected, value);
};
function unwrap(promise, func, value) {
immediate(function () {
var returnValue;
try {
returnValue = func(value);
} catch (e) {
return handlers.reject(promise, e);
}
if (returnValue === promise) {
            handlers.reject(promise, new TypeError('Cannot resolve promise with itself'));
} else {
handlers.resolve(promise, returnValue);
}
});
}
if (thenable) {
safelyResolveThenable(self, thenable);
} else {
self.state = FULFILLED;
self.outcome = value;
var i = -1;
var len = self.queue.length;
while (++i < len) {
self.queue[i].callFulfilled(value);
}
}
return self;
};
handlers.reject = function (self, error) {
self.state = REJECTED;
self.outcome = error;
var i = -1;
var len = self.queue.length;
while (++i < len) {
self.queue[i].callRejected(error);
}
return self;
};
function getThen(obj) {
// Make sure we only access the accessor once as required by the spec
var then = obj && obj.then;
if (obj && (typeof obj === 'object' || typeof obj === 'function') && typeof then
=== 'function') {
return function appyThen() {
then.apply(obj, arguments);
};
}
}
function safelyResolveThenable(self, thenable) {
// Either fulfill, reject or reject with error
var called = false;
function onError(value) {
if (called) {
return;
}
called = true;
handlers.reject(self, value);
}
function onSuccess(value) {
if (called) {
return;
}
called = true;
handlers.resolve(self, value);
}
function tryToUnwrap() {
thenable(onSuccess, onError);
}
Promise.resolve = resolve;
function resolve(value) {
if (value instanceof this) {
return value;
}
return handlers.resolve(new this(INTERNAL), value);
}
Promise.reject = reject;
function reject(reason) {
var promise = new this(INTERNAL);
return handlers.reject(promise, reason);
}
Promise.all = all;
function all(iterable) {
var self = this;
if (Object.prototype.toString.call(iterable) !== '[object Array]') {
return this.reject(new TypeError('must be an array'));
}
Promise.race = race;
function race(iterable) {
var self = this;
if (Object.prototype.toString.call(iterable) !== '[object Array]') {
return this.reject(new TypeError('must be an array'));
}
var i = -1;
var promise = new this(INTERNAL);
},{"immediate":36}],38:[function(require,module,exports){
// Top level file is just a mixin of submodules & constants
'use strict';
module.exports = pako;
},{"./lib/deflate":39,"./lib/inflate":40,"./lib/utils/common":41,"./lib/zlib/
constants":44}],39:[function(require,module,exports){
'use strict';
var Z_NO_FLUSH = 0;
var Z_FINISH = 4;
var Z_OK = 0;
var Z_STREAM_END = 1;
var Z_SYNC_FLUSH = 2;
var Z_DEFAULT_STRATEGY = 0;
var Z_DEFLATED = 8;
/* ===========================================================================*/
/**
* class Deflate
*
* Generic JS-style wrapper for zlib calls. If you don't need
* streaming behaviour - use more simple functions: [[deflate]],
* [[deflateRaw]] and [[gzip]].
**/
/* internal
* Deflate.chunks -> Array
*
* Chunks of output data, if [[Deflate#onData]] not overridden.
**/
/**
* Deflate.result -> Uint8Array|Array
*
* Compressed result, generated by default [[Deflate#onData]]
* and [[Deflate#onEnd]] handlers. Filled after you push last chunk
* (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you
* push a chunk with explicit flush (call [[Deflate#push]] with
* `Z_SYNC_FLUSH` param).
**/
/**
* Deflate.err -> Number
*
* Error code after deflate finished. 0 (Z_OK) on success.
* You will not need it in real life, because deflate errors
* are possible only on wrong options or bad `onData` / `onEnd`
* custom handlers.
**/
/**
* Deflate.msg -> String
*
* Error message, if [[Deflate.err]] != 0
**/
/**
* new Deflate(options)
* - options (Object): zlib deflate options.
*
* Creates new deflator instance with specified params. Throws exception
* on bad params. Supported options:
*
* - `level`
* - `windowBits`
* - `memLevel`
* - `strategy`
* - `dictionary`
*
* [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
* for more information on these.
*
* Additional options, for internal needs:
*
* - `chunkSize` - size of generated data chunks (16K by default)
* - `raw` (Boolean) - do raw deflate
* - `gzip` (Boolean) - create gzip wrapper
* - `to` (String) - if equal to 'string', then result will be "binary string"
* (each char code [0..255])
* - `header` (Object) - custom header for gzip
* - `text` (Boolean) - true if compressed data believed to be text
* - `time` (Number) - modification time, unix timestamp
* - `os` (Number) - operation system code
* - `extra` (Array) - array of bytes with extra data (max 65536)
* - `name` (String) - file name (binary string)
* - `comment` (String) - comment (binary string)
* - `hcrc` (Boolean) - true if header crc should be added
*
* ##### Example:
*
* ```javascript
* var pako = require('pako')
* , chunk1 = new Uint8Array([1,2,3,4,5,6,7,8,9])
* , chunk2 = new Uint8Array([10,11,12,13,14,15,16,17,18,19]);
*
* var deflate = new pako.Deflate({ level: 3});
*
* deflate.push(chunk1, false);
* deflate.push(chunk2, true); // true -> last chunk
*
* if (deflate.err) { throw new Error(deflate.err); }
*
* console.log(deflate.result);
* ```
**/
function Deflate(options) {
if (!(this instanceof Deflate)) return new Deflate(options);
this.options = utils.assign({
level: Z_DEFAULT_COMPRESSION,
method: Z_DEFLATED,
chunkSize: 16384,
windowBits: 15,
memLevel: 8,
strategy: Z_DEFAULT_STRATEGY,
to: ''
}, options || {});
if (opt.header) {
zlib_deflate.deflateSetHeader(this.strm, opt.header);
}
if (opt.dictionary) {
var dict;
// Convert data if needed
if (typeof opt.dictionary === 'string') {
// If we need to compress text, change encoding to utf8.
dict = strings.string2buf(opt.dictionary);
} else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {
dict = new Uint8Array(opt.dictionary);
} else {
dict = opt.dictionary;
}
this._dict_set = true;
}
}
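/**
 * Example (sketch, assuming the bundled pako is available as `pako`): creating
 * a deflator with a preset dictionary.
 *
 * ```javascript
 * var deflator = new pako.Deflate({ level: 6, dictionary: "common prefix" });
 * deflator.push("common prefix and more data", true);
 * var compressed = deflator.result; // Uint8Array (or Array) of deflate output
 * ```
 */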
/**
* Deflate#push(data[, mode]) -> Boolean
* - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be
* converted to utf8 byte sequence.
* - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.
* See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
*
* Sends input data to deflate pipe, generating [[Deflate#onData]] calls with
* new compressed chunks. Returns `true` on success. The last data block must have
* mode Z_FINISH (or `true`). That will flush internal pending buffers and call
* [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you
* can use mode Z_SYNC_FLUSH, keeping the compression context.
*
* On fail call [[Deflate#onEnd]] with error code and return false.
*
* We strongly recommend to use `Uint8Array` on input for best speed (output
* array format is detected automatically). Also, don't skip last param and always
* use the same type in your code (boolean or number). That will improve JS speed.
*
* For regular `Array`-s make sure all elements are [0..255].
*
* ##### Example
*
* ```javascript
* push(chunk, false); // push one of data chunks
* ...
* push(chunk, true); // push last chunk
* ```
**/
Deflate.prototype.push = function (data, mode) {
var strm = this.strm;
var chunkSize = this.options.chunkSize;
var status, _mode;
_mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH);
strm.next_in = 0;
strm.avail_in = strm.input.length;
do {
if (strm.avail_out === 0) {
strm.output = new utils.Buf8(chunkSize);
strm.next_out = 0;
strm.avail_out = chunkSize;
}
status = zlib_deflate.deflate(strm, _mode); /* no bad return value */
return true;
};
/**
* Deflate#onData(chunk) -> Void
* - chunk (Uint8Array|Array|String): output data. Type of array depends
* on js engine support. When string output requested, each chunk
* will be string.
*
* By default, stores data blocks in `chunks[]` property and glue
* those in `onEnd`. Override this handler, if you need another behaviour.
**/
Deflate.prototype.onData = function (chunk) {
this.chunks.push(chunk);
};
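/**
 * Example (sketch): stream compressed chunks somewhere else instead of
 * buffering them in `chunks[]`. `sendSomewhere` is a hypothetical sink.
 *
 * ```javascript
 * var deflator = new pako.Deflate({ gzip: true });
 * deflator.onData = function (chunk) {
 *   sendSomewhere(chunk); // chunk is a Uint8Array (a string when `to: 'string'`)
 * };
 * deflator.push(data, true);
 * ```
 */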
/**
* Deflate#onEnd(status) -> Void
* - status (Number): deflate status. 0 (Z_OK) on success,
* other if not.
*
* Called once after you tell deflate that the input stream is
* complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)
* or if an error happened. By default - join collected chunks,
* free memory and fill `results` / `err` properties.
**/
Deflate.prototype.onEnd = function (status) {
// On success - join
if (status === Z_OK) {
if (this.options.to === 'string') {
this.result = this.chunks.join('');
} else {
this.result = utils.flattenChunks(this.chunks);
}
}
this.chunks = [];
this.err = status;
this.msg = this.strm.msg;
};
/**
* deflate(data[, options]) -> Uint8Array|Array|String
* - data (Uint8Array|Array|String): input data to compress.
* - options (Object): zlib deflate options.
*
* Compress `data` with deflate algorithm and `options`.
*
* Supported options are:
*
* - level
* - windowBits
* - memLevel
* - strategy
* - dictionary
*
* [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
* for more information on these.
*
* Sugar (options):
*
* - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify
*   negative windowBits implicitly.
* - `to` (String) - if equal to 'string', then result will be "binary string"
* (each char code [0..255])
*
* ##### Example:
*
* ```javascript
* var pako = require('pako')
* , data = new Uint8Array([1,2,3,4,5,6,7,8,9]);
*
* console.log(pako.deflate(data));
* ```
**/
function deflate(input, options) {
var deflator = new Deflate(options);
deflator.push(input, true);
return deflator.result;
}
/**
* deflateRaw(data[, options]) -> Uint8Array|Array|String
* - data (Uint8Array|Array|String): input data to compress.
* - options (Object): zlib deflate options.
*
* The same as [[deflate]], but creates raw data, without wrapper
* (header and adler32 crc).
**/
function deflateRaw(input, options) {
options = options || {};
options.raw = true;
return deflate(input, options);
}
/**
* gzip(data[, options]) -> Uint8Array|Array|String
* - data (Uint8Array|Array|String): input data to compress.
* - options (Object): zlib deflate options.
*
* The same as [[deflate]], but create gzip wrapper instead of
* deflate one.
**/
function gzip(input, options) {
options = options || {};
options.gzip = true;
return deflate(input, options);
}
exports.Deflate = Deflate;
exports.deflate = deflate;
exports.deflateRaw = deflateRaw;
exports.gzip = gzip;
},{"./utils/common":41,"./utils/strings":42,"./zlib/deflate":46,"./zlib/
messages":51,"./zlib/zstream":53}],40:[function(require,module,exports){
'use strict';
/**
* class Inflate
*
* Generic JS-style wrapper for zlib calls. If you don't need
* streaming behaviour - use more simple functions: [[inflate]]
* and [[inflateRaw]].
**/
/* internal
* inflate.chunks -> Array
*
* Chunks of output data, if [[Inflate#onData]] not overridden.
**/
/**
* Inflate.result -> Uint8Array|Array|String
*
* Uncompressed result, generated by default [[Inflate#onData]]
* and [[Inflate#onEnd]] handlers. Filled after you push last chunk
* (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you
* push a chunk with explicit flush (call [[Inflate#push]] with
* `Z_SYNC_FLUSH` param).
**/
/**
* Inflate.err -> Number
*
* Error code after inflate finished. 0 (Z_OK) on success.
* Should be checked if broken data possible.
**/
/**
* Inflate.msg -> String
*
* Error message, if [[Inflate.err]] != 0
**/
/**
* new Inflate(options)
* - options (Object): zlib inflate options.
*
* Creates new inflator instance with specified params. Throws exception
* on bad params. Supported options:
*
* - `windowBits`
* - `dictionary`
*
* [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
* for more information on these.
*
* Additional options, for internal needs:
*
* - `chunkSize` - size of generated data chunks (16K by default)
* - `raw` (Boolean) - do raw inflate
* - `to` (String) - if equal to 'string', then result will be converted
* from utf8 to utf16 (javascript) string. When string output requested,
* chunk length can differ from `chunkSize`, depending on content.
*
* By default, when no options set, autodetect deflate/gzip data format via
* wrapper header.
*
* ##### Example:
*
* ```javascript
* var pako = require('pako')
* , chunk1 = new Uint8Array([1,2,3,4,5,6,7,8,9])
* , chunk2 = new Uint8Array([10,11,12,13,14,15,16,17,18,19]);
*
* var inflate = new pako.Inflate({ level: 3});
*
* inflate.push(chunk1, false);
* inflate.push(chunk2, true); // true -> last chunk
*
* if (inflate.err) { throw new Error(inflate.err); }
*
* console.log(inflate.result);
* ```
**/
function Inflate(options) {
if (!(this instanceof Inflate)) return new Inflate(options);
this.options = utils.assign({
chunkSize: 16384,
windowBits: 0,
to: ''
}, options || {});
  // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate
if ((opt.windowBits >= 0) && (opt.windowBits < 16) &&
!(options && options.windowBits)) {
opt.windowBits += 32;
}
// Gzip header has no info about windows size, we can do autodetect only
// for deflate. So, if window size not set, force it to max when gzip possible
if ((opt.windowBits > 15) && (opt.windowBits < 48)) {
// bit 3 (16) -> gzipped data
// bit 4 (32) -> autodetect gzip/deflate
if ((opt.windowBits & 15) === 0) {
opt.windowBits |= 15;
}
}
zlib_inflate.inflateGetHeader(this.strm, this.header);
}
/**
* Inflate#push(data[, mode]) -> Boolean
* - data (Uint8Array|Array|ArrayBuffer|String): input data
* - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.
* See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
*
* Sends input data to inflate pipe, generating [[Inflate#onData]] calls with
* new output chunks. Returns `true` on success. The last data block must have
* mode Z_FINISH (or `true`). That will flush internal pending buffers and call
* [[Inflate#onEnd]]. For interim explicit flushes (without ending the stream) you
* can use mode Z_SYNC_FLUSH, keeping the decompression context.
*
* On fail call [[Inflate#onEnd]] with error code and return false.
*
* We strongly recommend to use `Uint8Array` on input for best speed (output
* format is detected automatically). Also, don't skip last param and always
* use the same type in your code (boolean or number). That will improve JS speed.
*
* For regular `Array`-s make sure all elements are [0..255].
*
* ##### Example
*
* ```javascript
* push(chunk, false); // push one of data chunks
* ...
* push(chunk, true); // push last chunk
* ```
**/
Inflate.prototype.push = function (data, mode) {
var strm = this.strm;
var chunkSize = this.options.chunkSize;
var dictionary = this.options.dictionary;
var status, _mode;
var next_out_utf8, tail, utf8str;
var dict;
strm.next_in = 0;
strm.avail_in = strm.input.length;
do {
if (strm.avail_out === 0) {
strm.output = new utils.Buf8(chunkSize);
strm.next_out = 0;
strm.avail_out = chunkSize;
}
if (strm.next_out) {
if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0
&& (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) {
// move tail
strm.next_out = tail;
strm.avail_out = chunkSize - tail;
if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail,
0); }
this.onData(utf8str);
} else {
this.onData(utils.shrinkBuf(strm.output, strm.next_out));
}
}
}
// When no more input data, we should check that internal inflate buffers
// are flushed. The only way to do it when avail_out = 0 - run one more
// inflate pass. But if output data not exists, inflate return Z_BUF_ERROR.
// Here we set flag to process this error properly.
//
// NOTE. Deflate does not return error in this case and does not needs such
// logic.
if (strm.avail_in === 0 && strm.avail_out === 0) {
allowBufError = true;
}
return true;
};
/**
* Inflate#onData(chunk) -> Void
* - chunk (Uint8Array|Array|String): output data. Type of array depends
* on js engine support. When string output requested, each chunk
* will be string.
*
* By default, stores data blocks in `chunks[]` property and glue
* those in `onEnd`. Override this handler, if you need another behaviour.
**/
Inflate.prototype.onData = function (chunk) {
this.chunks.push(chunk);
};
/**
* Inflate#onEnd(status) -> Void
* - status (Number): inflate status. 0 (Z_OK) on success,
* other if not.
*
* Called either after you tell inflate that the input stream is
* complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)
* or if an error happened. By default - join collected chunks,
* free memory and fill `results` / `err` properties.
**/
Inflate.prototype.onEnd = function (status) {
// On success - join
if (status === c.Z_OK) {
if (this.options.to === 'string') {
// Glue & convert here, until we teach pako to send
    // utf8 aligned strings to onData
this.result = this.chunks.join('');
} else {
this.result = utils.flattenChunks(this.chunks);
}
}
this.chunks = [];
this.err = status;
this.msg = this.strm.msg;
};
/**
* inflate(data[, options]) -> Uint8Array|Array|String
* - data (Uint8Array|Array|String): input data to decompress.
* - options (Object): zlib inflate options.
*
* Decompress `data` with inflate/ungzip and `options`. Autodetect
* format via wrapper header by default. That's why we don't provide
* separate `ungzip` method.
*
* Supported options are:
*
* - windowBits
*
* [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
* for more information.
*
* Sugar (options):
*
* - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify
*   negative windowBits implicitly.
* - `to` (String) - if equal to 'string', then result will be converted
* from utf8 to utf16 (javascript) string. When string output requested,
* chunk length can differ from `chunkSize`, depending on content.
*
*
* ##### Example:
*
* ```javascript
* var pako = require('pako')
* , input = pako.deflate([1,2,3,4,5,6,7,8,9])
* , output;
*
* try {
* output = pako.inflate(input);
* } catch (err) {
* console.log(err);
* }
* ```
**/
function inflate(input, options) {
var inflator = new Inflate(options);
  inflator.push(input, true);
  return inflator.result;
}
/**
* inflateRaw(data[, options]) -> Uint8Array|Array|String
* - data (Uint8Array|Array|String): input data to decompress.
* - options (Object): zlib inflate options.
*
* The same as [[inflate]], but creates raw data, without wrapper
* (header and adler32 crc).
**/
function inflateRaw(input, options) {
options = options || {};
options.raw = true;
return inflate(input, options);
}
/**
* ungzip(data[, options]) -> Uint8Array|Array|String
* - data (Uint8Array|Array|String): input data to decompress.
* - options (Object): zlib inflate options.
*
* Just shortcut to [[inflate]], because it autodetects format
* by header.content. Done for convenience.
**/
exports.Inflate = Inflate;
exports.inflate = inflate;
exports.inflateRaw = inflateRaw;
exports.ungzip = inflate;
},{"./utils/common":41,"./utils/strings":42,"./zlib/constants":44,"./zlib/
gzheader":47,"./zlib/inflate":49,"./zlib/messages":51,"./zlib/zstream":53}],41:
[function(require,module,exports){
'use strict';
return obj;
};
var fnTyped = {
arraySet: function (dest, src, src_offs, len, dest_offs) {
if (src.subarray && dest.subarray) {
dest.set(src.subarray(src_offs, src_offs + len), dest_offs);
return;
}
// Fallback to ordinary array
for (var i = 0; i < len; i++) {
dest[dest_offs + i] = src[src_offs + i];
}
},
// Join array of chunks to single array.
flattenChunks: function (chunks) {
    var i, l, len, pos, chunk, result;
    // calculate data length
    len = 0;
    for (i = 0, l = chunks.length; i < l; i++) {
      len += chunks[i].length;
    }
    // join chunks
    result = new Uint8Array(len);
pos = 0;
for (i = 0, l = chunks.length; i < l; i++) {
chunk = chunks[i];
result.set(chunk, pos);
pos += chunk.length;
}
return result;
}
};
var fnUntyped = {
arraySet: function (dest, src, src_offs, len, dest_offs) {
for (var i = 0; i < len; i++) {
dest[dest_offs + i] = src[src_offs + i];
}
},
// Join array of chunks to single array.
flattenChunks: function (chunks) {
return [].concat.apply([], chunks);
}
};
exports.setTyped(TYPED_OK);
},{}],42:[function(require,module,exports){
// String encode/decode helpers
'use strict';
// allocate buffer
buf = new utils.Buf8(buf_len);
// convert
for (i = 0, m_pos = 0; i < buf_len; m_pos++) {
c = str.charCodeAt(m_pos);
if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
c2 = str.charCodeAt(m_pos + 1);
if ((c2 & 0xfc00) === 0xdc00) {
c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
m_pos++;
}
}
if (c < 0x80) {
/* one byte */
buf[i++] = c;
} else if (c < 0x800) {
/* two bytes */
buf[i++] = 0xC0 | (c >>> 6);
buf[i++] = 0x80 | (c & 0x3f);
} else if (c < 0x10000) {
/* three bytes */
buf[i++] = 0xE0 | (c >>> 12);
buf[i++] = 0x80 | (c >>> 6 & 0x3f);
buf[i++] = 0x80 | (c & 0x3f);
} else {
/* four bytes */
buf[i++] = 0xf0 | (c >>> 18);
buf[i++] = 0x80 | (c >>> 12 & 0x3f);
buf[i++] = 0x80 | (c >>> 6 & 0x3f);
buf[i++] = 0x80 | (c & 0x3f);
}
}
return buf;
};
c_len = _utf8len[c];
// skip 5 & 6 byte codes
if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }
if (c < 0x10000) {
utf16buf[out++] = c;
} else {
c -= 0x10000;
utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);
utf16buf[out++] = 0xdc00 | (c & 0x3ff);
}
}
},{"./common":41}],43:[function(require,module,exports){
'use strict';
do {
s1 = (s1 + buf[pos++]) |0;
s2 = (s2 + s1) |0;
} while (--n);
s1 %= 65521;
s2 %= 65521;
}
module.exports = adler32;
},{}],44:[function(require,module,exports){
'use strict';
module.exports = {
/* Allowed flush values; see deflate() and inflate() below for details */
Z_NO_FLUSH: 0,
Z_PARTIAL_FLUSH: 1,
Z_SYNC_FLUSH: 2,
Z_FULL_FLUSH: 3,
Z_FINISH: 4,
Z_BLOCK: 5,
Z_TREES: 6,
/* compression levels */
Z_NO_COMPRESSION: 0,
Z_BEST_SPEED: 1,
Z_BEST_COMPRESSION: 9,
Z_DEFAULT_COMPRESSION: -1,
Z_FILTERED: 1,
Z_HUFFMAN_ONLY: 2,
Z_RLE: 3,
Z_FIXED: 4,
Z_DEFAULT_STRATEGY: 0,
},{}],45:[function(require,module,exports){
'use strict';
return table;
}
crc ^= -1;
module.exports = crc32;
},{}],46:[function(require,module,exports){
'use strict';
/* Allowed flush values; see deflate() and inflate() below for details */
var Z_NO_FLUSH = 0;
var Z_PARTIAL_FLUSH = 1;
//var Z_SYNC_FLUSH = 2;
var Z_FULL_FLUSH = 3;
var Z_FINISH = 4;
var Z_BLOCK = 5;
//var Z_TREES = 6;
/* compression levels */
//var Z_NO_COMPRESSION = 0;
//var Z_BEST_SPEED = 1;
//var Z_BEST_COMPRESSION = 9;
var Z_DEFAULT_COMPRESSION = -1;
var Z_FILTERED = 1;
var Z_HUFFMAN_ONLY = 2;
var Z_RLE = 3;
var Z_FIXED = 4;
var Z_DEFAULT_STRATEGY = 0;
/*============================================================================*/
var MAX_MEM_LEVEL = 9;
/* Maximum value for memLevel in deflateInit2 */
var MAX_WBITS = 15;
/* 32K LZ77 window */
var DEF_MEM_LEVEL = 8;
var MIN_MATCH = 3;
var MAX_MATCH = 258;
var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
var BS_NEED_MORE = 1; /* block not completed, need more input or more output
*/
var BS_BLOCK_DONE = 2; /* block flush performed */
var BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate
*/
var BS_FINISH_DONE = 4; /* finish done, accept no more input or output */
function rank(f) {
return ((f) << 1) - ((f) > 4 ? 9 : 0);
}
/* =========================================================================
* Flush as much pending output as possible. All deflate() output goes
* through this function so some applications may wish to modify it
* to avoid allocating a large strm->output buffer and copying into it.
* (See also read_buf()).
*/
function flush_pending(strm) {
var s = strm.state;
//_tr_flush_bits(s);
var len = s.pending;
if (len > strm.avail_out) {
len = strm.avail_out;
}
if (len === 0) { return; }
function put_byte(s, b) {
s.pending_buf[s.pending++] = b;
}
/* =========================================================================
* Put a short in the pending buffer. The 16-bit value is put in MSB order.
* IN assertion: the stream state is correct and there is enough room in
* pending_buf.
*/
function putShortMSB(s, b) {
// put_byte(s, (Byte)(b >> 8));
// put_byte(s, (Byte)(b & 0xff));
s.pending_buf[s.pending++] = (b >>> 8) & 0xff;
s.pending_buf[s.pending++] = b & 0xff;
}
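/* Worked example: putShortMSB(s, 0x1234) appends 0x12 and then 0x34,
 * i.e. the most significant byte first. */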
/* ===========================================================================
* Read a new buffer from the current input stream, update the adler32
* and total number of bytes read. All deflate() input goes through
* this function so some applications may wish to modify it to avoid
* allocating a large strm->input buffer and copying from it.
* (See also flush_pending()).
*/
function read_buf(strm, buf, start, size) {
var len = strm.avail_in;
strm.avail_in -= len;
strm.next_in += len;
strm.total_in += len;
return len;
}
/* ===========================================================================
* Set match_start to the longest match starting at the given string and
* return its length. Matches shorter or equal to prev_length are discarded,
* in which case the result is equal to prev_length and match_start is
* garbage.
* IN assertions: cur_match is the head of the hash chain for the current
* string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
* OUT assertion: the match length is not greater than s->lookahead.
*/
function longest_match(s, cur_match) {
var chain_length = s.max_chain_length; /* max hash chain length */
var scan = s.strstart; /* current string */
var match; /* matched string */
var len; /* length of current match */
var best_len = s.prev_length; /* best match length so far */
var nice_match = s.nice_match; /* stop if match long enough */
var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?
s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;
/* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
* It is easy to get rid of this optimization if necessary.
*/
// Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
do {
// Assert(cur_match < s->strstart, "no future");
match = cur_match;
/* ===========================================================================
* Fill the window when the lookahead becomes insufficient.
* Updates strstart and lookahead.
*
* IN assertion: lookahead < MIN_LOOKAHEAD
* OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
* At least one byte has been read, or avail_in == 0; reads are
* performed for at least two bytes (required for the zip translate_eol
* option -- not supported here).
*/
function fill_window(s) {
var _w_size = s.w_size;
var p, n, m, more, str;
do {
more = s.window_size - s.lookahead - s.strstart;
n = s.hash_size;
p = n;
do {
m = s.head[--p];
s.head[p] = (m >= _w_size ? m - _w_size : 0);
} while (--n);
n = _w_size;
p = n;
do {
m = s.prev[--p];
s.prev[p] = (m >= _w_size ? m - _w_size : 0);
/* If n is not on any hash chain, prev[n] is garbage but
* its value will never be used.
*/
} while (--n);
more += _w_size;
}
if (s.strm.avail_in === 0) {
break;
}
/* If the WIN_INIT bytes after the end of the current data have never been
* written, then zero those bytes in order to avoid memory check reports of
* the use of uninitialized (or uninitialised as Julian writes) bytes by
* the longest match routines. Update the high water mark for the next
* time through here. WIN_INIT is set to MAX_MATCH since the longest match
* routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
*/
// if (s.high_water < s.window_size) {
// var curr = s.strstart + s.lookahead;
// var init = 0;
//
// if (s.high_water < curr) {
// /* Previous high water mark below current data -- zero WIN_INIT
// * bytes or up to end of window, whichever is less.
// */
// init = s.window_size - curr;
// if (init > WIN_INIT)
// init = WIN_INIT;
// zmemzero(s->window + curr, (unsigned)init);
// s->high_water = curr + init;
// }
// else if (s->high_water < (ulg)curr + WIN_INIT) {
// /* High water mark at or above current data, but below current data
// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
// * to end of window, whichever is less.
// */
// init = (ulg)curr + WIN_INIT - s->high_water;
// if (init > s->window_size - s->high_water)
// init = s->window_size - s->high_water;
// zmemzero(s->window + s->high_water, (unsigned)init);
// s->high_water += init;
// }
// }
//
// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
// "not enough room for search");
}
/* ===========================================================================
* Copy without compression as much as possible from the input stream, return
* the current block state.
* This function does not insert new strings in the dictionary since
* uncompressible data is probably not useful. This function is used
* only for the level=0 compression option.
* NOTE: this function should be optimized to avoid extra copying from
* window to pending_buf.
*/
function deflate_stored(s, flush) {
/* Stored blocks are limited to 0xffff bytes, pending_buf is limited
* to pending_buf_size, and each stored block has a 5 byte header:
*/
var max_block_size = 0xffff;
fill_window(s);
if (s.lookahead === 0 && flush === Z_NO_FLUSH) {
return BS_NEED_MORE;
}
if (s.lookahead === 0) {
break;
}
/* flush the current block */
}
//Assert(s->block_start >= 0L, "block gone");
// if (s.block_start < 0) throw new Error("block gone");
s.strstart += s.lookahead;
s.lookahead = 0;
}
/* Flush if we may have to slide, otherwise block_start may become
* negative and the data will be gone:
*/
if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) {
/*** FLUSH_BLOCK(s, 0); ***/
flush_block_only(s, false);
if (s.strm.avail_out === 0) {
return BS_NEED_MORE;
}
/***/
}
}
s.insert = 0;
return BS_NEED_MORE;
}
/* ===========================================================================
* Compress as much as possible from the input stream, return the current
* block state.
* This function does not perform lazy evaluation of matches and inserts
* new strings in the dictionary only for unmatched strings or for short
* matches. It is used only for the fast compression options.
*/
function deflate_fast(s, flush) {
var hash_head; /* head of the hash chain */
var bflush; /* set if current block must be flushed */
for (;;) {
/* Make sure that we always have enough lookahead, except
* at the end of the input file. We need MAX_MATCH bytes
* for the next match, plus MIN_MATCH bytes to insert the
* string following the next match.
*/
if (s.lookahead < MIN_LOOKAHEAD) {
fill_window(s);
if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {
return BS_NEED_MORE;
}
if (s.lookahead === 0) {
break; /* flush the current block */
}
}
s.lookahead -= s.match_length;
/* Insert new strings in the hash table only if the match length
* is not too large. This saves time but degrades compression.
*/
if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) {
s.match_length--; /* string at strstart already in table */
do {
s.strstart++;
/*** INSERT_STRING(s, s.strstart, hash_head); ***/
s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;
hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
s.head[s.ins_h] = s.strstart;
/***/
/* strstart never exceeds WSIZE-MAX_MATCH, so there are
* always MIN_MATCH bytes ahead.
*/
} while (--s.match_length !== 0);
s.strstart++;
} else
{
s.strstart += s.match_length;
s.match_length = 0;
s.ins_h = s.window[s.strstart];
/* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */
s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask;
//#if MIN_MATCH != 3
// Call UPDATE_HASH() MIN_MATCH-3 more times
//#endif
/* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
* matter since it will be recomputed at next deflate call.
*/
}
} else {
/* No match, output a literal byte */
//Tracevv((stderr,"%c", s.window[s.strstart]));
/*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
bflush = trees._tr_tally(s, 0, s.window[s.strstart]);
s.lookahead--;
s.strstart++;
}
if (bflush) {
/*** FLUSH_BLOCK(s, 0); ***/
flush_block_only(s, false);
if (s.strm.avail_out === 0) {
return BS_NEED_MORE;
}
/***/
}
}
s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1);
if (flush === Z_FINISH) {
/*** FLUSH_BLOCK(s, 1); ***/
flush_block_only(s, true);
if (s.strm.avail_out === 0) {
return BS_FINISH_STARTED;
}
/***/
return BS_FINISH_DONE;
}
if (s.last_lit) {
/*** FLUSH_BLOCK(s, 0); ***/
flush_block_only(s, false);
if (s.strm.avail_out === 0) {
return BS_NEED_MORE;
}
/***/
}
return BS_BLOCK_DONE;
}
/* ===========================================================================
* Same as above, but achieves better compression. We use a lazy
* evaluation for matches: a match is finally adopted only if there is
* no better match at the next window position.
*/
function deflate_slow(s, flush) {
var hash_head; /* head of hash chain */
var bflush; /* set if current block must be flushed */
var max_insert;
if (bflush) {
/*** FLUSH_BLOCK(s, 0); ***/
flush_block_only(s, false);
if (s.strm.avail_out === 0) {
return BS_NEED_MORE;
}
/***/
}
} else if (s.match_available) {
/* If there was no match at the previous position, output a
* single literal. If there was a match but the current match
* is longer, truncate the previous match to a single literal.
*/
//Tracevv((stderr,"%c", s->window[s->strstart-1]));
/*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);
if (bflush) {
/*** FLUSH_BLOCK_ONLY(s, 0) ***/
flush_block_only(s, false);
/***/
}
s.strstart++;
s.lookahead--;
if (s.strm.avail_out === 0) {
return BS_NEED_MORE;
}
} else {
/* There is no previous match to compare with, wait for
* the next step to decide.
*/
s.match_available = 1;
s.strstart++;
s.lookahead--;
}
}
//Assert (flush != Z_NO_FLUSH, "no flush?");
if (s.match_available) {
//Tracevv((stderr,"%c", s->window[s->strstart-1]));
/*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);
s.match_available = 0;
}
s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1;
if (flush === Z_FINISH) {
/*** FLUSH_BLOCK(s, 1); ***/
flush_block_only(s, true);
if (s.strm.avail_out === 0) {
return BS_FINISH_STARTED;
}
/***/
return BS_FINISH_DONE;
}
if (s.last_lit) {
/*** FLUSH_BLOCK(s, 0); ***/
flush_block_only(s, false);
if (s.strm.avail_out === 0) {
return BS_NEED_MORE;
}
/***/
}
return BS_BLOCK_DONE;
}
/* ===========================================================================
* For Z_RLE, simply look for runs of bytes, generate matches only of distance
* one. Do not maintain a hash table. (It will be regenerated if this run of
* deflate switches away from Z_RLE.)
*/
function deflate_rle(s, flush) {
var bflush; /* set if current block must be flushed */
var prev; /* byte at distance one to match */
var scan, strend; /* scan goes up to strend for length of run */
for (;;) {
/* Make sure that we always have enough lookahead, except
* at the end of the input file. We need MAX_MATCH bytes
* for the longest run, plus one for the unrolled loop.
*/
if (s.lookahead <= MAX_MATCH) {
fill_window(s);
if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) {
return BS_NEED_MORE;
}
if (s.lookahead === 0) { break; } /* flush the current block */
}
s.lookahead -= s.match_length;
s.strstart += s.match_length;
s.match_length = 0;
} else {
/* No match, output a literal byte */
//Tracevv((stderr,"%c", s->window[s->strstart]));
/*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
bflush = trees._tr_tally(s, 0, s.window[s.strstart]);
s.lookahead--;
s.strstart++;
}
if (bflush) {
/*** FLUSH_BLOCK(s, 0); ***/
flush_block_only(s, false);
if (s.strm.avail_out === 0) {
return BS_NEED_MORE;
}
/***/
}
}
s.insert = 0;
if (flush === Z_FINISH) {
/*** FLUSH_BLOCK(s, 1); ***/
flush_block_only(s, true);
if (s.strm.avail_out === 0) {
return BS_FINISH_STARTED;
}
/***/
return BS_FINISH_DONE;
}
if (s.last_lit) {
/*** FLUSH_BLOCK(s, 0); ***/
flush_block_only(s, false);
if (s.strm.avail_out === 0) {
return BS_NEED_MORE;
}
/***/
}
return BS_BLOCK_DONE;
}
/* ===========================================================================
* For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
* (It will be regenerated if this run of deflate switches away from Huffman.)
*/
function deflate_huff(s, flush) {
var bflush; /* set if current block must be flushed */
for (;;) {
/* Make sure that we have a literal to write. */
if (s.lookahead === 0) {
fill_window(s);
if (s.lookahead === 0) {
if (flush === Z_NO_FLUSH) {
return BS_NEED_MORE;
}
break; /* flush the current block */
}
}
var configuration_table;
configuration_table = [
/* good lazy nice chain */
new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */
new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */
new Config(4, 5, 16, 8, deflate_fast), /* 2 */
new Config(4, 6, 32, 32, deflate_fast), /* 3 */
/* ===========================================================================
* Initialize the "longest match" routines for a new zlib stream
*/
function lm_init(s) {
s.window_size = 2 * s.w_size;
s.strstart = 0;
s.block_start = 0;
s.lookahead = 0;
s.insert = 0;
s.match_length = s.prev_length = MIN_MATCH - 1;
s.match_available = 0;
s.ins_h = 0;
}
function DeflateState() {
this.strm = null; /* pointer back to this zlib stream */
this.status = 0; /* as the name implies */
this.pending_buf = null; /* output still pending */
this.pending_buf_size = 0; /* size of pending_buf */
this.pending_out = 0; /* next pending byte to output to the stream */
this.pending = 0; /* nb of bytes in the pending buffer */
this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */
this.gzhead = null; /* gzip header information to write */
this.gzindex = 0; /* where in extra, name, or comment */
this.method = Z_DEFLATED; /* can only be DEFLATED */
this.last_flush = -1; /* value of flush param for previous deflate call */
this.window = null;
/* Sliding window. Input bytes are read into the second half of the window,
* and move to the first half later to keep a dictionary of at least wSize
* bytes. With this organization, matches are limited to a distance of
* wSize-MAX_MATCH bytes, but this ensures that IO is always
* performed with a length multiple of the block size.
*/
this.window_size = 0;
/* Actual size of window: 2*wSize, except when the user input buffer
* is directly used as sliding window.
*/
this.prev = null;
/* Link to older string with same hash index. To limit the size of this
* array to 64K, this link is maintained only for the last 32K strings.
* An index in this array is thus a window index modulo 32K.
*/
this.hash_shift = 0;
/* Number of bits by which ins_h must be shifted at each input
* step. It must be such that after MIN_MATCH steps, the oldest
* byte no longer takes part in the hash key, that is:
* hash_shift * MIN_MATCH >= hash_bits
*/
this.block_start = 0;
/* Window position at the beginning of the current output block. Gets
* negative when the window is moved backwards.
*/
this.prev_length = 0;
/* Length of the best match at previous step. Matches not greater than this
* are discarded. This is used in the lazy match evaluation.
*/
this.max_chain_length = 0;
/* To speed up deflation, hash chains are never searched beyond this
* length. A higher limit improves compression ratio but degrades the
* speed.
*/
this.max_lazy_match = 0;
/* Attempt to find a better match only when the current match is strictly
* smaller than this value. This mechanism is used only for compression
* levels >= 4.
*/
// That's alias to max_lazy_match, don't use directly
//this.max_insert_length = 0;
/* Insert new strings in the hash table only if the match length is not
* greater than this length. This saves time but degrades compression.
* max_insert_length is used only for compression levels <= 3.
*/
/* used by trees.c: */
//ush bl_count[MAX_BITS+1];
this.bl_count = new utils.Buf16(MAX_BITS + 1);
/* number of codes at each bit length for an optimal tree */
this.lit_bufsize = 0;
/* Size of match buffer for literals/lengths. There are 4 reasons for
* limiting lit_bufsize to 64K:
* - frequencies can be kept in 16 bit counters
* - if compression is not successful for the first block, all input
* data is still in the window so we can still emit a stored block even
* when input comes from standard input. (This can also be done for
* all blocks if lit_bufsize is not greater than 32K.)
* - if compression is not successful for a file smaller than 64K, we can
* even emit a stored file instead of a stored block (saving 5 bytes).
* This is applicable only for zip (not gzip or zlib).
* - creating new Huffman trees less frequently may not provide fast
* adaptation to changes in the input data statistics. (Take for
* example a binary file with poorly compressible code followed by
* a highly compressible string table.) Smaller buffer sizes give
* fast adaptation but have of course the overhead of transmitting
* trees more frequently.
* - I can't count above 4
*/
this.d_buf = 0;
/* Buffer index for distances. To simplify the code, d_buf and l_buf have
* the same number of elements. To use different lengths, an extra flag
* array would be necessary.
*/
this.bi_buf = 0;
/* Output buffer. bits are inserted starting at the bottom (least
* significant bits).
*/
this.bi_valid = 0;
/* Number of valid bits in bi_buf. All bits above the last valid bit
* are always zero.
*/
// Used for window memory init. We safely ignore it for JS. That makes
// sense only for pointers and memory check tools.
//this.high_water = 0;
/* High water mark offset in window for initialized bytes -- bytes above
* this are set to zero in order to avoid memory check warnings when
* longest match routines access bytes past the input. This is then
* updated to the new high water mark.
*/
}
function deflateResetKeep(strm) {
var s;
if (!strm || !strm.state) {
return err(strm, Z_STREAM_ERROR);
}
strm.total_in = strm.total_out = 0;
strm.data_type = Z_UNKNOWN;
s = strm.state;
s.pending = 0;
s.pending_out = 0;
if (s.wrap < 0) {
s.wrap = -s.wrap;
/* was made negative by deflate(..., Z_FINISH); */
}
s.status = (s.wrap ? INIT_STATE : BUSY_STATE);
strm.adler = (s.wrap === 2) ?
0 // crc32(0, Z_NULL, 0)
:
1; // adler32(0, Z_NULL, 0)
s.last_flush = Z_NO_FLUSH;
trees._tr_init(s);
return Z_OK;
}
function deflateReset(strm) {
var ret = deflateResetKeep(strm);
if (ret === Z_OK) {
lm_init(strm.state);
}
return ret;
}
strm.state = s;
s.strm = strm;
s.wrap = wrap;
s.gzhead = null;
s.w_bits = windowBits;
s.w_size = 1 << s.w_bits;
s.w_mask = s.w_size - 1;
s.hash_bits = memLevel + 7;
s.hash_size = 1 << s.hash_bits;
s.hash_mask = s.hash_size - 1;
s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH);
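// Illustrative check (not part of the original pako source): with the default
// memLevel of 8, hash_bits = 8 + 7 = 15 and hash_shift = ~~((15 + 3 - 1) / 3) = 5,
// which satisfies the requirement hash_shift * MIN_MATCH >= hash_bits (5 * 3 = 15).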
s.pending_buf_size = s.lit_bufsize * 4;
s.level = level;
s.strategy = strategy;
s.method = method;
return deflateReset(strm);
}
s = strm.state;
if (!strm.output ||
(!strm.input && strm.avail_in !== 0) ||
(s.status === FINISH_STATE && flush !== Z_FINISH)) {
return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR);
}
s.status = BUSY_STATE;
putShortMSB(s, header);
//#ifdef GZIP
if (s.status === EXTRA_STATE) {
if (s.gzhead.extra/* != Z_NULL*/) {
beg = s.pending; /* start of bytes to update crc */
do {
if (s.pending === s.pending_buf_size) {
if (s.gzhead.hcrc && s.pending > beg) {
strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
}
flush_pending(strm);
beg = s.pending;
if (s.pending === s.pending_buf_size) {
val = 1;
break;
}
}
// JS specific: little magic to add zero terminator to end of string
if (s.gzindex < s.gzhead.name.length) {
val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff;
} else {
val = 0;
}
put_byte(s, val);
} while (val !== 0);
do {
if (s.pending === s.pending_buf_size) {
if (s.gzhead.hcrc && s.pending > beg) {
strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
}
flush_pending(strm);
beg = s.pending;
if (s.pending === s.pending_buf_size) {
val = 1;
break;
}
}
// JS specific: little magic to add zero terminator to end of string
if (s.gzindex < s.gzhead.comment.length) {
val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff;
} else {
val = 0;
}
put_byte(s, val);
} while (val !== 0);
/* User must not provide more input after the first FINISH: */
if (s.status === FINISH_STATE && strm.avail_in !== 0) {
return err(strm, Z_BUF_ERROR);
}
trees._tr_stored_block(s, 0, 0, false);
/* For a full flush, this empty block will be recognized
* as a special marker by inflate_sync().
*/
if (flush === Z_FULL_FLUSH) {
/*** CLEAR_HASH(s); ***/ /* forget history */
zero(s.head); // Fill with NIL (= 0);
if (s.lookahead === 0) {
s.strstart = 0;
s.block_start = 0;
s.insert = 0;
}
}
}
flush_pending(strm);
if (strm.avail_out === 0) {
s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */
return Z_OK;
}
}
}
//Assert(strm->avail_out > 0, "bug2");
//if (strm.avail_out <= 0) { throw new Error("bug2");}
flush_pending(strm);
/* If avail_out is zero, the application will call deflate again
* to flush the rest.
*/
if (s.wrap > 0) { s.wrap = -s.wrap; }
/* write the trailer only once! */
return s.pending !== 0 ? Z_OK : Z_STREAM_END;
}
function deflateEnd(strm) {
var status;
status = strm.state.status;
if (status !== INIT_STATE &&
status !== EXTRA_STATE &&
status !== NAME_STATE &&
status !== COMMENT_STATE &&
status !== HCRC_STATE &&
status !== BUSY_STATE &&
status !== FINISH_STATE
) {
return err(strm, Z_STREAM_ERROR);
}
strm.state = null;
return status === BUSY_STATE ? err(strm, Z_DATA_ERROR) : Z_OK;
}
function deflateSetDictionary(strm, dictionary) {
var s;
var str, n;
var wrap;
var avail;
var next;
var input;
var tmpDict;
s = strm.state;
wrap = s.wrap;
s.head[s.ins_h] = str;
str++;
} while (--n);
s.strstart = str;
s.lookahead = MIN_MATCH - 1;
fill_window(s);
}
s.strstart += s.lookahead;
s.block_start = s.strstart;
s.insert = s.lookahead;
s.lookahead = 0;
s.match_length = s.prev_length = MIN_MATCH - 1;
s.match_available = 0;
strm.next_in = next;
strm.input = input;
strm.avail_in = avail;
s.wrap = wrap;
return Z_OK;
}
exports.deflateInit = deflateInit;
exports.deflateInit2 = deflateInit2;
exports.deflateReset = deflateReset;
exports.deflateResetKeep = deflateResetKeep;
exports.deflateSetHeader = deflateSetHeader;
exports.deflate = deflate;
exports.deflateEnd = deflateEnd;
exports.deflateSetDictionary = deflateSetDictionary;
exports.deflateInfo = 'pako deflate (from Nodeca project)';
/* Not implemented
exports.deflateBound = deflateBound;
exports.deflateCopy = deflateCopy;
exports.deflateParams = deflateParams;
exports.deflatePending = deflatePending;
exports.deflatePrime = deflatePrime;
exports.deflateTune = deflateTune;
*/
},{"../utils/common":41,"./adler32":43,"./crc32":45,"./messages":51,"./
trees":52}],47:[function(require,module,exports){
'use strict';
function GZheader() {
/* true if compressed data believed to be text */
this.text = 0;
/* modification time */
this.time = 0;
/* extra flags (not used when writing a gzip file) */
this.xflags = 0;
/* operating system */
this.os = 0;
/* pointer to extra field or Z_NULL if none */
this.extra = null;
/* extra field length (valid if extra != Z_NULL) */
this.extra_len = 0; // Actually, we don't need it in JS,
// but keep it to avoid extra changes relative to the C code
//
// Setting up limits is not necessary because in JS we should not preallocate
// memory; inflate uses a constant limit of 65536 bytes instead.
//
module.exports = GZheader;
},{}],48:[function(require,module,exports){
'use strict';
/*
Decode literal, length, and distance codes and write out the resulting
literal and match bytes until either not enough input or output is
available, an end-of-block is encountered, or a data error is encountered.
When large enough input and output buffers are supplied to inflate(), for
example, a 16K input buffer and a 64K output buffer, more than 95% of the
inflate execution time is spent in this routine.
Entry assumptions:
Notes:
- The maximum input bits used by a length/distance pair is 15 bits for the
length code, 5 bits for the length extra, 15 bits for the distance code,
and 13 bits for the distance extra. This totals 48 bits, or six bytes.
Therefore if strm.avail_in >= 6, then there is enough input to avoid
checking for available input while decoding.
- The maximum bytes that a single length/distance pair can output is 258
bytes, which is the maximum length that can be coded. inflate_fast()
requires strm.avail_out >= 258 for each loop to avoid checking for
output space.
*/
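// Illustrative restatement (not part of the original pako source):
// 15 + 5 + 15 + 13 = 48 bits and 48 / 8 = 6 bytes, so one loop iteration can
// consume at most 6 whole input bytes; together with the 258-byte output bound
// this is why inflate() only dispatches here when avail_in >= 6 and
// avail_out >= 258.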
module.exports = function inflate_fast(strm, start) {
var state;
var _in; /* local strm.input */
var last; /* have enough input while in < last */
var _out; /* local strm.output */
var beg; /* inflate()'s initial strm.output */
var end; /* while out < end, enough space available */
//#ifdef INFLATE_STRICT
var dmax; /* maximum distance from zlib header */
//#endif
var wsize; /* window size or zero if not using window */
var whave; /* valid bytes in the window */
var wnext; /* window write index */
// Use `s_window` instead of `window` to avoid conflicts with instrumentation tools
var s_window; /* allocated sliding window, if wsize != 0 */
var hold; /* local strm.hold */
var bits; /* local strm.bits */
var lcode; /* local strm.lencode */
var dcode; /* local strm.distcode */
var lmask; /* mask for first level of length codes */
var dmask; /* mask for first level of distance codes */
var here; /* retrieved table entry */
var op; /* code bits, operation, extra bits, or */
/* window position, window bytes to copy */
var len; /* match length, unused bytes */
var dist; /* match distance */
var from; /* where to copy match from */
var from_source;
top:
do {
if (bits < 15) {
hold += input[_in++] << bits;
bits += 8;
hold += input[_in++] << bits;
bits += 8;
}
dolen:
for (;;) { // Goto emulation
op = here >>> 24/*here.bits*/;
hold >>>= op;
bits -= op;
op = (here >>> 16) & 0xff/*here.op*/;
if (op === 0) { /* literal */
//Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
// "inflate: literal '%c'\n" :
// "inflate: literal 0x%02x\n", here.val));
output[_out++] = here & 0xffff/*here.val*/;
}
else if (op & 16) { /* length base */
len = here & 0xffff/*here.val*/;
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
hold += input[_in++] << bits;
bits += 8;
}
len += hold & ((1 << op) - 1);
hold >>>= op;
bits -= op;
}
//Tracevv((stderr, "inflate: length %u\n", len));
if (bits < 15) {
hold += input[_in++] << bits;
bits += 8;
hold += input[_in++] << bits;
bits += 8;
}
here = dcode[hold & dmask];
dodist:
for (;;) { // goto emulation
op = here >>> 24/*here.bits*/;
hold >>>= op;
bits -= op;
op = (here >>> 16) & 0xff/*here.op*/;
/* return unused bytes (on entry, bits < 8, so in won't go too far back) */
len = bits >> 3;
_in -= len;
bits -= len << 3;
hold &= (1 << bits) - 1;
},{}],49:[function(require,module,exports){
'use strict';
var CODES = 0;
var LENS = 1;
var DISTS = 2;
/* Allowed flush values; see deflate() and inflate() below for details */
//var Z_NO_FLUSH = 0;
//var Z_PARTIAL_FLUSH = 1;
//var Z_SYNC_FLUSH = 2;
//var Z_FULL_FLUSH = 3;
var Z_FINISH = 4;
var Z_BLOCK = 5;
var Z_TREES = 6;
/* STATES ====================================================================*/
/* ===========================================================================*/
/* ===========================================================================*/
function zswap32(q) {
return (((q >>> 24) & 0xff) +
((q >>> 8) & 0xff00) +
((q & 0xff00) << 8) +
((q & 0xff) << 24));
}
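// Illustrative note (not part of the original pako source):
// zswap32(0x12345678) === 0x78563412. It is used further below to reorder the
// big-endian adler32 trailer of a zlib stream (state.flags === 0) before
// comparing it with state.check, since `hold` accumulates bytes LSB-first.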
function InflateState() {
this.mode = 0; /* current inflate mode */
this.last = false; /* true if processing last block */
this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */
this.havedict = false; /* true if dictionary provided */
this.flags = 0; /* gzip header method and flags (0 if zlib) */
this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */
this.check = 0; /* protected copy of check value */
this.total = 0; /* protected copy of output count */
// TODO: may be {}
this.head = null; /* where to save gzip header information */
/* sliding window */
this.wbits = 0; /* log base 2 of requested window size */
this.wsize = 0; /* window size or zero if not using window */
this.whave = 0; /* valid bytes in the window */
this.wnext = 0; /* window write index */
this.window = null; /* allocated sliding window, if needed */
/* bit accumulator */
this.hold = 0; /* input bit accumulator */
this.bits = 0; /* number of bits in "in" */
/*
because we don't have pointers in js, we use lencode and distcode directly
as buffers so we don't need codes
*/
//this.codes = new utils.Buf32(ENOUGH); /* space for code tables */
this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */
this.distdyn = null; /* dynamic table for distance codes (JS specific) */
this.sane = 0; /* if false, allow invalid distance too far */
this.back = 0; /* bits back of last unprocessed length/lit */
this.was = 0; /* initial length of match */
}
function inflateResetKeep(strm) {
var state;
state.sane = 1;
state.back = -1;
//Tracev((stderr, "inflate: reset\n"));
return Z_OK;
}
function inflateReset(strm) {
var state;
function inflateInit(strm) {
return inflateInit2(strm, DEF_WBITS);
}
/*
Return state with length and distance decoding tables and index sizes set to
fixed code decoding. Normally this returns fixed tables from inffixed.h.
If BUILDFIXED is defined, then instead this routine builds the tables the
first time it's called, and returns those tables the first time and
thereafter. This reduces the size of the code by about 2K bytes, in
exchange for a little execution time. However, BUILDFIXED should not be
used for threaded applications, since the rewriting of the tables and virgin
may not be thread-safe.
*/
var virgin = true;
function fixedtables(state) {
/* build fixed huffman tables if first call (may not be thread safe) */
if (virgin) {
var sym;
/* literal/length table */
sym = 0;
while (sym < 144) { state.lens[sym++] = 8; }
while (sym < 256) { state.lens[sym++] = 9; }
while (sym < 280) { state.lens[sym++] = 7; }
while (sym < 288) { state.lens[sym++] = 8; }
/* distance table */
sym = 0;
while (sym < 32) { state.lens[sym++] = 5; }
state.lencode = lenfix;
state.lenbits = 9;
state.distcode = distfix;
state.distbits = 5;
}
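// Illustrative note (not part of the original pako source): these are the
// fixed Huffman code lengths from RFC 1951, section 3.2.6 -- 144 literal/length
// codes of 8 bits, 112 of 9 bits, 24 of 7 bits and 8 of 8 bits (288 codes in
// total), plus 32 distance codes of 5 bits.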
/*
Update the window with the last wsize (normally 32K) bytes written before
returning. If window does not exist yet, create it. This is only called
when a window is already in use, or when output has been written during this
inflate call, but the end of the deflate stream has not been reached yet.
It is also called to create a window for dictionary data when a dictionary
is loaded.
Providing output buffers larger than 32K to inflate() should provide a speed
advantage, since only the last 32K of output is copied to the sliding window
upon return from inflate(), and since all distances after the first 32K of
output will fall in the output data, making match copies simpler and faster.
The advantage may be dependent on the size of the processor's data caches.
*/
function updatewindow(strm, src, end, copy) {
var dist;
var state = strm.state;
state = strm.state;
if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */
_in = have;
_out = left;
ret = Z_OK;
//=== INITBITS();
hold = 0;
bits = 0;
//===//
state.mode = FLAGS;
break;
}
state.flags = 0; /* expect zlib header */
if (state.head) {
state.head.done = false;
}
if (!(state.wrap & 1) || /* check if zlib header allowed */
(((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) {
strm.msg = 'incorrect header check';
state.mode = BAD;
break;
}
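// Illustrative check (not part of the original pako source): for the common
// zlib header 0x78 0x9C the first byte sits in the low bits of `hold`, so the
// test computes ((0x78 << 8) + 0x9C) % 31 = 30876 % 31 = 0 and the header is
// accepted, as required by RFC 1950.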
if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) {
strm.msg = 'unknown compression method';
state.mode = BAD;
break;
}
//--- DROPBITS(4) ---//
hold >>>= 4;
bits -= 4;
//---//
len = (hold & 0x0f)/*BITS(4)*/ + 8;
if (state.wbits === 0) {
state.wbits = len;
}
else if (len > state.wbits) {
strm.msg = 'invalid window size';
state.mode = BAD;
break;
}
state.dmax = 1 << len;
//Tracev((stderr, "inflate: zlib header ok\n"));
strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;
state.mode = hold & 0x200 ? DICTID : TYPE;
//=== INITBITS();
hold = 0;
bits = 0;
//===//
break;
case FLAGS:
//=== NEEDBITS(16); */
while (bits < 16) {
if (have === 0) { break inf_leave; }
have--;
hold += input[next++] << bits;
bits += 8;
}
//===//
state.flags = hold;
if ((state.flags & 0xff) !== Z_DEFLATED) {
strm.msg = 'unknown compression method';
state.mode = BAD;
break;
}
if (state.flags & 0xe000) {
strm.msg = 'unknown header flags set';
state.mode = BAD;
break;
}
if (state.head) {
state.head.text = ((hold >> 8) & 1);
}
if (state.flags & 0x0200) {
//=== CRC2(state.check, hold);
hbuf[0] = hold & 0xff;
hbuf[1] = (hold >>> 8) & 0xff;
state.check = crc32(state.check, hbuf, 2, 0);
//===//
}
//=== INITBITS();
hold = 0;
bits = 0;
//===//
state.mode = TIME;
/* falls through */
case TIME:
//=== NEEDBITS(32); */
while (bits < 32) {
if (have === 0) { break inf_leave; }
have--;
hold += input[next++] << bits;
bits += 8;
}
//===//
if (state.head) {
state.head.time = hold;
}
if (state.flags & 0x0200) {
//=== CRC4(state.check, hold)
hbuf[0] = hold & 0xff;
hbuf[1] = (hold >>> 8) & 0xff;
hbuf[2] = (hold >>> 16) & 0xff;
hbuf[3] = (hold >>> 24) & 0xff;
state.check = crc32(state.check, hbuf, 4, 0);
//===
}
//=== INITBITS();
hold = 0;
bits = 0;
//===//
state.mode = OS;
/* falls through */
case OS:
//=== NEEDBITS(16); */
while (bits < 16) {
if (have === 0) { break inf_leave; }
have--;
hold += input[next++] << bits;
bits += 8;
}
//===//
if (state.head) {
state.head.xflags = (hold & 0xff);
state.head.os = (hold >> 8);
}
if (state.flags & 0x0200) {
//=== CRC2(state.check, hold);
hbuf[0] = hold & 0xff;
hbuf[1] = (hold >>> 8) & 0xff;
state.check = crc32(state.check, hbuf, 2, 0);
//===//
}
//=== INITBITS();
hold = 0;
bits = 0;
//===//
state.mode = EXLEN;
/* falls through */
case EXLEN:
if (state.flags & 0x0400) {
//=== NEEDBITS(16); */
while (bits < 16) {
if (have === 0) { break inf_leave; }
have--;
hold += input[next++] << bits;
bits += 8;
}
//===//
state.length = hold;
if (state.head) {
state.head.extra_len = hold;
}
if (state.flags & 0x0200) {
//=== CRC2(state.check, hold);
hbuf[0] = hold & 0xff;
hbuf[1] = (hold >>> 8) & 0xff;
state.check = crc32(state.check, hbuf, 2, 0);
//===//
}
//=== INITBITS();
hold = 0;
bits = 0;
//===//
}
else if (state.head) {
state.head.extra = null/*Z_NULL*/;
}
state.mode = EXTRA;
/* falls through */
case EXTRA:
if (state.flags & 0x0400) {
copy = state.length;
if (copy > have) { copy = have; }
if (copy) {
if (state.head) {
len = state.head.extra_len - state.length;
if (!state.head.extra) {
// Use an untyped array for more convenient processing later
state.head.extra = new Array(state.head.extra_len);
}
utils.arraySet(
state.head.extra,
input,
next,
// extra field is limited to 65536 bytes
// - no need for additional size check
copy,
/*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/
len
);
//zmemcpy(state.head.extra + len, next,
// len + copy > state.head.extra_max ?
// state.head.extra_max - len : copy);
}
if (state.flags & 0x0200) {
state.check = crc32(state.check, input, copy, next);
}
have -= copy;
next += copy;
state.length -= copy;
}
if (state.length) { break inf_leave; }
}
state.length = 0;
state.mode = NAME;
/* falls through */
case NAME:
if (state.flags & 0x0800) {
if (have === 0) { break inf_leave; }
copy = 0;
do {
// TODO: 2 or 1 bytes?
len = input[next + copy++];
/* use constant limit because in js we should not preallocate memory */
if (state.head && len &&
(state.length < 65536 /*state.head.name_max*/)) {
state.head.name += String.fromCharCode(len);
}
} while (len && copy < have);
if (ret) {
strm.msg = 'invalid code lengths set';
state.mode = BAD;
break;
}
//Tracev((stderr, "inflate: code lengths ok\n"));
state.have = 0;
state.mode = CODELENS;
/* falls through */
case CODELENS:
while (state.have < state.nlen + state.ndist) {
for (;;) {
here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/
here_bits = here >>> 24;
here_op = (here >>> 16) & 0xff;
here_val = here & 0xffff;
if (ret) {
strm.msg = 'invalid literal/lengths set';
state.mode = BAD;
break;
}
state.distbits = 6;
//state.distcode.copy(state.codes);
// Switch to use dynamic table
state.distcode = state.distdyn;
opts = { bits: state.distbits };
ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist,
state.distcode, 0, state.work, opts);
// We have separate tables & no pointers. 2 commented lines below not needed.
// state.next_index = opts.table_index;
state.distbits = opts.bits;
// state.distcode = state.next;
if (ret) {
strm.msg = 'invalid distances set';
state.mode = BAD;
break;
}
//Tracev((stderr, 'inflate: codes ok\n'));
state.mode = LEN_;
if (flush === Z_TREES) { break inf_leave; }
/* falls through */
case LEN_:
state.mode = LEN;
/* falls through */
case LEN:
if (have >= 6 && left >= 258) {
//--- RESTORE() ---
strm.next_out = put;
strm.avail_out = left;
strm.next_in = next;
strm.avail_in = have;
state.hold = hold;
state.bits = bits;
//---
inflate_fast(strm, _out);
//--- LOAD() ---
put = strm.next_out;
output = strm.output;
left = strm.avail_out;
next = strm.next_in;
input = strm.input;
have = strm.avail_in;
hold = state.hold;
bits = state.bits;
//---
}
_out = left;
// NB: crc32 stored as signed 32-bit int, zswap32 returns signed too
if ((state.flags ? hold : zswap32(hold)) !== state.check) {
strm.msg = 'incorrect data check';
state.mode = BAD;
break;
}
//=== INITBITS();
hold = 0;
bits = 0;
//===//
//Tracev((stderr, "inflate: check matches trailer\n"));
}
state.mode = LENGTH;
/* falls through */
case LENGTH:
if (state.wrap && state.flags) {
//=== NEEDBITS(32);
while (bits < 32) {
if (have === 0) { break inf_leave; }
have--;
hold += input[next++] << bits;
bits += 8;
}
//===//
if (hold !== (state.total & 0xffffffff)) {
strm.msg = 'incorrect length check';
state.mode = BAD;
break;
}
//=== INITBITS();
hold = 0;
bits = 0;
//===//
//Tracev((stderr, "inflate: length matches trailer\n"));
}
state.mode = DONE;
/* falls through */
case DONE:
ret = Z_STREAM_END;
break inf_leave;
case BAD:
ret = Z_DATA_ERROR;
break inf_leave;
case MEM:
return Z_MEM_ERROR;
case SYNC:
/* falls through */
default:
return Z_STREAM_ERROR;
}
}
// inf_leave <- here is real place for "goto inf_leave", emulated via "break inf_leave"
/*
Return from inflate(), updating the total counts and the check value.
If there was no progress during the inflate() call, return a buffer
error. Call updatewindow() to create and/or update the window state.
Note: a memory error from inflate() is non-recoverable.
*/
function inflateEnd(strm) {
/* check state */
if (!strm || !strm.state) { return Z_STREAM_ERROR; }
state = strm.state;
if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; }
var state;
var dictid;
var ret;
/* check state */
if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; }
state = strm.state;
exports.inflateReset = inflateReset;
exports.inflateReset2 = inflateReset2;
exports.inflateResetKeep = inflateResetKeep;
exports.inflateInit = inflateInit;
exports.inflateInit2 = inflateInit2;
exports.inflate = inflate;
exports.inflateEnd = inflateEnd;
exports.inflateGetHeader = inflateGetHeader;
exports.inflateSetDictionary = inflateSetDictionary;
exports.inflateInfo = 'pako inflate (from Nodeca project)';
/* Not implemented
exports.inflateCopy = inflateCopy;
exports.inflateGetDictionary = inflateGetDictionary;
exports.inflateMark = inflateMark;
exports.inflatePrime = inflatePrime;
exports.inflateSync = inflateSync;
exports.inflateSyncPoint = inflateSyncPoint;
exports.inflateUndermine = inflateUndermine;
*/
},{"../utils/common":41,"./adler32":43,"./crc32":45,"./inffast":48,"./
inftrees":50}],50:[function(require,module,exports){
'use strict';
/*
This routine assumes, but does not check, that all of the entries in
lens[] are in the range 0..MAXBITS. The caller must assure this.
1..MAXBITS is interpreted as that code length. zero means that symbol
does not occur in this code.
The codes are sorted by computing a count of codes for each length,
creating from that a table of starting indices for each length in the
sorted table, and then entering the symbols in order in the sorted
table. The sorted table is work[], with that space being provided by
the caller.
The length counts are used for other purposes as well, i.e. finding
the minimum and maximum length codes, determining if there are any
codes at all, checking for a valid set of lengths, and looking ahead
at length counts to determine sub-table sizes when building the
decoding tables.
*/
//table.op[opts.table_index] = 64;
//table.bits[opts.table_index] = 1;
//table.val[opts.table_index++] = 0;
table[table_index++] = (1 << 24) | (64 << 16) | 0;
opts.bits = 1;
return 0; /* no symbols, but wait for decoding to report error */
}
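// Illustrative decoding of a packed entry (not part of the original pako
// source): for entry = (1 << 24) | (64 << 16) | 0, the inflate/inffast code
// reads bits = entry >>> 24 (= 1), op = (entry >>> 16) & 0xff (= 64, the
// invalid-code marker) and val = entry & 0xffff (= 0).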
for (min = 1; min < max; min++) {
if (count[min] !== 0) { break; }
}
if (root < min) {
root = min;
}
/* generate offsets into symbol table for each length for sorting */
offs[1] = 0;
for (len = 1; len < MAXBITS; len++) {
offs[len + 1] = offs[len] + count[len];
}
/*
Create and fill in decoding tables. In this loop, the table being
filled is at next and has curr index bits. The code being used is huff
with length len. That code is converted to an index by dropping drop
bits off of the bottom. For codes where len is less than drop + curr,
those top drop + curr - len bits are incremented through all values to
fill the table with replicated entries.
root is the number of index bits for the root table. When len exceeds
root, sub-tables are created pointed to by the root entry with an index
of the low root bits of huff. This is saved in low to check for when a
new sub-table should be started. drop is zero when the root table is
being filled, and drop is root when sub-tables are being filled.
used keeps track of how many table entries have been allocated from the
provided *table space. It is checked for LENS and DIST tables against
the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in
the initial root table size constants. See the comments in inftrees.h
for more information.
sym increments through all symbols, and the loop terminates when
all codes of length max, i.e. all codes, have been processed. This
routine permits incomplete codes, so another loop after this one fills
in the rest of the decoding tables with invalid code markers.
*/
} else { /* DISTS */
base = dbase;
extra = dext;
end = -1;
}
/* replicate for those indices with low len bits equal to huff */
incr = 1 << (len - drop);
fill = 1 << curr;
min = fill; /* save offset to next table */
do {
fill -= incr;
table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val | 0;
} while (fill !== 0);
},{"../utils/common":41}],51:[function(require,module,exports){
'use strict';
module.exports = {
2: 'need dictionary', /* Z_NEED_DICT 2 */
1: 'stream end', /* Z_STREAM_END 1 */
0: '', /* Z_OK 0 */
'-1': 'file error', /* Z_ERRNO (-1) */
'-2': 'stream error', /* Z_STREAM_ERROR (-2) */
'-3': 'data error', /* Z_DATA_ERROR (-3) */
'-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */
'-5': 'buffer error', /* Z_BUF_ERROR (-5) */
'-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */
};
},{}],52:[function(require,module,exports){
'use strict';
//var Z_FILTERED = 1;
//var Z_HUFFMAN_ONLY = 2;
//var Z_RLE = 3;
var Z_FIXED = 4;
//var Z_DEFAULT_STRATEGY = 0;
/*============================================================================*/
// From zutil.h
var STORED_BLOCK = 0;
var STATIC_TREES = 1;
var DYN_TREES = 2;
/* The three kinds of block type */
var MIN_MATCH = 3;
var MAX_MATCH = 258;
/* The minimum and maximum match lengths */
// From deflate.h
/* ===========================================================================
* Internal compression state.
*/
/* ===========================================================================
* Constants
*/
var MAX_BL_BITS = 7;
/* Bit length codes must not exceed MAX_BL_BITS bits */
/* eslint-disable comma-spacing,array-bracket-spacing */
var extra_lbits = /* extra bits for each length code */
[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0];
var bl_order =
[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];
/* eslint-enable comma-spacing,array-bracket-spacing */
/* The lengths of the bit length codes are sent in order of decreasing
* probability, to avoid transmitting the lengths for unused bit length codes.
*/
/* ===========================================================================
* Local data. These are initialized only once.
*/
// !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1
var static_ltree = new Array((L_CODES + 2) * 2);
zero(static_ltree);
/* The static literal tree. Since the bit lengths are imposed, there is no
* need for the L_CODES extra codes used during heap construction. However,
* the codes 286 and 287 are needed to build a canonical tree (see _tr_init
* below).
*/
var static_l_desc;
var static_d_desc;
var static_bl_desc;
function d_code(dist) {
return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)];
}
/* ===========================================================================
* Output a short LSB first on the stream.
* IN assertion: there is enough room in pendingBuf.
*/
function put_short(s, w) {
// put_byte(s, (uch)((w) & 0xff));
// put_byte(s, (uch)((ush)(w) >> 8));
s.pending_buf[s.pending++] = (w) & 0xff;
s.pending_buf[s.pending++] = (w >>> 8) & 0xff;
}
/* ===========================================================================
* Send a value on a given number of bits.
* IN assertion: length <= 16 and value fits in length bits.
*/
function send_bits(s, value, length) {
if (s.bi_valid > (Buf_size - length)) {
s.bi_buf |= (value << s.bi_valid) & 0xffff;
put_short(s, s.bi_buf);
s.bi_buf = value >> (Buf_size - s.bi_valid);
s.bi_valid += length - Buf_size;
} else {
s.bi_buf |= (value << s.bi_valid) & 0xffff;
s.bi_valid += length;
}
}
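// Illustrative trace (not part of the original pako source), assuming an empty
// bit buffer (bi_buf = 0, bi_valid = 0) and Buf_size = 16:
//   send_bits(s, 5, 3) -> bi_buf = 0b101,   bi_valid = 3
//   send_bits(s, 3, 2) -> bi_buf = 0b11101, bi_valid = 5
// Values are inserted above the bits already present (LSB first); when a value
// no longer fits, the low 16 bits are flushed with put_short().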
/* ===========================================================================
* Reverse the first len bits of a code, using straightforward code (a faster
* method would use a table)
* IN assertion: 1 <= len <= 15
*/
function bi_reverse(code, len) {
var res = 0;
do {
res |= code & 1;
code >>>= 1;
res <<= 1;
} while (--len > 0);
return res >>> 1;
}
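// Illustrative example (not part of the original pako source):
// bi_reverse(0x0d, 4) reverses the four low bits 1101 -> 1011 and returns 0x0b.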
/* ===========================================================================
* Flush the bit buffer, keeping at most 7 bits in it.
*/
function bi_flush(s) {
if (s.bi_valid === 16) {
put_short(s, s.bi_buf);
s.bi_buf = 0;
s.bi_valid = 0;
/* ===========================================================================
* Compute the optimal bit lengths for a tree and update the total bit length
* for the current block.
* IN assertion: the fields freq and dad are set, heap[heap_max] and
* above are the tree nodes sorted by increasing frequency.
* OUT assertions: the field len is set to the optimal bit length, the
* array bl_count contains the frequencies for each bit length.
* The length opt_len is updated; static_len is also updated if stree is
* not null.
*/
function gen_bitlen(s, desc)
// deflate_state *s;
// tree_desc *desc; /* the tree descriptor */
{
var tree = desc.dyn_tree;
var max_code = desc.max_code;
var stree = desc.stat_desc.static_tree;
var has_stree = desc.stat_desc.has_stree;
var extra = desc.stat_desc.extra_bits;
var base = desc.stat_desc.extra_base;
var max_length = desc.stat_desc.max_length;
var h; /* heap index */
var n, m; /* iterate over the tree elements */
var bits; /* bit length */
var xbits; /* extra bits */
var f; /* frequency */
var overflow = 0; /* number of elements with bit length too large */
s.bl_count[bits]++;
xbits = 0;
if (n >= base) {
xbits = extra[n - base];
}
f = tree[n * 2]/*.Freq*/;
s.opt_len += f * (bits + xbits);
if (has_stree) {
s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);
}
}
if (overflow === 0) { return; }
/* ===========================================================================
* Generate the codes for a given tree and bit counts (which need not be
* optimal).
* IN assertion: the array bl_count contains the bit length statistics for
* the given tree and the field len is set for all tree elements.
* OUT assertion: the field code is set for all tree elements of non
* zero code length.
*/
function gen_codes(tree, max_code, bl_count)
// ct_data *tree; /* the tree to decorate */
// int max_code; /* largest code with non zero frequency */
// ushf *bl_count; /* number of codes at each bit length */
{
var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */
var code = 0; /* running code value */
var bits; /* bit index */
var n; /* code index */
/* The distribution counts are first used to generate the code values
* without bit reversal.
*/
for (bits = 1; bits <= MAX_BITS; bits++) {
next_code[bits] = code = (code + bl_count[bits - 1]) << 1;
}
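// Illustrative worked example (not part of the original pako source), taken
// from RFC 1951, section 3.2.2: for bit length counts bl_count = [0, 0, 1, 5, 2]
// (one 2-bit code, five 3-bit codes, two 4-bit codes) the loop above yields
// next_code = [-, 0, 0, 2, 14], so the codes handed out in symbol order are
// 00, then 010..110, then 1110 and 1111 (before bit reversal).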
/* Check that the bit counts in bl_count are consistent. The last code
* must be all ones.
*/
//Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
// "inconsistent bit counts");
//Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
/* ===========================================================================
* Initialize the various 'constant' tables.
*/
function tr_static_init() {
var n; /* iterates over tree elements */
var bits; /* bit counter */
var length; /* length value */
var code; /* code value */
var dist; /* distance index */
var bl_count = new Array(MAX_BITS + 1);
/* number of codes at each bit length for an optimal tree */
// do check in _tr_init()
//if (static_init_done) return;
n = 0;
while (n <= 143) {
static_ltree[n * 2 + 1]/*.Len*/ = 8;
n++;
bl_count[8]++;
}
while (n <= 255) {
static_ltree[n * 2 + 1]/*.Len*/ = 9;
n++;
bl_count[9]++;
}
while (n <= 279) {
static_ltree[n * 2 + 1]/*.Len*/ = 7;
n++;
bl_count[7]++;
}
while (n <= 287) {
static_ltree[n * 2 + 1]/*.Len*/ = 8;
n++;
bl_count[8]++;
}
/* Codes 286 and 287 do not exist, but we must include them in the
* tree construction to get a canonical Huffman tree (longest code
* all ones)
*/
gen_codes(static_ltree, L_CODES + 1, bl_count);
//static_init_done = true;
}
/* ===========================================================================
* Initialize a new block.
*/
function init_block(s) {
var n; /* iterates over tree elements */
s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;
s.opt_len = s.static_len = 0;
s.last_lit = s.matches = 0;
}
/* ===========================================================================
* Flush the bit buffer and align the output on a byte boundary
*/
function bi_windup(s)
{
if (s.bi_valid > 8) {
put_short(s, s.bi_buf);
} else if (s.bi_valid > 0) {
//put_byte(s, (Byte)s->bi_buf);
s.pending_buf[s.pending++] = s.bi_buf;
}
s.bi_buf = 0;
s.bi_valid = 0;
}
/* ===========================================================================
* Copy a stored block, storing first the length and its
* one's complement if requested.
*/
function copy_block(s, buf, len, header)
//DeflateState *s;
//charf *buf; /* the input data */
//unsigned len; /* its length */
//int header; /* true if block header must be written */
{
bi_windup(s); /* align on byte boundary */
if (header) {
put_short(s, len);
put_short(s, ~len);
}
// while (len--) {
// put_byte(s, *buf++);
// }
utils.arraySet(s.pending_buf, s.window, buf, len, s.pending);
s.pending += len;
}
/* ===========================================================================
* Compares two subtrees, using the tree depth as tie breaker when
* the subtrees have equal frequency. This minimizes the worst case length.
*/
function smaller(tree, n, m, depth) {
var _n2 = n * 2;
var _m2 = m * 2;
return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ ||
(tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m]));
}
/* ===========================================================================
* Restore the heap property by moving down the tree starting at node k,
* exchanging a node with the smallest of its two sons if necessary, stopping
* when the heap property is re-established (each father smaller than its
* two sons).
*/
function pqdownheap(s, tree, k)
// deflate_state *s;
// ct_data *tree; /* the tree to restore */
// int k; /* node to move down */
{
var v = s.heap[k];
var j = k << 1; /* left son of k */
while (j <= s.heap_len) {
/* Set j to the smallest of the two sons: */
if (j < s.heap_len &&
smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {
j++;
}
/* Exit if v is smaller than both sons */
if (smaller(tree, v, s.heap[j], s.depth)) { break; }
// inlined manually
// var SMALLEST = 1;
/* ===========================================================================
* Send the block data compressed using the given Huffman trees
*/
function compress_block(s, ltree, dtree)
// deflate_state *s;
// const ct_data *ltree; /* literal tree */
// const ct_data *dtree; /* distance tree */
{
var dist; /* distance of matched string */
var lc; /* match length or unmatched char (if dist == 0) */
var lx = 0; /* running index in l_buf */
var code; /* the code to send */
var extra; /* number of extra bits to send */
if (s.last_lit !== 0) {
do {
dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]);
lc = s.pending_buf[s.l_buf + lx];
lx++;
if (dist === 0) {
send_code(s, lc, ltree); /* send a literal byte */
//Tracecv(isgraph(lc), (stderr," '%c' ", lc));
} else {
/* Here, lc is the match length - MIN_MATCH */
code = _length_code[lc];
send_code(s, code + LITERALS + 1, ltree); /* send the length code */
extra = extra_lbits[code];
if (extra !== 0) {
lc -= base_length[code];
send_bits(s, lc, extra); /* send the extra length bits */
}
dist--; /* dist is now the match distance - 1 */
code = d_code(dist);
//Assert (code < D_CODES, "bad d_code");
/* ===========================================================================
* Construct one Huffman tree and assigns the code bit strings and lengths.
* Update the total bit length for the current block.
* IN assertion: the field freq is set for all tree elements.
* OUT assertions: the fields len and code are set to the optimal bit length
* and corresponding code. The length opt_len is updated; static_len is
* also updated if stree is not null. The field max_code is set.
*/
function build_tree(s, desc)
// deflate_state *s;
// tree_desc *desc; /* the tree descriptor */
{
var tree = desc.dyn_tree;
var stree = desc.stat_desc.static_tree;
var has_stree = desc.stat_desc.has_stree;
var elems = desc.stat_desc.elems;
var n, m; /* iterate over heap elements */
var max_code = -1; /* largest code with non zero frequency */
var node; /* new node being created */
} else {
tree[n * 2 + 1]/*.Len*/ = 0;
}
}
/* The pkzip format requires that at least one distance code exists,
* and that at least one bit should be sent even if there is only one
* possible code. So to avoid special checks later on we force at least
* two codes of non zero frequency.
*/
while (s.heap_len < 2) {
node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);
tree[node * 2]/*.Freq*/ = 1;
s.depth[node] = 0;
s.opt_len--;
if (has_stree) {
s.static_len -= stree[node * 2 + 1]/*.Len*/;
}
/* node is 0 or 1 so it does not have extra bits */
}
desc.max_code = max_code;
s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];
/* At this point, the fields freq and dad are set. We can now
* generate the bit lengths.
*/
gen_bitlen(s, desc);
/* The field len is now set, we can generate the bit codes */
gen_codes(tree, max_code, s.bl_count);
}
/* ===========================================================================
* Scan a literal or distance tree to determine the frequencies of the codes
* in the bit length tree.
*/
function scan_tree(s, tree, max_code)
// deflate_state *s;
// ct_data *tree; /* the tree to be scanned */
// int max_code; /* and its largest code of non zero frequency */
{
var n; /* iterates over all tree elements */
var prevlen = -1; /* last emitted length */
var curlen; /* length of current code */
if (nextlen === 0) {
max_count = 138;
min_count = 3;
}
tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */
} else {
s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++;
}
count = 0;
prevlen = curlen;
if (nextlen === 0) {
max_count = 138;
min_count = 3;
} else {
max_count = 7;
min_count = 4;
}
}
}
/* ===========================================================================
* Send a literal or distance tree in compressed form, using the codes in
* bl_tree.
*/
function send_tree(s, tree, max_code)
// deflate_state *s;
// ct_data *tree; /* the tree to be scanned */
// int max_code; /* and its largest code of non zero frequency */
{
var n; /* iterates over all tree elements */
var prevlen = -1; /* last emitted length */
var curlen; /* length of current code */
} else {
send_code(s, REPZ_11_138, s.bl_tree);
send_bits(s, count - 11, 7);
}
count = 0;
prevlen = curlen;
if (nextlen === 0) {
max_count = 138;
min_count = 3;
} else {
max_count = 7;
min_count = 4;
}
}
}
/* ===========================================================================
* Construct the Huffman tree for the bit lengths and return the index in
* bl_order of the last bit length code to send.
*/
function build_bl_tree(s) {
var max_blindex; /* index of last bit length code of non zero freq */
/* Determine the bit length frequencies for literal and distance trees */
scan_tree(s, s.dyn_ltree, s.l_desc.max_code);
scan_tree(s, s.dyn_dtree, s.d_desc.max_code);
/* Determine the number of bit length codes to send. The pkzip format
* requires that at least 4 bit length codes be sent. (appnote.txt says
* 3 but the actual value used is 4.)
*/
for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {
if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) {
break;
}
}
/* Update opt_len to include the bit length tree and counts */
s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
//Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
// s->opt_len, s->static_len));
return max_blindex;
}
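// Illustrative breakdown (not part of the original pako source): the
// 5 + 5 + 4 bits added above are the HLIT, HDIST and HCLEN fields of a dynamic
// block header, and 3 * (max_blindex + 1) covers the 3-bit lengths of the bit
// length codes actually transmitted; compare with send_all_trees() below.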
/* ===========================================================================
* Send the header for a block using dynamic Huffman trees: the counts, the
* lengths of the bit length codes, the literal tree and the distance tree.
* IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
*/
function send_all_trees(s, lcodes, dcodes, blcodes)
// deflate_state *s;
// int lcodes, dcodes, blcodes; /* number of codes for each tree */
{
var rank; /* index in bl_order */
//Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
//Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
// "too many codes");
//Tracev((stderr, "\nbl counts: "));
send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */
send_bits(s, dcodes - 1, 5);
send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */
for (rank = 0; rank < blcodes; rank++) {
//Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);
}
//Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
/* ===========================================================================
* Check if the data type is TEXT or BINARY, using the following algorithm:
* - TEXT if the two conditions below are satisfied:
* a) There are no non-portable control characters belonging to the
* "black list" (0..6, 14..25, 28..31).
* b) There is at least one printable character belonging to the
* "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).
* - BINARY otherwise.
* - The following partially-portable control characters form a
* "gray list" that is ignored in this detection algorithm:
* (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).
* IN assertion: the fields Freq of dyn_ltree are set.
*/
function detect_data_type(s) {
  /* black_mask is the bit mask of black-listed bytes
   * set bits 0..6, 14..25, and 28..31
   * 0xf3ffc07f = binary 11110011111111111100000001111111
   */
  var black_mask = 0xf3ffc07f;
  var n;
  /* Check for non-textual ("black-listed") bytes. */
  for (n = 0; n <= 31; n++, black_mask >>>= 1) {
    if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) {
      return Z_BINARY;
    }
  }
  /* Check for textual ("white-listed") bytes. */
  if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 ||
      s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) {
    return Z_TEXT;
  }
  for (n = 32; n < LITERALS; n++) {
    if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) { return Z_TEXT; }
  }
  /* There are no black-listed or white-listed bytes:
   * this stream either is empty or has tolerated ("gray-listed") bytes only.
   */
  return Z_BINARY;
}
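/* Illustrative example (not from pako): black_mask is indexed by byte value,
 * one bit per value 0..31, e.g.
 *   (0xf3ffc07f >>> 0) & 1   // 1: NUL (0) is black-listed
 *   (0xf3ffc07f >>> 9) & 1   // 0: TAB (9) is white-listed
 *   (0xf3ffc07f >>> 7) & 1   // 0: BEL (7) is gray-listed and ignored
 */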
/* ===========================================================================
* Initialize the tree data structures for a new zlib stream.
*/
function _tr_init(s)
{
if (!static_init_done) {
tr_static_init();
static_init_done = true;
  }
  s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc);
  s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc);
  s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);
  s.bi_buf = 0;
  s.bi_valid = 0;
  /* Initialize the first block of the first file: */
  init_block(s);
}
/* ===========================================================================
* Send a stored block
*/
function _tr_stored_block(s, buf, stored_len, last)
//DeflateState *s;
//charf *buf; /* input block */
//ulg stored_len; /* length of input block */
//int last; /* one if this is the last block for a file */
{
send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */
copy_block(s, buf, stored_len, true); /* with header */
}
/* ===========================================================================
* Send one empty static block to give enough lookahead for inflate.
* This takes 10 bits, of which 7 may remain in the bit buffer.
*/
function _tr_align(s) {
send_bits(s, STATIC_TREES << 1, 3);
send_code(s, END_BLOCK, static_ltree);
bi_flush(s);
}
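/* Worked example (illustrative, not from pako): the empty static block costs
 * 3 bits for the block type plus 7 bits for END_BLOCK (code 256 is a 7-bit code
 * in the fixed literal/length tree), i.e. 10 bits, of which at most 7 can still
 * be pending in the bit buffer after bi_flush.
 */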
/* ===========================================================================
* Determine the best encoding for the current block: dynamic trees, static
* trees or store, and output the encoded block to the zip file.
*/
function _tr_flush_block(s, buf, stored_len, last)
//DeflateState *s;
//charf *buf; /* input block, or NULL if too old */
//ulg stored_len; /* length of input block */
//int last; /* one if this is the last block for a file */
{
  var opt_lenb, static_lenb;  /* opt_len and static_len in bytes */
  var max_blindex = 0;        /* index of last bit length code of non zero freq */
  /* Build the Huffman trees unless a stored block is forced */
  if (s.level > 0) {
    /* Check if the file is binary or text */
    if (s.strm.data_type === Z_UNKNOWN) {
      s.strm.data_type = detect_data_type(s);
    }
    /* Construct the literal and distance trees */
    build_tree(s, s.l_desc);
    // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, s->static_len));
    build_tree(s, s.d_desc);
    // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, s->static_len));
    /* At this point, opt_len and static_len are the total bit lengths of
     * the compressed block data, excluding the tree representations.
     */
    /* Build the bit length tree for the above two trees, and get the index
     * in bl_order of the last bit length code to send.
     */
    max_blindex = build_bl_tree(s);
    /* Determine the best encoding. Compute the block lengths in bytes. */
    opt_lenb = (s.opt_len + 3 + 7) >>> 3;
    static_lenb = (s.static_len + 3 + 7) >>> 3;
    if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }
  } else {
    // Assert(buf != (char*)0, "lost buf");
    opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
  }
  if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {
    /* 4: two words for the lengths */
    _tr_stored_block(s, buf, stored_len, last);
  } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) {
    send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);
    compress_block(s, static_ltree, static_dtree);
  } else {
    send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);
    send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);
    compress_block(s, s.dyn_ltree, s.dyn_dtree);
  }
// Assert (s->compressed_len == s->bits_sent, "bad compressed size");
/* The above check is made mod 2^32, for files larger than 512 MB
* and uLong implemented on 32 bits.
*/
init_block(s);
if (last) {
bi_windup(s);
}
// Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
// s->compressed_len-7*last));
}
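/* Worked example (illustrative, not from pako): the cheapest block type wins.
 * If opt_lenb = 100 bytes, static_lenb = 120 bytes and stored_len = 90 bytes,
 * then stored_len + 4 <= opt_lenb holds (94 <= 100) and the block is emitted
 * stored; if instead stored_len were 200, the dynamic trees (100 bytes) would
 * be chosen with the default strategy.
 */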
/* ===========================================================================
* Save the match info and tally the frequency counts. Return true if
* the current block must be flushed.
*/
function _tr_tally(s, dist, lc)
// deflate_state *s;
// unsigned dist; /* distance of matched string */
// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
{
  //var out_length, in_length, dcode;
  s.pending_buf[s.d_buf + s.last_lit * 2]     = (dist >>> 8) & 0xff;
  s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff;
  s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff;
  s.last_lit++;
if (dist === 0) {
/* lc is the unmatched char */
s.dyn_ltree[lc * 2]/*.Freq*/++;
} else {
s.matches++;
/* Here, lc is the match length - MIN_MATCH */
dist--; /* dist = match distance - 1 */
//Assert((ush)dist < (ush)MAX_DIST(s) &&
// (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
  //       (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
  s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++;
  s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;
  }
//#ifdef TRUNCATE_BLOCK
// /* Try to guess if it is profitable to stop the current block here */
// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) {
// /* Compute an upper bound for the compressed length */
// out_length = s.last_lit*8;
// in_length = s.strstart - s.block_start;
//
// for (dcode = 0; dcode < D_CODES; dcode++) {
// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]);
// }
// out_length >>>= 3;
// //Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
// // s->last_lit, in_length, out_length,
// // 100L - out_length*100L/in_length));
//    if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) {
// return true;
// }
// }
//#endif
  return (s.last_lit === s.lit_bufsize - 1);
  /* We avoid equality with lit_bufsize because of wraparound at 64K
   * on 16 bit machines and because stored blocks are restricted to
   * 64K-1 bytes.
   */
}
exports._tr_init = _tr_init;
exports._tr_stored_block = _tr_stored_block;
exports._tr_flush_block = _tr_flush_block;
exports._tr_tally = _tr_tally;
exports._tr_align = _tr_align;
},{"../utils/common":41}],53:[function(require,module,exports){
'use strict';
function ZStream() {
/* next input byte */
this.input = null; // JS specific, because we have no pointers
this.next_in = 0;
/* number of bytes available at input */
this.avail_in = 0;
/* total number of input bytes read so far */
this.total_in = 0;
/* next output byte should be put there */
this.output = null; // JS specific, because we have no pointers
this.next_out = 0;
/* remaining free space at output */
this.avail_out = 0;
/* total number of bytes output so far */
this.total_out = 0;
/* last error message, NULL if no error */
this.msg = ''/*Z_NULL*/;
/* not visible by applications */
this.state = null;
/* best guess about the data type: binary or text */
this.data_type = 2/*Z_UNKNOWN*/;
/* adler32 value of the uncompressed data */
this.adler = 0;
}
module.exports = ZStream;
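/* Illustrative usage (a sketch; field handling follows the comments above, the
 * actual driver code lives in pako's deflate/inflate wrappers):
 *   var strm = new ZStream();
 *   strm.input = chunk;                    // Uint8Array to read from
 *   strm.next_in = 0; strm.avail_in = chunk.length;
 *   strm.output = new Uint8Array(16384);   // buffer to write into
 *   strm.next_out = 0; strm.avail_out = strm.output.length;
 * next_in / next_out are array indices rather than C pointers.
 */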
},{}],54:[function(require,module,exports){
'use strict';
module.exports = typeof setImmediate === 'function' ? setImmediate :
function setImmediate() {
var args = [].slice.apply(arguments);
args.splice(1, 0, 0);
setTimeout.apply(null, args);
};
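/* Illustrative note: splicing a 0 in at index 1 turns the argument list
 * (fn, a, b) into (fn, 0, a, b), so the fallback is equivalent to
 *   setTimeout(fn, 0, a, b)
 * i.e. setImmediate semantics approximated with a zero-delay timeout.
 */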
},{}]},{},[10])(10)
});