1 /*! 2 3 JSZip v3.2.1 - A JavaScript class for generating and reading zip files 4 <http://stuartk.com/jszip> 5 6 (c) 2009-2016 Stuart Knightley <stuart [at] stuartk.com> 7 Dual licenced under the MIT license or GPLv3. See https://raw.github.com/Stuk/jszip/master/LICENSE.markdown. 8 9 JSZip uses the library pako released under the MIT license : 10 https://github.com/nodeca/pako/blob/master/LICENSE 11 */ 12 13 (function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.JSZip = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){ 14 'use strict'; 15 var utils = require('./utils'); 16 var support = require('./support'); 17 // private property 18 var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; 19 20 21 // public method for encoding 22 exports.encode = function(input) { 23 var output = []; 24 var chr1, chr2, chr3, enc1, enc2, enc3, enc4; 25 var i = 0, len = input.length, remainingBytes = len; 26 27 var isArray = utils.getTypeOf(input) !== "string"; 28 while (i < input.length) { 29 remainingBytes = len - i; 30 31 if (!isArray) { 32 chr1 = input.charCodeAt(i++); 33 chr2 = i < len ? input.charCodeAt(i++) : 0; 34 chr3 = i < len ? input.charCodeAt(i++) : 0; 35 } else { 36 chr1 = input[i++]; 37 chr2 = i < len ? 
input[i++] : 0; 38 chr3 = i < len ? input[i++] : 0; 39 } 40 41 enc1 = chr1 >> 2; 42 enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); 43 enc3 = remainingBytes > 1 ? (((chr2 & 15) << 2) | (chr3 >> 6)) : 64; 44 enc4 = remainingBytes > 2 ? (chr3 & 63) : 64; 45 46 output.push(_keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4)); 47 48 } 49 50 return output.join(""); 51 }; 52 53 // public method for decoding 54 exports.decode = function(input) { 55 var chr1, chr2, chr3; 56 var enc1, enc2, enc3, enc4; 57 var i = 0, resultIndex = 0; 58 59 var dataUrlPrefix = "data:"; 60 61 if (input.substr(0, dataUrlPrefix.length) === dataUrlPrefix) { 62 // This is a common error: people give a data url 63 // (data:image/png;base64,iVBOR...) with a {base64: true} and 64 // wonders why things don't work. 65 // We can detect that the string input looks like a data url but we 66 // *can't* be sure it is one: removing everything up to the comma would 67 // be too dangerous. 68 throw new Error("Invalid base64 input, it looks like a data url."); 69 } 70 71 input = input.replace(/[^A-Za-z0-9\+\/\=]/g, ""); 72 73 var totalLength = input.length * 3 / 4; 74 if(input.charAt(input.length - 1) === _keyStr.charAt(64)) { 75 totalLength--; 76 } 77 if(input.charAt(input.length - 2) === _keyStr.charAt(64)) { 78 totalLength--; 79 } 80 if (totalLength % 1 !== 0) { 81 // totalLength is not an integer, the length does not match a valid 82 // base64 content. 
That can happen if: 83 // - the input is not a base64 content 84 // - the input is *almost* a base64 content, with a extra chars at the 85 // beginning or at the end 86 // - the input uses a base64 variant (base64url for example) 87 throw new Error("Invalid base64 input, bad content length."); 88 } 89 var output; 90 if (support.uint8array) { 91 output = new Uint8Array(totalLength|0); 92 } else { 93 output = new Array(totalLength|0); 94 } 95 96 while (i < input.length) { 97 98 enc1 = _keyStr.indexOf(input.charAt(i++)); 99 enc2 = _keyStr.indexOf(input.charAt(i++)); 100 enc3 = _keyStr.indexOf(input.charAt(i++)); 101 enc4 = _keyStr.indexOf(input.charAt(i++)); 102 103 chr1 = (enc1 << 2) | (enc2 >> 4); 104 chr2 = ((enc2 & 15) << 4) | (enc3 >> 2); 105 chr3 = ((enc3 & 3) << 6) | enc4; 106 107 output[resultIndex++] = chr1; 108 109 if (enc3 !== 64) { 110 output[resultIndex++] = chr2; 111 } 112 if (enc4 !== 64) { 113 output[resultIndex++] = chr3; 114 } 115 116 } 117 118 return output; 119 }; 120 121 },{"./support":30,"./utils":32}],2:[function(require,module,exports){ 122 'use strict'; 123 124 var external = require("./external"); 125 var DataWorker = require('./stream/DataWorker'); 126 var DataLengthProbe = require('./stream/DataLengthProbe'); 127 var Crc32Probe = require('./stream/Crc32Probe'); 128 var DataLengthProbe = require('./stream/DataLengthProbe'); 129 130 /** 131 * Represent a compressed object, with everything needed to decompress it. 132 * @constructor 133 * @param {number} compressedSize the size of the data compressed. 134 * @param {number} uncompressedSize the size of the data after decompression. 135 * @param {number} crc32 the crc32 of the decompressed file. 136 * @param {object} compression the type of compression, see lib/compressions.js. 137 * @param {String|ArrayBuffer|Uint8Array|Buffer} data the compressed data. 
138 */ 139 function CompressedObject(compressedSize, uncompressedSize, crc32, compression, data) { 140 this.compressedSize = compressedSize; 141 this.uncompressedSize = uncompressedSize; 142 this.crc32 = crc32; 143 this.compression = compression; 144 this.compressedContent = data; 145 } 146 147 CompressedObject.prototype = { 148 /** 149 * Create a worker to get the uncompressed content. 150 * @return {GenericWorker} the worker. 151 */ 152 getContentWorker : function () { 153 var worker = new DataWorker(external.Promise.resolve(this.compressedContent)) 154 .pipe(this.compression.uncompressWorker()) 155 .pipe(new DataLengthProbe("data_length")); 156 157 var that = this; 158 worker.on("end", function () { 159 if(this.streamInfo['data_length'] !== that.uncompressedSize) { 160 throw new Error("Bug : uncompressed data size mismatch"); 161 } 162 }); 163 return worker; 164 }, 165 /** 166 * Create a worker to get the compressed content. 167 * @return {GenericWorker} the worker. 168 */ 169 getCompressedWorker : function () { 170 return new DataWorker(external.Promise.resolve(this.compressedContent)) 171 .withStreamInfo("compressedSize", this.compressedSize) 172 .withStreamInfo("uncompressedSize", this.uncompressedSize) 173 .withStreamInfo("crc32", this.crc32) 174 .withStreamInfo("compression", this.compression) 175 ; 176 } 177 }; 178 179 /** 180 * Chain the given worker with other workers to compress the content with the 181 * given compresion. 182 * @param {GenericWorker} uncompressedWorker the worker to pipe. 183 * @param {Object} compression the compression object. 184 * @param {Object} compressionOptions the options to use when compressing. 185 * @return {GenericWorker} the new worker compressing the content. 
186 */ 187 CompressedObject.createWorkerFrom = function (uncompressedWorker, compression, compressionOptions) { 188 return uncompressedWorker 189 .pipe(new Crc32Probe()) 190 .pipe(new DataLengthProbe("uncompressedSize")) 191 .pipe(compression.compressWorker(compressionOptions)) 192 .pipe(new DataLengthProbe("compressedSize")) 193 .withStreamInfo("compression", compression); 194 }; 195 196 module.exports = CompressedObject; 197 198 },{"./external":6,"./stream/Crc32Probe":25,"./stream/DataLengthProbe":26,"./stream/DataWorker":27}],3:[function(require,module,exports){ 199 'use strict'; 200 201 var GenericWorker = require("./stream/GenericWorker"); 202 203 exports.STORE = { 204 magic: "\x00\x00", 205 compressWorker : function (compressionOptions) { 206 return new GenericWorker("STORE compression"); 207 }, 208 uncompressWorker : function () { 209 return new GenericWorker("STORE decompression"); 210 } 211 }; 212 exports.DEFLATE = require('./flate'); 213 214 },{"./flate":7,"./stream/GenericWorker":28}],4:[function(require,module,exports){ 215 'use strict'; 216 217 var utils = require('./utils'); 218 219 /** 220 * The following functions come from pako, from pako/lib/zlib/crc32.js 221 * released under the MIT license, see pako https://github.com/nodeca/pako/ 222 */ 223 224 // Use ordinary array, since untyped makes no boost here 225 function makeTable() { 226 var c, table = []; 227 228 for(var n =0; n < 256; n++){ 229 c = n; 230 for(var k =0; k < 8; k++){ 231 c = ((c&1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); 232 } 233 table[n] = c; 234 } 235 236 return table; 237 } 238 239 // Create table on load. Just 255 signed longs. Not a problem. 240 var crcTable = makeTable(); 241 242 243 function crc32(crc, buf, len, pos) { 244 var t = crcTable, end = pos + len; 245 246 crc = crc ^ (-1); 247 248 for (var i = pos; i < end; i++ ) { 249 crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF]; 250 } 251 252 return (crc ^ (-1)); // >>> 0; 253 } 254 255 // That's all for the pako functions. 
256 257 /** 258 * Compute the crc32 of a string. 259 * This is almost the same as the function crc32, but for strings. Using the 260 * same function for the two use cases leads to horrible performances. 261 * @param {Number} crc the starting value of the crc. 262 * @param {String} str the string to use. 263 * @param {Number} len the length of the string. 264 * @param {Number} pos the starting position for the crc32 computation. 265 * @return {Number} the computed crc32. 266 */ 267 function crc32str(crc, str, len, pos) { 268 var t = crcTable, end = pos + len; 269 270 crc = crc ^ (-1); 271 272 for (var i = pos; i < end; i++ ) { 273 crc = (crc >>> 8) ^ t[(crc ^ str.charCodeAt(i)) & 0xFF]; 274 } 275 276 return (crc ^ (-1)); // >>> 0; 277 } 278 279 module.exports = function crc32wrapper(input, crc) { 280 if (typeof input === "undefined" || !input.length) { 281 return 0; 282 } 283 284 var isArray = utils.getTypeOf(input) !== "string"; 285 286 if(isArray) { 287 return crc32(crc|0, input, input.length, 0); 288 } else { 289 return crc32str(crc|0, input, input.length, 0); 290 } 291 }; 292 293 },{"./utils":32}],5:[function(require,module,exports){ 294 'use strict'; 295 exports.base64 = false; 296 exports.binary = false; 297 exports.dir = false; 298 exports.createFolders = true; 299 exports.date = null; 300 exports.compression = null; 301 exports.compressionOptions = null; 302 exports.comment = null; 303 exports.unixPermissions = null; 304 exports.dosPermissions = null; 305 306 },{}],6:[function(require,module,exports){ 307 /* global Promise */ 308 'use strict'; 309 310 // load the global object first: 311 // - it should be better integrated in the system (unhandledRejection in node) 312 // - the environment may have a custom Promise implementation (see zone.js) 313 var ES6Promise = null; 314 if (typeof Promise !== "undefined") { 315 ES6Promise = Promise; 316 } else { 317 ES6Promise = require("lie"); 318 } 319 320 /** 321 * Let the user use/change some implementations. 
322 */ 323 module.exports = { 324 Promise: ES6Promise 325 }; 326 327 },{"lie":37}],7:[function(require,module,exports){ 328 'use strict'; 329 var USE_TYPEDARRAY = (typeof Uint8Array !== 'undefined') && (typeof Uint16Array !== 'undefined') && (typeof Uint32Array !== 'undefined'); 330 331 var pako = require("pako"); 332 var utils = require("./utils"); 333 var GenericWorker = require("./stream/GenericWorker"); 334 335 var ARRAY_TYPE = USE_TYPEDARRAY ? "uint8array" : "array"; 336 337 exports.magic = "\x08\x00"; 338 339 /** 340 * Create a worker that uses pako to inflate/deflate. 341 * @constructor 342 * @param {String} action the name of the pako function to call : either "Deflate" or "Inflate". 343 * @param {Object} options the options to use when (de)compressing. 344 */ 345 function FlateWorker(action, options) { 346 GenericWorker.call(this, "FlateWorker/" + action); 347 348 this._pako = null; 349 this._pakoAction = action; 350 this._pakoOptions = options; 351 // the `meta` object from the last chunk received 352 // this allow this worker to pass around metadata 353 this.meta = {}; 354 } 355 356 utils.inherits(FlateWorker, GenericWorker); 357 358 /** 359 * @see GenericWorker.processChunk 360 */ 361 FlateWorker.prototype.processChunk = function (chunk) { 362 this.meta = chunk.meta; 363 if (this._pako === null) { 364 this._createPako(); 365 } 366 this._pako.push(utils.transformTo(ARRAY_TYPE, chunk.data), false); 367 }; 368 369 /** 370 * @see GenericWorker.flush 371 */ 372 FlateWorker.prototype.flush = function () { 373 GenericWorker.prototype.flush.call(this); 374 if (this._pako === null) { 375 this._createPako(); 376 } 377 this._pako.push([], true); 378 }; 379 /** 380 * @see GenericWorker.cleanUp 381 */ 382 FlateWorker.prototype.cleanUp = function () { 383 GenericWorker.prototype.cleanUp.call(this); 384 this._pako = null; 385 }; 386 387 /** 388 * Create the _pako object. 389 * TODO: lazy-loading this object isn't the best solution but it's the 390 * quickest. 
The best solution is to lazy-load the worker list. See also the 391 * issue #446. 392 */ 393 FlateWorker.prototype._createPako = function () { 394 this._pako = new pako[this._pakoAction]({ 395 raw: true, 396 level: this._pakoOptions.level || -1 // default compression 397 }); 398 var self = this; 399 this._pako.onData = function(data) { 400 self.push({ 401 data : data, 402 meta : self.meta 403 }); 404 }; 405 }; 406 407 exports.compressWorker = function (compressionOptions) { 408 return new FlateWorker("Deflate", compressionOptions); 409 }; 410 exports.uncompressWorker = function () { 411 return new FlateWorker("Inflate", {}); 412 }; 413 414 },{"./stream/GenericWorker":28,"./utils":32,"pako":38}],8:[function(require,module,exports){ 415 'use strict'; 416 417 var utils = require('../utils'); 418 var GenericWorker = require('../stream/GenericWorker'); 419 var utf8 = require('../utf8'); 420 var crc32 = require('../crc32'); 421 var signature = require('../signature'); 422 423 /** 424 * Transform an integer into a string in hexadecimal. 425 * @private 426 * @param {number} dec the number to convert. 427 * @param {number} bytes the number of bytes to generate. 428 * @returns {string} the result. 429 */ 430 var decToHex = function(dec, bytes) { 431 var hex = "", i; 432 for (i = 0; i < bytes; i++) { 433 hex += String.fromCharCode(dec & 0xff); 434 dec = dec >>> 8; 435 } 436 return hex; 437 }; 438 439 /** 440 * Generate the UNIX part of the external file attributes. 441 * @param {Object} unixPermissions the unix permissions or null. 442 * @param {Boolean} isDir true if the entry is a directory, false otherwise. 443 * @return {Number} a 32 bit integer. 
444 * 445 * adapted from http://unix.stackexchange.com/questions/14705/the-zip-formats-external-file-attribute : 446 * 447 * TTTTsstrwxrwxrwx0000000000ADVSHR 448 * ^^^^____________________________ file type, see zipinfo.c (UNX_*) 449 * ^^^_________________________ setuid, setgid, sticky 450 * ^^^^^^^^^________________ permissions 451 * ^^^^^^^^^^______ not used ? 452 * ^^^^^^ DOS attribute bits : Archive, Directory, Volume label, System file, Hidden, Read only 453 */ 454 var generateUnixExternalFileAttr = function (unixPermissions, isDir) { 455 456 var result = unixPermissions; 457 if (!unixPermissions) { 458 // I can't use octal values in strict mode, hence the hexa. 459 // 040775 => 0x41fd 460 // 0100664 => 0x81b4 461 result = isDir ? 0x41fd : 0x81b4; 462 } 463 return (result & 0xFFFF) << 16; 464 }; 465 466 /** 467 * Generate the DOS part of the external file attributes. 468 * @param {Object} dosPermissions the dos permissions or null. 469 * @param {Boolean} isDir true if the entry is a directory, false otherwise. 470 * @return {Number} a 32 bit integer. 471 * 472 * Bit 0 Read-Only 473 * Bit 1 Hidden 474 * Bit 2 System 475 * Bit 3 Volume Label 476 * Bit 4 Directory 477 * Bit 5 Archive 478 */ 479 var generateDosExternalFileAttr = function (dosPermissions, isDir) { 480 481 // the dir flag is already set for compatibility 482 return (dosPermissions || 0) & 0x3F; 483 }; 484 485 /** 486 * Generate the various parts used in the construction of the final zip file. 487 * @param {Object} streamInfo the hash with informations about the compressed file. 488 * @param {Boolean} streamedContent is the content streamed ? 489 * @param {Boolean} streamingEnded is the stream finished ? 490 * @param {number} offset the current offset from the start of the zip file. 491 * @param {String} platform let's pretend we are this platform (change platform dependents fields) 492 * @param {Function} encodeFileName the function to encode the file name / comment. 
493 * @return {Object} the zip parts. 494 */ 495 var generateZipParts = function(streamInfo, streamedContent, streamingEnded, offset, platform, encodeFileName) { 496 var file = streamInfo['file'], 497 compression = streamInfo['compression'], 498 useCustomEncoding = encodeFileName !== utf8.utf8encode, 499 encodedFileName = utils.transformTo("string", encodeFileName(file.name)), 500 utfEncodedFileName = utils.transformTo("string", utf8.utf8encode(file.name)), 501 comment = file.comment, 502 encodedComment = utils.transformTo("string", encodeFileName(comment)), 503 utfEncodedComment = utils.transformTo("string", utf8.utf8encode(comment)), 504 useUTF8ForFileName = utfEncodedFileName.length !== file.name.length, 505 useUTF8ForComment = utfEncodedComment.length !== comment.length, 506 dosTime, 507 dosDate, 508 extraFields = "", 509 unicodePathExtraField = "", 510 unicodeCommentExtraField = "", 511 dir = file.dir, 512 date = file.date; 513 514 515 var dataInfo = { 516 crc32 : 0, 517 compressedSize : 0, 518 uncompressedSize : 0 519 }; 520 521 // if the content is streamed, the sizes/crc32 are only available AFTER 522 // the end of the stream. 523 if (!streamedContent || streamingEnded) { 524 dataInfo.crc32 = streamInfo['crc32']; 525 dataInfo.compressedSize = streamInfo['compressedSize']; 526 dataInfo.uncompressedSize = streamInfo['uncompressedSize']; 527 } 528 529 var bitflag = 0; 530 if (streamedContent) { 531 // Bit 3: the sizes/crc32 are set to zero in the local header. 532 // The correct values are put in the data descriptor immediately 533 // following the compressed data. 534 bitflag |= 0x0008; 535 } 536 if (!useCustomEncoding && (useUTF8ForFileName || useUTF8ForComment)) { 537 // Bit 11: Language encoding flag (EFS). 
538 bitflag |= 0x0800; 539 } 540 541 542 var extFileAttr = 0; 543 var versionMadeBy = 0; 544 if (dir) { 545 // dos or unix, we set the dos dir flag 546 extFileAttr |= 0x00010; 547 } 548 if(platform === "UNIX") { 549 versionMadeBy = 0x031E; // UNIX, version 3.0 550 extFileAttr |= generateUnixExternalFileAttr(file.unixPermissions, dir); 551 } else { // DOS or other, fallback to DOS 552 versionMadeBy = 0x0014; // DOS, version 2.0 553 extFileAttr |= generateDosExternalFileAttr(file.dosPermissions, dir); 554 } 555 556 // date 557 // @see http://www.delorie.com/djgpp/doc/rbinter/it/52/13.html 558 // @see http://www.delorie.com/djgpp/doc/rbinter/it/65/16.html 559 // @see http://www.delorie.com/djgpp/doc/rbinter/it/66/16.html 560 561 dosTime = date.getUTCHours(); 562 dosTime = dosTime << 6; 563 dosTime = dosTime | date.getUTCMinutes(); 564 dosTime = dosTime << 5; 565 dosTime = dosTime | date.getUTCSeconds() / 2; 566 567 dosDate = date.getUTCFullYear() - 1980; 568 dosDate = dosDate << 4; 569 dosDate = dosDate | (date.getUTCMonth() + 1); 570 dosDate = dosDate << 5; 571 dosDate = dosDate | date.getUTCDate(); 572 573 if (useUTF8ForFileName) { 574 // set the unicode path extra field. unzip needs at least one extra 575 // field to correctly handle unicode path, so using the path is as good 576 // as any other information. This could improve the situation with 577 // other archive managers too. 578 // This field is usually used without the utf8 flag, with a non 579 // unicode path in the header (winrar, winzip). This helps (a bit) 580 // with the messy Windows' default compressed folders feature but 581 // breaks on p7zip which doesn't seek the unicode path extra field. 582 // So for now, UTF-8 everywhere ! 
583 unicodePathExtraField = 584 // Version 585 decToHex(1, 1) + 586 // NameCRC32 587 decToHex(crc32(encodedFileName), 4) + 588 // UnicodeName 589 utfEncodedFileName; 590 591 extraFields += 592 // Info-ZIP Unicode Path Extra Field 593 "\x75\x70" + 594 // size 595 decToHex(unicodePathExtraField.length, 2) + 596 // content 597 unicodePathExtraField; 598 } 599 600 if(useUTF8ForComment) { 601 602 unicodeCommentExtraField = 603 // Version 604 decToHex(1, 1) + 605 // CommentCRC32 606 decToHex(crc32(encodedComment), 4) + 607 // UnicodeName 608 utfEncodedComment; 609 610 extraFields += 611 // Info-ZIP Unicode Path Extra Field 612 "\x75\x63" + 613 // size 614 decToHex(unicodeCommentExtraField.length, 2) + 615 // content 616 unicodeCommentExtraField; 617 } 618 619 var header = ""; 620 621 // version needed to extract 622 header += "\x0A\x00"; 623 // general purpose bit flag 624 header += decToHex(bitflag, 2); 625 // compression method 626 header += compression.magic; 627 // last mod file time 628 header += decToHex(dosTime, 2); 629 // last mod file date 630 header += decToHex(dosDate, 2); 631 // crc-32 632 header += decToHex(dataInfo.crc32, 4); 633 // compressed size 634 header += decToHex(dataInfo.compressedSize, 4); 635 // uncompressed size 636 header += decToHex(dataInfo.uncompressedSize, 4); 637 // file name length 638 header += decToHex(encodedFileName.length, 2); 639 // extra field length 640 header += decToHex(extraFields.length, 2); 641 642 643 var fileRecord = signature.LOCAL_FILE_HEADER + header + encodedFileName + extraFields; 644 645 var dirRecord = signature.CENTRAL_FILE_HEADER + 646 // version made by (00: DOS) 647 decToHex(versionMadeBy, 2) + 648 // file header (common to file and central directory) 649 header + 650 // file comment length 651 decToHex(encodedComment.length, 2) + 652 // disk number start 653 "\x00\x00" + 654 // internal file attributes TODO 655 "\x00\x00" + 656 // external file attributes 657 decToHex(extFileAttr, 4) + 658 // relative offset of 
local header 659 decToHex(offset, 4) + 660 // file name 661 encodedFileName + 662 // extra field 663 extraFields + 664 // file comment 665 encodedComment; 666 667 return { 668 fileRecord: fileRecord, 669 dirRecord: dirRecord 670 }; 671 }; 672 673 /** 674 * Generate the EOCD record. 675 * @param {Number} entriesCount the number of entries in the zip file. 676 * @param {Number} centralDirLength the length (in bytes) of the central dir. 677 * @param {Number} localDirLength the length (in bytes) of the local dir. 678 * @param {String} comment the zip file comment as a binary string. 679 * @param {Function} encodeFileName the function to encode the comment. 680 * @return {String} the EOCD record. 681 */ 682 var generateCentralDirectoryEnd = function (entriesCount, centralDirLength, localDirLength, comment, encodeFileName) { 683 var dirEnd = ""; 684 var encodedComment = utils.transformTo("string", encodeFileName(comment)); 685 686 // end of central dir signature 687 dirEnd = signature.CENTRAL_DIRECTORY_END + 688 // number of this disk 689 "\x00\x00" + 690 // number of the disk with the start of the central directory 691 "\x00\x00" + 692 // total number of entries in the central directory on this disk 693 decToHex(entriesCount, 2) + 694 // total number of entries in the central directory 695 decToHex(entriesCount, 2) + 696 // size of the central directory 4 bytes 697 decToHex(centralDirLength, 4) + 698 // offset of start of central directory with respect to the starting disk number 699 decToHex(localDirLength, 4) + 700 // .ZIP file comment length 701 decToHex(encodedComment.length, 2) + 702 // .ZIP file comment 703 encodedComment; 704 705 return dirEnd; 706 }; 707 708 /** 709 * Generate data descriptors for a file entry. 710 * @param {Object} streamInfo the hash generated by a worker, containing informations 711 * on the file entry. 712 * @return {String} the data descriptors. 
713 */ 714 var generateDataDescriptors = function (streamInfo) { 715 var descriptor = ""; 716 descriptor = signature.DATA_DESCRIPTOR + 717 // crc-32 4 bytes 718 decToHex(streamInfo['crc32'], 4) + 719 // compressed size 4 bytes 720 decToHex(streamInfo['compressedSize'], 4) + 721 // uncompressed size 4 bytes 722 decToHex(streamInfo['uncompressedSize'], 4); 723 724 return descriptor; 725 }; 726 727 728 /** 729 * A worker to concatenate other workers to create a zip file. 730 * @param {Boolean} streamFiles `true` to stream the content of the files, 731 * `false` to accumulate it. 732 * @param {String} comment the comment to use. 733 * @param {String} platform the platform to use, "UNIX" or "DOS". 734 * @param {Function} encodeFileName the function to encode file names and comments. 735 */ 736 function ZipFileWorker(streamFiles, comment, platform, encodeFileName) { 737 GenericWorker.call(this, "ZipFileWorker"); 738 // The number of bytes written so far. This doesn't count accumulated chunks. 739 this.bytesWritten = 0; 740 // The comment of the zip file 741 this.zipComment = comment; 742 // The platform "generating" the zip file. 743 this.zipPlatform = platform; 744 // the function to encode file names and comments. 745 this.encodeFileName = encodeFileName; 746 // Should we stream the content of the files ? 747 this.streamFiles = streamFiles; 748 // If `streamFiles` is false, we will need to accumulate the content of the 749 // files to calculate sizes / crc32 (and write them *before* the content). 750 // This boolean indicates if we are accumulating chunks (it will change a lot 751 // during the lifetime of this worker). 752 this.accumulate = false; 753 // The buffer receiving chunks when accumulating content. 754 this.contentBuffer = []; 755 // The list of generated directory records. 756 this.dirRecords = []; 757 // The offset (in bytes) from the beginning of the zip file for the current source. 
758 this.currentSourceOffset = 0; 759 // The total number of entries in this zip file. 760 this.entriesCount = 0; 761 // the name of the file currently being added, null when handling the end of the zip file. 762 // Used for the emited metadata. 763 this.currentFile = null; 764 765 766 767 this._sources = []; 768 } 769 utils.inherits(ZipFileWorker, GenericWorker); 770 771 /** 772 * @see GenericWorker.push 773 */ 774 ZipFileWorker.prototype.push = function (chunk) { 775 776 var currentFilePercent = chunk.meta.percent || 0; 777 var entriesCount = this.entriesCount; 778 var remainingFiles = this._sources.length; 779 780 if(this.accumulate) { 781 this.contentBuffer.push(chunk); 782 } else { 783 this.bytesWritten += chunk.data.length; 784 785 GenericWorker.prototype.push.call(this, { 786 data : chunk.data, 787 meta : { 788 currentFile : this.currentFile, 789 percent : entriesCount ? (currentFilePercent + 100 * (entriesCount - remainingFiles - 1)) / entriesCount : 100 790 } 791 }); 792 } 793 }; 794 795 /** 796 * The worker started a new source (an other worker). 797 * @param {Object} streamInfo the streamInfo object from the new source. 798 */ 799 ZipFileWorker.prototype.openedSource = function (streamInfo) { 800 this.currentSourceOffset = this.bytesWritten; 801 this.currentFile = streamInfo['file'].name; 802 803 var streamedContent = this.streamFiles && !streamInfo['file'].dir; 804 805 // don't stream folders (because they don't have any content) 806 if(streamedContent) { 807 var record = generateZipParts(streamInfo, streamedContent, false, this.currentSourceOffset, this.zipPlatform, this.encodeFileName); 808 this.push({ 809 data : record.fileRecord, 810 meta : {percent:0} 811 }); 812 } else { 813 // we need to wait for the whole file before pushing anything 814 this.accumulate = true; 815 } 816 }; 817 818 /** 819 * The worker finished a source (an other worker). 820 * @param {Object} streamInfo the streamInfo object from the finished source. 
821 */ 822 ZipFileWorker.prototype.closedSource = function (streamInfo) { 823 this.accumulate = false; 824 var streamedContent = this.streamFiles && !streamInfo['file'].dir; 825 var record = generateZipParts(streamInfo, streamedContent, true, this.currentSourceOffset, this.zipPlatform, this.encodeFileName); 826 827 this.dirRecords.push(record.dirRecord); 828 if(streamedContent) { 829 // after the streamed file, we put data descriptors 830 this.push({ 831 data : generateDataDescriptors(streamInfo), 832 meta : {percent:100} 833 }); 834 } else { 835 // the content wasn't streamed, we need to push everything now 836 // first the file record, then the content 837 this.push({ 838 data : record.fileRecord, 839 meta : {percent:0} 840 }); 841 while(this.contentBuffer.length) { 842 this.push(this.contentBuffer.shift()); 843 } 844 } 845 this.currentFile = null; 846 }; 847 848 /** 849 * @see GenericWorker.flush 850 */ 851 ZipFileWorker.prototype.flush = function () { 852 853 var localDirLength = this.bytesWritten; 854 for(var i = 0; i < this.dirRecords.length; i++) { 855 this.push({ 856 data : this.dirRecords[i], 857 meta : {percent:100} 858 }); 859 } 860 var centralDirLength = this.bytesWritten - localDirLength; 861 862 var dirEnd = generateCentralDirectoryEnd(this.dirRecords.length, centralDirLength, localDirLength, this.zipComment, this.encodeFileName); 863 864 this.push({ 865 data : dirEnd, 866 meta : {percent:100} 867 }); 868 }; 869 870 /** 871 * Prepare the next source to be read. 
872 */ 873 ZipFileWorker.prototype.prepareNextSource = function () { 874 this.previous = this._sources.shift(); 875 this.openedSource(this.previous.streamInfo); 876 if (this.isPaused) { 877 this.previous.pause(); 878 } else { 879 this.previous.resume(); 880 } 881 }; 882 883 /** 884 * @see GenericWorker.registerPrevious 885 */ 886 ZipFileWorker.prototype.registerPrevious = function (previous) { 887 this._sources.push(previous); 888 var self = this; 889 890 previous.on('data', function (chunk) { 891 self.processChunk(chunk); 892 }); 893 previous.on('end', function () { 894 self.closedSource(self.previous.streamInfo); 895 if(self._sources.length) { 896 self.prepareNextSource(); 897 } else { 898 self.end(); 899 } 900 }); 901 previous.on('error', function (e) { 902 self.error(e); 903 }); 904 return this; 905 }; 906 907 /** 908 * @see GenericWorker.resume 909 */ 910 ZipFileWorker.prototype.resume = function () { 911 if(!GenericWorker.prototype.resume.call(this)) { 912 return false; 913 } 914 915 if (!this.previous && this._sources.length) { 916 this.prepareNextSource(); 917 return true; 918 } 919 if (!this.previous && !this._sources.length && !this.generatedError) { 920 this.end(); 921 return true; 922 } 923 }; 924 925 /** 926 * @see GenericWorker.error 927 */ 928 ZipFileWorker.prototype.error = function (e) { 929 var sources = this._sources; 930 if(!GenericWorker.prototype.error.call(this, e)) { 931 return false; 932 } 933 for(var i = 0; i < sources.length; i++) { 934 try { 935 sources[i].error(e); 936 } catch(e) { 937 // the `error` exploded, nothing to do 938 } 939 } 940 return true; 941 }; 942 943 /** 944 * @see GenericWorker.lock 945 */ 946 ZipFileWorker.prototype.lock = function () { 947 GenericWorker.prototype.lock.call(this); 948 var sources = this._sources; 949 for(var i = 0; i < sources.length; i++) { 950 sources[i].lock(); 951 } 952 }; 953 954 module.exports = ZipFileWorker; 955 956 
},{"../crc32":4,"../signature":23,"../stream/GenericWorker":28,"../utf8":31,"../utils":32}],9:[function(require,module,exports){ 957 'use strict'; 958 959 var compressions = require('../compressions'); 960 var ZipFileWorker = require('./ZipFileWorker'); 961 962 /** 963 * Find the compression to use. 964 * @param {String} fileCompression the compression defined at the file level, if any. 965 * @param {String} zipCompression the compression defined at the load() level. 966 * @return {Object} the compression object to use. 967 */ 968 var getCompression = function (fileCompression, zipCompression) { 969 970 var compressionName = fileCompression || zipCompression; 971 var compression = compressions[compressionName]; 972 if (!compression) { 973 throw new Error(compressionName + " is not a valid compression method !"); 974 } 975 return compression; 976 }; 977 978 /** 979 * Create a worker to generate a zip file. 980 * @param {JSZip} zip the JSZip instance at the right root level. 981 * @param {Object} options to generate the zip file. 982 * @param {String} comment the comment to use. 
983 */ 984 exports.generateWorker = function (zip, options, comment) { 985 986 var zipFileWorker = new ZipFileWorker(options.streamFiles, comment, options.platform, options.encodeFileName); 987 var entriesCount = 0; 988 try { 989 990 zip.forEach(function (relativePath, file) { 991 entriesCount++; 992 var compression = getCompression(file.options.compression, options.compression); 993 var compressionOptions = file.options.compressionOptions || options.compressionOptions || {}; 994 var dir = file.dir, date = file.date; 995 996 file._compressWorker(compression, compressionOptions) 997 .withStreamInfo("file", { 998 name : relativePath, 999 dir : dir, 1000 date : date, 1001 comment : file.comment || "", 1002 unixPermissions : file.unixPermissions, 1003 dosPermissions : file.dosPermissions 1004 }) 1005 .pipe(zipFileWorker); 1006 }); 1007 zipFileWorker.entriesCount = entriesCount; 1008 } catch (e) { 1009 zipFileWorker.error(e); 1010 } 1011 1012 return zipFileWorker; 1013 }; 1014 1015 },{"../compressions":3,"./ZipFileWorker":8}],10:[function(require,module,exports){ 1016 'use strict'; 1017 1018 /** 1019 * Representation a of zip file in js 1020 * @constructor 1021 */ 1022 function JSZip() { 1023 // if this constructor is used without `new`, it adds `new` before itself: 1024 if(!(this instanceof JSZip)) { 1025 return new JSZip(); 1026 } 1027 1028 if(arguments.length) { 1029 throw new Error("The constructor with parameters has been removed in JSZip 3.0, please check the upgrade guide."); 1030 } 1031 1032 // object containing the files : 1033 // { 1034 // "folder/" : {...}, 1035 // "folder/data.txt" : {...} 1036 // } 1037 this.files = {}; 1038 1039 this.comment = null; 1040 1041 // Where we are in the hierarchy 1042 this.root = ""; 1043 this.clone = function() { 1044 var newObj = new JSZip(); 1045 for (var i in this) { 1046 if (typeof this[i] !== "function") { 1047 newObj[i] = this[i]; 1048 } 1049 } 1050 return newObj; 1051 }; 1052 } 1053 JSZip.prototype = 
require('./object'); 1054 JSZip.prototype.loadAsync = require('./load'); 1055 JSZip.support = require('./support'); 1056 JSZip.defaults = require('./defaults'); 1057 1058 // TODO find a better way to handle this version, 1059 // a require('package.json').version doesn't work with webpack, see #327 1060 JSZip.version = "3.2.0"; 1061 1062 JSZip.loadAsync = function (content, options) { 1063 return new JSZip().loadAsync(content, options); 1064 }; 1065 1066 JSZip.external = require("./external"); 1067 module.exports = JSZip; 1068 1069 },{"./defaults":5,"./external":6,"./load":11,"./object":15,"./support":30}],11:[function(require,module,exports){ 1070 'use strict'; 1071 var utils = require('./utils'); 1072 var external = require("./external"); 1073 var utf8 = require('./utf8'); 1074 var utils = require('./utils'); 1075 var ZipEntries = require('./zipEntries'); 1076 var Crc32Probe = require('./stream/Crc32Probe'); 1077 var nodejsUtils = require("./nodejsUtils"); 1078 1079 /** 1080 * Check the CRC32 of an entry. 1081 * @param {ZipEntry} zipEntry the zip entry to check. 1082 * @return {Promise} the result. 
 */
function checkEntryCRC32(zipEntry) {
    return new external.Promise(function (resolve, reject) {
        // Stream the decompressed content through a Crc32Probe and compare
        // the computed checksum against the one stored in the zip entry.
        var worker = zipEntry.decompressed.getContentWorker().pipe(new Crc32Probe());
        worker.on("error", function (e) {
            reject(e);
        })
        .on("end", function () {
            if (worker.streamInfo.crc32 !== zipEntry.decompressed.crc32) {
                reject(new Error("Corrupted zip : CRC32 mismatch"));
            } else {
                resolve();
            }
        })
        .resume();
    });
}

// loadAsync implementation: parse the zip entries, optionally verify CRC32,
// then register each entry on `this` (a JSZip instance).
module.exports = function(data, options) {
    var zip = this;
    options = utils.extend(options || {}, {
        base64: false,
        checkCRC32: false,
        optimizedBinaryString: false,
        createFolders: false,
        decodeFileName: utf8.utf8decode
    });

    if (nodejsUtils.isNode && nodejsUtils.isStream(data)) {
        return external.Promise.reject(new Error("JSZip can't accept a stream when loading a zip file."));
    }

    // `true` forces binary interpretation of the content.
    return utils.prepareContent("the loaded zip file", data, true, options.optimizedBinaryString, options.base64)
    .then(function(data) {
        var zipEntries = new ZipEntries(options);
        zipEntries.load(data);
        return zipEntries;
    }).then(function checkCRC32(zipEntries) {
        // First promise carries the entries so the next step can retrieve
        // them with shift(); the rest are per-entry CRC checks.
        var promises = [external.Promise.resolve(zipEntries)];
        var files = zipEntries.files;
        if (options.checkCRC32) {
            for (var i = 0; i < files.length; i++) {
                promises.push(checkEntryCRC32(files[i]));
            }
        }
        return external.Promise.all(promises);
    }).then(function addFiles(results) {
        var zipEntries = results.shift();
        var files = zipEntries.files;
        for (var i = 0; i < files.length; i++) {
            var input = files[i];
            zip.file(input.fileNameStr, input.decompressed, {
                binary: true,
                optimizedBinaryString: true,
                date: input.date,
                dir: input.dir,
                comment : input.fileCommentStr.length ? input.fileCommentStr : null,
                unixPermissions : input.unixPermissions,
                dosPermissions : input.dosPermissions,
                createFolders: options.createFolders
            });
        }
        if (zipEntries.zipComment.length) {
            zip.comment = zipEntries.zipComment;
        }

        return zip;
    });
};

},{"./external":6,"./nodejsUtils":14,"./stream/Crc32Probe":25,"./utf8":31,"./utils":32,"./zipEntries":33}],12:[function(require,module,exports){
"use strict";

var utils = require('../utils');
var GenericWorker = require('../stream/GenericWorker');

/**
 * A worker that use a nodejs stream as source.
 * @constructor
 * @param {String} filename the name of the file entry for this stream.
 * @param {Readable} stream the nodejs stream.
 */
function NodejsStreamInputAdapter(filename, stream) {
    GenericWorker.call(this, "Nodejs stream input adapter for " + filename);
    this._upstreamEnded = false;
    this._bindStream(stream);
}

utils.inherits(NodejsStreamInputAdapter, GenericWorker);

/**
 * Prepare the stream and bind the callbacks on it.
 * Do this ASAP on node 0.10 ! A lazy binding doesn't always work.
 * @param {Stream} stream the nodejs stream to use.
1177 */ 1178 NodejsStreamInputAdapter.prototype._bindStream = function (stream) { 1179 var self = this; 1180 this._stream = stream; 1181 stream.pause(); 1182 stream 1183 .on("data", function (chunk) { 1184 self.push({ 1185 data: chunk, 1186 meta : { 1187 percent : 0 1188 } 1189 }); 1190 }) 1191 .on("error", function (e) { 1192 if(self.isPaused) { 1193 this.generatedError = e; 1194 } else { 1195 self.error(e); 1196 } 1197 }) 1198 .on("end", function () { 1199 if(self.isPaused) { 1200 self._upstreamEnded = true; 1201 } else { 1202 self.end(); 1203 } 1204 }); 1205 }; 1206 NodejsStreamInputAdapter.prototype.pause = function () { 1207 if(!GenericWorker.prototype.pause.call(this)) { 1208 return false; 1209 } 1210 this._stream.pause(); 1211 return true; 1212 }; 1213 NodejsStreamInputAdapter.prototype.resume = function () { 1214 if(!GenericWorker.prototype.resume.call(this)) { 1215 return false; 1216 } 1217 1218 if(this._upstreamEnded) { 1219 this.end(); 1220 } else { 1221 this._stream.resume(); 1222 } 1223 1224 return true; 1225 }; 1226 1227 module.exports = NodejsStreamInputAdapter; 1228 1229 },{"../stream/GenericWorker":28,"../utils":32}],13:[function(require,module,exports){ 1230 'use strict'; 1231 1232 var Readable = require('readable-stream').Readable; 1233 1234 var utils = require('../utils'); 1235 utils.inherits(NodejsStreamOutputAdapter, Readable); 1236 1237 /** 1238 * A nodejs stream using a worker as source. 1239 * @see the SourceWrapper in http://nodejs.org/api/stream.html 1240 * @constructor 1241 * @param {StreamHelper} helper the helper wrapping the worker 1242 * @param {Object} options the nodejs stream options 1243 * @param {Function} updateCb the update callback. 
 */
function NodejsStreamOutputAdapter(helper, options, updateCb) {
    Readable.call(this, options);
    this._helper = helper;

    var self = this;
    helper.on("data", function (data, meta) {
        // push() returning false means the readable buffer is full:
        // pause the helper until _read() asks for more.
        if (!self.push(data)) {
            self._helper.pause();
        }
        if(updateCb) {
            updateCb(meta);
        }
    })
    .on("error", function(e) {
        self.emit('error', e);
    })
    .on("end", function () {
        // null signals end-of-stream to Readable consumers.
        self.push(null);
    });
}


NodejsStreamOutputAdapter.prototype._read = function() {
    this._helper.resume();
};

module.exports = NodejsStreamOutputAdapter;

},{"../utils":32,"readable-stream":16}],14:[function(require,module,exports){
'use strict';

module.exports = {
    /**
     * True if this is running in Nodejs, will be undefined in a browser.
     * In a browser, browserify won't include this file and the whole module
     * will be resolved an empty object.
     */
    isNode : typeof Buffer !== "undefined",
    /**
     * Create a new nodejs Buffer from an existing content.
     * @param {Object} data the data to pass to the constructor.
     * @param {String} encoding the encoding to use.
     * @return {Buffer} a new Buffer.
     */
    newBufferFrom: function(data, encoding) {
        if (Buffer.from && Buffer.from !== Uint8Array.from) {
            return Buffer.from(data, encoding);
        } else {
            if (typeof data === "number") {
                // Safeguard for old Node.js versions. On newer versions,
                // Buffer.from(number) / Buffer(number, encoding) already throw.
                throw new Error("The \"data\" argument must not be a number");
            }
            return new Buffer(data, encoding);
        }
    },
    /**
     * Create a new nodejs Buffer with the specified size.
     * @param {Integer} size the size of the buffer.
     * @return {Buffer} a new Buffer.
     */
    allocBuffer: function (size) {
        if (Buffer.alloc) {
            return Buffer.alloc(size);
        } else {
            // Old Node.js: new Buffer(size) is not zero-filled; do it manually.
            var buf = new Buffer(size);
            buf.fill(0);
            return buf;
        }
    },
    /**
     * Find out if an object is a Buffer.
     * @param {Object} b the object to test.
     * @return {Boolean} true if the object is a Buffer, false otherwise.
     */
    isBuffer : function(b){
        return Buffer.isBuffer(b);
    },

    // Duck-typing check for a nodejs stream: anything with on/pause/resume.
    isStream : function (obj) {
        return obj &&
            typeof obj.on === "function" &&
            typeof obj.pause === "function" &&
            typeof obj.resume === "function";
    }
};

},{}],15:[function(require,module,exports){
'use strict';
var utf8 = require('./utf8');
var utils = require('./utils');
var GenericWorker = require('./stream/GenericWorker');
var StreamHelper = require('./stream/StreamHelper');
var defaults = require('./defaults');
var CompressedObject = require('./compressedObject');
var ZipObject = require('./zipObject');
var generate = require("./generate");
var nodejsUtils = require("./nodejsUtils");
var NodejsStreamInputAdapter = require("./nodejs/NodejsStreamInputAdapter");


/**
 * Add a file in the current folder.
 * @private
 * @param {string} name the name of the file
 * @param {String|ArrayBuffer|Uint8Array|Buffer} data the data of the file
 * @param {Object} originalOptions the options of the file
 * @return {Object} the new file.
 */
var fileAdd = function(name, data, originalOptions) {
    // be sure sub folders exist
    var dataType = utils.getTypeOf(data),
        parent;


    /*
     * Correct options.
     */

    var o = utils.extend(originalOptions || {}, defaults);
    o.date = o.date || new Date();
    if (o.compression !== null) {
        o.compression = o.compression.toUpperCase();
    }

    if (typeof o.unixPermissions === "string") {
        // unix permissions are given in octal ("755") — parse with radix 8.
        o.unixPermissions = parseInt(o.unixPermissions, 8);
    }

    // UNX_IFDIR  0040000 see zipinfo.c
    if (o.unixPermissions && (o.unixPermissions & 0x4000)) {
        o.dir = true;
    }
    // Bit 4    Directory
    if (o.dosPermissions && (o.dosPermissions & 0x0010)) {
        o.dir = true;
    }

    if (o.dir) {
        name = forceTrailingSlash(name);
    }
    if (o.createFolders && (parent = parentFolder(name))) {
        folderAdd.call(this, parent, true);
    }

    // A plain string with no binary/base64 flag is treated as unicode text.
    var isUnicodeString = dataType === "string" && o.binary === false && o.base64 === false;
    if (!originalOptions || typeof originalOptions.binary === "undefined") {
        o.binary = !isUnicodeString;
    }


    var isCompressedEmpty = (data instanceof CompressedObject) && data.uncompressedSize === 0;

    // Empty content (or a folder) is normalized to an empty STORE'd string.
    if (isCompressedEmpty || o.dir || !data || data.length === 0) {
        o.base64 = false;
        o.binary = true;
        data = "";
        o.compression = "STORE";
        dataType = "string";
    }

    /*
     * Convert content to fit.
     */

    var zipObjectContent = null;
    if (data instanceof CompressedObject || data instanceof GenericWorker) {
        zipObjectContent = data;
    } else if (nodejsUtils.isNode && nodejsUtils.isStream(data)) {
        zipObjectContent = new NodejsStreamInputAdapter(name, data);
    } else {
        zipObjectContent = utils.prepareContent(name, data, o.binary, o.optimizedBinaryString, o.base64);
    }

    var object = new ZipObject(name, zipObjectContent, o);
    this.files[name] = object;
    /*
    TODO: we can't throw an exception because we have async promises
    (we can have a promise of a Date() for example) but returning a
    promise is useless because file(name, data) returns the JSZip
    object for chaining. Should we break that to allow the user
    to catch the error ?

    return external.Promise.resolve(zipObjectContent)
    .then(function () {
        return object;
    });
    */
};

/**
 * Find the parent folder of the path.
 * @private
 * @param {string} path the path to use
 * @return {string} the parent folder, or ""
 */
var parentFolder = function (path) {
    if (path.slice(-1) === '/') {
        path = path.substring(0, path.length - 1);
    }
    var lastSlash = path.lastIndexOf('/');
    // lastSlash === 0 would mean an absolute-style path; treat as no parent.
    return (lastSlash > 0) ? path.substring(0, lastSlash) : "";
};

/**
 * Returns the path with a slash at the end.
 * @private
 * @param {String} path the path to check.
 * @return {String} the path with a trailing slash.
 */
var forceTrailingSlash = function(path) {
    // Check the name ends with a /
    if (path.slice(-1) !== "/") {
        path += "/"; // IE doesn't like substr(-1)
    }
    return path;
};

/**
 * Add a (sub) folder in the current folder.
 * @private
 * @param {string} name the folder's name
 * @param {boolean=} [createFolders] If true, automatically create sub
 *  folders. Defaults to false.
 * @return {Object} the new folder.
 */
var folderAdd = function(name, createFolders) {
    createFolders = (typeof createFolders !== 'undefined') ? createFolders : defaults.createFolders;

    name = forceTrailingSlash(name);

    // Does this folder already exist?
    if (!this.files[name]) {
        fileAdd.call(this, name, null, {
            dir: true,
            createFolders: createFolders
        });
    }
    // Return the (new or pre-existing) folder entry.
    return this.files[name];
};

/**
 * Cross-window, cross-Node-context regular expression detection
 * @param {Object} object Anything
 * @return {Boolean} true if the object is a regular expression,
 * false otherwise
 */
function isRegExp(object) {
    return Object.prototype.toString.call(object) === "[object RegExp]";
}

// return the actual prototype of JSZip
var out = {
    /**
     * @see loadAsync
     */
    load: function() {
        throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide.");
    },


    /**
     * Call a callback function for each entry at this folder level.
     * @param {Function} cb the callback function:
     * function (relativePath, file) {...}
     * It takes 2 arguments : the relative path and the file.
     */
    forEach: function(cb) {
        var filename, relativePath, file;
        for (filename in this.files) {
            if (!this.files.hasOwnProperty(filename)) {
                continue;
            }
            file = this.files[filename];
            // Only visit entries below the current root.
            relativePath = filename.slice(this.root.length, filename.length);
            if (relativePath && filename.slice(0, this.root.length) === this.root) { // the file is in the current root
                cb(relativePath, file); // TODO reverse the parameters ? need to be clean AND consistent with the filter search fn...
            }
        }
    },

    /**
     * Filter nested files/folders with the specified function.
1528 * @param {Function} search the predicate to use : 1529 * function (relativePath, file) {...} 1530 * It takes 2 arguments : the relative path and the file. 1531 * @return {Array} An array of matching elements. 1532 */ 1533 filter: function(search) { 1534 var result = []; 1535 this.forEach(function (relativePath, entry) { 1536 if (search(relativePath, entry)) { // the file matches the function 1537 result.push(entry); 1538 } 1539 1540 }); 1541 return result; 1542 }, 1543 1544 /** 1545 * Add a file to the zip file, or search a file. 1546 * @param {string|RegExp} name The name of the file to add (if data is defined), 1547 * the name of the file to find (if no data) or a regex to match files. 1548 * @param {String|ArrayBuffer|Uint8Array|Buffer} data The file data, either raw or base64 encoded 1549 * @param {Object} o File options 1550 * @return {JSZip|Object|Array} this JSZip object (when adding a file), 1551 * a file (when searching by string) or an array of files (when searching by regex). 1552 */ 1553 file: function(name, data, o) { 1554 if (arguments.length === 1) { 1555 if (isRegExp(name)) { 1556 var regexp = name; 1557 return this.filter(function(relativePath, file) { 1558 return !file.dir && regexp.test(relativePath); 1559 }); 1560 } 1561 else { // text 1562 var obj = this.files[this.root + name]; 1563 if (obj && !obj.dir) { 1564 return obj; 1565 } else { 1566 return null; 1567 } 1568 } 1569 } 1570 else { // more than one argument : we have data ! 1571 name = this.root + name; 1572 fileAdd.call(this, name, data, o); 1573 } 1574 return this; 1575 }, 1576 1577 /** 1578 * Add a directory to the zip file, or search. 1579 * @param {String|RegExp} arg The name of the directory to add, or a regex to search folders. 1580 * @return {JSZip} an object with the new directory as the root, or an array containing matching folders. 
 */
    folder: function(arg) {
        if (!arg) {
            return this;
        }

        if (isRegExp(arg)) {
            return this.filter(function(relativePath, file) {
                return file.dir && arg.test(relativePath);
            });
        }

        // else, name is a new folder
        var name = this.root + arg;
        var newFolder = folderAdd.call(this, name);

        // Allow chaining by returning a new object with this folder as the root
        var ret = this.clone();
        ret.root = newFolder.name;
        return ret;
    },

    /**
     * Delete a file, or a directory and all sub-files, from the zip
     * @param {string} name the name of the file to delete
     * @return {JSZip} this JSZip object
     */
    remove: function(name) {
        name = this.root + name;
        var file = this.files[name];
        if (!file) {
            // Look for any folders
            if (name.slice(-1) !== "/") {
                name += "/";
            }
            file = this.files[name];
        }

        if (file && !file.dir) {
            // file
            delete this.files[name];
        } else {
            // maybe a folder, delete recursively
            // (also the branch taken when `name` does not exist: the filter
            // then simply matches nothing).
            var kids = this.filter(function(relativePath, file) {
                return file.name.slice(0, name.length) === name;
            });
            for (var i = 0; i < kids.length; i++) {
                delete this.files[kids[i].name];
            }
        }

        return this;
    },

    /**
     * Generate the complete zip file
     * @param {Object} options the options to generate the zip file :
     * - compression, "STORE" by default.
     * - type, "base64" by default. Values are : string, base64, uint8array, arraybuffer, blob.
     * @return {String|Uint8Array|ArrayBuffer|Buffer|Blob} the zip file
     */
    generate: function(options) {
        throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide.");
    },

    /**
     * Generate the complete zip file as an internal stream.
     * @param {Object} options the options to generate the zip file :
     * - compression, "STORE" by default.
     * - type, "base64" by default. Values are : string, base64, uint8array, arraybuffer, blob.
     * @return {StreamHelper} the streamed zip file.
     */
    generateInternalStream: function(options) {
        var worker, opts = {};
        try {
            opts = utils.extend(options || {}, {
                streamFiles: false,
                compression: "STORE",
                compressionOptions : null,
                type: "",
                platform: "DOS",
                comment: null,
                mimeType: 'application/zip',
                encodeFileName: utf8.utf8encode
            });

            opts.type = opts.type.toLowerCase();
            opts.compression = opts.compression.toUpperCase();

            // "binarystring" is prefered but the internals use "string".
            if(opts.type === "binarystring") {
                opts.type = "string";
            }

            if (!opts.type) {
                throw new Error("No output type specified.");
            }

            utils.checkSupport(opts.type);

            // accept nodejs `process.platform`
            if(
                opts.platform === 'darwin' ||
                opts.platform === 'freebsd' ||
                opts.platform === 'linux' ||
                opts.platform === 'sunos'
            ) {
                opts.platform = "UNIX";
            }
            if (opts.platform === 'win32') {
                opts.platform = "DOS";
            }

            var comment = opts.comment || this.comment || "";
            worker = generate.generateWorker(this, opts, comment);
        } catch (e) {
            // Any synchronous failure is funneled into an error-emitting
            // worker so the returned StreamHelper reports it asynchronously.
            worker = new GenericWorker("error");
            worker.error(e);
        }
        return new StreamHelper(worker, opts.type || "string", opts.mimeType);
    },
    /**
     * Generate the complete zip file asynchronously.
     * @see generateInternalStream
     */
    generateAsync: function(options, onUpdate) {
        return this.generateInternalStream(options).accumulate(onUpdate);
    },
    /**
     * Generate the complete zip file asynchronously.
     * @see generateInternalStream
     */
    generateNodeStream: function(options, onUpdate) {
        options = options || {};
        if (!options.type) {
            options.type = "nodebuffer";
        }
        return this.generateInternalStream(options).toNodejsStream(onUpdate);
    }
};
module.exports = out;

},{"./compressedObject":2,"./defaults":5,"./generate":9,"./nodejs/NodejsStreamInputAdapter":12,"./nodejsUtils":14,"./stream/GenericWorker":28,"./stream/StreamHelper":29,"./utf8":31,"./utils":32,"./zipObject":35}],16:[function(require,module,exports){
/*
 * This file is used by module bundlers (browserify/webpack/etc) when
 * including a stream implementation. We use "readable-stream" to get a
 * consistent behavior between nodejs versions but bundlers often have a shim
 * for "stream". Using this shim greatly improve the compatibility and greatly
 * reduce the final size of the bundle (only one stream implementation, not
 * two).
 */
module.exports = require("stream");

},{"stream":undefined}],17:[function(require,module,exports){
'use strict';
var DataReader = require('./DataReader');
var utils = require('../utils');

function ArrayReader(data) {
    DataReader.call(this, data);
    // Normalize every value to a byte (0-255).
    // NOTE(review): this mutates the caller's array in place.
    for(var i = 0; i < this.data.length; i++) {
        data[i] = data[i] & 0xFF;
    }
}
utils.inherits(ArrayReader, DataReader);
/**
 * @see DataReader.byteAt
 */
ArrayReader.prototype.byteAt = function(i) {
    return this.data[this.zero + i];
};
/**
 * @see DataReader.lastIndexOfSignature
 */
ArrayReader.prototype.lastIndexOfSignature = function(sig) {
    var sig0 = sig.charCodeAt(0),
        sig1 = sig.charCodeAt(1),
        sig2 = sig.charCodeAt(2),
        sig3 = sig.charCodeAt(3);
    // Scan backwards for the 4-byte signature.
    for (var i = this.length - 4; i >= 0; --i) {
        if (this.data[i] === sig0 && this.data[i + 1] === sig1 && this.data[i + 2] === sig2 && this.data[i + 3] === sig3) {
            return i - this.zero;
        }
    }

    return -1;
};
/**
 * @see DataReader.readAndCheckSignature
 */
ArrayReader.prototype.readAndCheckSignature = function (sig) {
    var sig0 = sig.charCodeAt(0),
        sig1 = sig.charCodeAt(1),
        sig2 = sig.charCodeAt(2),
        sig3 = sig.charCodeAt(3),
        data = this.readData(4);
    return sig0 === data[0] && sig1 === data[1] && sig2 === data[2] && sig3 === data[3];
};
/**
 * @see DataReader.readData
 */
ArrayReader.prototype.readData = function(size) {
    this.checkOffset(size);
    if(size === 0) {
        return [];
    }
    var result = this.data.slice(this.zero + this.index, this.zero + this.index + size);
    this.index += size;
    return result;
};
module.exports = ArrayReader;

},{"../utils":32,"./DataReader":18}],18:[function(require,module,exports){
'use strict';
var utils = require('../utils');

// Base class of all zip data readers; concrete subclasses implement
// byteAt/readData/lastIndexOfSignature/readAndCheckSignature.
function DataReader(data) {
    this.data = data; // type : see implementation
    this.length = data.length;
    this.index = 0;
    this.zero = 0;
}
DataReader.prototype = {
    /**
     * Check that the offset will not go too far.
     * @param {string} offset the additional offset to check.
     * @throws {Error} an Error if the offset is out of bounds.
     */
    checkOffset: function(offset) {
        this.checkIndex(this.index + offset);
    },
    /**
     * Check that the specified index will not be too far.
     * @param {string} newIndex the index to check.
     * @throws {Error} an Error if the index is out of bounds.
     */
    checkIndex: function(newIndex) {
        if (this.length < this.zero + newIndex || newIndex < 0) {
            throw new Error("End of data reached (data length = " + this.length + ", asked index = " + (newIndex) + "). Corrupted zip ?");
        }
    },
    /**
     * Change the index.
     * @param {number} newIndex The new index.
     * @throws {Error} if the new index is out of the data.
     */
    setIndex: function(newIndex) {
        this.checkIndex(newIndex);
        this.index = newIndex;
    },
    /**
     * Skip the next n bytes.
     * @param {number} n the number of bytes to skip.
     * @throws {Error} if the new index is out of the data.
     */
    skip: function(n) {
        this.setIndex(this.index + n);
    },
    /**
     * Get the byte at the specified index.
     * @param {number} i the index to use.
     * @return {number} a byte.
     */
    byteAt: function(i) {
        // see implementations
    },
    /**
     * Get the next number with a given byte size.
     * @param {number} size the number of bytes to read.
     * @return {number} the corresponding number.
     */
    readInt: function(size) {
        var result = 0,
            i;
        this.checkOffset(size);
        // Bytes are folded from the highest address down, shifting left each
        // time: the first (lowest-address) byte ends up least significant.
        for (i = this.index + size - 1; i >= this.index; i--) {
            result = (result << 8) + this.byteAt(i);
        }
        this.index += size;
        return result;
    },
    /**
     * Get the next string with a given byte size.
     * @param {number} size the number of bytes to read.
     * @return {string} the corresponding string.
     */
    readString: function(size) {
        return utils.transformTo("string", this.readData(size));
    },
    /**
     * Get raw data without conversion, <size> bytes.
     * @param {number} size the number of bytes to read.
     * @return {Object} the raw data, implementation specific.
     */
    readData: function(size) {
        // see implementations
    },
    /**
     * Find the last occurence of a zip signature (4 bytes).
     * @param {string} sig the signature to find.
     * @return {number} the index of the last occurence, -1 if not found.
     */
    lastIndexOfSignature: function(sig) {
        // see implementations
    },
    /**
     * Read the signature (4 bytes) at the current position and compare it with sig.
     * @param {string} sig the expected signature
     * @return {boolean} true if the signature matches, false otherwise.
     */
    readAndCheckSignature: function(sig) {
        // see implementations
    },
    /**
     * Get the next date.
     * @return {Date} the date.
     */
    readDate: function() {
        // MS-DOS date/time format, packed in 4 bytes.
        var dostime = this.readInt(4);
        return new Date(Date.UTC(
            ((dostime >> 25) & 0x7f) + 1980, // year
            ((dostime >> 21) & 0x0f) - 1, // month
            (dostime >> 16) & 0x1f, // day
            (dostime >> 11) & 0x1f, // hour
            (dostime >> 5) & 0x3f, // minute
            (dostime & 0x1f) << 1)); // second
    }
};
module.exports = DataReader;

},{"../utils":32}],19:[function(require,module,exports){
'use strict';
var Uint8ArrayReader = require('./Uint8ArrayReader');
var utils = require('../utils');

function NodeBufferReader(data) {
    Uint8ArrayReader.call(this, data);
}
utils.inherits(NodeBufferReader, Uint8ArrayReader);

/**
 * @see DataReader.readData
 */
NodeBufferReader.prototype.readData = function(size) {
    this.checkOffset(size);
    var result = this.data.slice(this.zero + this.index, this.zero + this.index + size);
    this.index += size;
    return result;
};
module.exports = NodeBufferReader;

},{"../utils":32,"./Uint8ArrayReader":21}],20:[function(require,module,exports){
'use strict';
var DataReader = require('./DataReader');
var utils = require('../utils');

function StringReader(data) {
    DataReader.call(this, data);
}
utils.inherits(StringReader, DataReader);
/**
 * @see DataReader.byteAt
 */
StringReader.prototype.byteAt = function(i) {
    return this.data.charCodeAt(this.zero + i);
};
/**
 * @see DataReader.lastIndexOfSignature
 */
StringReader.prototype.lastIndexOfSignature = function(sig) {
    return this.data.lastIndexOf(sig) - this.zero;
};
/**
 * @see DataReader.readAndCheckSignature
 */
StringReader.prototype.readAndCheckSignature = function (sig) {
    var data = this.readData(4);
    return sig === data;
};
/**
 * @see DataReader.readData
 */
StringReader.prototype.readData = function(size) {
    this.checkOffset(size);
    // this will work because the constructor applied the "& 0xff" mask.
    // (for a string reader, slice simply returns the substring)
    var result = this.data.slice(this.zero + this.index, this.zero + this.index + size);
    this.index += size;
    return result;
};
module.exports = StringReader;

},{"../utils":32,"./DataReader":18}],21:[function(require,module,exports){
'use strict';
var ArrayReader = require('./ArrayReader');
var utils = require('../utils');

function Uint8ArrayReader(data) {
    ArrayReader.call(this, data);
}
utils.inherits(Uint8ArrayReader, ArrayReader);
/**
 * @see DataReader.readData
 */
Uint8ArrayReader.prototype.readData = function(size) {
    this.checkOffset(size);
    if(size === 0) {
        // in IE10, when using subarray(idx, idx), we get the array [0x00] instead of [].
        return new Uint8Array(0);
    }
    // subarray: a view on the same underlying buffer, no copy.
    var result = this.data.subarray(this.zero + this.index, this.zero + this.index + size);
    this.index += size;
    return result;
};
module.exports = Uint8ArrayReader;

},{"../utils":32,"./ArrayReader":17}],22:[function(require,module,exports){
'use strict';

var utils = require('../utils');
var support = require('../support');
var ArrayReader = require('./ArrayReader');
var StringReader = require('./StringReader');
var NodeBufferReader = require('./NodeBufferReader');
var Uint8ArrayReader = require('./Uint8ArrayReader');

/**
 * Create a reader adapted to the data.
 * @param {String|ArrayBuffer|Uint8Array|Buffer} data the data to read.
 * @return {DataReader} the data reader.
2010 */ 2011 module.exports = function (data) { 2012 var type = utils.getTypeOf(data); 2013 utils.checkSupport(type); 2014 if (type === "string" && !support.uint8array) { 2015 return new StringReader(data); 2016 } 2017 if (type === "nodebuffer") { 2018 return new NodeBufferReader(data); 2019 } 2020 if (support.uint8array) { 2021 return new Uint8ArrayReader(utils.transformTo("uint8array", data)); 2022 } 2023 return new ArrayReader(utils.transformTo("array", data)); 2024 }; 2025 2026 },{"../support":30,"../utils":32,"./ArrayReader":17,"./NodeBufferReader":19,"./StringReader":20,"./Uint8ArrayReader":21}],23:[function(require,module,exports){ 2027 'use strict'; 2028 exports.LOCAL_FILE_HEADER = "PK\x03\x04"; 2029 exports.CENTRAL_FILE_HEADER = "PK\x01\x02"; 2030 exports.CENTRAL_DIRECTORY_END = "PK\x05\x06"; 2031 exports.ZIP64_CENTRAL_DIRECTORY_LOCATOR = "PK\x06\x07"; 2032 exports.ZIP64_CENTRAL_DIRECTORY_END = "PK\x06\x06"; 2033 exports.DATA_DESCRIPTOR = "PK\x07\x08"; 2034 2035 },{}],24:[function(require,module,exports){ 2036 'use strict'; 2037 2038 var GenericWorker = require('./GenericWorker'); 2039 var utils = require('../utils'); 2040 2041 /** 2042 * A worker which convert chunks to a specified type. 2043 * @constructor 2044 * @param {String} destType the destination type. 
2045 */ 2046 function ConvertWorker(destType) { 2047 GenericWorker.call(this, "ConvertWorker to " + destType); 2048 this.destType = destType; 2049 } 2050 utils.inherits(ConvertWorker, GenericWorker); 2051 2052 /** 2053 * @see GenericWorker.processChunk 2054 */ 2055 ConvertWorker.prototype.processChunk = function (chunk) { 2056 this.push({ 2057 data : utils.transformTo(this.destType, chunk.data), 2058 meta : chunk.meta 2059 }); 2060 }; 2061 module.exports = ConvertWorker; 2062 2063 },{"../utils":32,"./GenericWorker":28}],25:[function(require,module,exports){ 2064 'use strict'; 2065 2066 var GenericWorker = require('./GenericWorker'); 2067 var crc32 = require('../crc32'); 2068 var utils = require('../utils'); 2069 2070 /** 2071 * A worker which calculate the crc32 of the data flowing through. 2072 * @constructor 2073 */ 2074 function Crc32Probe() { 2075 GenericWorker.call(this, "Crc32Probe"); 2076 this.withStreamInfo("crc32", 0); 2077 } 2078 utils.inherits(Crc32Probe, GenericWorker); 2079 2080 /** 2081 * @see GenericWorker.processChunk 2082 */ 2083 Crc32Probe.prototype.processChunk = function (chunk) { 2084 this.streamInfo.crc32 = crc32(chunk.data, this.streamInfo.crc32 || 0); 2085 this.push(chunk); 2086 }; 2087 module.exports = Crc32Probe; 2088 2089 },{"../crc32":4,"../utils":32,"./GenericWorker":28}],26:[function(require,module,exports){ 2090 'use strict'; 2091 2092 var utils = require('../utils'); 2093 var GenericWorker = require('./GenericWorker'); 2094 2095 /** 2096 * A worker which calculate the total length of the data flowing through. 
2097 * @constructor 2098 * @param {String} propName the name used to expose the length 2099 */ 2100 function DataLengthProbe(propName) { 2101 GenericWorker.call(this, "DataLengthProbe for " + propName); 2102 this.propName = propName; 2103 this.withStreamInfo(propName, 0); 2104 } 2105 utils.inherits(DataLengthProbe, GenericWorker); 2106 2107 /** 2108 * @see GenericWorker.processChunk 2109 */ 2110 DataLengthProbe.prototype.processChunk = function (chunk) { 2111 if(chunk) { 2112 var length = this.streamInfo[this.propName] || 0; 2113 this.streamInfo[this.propName] = length + chunk.data.length; 2114 } 2115 GenericWorker.prototype.processChunk.call(this, chunk); 2116 }; 2117 module.exports = DataLengthProbe; 2118 2119 2120 },{"../utils":32,"./GenericWorker":28}],27:[function(require,module,exports){ 2121 'use strict'; 2122 2123 var utils = require('../utils'); 2124 var GenericWorker = require('./GenericWorker'); 2125 2126 // the size of the generated chunks 2127 // TODO expose this as a public variable 2128 var DEFAULT_BLOCK_SIZE = 16 * 1024; 2129 2130 /** 2131 * A worker that reads a content and emits chunks. 
2132 * @constructor 2133 * @param {Promise} dataP the promise of the data to split 2134 */ 2135 function DataWorker(dataP) { 2136 GenericWorker.call(this, "DataWorker"); 2137 var self = this; 2138 this.dataIsReady = false; 2139 this.index = 0; 2140 this.max = 0; 2141 this.data = null; 2142 this.type = ""; 2143 2144 this._tickScheduled = false; 2145 2146 dataP.then(function (data) { 2147 self.dataIsReady = true; 2148 self.data = data; 2149 self.max = data && data.length || 0; 2150 self.type = utils.getTypeOf(data); 2151 if(!self.isPaused) { 2152 self._tickAndRepeat(); 2153 } 2154 }, function (e) { 2155 self.error(e); 2156 }); 2157 } 2158 2159 utils.inherits(DataWorker, GenericWorker); 2160 2161 /** 2162 * @see GenericWorker.cleanUp 2163 */ 2164 DataWorker.prototype.cleanUp = function () { 2165 GenericWorker.prototype.cleanUp.call(this); 2166 this.data = null; 2167 }; 2168 2169 /** 2170 * @see GenericWorker.resume 2171 */ 2172 DataWorker.prototype.resume = function () { 2173 if(!GenericWorker.prototype.resume.call(this)) { 2174 return false; 2175 } 2176 2177 if (!this._tickScheduled && this.dataIsReady) { 2178 this._tickScheduled = true; 2179 utils.delay(this._tickAndRepeat, [], this); 2180 } 2181 return true; 2182 }; 2183 2184 /** 2185 * Trigger a tick a schedule an other call to this function. 2186 */ 2187 DataWorker.prototype._tickAndRepeat = function() { 2188 this._tickScheduled = false; 2189 if(this.isPaused || this.isFinished) { 2190 return; 2191 } 2192 this._tick(); 2193 if(!this.isFinished) { 2194 utils.delay(this._tickAndRepeat, [], this); 2195 this._tickScheduled = true; 2196 } 2197 }; 2198 2199 /** 2200 * Read and push a chunk. 
2201 */ 2202 DataWorker.prototype._tick = function() { 2203 2204 if(this.isPaused || this.isFinished) { 2205 return false; 2206 } 2207 2208 var size = DEFAULT_BLOCK_SIZE; 2209 var data = null, nextIndex = Math.min(this.max, this.index + size); 2210 if (this.index >= this.max) { 2211 // EOF 2212 return this.end(); 2213 } else { 2214 switch(this.type) { 2215 case "string": 2216 data = this.data.substring(this.index, nextIndex); 2217 break; 2218 case "uint8array": 2219 data = this.data.subarray(this.index, nextIndex); 2220 break; 2221 case "array": 2222 case "nodebuffer": 2223 data = this.data.slice(this.index, nextIndex); 2224 break; 2225 } 2226 this.index = nextIndex; 2227 return this.push({ 2228 data : data, 2229 meta : { 2230 percent : this.max ? this.index / this.max * 100 : 0 2231 } 2232 }); 2233 } 2234 }; 2235 2236 module.exports = DataWorker; 2237 2238 },{"../utils":32,"./GenericWorker":28}],28:[function(require,module,exports){ 2239 'use strict'; 2240 2241 /** 2242 * A worker that does nothing but passing chunks to the next one. This is like 2243 * a nodejs stream but with some differences. On the good side : 2244 * - it works on IE 6-9 without any issue / polyfill 2245 * - it weights less than the full dependencies bundled with browserify 2246 * - it forwards errors (no need to declare an error handler EVERYWHERE) 2247 * 2248 * A chunk is an object with 2 attributes : `meta` and `data`. The former is an 2249 * object containing anything (`percent` for example), see each worker for more 2250 * details. The latter is the real data (String, Uint8Array, etc). 
2251 * 2252 * @constructor 2253 * @param {String} name the name of the stream (mainly used for debugging purposes) 2254 */ 2255 function GenericWorker(name) { 2256 // the name of the worker 2257 this.name = name || "default"; 2258 // an object containing metadata about the workers chain 2259 this.streamInfo = {}; 2260 // an error which happened when the worker was paused 2261 this.generatedError = null; 2262 // an object containing metadata to be merged by this worker into the general metadata 2263 this.extraStreamInfo = {}; 2264 // true if the stream is paused (and should not do anything), false otherwise 2265 this.isPaused = true; 2266 // true if the stream is finished (and should not do anything), false otherwise 2267 this.isFinished = false; 2268 // true if the stream is locked to prevent further structure updates (pipe), false otherwise 2269 this.isLocked = false; 2270 // the event listeners 2271 this._listeners = { 2272 'data':[], 2273 'end':[], 2274 'error':[] 2275 }; 2276 // the previous worker, if any 2277 this.previous = null; 2278 } 2279 2280 GenericWorker.prototype = { 2281 /** 2282 * Push a chunk to the next workers. 2283 * @param {Object} chunk the chunk to push 2284 */ 2285 push : function (chunk) { 2286 this.emit("data", chunk); 2287 }, 2288 /** 2289 * End the stream. 2290 * @return {Boolean} true if this call ended the worker, false otherwise. 2291 */ 2292 end : function () { 2293 if (this.isFinished) { 2294 return false; 2295 } 2296 2297 this.flush(); 2298 try { 2299 this.emit("end"); 2300 this.cleanUp(); 2301 this.isFinished = true; 2302 } catch (e) { 2303 this.emit("error", e); 2304 } 2305 return true; 2306 }, 2307 /** 2308 * End the stream with an error. 2309 * @param {Error} e the error which caused the premature end. 2310 * @return {Boolean} true if this call ended the worker with an error, false otherwise. 
2311 */ 2312 error : function (e) { 2313 if (this.isFinished) { 2314 return false; 2315 } 2316 2317 if(this.isPaused) { 2318 this.generatedError = e; 2319 } else { 2320 this.isFinished = true; 2321 2322 this.emit("error", e); 2323 2324 // in the workers chain exploded in the middle of the chain, 2325 // the error event will go downward but we also need to notify 2326 // workers upward that there has been an error. 2327 if(this.previous) { 2328 this.previous.error(e); 2329 } 2330 2331 this.cleanUp(); 2332 } 2333 return true; 2334 }, 2335 /** 2336 * Add a callback on an event. 2337 * @param {String} name the name of the event (data, end, error) 2338 * @param {Function} listener the function to call when the event is triggered 2339 * @return {GenericWorker} the current object for chainability 2340 */ 2341 on : function (name, listener) { 2342 this._listeners[name].push(listener); 2343 return this; 2344 }, 2345 /** 2346 * Clean any references when a worker is ending. 2347 */ 2348 cleanUp : function () { 2349 this.streamInfo = this.generatedError = this.extraStreamInfo = null; 2350 this._listeners = []; 2351 }, 2352 /** 2353 * Trigger an event. This will call registered callback with the provided arg. 2354 * @param {String} name the name of the event (data, end, error) 2355 * @param {Object} arg the argument to call the callback with. 2356 */ 2357 emit : function (name, arg) { 2358 if (this._listeners[name]) { 2359 for(var i = 0; i < this._listeners[name].length; i++) { 2360 this._listeners[name][i].call(this, arg); 2361 } 2362 } 2363 }, 2364 /** 2365 * Chain a worker with an other. 2366 * @param {Worker} next the worker receiving events from the current one. 2367 * @return {worker} the next worker for chainability 2368 */ 2369 pipe : function (next) { 2370 return next.registerPrevious(this); 2371 }, 2372 /** 2373 * Same as `pipe` in the other direction. 2374 * Using an API with `pipe(next)` is very easy. 
2375 * Implementing the API with the point of view of the next one registering 2376 * a source is easier, see the ZipFileWorker. 2377 * @param {Worker} previous the previous worker, sending events to this one 2378 * @return {Worker} the current worker for chainability 2379 */ 2380 registerPrevious : function (previous) { 2381 if (this.isLocked) { 2382 throw new Error("The stream '" + this + "' has already been used."); 2383 } 2384 2385 // sharing the streamInfo... 2386 this.streamInfo = previous.streamInfo; 2387 // ... and adding our own bits 2388 this.mergeStreamInfo(); 2389 this.previous = previous; 2390 var self = this; 2391 previous.on('data', function (chunk) { 2392 self.processChunk(chunk); 2393 }); 2394 previous.on('end', function () { 2395 self.end(); 2396 }); 2397 previous.on('error', function (e) { 2398 self.error(e); 2399 }); 2400 return this; 2401 }, 2402 /** 2403 * Pause the stream so it doesn't send events anymore. 2404 * @return {Boolean} true if this call paused the worker, false otherwise. 2405 */ 2406 pause : function () { 2407 if(this.isPaused || this.isFinished) { 2408 return false; 2409 } 2410 this.isPaused = true; 2411 2412 if(this.previous) { 2413 this.previous.pause(); 2414 } 2415 return true; 2416 }, 2417 /** 2418 * Resume a paused stream. 2419 * @return {Boolean} true if this call resumed the worker, false otherwise. 2420 */ 2421 resume : function () { 2422 if(!this.isPaused || this.isFinished) { 2423 return false; 2424 } 2425 this.isPaused = false; 2426 2427 // if true, the worker tried to resume but failed 2428 var withError = false; 2429 if(this.generatedError) { 2430 this.error(this.generatedError); 2431 withError = true; 2432 } 2433 if(this.previous) { 2434 this.previous.resume(); 2435 } 2436 2437 return !withError; 2438 }, 2439 /** 2440 * Flush any remaining bytes as the stream is ending. 2441 */ 2442 flush : function () {}, 2443 /** 2444 * Process a chunk. This is usually the method overridden. 
2445 * @param {Object} chunk the chunk to process. 2446 */ 2447 processChunk : function(chunk) { 2448 this.push(chunk); 2449 }, 2450 /** 2451 * Add a key/value to be added in the workers chain streamInfo once activated. 2452 * @param {String} key the key to use 2453 * @param {Object} value the associated value 2454 * @return {Worker} the current worker for chainability 2455 */ 2456 withStreamInfo : function (key, value) { 2457 this.extraStreamInfo[key] = value; 2458 this.mergeStreamInfo(); 2459 return this; 2460 }, 2461 /** 2462 * Merge this worker's streamInfo into the chain's streamInfo. 2463 */ 2464 mergeStreamInfo : function () { 2465 for(var key in this.extraStreamInfo) { 2466 if (!this.extraStreamInfo.hasOwnProperty(key)) { 2467 continue; 2468 } 2469 this.streamInfo[key] = this.extraStreamInfo[key]; 2470 } 2471 }, 2472 2473 /** 2474 * Lock the stream to prevent further updates on the workers chain. 2475 * After calling this method, all calls to pipe will fail. 2476 */ 2477 lock: function () { 2478 if (this.isLocked) { 2479 throw new Error("The stream '" + this + "' has already been used."); 2480 } 2481 this.isLocked = true; 2482 if (this.previous) { 2483 this.previous.lock(); 2484 } 2485 }, 2486 2487 /** 2488 * 2489 * Pretty print the workers chain. 
2490 */ 2491 toString : function () { 2492 var me = "Worker " + this.name; 2493 if (this.previous) { 2494 return this.previous + " -> " + me; 2495 } else { 2496 return me; 2497 } 2498 } 2499 }; 2500 2501 module.exports = GenericWorker; 2502 2503 },{}],29:[function(require,module,exports){ 2504 'use strict'; 2505 2506 var utils = require('../utils'); 2507 var ConvertWorker = require('./ConvertWorker'); 2508 var GenericWorker = require('./GenericWorker'); 2509 var base64 = require('../base64'); 2510 var support = require("../support"); 2511 var external = require("../external"); 2512 2513 var NodejsStreamOutputAdapter = null; 2514 if (support.nodestream) { 2515 try { 2516 NodejsStreamOutputAdapter = require('../nodejs/NodejsStreamOutputAdapter'); 2517 } catch(e) {} 2518 } 2519 2520 /** 2521 * Apply the final transformation of the data. If the user wants a Blob for 2522 * example, it's easier to work with an U8intArray and finally do the 2523 * ArrayBuffer/Blob conversion. 2524 * @param {String} type the name of the final type 2525 * @param {String|Uint8Array|Buffer} content the content to transform 2526 * @param {String} mimeType the mime type of the content, if applicable. 2527 * @return {String|Uint8Array|ArrayBuffer|Buffer|Blob} the content in the right format. 2528 */ 2529 function transformZipOutput(type, content, mimeType) { 2530 switch(type) { 2531 case "blob" : 2532 return utils.newBlob(utils.transformTo("arraybuffer", content), mimeType); 2533 case "base64" : 2534 return base64.encode(content); 2535 default : 2536 return utils.transformTo(type, content); 2537 } 2538 } 2539 2540 /** 2541 * Concatenate an array of data of the given type. 2542 * @param {String} type the type of the data in the given array. 
2543 * @param {Array} dataArray the array containing the data chunks to concatenate 2544 * @return {String|Uint8Array|Buffer} the concatenated data 2545 * @throws Error if the asked type is unsupported 2546 */ 2547 function concat (type, dataArray) { 2548 var i, index = 0, res = null, totalLength = 0; 2549 for(i = 0; i < dataArray.length; i++) { 2550 totalLength += dataArray[i].length; 2551 } 2552 switch(type) { 2553 case "string": 2554 return dataArray.join(""); 2555 case "array": 2556 return Array.prototype.concat.apply([], dataArray); 2557 case "uint8array": 2558 res = new Uint8Array(totalLength); 2559 for(i = 0; i < dataArray.length; i++) { 2560 res.set(dataArray[i], index); 2561 index += dataArray[i].length; 2562 } 2563 return res; 2564 case "nodebuffer": 2565 return Buffer.concat(dataArray); 2566 default: 2567 throw new Error("concat : unsupported type '" + type + "'"); 2568 } 2569 } 2570 2571 /** 2572 * Listen a StreamHelper, accumulate its content and concatenate it into a 2573 * complete block. 2574 * @param {StreamHelper} helper the helper to use. 2575 * @param {Function} updateCallback a callback called on each update. Called 2576 * with one arg : 2577 * - the metadata linked to the update received. 2578 * @return Promise the promise for the accumulation. 
2579 */ 2580 function accumulate(helper, updateCallback) { 2581 return new external.Promise(function (resolve, reject){ 2582 var dataArray = []; 2583 var chunkType = helper._internalType, 2584 resultType = helper._outputType, 2585 mimeType = helper._mimeType; 2586 helper 2587 .on('data', function (data, meta) { 2588 dataArray.push(data); 2589 if(updateCallback) { 2590 updateCallback(meta); 2591 } 2592 }) 2593 .on('error', function(err) { 2594 dataArray = []; 2595 reject(err); 2596 }) 2597 .on('end', function (){ 2598 try { 2599 var result = transformZipOutput(resultType, concat(chunkType, dataArray), mimeType); 2600 resolve(result); 2601 } catch (e) { 2602 reject(e); 2603 } 2604 dataArray = []; 2605 }) 2606 .resume(); 2607 }); 2608 } 2609 2610 /** 2611 * An helper to easily use workers outside of JSZip. 2612 * @constructor 2613 * @param {Worker} worker the worker to wrap 2614 * @param {String} outputType the type of data expected by the use 2615 * @param {String} mimeType the mime type of the content, if applicable. 2616 */ 2617 function StreamHelper(worker, outputType, mimeType) { 2618 var internalType = outputType; 2619 switch(outputType) { 2620 case "blob": 2621 case "arraybuffer": 2622 internalType = "uint8array"; 2623 break; 2624 case "base64": 2625 internalType = "string"; 2626 break; 2627 } 2628 2629 try { 2630 // the type used internally 2631 this._internalType = internalType; 2632 // the type used to output results 2633 this._outputType = outputType; 2634 // the mime type 2635 this._mimeType = mimeType; 2636 utils.checkSupport(internalType); 2637 this._worker = worker.pipe(new ConvertWorker(internalType)); 2638 // the last workers can be rewired without issues but we need to 2639 // prevent any updates on previous workers. 
2640 worker.lock(); 2641 } catch(e) { 2642 this._worker = new GenericWorker("error"); 2643 this._worker.error(e); 2644 } 2645 } 2646 2647 StreamHelper.prototype = { 2648 /** 2649 * Listen a StreamHelper, accumulate its content and concatenate it into a 2650 * complete block. 2651 * @param {Function} updateCb the update callback. 2652 * @return Promise the promise for the accumulation. 2653 */ 2654 accumulate : function (updateCb) { 2655 return accumulate(this, updateCb); 2656 }, 2657 /** 2658 * Add a listener on an event triggered on a stream. 2659 * @param {String} evt the name of the event 2660 * @param {Function} fn the listener 2661 * @return {StreamHelper} the current helper. 2662 */ 2663 on : function (evt, fn) { 2664 var self = this; 2665 2666 if(evt === "data") { 2667 this._worker.on(evt, function (chunk) { 2668 fn.call(self, chunk.data, chunk.meta); 2669 }); 2670 } else { 2671 this._worker.on(evt, function () { 2672 utils.delay(fn, arguments, self); 2673 }); 2674 } 2675 return this; 2676 }, 2677 /** 2678 * Resume the flow of chunks. 2679 * @return {StreamHelper} the current helper. 2680 */ 2681 resume : function () { 2682 utils.delay(this._worker.resume, [], this._worker); 2683 return this; 2684 }, 2685 /** 2686 * Pause the flow of chunks. 2687 * @return {StreamHelper} the current helper. 2688 */ 2689 pause : function () { 2690 this._worker.pause(); 2691 return this; 2692 }, 2693 /** 2694 * Return a nodejs stream for this helper. 2695 * @param {Function} updateCb the update callback. 2696 * @return {NodejsStreamOutputAdapter} the nodejs stream. 2697 */ 2698 toNodejsStream : function (updateCb) { 2699 utils.checkSupport("nodestream"); 2700 if (this._outputType !== "nodebuffer") { 2701 // an object stream containing blob/arraybuffer/uint8array/string 2702 // is strange and I don't know if it would be useful. 2703 // I you find this comment and have a good usecase, please open a 2704 // bug report ! 
2705 throw new Error(this._outputType + " is not supported by this method"); 2706 } 2707 2708 return new NodejsStreamOutputAdapter(this, { 2709 objectMode : this._outputType !== "nodebuffer" 2710 }, updateCb); 2711 } 2712 }; 2713 2714 2715 module.exports = StreamHelper; 2716 2717 },{"../base64":1,"../external":6,"../nodejs/NodejsStreamOutputAdapter":13,"../support":30,"../utils":32,"./ConvertWorker":24,"./GenericWorker":28}],30:[function(require,module,exports){ 2718 'use strict'; 2719 2720 exports.base64 = true; 2721 exports.array = true; 2722 exports.string = true; 2723 exports.arraybuffer = typeof ArrayBuffer !== "undefined" && typeof Uint8Array !== "undefined"; 2724 exports.nodebuffer = typeof Buffer !== "undefined"; 2725 // contains true if JSZip can read/generate Uint8Array, false otherwise. 2726 exports.uint8array = typeof Uint8Array !== "undefined"; 2727 2728 if (typeof ArrayBuffer === "undefined") { 2729 exports.blob = false; 2730 } 2731 else { 2732 var buffer = new ArrayBuffer(0); 2733 try { 2734 exports.blob = new Blob([buffer], { 2735 type: "application/zip" 2736 }).size === 0; 2737 } 2738 catch (e) { 2739 try { 2740 var Builder = self.BlobBuilder || self.WebKitBlobBuilder || self.MozBlobBuilder || self.MSBlobBuilder; 2741 var builder = new Builder(); 2742 builder.append(buffer); 2743 exports.blob = builder.getBlob('application/zip').size === 0; 2744 } 2745 catch (e) { 2746 exports.blob = false; 2747 } 2748 } 2749 } 2750 2751 try { 2752 exports.nodestream = !!require('readable-stream').Readable; 2753 } catch(e) { 2754 exports.nodestream = false; 2755 } 2756 2757 },{"readable-stream":16}],31:[function(require,module,exports){ 2758 'use strict'; 2759 2760 var utils = require('./utils'); 2761 var support = require('./support'); 2762 var nodejsUtils = require('./nodejsUtils'); 2763 var GenericWorker = require('./stream/GenericWorker'); 2764 2765 /** 2766 * The following functions come from pako, from pako/lib/utils/strings 2767 * released under the MIT 
license, see pako https://github.com/nodeca/pako/ 2768 */ 2769 2770 // Table with utf8 lengths (calculated by first byte of sequence) 2771 // Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS, 2772 // because max possible codepoint is 0x10ffff 2773 var _utf8len = new Array(256); 2774 for (var i=0; i<256; i++) { 2775 _utf8len[i] = (i >= 252 ? 6 : i >= 248 ? 5 : i >= 240 ? 4 : i >= 224 ? 3 : i >= 192 ? 2 : 1); 2776 } 2777 _utf8len[254]=_utf8len[254]=1; // Invalid sequence start 2778 2779 // convert string to array (typed, when possible) 2780 var string2buf = function (str) { 2781 var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0; 2782 2783 // count binary size 2784 for (m_pos = 0; m_pos < str_len; m_pos++) { 2785 c = str.charCodeAt(m_pos); 2786 if ((c & 0xfc00) === 0xd800 && (m_pos+1 < str_len)) { 2787 c2 = str.charCodeAt(m_pos+1); 2788 if ((c2 & 0xfc00) === 0xdc00) { 2789 c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); 2790 m_pos++; 2791 } 2792 } 2793 buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 
3 : 4; 2794 } 2795 2796 // allocate buffer 2797 if (support.uint8array) { 2798 buf = new Uint8Array(buf_len); 2799 } else { 2800 buf = new Array(buf_len); 2801 } 2802 2803 // convert 2804 for (i=0, m_pos = 0; i < buf_len; m_pos++) { 2805 c = str.charCodeAt(m_pos); 2806 if ((c & 0xfc00) === 0xd800 && (m_pos+1 < str_len)) { 2807 c2 = str.charCodeAt(m_pos+1); 2808 if ((c2 & 0xfc00) === 0xdc00) { 2809 c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); 2810 m_pos++; 2811 } 2812 } 2813 if (c < 0x80) { 2814 /* one byte */ 2815 buf[i++] = c; 2816 } else if (c < 0x800) { 2817 /* two bytes */ 2818 buf[i++] = 0xC0 | (c >>> 6); 2819 buf[i++] = 0x80 | (c & 0x3f); 2820 } else if (c < 0x10000) { 2821 /* three bytes */ 2822 buf[i++] = 0xE0 | (c >>> 12); 2823 buf[i++] = 0x80 | (c >>> 6 & 0x3f); 2824 buf[i++] = 0x80 | (c & 0x3f); 2825 } else { 2826 /* four bytes */ 2827 buf[i++] = 0xf0 | (c >>> 18); 2828 buf[i++] = 0x80 | (c >>> 12 & 0x3f); 2829 buf[i++] = 0x80 | (c >>> 6 & 0x3f); 2830 buf[i++] = 0x80 | (c & 0x3f); 2831 } 2832 } 2833 2834 return buf; 2835 }; 2836 2837 // Calculate max possible position in utf8 buffer, 2838 // that will not break sequence. If that's not possible 2839 // - (very small limits) return max size as is. 2840 // 2841 // buf[] - utf8 bytes array 2842 // max - length limit (mandatory); 2843 var utf8border = function(buf, max) { 2844 var pos; 2845 2846 max = max || buf.length; 2847 if (max > buf.length) { max = buf.length; } 2848 2849 // go back from last position, until start of sequence found 2850 pos = max-1; 2851 while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; } 2852 2853 // Fuckup - very small and broken sequence, 2854 // return max, because we should return something anyway. 2855 if (pos < 0) { return max; } 2856 2857 // If we came to start of buffer - that means vuffer is too small, 2858 // return max too. 2859 if (pos === 0) { return max; } 2860 2861 return (pos + _utf8len[buf[pos]] > max) ? 
pos : max; 2862 }; 2863 2864 // convert array to string 2865 var buf2string = function (buf) { 2866 var str, i, out, c, c_len; 2867 var len = buf.length; 2868 2869 // Reserve max possible length (2 words per char) 2870 // NB: by unknown reasons, Array is significantly faster for 2871 // String.fromCharCode.apply than Uint16Array. 2872 var utf16buf = new Array(len*2); 2873 2874 for (out=0, i=0; i<len;) { 2875 c = buf[i++]; 2876 // quick process ascii 2877 if (c < 0x80) { utf16buf[out++] = c; continue; } 2878 2879 c_len = _utf8len[c]; 2880 // skip 5 & 6 byte codes 2881 if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len-1; continue; } 2882 2883 // apply mask on first byte 2884 c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07; 2885 // join the rest 2886 while (c_len > 1 && i < len) { 2887 c = (c << 6) | (buf[i++] & 0x3f); 2888 c_len--; 2889 } 2890 2891 // terminated by end of string? 2892 if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; } 2893 2894 if (c < 0x10000) { 2895 utf16buf[out++] = c; 2896 } else { 2897 c -= 0x10000; 2898 utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff); 2899 utf16buf[out++] = 0xdc00 | (c & 0x3ff); 2900 } 2901 } 2902 2903 // shrinkBuf(utf16buf, out) 2904 if (utf16buf.length !== out) { 2905 if(utf16buf.subarray) { 2906 utf16buf = utf16buf.subarray(0, out); 2907 } else { 2908 utf16buf.length = out; 2909 } 2910 } 2911 2912 // return String.fromCharCode.apply(null, utf16buf); 2913 return utils.applyFromCharCode(utf16buf); 2914 }; 2915 2916 2917 // That's all for the pako functions. 2918 2919 2920 /** 2921 * Transform a javascript string into an array (typed if possible) of bytes, 2922 * UTF-8 encoded. 2923 * @param {String} str the string to encode 2924 * @return {Array|Uint8Array|Buffer} the UTF-8 encoded string. 
2925 */ 2926 exports.utf8encode = function utf8encode(str) { 2927 if (support.nodebuffer) { 2928 return nodejsUtils.newBufferFrom(str, "utf-8"); 2929 } 2930 2931 return string2buf(str); 2932 }; 2933 2934 2935 /** 2936 * Transform a bytes array (or a representation) representing an UTF-8 encoded 2937 * string into a javascript string. 2938 * @param {Array|Uint8Array|Buffer} buf the data de decode 2939 * @return {String} the decoded string. 2940 */ 2941 exports.utf8decode = function utf8decode(buf) { 2942 if (support.nodebuffer) { 2943 return utils.transformTo("nodebuffer", buf).toString("utf-8"); 2944 } 2945 2946 buf = utils.transformTo(support.uint8array ? "uint8array" : "array", buf); 2947 2948 return buf2string(buf); 2949 }; 2950 2951 /** 2952 * A worker to decode utf8 encoded binary chunks into string chunks. 2953 * @constructor 2954 */ 2955 function Utf8DecodeWorker() { 2956 GenericWorker.call(this, "utf-8 decode"); 2957 // the last bytes if a chunk didn't end with a complete codepoint. 2958 this.leftOver = null; 2959 } 2960 utils.inherits(Utf8DecodeWorker, GenericWorker); 2961 2962 /** 2963 * @see GenericWorker.processChunk 2964 */ 2965 Utf8DecodeWorker.prototype.processChunk = function (chunk) { 2966 2967 var data = utils.transformTo(support.uint8array ? 
"uint8array" : "array", chunk.data); 2968 2969 // 1st step, re-use what's left of the previous chunk 2970 if (this.leftOver && this.leftOver.length) { 2971 if(support.uint8array) { 2972 var previousData = data; 2973 data = new Uint8Array(previousData.length + this.leftOver.length); 2974 data.set(this.leftOver, 0); 2975 data.set(previousData, this.leftOver.length); 2976 } else { 2977 data = this.leftOver.concat(data); 2978 } 2979 this.leftOver = null; 2980 } 2981 2982 var nextBoundary = utf8border(data); 2983 var usableData = data; 2984 if (nextBoundary !== data.length) { 2985 if (support.uint8array) { 2986 usableData = data.subarray(0, nextBoundary); 2987 this.leftOver = data.subarray(nextBoundary, data.length); 2988 } else { 2989 usableData = data.slice(0, nextBoundary); 2990 this.leftOver = data.slice(nextBoundary, data.length); 2991 } 2992 } 2993 2994 this.push({ 2995 data : exports.utf8decode(usableData), 2996 meta : chunk.meta 2997 }); 2998 }; 2999 3000 /** 3001 * @see GenericWorker.flush 3002 */ 3003 Utf8DecodeWorker.prototype.flush = function () { 3004 if(this.leftOver && this.leftOver.length) { 3005 this.push({ 3006 data : exports.utf8decode(this.leftOver), 3007 meta : {} 3008 }); 3009 this.leftOver = null; 3010 } 3011 }; 3012 exports.Utf8DecodeWorker = Utf8DecodeWorker; 3013 3014 /** 3015 * A worker to endcode string chunks into utf8 encoded binary chunks. 
3016 * @constructor 3017 */ 3018 function Utf8EncodeWorker() { 3019 GenericWorker.call(this, "utf-8 encode"); 3020 } 3021 utils.inherits(Utf8EncodeWorker, GenericWorker); 3022 3023 /** 3024 * @see GenericWorker.processChunk 3025 */ 3026 Utf8EncodeWorker.prototype.processChunk = function (chunk) { 3027 this.push({ 3028 data : exports.utf8encode(chunk.data), 3029 meta : chunk.meta 3030 }); 3031 }; 3032 exports.Utf8EncodeWorker = Utf8EncodeWorker; 3033 3034 },{"./nodejsUtils":14,"./stream/GenericWorker":28,"./support":30,"./utils":32}],32:[function(require,module,exports){ 3035 'use strict'; 3036 3037 var support = require('./support'); 3038 var base64 = require('./base64'); 3039 var nodejsUtils = require('./nodejsUtils'); 3040 var setImmediate = require('set-immediate-shim'); 3041 var external = require("./external"); 3042 3043 3044 /** 3045 * Convert a string that pass as a "binary string": it should represent a byte 3046 * array but may have > 255 char codes. Be sure to take only the first byte 3047 * and returns the byte array. 3048 * @param {String} str the string to transform. 3049 * @return {Array|Uint8Array} the string in a binary format. 3050 */ 3051 function string2binary(str) { 3052 var result = null; 3053 if (support.uint8array) { 3054 result = new Uint8Array(str.length); 3055 } else { 3056 result = new Array(str.length); 3057 } 3058 return stringToArrayLike(str, result); 3059 } 3060 3061 /** 3062 * Create a new blob with the given content and the given type. 3063 * @param {String|ArrayBuffer} part the content to put in the blob. DO NOT use 3064 * an Uint8Array because the stock browser of android 4 won't accept it (it 3065 * will be silently converted to a string, "[object Uint8Array]"). 3066 * 3067 * Use only ONE part to build the blob to avoid a memory leak in IE11 / Edge: 3068 * when a large amount of Array is used to create the Blob, the amount of 3069 * memory consumed is nearly 100 times the original data amount. 
3070 * 3071 * @param {String} type the mime type of the blob. 3072 * @return {Blob} the created blob. 3073 */ 3074 exports.newBlob = function(part, type) { 3075 exports.checkSupport("blob"); 3076 3077 try { 3078 // Blob constructor 3079 return new Blob([part], { 3080 type: type 3081 }); 3082 } 3083 catch (e) { 3084 3085 try { 3086 // deprecated, browser only, old way 3087 var Builder = self.BlobBuilder || self.WebKitBlobBuilder || self.MozBlobBuilder || self.MSBlobBuilder; 3088 var builder = new Builder(); 3089 builder.append(part); 3090 return builder.getBlob(type); 3091 } 3092 catch (e) { 3093 3094 // well, fuck ?! 3095 throw new Error("Bug : can't construct the Blob."); 3096 } 3097 } 3098 3099 3100 }; 3101 /** 3102 * The identity function. 3103 * @param {Object} input the input. 3104 * @return {Object} the same input. 3105 */ 3106 function identity(input) { 3107 return input; 3108 } 3109 3110 /** 3111 * Fill in an array with a string. 3112 * @param {String} str the string to use. 3113 * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to fill in (will be mutated). 3114 * @return {Array|ArrayBuffer|Uint8Array|Buffer} the updated array. 3115 */ 3116 function stringToArrayLike(str, array) { 3117 for (var i = 0; i < str.length; ++i) { 3118 array[i] = str.charCodeAt(i) & 0xFF; 3119 } 3120 return array; 3121 } 3122 3123 /** 3124 * An helper for the function arrayLikeToString. 3125 * This contains static informations and functions that 3126 * can be optimized by the browser JIT compiler. 3127 */ 3128 var arrayToStringHelper = { 3129 /** 3130 * Transform an array of int into a string, chunk by chunk. 3131 * See the performances notes on arrayLikeToString. 3132 * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform. 3133 * @param {String} type the type of the array. 3134 * @param {Integer} chunk the chunk size. 3135 * @return {String} the resulting string. 3136 * @throws Error if the chunk is too big for the stack. 
3137 */ 3138 stringifyByChunk: function(array, type, chunk) { 3139 var result = [], k = 0, len = array.length; 3140 // shortcut 3141 if (len <= chunk) { 3142 return String.fromCharCode.apply(null, array); 3143 } 3144 while (k < len) { 3145 if (type === "array" || type === "nodebuffer") { 3146 result.push(String.fromCharCode.apply(null, array.slice(k, Math.min(k + chunk, len)))); 3147 } 3148 else { 3149 result.push(String.fromCharCode.apply(null, array.subarray(k, Math.min(k + chunk, len)))); 3150 } 3151 k += chunk; 3152 } 3153 return result.join(""); 3154 }, 3155 /** 3156 * Call String.fromCharCode on every item in the array. 3157 * This is the naive implementation, which generate A LOT of intermediate string. 3158 * This should be used when everything else fail. 3159 * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform. 3160 * @return {String} the result. 3161 */ 3162 stringifyByChar: function(array){ 3163 var resultStr = ""; 3164 for(var i = 0; i < array.length; i++) { 3165 resultStr += String.fromCharCode(array[i]); 3166 } 3167 return resultStr; 3168 }, 3169 applyCanBeUsed : { 3170 /** 3171 * true if the browser accepts to use String.fromCharCode on Uint8Array 3172 */ 3173 uint8array : (function () { 3174 try { 3175 return support.uint8array && String.fromCharCode.apply(null, new Uint8Array(1)).length === 1; 3176 } catch (e) { 3177 return false; 3178 } 3179 })(), 3180 /** 3181 * true if the browser accepts to use String.fromCharCode on nodejs Buffer. 3182 */ 3183 nodebuffer : (function () { 3184 try { 3185 return support.nodebuffer && String.fromCharCode.apply(null, nodejsUtils.allocBuffer(1)).length === 1; 3186 } catch (e) { 3187 return false; 3188 } 3189 })() 3190 } 3191 }; 3192 3193 /** 3194 * Transform an array-like object to a string. 3195 * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform. 3196 * @return {String} the result. 
3197 */ 3198 function arrayLikeToString(array) { 3199 // Performances notes : 3200 // -------------------- 3201 // String.fromCharCode.apply(null, array) is the fastest, see 3202 // see http://jsperf.com/converting-a-uint8array-to-a-string/2 3203 // but the stack is limited (and we can get huge arrays !). 3204 // 3205 // result += String.fromCharCode(array[i]); generate too many strings ! 3206 // 3207 // This code is inspired by http://jsperf.com/arraybuffer-to-string-apply-performance/2 3208 // TODO : we now have workers that split the work. Do we still need that ? 3209 var chunk = 65536, 3210 type = exports.getTypeOf(array), 3211 canUseApply = true; 3212 if (type === "uint8array") { 3213 canUseApply = arrayToStringHelper.applyCanBeUsed.uint8array; 3214 } else if (type === "nodebuffer") { 3215 canUseApply = arrayToStringHelper.applyCanBeUsed.nodebuffer; 3216 } 3217 3218 if (canUseApply) { 3219 while (chunk > 1) { 3220 try { 3221 return arrayToStringHelper.stringifyByChunk(array, type, chunk); 3222 } catch (e) { 3223 chunk = Math.floor(chunk / 2); 3224 } 3225 } 3226 } 3227 3228 // no apply or chunk error : slow and painful algorithm 3229 // default browser on android 4.* 3230 return arrayToStringHelper.stringifyByChar(array); 3231 } 3232 3233 exports.applyFromCharCode = arrayLikeToString; 3234 3235 3236 /** 3237 * Copy the data from an array-like to an other array-like. 3238 * @param {Array|ArrayBuffer|Uint8Array|Buffer} arrayFrom the origin array. 3239 * @param {Array|ArrayBuffer|Uint8Array|Buffer} arrayTo the destination array which will be mutated. 3240 * @return {Array|ArrayBuffer|Uint8Array|Buffer} the updated destination array. 3241 */ 3242 function arrayLikeToArrayLike(arrayFrom, arrayTo) { 3243 for (var i = 0; i < arrayFrom.length; i++) { 3244 arrayTo[i] = arrayFrom[i]; 3245 } 3246 return arrayTo; 3247 } 3248 3249 // a matrix containing functions to transform everything into everything. 3250 var transform = {}; 3251 3252 // string to ? 
transform["string"] = {
    "string": identity,
    "array": function(str) {
        return stringToArrayLike(str, new Array(str.length));
    },
    "arraybuffer": function(str) {
        // Reuse the uint8array conversion and expose its underlying buffer.
        return transform["string"]["uint8array"](str).buffer;
    },
    "uint8array": function(str) {
        return stringToArrayLike(str, new Uint8Array(str.length));
    },
    "nodebuffer": function(str) {
        return stringToArrayLike(str, nodejsUtils.allocBuffer(str.length));
    }
};

// array to ?
transform["array"] = {
    "string": arrayLikeToString,
    "array": identity,
    "arraybuffer": function(arr) {
        return (new Uint8Array(arr)).buffer;
    },
    "uint8array": function(arr) {
        return new Uint8Array(arr);
    },
    "nodebuffer": function(arr) {
        return nodejsUtils.newBufferFrom(arr);
    }
};

// arraybuffer to ?
transform["arraybuffer"] = {
    "string": function(buffer) {
        return arrayLikeToString(new Uint8Array(buffer));
    },
    "array": function(buffer) {
        return arrayLikeToArrayLike(new Uint8Array(buffer), new Array(buffer.byteLength));
    },
    "arraybuffer": identity,
    "uint8array": function(buffer) {
        return new Uint8Array(buffer);
    },
    "nodebuffer": function(buffer) {
        return nodejsUtils.newBufferFrom(new Uint8Array(buffer));
    }
};

// uint8array to ?
transform["uint8array"] = {
    "string": arrayLikeToString,
    "array": function(u8) {
        return arrayLikeToArrayLike(u8, new Array(u8.length));
    },
    "arraybuffer": function(u8) {
        return u8.buffer;
    },
    "uint8array": identity,
    "nodebuffer": function(u8) {
        return nodejsUtils.newBufferFrom(u8);
    }
};

// nodebuffer to ?
3317 transform["nodebuffer"] = { 3318 "string": arrayLikeToString, 3319 "array": function(input) { 3320 return arrayLikeToArrayLike(input, new Array(input.length)); 3321 }, 3322 "arraybuffer": function(input) { 3323 return transform["nodebuffer"]["uint8array"](input).buffer; 3324 }, 3325 "uint8array": function(input) { 3326 return arrayLikeToArrayLike(input, new Uint8Array(input.length)); 3327 }, 3328 "nodebuffer": identity 3329 }; 3330 3331 /** 3332 * Transform an input into any type. 3333 * The supported output type are : string, array, uint8array, arraybuffer, nodebuffer. 3334 * If no output type is specified, the unmodified input will be returned. 3335 * @param {String} outputType the output type. 3336 * @param {String|Array|ArrayBuffer|Uint8Array|Buffer} input the input to convert. 3337 * @throws {Error} an Error if the browser doesn't support the requested output type. 3338 */ 3339 exports.transformTo = function(outputType, input) { 3340 if (!input) { 3341 // undefined, null, etc 3342 // an empty string won't harm. 3343 input = ""; 3344 } 3345 if (!outputType) { 3346 return input; 3347 } 3348 exports.checkSupport(outputType); 3349 var inputType = exports.getTypeOf(input); 3350 var result = transform[inputType][outputType](input); 3351 return result; 3352 }; 3353 3354 /** 3355 * Return the type of the input. 3356 * The type will be in a format valid for JSZip.utils.transformTo : string, array, uint8array, arraybuffer. 3357 * @param {Object} input the input to identify. 3358 * @return {String} the (lowercase) type of the input. 
3359 */ 3360 exports.getTypeOf = function(input) { 3361 if (typeof input === "string") { 3362 return "string"; 3363 } 3364 if (Object.prototype.toString.call(input) === "[object Array]") { 3365 return "array"; 3366 } 3367 if (support.nodebuffer && nodejsUtils.isBuffer(input)) { 3368 return "nodebuffer"; 3369 } 3370 if (support.uint8array && input instanceof Uint8Array) { 3371 return "uint8array"; 3372 } 3373 if (support.arraybuffer && input instanceof ArrayBuffer) { 3374 return "arraybuffer"; 3375 } 3376 }; 3377 3378 /** 3379 * Throw an exception if the type is not supported. 3380 * @param {String} type the type to check. 3381 * @throws {Error} an Error if the browser doesn't support the requested type. 3382 */ 3383 exports.checkSupport = function(type) { 3384 var supported = support[type.toLowerCase()]; 3385 if (!supported) { 3386 throw new Error(type + " is not supported by this platform"); 3387 } 3388 }; 3389 3390 exports.MAX_VALUE_16BITS = 65535; 3391 exports.MAX_VALUE_32BITS = -1; // well, "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" is parsed as -1 3392 3393 /** 3394 * Prettify a string read as binary. 3395 * @param {string} str the string to prettify. 3396 * @return {string} a pretty string. 3397 */ 3398 exports.pretty = function(str) { 3399 var res = '', 3400 code, i; 3401 for (i = 0; i < (str || "").length; i++) { 3402 code = str.charCodeAt(i); 3403 res += '\\x' + (code < 16 ? "0" : "") + code.toString(16).toUpperCase(); 3404 } 3405 return res; 3406 }; 3407 3408 /** 3409 * Defer the call of a function. 3410 * @param {Function} callback the function to call asynchronously. 3411 * @param {Array} args the arguments to give to the callback. 3412 */ 3413 exports.delay = function(callback, args, self) { 3414 setImmediate(function () { 3415 callback.apply(self || null, args || []); 3416 }); 3417 }; 3418 3419 /** 3420 * Extends a prototype with an other, without calling a constructor with 3421 * side effects. 
Inspired by nodejs' `utils.inherits` 3422 * @param {Function} ctor the constructor to augment 3423 * @param {Function} superCtor the parent constructor to use 3424 */ 3425 exports.inherits = function (ctor, superCtor) { 3426 var Obj = function() {}; 3427 Obj.prototype = superCtor.prototype; 3428 ctor.prototype = new Obj(); 3429 }; 3430 3431 /** 3432 * Merge the objects passed as parameters into a new one. 3433 * @private 3434 * @param {...Object} var_args All objects to merge. 3435 * @return {Object} a new object with the data of the others. 3436 */ 3437 exports.extend = function() { 3438 var result = {}, i, attr; 3439 for (i = 0; i < arguments.length; i++) { // arguments is not enumerable in some browsers 3440 for (attr in arguments[i]) { 3441 if (arguments[i].hasOwnProperty(attr) && typeof result[attr] === "undefined") { 3442 result[attr] = arguments[i][attr]; 3443 } 3444 } 3445 } 3446 return result; 3447 }; 3448 3449 /** 3450 * Transform arbitrary content into a Promise. 3451 * @param {String} name a name for the content being processed. 3452 * @param {Object} inputData the content to process. 3453 * @param {Boolean} isBinary true if the content is not an unicode string 3454 * @param {Boolean} isOptimizedBinaryString true if the string content only has one byte per character. 3455 * @param {Boolean} isBase64 true if the string content is encoded with base64. 3456 * @return {Promise} a promise in a format usable by JSZip. 3457 */ 3458 exports.prepareContent = function(name, inputData, isBinary, isOptimizedBinaryString, isBase64) { 3459 3460 // if inputData is already a promise, this flatten it. 
3461 var promise = external.Promise.resolve(inputData).then(function(data) { 3462 3463 3464 var isBlob = support.blob && (data instanceof Blob || ['[object File]', '[object Blob]'].indexOf(Object.prototype.toString.call(data)) !== -1); 3465 3466 if (isBlob && typeof FileReader !== "undefined") { 3467 return new external.Promise(function (resolve, reject) { 3468 var reader = new FileReader(); 3469 3470 reader.onload = function(e) { 3471 resolve(e.target.result); 3472 }; 3473 reader.onerror = function(e) { 3474 reject(e.target.error); 3475 }; 3476 reader.readAsArrayBuffer(data); 3477 }); 3478 } else { 3479 return data; 3480 } 3481 }); 3482 3483 return promise.then(function(data) { 3484 var dataType = exports.getTypeOf(data); 3485 3486 if (!dataType) { 3487 return external.Promise.reject( 3488 new Error("Can't read the data of '" + name + "'. Is it " + 3489 "in a supported JavaScript type (String, Blob, ArrayBuffer, etc) ?") 3490 ); 3491 } 3492 // special case : it's way easier to work with Uint8Array than with ArrayBuffer 3493 if (dataType === "arraybuffer") { 3494 data = exports.transformTo("uint8array", data); 3495 } else if (dataType === "string") { 3496 if (isBase64) { 3497 data = base64.decode(data); 3498 } 3499 else if (isBinary) { 3500 // optimizedBinaryString === true means that the file has already been filtered with a 0xFF mask 3501 if (isOptimizedBinaryString !== true) { 3502 // this is a string, not in a base64 format. 
3503 // Be sure that this is a correct "binary string" 3504 data = string2binary(data); 3505 } 3506 } 3507 } 3508 return data; 3509 }); 3510 }; 3511 3512 },{"./base64":1,"./external":6,"./nodejsUtils":14,"./support":30,"set-immediate-shim":54}],33:[function(require,module,exports){ 3513 'use strict'; 3514 var readerFor = require('./reader/readerFor'); 3515 var utils = require('./utils'); 3516 var sig = require('./signature'); 3517 var ZipEntry = require('./zipEntry'); 3518 var utf8 = require('./utf8'); 3519 var support = require('./support'); 3520 // class ZipEntries {{{ 3521 /** 3522 * All the entries in the zip file. 3523 * @constructor 3524 * @param {Object} loadOptions Options for loading the stream. 3525 */ 3526 function ZipEntries(loadOptions) { 3527 this.files = []; 3528 this.loadOptions = loadOptions; 3529 } 3530 ZipEntries.prototype = { 3531 /** 3532 * Check that the reader is on the specified signature. 3533 * @param {string} expectedSignature the expected signature. 3534 * @throws {Error} if it is an other signature. 3535 */ 3536 checkSignature: function(expectedSignature) { 3537 if (!this.reader.readAndCheckSignature(expectedSignature)) { 3538 this.reader.index -= 4; 3539 var signature = this.reader.readString(4); 3540 throw new Error("Corrupted zip or bug: unexpected signature " + "(" + utils.pretty(signature) + ", expected " + utils.pretty(expectedSignature) + ")"); 3541 } 3542 }, 3543 /** 3544 * Check if the given signature is at the given index. 3545 * @param {number} askedIndex the index to check. 3546 * @param {string} expectedSignature the signature to expect. 3547 * @return {boolean} true if the signature is here, false otherwise. 
3548 */ 3549 isSignature: function(askedIndex, expectedSignature) { 3550 var currentIndex = this.reader.index; 3551 this.reader.setIndex(askedIndex); 3552 var signature = this.reader.readString(4); 3553 var result = signature === expectedSignature; 3554 this.reader.setIndex(currentIndex); 3555 return result; 3556 }, 3557 /** 3558 * Read the end of the central directory. 3559 */ 3560 readBlockEndOfCentral: function() { 3561 this.diskNumber = this.reader.readInt(2); 3562 this.diskWithCentralDirStart = this.reader.readInt(2); 3563 this.centralDirRecordsOnThisDisk = this.reader.readInt(2); 3564 this.centralDirRecords = this.reader.readInt(2); 3565 this.centralDirSize = this.reader.readInt(4); 3566 this.centralDirOffset = this.reader.readInt(4); 3567 3568 this.zipCommentLength = this.reader.readInt(2); 3569 // warning : the encoding depends of the system locale 3570 // On a linux machine with LANG=en_US.utf8, this field is utf8 encoded. 3571 // On a windows machine, this field is encoded with the localized windows code page. 3572 var zipComment = this.reader.readData(this.zipCommentLength); 3573 var decodeParamType = support.uint8array ? "uint8array" : "array"; 3574 // To get consistent behavior with the generation part, we will assume that 3575 // this is utf8 encoded unless specified otherwise. 3576 var decodeContent = utils.transformTo(decodeParamType, zipComment); 3577 this.zipComment = this.loadOptions.decodeFileName(decodeContent); 3578 }, 3579 /** 3580 * Read the end of the Zip 64 central directory. 3581 * Not merged with the method readEndOfCentral : 3582 * The end of central can coexist with its Zip64 brother, 3583 * I don't want to read the wrong number of bytes ! 
3584 */ 3585 readBlockZip64EndOfCentral: function() { 3586 this.zip64EndOfCentralSize = this.reader.readInt(8); 3587 this.reader.skip(4); 3588 // this.versionMadeBy = this.reader.readString(2); 3589 // this.versionNeeded = this.reader.readInt(2); 3590 this.diskNumber = this.reader.readInt(4); 3591 this.diskWithCentralDirStart = this.reader.readInt(4); 3592 this.centralDirRecordsOnThisDisk = this.reader.readInt(8); 3593 this.centralDirRecords = this.reader.readInt(8); 3594 this.centralDirSize = this.reader.readInt(8); 3595 this.centralDirOffset = this.reader.readInt(8); 3596 3597 this.zip64ExtensibleData = {}; 3598 var extraDataSize = this.zip64EndOfCentralSize - 44, 3599 index = 0, 3600 extraFieldId, 3601 extraFieldLength, 3602 extraFieldValue; 3603 while (index < extraDataSize) { 3604 extraFieldId = this.reader.readInt(2); 3605 extraFieldLength = this.reader.readInt(4); 3606 extraFieldValue = this.reader.readData(extraFieldLength); 3607 this.zip64ExtensibleData[extraFieldId] = { 3608 id: extraFieldId, 3609 length: extraFieldLength, 3610 value: extraFieldValue 3611 }; 3612 } 3613 }, 3614 /** 3615 * Read the end of the Zip 64 central directory locator. 3616 */ 3617 readBlockZip64EndOfCentralLocator: function() { 3618 this.diskWithZip64CentralDirStart = this.reader.readInt(4); 3619 this.relativeOffsetEndOfZip64CentralDir = this.reader.readInt(8); 3620 this.disksCount = this.reader.readInt(4); 3621 if (this.disksCount > 1) { 3622 throw new Error("Multi-volumes zip are not supported"); 3623 } 3624 }, 3625 /** 3626 * Read the local files, based on the offset read in the central part. 3627 */ 3628 readLocalFiles: function() { 3629 var i, file; 3630 for (i = 0; i < this.files.length; i++) { 3631 file = this.files[i]; 3632 this.reader.setIndex(file.localHeaderOffset); 3633 this.checkSignature(sig.LOCAL_FILE_HEADER); 3634 file.readLocalPart(this.reader); 3635 file.handleUTF8(); 3636 file.processAttributes(); 3637 } 3638 }, 3639 /** 3640 * Read the central directory. 
3641 */ 3642 readCentralDir: function() { 3643 var file; 3644 3645 this.reader.setIndex(this.centralDirOffset); 3646 while (this.reader.readAndCheckSignature(sig.CENTRAL_FILE_HEADER)) { 3647 file = new ZipEntry({ 3648 zip64: this.zip64 3649 }, this.loadOptions); 3650 file.readCentralPart(this.reader); 3651 this.files.push(file); 3652 } 3653 3654 if (this.centralDirRecords !== this.files.length) { 3655 if (this.centralDirRecords !== 0 && this.files.length === 0) { 3656 // We expected some records but couldn't find ANY. 3657 // This is really suspicious, as if something went wrong. 3658 throw new Error("Corrupted zip or bug: expected " + this.centralDirRecords + " records in central dir, got " + this.files.length); 3659 } else { 3660 // We found some records but not all. 3661 // Something is wrong but we got something for the user: no error here. 3662 // console.warn("expected", this.centralDirRecords, "records in central dir, got", this.files.length); 3663 } 3664 } 3665 }, 3666 /** 3667 * Read the end of central directory. 3668 */ 3669 readEndOfCentral: function() { 3670 var offset = this.reader.lastIndexOfSignature(sig.CENTRAL_DIRECTORY_END); 3671 if (offset < 0) { 3672 // Check if the content is a truncated zip or complete garbage. 3673 // A "LOCAL_FILE_HEADER" is not required at the beginning (auto 3674 // extractible zip for example) but it can give a good hint. 3675 // If an ajax request was used without responseType, we will also 3676 // get unreadable data. 3677 var isGarbage = !this.isSignature(0, sig.LOCAL_FILE_HEADER); 3678 3679 if (isGarbage) { 3680 throw new Error("Can't find end of central directory : is this a zip file ? 
" + 3681 "If it is, see https://stuk.github.io/jszip/documentation/howto/read_zip.html"); 3682 } else { 3683 throw new Error("Corrupted zip: can't find end of central directory"); 3684 } 3685 3686 } 3687 this.reader.setIndex(offset); 3688 var endOfCentralDirOffset = offset; 3689 this.checkSignature(sig.CENTRAL_DIRECTORY_END); 3690 this.readBlockEndOfCentral(); 3691 3692 3693 /* extract from the zip spec : 3694 4) If one of the fields in the end of central directory 3695 record is too small to hold required data, the field 3696 should be set to -1 (0xFFFF or 0xFFFFFFFF) and the 3697 ZIP64 format record should be created. 3698 5) The end of central directory record and the 3699 Zip64 end of central directory locator record must 3700 reside on the same disk when splitting or spanning 3701 an archive. 3702 */ 3703 if (this.diskNumber === utils.MAX_VALUE_16BITS || this.diskWithCentralDirStart === utils.MAX_VALUE_16BITS || this.centralDirRecordsOnThisDisk === utils.MAX_VALUE_16BITS || this.centralDirRecords === utils.MAX_VALUE_16BITS || this.centralDirSize === utils.MAX_VALUE_32BITS || this.centralDirOffset === utils.MAX_VALUE_32BITS) { 3704 this.zip64 = true; 3705 3706 /* 3707 Warning : the zip64 extension is supported, but ONLY if the 64bits integer read from 3708 the zip file can fit into a 32bits integer. This cannot be solved : JavaScript represents 3709 all numbers as 64-bit double precision IEEE 754 floating point numbers. 3710 So, we have 53bits for integers and bitwise operations treat everything as 32bits. 
3711 see https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Operators/Bitwise_Operators 3712 and http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf section 8.5 3713 */ 3714 3715 // should look for a zip64 EOCD locator 3716 offset = this.reader.lastIndexOfSignature(sig.ZIP64_CENTRAL_DIRECTORY_LOCATOR); 3717 if (offset < 0) { 3718 throw new Error("Corrupted zip: can't find the ZIP64 end of central directory locator"); 3719 } 3720 this.reader.setIndex(offset); 3721 this.checkSignature(sig.ZIP64_CENTRAL_DIRECTORY_LOCATOR); 3722 this.readBlockZip64EndOfCentralLocator(); 3723 3724 // now the zip64 EOCD record 3725 if (!this.isSignature(this.relativeOffsetEndOfZip64CentralDir, sig.ZIP64_CENTRAL_DIRECTORY_END)) { 3726 // console.warn("ZIP64 end of central directory not where expected."); 3727 this.relativeOffsetEndOfZip64CentralDir = this.reader.lastIndexOfSignature(sig.ZIP64_CENTRAL_DIRECTORY_END); 3728 if (this.relativeOffsetEndOfZip64CentralDir < 0) { 3729 throw new Error("Corrupted zip: can't find the ZIP64 end of central directory"); 3730 } 3731 } 3732 this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir); 3733 this.checkSignature(sig.ZIP64_CENTRAL_DIRECTORY_END); 3734 this.readBlockZip64EndOfCentral(); 3735 } 3736 3737 var expectedEndOfCentralDirOffset = this.centralDirOffset + this.centralDirSize; 3738 if (this.zip64) { 3739 expectedEndOfCentralDirOffset += 20; // end of central dir 64 locator 3740 expectedEndOfCentralDirOffset += 12 /* should not include the leading 12 bytes */ + this.zip64EndOfCentralSize; 3741 } 3742 3743 var extraBytes = endOfCentralDirOffset - expectedEndOfCentralDirOffset; 3744 3745 if (extraBytes > 0) { 3746 // console.warn(extraBytes, "extra bytes at beginning or within zipfile"); 3747 if (this.isSignature(endOfCentralDirOffset, sig.CENTRAL_FILE_HEADER)) { 3748 // The offsets seem wrong, but we have something at the specified offset. 3749 // So… we keep it. 
3750 } else { 3751 // the offset is wrong, update the "zero" of the reader 3752 // this happens if data has been prepended (crx files for example) 3753 this.reader.zero = extraBytes; 3754 } 3755 } else if (extraBytes < 0) { 3756 throw new Error("Corrupted zip: missing " + Math.abs(extraBytes) + " bytes."); 3757 } 3758 }, 3759 prepareReader: function(data) { 3760 this.reader = readerFor(data); 3761 }, 3762 /** 3763 * Read a zip file and create ZipEntries. 3764 * @param {String|ArrayBuffer|Uint8Array|Buffer} data the binary string representing a zip file. 3765 */ 3766 load: function(data) { 3767 this.prepareReader(data); 3768 this.readEndOfCentral(); 3769 this.readCentralDir(); 3770 this.readLocalFiles(); 3771 } 3772 }; 3773 // }}} end of ZipEntries 3774 module.exports = ZipEntries; 3775 3776 },{"./reader/readerFor":22,"./signature":23,"./support":30,"./utf8":31,"./utils":32,"./zipEntry":34}],34:[function(require,module,exports){ 3777 'use strict'; 3778 var readerFor = require('./reader/readerFor'); 3779 var utils = require('./utils'); 3780 var CompressedObject = require('./compressedObject'); 3781 var crc32fn = require('./crc32'); 3782 var utf8 = require('./utf8'); 3783 var compressions = require('./compressions'); 3784 var support = require('./support'); 3785 3786 var MADE_BY_DOS = 0x00; 3787 var MADE_BY_UNIX = 0x03; 3788 3789 /** 3790 * Find a compression registered in JSZip. 3791 * @param {string} compressionMethod the method magic to find. 3792 * @return {Object|null} the JSZip compression object, null if none found. 3793 */ 3794 var findCompression = function(compressionMethod) { 3795 for (var method in compressions) { 3796 if (!compressions.hasOwnProperty(method)) { 3797 continue; 3798 } 3799 if (compressions[method].magic === compressionMethod) { 3800 return compressions[method]; 3801 } 3802 } 3803 return null; 3804 }; 3805 3806 // class ZipEntry {{{ 3807 /** 3808 * An entry in the zip file. 
3809 * @constructor 3810 * @param {Object} options Options of the current file. 3811 * @param {Object} loadOptions Options for loading the stream. 3812 */ 3813 function ZipEntry(options, loadOptions) { 3814 this.options = options; 3815 this.loadOptions = loadOptions; 3816 } 3817 ZipEntry.prototype = { 3818 /** 3819 * say if the file is encrypted. 3820 * @return {boolean} true if the file is encrypted, false otherwise. 3821 */ 3822 isEncrypted: function() { 3823 // bit 1 is set 3824 return (this.bitFlag & 0x0001) === 0x0001; 3825 }, 3826 /** 3827 * say if the file has utf-8 filename/comment. 3828 * @return {boolean} true if the filename/comment is in utf-8, false otherwise. 3829 */ 3830 useUTF8: function() { 3831 // bit 11 is set 3832 return (this.bitFlag & 0x0800) === 0x0800; 3833 }, 3834 /** 3835 * Read the local part of a zip file and add the info in this object. 3836 * @param {DataReader} reader the reader to use. 3837 */ 3838 readLocalPart: function(reader) { 3839 var compression, localExtraFieldsLength; 3840 3841 // we already know everything from the central dir ! 3842 // If the central dir data are false, we are doomed. 3843 // On the bright side, the local part is scary : zip64, data descriptors, both, etc. 3844 // The less data we get here, the more reliable this should be. 3845 // Let's skip the whole header and dash to the data ! 3846 reader.skip(22); 3847 // in some zip created on windows, the filename stored in the central dir contains \ instead of /. 3848 // Strangely, the filename here is OK. 3849 // I would love to treat these zip files as corrupted (see http://www.info-zip.org/FAQ.html#backslashes 3850 // or APPNOTE#4.4.17.1, "All slashes MUST be forward slashes '/'") but there are a lot of bad zip generators... 3851 // Search "unzip mismatching "local" filename continuing with "central" filename version" on 3852 // the internet. 
3853 // 3854 // I think I see the logic here : the central directory is used to display 3855 // content and the local directory is used to extract the files. Mixing / and \ 3856 // may be used to display \ to windows users and use / when extracting the files. 3857 // Unfortunately, this lead also to some issues : http://seclists.org/fulldisclosure/2009/Sep/394 3858 this.fileNameLength = reader.readInt(2); 3859 localExtraFieldsLength = reader.readInt(2); // can't be sure this will be the same as the central dir 3860 // the fileName is stored as binary data, the handleUTF8 method will take care of the encoding. 3861 this.fileName = reader.readData(this.fileNameLength); 3862 reader.skip(localExtraFieldsLength); 3863 3864 if (this.compressedSize === -1 || this.uncompressedSize === -1) { 3865 throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory " + "(compressedSize === -1 || uncompressedSize === -1)"); 3866 } 3867 3868 compression = findCompression(this.compressionMethod); 3869 if (compression === null) { // no compression found 3870 throw new Error("Corrupted zip : compression " + utils.pretty(this.compressionMethod) + " unknown (inner file : " + utils.transformTo("string", this.fileName) + ")"); 3871 } 3872 this.decompressed = new CompressedObject(this.compressedSize, this.uncompressedSize, this.crc32, compression, reader.readData(this.compressedSize)); 3873 }, 3874 3875 /** 3876 * Read the central part of a zip file and add the info in this object. 3877 * @param {DataReader} reader the reader to use. 
3878 */ 3879 readCentralPart: function(reader) { 3880 this.versionMadeBy = reader.readInt(2); 3881 reader.skip(2); 3882 // this.versionNeeded = reader.readInt(2); 3883 this.bitFlag = reader.readInt(2); 3884 this.compressionMethod = reader.readString(2); 3885 this.date = reader.readDate(); 3886 this.crc32 = reader.readInt(4); 3887 this.compressedSize = reader.readInt(4); 3888 this.uncompressedSize = reader.readInt(4); 3889 var fileNameLength = reader.readInt(2); 3890 this.extraFieldsLength = reader.readInt(2); 3891 this.fileCommentLength = reader.readInt(2); 3892 this.diskNumberStart = reader.readInt(2); 3893 this.internalFileAttributes = reader.readInt(2); 3894 this.externalFileAttributes = reader.readInt(4); 3895 this.localHeaderOffset = reader.readInt(4); 3896 3897 if (this.isEncrypted()) { 3898 throw new Error("Encrypted zip are not supported"); 3899 } 3900 3901 // will be read in the local part, see the comments there 3902 reader.skip(fileNameLength); 3903 this.readExtraFields(reader); 3904 this.parseZIP64ExtraField(reader); 3905 this.fileComment = reader.readData(this.fileCommentLength); 3906 }, 3907 3908 /** 3909 * Parse the external file attributes and get the unix/dos permissions. 3910 */ 3911 processAttributes: function () { 3912 this.unixPermissions = null; 3913 this.dosPermissions = null; 3914 var madeBy = this.versionMadeBy >> 8; 3915 3916 // Check if we have the DOS directory flag set. 3917 // We look for it in the DOS and UNIX permissions 3918 // but some unknown platform could set it as a compatibility flag. 3919 this.dir = this.externalFileAttributes & 0x0010 ? 
true : false; 3920 3921 if(madeBy === MADE_BY_DOS) { 3922 // first 6 bits (0 to 5) 3923 this.dosPermissions = this.externalFileAttributes & 0x3F; 3924 } 3925 3926 if(madeBy === MADE_BY_UNIX) { 3927 this.unixPermissions = (this.externalFileAttributes >> 16) & 0xFFFF; 3928 // the octal permissions are in (this.unixPermissions & 0x01FF).toString(8); 3929 } 3930 3931 // fail safe : if the name ends with a / it probably means a folder 3932 if (!this.dir && this.fileNameStr.slice(-1) === '/') { 3933 this.dir = true; 3934 } 3935 }, 3936 3937 /** 3938 * Parse the ZIP64 extra field and merge the info in the current ZipEntry. 3939 * @param {DataReader} reader the reader to use. 3940 */ 3941 parseZIP64ExtraField: function(reader) { 3942 3943 if (!this.extraFields[0x0001]) { 3944 return; 3945 } 3946 3947 // should be something, preparing the extra reader 3948 var extraReader = readerFor(this.extraFields[0x0001].value); 3949 3950 // I really hope that these 64bits integer can fit in 32 bits integer, because js 3951 // won't let us have more. 3952 if (this.uncompressedSize === utils.MAX_VALUE_32BITS) { 3953 this.uncompressedSize = extraReader.readInt(8); 3954 } 3955 if (this.compressedSize === utils.MAX_VALUE_32BITS) { 3956 this.compressedSize = extraReader.readInt(8); 3957 } 3958 if (this.localHeaderOffset === utils.MAX_VALUE_32BITS) { 3959 this.localHeaderOffset = extraReader.readInt(8); 3960 } 3961 if (this.diskNumberStart === utils.MAX_VALUE_32BITS) { 3962 this.diskNumberStart = extraReader.readInt(4); 3963 } 3964 }, 3965 /** 3966 * Read the central part of a zip file and add the info in this object. 3967 * @param {DataReader} reader the reader to use. 
3968 */ 3969 readExtraFields: function(reader) { 3970 var end = reader.index + this.extraFieldsLength, 3971 extraFieldId, 3972 extraFieldLength, 3973 extraFieldValue; 3974 3975 if (!this.extraFields) { 3976 this.extraFields = {}; 3977 } 3978 3979 while (reader.index < end) { 3980 extraFieldId = reader.readInt(2); 3981 extraFieldLength = reader.readInt(2); 3982 extraFieldValue = reader.readData(extraFieldLength); 3983 3984 this.extraFields[extraFieldId] = { 3985 id: extraFieldId, 3986 length: extraFieldLength, 3987 value: extraFieldValue 3988 }; 3989 } 3990 }, 3991 /** 3992 * Apply an UTF8 transformation if needed. 3993 */ 3994 handleUTF8: function() { 3995 var decodeParamType = support.uint8array ? "uint8array" : "array"; 3996 if (this.useUTF8()) { 3997 this.fileNameStr = utf8.utf8decode(this.fileName); 3998 this.fileCommentStr = utf8.utf8decode(this.fileComment); 3999 } else { 4000 var upath = this.findExtraFieldUnicodePath(); 4001 if (upath !== null) { 4002 this.fileNameStr = upath; 4003 } else { 4004 // ASCII text or unsupported code page 4005 var fileNameByteArray = utils.transformTo(decodeParamType, this.fileName); 4006 this.fileNameStr = this.loadOptions.decodeFileName(fileNameByteArray); 4007 } 4008 4009 var ucomment = this.findExtraFieldUnicodeComment(); 4010 if (ucomment !== null) { 4011 this.fileCommentStr = ucomment; 4012 } else { 4013 // ASCII text or unsupported code page 4014 var commentByteArray = utils.transformTo(decodeParamType, this.fileComment); 4015 this.fileCommentStr = this.loadOptions.decodeFileName(commentByteArray); 4016 } 4017 } 4018 }, 4019 4020 /** 4021 * Find the unicode path declared in the extra field, if any. 4022 * @return {String} the unicode path, null otherwise. 
4023 */ 4024 findExtraFieldUnicodePath: function() { 4025 var upathField = this.extraFields[0x7075]; 4026 if (upathField) { 4027 var extraReader = readerFor(upathField.value); 4028 4029 // wrong version 4030 if (extraReader.readInt(1) !== 1) { 4031 return null; 4032 } 4033 4034 // the crc of the filename changed, this field is out of date. 4035 if (crc32fn(this.fileName) !== extraReader.readInt(4)) { 4036 return null; 4037 } 4038 4039 return utf8.utf8decode(extraReader.readData(upathField.length - 5)); 4040 } 4041 return null; 4042 }, 4043 4044 /** 4045 * Find the unicode comment declared in the extra field, if any. 4046 * @return {String} the unicode comment, null otherwise. 4047 */ 4048 findExtraFieldUnicodeComment: function() { 4049 var ucommentField = this.extraFields[0x6375]; 4050 if (ucommentField) { 4051 var extraReader = readerFor(ucommentField.value); 4052 4053 // wrong version 4054 if (extraReader.readInt(1) !== 1) { 4055 return null; 4056 } 4057 4058 // the crc of the comment changed, this field is out of date. 4059 if (crc32fn(this.fileComment) !== extraReader.readInt(4)) { 4060 return null; 4061 } 4062 4063 return utf8.utf8decode(extraReader.readData(ucommentField.length - 5)); 4064 } 4065 return null; 4066 } 4067 }; 4068 module.exports = ZipEntry; 4069 4070 },{"./compressedObject":2,"./compressions":3,"./crc32":4,"./reader/readerFor":22,"./support":30,"./utf8":31,"./utils":32}],35:[function(require,module,exports){ 4071 'use strict'; 4072 4073 var StreamHelper = require('./stream/StreamHelper'); 4074 var DataWorker = require('./stream/DataWorker'); 4075 var utf8 = require('./utf8'); 4076 var CompressedObject = require('./compressedObject'); 4077 var GenericWorker = require('./stream/GenericWorker'); 4078 4079 /** 4080 * A simple object representing a file in the zip file. 
4081 * @constructor 4082 * @param {string} name the name of the file 4083 * @param {String|ArrayBuffer|Uint8Array|Buffer} data the data 4084 * @param {Object} options the options of the file 4085 */ 4086 var ZipObject = function(name, data, options) { 4087 this.name = name; 4088 this.dir = options.dir; 4089 this.date = options.date; 4090 this.comment = options.comment; 4091 this.unixPermissions = options.unixPermissions; 4092 this.dosPermissions = options.dosPermissions; 4093 4094 this._data = data; 4095 this._dataBinary = options.binary; 4096 // keep only the compression 4097 this.options = { 4098 compression : options.compression, 4099 compressionOptions : options.compressionOptions 4100 }; 4101 }; 4102 4103 ZipObject.prototype = { 4104 /** 4105 * Create an internal stream for the content of this object. 4106 * @param {String} type the type of each chunk. 4107 * @return StreamHelper the stream. 4108 */ 4109 internalStream: function (type) { 4110 var result = null, outputType = "string"; 4111 try { 4112 if (!type) { 4113 throw new Error("No output type specified."); 4114 } 4115 outputType = type.toLowerCase(); 4116 var askUnicodeString = outputType === "string" || outputType === "text"; 4117 if (outputType === "binarystring" || outputType === "text") { 4118 outputType = "string"; 4119 } 4120 result = this._decompressWorker(); 4121 4122 var isUnicodeString = !this._dataBinary; 4123 4124 if (isUnicodeString && !askUnicodeString) { 4125 result = result.pipe(new utf8.Utf8EncodeWorker()); 4126 } 4127 if (!isUnicodeString && askUnicodeString) { 4128 result = result.pipe(new utf8.Utf8DecodeWorker()); 4129 } 4130 } catch (e) { 4131 result = new GenericWorker("error"); 4132 result.error(e); 4133 } 4134 4135 return new StreamHelper(result, outputType, ""); 4136 }, 4137 4138 /** 4139 * Prepare the content in the asked type. 4140 * @param {String} type the type of the result. 4141 * @param {Function} onUpdate a function to call on each internal update. 
4142 * @return Promise the promise of the result. 4143 */ 4144 async: function (type, onUpdate) { 4145 return this.internalStream(type).accumulate(onUpdate); 4146 }, 4147 4148 /** 4149 * Prepare the content as a nodejs stream. 4150 * @param {String} type the type of each chunk. 4151 * @param {Function} onUpdate a function to call on each internal update. 4152 * @return Stream the stream. 4153 */ 4154 nodeStream: function (type, onUpdate) { 4155 return this.internalStream(type || "nodebuffer").toNodejsStream(onUpdate); 4156 }, 4157 4158 /** 4159 * Return a worker for the compressed content. 4160 * @private 4161 * @param {Object} compression the compression object to use. 4162 * @param {Object} compressionOptions the options to use when compressing. 4163 * @return Worker the worker. 4164 */ 4165 _compressWorker: function (compression, compressionOptions) { 4166 if ( 4167 this._data instanceof CompressedObject && 4168 this._data.compression.magic === compression.magic 4169 ) { 4170 return this._data.getCompressedWorker(); 4171 } else { 4172 var result = this._decompressWorker(); 4173 if(!this._dataBinary) { 4174 result = result.pipe(new utf8.Utf8EncodeWorker()); 4175 } 4176 return CompressedObject.createWorkerFrom(result, compression, compressionOptions); 4177 } 4178 }, 4179 /** 4180 * Return a worker for the decompressed content. 4181 * @private 4182 * @return Worker the worker. 
4183 */ 4184 _decompressWorker : function () { 4185 if (this._data instanceof CompressedObject) { 4186 return this._data.getContentWorker(); 4187 } else if (this._data instanceof GenericWorker) { 4188 return this._data; 4189 } else { 4190 return new DataWorker(this._data); 4191 } 4192 } 4193 }; 4194 4195 var removedMethods = ["asText", "asBinary", "asNodeBuffer", "asUint8Array", "asArrayBuffer"]; 4196 var removedFn = function () { 4197 throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide."); 4198 }; 4199 4200 for(var i = 0; i < removedMethods.length; i++) { 4201 ZipObject.prototype[removedMethods[i]] = removedFn; 4202 } 4203 module.exports = ZipObject; 4204 4205 },{"./compressedObject":2,"./stream/DataWorker":27,"./stream/GenericWorker":28,"./stream/StreamHelper":29,"./utf8":31}],36:[function(require,module,exports){ 4206 (function (global){ 4207 'use strict'; 4208 var Mutation = global.MutationObserver || global.WebKitMutationObserver; 4209 4210 var scheduleDrain; 4211 4212 { 4213 if (Mutation) { 4214 var called = 0; 4215 var observer = new Mutation(nextTick); 4216 var element = global.document.createTextNode(''); 4217 observer.observe(element, { 4218 characterData: true 4219 }); 4220 scheduleDrain = function () { 4221 element.data = (called = ++called % 2); 4222 }; 4223 } else if (!global.setImmediate && typeof global.MessageChannel !== 'undefined') { 4224 var channel = new global.MessageChannel(); 4225 channel.port1.onmessage = nextTick; 4226 scheduleDrain = function () { 4227 channel.port2.postMessage(0); 4228 }; 4229 } else if ('document' in global && 'onreadystatechange' in global.document.createElement('script')) { 4230 scheduleDrain = function () { 4231 4232 // Create a <script> element; its readystatechange event will be fired asynchronously once it is inserted 4233 // into the document. Do so, thus queuing up the task. Remember to clean up once it's been called. 
4234 var scriptEl = global.document.createElement('script'); 4235 scriptEl.onreadystatechange = function () { 4236 nextTick(); 4237 4238 scriptEl.onreadystatechange = null; 4239 scriptEl.parentNode.removeChild(scriptEl); 4240 scriptEl = null; 4241 }; 4242 global.document.documentElement.appendChild(scriptEl); 4243 }; 4244 } else { 4245 scheduleDrain = function () { 4246 setTimeout(nextTick, 0); 4247 }; 4248 } 4249 } 4250 4251 var draining; 4252 var queue = []; 4253 //named nextTick for less confusing stack traces 4254 function nextTick() { 4255 draining = true; 4256 var i, oldQueue; 4257 var len = queue.length; 4258 while (len) { 4259 oldQueue = queue; 4260 queue = []; 4261 i = -1; 4262 while (++i < len) { 4263 oldQueue[i](); 4264 } 4265 len = queue.length; 4266 } 4267 draining = false; 4268 } 4269 4270 module.exports = immediate; 4271 function immediate(task) { 4272 if (queue.push(task) === 1 && !draining) { 4273 scheduleDrain(); 4274 } 4275 } 4276 4277 }).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? 
window : {}) 4278 },{}],37:[function(require,module,exports){ 4279 'use strict'; 4280 var immediate = require('immediate'); 4281 4282 /* istanbul ignore next */ 4283 function INTERNAL() {} 4284 4285 var handlers = {}; 4286 4287 var REJECTED = ['REJECTED']; 4288 var FULFILLED = ['FULFILLED']; 4289 var PENDING = ['PENDING']; 4290 4291 module.exports = Promise; 4292 4293 function Promise(resolver) { 4294 if (typeof resolver !== 'function') { 4295 throw new TypeError('resolver must be a function'); 4296 } 4297 this.state = PENDING; 4298 this.queue = []; 4299 this.outcome = void 0; 4300 if (resolver !== INTERNAL) { 4301 safelyResolveThenable(this, resolver); 4302 } 4303 } 4304 4305 Promise.prototype["finally"] = function (callback) { 4306 if (typeof callback !== 'function') { 4307 return this; 4308 } 4309 var p = this.constructor; 4310 return this.then(resolve, reject); 4311 4312 function resolve(value) { 4313 function yes () { 4314 return value; 4315 } 4316 return p.resolve(callback()).then(yes); 4317 } 4318 function reject(reason) { 4319 function no () { 4320 throw reason; 4321 } 4322 return p.resolve(callback()).then(no); 4323 } 4324 }; 4325 Promise.prototype["catch"] = function (onRejected) { 4326 return this.then(null, onRejected); 4327 }; 4328 Promise.prototype.then = function (onFulfilled, onRejected) { 4329 if (typeof onFulfilled !== 'function' && this.state === FULFILLED || 4330 typeof onRejected !== 'function' && this.state === REJECTED) { 4331 return this; 4332 } 4333 var promise = new this.constructor(INTERNAL); 4334 if (this.state !== PENDING) { 4335 var resolver = this.state === FULFILLED ? 
onFulfilled : onRejected; 4336 unwrap(promise, resolver, this.outcome); 4337 } else { 4338 this.queue.push(new QueueItem(promise, onFulfilled, onRejected)); 4339 } 4340 4341 return promise; 4342 }; 4343 function QueueItem(promise, onFulfilled, onRejected) { 4344 this.promise = promise; 4345 if (typeof onFulfilled === 'function') { 4346 this.onFulfilled = onFulfilled; 4347 this.callFulfilled = this.otherCallFulfilled; 4348 } 4349 if (typeof onRejected === 'function') { 4350 this.onRejected = onRejected; 4351 this.callRejected = this.otherCallRejected; 4352 } 4353 } 4354 QueueItem.prototype.callFulfilled = function (value) { 4355 handlers.resolve(this.promise, value); 4356 }; 4357 QueueItem.prototype.otherCallFulfilled = function (value) { 4358 unwrap(this.promise, this.onFulfilled, value); 4359 }; 4360 QueueItem.prototype.callRejected = function (value) { 4361 handlers.reject(this.promise, value); 4362 }; 4363 QueueItem.prototype.otherCallRejected = function (value) { 4364 unwrap(this.promise, this.onRejected, value); 4365 }; 4366 4367 function unwrap(promise, func, value) { 4368 immediate(function () { 4369 var returnValue; 4370 try { 4371 returnValue = func(value); 4372 } catch (e) { 4373 return handlers.reject(promise, e); 4374 } 4375 if (returnValue === promise) { 4376 handlers.reject(promise, new TypeError('Cannot resolve promise with itself')); 4377 } else { 4378 handlers.resolve(promise, returnValue); 4379 } 4380 }); 4381 } 4382 4383 handlers.resolve = function (self, value) { 4384 var result = tryCatch(getThen, value); 4385 if (result.status === 'error') { 4386 return handlers.reject(self, result.value); 4387 } 4388 var thenable = result.value; 4389 4390 if (thenable) { 4391 safelyResolveThenable(self, thenable); 4392 } else { 4393 self.state = FULFILLED; 4394 self.outcome = value; 4395 var i = -1; 4396 var len = self.queue.length; 4397 while (++i < len) { 4398 self.queue[i].callFulfilled(value); 4399 } 4400 } 4401 return self; 4402 }; 4403 handlers.reject = 
function (self, error) { 4404 self.state = REJECTED; 4405 self.outcome = error; 4406 var i = -1; 4407 var len = self.queue.length; 4408 while (++i < len) { 4409 self.queue[i].callRejected(error); 4410 } 4411 return self; 4412 }; 4413 4414 function getThen(obj) { 4415 // Make sure we only access the accessor once as required by the spec 4416 var then = obj && obj.then; 4417 if (obj && (typeof obj === 'object' || typeof obj === 'function') && typeof then === 'function') { 4418 return function appyThen() { 4419 then.apply(obj, arguments); 4420 }; 4421 } 4422 } 4423 4424 function safelyResolveThenable(self, thenable) { 4425 // Either fulfill, reject or reject with error 4426 var called = false; 4427 function onError(value) { 4428 if (called) { 4429 return; 4430 } 4431 called = true; 4432 handlers.reject(self, value); 4433 } 4434 4435 function onSuccess(value) { 4436 if (called) { 4437 return; 4438 } 4439 called = true; 4440 handlers.resolve(self, value); 4441 } 4442 4443 function tryToUnwrap() { 4444 thenable(onSuccess, onError); 4445 } 4446 4447 var result = tryCatch(tryToUnwrap); 4448 if (result.status === 'error') { 4449 onError(result.value); 4450 } 4451 } 4452 4453 function tryCatch(func, value) { 4454 var out = {}; 4455 try { 4456 out.value = func(value); 4457 out.status = 'success'; 4458 } catch (e) { 4459 out.status = 'error'; 4460 out.value = e; 4461 } 4462 return out; 4463 } 4464 4465 Promise.resolve = resolve; 4466 function resolve(value) { 4467 if (value instanceof this) { 4468 return value; 4469 } 4470 return handlers.resolve(new this(INTERNAL), value); 4471 } 4472 4473 Promise.reject = reject; 4474 function reject(reason) { 4475 var promise = new this(INTERNAL); 4476 return handlers.reject(promise, reason); 4477 } 4478 4479 Promise.all = all; 4480 function all(iterable) { 4481 var self = this; 4482 if (Object.prototype.toString.call(iterable) !== '[object Array]') { 4483 return this.reject(new TypeError('must be an array')); 4484 } 4485 4486 var len = 
iterable.length; 4487 var called = false; 4488 if (!len) { 4489 return this.resolve([]); 4490 } 4491 4492 var values = new Array(len); 4493 var resolved = 0; 4494 var i = -1; 4495 var promise = new this(INTERNAL); 4496 4497 while (++i < len) { 4498 allResolver(iterable[i], i); 4499 } 4500 return promise; 4501 function allResolver(value, i) { 4502 self.resolve(value).then(resolveFromAll, function (error) { 4503 if (!called) { 4504 called = true; 4505 handlers.reject(promise, error); 4506 } 4507 }); 4508 function resolveFromAll(outValue) { 4509 values[i] = outValue; 4510 if (++resolved === len && !called) { 4511 called = true; 4512 handlers.resolve(promise, values); 4513 } 4514 } 4515 } 4516 } 4517 4518 Promise.race = race; 4519 function race(iterable) { 4520 var self = this; 4521 if (Object.prototype.toString.call(iterable) !== '[object Array]') { 4522 return this.reject(new TypeError('must be an array')); 4523 } 4524 4525 var len = iterable.length; 4526 var called = false; 4527 if (!len) { 4528 return this.resolve([]); 4529 } 4530 4531 var i = -1; 4532 var promise = new this(INTERNAL); 4533 4534 while (++i < len) { 4535 resolver(iterable[i]); 4536 } 4537 return promise; 4538 function resolver(value) { 4539 self.resolve(value).then(function (response) { 4540 if (!called) { 4541 called = true; 4542 handlers.resolve(promise, response); 4543 } 4544 }, function (error) { 4545 if (!called) { 4546 called = true; 4547 handlers.reject(promise, error); 4548 } 4549 }); 4550 } 4551 } 4552 4553 },{"immediate":36}],38:[function(require,module,exports){ 4554 // Top level file is just a mixin of submodules & constants 4555 'use strict'; 4556 4557 var assign = require('./lib/utils/common').assign; 4558 4559 var deflate = require('./lib/deflate'); 4560 var inflate = require('./lib/inflate'); 4561 var constants = require('./lib/zlib/constants'); 4562 4563 var pako = {}; 4564 4565 assign(pako, deflate, inflate, constants); 4566 4567 module.exports = pako; 4568 4569 
},{"./lib/deflate":39,"./lib/inflate":40,"./lib/utils/common":41,"./lib/zlib/constants":44}],39:[function(require,module,exports){ 4570 'use strict'; 4571 4572 4573 var zlib_deflate = require('./zlib/deflate'); 4574 var utils = require('./utils/common'); 4575 var strings = require('./utils/strings'); 4576 var msg = require('./zlib/messages'); 4577 var ZStream = require('./zlib/zstream'); 4578 4579 var toString = Object.prototype.toString; 4580 4581 /* Public constants ==========================================================*/ 4582 /* ===========================================================================*/ 4583 4584 var Z_NO_FLUSH = 0; 4585 var Z_FINISH = 4; 4586 4587 var Z_OK = 0; 4588 var Z_STREAM_END = 1; 4589 var Z_SYNC_FLUSH = 2; 4590 4591 var Z_DEFAULT_COMPRESSION = -1; 4592 4593 var Z_DEFAULT_STRATEGY = 0; 4594 4595 var Z_DEFLATED = 8; 4596 4597 /* ===========================================================================*/ 4598 4599 4600 /** 4601 * class Deflate 4602 * 4603 * Generic JS-style wrapper for zlib calls. If you don't need 4604 * streaming behaviour - use more simple functions: [[deflate]], 4605 * [[deflateRaw]] and [[gzip]]. 4606 **/ 4607 4608 /* internal 4609 * Deflate.chunks -> Array 4610 * 4611 * Chunks of output data, if [[Deflate#onData]] not overriden. 4612 **/ 4613 4614 /** 4615 * Deflate.result -> Uint8Array|Array 4616 * 4617 * Compressed result, generated by default [[Deflate#onData]] 4618 * and [[Deflate#onEnd]] handlers. Filled after you push last chunk 4619 * (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you 4620 * push a chunk with explicit flush (call [[Deflate#push]] with 4621 * `Z_SYNC_FLUSH` param). 4622 **/ 4623 4624 /** 4625 * Deflate.err -> Number 4626 * 4627 * Error code after deflate finished. 0 (Z_OK) on success. 4628 * You will not need it in real life, because deflate errors 4629 * are possible only on wrong options or bad `onData` / `onEnd` 4630 * custom handlers. 
4631 **/ 4632 4633 /** 4634 * Deflate.msg -> String 4635 * 4636 * Error message, if [[Deflate.err]] != 0 4637 **/ 4638 4639 4640 /** 4641 * new Deflate(options) 4642 * - options (Object): zlib deflate options. 4643 * 4644 * Creates new deflator instance with specified params. Throws exception 4645 * on bad params. Supported options: 4646 * 4647 * - `level` 4648 * - `windowBits` 4649 * - `memLevel` 4650 * - `strategy` 4651 * - `dictionary` 4652 * 4653 * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) 4654 * for more information on these. 4655 * 4656 * Additional options, for internal needs: 4657 * 4658 * - `chunkSize` - size of generated data chunks (16K by default) 4659 * - `raw` (Boolean) - do raw deflate 4660 * - `gzip` (Boolean) - create gzip wrapper 4661 * - `to` (String) - if equal to 'string', then result will be "binary string" 4662 * (each char code [0..255]) 4663 * - `header` (Object) - custom header for gzip 4664 * - `text` (Boolean) - true if compressed data believed to be text 4665 * - `time` (Number) - modification time, unix timestamp 4666 * - `os` (Number) - operation system code 4667 * - `extra` (Array) - array of bytes with extra data (max 65536) 4668 * - `name` (String) - file name (binary string) 4669 * - `comment` (String) - comment (binary string) 4670 * - `hcrc` (Boolean) - true if header crc should be added 4671 * 4672 * ##### Example: 4673 * 4674 * ```javascript 4675 * var pako = require('pako') 4676 * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9]) 4677 * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]); 4678 * 4679 * var deflate = new pako.Deflate({ level: 3}); 4680 * 4681 * deflate.push(chunk1, false); 4682 * deflate.push(chunk2, true); // true -> last chunk 4683 * 4684 * if (deflate.err) { throw new Error(deflate.err); } 4685 * 4686 * console.log(deflate.result); 4687 * ``` 4688 **/ 4689 function Deflate(options) { 4690 if (!(this instanceof Deflate)) return new Deflate(options); 4691 4692 this.options = 
utils.assign({ 4693 level: Z_DEFAULT_COMPRESSION, 4694 method: Z_DEFLATED, 4695 chunkSize: 16384, 4696 windowBits: 15, 4697 memLevel: 8, 4698 strategy: Z_DEFAULT_STRATEGY, 4699 to: '' 4700 }, options || {}); 4701 4702 var opt = this.options; 4703 4704 if (opt.raw && (opt.windowBits > 0)) { 4705 opt.windowBits = -opt.windowBits; 4706 } 4707 4708 else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) { 4709 opt.windowBits += 16; 4710 } 4711 4712 this.err = 0; // error code, if happens (0 = Z_OK) 4713 this.msg = ''; // error message 4714 this.ended = false; // used to avoid multiple onEnd() calls 4715 this.chunks = []; // chunks of compressed data 4716 4717 this.strm = new ZStream(); 4718 this.strm.avail_out = 0; 4719 4720 var status = zlib_deflate.deflateInit2( 4721 this.strm, 4722 opt.level, 4723 opt.method, 4724 opt.windowBits, 4725 opt.memLevel, 4726 opt.strategy 4727 ); 4728 4729 if (status !== Z_OK) { 4730 throw new Error(msg[status]); 4731 } 4732 4733 if (opt.header) { 4734 zlib_deflate.deflateSetHeader(this.strm, opt.header); 4735 } 4736 4737 if (opt.dictionary) { 4738 var dict; 4739 // Convert data if needed 4740 if (typeof opt.dictionary === 'string') { 4741 // If we need to compress text, change encoding to utf8. 4742 dict = strings.string2buf(opt.dictionary); 4743 } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') { 4744 dict = new Uint8Array(opt.dictionary); 4745 } else { 4746 dict = opt.dictionary; 4747 } 4748 4749 status = zlib_deflate.deflateSetDictionary(this.strm, dict); 4750 4751 if (status !== Z_OK) { 4752 throw new Error(msg[status]); 4753 } 4754 4755 this._dict_set = true; 4756 } 4757 } 4758 4759 /** 4760 * Deflate#push(data[, mode]) -> Boolean 4761 * - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be 4762 * converted to utf8 byte sequence. 4763 * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. 4764 * See constants. 
Skipped or `false` means Z_NO_FLUSH, `true` meansh Z_FINISH. 4765 * 4766 * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with 4767 * new compressed chunks. Returns `true` on success. The last data block must have 4768 * mode Z_FINISH (or `true`). That will flush internal pending buffers and call 4769 * [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you 4770 * can use mode Z_SYNC_FLUSH, keeping the compression context. 4771 * 4772 * On fail call [[Deflate#onEnd]] with error code and return false. 4773 * 4774 * We strongly recommend to use `Uint8Array` on input for best speed (output 4775 * array format is detected automatically). Also, don't skip last param and always 4776 * use the same type in your code (boolean or number). That will improve JS speed. 4777 * 4778 * For regular `Array`-s make sure all elements are [0..255]. 4779 * 4780 * ##### Example 4781 * 4782 * ```javascript 4783 * push(chunk, false); // push one of data chunks 4784 * ... 4785 * push(chunk, true); // push last chunk 4786 * ``` 4787 **/ 4788 Deflate.prototype.push = function (data, mode) { 4789 var strm = this.strm; 4790 var chunkSize = this.options.chunkSize; 4791 var status, _mode; 4792 4793 if (this.ended) { return false; } 4794 4795 _mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH); 4796 4797 // Convert data if needed 4798 if (typeof data === 'string') { 4799 // If we need to compress text, change encoding to utf8. 
4800 strm.input = strings.string2buf(data); 4801 } else if (toString.call(data) === '[object ArrayBuffer]') { 4802 strm.input = new Uint8Array(data); 4803 } else { 4804 strm.input = data; 4805 } 4806 4807 strm.next_in = 0; 4808 strm.avail_in = strm.input.length; 4809 4810 do { 4811 if (strm.avail_out === 0) { 4812 strm.output = new utils.Buf8(chunkSize); 4813 strm.next_out = 0; 4814 strm.avail_out = chunkSize; 4815 } 4816 status = zlib_deflate.deflate(strm, _mode); /* no bad return value */ 4817 4818 if (status !== Z_STREAM_END && status !== Z_OK) { 4819 this.onEnd(status); 4820 this.ended = true; 4821 return false; 4822 } 4823 if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) { 4824 if (this.options.to === 'string') { 4825 this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out))); 4826 } else { 4827 this.onData(utils.shrinkBuf(strm.output, strm.next_out)); 4828 } 4829 } 4830 } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END); 4831 4832 // Finalize on the last chunk. 4833 if (_mode === Z_FINISH) { 4834 status = zlib_deflate.deflateEnd(this.strm); 4835 this.onEnd(status); 4836 this.ended = true; 4837 return status === Z_OK; 4838 } 4839 4840 // callback interim results if Z_SYNC_FLUSH. 4841 if (_mode === Z_SYNC_FLUSH) { 4842 this.onEnd(Z_OK); 4843 strm.avail_out = 0; 4844 return true; 4845 } 4846 4847 return true; 4848 }; 4849 4850 4851 /** 4852 * Deflate#onData(chunk) -> Void 4853 * - chunk (Uint8Array|Array|String): ouput data. Type of array depends 4854 * on js engine support. When string output requested, each chunk 4855 * will be string. 4856 * 4857 * By default, stores data blocks in `chunks[]` property and glue 4858 * those in `onEnd`. Override this handler, if you need another behaviour. 
4859 **/ 4860 Deflate.prototype.onData = function (chunk) { 4861 this.chunks.push(chunk); 4862 }; 4863 4864 4865 /** 4866 * Deflate#onEnd(status) -> Void 4867 * - status (Number): deflate status. 0 (Z_OK) on success, 4868 * other if not. 4869 * 4870 * Called once after you tell deflate that the input stream is 4871 * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH) 4872 * or if an error happened. By default - join collected chunks, 4873 * free memory and fill `results` / `err` properties. 4874 **/ 4875 Deflate.prototype.onEnd = function (status) { 4876 // On success - join 4877 if (status === Z_OK) { 4878 if (this.options.to === 'string') { 4879 this.result = this.chunks.join(''); 4880 } else { 4881 this.result = utils.flattenChunks(this.chunks); 4882 } 4883 } 4884 this.chunks = []; 4885 this.err = status; 4886 this.msg = this.strm.msg; 4887 }; 4888 4889 4890 /** 4891 * deflate(data[, options]) -> Uint8Array|Array|String 4892 * - data (Uint8Array|Array|String): input data to compress. 4893 * - options (Object): zlib deflate options. 4894 * 4895 * Compress `data` with deflate algorithm and `options`. 4896 * 4897 * Supported options are: 4898 * 4899 * - level 4900 * - windowBits 4901 * - memLevel 4902 * - strategy 4903 * - dictionary 4904 * 4905 * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) 4906 * for more information on these. 4907 * 4908 * Sugar (options): 4909 * 4910 * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify 4911 * negative windowBits implicitly. 
4912 * - `to` (String) - if equal to 'string', then result will be "binary string" 4913 * (each char code [0..255]) 4914 * 4915 * ##### Example: 4916 * 4917 * ```javascript 4918 * var pako = require('pako') 4919 * , data = Uint8Array([1,2,3,4,5,6,7,8,9]); 4920 * 4921 * console.log(pako.deflate(data)); 4922 * ``` 4923 **/ 4924 function deflate(input, options) { 4925 var deflator = new Deflate(options); 4926 4927 deflator.push(input, true); 4928 4929 // That will never happens, if you don't cheat with options :) 4930 if (deflator.err) { throw deflator.msg || msg[deflator.err]; } 4931 4932 return deflator.result; 4933 } 4934 4935 4936 /** 4937 * deflateRaw(data[, options]) -> Uint8Array|Array|String 4938 * - data (Uint8Array|Array|String): input data to compress. 4939 * - options (Object): zlib deflate options. 4940 * 4941 * The same as [[deflate]], but creates raw data, without wrapper 4942 * (header and adler32 crc). 4943 **/ 4944 function deflateRaw(input, options) { 4945 options = options || {}; 4946 options.raw = true; 4947 return deflate(input, options); 4948 } 4949 4950 4951 /** 4952 * gzip(data[, options]) -> Uint8Array|Array|String 4953 * - data (Uint8Array|Array|String): input data to compress. 4954 * - options (Object): zlib deflate options. 4955 * 4956 * The same as [[deflate]], but create gzip wrapper instead of 4957 * deflate one. 
4958 **/ 4959 function gzip(input, options) { 4960 options = options || {}; 4961 options.gzip = true; 4962 return deflate(input, options); 4963 } 4964 4965 4966 exports.Deflate = Deflate; 4967 exports.deflate = deflate; 4968 exports.deflateRaw = deflateRaw; 4969 exports.gzip = gzip; 4970 4971 },{"./utils/common":41,"./utils/strings":42,"./zlib/deflate":46,"./zlib/messages":51,"./zlib/zstream":53}],40:[function(require,module,exports){ 4972 'use strict'; 4973 4974 4975 var zlib_inflate = require('./zlib/inflate'); 4976 var utils = require('./utils/common'); 4977 var strings = require('./utils/strings'); 4978 var c = require('./zlib/constants'); 4979 var msg = require('./zlib/messages'); 4980 var ZStream = require('./zlib/zstream'); 4981 var GZheader = require('./zlib/gzheader'); 4982 4983 var toString = Object.prototype.toString; 4984 4985 /** 4986 * class Inflate 4987 * 4988 * Generic JS-style wrapper for zlib calls. If you don't need 4989 * streaming behaviour - use more simple functions: [[inflate]] 4990 * and [[inflateRaw]]. 4991 **/ 4992 4993 /* internal 4994 * inflate.chunks -> Array 4995 * 4996 * Chunks of output data, if [[Inflate#onData]] not overriden. 4997 **/ 4998 4999 /** 5000 * Inflate.result -> Uint8Array|Array|String 5001 * 5002 * Uncompressed result, generated by default [[Inflate#onData]] 5003 * and [[Inflate#onEnd]] handlers. Filled after you push last chunk 5004 * (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you 5005 * push a chunk with explicit flush (call [[Inflate#push]] with 5006 * `Z_SYNC_FLUSH` param). 5007 **/ 5008 5009 /** 5010 * Inflate.err -> Number 5011 * 5012 * Error code after inflate finished. 0 (Z_OK) on success. 5013 * Should be checked if broken data possible. 5014 **/ 5015 5016 /** 5017 * Inflate.msg -> String 5018 * 5019 * Error message, if [[Inflate.err]] != 0 5020 **/ 5021 5022 5023 /** 5024 * new Inflate(options) 5025 * - options (Object): zlib inflate options. 
 *
 * Creates new inflator instance with specified params. Throws exception
 * on bad params. Supported options:
 *
 * - `windowBits`
 * - `dictionary`
 *
 * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
 * for more information on these.
 *
 * Additional options, for internal needs:
 *
 * - `chunkSize` - size of generated data chunks (16K by default)
 * - `raw` (Boolean) - do raw inflate
 * - `to` (String) - if equal to 'string', then result will be converted
 *   from utf8 to utf16 (javascript) string. When string output requested,
 *   chunk length can differ from `chunkSize`, depending on content.
 *
 * By default, when no options set, autodetect deflate/gzip data format via
 * wrapper header.
 *
 * ##### Example:
 *
 * ```javascript
 * var pako = require('pako')
 *   , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])
 *   , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);
 *
 * var inflate = new pako.Inflate({ level: 3});
 *
 * inflate.push(chunk1, false);
 * inflate.push(chunk2, true);  // true -> last chunk
 *
 * if (inflate.err) { throw new Error(inflate.err); }
 *
 * console.log(inflate.result);
 * ```
 **/
function Inflate(options) {
  // Allow calling without `new`.
  if (!(this instanceof Inflate)) return new Inflate(options);

  this.options = utils.assign({
    chunkSize: 16384,
    windowBits: 0,
    to: ''
  }, options || {});

  var opt = this.options;

  // Force window size for `raw` data, if not set directly,
  // because we have no header for autodetect.
  // zlib encodes "raw" as a negative windowBits value.
  if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) {
    opt.windowBits = -opt.windowBits;
    if (opt.windowBits === 0) { opt.windowBits = -15; }
  }

  // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate
  if ((opt.windowBits >= 0) && (opt.windowBits < 16) &&
      !(options && options.windowBits)) {
    opt.windowBits += 32;
  }

  // Gzip header has no info about windows size, we can do autodetect only
  // for deflate. So, if window size not set, force it to max when gzip possible
  if ((opt.windowBits > 15) && (opt.windowBits < 48)) {
    // bit 3 (16) -> gzipped data
    // bit 4 (32) -> autodetect gzip/deflate
    if ((opt.windowBits & 15) === 0) {
      opt.windowBits |= 15;
    }
  }

  this.err    = 0;      // error code, if happens (0 = Z_OK)
  this.msg    = '';     // error message
  this.ended  = false;  // used to avoid multiple onEnd() calls
  this.chunks = [];     // chunks of compressed data

  this.strm = new ZStream();
  this.strm.avail_out = 0;

  var status = zlib_inflate.inflateInit2(
    this.strm,
    opt.windowBits
  );

  // Bad options are the only way init can fail here.
  if (status !== c.Z_OK) {
    throw new Error(msg[status]);
  }

  this.header = new GZheader();

  zlib_inflate.inflateGetHeader(this.strm, this.header);
}

/**
 * Inflate#push(data[, mode]) -> Boolean
 * - data (Uint8Array|Array|ArrayBuffer|String): input data
 * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.
 *   See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
 *
 * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with
 * new output chunks. Returns `true` on success. The last data block must have
 * mode Z_FINISH (or `true`). That will flush internal pending buffers and call
 * [[Inflate#onEnd]]. For interim explicit flushes (without ending the stream) you
 * can use mode Z_SYNC_FLUSH, keeping the decompression context.
 *
 * On fail call [[Inflate#onEnd]] with error code and return false.
 *
 * We strongly recommend to use `Uint8Array` on input for best speed (output
 * format is detected automatically). Also, don't skip last param and always
 * use the same type in your code (boolean or number). That will improve JS speed.
 *
 * For regular `Array`-s make sure all elements are [0..255].
 *
 * ##### Example
 *
 * ```javascript
 * push(chunk, false); // push one of data chunks
 * ...
 * push(chunk, true);  // push last chunk
 * ```
 **/
Inflate.prototype.push = function (data, mode) {
  var strm = this.strm;
  var chunkSize = this.options.chunkSize;
  var dictionary = this.options.dictionary;
  var status, _mode;
  var next_out_utf8, tail, utf8str;
  var dict;

  // Flag to properly process Z_BUF_ERROR on testing inflate call
  // when we check that all output data was flushed.
  var allowBufError = false;

  if (this.ended) { return false; }
  // Integer mode is used as-is; booleans map to Z_FINISH / Z_NO_FLUSH.
  _mode = (mode === ~~mode) ? mode : ((mode === true) ? c.Z_FINISH : c.Z_NO_FLUSH);

  // Convert data if needed
  if (typeof data === 'string') {
    // Only binary strings can be decompressed on practice
    strm.input = strings.binstring2buf(data);
  } else if (toString.call(data) === '[object ArrayBuffer]') {
    strm.input = new Uint8Array(data);
  } else {
    strm.input = data;
  }

  strm.next_in = 0;
  strm.avail_in = strm.input.length;

  do {
    // Allocate a fresh output chunk when the previous one is full.
    if (strm.avail_out === 0) {
      strm.output = new utils.Buf8(chunkSize);
      strm.next_out = 0;
      strm.avail_out = chunkSize;
    }

    status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH);    /* no bad return value */

    if (status === c.Z_NEED_DICT && dictionary) {
      // Convert data if needed
      if (typeof dictionary === 'string') {
        dict = strings.string2buf(dictionary);
      } else if (toString.call(dictionary) === '[object ArrayBuffer]') {
        dict = new Uint8Array(dictionary);
      } else {
        dict = dictionary;
      }

      status = zlib_inflate.inflateSetDictionary(this.strm, dict);

    }

    // See allowBufError note above: a Z_BUF_ERROR from the extra
    // "drain" pass is expected and not a real failure.
    if (status === c.Z_BUF_ERROR && allowBufError === true) {
      status = c.Z_OK;
      allowBufError = false;
    }

    if (status !== c.Z_STREAM_END && status !== c.Z_OK) {
      this.onEnd(status);
      this.ended = true;
      return false;
    }

    if (strm.next_out) {
      // Emit data when the chunk is full, the stream ended, or the caller
      // asked for a flush/finish and all input was consumed.
      if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) {

        if (this.options.to === 'string') {

          // Only emit up to the last complete utf8 sequence; keep the rest.
          next_out_utf8 = strings.utf8border(strm.output, strm.next_out);

          tail = strm.next_out - next_out_utf8;
          utf8str = strings.buf2string(strm.output, next_out_utf8);

          // move tail (incomplete utf8 sequence) to the chunk start
          strm.next_out = tail;
          strm.avail_out = chunkSize - tail;
          if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); }

          this.onData(utf8str);

        } else {
          this.onData(utils.shrinkBuf(strm.output, strm.next_out));
        }
      }
    }

    // When no more input data, we should check that internal inflate buffers
    // are flushed. The only way to do it when avail_out = 0 - run one more
    // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR.
    // Here we set flag to process this error properly.
    //
    // NOTE. Deflate does not return error in this case and does not needs such
    // logic.
    if (strm.avail_in === 0 && strm.avail_out === 0) {
      allowBufError = true;
    }

  } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END);

  if (status === c.Z_STREAM_END) {
    _mode = c.Z_FINISH;
  }

  // Finalize on the last chunk.
  if (_mode === c.Z_FINISH) {
    status = zlib_inflate.inflateEnd(this.strm);
    this.onEnd(status);
    this.ended = true;
    return status === c.Z_OK;
  }

  // callback interim results if Z_SYNC_FLUSH.
  if (_mode === c.Z_SYNC_FLUSH) {
    this.onEnd(c.Z_OK);
    strm.avail_out = 0;
    return true;
  }

  return true;
};


/**
 * Inflate#onData(chunk) -> Void
 * - chunk (Uint8Array|Array|String): output data. Type of array depends
 *   on js engine support. When string output requested, each chunk
 *   will be string.
 *
 * By default, stores data blocks in `chunks[]` property and glue
 * those in `onEnd`. Override this handler, if you need another behaviour.
 **/
Inflate.prototype.onData = function (chunk) {
  this.chunks.push(chunk);
};


/**
 * Inflate#onEnd(status) -> Void
 * - status (Number): inflate status. 0 (Z_OK) on success,
 *   other if not.
 *
 * Called either after you tell inflate that the input stream is
 * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)
 * or if an error happened.
By default - join collected chunks, 5291 * free memory and fill `results` / `err` properties. 5292 **/ 5293 Inflate.prototype.onEnd = function (status) { 5294 // On success - join 5295 if (status === c.Z_OK) { 5296 if (this.options.to === 'string') { 5297 // Glue & convert here, until we teach pako to send 5298 // utf8 alligned strings to onData 5299 this.result = this.chunks.join(''); 5300 } else { 5301 this.result = utils.flattenChunks(this.chunks); 5302 } 5303 } 5304 this.chunks = []; 5305 this.err = status; 5306 this.msg = this.strm.msg; 5307 }; 5308 5309 5310 /** 5311 * inflate(data[, options]) -> Uint8Array|Array|String 5312 * - data (Uint8Array|Array|String): input data to decompress. 5313 * - options (Object): zlib inflate options. 5314 * 5315 * Decompress `data` with inflate/ungzip and `options`. Autodetect 5316 * format via wrapper header by default. That's why we don't provide 5317 * separate `ungzip` method. 5318 * 5319 * Supported options are: 5320 * 5321 * - windowBits 5322 * 5323 * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) 5324 * for more information. 5325 * 5326 * Sugar (options): 5327 * 5328 * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify 5329 * negative windowBits implicitly. 5330 * - `to` (String) - if equal to 'string', then result will be converted 5331 * from utf8 to utf16 (javascript) string. When string output requested, 5332 * chunk length can differ from `chunkSize`, depending on content. 
5333 * 5334 * 5335 * ##### Example: 5336 * 5337 * ```javascript 5338 * var pako = require('pako') 5339 * , input = pako.deflate([1,2,3,4,5,6,7,8,9]) 5340 * , output; 5341 * 5342 * try { 5343 * output = pako.inflate(input); 5344 * } catch (err) 5345 * console.log(err); 5346 * } 5347 * ``` 5348 **/ 5349 function inflate(input, options) { 5350 var inflator = new Inflate(options); 5351 5352 inflator.push(input, true); 5353 5354 // That will never happens, if you don't cheat with options :) 5355 if (inflator.err) { throw inflator.msg || msg[inflator.err]; } 5356 5357 return inflator.result; 5358 } 5359 5360 5361 /** 5362 * inflateRaw(data[, options]) -> Uint8Array|Array|String 5363 * - data (Uint8Array|Array|String): input data to decompress. 5364 * - options (Object): zlib inflate options. 5365 * 5366 * The same as [[inflate]], but creates raw data, without wrapper 5367 * (header and adler32 crc). 5368 **/ 5369 function inflateRaw(input, options) { 5370 options = options || {}; 5371 options.raw = true; 5372 return inflate(input, options); 5373 } 5374 5375 5376 /** 5377 * ungzip(data[, options]) -> Uint8Array|Array|String 5378 * - data (Uint8Array|Array|String): input data to decompress. 5379 * - options (Object): zlib inflate options. 5380 * 5381 * Just shortcut to [[inflate]], because it autodetects format 5382 * by header.content. Done for convenience. 
5383 **/ 5384 5385 5386 exports.Inflate = Inflate; 5387 exports.inflate = inflate; 5388 exports.inflateRaw = inflateRaw; 5389 exports.ungzip = inflate; 5390 5391 },{"./utils/common":41,"./utils/strings":42,"./zlib/constants":44,"./zlib/gzheader":47,"./zlib/inflate":49,"./zlib/messages":51,"./zlib/zstream":53}],41:[function(require,module,exports){ 5392 'use strict'; 5393 5394 5395 var TYPED_OK = (typeof Uint8Array !== 'undefined') && 5396 (typeof Uint16Array !== 'undefined') && 5397 (typeof Int32Array !== 'undefined'); 5398 5399 5400 exports.assign = function (obj /*from1, from2, from3, ...*/) { 5401 var sources = Array.prototype.slice.call(arguments, 1); 5402 while (sources.length) { 5403 var source = sources.shift(); 5404 if (!source) { continue; } 5405 5406 if (typeof source !== 'object') { 5407 throw new TypeError(source + 'must be non-object'); 5408 } 5409 5410 for (var p in source) { 5411 if (source.hasOwnProperty(p)) { 5412 obj[p] = source[p]; 5413 } 5414 } 5415 } 5416 5417 return obj; 5418 }; 5419 5420 5421 // reduce buffer size, avoiding mem copy 5422 exports.shrinkBuf = function (buf, size) { 5423 if (buf.length === size) { return buf; } 5424 if (buf.subarray) { return buf.subarray(0, size); } 5425 buf.length = size; 5426 return buf; 5427 }; 5428 5429 5430 var fnTyped = { 5431 arraySet: function (dest, src, src_offs, len, dest_offs) { 5432 if (src.subarray && dest.subarray) { 5433 dest.set(src.subarray(src_offs, src_offs + len), dest_offs); 5434 return; 5435 } 5436 // Fallback to ordinary array 5437 for (var i = 0; i < len; i++) { 5438 dest[dest_offs + i] = src[src_offs + i]; 5439 } 5440 }, 5441 // Join array of chunks to single array. 
5442 flattenChunks: function (chunks) { 5443 var i, l, len, pos, chunk, result; 5444 5445 // calculate data length 5446 len = 0; 5447 for (i = 0, l = chunks.length; i < l; i++) { 5448 len += chunks[i].length; 5449 } 5450 5451 // join chunks 5452 result = new Uint8Array(len); 5453 pos = 0; 5454 for (i = 0, l = chunks.length; i < l; i++) { 5455 chunk = chunks[i]; 5456 result.set(chunk, pos); 5457 pos += chunk.length; 5458 } 5459 5460 return result; 5461 } 5462 }; 5463 5464 var fnUntyped = { 5465 arraySet: function (dest, src, src_offs, len, dest_offs) { 5466 for (var i = 0; i < len; i++) { 5467 dest[dest_offs + i] = src[src_offs + i]; 5468 } 5469 }, 5470 // Join array of chunks to single array. 5471 flattenChunks: function (chunks) { 5472 return [].concat.apply([], chunks); 5473 } 5474 }; 5475 5476 5477 // Enable/Disable typed arrays use, for testing 5478 // 5479 exports.setTyped = function (on) { 5480 if (on) { 5481 exports.Buf8 = Uint8Array; 5482 exports.Buf16 = Uint16Array; 5483 exports.Buf32 = Int32Array; 5484 exports.assign(exports, fnTyped); 5485 } else { 5486 exports.Buf8 = Array; 5487 exports.Buf16 = Array; 5488 exports.Buf32 = Array; 5489 exports.assign(exports, fnUntyped); 5490 } 5491 }; 5492 5493 exports.setTyped(TYPED_OK); 5494 5495 },{}],42:[function(require,module,exports){ 5496 // String encode/decode helpers 5497 'use strict'; 5498 5499 5500 var utils = require('./common'); 5501 5502 5503 // Quick check if we can use fast array to bin string conversion 5504 // 5505 // - apply(Array) can fail on Android 2.2 5506 // - apply(Uint8Array) can fail on iOS 5.1 Safary 5507 // 5508 var STR_APPLY_OK = true; 5509 var STR_APPLY_UIA_OK = true; 5510 5511 try { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; } 5512 try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; } 5513 5514 5515 // Table with utf8 lengths (calculated by first byte of sequence) 5516 // Note, that 5 & 6-byte values and 
// some 4-byte values can not be represented in JS,
// because max possible codepoint is 0x10ffff
var _utf8len = new utils.Buf8(256);
for (var q = 0; q < 256; q++) {
  _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);
}
// NOTE(review): index 254 is assigned twice; presumably 255 (also an invalid
// UTF-8 start byte) was intended for the second assignment, so 0xFF keeps
// length 6 from the loop above - confirm against upstream pako.
_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start


// convert string to array (typed, when possible)
// Encodes a JS (UTF-16) string as UTF-8 bytes, pairing surrogates.
exports.string2buf = function (str) {
  var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;

  // count binary size
  for (m_pos = 0; m_pos < str_len; m_pos++) {
    c = str.charCodeAt(m_pos);
    // Combine a high+low surrogate pair into one code point.
    if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
      c2 = str.charCodeAt(m_pos + 1);
      if ((c2 & 0xfc00) === 0xdc00) {
        c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
        m_pos++;
      }
    }
    buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;
  }

  // allocate buffer
  buf = new utils.Buf8(buf_len);

  // convert
  for (i = 0, m_pos = 0; i < buf_len; m_pos++) {
    c = str.charCodeAt(m_pos);
    if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
      c2 = str.charCodeAt(m_pos + 1);
      if ((c2 & 0xfc00) === 0xdc00) {
        c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
        m_pos++;
      }
    }
    if (c < 0x80) {
      /* one byte */
      buf[i++] = c;
    } else if (c < 0x800) {
      /* two bytes */
      buf[i++] = 0xC0 | (c >>> 6);
      buf[i++] = 0x80 | (c & 0x3f);
    } else if (c < 0x10000) {
      /* three bytes */
      buf[i++] = 0xE0 | (c >>> 12);
      buf[i++] = 0x80 | (c >>> 6 & 0x3f);
      buf[i++] = 0x80 | (c & 0x3f);
    } else {
      /* four bytes */
      buf[i++] = 0xf0 | (c >>> 18);
      buf[i++] = 0x80 | (c >>> 12 & 0x3f);
      buf[i++] = 0x80 | (c >>> 6 & 0x3f);
      buf[i++] = 0x80 | (c & 0x3f);
    }
  }

  return buf;
};

// Helper (used in 2 places)
function buf2binstring(buf, len) {
  // use fallback for big arrays to avoid stack overflow
  if (len < 65537) {
    if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) {
      return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len));
    }
  }

  // Slow per-character path.
  var result = '';
  for (var i = 0; i < len; i++) {
    result += String.fromCharCode(buf[i]);
  }
  return result;
}


// Convert byte array to binary string
exports.buf2binstring = function (buf) {
  return buf2binstring(buf, buf.length);
};


// Convert binary string (typed, when possible)
exports.binstring2buf = function (str) {
  var buf = new utils.Buf8(str.length);
  for (var i = 0, len = buf.length; i < len; i++) {
    buf[i] = str.charCodeAt(i);
  }
  return buf;
};


// convert array to string
// Decodes UTF-8 bytes (up to `max`, or all) into a JS (UTF-16) string.
// Invalid or truncated sequences become U+FFFD.
exports.buf2string = function (buf, max) {
  var i, out, c, c_len;
  var len = max || buf.length;

  // Reserve max possible length (2 words per char)
  // NB: by unknown reasons, Array is significantly faster for
  //     String.fromCharCode.apply than Uint16Array.
  var utf16buf = new Array(len * 2);

  for (out = 0, i = 0; i < len;) {
    c = buf[i++];
    // quick process ascii
    if (c < 0x80) { utf16buf[out++] = c; continue; }

    c_len = _utf8len[c];
    // skip 5 & 6 byte codes
    if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }

    // apply mask on first byte
    c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;
    // join the rest
    while (c_len > 1 && i < len) {
      c = (c << 6) | (buf[i++] & 0x3f);
      c_len--;
    }

    // terminated by end of string?
    if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }

    if (c < 0x10000) {
      utf16buf[out++] = c;
    } else {
      // Re-split astral code points into a surrogate pair.
      c -= 0x10000;
      utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);
      utf16buf[out++] = 0xdc00 | (c & 0x3ff);
    }
  }

  return buf2binstring(utf16buf, out);
};


// Calculate max possible position in utf8 buffer,
// that will not break sequence. If that's not possible
// - (very small limits) return max size as is.
//
// buf[] - utf8 bytes array
// max   - length limit (mandatory);
exports.utf8border = function (buf, max) {
  var pos;

  max = max || buf.length;
  if (max > buf.length) { max = buf.length; }

  // go back from last position, until start of sequence found
  pos = max - 1;
  while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }

  // Edge case - very small and broken sequence,
  // return max, because we should return something anyway.
  if (pos < 0) { return max; }

  // If we came to start of buffer - that means buffer is too small,
  // return max too.
  if (pos === 0) { return max; }

  return (pos + _utf8len[buf[pos]] > max) ? pos : max;
};

},{"./common":41}],43:[function(require,module,exports){
'use strict';

// Note: adler32 takes 12% for level 0 and 2% for level 6.
// It isn't worth adding the optimizations from the original.
// Small size is preferable.

// (C) 1995-2013 Jean-loup Gailly and Mark Adler
// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
5695 // 5696 // Permission is granted to anyone to use this software for any purpose, 5697 // including commercial applications, and to alter it and redistribute it 5698 // freely, subject to the following restrictions: 5699 // 5700 // 1. The origin of this software must not be misrepresented; you must not 5701 // claim that you wrote the original software. If you use this software 5702 // in a product, an acknowledgment in the product documentation would be 5703 // appreciated but is not required. 5704 // 2. Altered source versions must be plainly marked as such, and must not be 5705 // misrepresented as being the original software. 5706 // 3. This notice may not be removed or altered from any source distribution. 5707 5708 function adler32(adler, buf, len, pos) { 5709 var s1 = (adler & 0xffff) |0, 5710 s2 = ((adler >>> 16) & 0xffff) |0, 5711 n = 0; 5712 5713 while (len !== 0) { 5714 // Set limit ~ twice less than 5552, to keep 5715 // s2 in 31-bits, because we force signed ints. 5716 // in other case %= will fail. 5717 n = len > 2000 ? 2000 : len; 5718 len -= n; 5719 5720 do { 5721 s1 = (s1 + buf[pos++]) |0; 5722 s2 = (s2 + s1) |0; 5723 } while (--n); 5724 5725 s1 %= 65521; 5726 s2 %= 65521; 5727 } 5728 5729 return (s1 | (s2 << 16)) |0; 5730 } 5731 5732 5733 module.exports = adler32; 5734 5735 },{}],44:[function(require,module,exports){ 5736 'use strict'; 5737 5738 // (C) 1995-2013 Jean-loup Gailly and Mark Adler 5739 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin 5740 // 5741 // This software is provided 'as-is', without any express or implied 5742 // warranty. In no event will the authors be held liable for any damages 5743 // arising from the use of this software. 5744 // 5745 // Permission is granted to anyone to use this software for any purpose, 5746 // including commercial applications, and to alter it and redistribute it 5747 // freely, subject to the following restrictions: 5748 // 5749 // 1. 
The origin of this software must not be misrepresented; you must not 5750 // claim that you wrote the original software. If you use this software 5751 // in a product, an acknowledgment in the product documentation would be 5752 // appreciated but is not required. 5753 // 2. Altered source versions must be plainly marked as such, and must not be 5754 // misrepresented as being the original software. 5755 // 3. This notice may not be removed or altered from any source distribution. 5756 5757 module.exports = { 5758 5759 /* Allowed flush values; see deflate() and inflate() below for details */ 5760 Z_NO_FLUSH: 0, 5761 Z_PARTIAL_FLUSH: 1, 5762 Z_SYNC_FLUSH: 2, 5763 Z_FULL_FLUSH: 3, 5764 Z_FINISH: 4, 5765 Z_BLOCK: 5, 5766 Z_TREES: 6, 5767 5768 /* Return codes for the compression/decompression functions. Negative values 5769 * are errors, positive values are used for special but normal events. 5770 */ 5771 Z_OK: 0, 5772 Z_STREAM_END: 1, 5773 Z_NEED_DICT: 2, 5774 Z_ERRNO: -1, 5775 Z_STREAM_ERROR: -2, 5776 Z_DATA_ERROR: -3, 5777 //Z_MEM_ERROR: -4, 5778 Z_BUF_ERROR: -5, 5779 //Z_VERSION_ERROR: -6, 5780 5781 /* compression levels */ 5782 Z_NO_COMPRESSION: 0, 5783 Z_BEST_SPEED: 1, 5784 Z_BEST_COMPRESSION: 9, 5785 Z_DEFAULT_COMPRESSION: -1, 5786 5787 5788 Z_FILTERED: 1, 5789 Z_HUFFMAN_ONLY: 2, 5790 Z_RLE: 3, 5791 Z_FIXED: 4, 5792 Z_DEFAULT_STRATEGY: 0, 5793 5794 /* Possible values of the data_type field (though see inflate()) */ 5795 Z_BINARY: 0, 5796 Z_TEXT: 1, 5797 //Z_ASCII: 1, // = Z_TEXT (deprecated) 5798 Z_UNKNOWN: 2, 5799 5800 /* The deflate compression method */ 5801 Z_DEFLATED: 8 5802 //Z_NULL: null // Use -1 or null inline, depending on var type 5803 }; 5804 5805 },{}],45:[function(require,module,exports){ 5806 'use strict'; 5807 5808 // Note: we can't get significant speed boost here. 5809 // So write code to minimize size - no pregenerated tables 5810 // and array tools dependencies. 

// (C) 1995-2013 Jean-loup Gailly and Mark Adler
// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//   claim that you wrote the original software. If you use this software
//   in a product, an acknowledgment in the product documentation would be
//   appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//   misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.

// Use ordinary array, since untyped makes no boost here
// Builds the 256-entry CRC-32 lookup table (polynomial 0xEDB88320).
function makeTable() {
  var table = [];

  for (var n = 0; n < 256; n++) {
    var c = n;
    for (var k = 0; k < 8; k++) {
      c = (c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1);
    }
    table[n] = c;
  }

  return table;
}

// Create table on load. Just 256 signed longs. Not a problem.
5847 var crcTable = makeTable(); 5848 5849 5850 function crc32(crc, buf, len, pos) { 5851 var t = crcTable, 5852 end = pos + len; 5853 5854 crc ^= -1; 5855 5856 for (var i = pos; i < end; i++) { 5857 crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF]; 5858 } 5859 5860 return (crc ^ (-1)); // >>> 0; 5861 } 5862 5863 5864 module.exports = crc32; 5865 5866 },{}],46:[function(require,module,exports){ 5867 'use strict'; 5868 5869 // (C) 1995-2013 Jean-loup Gailly and Mark Adler 5870 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin 5871 // 5872 // This software is provided 'as-is', without any express or implied 5873 // warranty. In no event will the authors be held liable for any damages 5874 // arising from the use of this software. 5875 // 5876 // Permission is granted to anyone to use this software for any purpose, 5877 // including commercial applications, and to alter it and redistribute it 5878 // freely, subject to the following restrictions: 5879 // 5880 // 1. The origin of this software must not be misrepresented; you must not 5881 // claim that you wrote the original software. If you use this software 5882 // in a product, an acknowledgment in the product documentation would be 5883 // appreciated but is not required. 5884 // 2. Altered source versions must be plainly marked as such, and must not be 5885 // misrepresented as being the original software. 5886 // 3. This notice may not be removed or altered from any source distribution. 

var utils = require('../utils/common');
var trees = require('./trees');
var adler32 = require('./adler32');
var crc32 = require('./crc32');
var msg = require('./messages');

/* Public constants ==========================================================*/
/* ===========================================================================*/


/* Allowed flush values; see deflate() and inflate() below for details */
var Z_NO_FLUSH = 0;
var Z_PARTIAL_FLUSH = 1;
//var Z_SYNC_FLUSH = 2;
var Z_FULL_FLUSH = 3;
var Z_FINISH = 4;
var Z_BLOCK = 5;
//var Z_TREES = 6;


/* Return codes for the compression/decompression functions. Negative values
 * are errors, positive values are used for special but normal events.
 */
var Z_OK = 0;
var Z_STREAM_END = 1;
//var Z_NEED_DICT = 2;
//var Z_ERRNO = -1;
var Z_STREAM_ERROR = -2;
var Z_DATA_ERROR = -3;
//var Z_MEM_ERROR = -4;
var Z_BUF_ERROR = -5;
//var Z_VERSION_ERROR = -6;


/* compression levels */
//var Z_NO_COMPRESSION = 0;
//var Z_BEST_SPEED = 1;
//var Z_BEST_COMPRESSION = 9;
var Z_DEFAULT_COMPRESSION = -1;


var Z_FILTERED = 1;
var Z_HUFFMAN_ONLY = 2;
var Z_RLE = 3;
var Z_FIXED = 4;
var Z_DEFAULT_STRATEGY = 0;

/* Possible values of the data_type field (though see inflate()) */
//var Z_BINARY = 0;
//var Z_TEXT = 1;
//var Z_ASCII = 1; // = Z_TEXT
var Z_UNKNOWN = 2;


/* The deflate compression method */
var Z_DEFLATED = 8;

/*============================================================================*/

/* Internal deflate parameters (mirror zlib's deflate.h) */

var MAX_MEM_LEVEL = 9;
/* Maximum value for memLevel in deflateInit2 */
var MAX_WBITS = 15;
/* 32K LZ77 window */
var DEF_MEM_LEVEL = 8;


var LENGTH_CODES = 29;
/* number of length codes, not counting the
special END_BLOCK code */ 5957 var LITERALS = 256; 5958 /* number of literal bytes 0..255 */ 5959 var L_CODES = LITERALS + 1 + LENGTH_CODES; 5960 /* number of Literal or Length codes, including the END_BLOCK code */ 5961 var D_CODES = 30; 5962 /* number of distance codes */ 5963 var BL_CODES = 19; 5964 /* number of codes used to transfer the bit lengths */ 5965 var HEAP_SIZE = 2 * L_CODES + 1; 5966 /* maximum heap size */ 5967 var MAX_BITS = 15; 5968 /* All codes must not exceed MAX_BITS bits */ 5969 5970 var MIN_MATCH = 3; 5971 var MAX_MATCH = 258; 5972 var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1); 5973 5974 var PRESET_DICT = 0x20; 5975 5976 var INIT_STATE = 42; 5977 var EXTRA_STATE = 69; 5978 var NAME_STATE = 73; 5979 var COMMENT_STATE = 91; 5980 var HCRC_STATE = 103; 5981 var BUSY_STATE = 113; 5982 var FINISH_STATE = 666; 5983 5984 var BS_NEED_MORE = 1; /* block not completed, need more input or more output */ 5985 var BS_BLOCK_DONE = 2; /* block flush performed */ 5986 var BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */ 5987 var BS_FINISH_DONE = 4; /* finish done, accept no more input or output */ 5988 5989 var OS_CODE = 0x03; // Unix :) . Don't detect, use this default. 5990 5991 function err(strm, errorCode) { 5992 strm.msg = msg[errorCode]; 5993 return errorCode; 5994 } 5995 5996 function rank(f) { 5997 return ((f) << 1) - ((f) > 4 ? 9 : 0); 5998 } 5999 6000 function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } } 6001 6002 6003 /* ========================================================================= 6004 * Flush as much pending output as possible. All deflate() output goes 6005 * through this function so some applications may wish to modify it 6006 * to avoid allocating a large strm->output buffer and copying into it. 6007 * (See also read_buf()). 
 */
function flush_pending(strm) {
  var s = strm.state;

  //_tr_flush_bits(s);
  // Copy at most avail_out bytes of the pending buffer to the output.
  var len = s.pending;
  if (len > strm.avail_out) {
    len = strm.avail_out;
  }
  if (len === 0) { return; }

  utils.arraySet(strm.output, s.pending_buf, s.pending_out, len, strm.next_out);
  strm.next_out += len;
  s.pending_out += len;
  strm.total_out += len;
  strm.avail_out -= len;
  s.pending -= len;
  // Everything flushed: rewind the pending read cursor.
  if (s.pending === 0) {
    s.pending_out = 0;
  }
}


// Compress the current block (flagging it as last when `last` is set)
// and push the produced bytes toward the caller's output buffer.
function flush_block_only(s, last) {
  trees._tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last);
  s.block_start = s.strstart;
  flush_pending(s.strm);
}


// Append a single byte to the pending output buffer.
function put_byte(s, b) {
  s.pending_buf[s.pending++] = b;
}


/* =========================================================================
 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
 * IN assertion: the stream state is correct and there is enough room in
 * pending_buf.
 */
function putShortMSB(s, b) {
//  put_byte(s, (Byte)(b >> 8));
//  put_byte(s, (Byte)(b & 0xff));
  s.pending_buf[s.pending++] = (b >>> 8) & 0xff;
  s.pending_buf[s.pending++] = b & 0xff;
}


/* ===========================================================================
 * Read a new buffer from the current input stream, update the adler32
 * and total number of bytes read.  All deflate() input goes through
 * this function so some applications may wish to modify it to avoid
 * allocating a large strm->input buffer and copying from it.
 * (See also flush_pending()).
 */
/*
 * Returns the number of bytes actually copied (0 when avail_in is empty).
 * Depending on state.wrap, the running checksum is maintained with adler32
 * (wrap === 1, zlib wrapper) or crc32 (wrap === 2, gzip wrapper); raw
 * deflate keeps no checksum here.
 */
function read_buf(strm, buf, start, size) {
  var len = strm.avail_in;

  if (len > size) { len = size; }
  if (len === 0) { return 0; }

  strm.avail_in -= len;

  // zmemcpy(buf, strm->next_in, len);
  utils.arraySet(buf, strm.input, strm.next_in, len, start);
  if (strm.state.wrap === 1) {
    strm.adler = adler32(strm.adler, buf, len, start);
  }

  else if (strm.state.wrap === 2) {
    strm.adler = crc32(strm.adler, buf, len, start);
  }

  strm.next_in += len;
  strm.total_in += len;

  return len;
}


/* ===========================================================================
 * Set match_start to the longest match starting at the given string and
 * return its length. Matches shorter or equal to prev_length are discarded,
 * in which case the result is equal to prev_length and match_start is
 * garbage.
 * IN assertions: cur_match is the head of the hash chain for the current
 *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
 * OUT assertion: the match length is not greater than s->lookahead.
 *
 * Ported from zlib's deflate.c; the statement order and the unrolled
 * comparison loop below are intentional and must not be reordered.
 */
function longest_match(s, cur_match) {
  var chain_length = s.max_chain_length;      /* max hash chain length */
  var scan = s.strstart; /* current string */
  var match;                       /* matched string */
  var len;                           /* length of current match */
  var best_len = s.prev_length;              /* best match length so far */
  var nice_match = s.nice_match;             /* stop if match long enough */
  var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?
      s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;

  var _win = s.window; // shortcut

  var wmask = s.w_mask;
  var prev = s.prev;

  /* Stop when cur_match becomes <= limit. To simplify the code,
   * we prevent matches with the string of window index 0.
   */

  var strend = s.strstart + MAX_MATCH;
  /* Last byte and second-to-last byte of the current best match; used as a
   * cheap rejection test before comparing whole candidate strings. */
  var scan_end1 = _win[scan + best_len - 1];
  var scan_end = _win[scan + best_len];

  /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
   * It is easy to get rid of this optimization if necessary.
   */
  // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

  /* Do not waste too much time if we already have a good match: */
  if (s.prev_length >= s.good_match) {
    chain_length >>= 2;
  }
  /* Do not look for matches beyond the end of the input. This is necessary
   * to make deflate deterministic.
   */
  if (nice_match > s.lookahead) { nice_match = s.lookahead; }

  // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

  do {
    // Assert(cur_match < s->strstart, "no future");
    match = cur_match;

    /* Skip to next match if the match length cannot increase
     * or if the match length is less than 2.  Note that the checks below
     * for insufficient lookahead only occur occasionally for performance
     * reasons.  Therefore uninitialized memory will be accessed, and
     * conditional jumps will be made that depend on those values.
     * However the length of the match is limited to the lookahead, so
     * the output of deflate is not affected by the uninitialized values.
     */

    if (_win[match + best_len] !== scan_end ||
        _win[match + best_len - 1] !== scan_end1 ||
        _win[match] !== _win[scan] ||
        _win[++match] !== _win[scan + 1]) {
      continue;
    }

    /* The check at best_len-1 can be removed because it will be made
     * again later. (This heuristic is not always a win.)
     * It is not necessary to compare scan[2] and match[2] since they
     * are always equal when the other bytes match, given that
     * the hash keys are equal and that HASH_BITS >= 8.
     */
    scan += 2;
    match++;
    // Assert(*scan == *match, "match[2]?");

    /* We check for insufficient lookahead only every 8th comparison;
     * the 256th check will be made at strstart+258.
     * The loop body is intentionally empty: the ++ side effects in the
     * condition do all the scanning work.
     */
    do {
      /*jshint noempty:false*/
    } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
             _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
             _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
             _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
             scan < strend);

    // Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

    len = MAX_MATCH - (strend - scan);
    scan = strend - MAX_MATCH;

    if (len > best_len) {
      s.match_start = cur_match;
      best_len = len;
      if (len >= nice_match) {
        break;
      }
      scan_end1 = _win[scan + best_len - 1];
      scan_end = _win[scan + best_len];
    }
  } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0);

  /* Never report a match longer than the valid lookahead. */
  if (best_len <= s.lookahead) {
    return best_len;
  }
  return s.lookahead;
}


/* ===========================================================================
 * Fill the window when the lookahead becomes insufficient.
 * Updates strstart and lookahead.
 *
 * IN assertion: lookahead < MIN_LOOKAHEAD
 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
 *    At least one byte has been read, or avail_in == 0; reads are
 *    performed for at least two bytes (required for the zip translate_eol
 *    option -- not supported here).
 */
function fill_window(s) {
  var _w_size = s.w_size;
  var p, n, m, more, str;

  //Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");

  do {
    /* Free space at the end of the window, after the lookahead. */
    more = s.window_size - s.lookahead - s.strstart;

    // JS ints have 32 bit, block below not needed
    /* Deal with !@#$% 64K limit: */
    //if (sizeof(int) <= 2) {
    //  if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
    //    more = wsize;
    //
    //  } else if (more == (unsigned)(-1)) {
    //    /* Very unlikely, but possible on 16 bit machine if
    //     * strstart == 0 && lookahead == 1 (input done a byte at time)
    //     */
    //    more--;
    //  }
    //}


    /* If the window is almost full and there is insufficient lookahead,
     * move the upper half to the lower one to make room in the upper half.
     */
    if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) {

      utils.arraySet(s.window, s.window, _w_size, _w_size, 0);
      /* All window positions moved down by w_size; rebase every index that
       * refers into the window. */
      s.match_start -= _w_size;
      s.strstart -= _w_size;
      /* we now have strstart >= MAX_DIST */
      s.block_start -= _w_size;

      /* Slide the hash table (could be avoided with 32 bit values
       at the expense of memory usage). We slide even when level == 0
       to keep the hash table consistent if we switch back to level > 0
       later. (Using level 0 permanently is not an optimal usage of
       zlib, so we don't care about this pathological case.)
       */

      n = s.hash_size;
      p = n;
      do {
        m = s.head[--p];
        /* Entries that now point before the window become NIL (0). */
        s.head[p] = (m >= _w_size ? m - _w_size : 0);
      } while (--n);

      n = _w_size;
      p = n;
      do {
        m = s.prev[--p];
        s.prev[p] = (m >= _w_size ? m - _w_size : 0);
        /* If n is not on any hash chain, prev[n] is garbage but
         * its value will never be used.
         */
      } while (--n);

      more += _w_size;
    }
    if (s.strm.avail_in === 0) {
      break;
    }

    /* If there was no sliding:
     *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
     *    more == window_size - lookahead - strstart
     * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
     * => more >= window_size - 2*WSIZE + 2
     * In the BIG_MEM or MMAP case (not yet supported),
     *   window_size == input_size + MIN_LOOKAHEAD &&
     *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
     * Otherwise, window_size == 2*WSIZE so more >= 2.
     * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
     */
    //Assert(more >= 2, "more < 2");
    n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more);
    s.lookahead += n;

    /* Initialize the hash value now that we have some input: */
    if (s.lookahead + s.insert >= MIN_MATCH) {
      str = s.strstart - s.insert;
      s.ins_h = s.window[str];

      /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */
      s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask;
      //#if MIN_MATCH != 3
      //        Call update_hash() MIN_MATCH-3 more times
      //#endif
      /* Insert the strings that were deferred (s.insert of them) now that
       * enough bytes are available to hash them. */
      while (s.insert) {
        /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */
        s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask;

        s.prev[str & s.w_mask] = s.head[s.ins_h];
        s.head[s.ins_h] = str;
        str++;
        s.insert--;
        if (s.lookahead + s.insert < MIN_MATCH) {
          break;
        }
      }
    }
    /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
     * but this is not important since only literal bytes will be emitted.
     */

  } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0);

  /* If the WIN_INIT bytes after the end of the current data have never been
   * written, then zero those bytes in order to avoid memory check reports of
   * the use of uninitialized (or uninitialised as Julian writes) bytes by
   * the longest match routines.  Update the high water mark for the next
   * time through here.  WIN_INIT is set to MAX_MATCH since the longest match
   * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
   */
//  if (s.high_water < s.window_size) {
//    var curr = s.strstart + s.lookahead;
//    var init = 0;
//
//    if (s.high_water < curr) {
//      /* Previous high water mark below current data -- zero WIN_INIT
//       * bytes or up to end of window, whichever is less.
//       */
//      init = s.window_size - curr;
//      if (init > WIN_INIT)
//        init = WIN_INIT;
//      zmemzero(s->window + curr, (unsigned)init);
//      s->high_water = curr + init;
//    }
//    else if (s->high_water < (ulg)curr + WIN_INIT) {
//      /* High water mark at or above current data, but below current data
//       * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
//       * to end of window, whichever is less.
//       */
//      init = (ulg)curr + WIN_INIT - s->high_water;
//      if (init > s->window_size - s->high_water)
//        init = s->window_size - s->high_water;
//      zmemzero(s->window + s->high_water, (unsigned)init);
//      s->high_water += init;
//    }
//  }
//
//  Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
//    "not enough room for search");
}

/* ===========================================================================
 * Copy without compression as much as possible from the input stream, return
 * the current block state.
 * This function does not insert new strings in the dictionary since
 * uncompressible data is probably not useful. This function is used
 * only for the level=0 compression option.
 * NOTE: this function should be optimized to avoid extra copying from
 * window to pending_buf.
 *
 * Returns one of the BS_* block-state codes (BS_NEED_MORE,
 * BS_FINISH_STARTED, BS_FINISH_DONE).
 */
function deflate_stored(s, flush) {
  /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
   * to pending_buf_size, and each stored block has a 5 byte header:
   */
  var max_block_size = 0xffff;

  if (max_block_size > s.pending_buf_size - 5) {
    max_block_size = s.pending_buf_size - 5;
  }

  /* Copy as much as possible from input to output: */
  for (;;) {
    /* Fill the window as much as possible: */
    if (s.lookahead <= 1) {

      //Assert(s->strstart < s->w_size+MAX_DIST(s) ||
      //  s->block_start >= (long)s->w_size, "slide too late");
      //      if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) ||
      //        s.block_start >= s.w_size)) {
      //        throw  new Error("slide too late");
      //      }

      fill_window(s);
      if (s.lookahead === 0 && flush === Z_NO_FLUSH) {
        return BS_NEED_MORE;
      }

      if (s.lookahead === 0) {
        break;
      }
      /* flush the current block */
    }
    //Assert(s->block_start >= 0L, "block gone");
    //    if (s.block_start < 0) throw new Error("block gone");

    s.strstart += s.lookahead;
    s.lookahead = 0;

    /* Emit a stored block if pending_buf will be full: */
    /* NOTE: `var` is function-scoped, so max_start is effectively hoisted
     * out of this loop. */
    var max_start = s.block_start + max_block_size;

    if (s.strstart === 0 || s.strstart >= max_start) {
      /* strstart == 0 is possible when wraparound on 16-bit machine */
      s.lookahead = s.strstart - max_start;
      s.strstart = max_start;
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/


    }
    /* Flush if we may have to slide, otherwise block_start may become
     * negative and the data will be gone:
     */
    if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }

  s.insert = 0;

  if (flush === Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }

  /* Flush whatever data remains buffered since the last block boundary. */
  if (s.strstart > s.block_start) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }

  return BS_NEED_MORE;
}

/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 *
 * Returns one of the BS_* block-state codes.
 */
function deflate_fast(s, flush) {
  var hash_head;        /* head of the hash chain */
  var bflush;           /* set if current block must be flushed */

  for (;;) {
    /* Make sure that we always have enough lookahead, except
     * at the end of the input file. We need MAX_MATCH bytes
     * for the next match, plus MIN_MATCH bytes to insert the
     * string following the next match.
     */
    if (s.lookahead < MIN_LOOKAHEAD) {
      fill_window(s);
      if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {
        return BS_NEED_MORE;
      }
      if (s.lookahead === 0) {
        break; /* flush the current block */
      }
    }

    /* Insert the string window[strstart .. strstart+2] in the
     * dictionary, and set hash_head to the head of the hash chain:
     */
    hash_head = 0/*NIL*/;
    if (s.lookahead >= MIN_MATCH) {
      /*** INSERT_STRING(s, s.strstart, hash_head); ***/
      s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;
      hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
      s.head[s.ins_h] = s.strstart;
      /***/
    }

    /* Find the longest match, discarding those <= prev_length.
     * At this point we have always match_length < MIN_MATCH
     */
    if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) {
      /* To simplify the code, we prevent matches with the string
       * of window index 0 (in particular we have to avoid a match
       * of the string with itself at the start of the input file).
       */
      s.match_length = longest_match(s, hash_head);
      /* longest_match() sets match_start */
    }
    if (s.match_length >= MIN_MATCH) {
      // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only

      /*** _tr_tally_dist(s, s.strstart - s.match_start,
                     s.match_length - MIN_MATCH, bflush); ***/
      bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH);

      s.lookahead -= s.match_length;

      /* Insert new strings in the hash table only if the match length
       * is not too large. This saves time but degrades compression.
       */
      if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) {
        s.match_length--; /* string at strstart already in table */
        do {
          s.strstart++;
          /*** INSERT_STRING(s, s.strstart, hash_head); ***/
          s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;
          hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
          s.head[s.ins_h] = s.strstart;
          /***/
          /* strstart never exceeds WSIZE-MAX_MATCH, so there are
           * always MIN_MATCH bytes ahead.
           */
        } while (--s.match_length !== 0);
        s.strstart++;
      } else
      {
        /* Match too long to index fully: just skip over it and reseed the
         * rolling hash at the new position. */
        s.strstart += s.match_length;
        s.match_length = 0;
        s.ins_h = s.window[s.strstart];
        /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */
        s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask;

        //#if MIN_MATCH != 3
        //     Call UPDATE_HASH() MIN_MATCH-3 more times
        //#endif
        /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
         * matter since it will be recomputed at next deflate call.
         */
      }
    } else {
      /* No match, output a literal byte */
      //Tracevv((stderr,"%c", s.window[s.strstart]));
      /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
      bflush = trees._tr_tally(s, 0, s.window[s.strstart]);

      s.lookahead--;
      s.strstart++;
    }
    if (bflush) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }
  s.insert = ((s.strstart < (MIN_MATCH - 1)) ?
s.strstart : MIN_MATCH - 1);
  if (flush === Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  /* last_lit != 0 means there are buffered tally entries left to flush. */
  if (s.last_lit) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
}

/* ===========================================================================
 * Same as above, but achieves better compression. We use a lazy
 * evaluation for matches: a match is finally adopted only if there is
 * no better match at the next window position.
 *
 * Returns one of the BS_* block-state codes.
 */
function deflate_slow(s, flush) {
  var hash_head;          /* head of hash chain */
  var bflush;             /* set if current block must be flushed */

  var max_insert;

  /* Process the input block. */
  for (;;) {
    /* Make sure that we always have enough lookahead, except
     * at the end of the input file. We need MAX_MATCH bytes
     * for the next match, plus MIN_MATCH bytes to insert the
     * string following the next match.
     */
    if (s.lookahead < MIN_LOOKAHEAD) {
      fill_window(s);
      if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {
        return BS_NEED_MORE;
      }
      if (s.lookahead === 0) { break; } /* flush the current block */
    }

    /* Insert the string window[strstart .. strstart+2] in the
     * dictionary, and set hash_head to the head of the hash chain:
     */
    hash_head = 0/*NIL*/;
    if (s.lookahead >= MIN_MATCH) {
      /*** INSERT_STRING(s, s.strstart, hash_head); ***/
      s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;
      hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
      s.head[s.ins_h] = s.strstart;
      /***/
    }

    /* Find the longest match, discarding those <= prev_length.
     */
    s.prev_length = s.match_length;
    s.prev_match = s.match_start;
    s.match_length = MIN_MATCH - 1;

    if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match &&
        s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) {
      /* To simplify the code, we prevent matches with the string
       * of window index 0 (in particular we have to avoid a match
       * of the string with itself at the start of the input file).
       */
      s.match_length = longest_match(s, hash_head);
      /* longest_match() sets match_start */

      if (s.match_length <= 5 &&
         (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) {

        /* If prev_match is also MIN_MATCH, match_start is garbage
         * but we will ignore the current match anyway.
         */
        s.match_length = MIN_MATCH - 1;
      }
    }
    /* If there was a match at the previous step and the current
     * match is not better, output the previous match:
     */
    if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) {
      max_insert = s.strstart + s.lookahead - MIN_MATCH;
      /* Do not insert strings in hash table beyond this. */

      //check_match(s, s.strstart-1, s.prev_match, s.prev_length);

      /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match,
                     s.prev_length - MIN_MATCH, bflush);***/
      bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH);
      /* Insert in hash table all strings up to the end of the match.
       * strstart-1 and strstart are already inserted. If there is not
       * enough lookahead, the last two strings are not inserted in
       * the hash table.
       */
      s.lookahead -= s.prev_length - 1;
      s.prev_length -= 2;
      do {
        if (++s.strstart <= max_insert) {
          /*** INSERT_STRING(s, s.strstart, hash_head); ***/
          s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask;
          hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
          s.head[s.ins_h] = s.strstart;
          /***/
        }
      } while (--s.prev_length !== 0);
      s.match_available = 0;
      s.match_length = MIN_MATCH - 1;
      s.strstart++;

      if (bflush) {
        /*** FLUSH_BLOCK(s, 0); ***/
        flush_block_only(s, false);
        if (s.strm.avail_out === 0) {
          return BS_NEED_MORE;
        }
        /***/
      }

    } else if (s.match_available) {
      /* If there was no match at the previous position, output a
       * single literal. If there was a match but the current match
       * is longer, truncate the previous match to a single literal.
       */
      //Tracevv((stderr,"%c", s->window[s->strstart-1]));
      /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
      bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);

      if (bflush) {
        /*** FLUSH_BLOCK_ONLY(s, 0) ***/
        flush_block_only(s, false);
        /***/
      }
      s.strstart++;
      s.lookahead--;
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
    } else {
      /* There is no previous match to compare with, wait for
       * the next step to decide.
       */
      s.match_available = 1;
      s.strstart++;
      s.lookahead--;
    }
  }
  //Assert (flush != Z_NO_FLUSH, "no flush?");
  /* Emit the literal that was still deferred when the loop ended. */
  if (s.match_available) {
    //Tracevv((stderr,"%c", s->window[s->strstart-1]));
    /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
    bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]);

    s.match_available = 0;
  }
  s.insert = s.strstart < MIN_MATCH - 1 ?
s.strstart : MIN_MATCH - 1;
  if (flush === Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  /* last_lit != 0 means there are buffered tally entries left to flush. */
  if (s.last_lit) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }

  return BS_BLOCK_DONE;
}


/* ===========================================================================
 * For Z_RLE, simply look for runs of bytes, generate matches only of distance
 * one.  Do not maintain a hash table.  (It will be regenerated if this run of
 * deflate switches away from Z_RLE.)
 *
 * Returns one of the BS_* block-state codes.
 */
function deflate_rle(s, flush) {
  var bflush;            /* set if current block must be flushed */
  var prev;              /* byte at distance one to match */
  var scan, strend;      /* scan goes up to strend for length of run */

  var _win = s.window;

  for (;;) {
    /* Make sure that we always have enough lookahead, except
     * at the end of the input file. We need MAX_MATCH bytes
     * for the longest run, plus one for the unrolled loop.
     */
    if (s.lookahead <= MAX_MATCH) {
      fill_window(s);
      if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) {
        return BS_NEED_MORE;
      }
      if (s.lookahead === 0) { break; } /* flush the current block */
    }

    /* See how many times the previous byte repeats */
    s.match_length = 0;
    if (s.lookahead >= MIN_MATCH && s.strstart > 0) {
      scan = s.strstart - 1;
      prev = _win[scan];
      /* Require at least MIN_MATCH repeats before counting the full run;
       * the empty loop body below scans via ++ side effects in the
       * condition, checking `scan < strend` only every 8th comparison. */
      if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) {
        strend = s.strstart + MAX_MATCH;
        do {
          /*jshint noempty:false*/
        } while (prev === _win[++scan] && prev === _win[++scan] &&
                 prev === _win[++scan] && prev === _win[++scan] &&
                 prev === _win[++scan] && prev === _win[++scan] &&
                 prev === _win[++scan] && prev === _win[++scan] &&
                 scan < strend);
        s.match_length = MAX_MATCH - (strend - scan);
        if (s.match_length > s.lookahead) {
          s.match_length = s.lookahead;
        }
      }
      //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
    }

    /* Emit match if have run of MIN_MATCH or longer, else emit literal */
    if (s.match_length >= MIN_MATCH) {
      //check_match(s, s.strstart, s.strstart - 1, s.match_length);

      /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/
      bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH);

      s.lookahead -= s.match_length;
      s.strstart += s.match_length;
      s.match_length = 0;
    } else {
      /* No match, output a literal byte */
      //Tracevv((stderr,"%c", s->window[s->strstart]));
      /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
      bflush = trees._tr_tally(s, 0, s.window[s.strstart]);

      s.lookahead--;
      s.strstart++;
    }
    if (bflush) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }
  s.insert = 0;
  if (flush ===
Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  /* last_lit != 0 means there are buffered tally entries left to flush. */
  if (s.last_lit) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
}

/* ===========================================================================
 * For Z_HUFFMAN_ONLY, do not look for matches.  Do not maintain a hash table.
 * (It will be regenerated if this run of deflate switches away from Huffman.)
 *
 * Returns one of the BS_* block-state codes.
 */
function deflate_huff(s, flush) {
  var bflush;             /* set if current block must be flushed */

  for (;;) {
    /* Make sure that we have a literal to write. */
    if (s.lookahead === 0) {
      fill_window(s);
      if (s.lookahead === 0) {
        if (flush === Z_NO_FLUSH) {
          return BS_NEED_MORE;
        }
        break;      /* flush the current block */
      }
    }

    /* Output a literal byte */
    s.match_length = 0;
    //Tracevv((stderr,"%c", s->window[s->strstart]));
    /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
    bflush = trees._tr_tally(s, 0, s.window[s.strstart]);
    s.lookahead--;
    s.strstart++;
    if (bflush) {
      /*** FLUSH_BLOCK(s, 0); ***/
      flush_block_only(s, false);
      if (s.strm.avail_out === 0) {
        return BS_NEED_MORE;
      }
      /***/
    }
  }
  s.insert = 0;
  if (flush === Z_FINISH) {
    /*** FLUSH_BLOCK(s, 1); ***/
    flush_block_only(s, true);
    if (s.strm.avail_out === 0) {
      return BS_FINISH_STARTED;
    }
    /***/
    return BS_FINISH_DONE;
  }
  if (s.last_lit) {
    /*** FLUSH_BLOCK(s, 0); ***/
    flush_block_only(s, false);
    if (s.strm.avail_out === 0) {
      return BS_NEED_MORE;
    }
    /***/
  }
  return BS_BLOCK_DONE;
}

/* Values for max_lazy_match, good_match
 * and max_chain_length, depending on
 * the desired pack level (0..9). The values given below have been tuned to
 * exclude worst case performance for pathological files. Better values may be
 * found for specific files.
 */

/* One row of the compression tuning table: search heuristics plus the
 * deflate_* function implementing that level's strategy. */
function Config(good_length, max_lazy, nice_length, max_chain, func) {
  this.good_length = good_length;
  this.max_lazy = max_lazy;
  this.nice_length = nice_length;
  this.max_chain = max_chain;
  this.func = func;
}

var configuration_table;

/* Indexed by compression level 0..9. */
configuration_table = [
  /*      good lazy nice chain */
  new Config(0, 0, 0, 0, deflate_stored),          /* 0 store only */
  new Config(4, 4, 8, 4, deflate_fast),            /* 1 max speed, no lazy matches */
  new Config(4, 5, 16, 8, deflate_fast),           /* 2 */
  new Config(4, 6, 32, 32, deflate_fast),          /* 3 */

  new Config(4, 4, 16, 16, deflate_slow),          /* 4 lazy matches */
  new Config(8, 16, 32, 32, deflate_slow),         /* 5 */
  new Config(8, 16, 128, 128, deflate_slow),       /* 6 */
  new Config(8, 32, 128, 256, deflate_slow),       /* 7 */
  new Config(32, 128, 258, 1024, deflate_slow),    /* 8 */
  new Config(32, 258, 258, 4096, deflate_slow)     /* 9 max compression */
];


/* ===========================================================================
 * Initialize the "longest match" routines for a new zlib stream
 */
function lm_init(s) {
  s.window_size = 2 * s.w_size;

  /*** CLEAR_HASH(s); ***/
  zero(s.head); // Fill with NIL (= 0);

  /* Set the default configuration parameters:
   */
  s.max_lazy_match = configuration_table[s.level].max_lazy;
  s.good_match = configuration_table[s.level].good_length;
  s.nice_match = configuration_table[s.level].nice_length;
  s.max_chain_length = configuration_table[s.level].max_chain;

  s.strstart = 0;
  s.block_start = 0;
  s.lookahead = 0;
  s.insert = 0;
  s.match_length = s.prev_length = MIN_MATCH - 1;
s.match_available = 0; 6954 s.ins_h = 0; 6955 } 6956 6957 6958 function DeflateState() { 6959 this.strm = null; /* pointer back to this zlib stream */ 6960 this.status = 0; /* as the name implies */ 6961 this.pending_buf = null; /* output still pending */ 6962 this.pending_buf_size = 0; /* size of pending_buf */ 6963 this.pending_out = 0; /* next pending byte to output to the stream */ 6964 this.pending = 0; /* nb of bytes in the pending buffer */ 6965 this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ 6966 this.gzhead = null; /* gzip header information to write */ 6967 this.gzindex = 0; /* where in extra, name, or comment */ 6968 this.method = Z_DEFLATED; /* can only be DEFLATED */ 6969 this.last_flush = -1; /* value of flush param for previous deflate call */ 6970 6971 this.w_size = 0; /* LZ77 window size (32K by default) */ 6972 this.w_bits = 0; /* log2(w_size) (8..16) */ 6973 this.w_mask = 0; /* w_size - 1 */ 6974 6975 this.window = null; 6976 /* Sliding window. Input bytes are read into the second half of the window, 6977 * and move to the first half later to keep a dictionary of at least wSize 6978 * bytes. With this organization, matches are limited to a distance of 6979 * wSize-MAX_MATCH bytes, but this ensures that IO is always 6980 * performed with a length multiple of the block size. 6981 */ 6982 6983 this.window_size = 0; 6984 /* Actual size of window: 2*wSize, except when the user input buffer 6985 * is directly used as sliding window. 6986 */ 6987 6988 this.prev = null; 6989 /* Link to older string with same hash index. To limit the size of this 6990 * array to 64K, this link is maintained only for the last 32K strings. 6991 * An index in this array is thus a window index modulo 32K. 6992 */ 6993 6994 this.head = null; /* Heads of the hash chains or NIL. 
*/ 6995 6996 this.ins_h = 0; /* hash index of string to be inserted */ 6997 this.hash_size = 0; /* number of elements in hash table */ 6998 this.hash_bits = 0; /* log2(hash_size) */ 6999 this.hash_mask = 0; /* hash_size-1 */ 7000 7001 this.hash_shift = 0; 7002 /* Number of bits by which ins_h must be shifted at each input 7003 * step. It must be such that after MIN_MATCH steps, the oldest 7004 * byte no longer takes part in the hash key, that is: 7005 * hash_shift * MIN_MATCH >= hash_bits 7006 */ 7007 7008 this.block_start = 0; 7009 /* Window position at the beginning of the current output block. Gets 7010 * negative when the window is moved backwards. 7011 */ 7012 7013 this.match_length = 0; /* length of best match */ 7014 this.prev_match = 0; /* previous match */ 7015 this.match_available = 0; /* set if previous match exists */ 7016 this.strstart = 0; /* start of string to insert */ 7017 this.match_start = 0; /* start of matching string */ 7018 this.lookahead = 0; /* number of valid bytes ahead in window */ 7019 7020 this.prev_length = 0; 7021 /* Length of the best match at previous step. Matches not greater than this 7022 * are discarded. This is used in the lazy match evaluation. 7023 */ 7024 7025 this.max_chain_length = 0; 7026 /* To speed up deflation, hash chains are never searched beyond this 7027 * length. A higher limit improves compression ratio but degrades the 7028 * speed. 7029 */ 7030 7031 this.max_lazy_match = 0; 7032 /* Attempt to find a better match only when the current match is strictly 7033 * smaller than this value. This mechanism is used only for compression 7034 * levels >= 4. 7035 */ 7036 // That's alias to max_lazy_match, don't use directly 7037 //this.max_insert_length = 0; 7038 /* Insert new strings in the hash table only if the match length is not 7039 * greater than this length. This saves time but degrades compression. 7040 * max_insert_length is used only for compression levels <= 3. 
7041 */ 7042 7043 this.level = 0; /* compression level (1..9) */ 7044 this.strategy = 0; /* favor or force Huffman coding*/ 7045 7046 this.good_match = 0; 7047 /* Use a faster search when the previous match is longer than this */ 7048 7049 this.nice_match = 0; /* Stop searching when current match exceeds this */ 7050 7051 /* used by trees.c: */ 7052 7053 /* Didn't use ct_data typedef below to suppress compiler warning */ 7054 7055 // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ 7056 // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ 7057 // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ 7058 7059 // Use flat array of DOUBLE size, with interleaved fata, 7060 // because JS does not support effective 7061 this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2); 7062 this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2); 7063 this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2); 7064 zero(this.dyn_ltree); 7065 zero(this.dyn_dtree); 7066 zero(this.bl_tree); 7067 7068 this.l_desc = null; /* desc. for literal tree */ 7069 this.d_desc = null; /* desc. for distance tree */ 7070 this.bl_desc = null; /* desc. for bit length tree */ 7071 7072 //ush bl_count[MAX_BITS+1]; 7073 this.bl_count = new utils.Buf16(MAX_BITS + 1); 7074 /* number of codes at each bit length for an optimal tree */ 7075 7076 //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ 7077 this.heap = new utils.Buf16(2 * L_CODES + 1); /* heap used to build the Huffman trees */ 7078 zero(this.heap); 7079 7080 this.heap_len = 0; /* number of elements in the heap */ 7081 this.heap_max = 0; /* element of largest frequency */ 7082 /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. 7083 * The same heap array is used to build all trees. 
7084 */ 7085 7086 this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1]; 7087 zero(this.depth); 7088 /* Depth of each subtree used as tie breaker for trees of equal frequency 7089 */ 7090 7091 this.l_buf = 0; /* buffer index for literals or lengths */ 7092 7093 this.lit_bufsize = 0; 7094 /* Size of match buffer for literals/lengths. There are 4 reasons for 7095 * limiting lit_bufsize to 64K: 7096 * - frequencies can be kept in 16 bit counters 7097 * - if compression is not successful for the first block, all input 7098 * data is still in the window so we can still emit a stored block even 7099 * when input comes from standard input. (This can also be done for 7100 * all blocks if lit_bufsize is not greater than 32K.) 7101 * - if compression is not successful for a file smaller than 64K, we can 7102 * even emit a stored file instead of a stored block (saving 5 bytes). 7103 * This is applicable only for zip (not gzip or zlib). 7104 * - creating new Huffman trees less frequently may not provide fast 7105 * adaptation to changes in the input data statistics. (Take for 7106 * example a binary file with poorly compressible code followed by 7107 * a highly compressible string table.) Smaller buffer sizes give 7108 * fast adaptation but have of course the overhead of transmitting 7109 * trees more frequently. 7110 * - I can't count above 4 7111 */ 7112 7113 this.last_lit = 0; /* running index in l_buf */ 7114 7115 this.d_buf = 0; 7116 /* Buffer index for distances. To simplify the code, d_buf and l_buf have 7117 * the same number of elements. To use different lengths, an extra flag 7118 * array would be necessary. 
7119 */ 7120 7121 this.opt_len = 0; /* bit length of current block with optimal trees */ 7122 this.static_len = 0; /* bit length of current block with static trees */ 7123 this.matches = 0; /* number of string matches in current block */ 7124 this.insert = 0; /* bytes at end of window left to insert */ 7125 7126 7127 this.bi_buf = 0; 7128 /* Output buffer. bits are inserted starting at the bottom (least 7129 * significant bits). 7130 */ 7131 this.bi_valid = 0; 7132 /* Number of valid bits in bi_buf. All bits above the last valid bit 7133 * are always zero. 7134 */ 7135 7136 // Used for window memory init. We safely ignore it for JS. That makes 7137 // sense only for pointers and memory check tools. 7138 //this.high_water = 0; 7139 /* High water mark offset in window for initialized bytes -- bytes above 7140 * this are set to zero in order to avoid memory check warnings when 7141 * longest match routines access bytes past the input. This is then 7142 * updated to the new high water mark. 7143 */ 7144 } 7145 7146 7147 function deflateResetKeep(strm) { 7148 var s; 7149 7150 if (!strm || !strm.state) { 7151 return err(strm, Z_STREAM_ERROR); 7152 } 7153 7154 strm.total_in = strm.total_out = 0; 7155 strm.data_type = Z_UNKNOWN; 7156 7157 s = strm.state; 7158 s.pending = 0; 7159 s.pending_out = 0; 7160 7161 if (s.wrap < 0) { 7162 s.wrap = -s.wrap; 7163 /* was made negative by deflate(..., Z_FINISH); */ 7164 } 7165 s.status = (s.wrap ? INIT_STATE : BUSY_STATE); 7166 strm.adler = (s.wrap === 2) ? 
7167 0 // crc32(0, Z_NULL, 0) 7168 : 7169 1; // adler32(0, Z_NULL, 0) 7170 s.last_flush = Z_NO_FLUSH; 7171 trees._tr_init(s); 7172 return Z_OK; 7173 } 7174 7175 7176 function deflateReset(strm) { 7177 var ret = deflateResetKeep(strm); 7178 if (ret === Z_OK) { 7179 lm_init(strm.state); 7180 } 7181 return ret; 7182 } 7183 7184 7185 function deflateSetHeader(strm, head) { 7186 if (!strm || !strm.state) { return Z_STREAM_ERROR; } 7187 if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; } 7188 strm.state.gzhead = head; 7189 return Z_OK; 7190 } 7191 7192 7193 function deflateInit2(strm, level, method, windowBits, memLevel, strategy) { 7194 if (!strm) { // === Z_NULL 7195 return Z_STREAM_ERROR; 7196 } 7197 var wrap = 1; 7198 7199 if (level === Z_DEFAULT_COMPRESSION) { 7200 level = 6; 7201 } 7202 7203 if (windowBits < 0) { /* suppress zlib wrapper */ 7204 wrap = 0; 7205 windowBits = -windowBits; 7206 } 7207 7208 else if (windowBits > 15) { 7209 wrap = 2; /* write gzip wrapper instead */ 7210 windowBits -= 16; 7211 } 7212 7213 7214 if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED || 7215 windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || 7216 strategy < 0 || strategy > Z_FIXED) { 7217 return err(strm, Z_STREAM_ERROR); 7218 } 7219 7220 7221 if (windowBits === 8) { 7222 windowBits = 9; 7223 } 7224 /* until 256-byte window bug fixed */ 7225 7226 var s = new DeflateState(); 7227 7228 strm.state = s; 7229 s.strm = strm; 7230 7231 s.wrap = wrap; 7232 s.gzhead = null; 7233 s.w_bits = windowBits; 7234 s.w_size = 1 << s.w_bits; 7235 s.w_mask = s.w_size - 1; 7236 7237 s.hash_bits = memLevel + 7; 7238 s.hash_size = 1 << s.hash_bits; 7239 s.hash_mask = s.hash_size - 1; 7240 s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH); 7241 7242 s.window = new utils.Buf8(s.w_size * 2); 7243 s.head = new utils.Buf16(s.hash_size); 7244 s.prev = new utils.Buf16(s.w_size); 7245 7246 // Don't need mem init magic for JS. 
7247 //s.high_water = 0; /* nothing written to s->window yet */ 7248 7249 s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ 7250 7251 s.pending_buf_size = s.lit_bufsize * 4; 7252 7253 //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); 7254 //s->pending_buf = (uchf *) overlay; 7255 s.pending_buf = new utils.Buf8(s.pending_buf_size); 7256 7257 // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`) 7258 //s->d_buf = overlay + s->lit_bufsize/sizeof(ush); 7259 s.d_buf = 1 * s.lit_bufsize; 7260 7261 //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; 7262 s.l_buf = (1 + 2) * s.lit_bufsize; 7263 7264 s.level = level; 7265 s.strategy = strategy; 7266 s.method = method; 7267 7268 return deflateReset(strm); 7269 } 7270 7271 function deflateInit(strm, level) { 7272 return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY); 7273 } 7274 7275 7276 function deflate(strm, flush) { 7277 var old_flush, s; 7278 var beg, val; // for gzip header write only 7279 7280 if (!strm || !strm.state || 7281 flush > Z_BLOCK || flush < 0) { 7282 return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR; 7283 } 7284 7285 s = strm.state; 7286 7287 if (!strm.output || 7288 (!strm.input && strm.avail_in !== 0) || 7289 (s.status === FINISH_STATE && flush !== Z_FINISH)) { 7290 return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR); 7291 } 7292 7293 s.strm = strm; /* just in case */ 7294 old_flush = s.last_flush; 7295 s.last_flush = flush; 7296 7297 /* Write the header */ 7298 if (s.status === INIT_STATE) { 7299 7300 if (s.wrap === 2) { // GZIP header 7301 strm.adler = 0; //crc32(0L, Z_NULL, 0); 7302 put_byte(s, 31); 7303 put_byte(s, 139); 7304 put_byte(s, 8); 7305 if (!s.gzhead) { // s->gzhead == Z_NULL 7306 put_byte(s, 0); 7307 put_byte(s, 0); 7308 put_byte(s, 0); 7309 put_byte(s, 0); 7310 put_byte(s, 0); 7311 put_byte(s, s.level === 9 ? 2 : 7312 (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? 
7313 4 : 0)); 7314 put_byte(s, OS_CODE); 7315 s.status = BUSY_STATE; 7316 } 7317 else { 7318 put_byte(s, (s.gzhead.text ? 1 : 0) + 7319 (s.gzhead.hcrc ? 2 : 0) + 7320 (!s.gzhead.extra ? 0 : 4) + 7321 (!s.gzhead.name ? 0 : 8) + 7322 (!s.gzhead.comment ? 0 : 16) 7323 ); 7324 put_byte(s, s.gzhead.time & 0xff); 7325 put_byte(s, (s.gzhead.time >> 8) & 0xff); 7326 put_byte(s, (s.gzhead.time >> 16) & 0xff); 7327 put_byte(s, (s.gzhead.time >> 24) & 0xff); 7328 put_byte(s, s.level === 9 ? 2 : 7329 (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? 7330 4 : 0)); 7331 put_byte(s, s.gzhead.os & 0xff); 7332 if (s.gzhead.extra && s.gzhead.extra.length) { 7333 put_byte(s, s.gzhead.extra.length & 0xff); 7334 put_byte(s, (s.gzhead.extra.length >> 8) & 0xff); 7335 } 7336 if (s.gzhead.hcrc) { 7337 strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0); 7338 } 7339 s.gzindex = 0; 7340 s.status = EXTRA_STATE; 7341 } 7342 } 7343 else // DEFLATE header 7344 { 7345 var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8; 7346 var level_flags = -1; 7347 7348 if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) { 7349 level_flags = 0; 7350 } else if (s.level < 6) { 7351 level_flags = 1; 7352 } else if (s.level === 6) { 7353 level_flags = 2; 7354 } else { 7355 level_flags = 3; 7356 } 7357 header |= (level_flags << 6); 7358 if (s.strstart !== 0) { header |= PRESET_DICT; } 7359 header += 31 - (header % 31); 7360 7361 s.status = BUSY_STATE; 7362 putShortMSB(s, header); 7363 7364 /* Save the adler32 of the preset dictionary: */ 7365 if (s.strstart !== 0) { 7366 putShortMSB(s, strm.adler >>> 16); 7367 putShortMSB(s, strm.adler & 0xffff); 7368 } 7369 strm.adler = 1; // adler32(0L, Z_NULL, 0); 7370 } 7371 } 7372 7373 //#ifdef GZIP 7374 if (s.status === EXTRA_STATE) { 7375 if (s.gzhead.extra/* != Z_NULL*/) { 7376 beg = s.pending; /* start of bytes to update crc */ 7377 7378 while (s.gzindex < (s.gzhead.extra.length & 0xffff)) { 7379 if (s.pending === s.pending_buf_size) { 7380 if (s.gzhead.hcrc && 
s.pending > beg) { 7381 strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); 7382 } 7383 flush_pending(strm); 7384 beg = s.pending; 7385 if (s.pending === s.pending_buf_size) { 7386 break; 7387 } 7388 } 7389 put_byte(s, s.gzhead.extra[s.gzindex] & 0xff); 7390 s.gzindex++; 7391 } 7392 if (s.gzhead.hcrc && s.pending > beg) { 7393 strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); 7394 } 7395 if (s.gzindex === s.gzhead.extra.length) { 7396 s.gzindex = 0; 7397 s.status = NAME_STATE; 7398 } 7399 } 7400 else { 7401 s.status = NAME_STATE; 7402 } 7403 } 7404 if (s.status === NAME_STATE) { 7405 if (s.gzhead.name/* != Z_NULL*/) { 7406 beg = s.pending; /* start of bytes to update crc */ 7407 //int val; 7408 7409 do { 7410 if (s.pending === s.pending_buf_size) { 7411 if (s.gzhead.hcrc && s.pending > beg) { 7412 strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); 7413 } 7414 flush_pending(strm); 7415 beg = s.pending; 7416 if (s.pending === s.pending_buf_size) { 7417 val = 1; 7418 break; 7419 } 7420 } 7421 // JS specific: little magic to add zero terminator to end of string 7422 if (s.gzindex < s.gzhead.name.length) { 7423 val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff; 7424 } else { 7425 val = 0; 7426 } 7427 put_byte(s, val); 7428 } while (val !== 0); 7429 7430 if (s.gzhead.hcrc && s.pending > beg) { 7431 strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); 7432 } 7433 if (val === 0) { 7434 s.gzindex = 0; 7435 s.status = COMMENT_STATE; 7436 } 7437 } 7438 else { 7439 s.status = COMMENT_STATE; 7440 } 7441 } 7442 if (s.status === COMMENT_STATE) { 7443 if (s.gzhead.comment/* != Z_NULL*/) { 7444 beg = s.pending; /* start of bytes to update crc */ 7445 //int val; 7446 7447 do { 7448 if (s.pending === s.pending_buf_size) { 7449 if (s.gzhead.hcrc && s.pending > beg) { 7450 strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); 7451 } 7452 flush_pending(strm); 7453 beg = s.pending; 7454 if (s.pending 
=== s.pending_buf_size) { 7455 val = 1; 7456 break; 7457 } 7458 } 7459 // JS specific: little magic to add zero terminator to end of string 7460 if (s.gzindex < s.gzhead.comment.length) { 7461 val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff; 7462 } else { 7463 val = 0; 7464 } 7465 put_byte(s, val); 7466 } while (val !== 0); 7467 7468 if (s.gzhead.hcrc && s.pending > beg) { 7469 strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); 7470 } 7471 if (val === 0) { 7472 s.status = HCRC_STATE; 7473 } 7474 } 7475 else { 7476 s.status = HCRC_STATE; 7477 } 7478 } 7479 if (s.status === HCRC_STATE) { 7480 if (s.gzhead.hcrc) { 7481 if (s.pending + 2 > s.pending_buf_size) { 7482 flush_pending(strm); 7483 } 7484 if (s.pending + 2 <= s.pending_buf_size) { 7485 put_byte(s, strm.adler & 0xff); 7486 put_byte(s, (strm.adler >> 8) & 0xff); 7487 strm.adler = 0; //crc32(0L, Z_NULL, 0); 7488 s.status = BUSY_STATE; 7489 } 7490 } 7491 else { 7492 s.status = BUSY_STATE; 7493 } 7494 } 7495 //#endif 7496 7497 /* Flush as much pending output as possible */ 7498 if (s.pending !== 0) { 7499 flush_pending(strm); 7500 if (strm.avail_out === 0) { 7501 /* Since avail_out is 0, deflate will be called again with 7502 * more output space, but possibly with both pending and 7503 * avail_in equal to zero. There won't be anything to do, 7504 * but this is not an error situation so make sure we 7505 * return OK instead of BUF_ERROR at next call of deflate: 7506 */ 7507 s.last_flush = -1; 7508 return Z_OK; 7509 } 7510 7511 /* Make sure there is something to do and avoid duplicate consecutive 7512 * flushes. For repeated and useless calls with Z_FINISH, we keep 7513 * returning Z_STREAM_END instead of Z_BUF_ERROR. 
7514 */ 7515 } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) && 7516 flush !== Z_FINISH) { 7517 return err(strm, Z_BUF_ERROR); 7518 } 7519 7520 /* User must not provide more input after the first FINISH: */ 7521 if (s.status === FINISH_STATE && strm.avail_in !== 0) { 7522 return err(strm, Z_BUF_ERROR); 7523 } 7524 7525 /* Start a new block or continue the current one. 7526 */ 7527 if (strm.avail_in !== 0 || s.lookahead !== 0 || 7528 (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) { 7529 var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) : 7530 (s.strategy === Z_RLE ? deflate_rle(s, flush) : 7531 configuration_table[s.level].func(s, flush)); 7532 7533 if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) { 7534 s.status = FINISH_STATE; 7535 } 7536 if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) { 7537 if (strm.avail_out === 0) { 7538 s.last_flush = -1; 7539 /* avoid BUF_ERROR next call, see above */ 7540 } 7541 return Z_OK; 7542 /* If flush != Z_NO_FLUSH && avail_out == 0, the next call 7543 * of deflate should use the same flush parameter to make sure 7544 * that the flush is complete. So we don't have to output an 7545 * empty block here, this will be done at next call. This also 7546 * ensures that for a very small output buffer, we emit at most 7547 * one empty block. 7548 */ 7549 } 7550 if (bstate === BS_BLOCK_DONE) { 7551 if (flush === Z_PARTIAL_FLUSH) { 7552 trees._tr_align(s); 7553 } 7554 else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ 7555 7556 trees._tr_stored_block(s, 0, 0, false); 7557 /* For a full flush, this empty block will be recognized 7558 * as a special marker by inflate_sync(). 
7559 */ 7560 if (flush === Z_FULL_FLUSH) { 7561 /*** CLEAR_HASH(s); ***/ /* forget history */ 7562 zero(s.head); // Fill with NIL (= 0); 7563 7564 if (s.lookahead === 0) { 7565 s.strstart = 0; 7566 s.block_start = 0; 7567 s.insert = 0; 7568 } 7569 } 7570 } 7571 flush_pending(strm); 7572 if (strm.avail_out === 0) { 7573 s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */ 7574 return Z_OK; 7575 } 7576 } 7577 } 7578 //Assert(strm->avail_out > 0, "bug2"); 7579 //if (strm.avail_out <= 0) { throw new Error("bug2");} 7580 7581 if (flush !== Z_FINISH) { return Z_OK; } 7582 if (s.wrap <= 0) { return Z_STREAM_END; } 7583 7584 /* Write the trailer */ 7585 if (s.wrap === 2) { 7586 put_byte(s, strm.adler & 0xff); 7587 put_byte(s, (strm.adler >> 8) & 0xff); 7588 put_byte(s, (strm.adler >> 16) & 0xff); 7589 put_byte(s, (strm.adler >> 24) & 0xff); 7590 put_byte(s, strm.total_in & 0xff); 7591 put_byte(s, (strm.total_in >> 8) & 0xff); 7592 put_byte(s, (strm.total_in >> 16) & 0xff); 7593 put_byte(s, (strm.total_in >> 24) & 0xff); 7594 } 7595 else 7596 { 7597 putShortMSB(s, strm.adler >>> 16); 7598 putShortMSB(s, strm.adler & 0xffff); 7599 } 7600 7601 flush_pending(strm); 7602 /* If avail_out is zero, the application will call deflate again 7603 * to flush the rest. 7604 */ 7605 if (s.wrap > 0) { s.wrap = -s.wrap; } 7606 /* write the trailer only once! */ 7607 return s.pending !== 0 ? Z_OK : Z_STREAM_END; 7608 } 7609 7610 function deflateEnd(strm) { 7611 var status; 7612 7613 if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { 7614 return Z_STREAM_ERROR; 7615 } 7616 7617 status = strm.state.status; 7618 if (status !== INIT_STATE && 7619 status !== EXTRA_STATE && 7620 status !== NAME_STATE && 7621 status !== COMMENT_STATE && 7622 status !== HCRC_STATE && 7623 status !== BUSY_STATE && 7624 status !== FINISH_STATE 7625 ) { 7626 return err(strm, Z_STREAM_ERROR); 7627 } 7628 7629 strm.state = null; 7630 7631 return status === BUSY_STATE ? 
err(strm, Z_DATA_ERROR) : Z_OK; 7632 } 7633 7634 7635 /* ========================================================================= 7636 * Initializes the compression dictionary from the given byte 7637 * sequence without producing any compressed output. 7638 */ 7639 function deflateSetDictionary(strm, dictionary) { 7640 var dictLength = dictionary.length; 7641 7642 var s; 7643 var str, n; 7644 var wrap; 7645 var avail; 7646 var next; 7647 var input; 7648 var tmpDict; 7649 7650 if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { 7651 return Z_STREAM_ERROR; 7652 } 7653 7654 s = strm.state; 7655 wrap = s.wrap; 7656 7657 if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) { 7658 return Z_STREAM_ERROR; 7659 } 7660 7661 /* when using zlib wrappers, compute Adler-32 for provided dictionary */ 7662 if (wrap === 1) { 7663 /* adler32(strm->adler, dictionary, dictLength); */ 7664 strm.adler = adler32(strm.adler, dictionary, dictLength, 0); 7665 } 7666 7667 s.wrap = 0; /* avoid computing Adler-32 in read_buf */ 7668 7669 /* if dictionary would fill window, just replace the history */ 7670 if (dictLength >= s.w_size) { 7671 if (wrap === 0) { /* already empty otherwise */ 7672 /*** CLEAR_HASH(s); ***/ 7673 zero(s.head); // Fill with NIL (= 0); 7674 s.strstart = 0; 7675 s.block_start = 0; 7676 s.insert = 0; 7677 } 7678 /* use the tail */ 7679 // dictionary = dictionary.slice(dictLength - s.w_size); 7680 tmpDict = new utils.Buf8(s.w_size); 7681 utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0); 7682 dictionary = tmpDict; 7683 dictLength = s.w_size; 7684 } 7685 /* insert dictionary into window and hash */ 7686 avail = strm.avail_in; 7687 next = strm.next_in; 7688 input = strm.input; 7689 strm.avail_in = dictLength; 7690 strm.next_in = 0; 7691 strm.input = dictionary; 7692 fill_window(s); 7693 while (s.lookahead >= MIN_MATCH) { 7694 str = s.strstart; 7695 n = s.lookahead - (MIN_MATCH - 1); 7696 do { 7697 /* UPDATE_HASH(s, s->ins_h, 
s->window[str + MIN_MATCH-1]); */ 7698 s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask; 7699 7700 s.prev[str & s.w_mask] = s.head[s.ins_h]; 7701 7702 s.head[s.ins_h] = str; 7703 str++; 7704 } while (--n); 7705 s.strstart = str; 7706 s.lookahead = MIN_MATCH - 1; 7707 fill_window(s); 7708 } 7709 s.strstart += s.lookahead; 7710 s.block_start = s.strstart; 7711 s.insert = s.lookahead; 7712 s.lookahead = 0; 7713 s.match_length = s.prev_length = MIN_MATCH - 1; 7714 s.match_available = 0; 7715 strm.next_in = next; 7716 strm.input = input; 7717 strm.avail_in = avail; 7718 s.wrap = wrap; 7719 return Z_OK; 7720 } 7721 7722 7723 exports.deflateInit = deflateInit; 7724 exports.deflateInit2 = deflateInit2; 7725 exports.deflateReset = deflateReset; 7726 exports.deflateResetKeep = deflateResetKeep; 7727 exports.deflateSetHeader = deflateSetHeader; 7728 exports.deflate = deflate; 7729 exports.deflateEnd = deflateEnd; 7730 exports.deflateSetDictionary = deflateSetDictionary; 7731 exports.deflateInfo = 'pako deflate (from Nodeca project)'; 7732 7733 /* Not implemented 7734 exports.deflateBound = deflateBound; 7735 exports.deflateCopy = deflateCopy; 7736 exports.deflateParams = deflateParams; 7737 exports.deflatePending = deflatePending; 7738 exports.deflatePrime = deflatePrime; 7739 exports.deflateTune = deflateTune; 7740 */ 7741 7742 },{"../utils/common":41,"./adler32":43,"./crc32":45,"./messages":51,"./trees":52}],47:[function(require,module,exports){ 7743 'use strict'; 7744 7745 // (C) 1995-2013 Jean-loup Gailly and Mark Adler 7746 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin 7747 // 7748 // This software is provided 'as-is', without any express or implied 7749 // warranty. In no event will the authors be held liable for any damages 7750 // arising from the use of this software. 
7751 // 7752 // Permission is granted to anyone to use this software for any purpose, 7753 // including commercial applications, and to alter it and redistribute it 7754 // freely, subject to the following restrictions: 7755 // 7756 // 1. The origin of this software must not be misrepresented; you must not 7757 // claim that you wrote the original software. If you use this software 7758 // in a product, an acknowledgment in the product documentation would be 7759 // appreciated but is not required. 7760 // 2. Altered source versions must be plainly marked as such, and must not be 7761 // misrepresented as being the original software. 7762 // 3. This notice may not be removed or altered from any source distribution. 7763 7764 function GZheader() { 7765 /* true if compressed data believed to be text */ 7766 this.text = 0; 7767 /* modification time */ 7768 this.time = 0; 7769 /* extra flags (not used when writing a gzip file) */ 7770 this.xflags = 0; 7771 /* operating system */ 7772 this.os = 0; 7773 /* pointer to extra field or Z_NULL if none */ 7774 this.extra = null; 7775 /* extra field length (valid if extra != Z_NULL) */ 7776 this.extra_len = 0; // Actually, we don't need it in JS, 7777 // but leave for few code modifications 7778 7779 // 7780 // Setup limits is not necessary because in js we should not preallocate memory 7781 // for inflate use constant limit in 65536 bytes 7782 // 7783 7784 /* space at extra (only when reading header) */ 7785 // this.extra_max = 0; 7786 /* pointer to zero-terminated file name or Z_NULL */ 7787 this.name = ''; 7788 /* space at name (only when reading header) */ 7789 // this.name_max = 0; 7790 /* pointer to zero-terminated comment or Z_NULL */ 7791 this.comment = ''; 7792 /* space at comment (only when reading header) */ 7793 // this.comm_max = 0; 7794 /* true if there was or will be a header crc */ 7795 this.hcrc = 0; 7796 /* true when done reading gzip header (not used when writing a gzip file) */ 7797 this.done = false; 7798 } 
7799 7800 module.exports = GZheader; 7801 7802 },{}],48:[function(require,module,exports){ 7803 'use strict'; 7804 7805 // (C) 1995-2013 Jean-loup Gailly and Mark Adler 7806 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin 7807 // 7808 // This software is provided 'as-is', without any express or implied 7809 // warranty. In no event will the authors be held liable for any damages 7810 // arising from the use of this software. 7811 // 7812 // Permission is granted to anyone to use this software for any purpose, 7813 // including commercial applications, and to alter it and redistribute it 7814 // freely, subject to the following restrictions: 7815 // 7816 // 1. The origin of this software must not be misrepresented; you must not 7817 // claim that you wrote the original software. If you use this software 7818 // in a product, an acknowledgment in the product documentation would be 7819 // appreciated but is not required. 7820 // 2. Altered source versions must be plainly marked as such, and must not be 7821 // misrepresented as being the original software. 7822 // 3. This notice may not be removed or altered from any source distribution. 7823 7824 // See state defs from inflate.js 7825 var BAD = 30; /* got a data error -- remain here until reset */ 7826 var TYPE = 12; /* i: waiting for type bits, including last-flag bit */ 7827 7828 /* 7829 Decode literal, length, and distance codes and write out the resulting 7830 literal and match bytes until either not enough input or output is 7831 available, an end-of-block is encountered, or a data error is encountered. 7832 When large enough input and output buffers are supplied to inflate(), for 7833 example, a 16K input buffer and a 64K output buffer, more than 95% of the 7834 inflate execution time is spent in this routine. 
7835 7836 Entry assumptions: 7837 7838 state.mode === LEN 7839 strm.avail_in >= 6 7840 strm.avail_out >= 258 7841 start >= strm.avail_out 7842 state.bits < 8 7843 7844 On return, state.mode is one of: 7845 7846 LEN -- ran out of enough output space or enough available input 7847 TYPE -- reached end of block code, inflate() to interpret next block 7848 BAD -- error in block data 7849 7850 Notes: 7851 7852 - The maximum input bits used by a length/distance pair is 15 bits for the 7853 length code, 5 bits for the length extra, 15 bits for the distance code, 7854 and 13 bits for the distance extra. This totals 48 bits, or six bytes. 7855 Therefore if strm.avail_in >= 6, then there is enough input to avoid 7856 checking for available input while decoding. 7857 7858 - The maximum bytes that a single length/distance pair can output is 258 7859 bytes, which is the maximum length that can be coded. inflate_fast() 7860 requires strm.avail_out >= 258 for each loop to avoid checking for 7861 output space. 
*/

// Decode literal/length/distance codes as fast as possible until either not
// enough input or output is available, an end-of-block marker is reached, or
// a data error occurs. This is the zlib "inffast" hot path: inflate() only
// enters it when at least 6 bytes of input and 258 bytes of output space are
// available, which lets the loop skip most per-byte availability checks.
//
// Parameters:
//   strm  - the inflate stream (next_in/next_out/avail_in/avail_out/state)
//   start - inflate()'s starting value of strm.avail_out; used to compute how
//           far back into the current output the copy distance may reach
//           before bytes must come from the sliding window instead.
//
// On return, strm and strm.state are updated so that inflate() can resume
// its normal byte-at-a-time state machine.
module.exports = function inflate_fast(strm, start) {
  var state;
  var _in;                    /* local strm.input */
  var last;                   /* have enough input while in < last */
  var _out;                   /* local strm.output */
  var beg;                    /* inflate()'s initial strm.output */
  var end;                    /* while out < end, enough space available */
//#ifdef INFLATE_STRICT
  var dmax;                   /* maximum distance from zlib header */
//#endif
  var wsize;                  /* window size or zero if not using window */
  var whave;                  /* valid bytes in the window */
  var wnext;                  /* window write index */
  // Use `s_window` instead `window`, avoid conflict with instrumentation tools
  var s_window;               /* allocated sliding window, if wsize != 0 */
  var hold;                   /* local strm.hold */
  var bits;                   /* local strm.bits */
  var lcode;                  /* local strm.lencode */
  var dcode;                  /* local strm.distcode */
  var lmask;                  /* mask for first level of length codes */
  var dmask;                  /* mask for first level of distance codes */
  var here;                   /* retrieved table entry (packed 32-bit int) */
  var op;                     /* code bits, operation, extra bits, or */
                              /* window position, window bytes to copy */
  var len;                    /* match length, unused bytes */
  var dist;                   /* match distance */
  var from;                   /* where to copy match from */
  var from_source;            /* buffer to copy match from (window or output) */


  var input, output; // JS specific, because we have no pointers

  /* copy state to local variables */
  state = strm.state;
  //here = state.here;
  _in = strm.next_in;
  input = strm.input;
  // `- 5` / `- 257` below reserve the guaranteed slack that lets the decode
  // loop read/write without per-byte bounds checks (see entry preconditions).
  last = _in + (strm.avail_in - 5);
  _out = strm.next_out;
  output = strm.output;
  beg = _out - (start - strm.avail_out);
  end = _out + (strm.avail_out - 257);
//#ifdef INFLATE_STRICT
  dmax = state.dmax;
//#endif
  wsize = state.wsize;
  whave = state.whave;
  wnext = state.wnext;
  s_window = state.window;
  hold = state.hold;
  bits = state.bits;
  lcode = state.lencode;
  dcode = state.distcode;
  lmask = (1 << state.lenbits) - 1;
  dmask = (1 << state.distbits) - 1;


  /* decode literals and length/distances until end-of-block or not enough
     input data or output space */

  top:
  do {
    // Keep at least 15 bits in the accumulator: enough for the longest
    // length/literal code.
    if (bits < 15) {
      hold += input[_in++] << bits;
      bits += 8;
      hold += input[_in++] << bits;
      bits += 8;
    }

    here = lcode[hold & lmask];

    dolen:
    for (;;) { // Goto emulation
      // Table entries are packed as (bits << 24) | (op << 16) | val.
      op = here >>> 24/*here.bits*/;
      hold >>>= op;
      bits -= op;
      op = (here >>> 16) & 0xff/*here.op*/;
      if (op === 0) {                          /* literal */
        //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
        //        "inflate:         literal '%c'\n" :
        //        "inflate:         literal 0x%02x\n", here.val));
        output[_out++] = here & 0xffff/*here.val*/;
      }
      else if (op & 16) {                      /* length base */
        len = here & 0xffff/*here.val*/;
        op &= 15;                              /* number of extra bits */
        if (op) {
          if (bits < op) {
            hold += input[_in++] << bits;
            bits += 8;
          }
          len += hold & ((1 << op) - 1);
          hold >>>= op;
          bits -= op;
        }
        //Tracevv((stderr, "inflate:         length %u\n", len));
        if (bits < 15) {
          hold += input[_in++] << bits;
          bits += 8;
          hold += input[_in++] << bits;
          bits += 8;
        }
        here = dcode[hold & dmask];

        dodist:
        for (;;) { // goto emulation
          op = here >>> 24/*here.bits*/;
          hold >>>= op;
          bits -= op;
          op = (here >>> 16) & 0xff/*here.op*/;

          if (op & 16) {                       /* distance base */
            dist = here & 0xffff/*here.val*/;
            op &= 15;                          /* number of extra bits */
            if (bits < op) {
              hold += input[_in++] << bits;
              bits += 8;
              if (bits < op) {
                hold += input[_in++] << bits;
                bits += 8;
              }
            }
            dist += hold & ((1 << op) - 1);
//#ifdef INFLATE_STRICT
            if (dist > dmax) {
              strm.msg = 'invalid distance too far back';
              state.mode = BAD;
              break top;
            }
//#endif
            hold >>>= op;
            bits -= op;
            //Tracevv((stderr, "inflate:         distance %u\n", dist));
            op = _out - beg;                   /* max distance in output */
            if (dist > op) {                   /* see if copy from window */
              op = dist - op;                  /* distance back in window */
              if (op > whave) {
                if (state.sane) {
                  strm.msg = 'invalid distance too far back';
                  state.mode = BAD;
                  break top;
                }

                // (!) This block is disabled in zlib defaults,
                // don't enable it for binary compatibility
//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
//                if (len <= op - whave) {
//                  do {
//                    output[_out++] = 0;
//                  } while (--len);
//                  continue top;
//                }
//                len -= op - whave;
//                do {
//                  output[_out++] = 0;
//                } while (--op > whave);
//                if (op === 0) {
//                  from = _out - dist;
//                  do {
//                    output[_out++] = output[from++];
//                  } while (--len);
//                  continue top;
//                }
//#endif
              }
              from = 0; // window index
              from_source = s_window;
              if (wnext === 0) {               /* very common case */
                from += wsize - op;
                if (op < len) {                /* some from window */
                  len -= op;
                  do {
                    output[_out++] = s_window[from++];
                  } while (--op);
                  from = _out - dist;          /* rest from output */
                  from_source = output;
                }
              }
              else if (wnext < op) {           /* wrap around window */
                from += wsize + wnext - op;
                op -= wnext;
                if (op < len) {                /* some from end of window */
                  len -= op;
                  do {
                    output[_out++] = s_window[from++];
                  } while (--op);
                  from = 0;
                  if (wnext < len) {           /* some from start of window */
                    op = wnext;
                    len -= op;
                    do {
                      output[_out++] = s_window[from++];
                    } while (--op);
                    from = _out - dist;        /* rest from output */
                    from_source = output;
                  }
                }
              }
              else {                           /* contiguous in window */
                from += wnext - op;
                if (op < len) {                /* some from window */
                  len -= op;
                  do {
                    output[_out++] = s_window[from++];
                  } while (--op);
                  from = _out - dist;          /* rest from output */
                  from_source = output;
                }
              }
              // Copy remaining match bytes three at a time, then the tail.
              while (len > 2) {
                output[_out++] = from_source[from++];
                output[_out++] = from_source[from++];
                output[_out++] = from_source[from++];
                len -= 3;
              }
              if (len) {
                output[_out++] = from_source[from++];
                if (len > 1) {
                  output[_out++] = from_source[from++];
                }
              }
            }
            else {
              from = _out - dist;              /* copy direct from output */
              do {                             /* minimum length is three */
                output[_out++] = output[from++];
                output[_out++] = output[from++];
                output[_out++] = output[from++];
                len -= 3;
              } while (len > 2);
              if (len) {
                output[_out++] = output[from++];
                if (len > 1) {
                  output[_out++] = output[from++];
                }
              }
            }
          }
          else if ((op & 64) === 0) {          /* 2nd level distance code */
            here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];
            continue dodist;
          }
          else {
            strm.msg = 'invalid distance code';
            state.mode = BAD;
            break top;
          }

          break; // need to emulate goto via "continue"
        }
      }
      else if ((op & 64) === 0) {              /* 2nd level length code */
        here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];
        continue dolen;
      }
      else if (op & 32) {                      /* end-of-block */
        //Tracevv((stderr, "inflate:         end of block\n"));
        state.mode = TYPE;
        break top;
      }
      else {
        strm.msg = 'invalid literal/length code';
        state.mode = BAD;
        break top;
      }

      break; // need to emulate goto via "continue"
    }
  } while (_in < last && _out < end);

  /* return unused bytes (on entry, bits < 8, so in won't go too far back) */
  len = bits >> 3;
  _in -= len;
  bits -= len << 3;
  hold &= (1 << bits) - 1;

  /* update state and return */
  strm.next_in = _in;
  strm.next_out = _out;
  strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last));
  strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end));
  state.hold = hold;
  state.bits = bits;
  return;
};

},{}],49:[function(require,module,exports){
'use strict';

// (C) 1995-2013 Jean-loup Gailly and Mark Adler
// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//   claim that you wrote the original software. If you use this software
//   in a product, an acknowledgment in the product documentation would be
//   appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//   misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.

var utils = require('../utils/common');
var adler32 = require('./adler32');
var crc32 = require('./crc32');
var inflate_fast = require('./inffast');
var inflate_table = require('./inftrees');

// Code-table kinds passed to inflate_table().
var CODES = 0;
var LENS = 1;
var DISTS = 2;

/* Public constants ==========================================================*/
/* ===========================================================================*/


/* Allowed flush values; see deflate() and inflate() below for details */
//var Z_NO_FLUSH      = 0;
//var Z_PARTIAL_FLUSH = 1;
//var Z_SYNC_FLUSH    = 2;
//var Z_FULL_FLUSH    = 3;
var Z_FINISH = 4;
var Z_BLOCK = 5;
var Z_TREES = 6;


/* Return codes for the compression/decompression functions. Negative values
 * are errors, positive values are used for special but normal events.
 */
var Z_OK = 0;
var Z_STREAM_END = 1;
var Z_NEED_DICT = 2;
//var Z_ERRNO         = -1;
var Z_STREAM_ERROR = -2;
var Z_DATA_ERROR = -3;
var Z_MEM_ERROR = -4;
var Z_BUF_ERROR = -5;
//var Z_VERSION_ERROR = -6;

/* The deflate compression method */
var Z_DEFLATED = 8;


/* STATES ====================================================================*/
/* ===========================================================================*/

// inflate() state-machine modes. "i:" = consumes input, "o:" = produces
// output in that state.
var HEAD = 1;       /* i: waiting for magic header */
var FLAGS = 2;      /* i: waiting for method and flags (gzip) */
var TIME = 3;       /* i: waiting for modification time (gzip) */
var OS = 4;         /* i: waiting for extra flags and operating system (gzip) */
var EXLEN = 5;      /* i: waiting for extra length (gzip) */
var EXTRA = 6;      /* i: waiting for extra bytes (gzip) */
var NAME = 7;       /* i: waiting for end of file name (gzip) */
var COMMENT = 8;    /* i: waiting for end of comment (gzip) */
var HCRC = 9;       /* i: waiting for header crc (gzip) */
var DICTID = 10;    /* i: waiting for dictionary check value */
var DICT = 11;      /* waiting for inflateSetDictionary() call */
var TYPE = 12;      /* i: waiting for type bits, including last-flag bit */
var TYPEDO = 13;    /* i: same, but skip check to exit inflate on new block */
var STORED = 14;    /* i: waiting for stored size (length and complement) */
var COPY_ = 15;     /* i/o: same as COPY below, but only first time in */
var COPY = 16;      /* i/o: waiting for input or output to copy stored block */
var TABLE = 17;     /* i: waiting for dynamic block table lengths */
var LENLENS = 18;   /* i: waiting for code length code lengths */
var CODELENS = 19;  /* i: waiting for length/lit and distance code lengths */
var LEN_ = 20;      /* i: same as LEN below, but only first time in */
var LEN = 21;       /* i: waiting for length/lit/eob code */
var LENEXT = 22;    /* i: waiting for length extra bits */
var DIST = 23;      /* i: waiting for distance code */
var DISTEXT = 24;   /* i: waiting for distance extra bits */
var MATCH = 25;     /* o: waiting for output space to copy string */
var LIT = 26;       /* o: waiting for output space to write literal */
var CHECK = 27;     /* i: waiting for 32-bit check value */
var LENGTH = 28;    /* i: waiting for 32-bit length (gzip) */
var DONE = 29;      /* finished check, done -- remain here until reset */
var BAD = 30;       /* got a data error -- remain here until reset */
var MEM = 31;       /* got an inflate() memory error -- remain here until reset */
var SYNC = 32;      /* looking for synchronization bytes to restart inflate() */

/* ===========================================================================*/


// Maximum table sizes for length/literal and distance codes; see the
// ENOUGH constants discussion in zlib's inftrees.h.
var ENOUGH_LENS = 852;
var ENOUGH_DISTS = 592;
//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);

var MAX_WBITS = 15;
/* 32K LZ77 window */
var DEF_WBITS = MAX_WBITS;


// Swap the byte order of a 32-bit value (check values are stored
// big-endian in the stream; see the DICTID state).
function zswap32(q) {
  return (((q >>> 24) & 0xff) +
          ((q >>> 8) & 0xff00) +
          ((q & 0xff00) << 8) +
          ((q & 0xff) << 24));
}


// Per-stream inflate state; mirrors zlib's `struct inflate_state`.
function InflateState() {
  this.mode = 0;              /* current inflate mode */
  this.last = false;          /* true if processing last block */
  this.wrap = 0;              /* bit 0 true for zlib, bit 1 true for gzip */
  this.havedict = false;      /* true if dictionary provided */
  this.flags = 0;             /* gzip header method and flags (0 if zlib) */
  this.dmax = 0;              /* zlib header max distance (INFLATE_STRICT) */
  this.check = 0;             /* protected copy of check value */
  this.total = 0;             /* protected copy of output count */
  // TODO: may be {}
  this.head = null;           /* where to save gzip header information */

  /* sliding window */
  this.wbits = 0;             /* log base 2 of requested window size */
  this.wsize = 0;             /* window size or zero if not using window */
  this.whave = 0;             /* valid bytes in the window */
  this.wnext = 0;             /* window write index */
  this.window = null;         /* allocated sliding window, if needed */

  /* bit accumulator */
  this.hold = 0;              /* input bit accumulator */
  this.bits = 0;              /* number of bits in "in" */

  /* for string and stored block copying */
  this.length = 0;            /* literal or length of data to copy */
  this.offset = 0;            /* distance back to copy string from */

  /* for table and code decoding */
  this.extra = 0;             /* extra bits needed */

  /* fixed and dynamic code tables */
  this.lencode = null;        /* starting table for length/literal codes */
  this.distcode = null;       /* starting table for distance codes */
  this.lenbits = 0;           /* index bits for lencode */
  this.distbits = 0;          /* index bits for distcode */

  /* dynamic table building */
  this.ncode = 0;             /* number of code length code lengths */
  this.nlen = 0;              /* number of length code lengths */
  this.ndist = 0;             /* number of distance code lengths */
  this.have = 0;              /* number of code lengths in lens[] */
  this.next = null;           /* next available space in codes[] */

  this.lens = new utils.Buf16(320); /* temporary storage for code lengths */
  this.work = new utils.Buf16(288); /* work area for code table building */

  /*
   because we don't have pointers in js, we use lencode and distcode directly
   as buffers so we don't need codes
  */
  //this.codes = new utils.Buf32(ENOUGH); /* space for code tables */
  this.lendyn = null;         /* dynamic table for length/literal codes (JS specific) */
  this.distdyn = null;        /* dynamic table for distance codes (JS specific) */
  this.sane = 0;              /* if false, allow invalid distance too far */
  this.back = 0;              /* bits back of last unprocessed length/lit */
  this.was = 0;               /* initial length of match */
}


// Reset the stream's decoding state without discarding the window
// parameters (wrap/wbits) or the window contents counters.
// Returns Z_OK, or Z_STREAM_ERROR if strm/state is missing.
function inflateResetKeep(strm) {
  var state;

  if (!strm || !strm.state) { return Z_STREAM_ERROR; }
  state = strm.state;
  strm.total_in = strm.total_out = state.total = 0;
  strm.msg = ''; /*Z_NULL*/
  if (state.wrap) {       /* to support ill-conceived Java test suite */
    strm.adler = state.wrap & 1;
  }
  state.mode = HEAD;
  state.last = 0;
  state.havedict = 0;
  state.dmax = 32768;
  state.head = null/*Z_NULL*/;
  state.hold = 0;
  state.bits = 0;
  //state.lencode = state.distcode = state.next = state.codes;
  state.lencode = state.lendyn = new utils.Buf32(ENOUGH_LENS);
  state.distcode = state.distdyn = new utils.Buf32(ENOUGH_DISTS);

  state.sane = 1;
  state.back = -1;
  //Tracev((stderr, "inflate: reset\n"));
  return Z_OK;
}


// Full reset: also discards the sliding-window fill counters, then
// delegates to inflateResetKeep(). Returns Z_OK or Z_STREAM_ERROR.
function inflateReset(strm) {
  var state;

  if (!strm || !strm.state) { return Z_STREAM_ERROR; }
  state = strm.state;
  state.wsize = 0;
  state.whave = 0;
  state.wnext = 0;
  return inflateResetKeep(strm);

}


// Reset with a new windowBits request. Negative windowBits selects raw
// deflate (no wrapper); values >= 16 encode gzip/auto-detect wrappers in
// the high bits (zlib's inflateInit2 convention).
// Returns Z_OK, or Z_STREAM_ERROR on a bad windowBits or missing state.
function inflateReset2(strm, windowBits) {
  var wrap;
  var state;

  /* get the state */
  if (!strm || !strm.state) { return Z_STREAM_ERROR; }
  state = strm.state;

  /* extract wrap request from windowBits parameter */
  if (windowBits < 0) {
    wrap = 0;
    windowBits = -windowBits;
  }
  else {
    wrap = (windowBits >> 4) + 1;
    if (windowBits < 48) {
      windowBits &= 15;
    }
  }

  /* set number of window bits, free window if different */
  if (windowBits && (windowBits < 8 || windowBits > 15)) {
    return Z_STREAM_ERROR;
  }
  if (state.window !== null && state.wbits !== windowBits) {
    state.window = null;
  }

  /* update state and reset the rest of it */
  state.wrap = wrap;
  state.wbits = windowBits;
  return inflateReset(strm);
}


// Allocate a fresh InflateState on strm and initialize it via
// inflateReset2(). On failure the state is detached again.
// Returns Z_OK or Z_STREAM_ERROR.
function inflateInit2(strm, windowBits) {
  var ret;
  var state;

  if (!strm) { return Z_STREAM_ERROR; }
  //strm.msg = Z_NULL; /* in case we return an error */

  state = new InflateState();

  //if (state === Z_NULL) return Z_MEM_ERROR;
  //Tracev((stderr, "inflate: allocated\n"));
  strm.state = state;
  state.window = null/*Z_NULL*/;
  ret = inflateReset2(strm, windowBits);
  if (ret !== Z_OK) {
    strm.state = null/*Z_NULL*/;
  }
  return ret;
}


// Convenience initializer using the default 15-bit (32K) window.
function inflateInit(strm) {
  return inflateInit2(strm, DEF_WBITS);
}


/*
 Return state with length and distance decoding tables and index sizes set to
 fixed code decoding. Normally this returns fixed tables from inffixed.h.
 If BUILDFIXED is defined, then instead this routine builds the tables the
 first time it's called, and returns those tables the first time and
 thereafter. This reduces the size of the code by about 2K bytes, in
 exchange for a little execution time. However, BUILDFIXED should not be
 used for threaded applications, since the rewriting of the tables and virgin
 may not be thread-safe.
 */
var virgin = true;

var lenfix, distfix; // We have no pointers in JS, so keep tables separate

function fixedtables(state) {
  /* build fixed huffman tables if first call (may not be thread safe) */
  if (virgin) {
    var sym;

    lenfix = new utils.Buf32(512);
    distfix = new utils.Buf32(32);

    /* literal/length table */
    sym = 0;
    while (sym < 144) { state.lens[sym++] = 8; }
    while (sym < 256) { state.lens[sym++] = 9; }
    while (sym < 280) { state.lens[sym++] = 7; }
    while (sym < 288) { state.lens[sym++] = 8; }

    inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 });

    /* distance table */
    sym = 0;
    while (sym < 32) { state.lens[sym++] = 5; }

    inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 });

    /* do this just once */
    virgin = false;
  }

  state.lencode = lenfix;
  state.lenbits = 9;
  state.distcode = distfix;
  state.distbits = 5;
}


/*
 Update the window with the last wsize (normally 32K) bytes written before
 returning. If window does not exist yet, create it. This is only called
 when a window is already in use, or when output has been written during this
 inflate call, but the end of the deflate stream has not been reached yet.
 It is also called to create a window for dictionary data when a dictionary
 is loaded.

 Providing output buffers larger than 32K to inflate() should provide a speed
 advantage, since only the last 32K of output is copied to the sliding window
 upon return from inflate(), and since all distances after the first 32K of
 output will fall in the output data, making match copies simpler and faster.
 The advantage may be dependent on the size of the processor's data caches.
8487 */ 8488 function updatewindow(strm, src, end, copy) { 8489 var dist; 8490 var state = strm.state; 8491 8492 /* if it hasn't been done already, allocate space for the window */ 8493 if (state.window === null) { 8494 state.wsize = 1 << state.wbits; 8495 state.wnext = 0; 8496 state.whave = 0; 8497 8498 state.window = new utils.Buf8(state.wsize); 8499 } 8500 8501 /* copy state->wsize or less output bytes into the circular window */ 8502 if (copy >= state.wsize) { 8503 utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0); 8504 state.wnext = 0; 8505 state.whave = state.wsize; 8506 } 8507 else { 8508 dist = state.wsize - state.wnext; 8509 if (dist > copy) { 8510 dist = copy; 8511 } 8512 //zmemcpy(state->window + state->wnext, end - copy, dist); 8513 utils.arraySet(state.window, src, end - copy, dist, state.wnext); 8514 copy -= dist; 8515 if (copy) { 8516 //zmemcpy(state->window, end - copy, copy); 8517 utils.arraySet(state.window, src, end - copy, copy, 0); 8518 state.wnext = copy; 8519 state.whave = state.wsize; 8520 } 8521 else { 8522 state.wnext += dist; 8523 if (state.wnext === state.wsize) { state.wnext = 0; } 8524 if (state.whave < state.wsize) { state.whave += dist; } 8525 } 8526 } 8527 return 0; 8528 } 8529 8530 function inflate(strm, flush) { 8531 var state; 8532 var input, output; // input/output buffers 8533 var next; /* next input INDEX */ 8534 var put; /* next output INDEX */ 8535 var have, left; /* available input and output */ 8536 var hold; /* bit buffer */ 8537 var bits; /* bits in bit buffer */ 8538 var _in, _out; /* save starting available input and output */ 8539 var copy; /* number of stored or match bytes to copy */ 8540 var from; /* where to copy match bytes from */ 8541 var from_source; 8542 var here = 0; /* current decoding table entry */ 8543 var here_bits, here_op, here_val; // paked "here" denormalized (JS specific) 8544 //var last; /* parent table entry */ 8545 var last_bits, last_op, last_val; // paked "last" denormalized 
(JS specific) 8546 var len; /* length to copy for repeats, bits to drop */ 8547 var ret; /* return code */ 8548 var hbuf = new utils.Buf8(4); /* buffer for gzip header crc calculation */ 8549 var opts; 8550 8551 var n; // temporary var for NEED_BITS 8552 8553 var order = /* permutation of code lengths */ 8554 [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]; 8555 8556 8557 if (!strm || !strm.state || !strm.output || 8558 (!strm.input && strm.avail_in !== 0)) { 8559 return Z_STREAM_ERROR; 8560 } 8561 8562 state = strm.state; 8563 if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */ 8564 8565 8566 //--- LOAD() --- 8567 put = strm.next_out; 8568 output = strm.output; 8569 left = strm.avail_out; 8570 next = strm.next_in; 8571 input = strm.input; 8572 have = strm.avail_in; 8573 hold = state.hold; 8574 bits = state.bits; 8575 //--- 8576 8577 _in = have; 8578 _out = left; 8579 ret = Z_OK; 8580 8581 inf_leave: // goto emulation 8582 for (;;) { 8583 switch (state.mode) { 8584 case HEAD: 8585 if (state.wrap === 0) { 8586 state.mode = TYPEDO; 8587 break; 8588 } 8589 //=== NEEDBITS(16); 8590 while (bits < 16) { 8591 if (have === 0) { break inf_leave; } 8592 have--; 8593 hold += input[next++] << bits; 8594 bits += 8; 8595 } 8596 //===// 8597 if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */ 8598 state.check = 0/*crc32(0L, Z_NULL, 0)*/; 8599 //=== CRC2(state.check, hold); 8600 hbuf[0] = hold & 0xff; 8601 hbuf[1] = (hold >>> 8) & 0xff; 8602 state.check = crc32(state.check, hbuf, 2, 0); 8603 //===// 8604 8605 //=== INITBITS(); 8606 hold = 0; 8607 bits = 0; 8608 //===// 8609 state.mode = FLAGS; 8610 break; 8611 } 8612 state.flags = 0; /* expect zlib header */ 8613 if (state.head) { 8614 state.head.done = false; 8615 } 8616 if (!(state.wrap & 1) || /* check if zlib header allowed */ 8617 (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) { 8618 strm.msg = 'incorrect header check'; 8619 state.mode = BAD; 8620 break; 8621 } 8622 if 
((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) { 8623 strm.msg = 'unknown compression method'; 8624 state.mode = BAD; 8625 break; 8626 } 8627 //--- DROPBITS(4) ---// 8628 hold >>>= 4; 8629 bits -= 4; 8630 //---// 8631 len = (hold & 0x0f)/*BITS(4)*/ + 8; 8632 if (state.wbits === 0) { 8633 state.wbits = len; 8634 } 8635 else if (len > state.wbits) { 8636 strm.msg = 'invalid window size'; 8637 state.mode = BAD; 8638 break; 8639 } 8640 state.dmax = 1 << len; 8641 //Tracev((stderr, "inflate: zlib header ok\n")); 8642 strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; 8643 state.mode = hold & 0x200 ? DICTID : TYPE; 8644 //=== INITBITS(); 8645 hold = 0; 8646 bits = 0; 8647 //===// 8648 break; 8649 case FLAGS: 8650 //=== NEEDBITS(16); */ 8651 while (bits < 16) { 8652 if (have === 0) { break inf_leave; } 8653 have--; 8654 hold += input[next++] << bits; 8655 bits += 8; 8656 } 8657 //===// 8658 state.flags = hold; 8659 if ((state.flags & 0xff) !== Z_DEFLATED) { 8660 strm.msg = 'unknown compression method'; 8661 state.mode = BAD; 8662 break; 8663 } 8664 if (state.flags & 0xe000) { 8665 strm.msg = 'unknown header flags set'; 8666 state.mode = BAD; 8667 break; 8668 } 8669 if (state.head) { 8670 state.head.text = ((hold >> 8) & 1); 8671 } 8672 if (state.flags & 0x0200) { 8673 //=== CRC2(state.check, hold); 8674 hbuf[0] = hold & 0xff; 8675 hbuf[1] = (hold >>> 8) & 0xff; 8676 state.check = crc32(state.check, hbuf, 2, 0); 8677 //===// 8678 } 8679 //=== INITBITS(); 8680 hold = 0; 8681 bits = 0; 8682 //===// 8683 state.mode = TIME; 8684 /* falls through */ 8685 case TIME: 8686 //=== NEEDBITS(32); */ 8687 while (bits < 32) { 8688 if (have === 0) { break inf_leave; } 8689 have--; 8690 hold += input[next++] << bits; 8691 bits += 8; 8692 } 8693 //===// 8694 if (state.head) { 8695 state.head.time = hold; 8696 } 8697 if (state.flags & 0x0200) { 8698 //=== CRC4(state.check, hold) 8699 hbuf[0] = hold & 0xff; 8700 hbuf[1] = (hold >>> 8) & 0xff; 8701 hbuf[2] = (hold >>> 16) & 0xff; 8702 hbuf[3] 
= (hold >>> 24) & 0xff; 8703 state.check = crc32(state.check, hbuf, 4, 0); 8704 //=== 8705 } 8706 //=== INITBITS(); 8707 hold = 0; 8708 bits = 0; 8709 //===// 8710 state.mode = OS; 8711 /* falls through */ 8712 case OS: 8713 //=== NEEDBITS(16); */ 8714 while (bits < 16) { 8715 if (have === 0) { break inf_leave; } 8716 have--; 8717 hold += input[next++] << bits; 8718 bits += 8; 8719 } 8720 //===// 8721 if (state.head) { 8722 state.head.xflags = (hold & 0xff); 8723 state.head.os = (hold >> 8); 8724 } 8725 if (state.flags & 0x0200) { 8726 //=== CRC2(state.check, hold); 8727 hbuf[0] = hold & 0xff; 8728 hbuf[1] = (hold >>> 8) & 0xff; 8729 state.check = crc32(state.check, hbuf, 2, 0); 8730 //===// 8731 } 8732 //=== INITBITS(); 8733 hold = 0; 8734 bits = 0; 8735 //===// 8736 state.mode = EXLEN; 8737 /* falls through */ 8738 case EXLEN: 8739 if (state.flags & 0x0400) { 8740 //=== NEEDBITS(16); */ 8741 while (bits < 16) { 8742 if (have === 0) { break inf_leave; } 8743 have--; 8744 hold += input[next++] << bits; 8745 bits += 8; 8746 } 8747 //===// 8748 state.length = hold; 8749 if (state.head) { 8750 state.head.extra_len = hold; 8751 } 8752 if (state.flags & 0x0200) { 8753 //=== CRC2(state.check, hold); 8754 hbuf[0] = hold & 0xff; 8755 hbuf[1] = (hold >>> 8) & 0xff; 8756 state.check = crc32(state.check, hbuf, 2, 0); 8757 //===// 8758 } 8759 //=== INITBITS(); 8760 hold = 0; 8761 bits = 0; 8762 //===// 8763 } 8764 else if (state.head) { 8765 state.head.extra = null/*Z_NULL*/; 8766 } 8767 state.mode = EXTRA; 8768 /* falls through */ 8769 case EXTRA: 8770 if (state.flags & 0x0400) { 8771 copy = state.length; 8772 if (copy > have) { copy = have; } 8773 if (copy) { 8774 if (state.head) { 8775 len = state.head.extra_len - state.length; 8776 if (!state.head.extra) { 8777 // Use untyped array for more conveniend processing later 8778 state.head.extra = new Array(state.head.extra_len); 8779 } 8780 utils.arraySet( 8781 state.head.extra, 8782 input, 8783 next, 8784 // extra field is 
limited to 65536 bytes 8785 // - no need for additional size check 8786 copy, 8787 /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/ 8788 len 8789 ); 8790 //zmemcpy(state.head.extra + len, next, 8791 // len + copy > state.head.extra_max ? 8792 // state.head.extra_max - len : copy); 8793 } 8794 if (state.flags & 0x0200) { 8795 state.check = crc32(state.check, input, copy, next); 8796 } 8797 have -= copy; 8798 next += copy; 8799 state.length -= copy; 8800 } 8801 if (state.length) { break inf_leave; } 8802 } 8803 state.length = 0; 8804 state.mode = NAME; 8805 /* falls through */ 8806 case NAME: 8807 if (state.flags & 0x0800) { 8808 if (have === 0) { break inf_leave; } 8809 copy = 0; 8810 do { 8811 // TODO: 2 or 1 bytes? 8812 len = input[next + copy++]; 8813 /* use constant limit because in js we should not preallocate memory */ 8814 if (state.head && len && 8815 (state.length < 65536 /*state.head.name_max*/)) { 8816 state.head.name += String.fromCharCode(len); 8817 } 8818 } while (len && copy < have); 8819 8820 if (state.flags & 0x0200) { 8821 state.check = crc32(state.check, input, copy, next); 8822 } 8823 have -= copy; 8824 next += copy; 8825 if (len) { break inf_leave; } 8826 } 8827 else if (state.head) { 8828 state.head.name = null; 8829 } 8830 state.length = 0; 8831 state.mode = COMMENT; 8832 /* falls through */ 8833 case COMMENT: 8834 if (state.flags & 0x1000) { 8835 if (have === 0) { break inf_leave; } 8836 copy = 0; 8837 do { 8838 len = input[next + copy++]; 8839 /* use constant limit because in js we should not preallocate memory */ 8840 if (state.head && len && 8841 (state.length < 65536 /*state.head.comm_max*/)) { 8842 state.head.comment += String.fromCharCode(len); 8843 } 8844 } while (len && copy < have); 8845 if (state.flags & 0x0200) { 8846 state.check = crc32(state.check, input, copy, next); 8847 } 8848 have -= copy; 8849 next += copy; 8850 if (len) { break inf_leave; } 8851 } 8852 else if (state.head) { 8853 state.head.comment 
= null; 8854 } 8855 state.mode = HCRC; 8856 /* falls through */ 8857 case HCRC: 8858 if (state.flags & 0x0200) { 8859 //=== NEEDBITS(16); */ 8860 while (bits < 16) { 8861 if (have === 0) { break inf_leave; } 8862 have--; 8863 hold += input[next++] << bits; 8864 bits += 8; 8865 } 8866 //===// 8867 if (hold !== (state.check & 0xffff)) { 8868 strm.msg = 'header crc mismatch'; 8869 state.mode = BAD; 8870 break; 8871 } 8872 //=== INITBITS(); 8873 hold = 0; 8874 bits = 0; 8875 //===// 8876 } 8877 if (state.head) { 8878 state.head.hcrc = ((state.flags >> 9) & 1); 8879 state.head.done = true; 8880 } 8881 strm.adler = state.check = 0; 8882 state.mode = TYPE; 8883 break; 8884 case DICTID: 8885 //=== NEEDBITS(32); */ 8886 while (bits < 32) { 8887 if (have === 0) { break inf_leave; } 8888 have--; 8889 hold += input[next++] << bits; 8890 bits += 8; 8891 } 8892 //===// 8893 strm.adler = state.check = zswap32(hold); 8894 //=== INITBITS(); 8895 hold = 0; 8896 bits = 0; 8897 //===// 8898 state.mode = DICT; 8899 /* falls through */ 8900 case DICT: 8901 if (state.havedict === 0) { 8902 //--- RESTORE() --- 8903 strm.next_out = put; 8904 strm.avail_out = left; 8905 strm.next_in = next; 8906 strm.avail_in = have; 8907 state.hold = hold; 8908 state.bits = bits; 8909 //--- 8910 return Z_NEED_DICT; 8911 } 8912 strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; 8913 state.mode = TYPE; 8914 /* falls through */ 8915 case TYPE: 8916 if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; } 8917 /* falls through */ 8918 case TYPEDO: 8919 if (state.last) { 8920 //--- BYTEBITS() ---// 8921 hold >>>= bits & 7; 8922 bits -= bits & 7; 8923 //---// 8924 state.mode = CHECK; 8925 break; 8926 } 8927 //=== NEEDBITS(3); */ 8928 while (bits < 3) { 8929 if (have === 0) { break inf_leave; } 8930 have--; 8931 hold += input[next++] << bits; 8932 bits += 8; 8933 } 8934 //===// 8935 state.last = (hold & 0x01)/*BITS(1)*/; 8936 //--- DROPBITS(1) ---// 8937 hold >>>= 1; 8938 bits -= 1; 8939 //---// 8940 
8941 switch ((hold & 0x03)/*BITS(2)*/) { 8942 case 0: /* stored block */ 8943 //Tracev((stderr, "inflate: stored block%s\n", 8944 // state.last ? " (last)" : "")); 8945 state.mode = STORED; 8946 break; 8947 case 1: /* fixed block */ 8948 fixedtables(state); 8949 //Tracev((stderr, "inflate: fixed codes block%s\n", 8950 // state.last ? " (last)" : "")); 8951 state.mode = LEN_; /* decode codes */ 8952 if (flush === Z_TREES) { 8953 //--- DROPBITS(2) ---// 8954 hold >>>= 2; 8955 bits -= 2; 8956 //---// 8957 break inf_leave; 8958 } 8959 break; 8960 case 2: /* dynamic block */ 8961 //Tracev((stderr, "inflate: dynamic codes block%s\n", 8962 // state.last ? " (last)" : "")); 8963 state.mode = TABLE; 8964 break; 8965 case 3: 8966 strm.msg = 'invalid block type'; 8967 state.mode = BAD; 8968 } 8969 //--- DROPBITS(2) ---// 8970 hold >>>= 2; 8971 bits -= 2; 8972 //---// 8973 break; 8974 case STORED: 8975 //--- BYTEBITS() ---// /* go to byte boundary */ 8976 hold >>>= bits & 7; 8977 bits -= bits & 7; 8978 //---// 8979 //=== NEEDBITS(32); */ 8980 while (bits < 32) { 8981 if (have === 0) { break inf_leave; } 8982 have--; 8983 hold += input[next++] << bits; 8984 bits += 8; 8985 } 8986 //===// 8987 if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) { 8988 strm.msg = 'invalid stored block lengths'; 8989 state.mode = BAD; 8990 break; 8991 } 8992 state.length = hold & 0xffff; 8993 //Tracev((stderr, "inflate: stored length %u\n", 8994 // state.length)); 8995 //=== INITBITS(); 8996 hold = 0; 8997 bits = 0; 8998 //===// 8999 state.mode = COPY_; 9000 if (flush === Z_TREES) { break inf_leave; } 9001 /* falls through */ 9002 case COPY_: 9003 state.mode = COPY; 9004 /* falls through */ 9005 case COPY: 9006 copy = state.length; 9007 if (copy) { 9008 if (copy > have) { copy = have; } 9009 if (copy > left) { copy = left; } 9010 if (copy === 0) { break inf_leave; } 9011 //--- zmemcpy(put, next, copy); --- 9012 utils.arraySet(output, input, next, copy, put); 9013 //---// 9014 have -= copy; 9015 next 
+= copy; 9016 left -= copy; 9017 put += copy; 9018 state.length -= copy; 9019 break; 9020 } 9021 //Tracev((stderr, "inflate: stored end\n")); 9022 state.mode = TYPE; 9023 break; 9024 case TABLE: 9025 //=== NEEDBITS(14); */ 9026 while (bits < 14) { 9027 if (have === 0) { break inf_leave; } 9028 have--; 9029 hold += input[next++] << bits; 9030 bits += 8; 9031 } 9032 //===// 9033 state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257; 9034 //--- DROPBITS(5) ---// 9035 hold >>>= 5; 9036 bits -= 5; 9037 //---// 9038 state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1; 9039 //--- DROPBITS(5) ---// 9040 hold >>>= 5; 9041 bits -= 5; 9042 //---// 9043 state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4; 9044 //--- DROPBITS(4) ---// 9045 hold >>>= 4; 9046 bits -= 4; 9047 //---// 9048 //#ifndef PKZIP_BUG_WORKAROUND 9049 if (state.nlen > 286 || state.ndist > 30) { 9050 strm.msg = 'too many length or distance symbols'; 9051 state.mode = BAD; 9052 break; 9053 } 9054 //#endif 9055 //Tracev((stderr, "inflate: table sizes ok\n")); 9056 state.have = 0; 9057 state.mode = LENLENS; 9058 /* falls through */ 9059 case LENLENS: 9060 while (state.have < state.ncode) { 9061 //=== NEEDBITS(3); 9062 while (bits < 3) { 9063 if (have === 0) { break inf_leave; } 9064 have--; 9065 hold += input[next++] << bits; 9066 bits += 8; 9067 } 9068 //===// 9069 state.lens[order[state.have++]] = (hold & 0x07);//BITS(3); 9070 //--- DROPBITS(3) ---// 9071 hold >>>= 3; 9072 bits -= 3; 9073 //---// 9074 } 9075 while (state.have < 19) { 9076 state.lens[order[state.have++]] = 0; 9077 } 9078 // We have separate tables & no pointers. 2 commented lines below not needed. 
9079 //state.next = state.codes; 9080 //state.lencode = state.next; 9081 // Switch to use dynamic table 9082 state.lencode = state.lendyn; 9083 state.lenbits = 7; 9084 9085 opts = { bits: state.lenbits }; 9086 ret = inflate_table(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts); 9087 state.lenbits = opts.bits; 9088 9089 if (ret) { 9090 strm.msg = 'invalid code lengths set'; 9091 state.mode = BAD; 9092 break; 9093 } 9094 //Tracev((stderr, "inflate: code lengths ok\n")); 9095 state.have = 0; 9096 state.mode = CODELENS; 9097 /* falls through */ 9098 case CODELENS: 9099 while (state.have < state.nlen + state.ndist) { 9100 for (;;) { 9101 here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/ 9102 here_bits = here >>> 24; 9103 here_op = (here >>> 16) & 0xff; 9104 here_val = here & 0xffff; 9105 9106 if ((here_bits) <= bits) { break; } 9107 //--- PULLBYTE() ---// 9108 if (have === 0) { break inf_leave; } 9109 have--; 9110 hold += input[next++] << bits; 9111 bits += 8; 9112 //---// 9113 } 9114 if (here_val < 16) { 9115 //--- DROPBITS(here.bits) ---// 9116 hold >>>= here_bits; 9117 bits -= here_bits; 9118 //---// 9119 state.lens[state.have++] = here_val; 9120 } 9121 else { 9122 if (here_val === 16) { 9123 //=== NEEDBITS(here.bits + 2); 9124 n = here_bits + 2; 9125 while (bits < n) { 9126 if (have === 0) { break inf_leave; } 9127 have--; 9128 hold += input[next++] << bits; 9129 bits += 8; 9130 } 9131 //===// 9132 //--- DROPBITS(here.bits) ---// 9133 hold >>>= here_bits; 9134 bits -= here_bits; 9135 //---// 9136 if (state.have === 0) { 9137 strm.msg = 'invalid bit length repeat'; 9138 state.mode = BAD; 9139 break; 9140 } 9141 len = state.lens[state.have - 1]; 9142 copy = 3 + (hold & 0x03);//BITS(2); 9143 //--- DROPBITS(2) ---// 9144 hold >>>= 2; 9145 bits -= 2; 9146 //---// 9147 } 9148 else if (here_val === 17) { 9149 //=== NEEDBITS(here.bits + 3); 9150 n = here_bits + 3; 9151 while (bits < n) { 9152 if (have === 0) { break inf_leave; } 
9153 have--; 9154 hold += input[next++] << bits; 9155 bits += 8; 9156 } 9157 //===// 9158 //--- DROPBITS(here.bits) ---// 9159 hold >>>= here_bits; 9160 bits -= here_bits; 9161 //---// 9162 len = 0; 9163 copy = 3 + (hold & 0x07);//BITS(3); 9164 //--- DROPBITS(3) ---// 9165 hold >>>= 3; 9166 bits -= 3; 9167 //---// 9168 } 9169 else { 9170 //=== NEEDBITS(here.bits + 7); 9171 n = here_bits + 7; 9172 while (bits < n) { 9173 if (have === 0) { break inf_leave; } 9174 have--; 9175 hold += input[next++] << bits; 9176 bits += 8; 9177 } 9178 //===// 9179 //--- DROPBITS(here.bits) ---// 9180 hold >>>= here_bits; 9181 bits -= here_bits; 9182 //---// 9183 len = 0; 9184 copy = 11 + (hold & 0x7f);//BITS(7); 9185 //--- DROPBITS(7) ---// 9186 hold >>>= 7; 9187 bits -= 7; 9188 //---// 9189 } 9190 if (state.have + copy > state.nlen + state.ndist) { 9191 strm.msg = 'invalid bit length repeat'; 9192 state.mode = BAD; 9193 break; 9194 } 9195 while (copy--) { 9196 state.lens[state.have++] = len; 9197 } 9198 } 9199 } 9200 9201 /* handle error breaks in while */ 9202 if (state.mode === BAD) { break; } 9203 9204 /* check for end-of-block code (better have one) */ 9205 if (state.lens[256] === 0) { 9206 strm.msg = 'invalid code -- missing end-of-block'; 9207 state.mode = BAD; 9208 break; 9209 } 9210 9211 /* build code tables -- note: do not change the lenbits or distbits 9212 values here (9 and 6) without reading the comments in inftrees.h 9213 concerning the ENOUGH constants, which depend on those values */ 9214 state.lenbits = 9; 9215 9216 opts = { bits: state.lenbits }; 9217 ret = inflate_table(LENS, state.lens, 0, state.nlen, state.lencode, 0, state.work, opts); 9218 // We have separate tables & no pointers. 2 commented lines below not needed. 
9219 // state.next_index = opts.table_index; 9220 state.lenbits = opts.bits; 9221 // state.lencode = state.next; 9222 9223 if (ret) { 9224 strm.msg = 'invalid literal/lengths set'; 9225 state.mode = BAD; 9226 break; 9227 } 9228 9229 state.distbits = 6; 9230 //state.distcode.copy(state.codes); 9231 // Switch to use dynamic table 9232 state.distcode = state.distdyn; 9233 opts = { bits: state.distbits }; 9234 ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts); 9235 // We have separate tables & no pointers. 2 commented lines below not needed. 9236 // state.next_index = opts.table_index; 9237 state.distbits = opts.bits; 9238 // state.distcode = state.next; 9239 9240 if (ret) { 9241 strm.msg = 'invalid distances set'; 9242 state.mode = BAD; 9243 break; 9244 } 9245 //Tracev((stderr, 'inflate: codes ok\n')); 9246 state.mode = LEN_; 9247 if (flush === Z_TREES) { break inf_leave; } 9248 /* falls through */ 9249 case LEN_: 9250 state.mode = LEN; 9251 /* falls through */ 9252 case LEN: 9253 if (have >= 6 && left >= 258) { 9254 //--- RESTORE() --- 9255 strm.next_out = put; 9256 strm.avail_out = left; 9257 strm.next_in = next; 9258 strm.avail_in = have; 9259 state.hold = hold; 9260 state.bits = bits; 9261 //--- 9262 inflate_fast(strm, _out); 9263 //--- LOAD() --- 9264 put = strm.next_out; 9265 output = strm.output; 9266 left = strm.avail_out; 9267 next = strm.next_in; 9268 input = strm.input; 9269 have = strm.avail_in; 9270 hold = state.hold; 9271 bits = state.bits; 9272 //--- 9273 9274 if (state.mode === TYPE) { 9275 state.back = -1; 9276 } 9277 break; 9278 } 9279 state.back = 0; 9280 for (;;) { 9281 here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/ 9282 here_bits = here >>> 24; 9283 here_op = (here >>> 16) & 0xff; 9284 here_val = here & 0xffff; 9285 9286 if (here_bits <= bits) { break; } 9287 //--- PULLBYTE() ---// 9288 if (have === 0) { break inf_leave; } 9289 have--; 9290 hold += input[next++] 
<< bits; 9291 bits += 8; 9292 //---// 9293 } 9294 if (here_op && (here_op & 0xf0) === 0) { 9295 last_bits = here_bits; 9296 last_op = here_op; 9297 last_val = here_val; 9298 for (;;) { 9299 here = state.lencode[last_val + 9300 ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; 9301 here_bits = here >>> 24; 9302 here_op = (here >>> 16) & 0xff; 9303 here_val = here & 0xffff; 9304 9305 if ((last_bits + here_bits) <= bits) { break; } 9306 //--- PULLBYTE() ---// 9307 if (have === 0) { break inf_leave; } 9308 have--; 9309 hold += input[next++] << bits; 9310 bits += 8; 9311 //---// 9312 } 9313 //--- DROPBITS(last.bits) ---// 9314 hold >>>= last_bits; 9315 bits -= last_bits; 9316 //---// 9317 state.back += last_bits; 9318 } 9319 //--- DROPBITS(here.bits) ---// 9320 hold >>>= here_bits; 9321 bits -= here_bits; 9322 //---// 9323 state.back += here_bits; 9324 state.length = here_val; 9325 if (here_op === 0) { 9326 //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
9327 // "inflate: literal '%c'\n" : 9328 // "inflate: literal 0x%02x\n", here.val)); 9329 state.mode = LIT; 9330 break; 9331 } 9332 if (here_op & 32) { 9333 //Tracevv((stderr, "inflate: end of block\n")); 9334 state.back = -1; 9335 state.mode = TYPE; 9336 break; 9337 } 9338 if (here_op & 64) { 9339 strm.msg = 'invalid literal/length code'; 9340 state.mode = BAD; 9341 break; 9342 } 9343 state.extra = here_op & 15; 9344 state.mode = LENEXT; 9345 /* falls through */ 9346 case LENEXT: 9347 if (state.extra) { 9348 //=== NEEDBITS(state.extra); 9349 n = state.extra; 9350 while (bits < n) { 9351 if (have === 0) { break inf_leave; } 9352 have--; 9353 hold += input[next++] << bits; 9354 bits += 8; 9355 } 9356 //===// 9357 state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; 9358 //--- DROPBITS(state.extra) ---// 9359 hold >>>= state.extra; 9360 bits -= state.extra; 9361 //---// 9362 state.back += state.extra; 9363 } 9364 //Tracevv((stderr, "inflate: length %u\n", state.length)); 9365 state.was = state.length; 9366 state.mode = DIST; 9367 /* falls through */ 9368 case DIST: 9369 for (;;) { 9370 here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/ 9371 here_bits = here >>> 24; 9372 here_op = (here >>> 16) & 0xff; 9373 here_val = here & 0xffff; 9374 9375 if ((here_bits) <= bits) { break; } 9376 //--- PULLBYTE() ---// 9377 if (have === 0) { break inf_leave; } 9378 have--; 9379 hold += input[next++] << bits; 9380 bits += 8; 9381 //---// 9382 } 9383 if ((here_op & 0xf0) === 0) { 9384 last_bits = here_bits; 9385 last_op = here_op; 9386 last_val = here_val; 9387 for (;;) { 9388 here = state.distcode[last_val + 9389 ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; 9390 here_bits = here >>> 24; 9391 here_op = (here >>> 16) & 0xff; 9392 here_val = here & 0xffff; 9393 9394 if ((last_bits + here_bits) <= bits) { break; } 9395 //--- PULLBYTE() ---// 9396 if (have === 0) { break inf_leave; } 9397 
have--; 9398 hold += input[next++] << bits; 9399 bits += 8; 9400 //---// 9401 } 9402 //--- DROPBITS(last.bits) ---// 9403 hold >>>= last_bits; 9404 bits -= last_bits; 9405 //---// 9406 state.back += last_bits; 9407 } 9408 //--- DROPBITS(here.bits) ---// 9409 hold >>>= here_bits; 9410 bits -= here_bits; 9411 //---// 9412 state.back += here_bits; 9413 if (here_op & 64) { 9414 strm.msg = 'invalid distance code'; 9415 state.mode = BAD; 9416 break; 9417 } 9418 state.offset = here_val; 9419 state.extra = (here_op) & 15; 9420 state.mode = DISTEXT; 9421 /* falls through */ 9422 case DISTEXT: 9423 if (state.extra) { 9424 //=== NEEDBITS(state.extra); 9425 n = state.extra; 9426 while (bits < n) { 9427 if (have === 0) { break inf_leave; } 9428 have--; 9429 hold += input[next++] << bits; 9430 bits += 8; 9431 } 9432 //===// 9433 state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; 9434 //--- DROPBITS(state.extra) ---// 9435 hold >>>= state.extra; 9436 bits -= state.extra; 9437 //---// 9438 state.back += state.extra; 9439 } 9440 //#ifdef INFLATE_STRICT 9441 if (state.offset > state.dmax) { 9442 strm.msg = 'invalid distance too far back'; 9443 state.mode = BAD; 9444 break; 9445 } 9446 //#endif 9447 //Tracevv((stderr, "inflate: distance %u\n", state.offset)); 9448 state.mode = MATCH; 9449 /* falls through */ 9450 case MATCH: 9451 if (left === 0) { break inf_leave; } 9452 copy = _out - left; 9453 if (state.offset > copy) { /* copy from window */ 9454 copy = state.offset - copy; 9455 if (copy > state.whave) { 9456 if (state.sane) { 9457 strm.msg = 'invalid distance too far back'; 9458 state.mode = BAD; 9459 break; 9460 } 9461 // (!) 
This block is disabled in zlib defailts, 9462 // don't enable it for binary compatibility 9463 //#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR 9464 // Trace((stderr, "inflate.c too far\n")); 9465 // copy -= state.whave; 9466 // if (copy > state.length) { copy = state.length; } 9467 // if (copy > left) { copy = left; } 9468 // left -= copy; 9469 // state.length -= copy; 9470 // do { 9471 // output[put++] = 0; 9472 // } while (--copy); 9473 // if (state.length === 0) { state.mode = LEN; } 9474 // break; 9475 //#endif 9476 } 9477 if (copy > state.wnext) { 9478 copy -= state.wnext; 9479 from = state.wsize - copy; 9480 } 9481 else { 9482 from = state.wnext - copy; 9483 } 9484 if (copy > state.length) { copy = state.length; } 9485 from_source = state.window; 9486 } 9487 else { /* copy from output */ 9488 from_source = output; 9489 from = put - state.offset; 9490 copy = state.length; 9491 } 9492 if (copy > left) { copy = left; } 9493 left -= copy; 9494 state.length -= copy; 9495 do { 9496 output[put++] = from_source[from++]; 9497 } while (--copy); 9498 if (state.length === 0) { state.mode = LEN; } 9499 break; 9500 case LIT: 9501 if (left === 0) { break inf_leave; } 9502 output[put++] = state.length; 9503 left--; 9504 state.mode = LEN; 9505 break; 9506 case CHECK: 9507 if (state.wrap) { 9508 //=== NEEDBITS(32); 9509 while (bits < 32) { 9510 if (have === 0) { break inf_leave; } 9511 have--; 9512 // Use '|' insdead of '+' to make sure that result is signed 9513 hold |= input[next++] << bits; 9514 bits += 8; 9515 } 9516 //===// 9517 _out -= left; 9518 strm.total_out += _out; 9519 state.total += _out; 9520 if (_out) { 9521 strm.adler = state.check = 9522 /*UPDATE(state.check, put - _out, _out);*/ 9523 (state.flags ? crc32(state.check, output, _out, put - _out) : adler32(state.check, output, _out, put - _out)); 9524 9525 } 9526 _out = left; 9527 // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too 9528 if ((state.flags ? 
hold : zswap32(hold)) !== state.check) { 9529 strm.msg = 'incorrect data check'; 9530 state.mode = BAD; 9531 break; 9532 } 9533 //=== INITBITS(); 9534 hold = 0; 9535 bits = 0; 9536 //===// 9537 //Tracev((stderr, "inflate: check matches trailer\n")); 9538 } 9539 state.mode = LENGTH; 9540 /* falls through */ 9541 case LENGTH: 9542 if (state.wrap && state.flags) { 9543 //=== NEEDBITS(32); 9544 while (bits < 32) { 9545 if (have === 0) { break inf_leave; } 9546 have--; 9547 hold += input[next++] << bits; 9548 bits += 8; 9549 } 9550 //===// 9551 if (hold !== (state.total & 0xffffffff)) { 9552 strm.msg = 'incorrect length check'; 9553 state.mode = BAD; 9554 break; 9555 } 9556 //=== INITBITS(); 9557 hold = 0; 9558 bits = 0; 9559 //===// 9560 //Tracev((stderr, "inflate: length matches trailer\n")); 9561 } 9562 state.mode = DONE; 9563 /* falls through */ 9564 case DONE: 9565 ret = Z_STREAM_END; 9566 break inf_leave; 9567 case BAD: 9568 ret = Z_DATA_ERROR; 9569 break inf_leave; 9570 case MEM: 9571 return Z_MEM_ERROR; 9572 case SYNC: 9573 /* falls through */ 9574 default: 9575 return Z_STREAM_ERROR; 9576 } 9577 } 9578 9579 // inf_leave <- here is real place for "goto inf_leave", emulated via "break inf_leave" 9580 9581 /* 9582 Return from inflate(), updating the total counts and the check value. 9583 If there was no progress during the inflate() call, return a buffer 9584 error. Call updatewindow() to create and/or update the window state. 9585 Note: a memory error from inflate() is non-recoverable. 
9586 */ 9587 9588 //--- RESTORE() --- 9589 strm.next_out = put; 9590 strm.avail_out = left; 9591 strm.next_in = next; 9592 strm.avail_in = have; 9593 state.hold = hold; 9594 state.bits = bits; 9595 //--- 9596 9597 if (state.wsize || (_out !== strm.avail_out && state.mode < BAD && 9598 (state.mode < CHECK || flush !== Z_FINISH))) { 9599 if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) { 9600 state.mode = MEM; 9601 return Z_MEM_ERROR; 9602 } 9603 } 9604 _in -= strm.avail_in; 9605 _out -= strm.avail_out; 9606 strm.total_in += _in; 9607 strm.total_out += _out; 9608 state.total += _out; 9609 if (state.wrap && _out) { 9610 strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/ 9611 (state.flags ? crc32(state.check, output, _out, strm.next_out - _out) : adler32(state.check, output, _out, strm.next_out - _out)); 9612 } 9613 strm.data_type = state.bits + (state.last ? 64 : 0) + 9614 (state.mode === TYPE ? 128 : 0) + 9615 (state.mode === LEN_ || state.mode === COPY_ ? 
256 : 0);
  /* Z_BUF_ERROR when no input was consumed and no output produced (or on
     Z_FINISH without reaching DONE) — caller made no progress. */
  if (((_in === 0 && _out === 0) || flush === Z_FINISH) && ret === Z_OK) {
    ret = Z_BUF_ERROR;
  }
  return ret;
}

/**
 * Free the inflate state attached to the stream.
 * Releases the sliding window and detaches `strm.state` so the stream can
 * be garbage-collected / re-initialized.
 *
 * @param {Object} strm - zlib-style stream object.
 * @returns {number} Z_OK, or Z_STREAM_ERROR if the stream has no state.
 */
function inflateEnd(strm) {

  if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) {
    return Z_STREAM_ERROR;
  }

  var state = strm.state;
  if (state.window) {
    // Drop the reference so the window buffer can be collected.
    state.window = null;
  }
  strm.state = null;
  return Z_OK;
}

/**
 * Register a gzip header object to be filled in while decoding.
 * Only valid when the stream was initialized with gzip wrapping
 * (state.wrap bit 1 set). `head.done` is set to true by inflate()
 * once the full header has been read.
 *
 * @param {Object} strm - zlib-style stream object.
 * @param {Object} head - GZheader-like object to populate.
 * @returns {number} Z_OK, or Z_STREAM_ERROR on bad state / non-gzip stream.
 */
function inflateGetHeader(strm, head) {
  var state;

  /* check state */
  if (!strm || !strm.state) { return Z_STREAM_ERROR; }
  state = strm.state;
  if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; }

  /* save header structure */
  state.head = head;
  head.done = false;
  return Z_OK;
}

/**
 * Supply a preset dictionary for raw or zlib streams.
 * In DICT mode (zlib wrapper) the dictionary's adler32 must match the
 * checksum announced in the stream header; the dictionary bytes are then
 * loaded into the sliding window via updatewindow().
 *
 * @param {Object} strm - zlib-style stream object.
 * @param {Uint8Array|Array} dictionary - dictionary bytes.
 * @returns {number} Z_OK, Z_STREAM_ERROR (bad state/mode),
 *   Z_DATA_ERROR (checksum mismatch) or Z_MEM_ERROR (window update failed).
 */
function inflateSetDictionary(strm, dictionary) {
  var dictLength = dictionary.length;

  var state;
  var dictid;
  var ret;

  /* check state */
  if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; }
  state = strm.state;

  // A dictionary may only be set before decoding starts (DICT mode) or on
  // raw streams (wrap === 0).
  if (state.wrap !== 0 && state.mode !== DICT) {
    return Z_STREAM_ERROR;
  }

  /* check for correct dictionary identifier */
  if (state.mode === DICT) {
    dictid = 1; /* adler32(0, null, 0)*/
    /* dictid = adler32(dictid, dictionary, dictLength); */
    dictid = adler32(dictid, dictionary, dictLength, 0);
    if (dictid !== state.check) {
      return Z_DATA_ERROR;
    }
  }
  /* copy dictionary to window using updatewindow(), which will amend the
     existing dictionary if appropriate */
  ret = updatewindow(strm, dictionary, dictLength, dictLength);
  if (ret) {
    state.mode = MEM;
    return Z_MEM_ERROR;
  }
  state.havedict = 1;
  // Tracev((stderr, "inflate: dictionary set\n"));
  return Z_OK;
}

// Public zlib-style entry points (continued on the following export lines).
exports.inflateReset = inflateReset;
exports.inflateReset2 = inflateReset2;
exports.inflateResetKeep = inflateResetKeep;
// Remaining public API of the inflate module (zlib-compatible surface).
exports.inflateInit = inflateInit;
exports.inflateInit2 = inflateInit2;
exports.inflate = inflate;
exports.inflateEnd = inflateEnd;
exports.inflateGetHeader = inflateGetHeader;
exports.inflateSetDictionary = inflateSetDictionary;
exports.inflateInfo = 'pako inflate (from Nodeca project)';

/* Not implemented
exports.inflateCopy = inflateCopy;
exports.inflateGetDictionary = inflateGetDictionary;
exports.inflateMark = inflateMark;
exports.inflatePrime = inflatePrime;
exports.inflateSync = inflateSync;
exports.inflateSyncPoint = inflateSyncPoint;
exports.inflateUndermine = inflateUndermine;
*/

// Browserify module boundary: end of module 49 (inffast/inflate deps map),
// start of module 50 (inftrees — Huffman decoding table construction).
},{"../utils/common":41,"./adler32":43,"./crc32":45,"./inffast":48,"./inftrees":50}],50:[function(require,module,exports){
'use strict';

// (C) 1995-2013 Jean-loup Gailly and Mark Adler
// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//   claim that you wrote the original software. If you use this software
//   in a product, an acknowledgment in the product documentation would be
//   appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//   misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
var utils = require('../utils/common');

var MAXBITS = 15;          // maximum code length in bits (deflate spec)
var ENOUGH_LENS = 852;     // table-space bound for LENS tables (see zlib inftrees.h)
var ENOUGH_DISTS = 592;    // table-space bound for DISTS tables
//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);

// Table type selectors.
var CODES = 0;
var LENS = 1;
var DISTS = 2;

var lbase = [ /* Length codes 257..285 base */
  3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
  35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0
];

var lext = [ /* Length codes 257..285 extra */
  16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
  19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78
];

var dbase = [ /* Distance codes 0..29 base */
  1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
  257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
  8193, 12289, 16385, 24577, 0, 0
];

var dext = [ /* Distance codes 0..29 extra */
  16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
  23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
  28, 28, 29, 29, 64, 64
];

/**
 * Build a Huffman decoding table from a set of code lengths
 * (JavaScript port of zlib's inftrees.c).
 *
 * Each table entry is packed as (bits << 24) | (op << 16) | val, matching
 * how the inflate state machine unpacks entries (here >>> 24, etc.).
 *
 * @param {number} type - CODES, LENS or DISTS (selects base/extra tables and limits).
 * @param {Array|Uint16Array} lens - code lengths, read from lens[lens_index..lens_index+codes-1].
 * @param {number} lens_index - starting offset into lens.
 * @param {number} codes - number of codes.
 * @param {Int32Array|Array} table - output decoding table (written in place).
 * @param {number} table_index - starting offset into table.
 * @param {Array|Uint16Array} work - caller-provided scratch for sorted symbols.
 * @param {Object} opts - in: opts.bits = requested root bits; out: opts.bits = actual root bits.
 * @returns {number} 0 on success, -1 for an over-subscribed or incomplete
 *   code set, 1 if the table would exceed the ENOUGH_* space bounds.
 */
module.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts)
{
  var bits = opts.bits;
  //here = opts.here; /* table entry for duplication */

  var len = 0;               /* a code's length in bits */
  var sym = 0;               /* index of code symbols */
  var min = 0, max = 0;      /* minimum and maximum code lengths */
  var root = 0;              /* number of index bits for root table */
  var curr = 0;              /* number of index bits for current table */
  var drop = 0;              /* code bits to drop for sub-table */
  var left = 0;              /* number of prefix codes available */
  var used = 0;              /* code entries in table used */
  var huff = 0;              /* Huffman code */
  var incr;                  /* for incrementing code, index */
  var fill;                  /* index for replicating entries */
  var low;                   /* low bits for current root entry */
  var mask;                  /* mask for low root bits */
  var next;                  /* next available space in table */
  var base = null;           /* base value table to use */
  var base_index = 0;
  //  var shoextra;          /* extra bits table to use */
  var end;                   /* use base and extra for symbol > end */
  var count = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1];  /* number of codes of each length */
  var offs = new utils.Buf16(MAXBITS + 1);  //[MAXBITS+1];  /* offsets in table for each length */
  var extra = null;
  var extra_index = 0;

  var here_bits, here_op, here_val;

  /*
   Process a set of code lengths to create a canonical Huffman code. The
   code lengths are lens[0..codes-1]. Each length corresponds to the
   symbols 0..codes-1. The Huffman code is generated by first sorting the
   symbols by length from short to long, and retaining the symbol order
   for codes with equal lengths. Then the code starts with all zero bits
   for the first code of the shortest length, and the codes are integer
   increments for the same length, and zeros are appended as the length
   increases. For the deflate format, these bits are stored backwards
   from their more natural integer increment ordering, and so when the
   decoding tables are built in the large loop below, the integer codes
   are incremented backwards.

   This routine assumes, but does not check, that all of the entries in
   lens[] are in the range 0..MAXBITS. The caller must assure this.
   1..MAXBITS is interpreted as that code length. zero means that that
   symbol does not occur in this code.

   The codes are sorted by computing a count of codes for each length,
   creating from that a table of starting indices for each length in the
   sorted table, and then entering the symbols in order in the sorted
   table. The sorted table is work[], with that space being provided by
   the caller.

   The length counts are used for other purposes as well, i.e. finding
   the minimum and maximum length codes, determining if there are any
   codes at all, checking for a valid set of lengths, and looking ahead
   at length counts to determine sub-table sizes when building the
   decoding tables.
   */

  /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
  for (len = 0; len <= MAXBITS; len++) {
    count[len] = 0;
  }
  for (sym = 0; sym < codes; sym++) {
    count[lens[lens_index + sym]]++;
  }

  /* bound code lengths, force root to be within code lengths */
  root = bits;
  for (max = MAXBITS; max >= 1; max--) {
    if (count[max] !== 0) { break; }
  }
  if (root > max) {
    root = max;
  }
  if (max === 0) {                     /* no symbols to code at all */
    //table.op[opts.table_index] = 64;  //here.op = (var char)64;    /* invalid code marker */
    //table.bits[opts.table_index] = 1;   //here.bits = (var char)1;
    //table.val[opts.table_index++] = 0;   //here.val = (var short)0;
    table[table_index++] = (1 << 24) | (64 << 16) | 0;

    //table.op[opts.table_index] = 64;
    //table.bits[opts.table_index] = 1;
    //table.val[opts.table_index++] = 0;
    table[table_index++] = (1 << 24) | (64 << 16) | 0;

    opts.bits = 1;
    return 0;     /* no symbols, but wait for decoding to report error */
  }
  for (min = 1; min < max; min++) {
    if (count[min] !== 0) { break; }
  }
  if (root < min) {
    root = min;
  }

  /* check for an over-subscribed or incomplete set of lengths */
  left = 1;
  for (len = 1; len <= MAXBITS; len++) {
    left <<= 1;
    left -= count[len];
    if (left < 0) {
      return -1;
    }        /* over-subscribed */
  }
  if (left > 0 && (type === CODES || max !== 1)) {
    return -1;                      /* incomplete set */
  }

  /* generate offsets into symbol table for each length for sorting */
  offs[1] = 0;
  for (len = 1; len < MAXBITS; len++) {
    offs[len + 1] = offs[len] + count[len];
  }

  /* sort symbols by length, by symbol order within each length */
  for (sym = 0; sym < codes; sym++) {
    if (lens[lens_index + sym] !== 0) {
      work[offs[lens[lens_index + sym]]++] = sym;
    }
  }

  /*
   Create and fill in decoding tables. In this loop, the table being
   filled is at next and has curr index bits. The code being used is huff
   with length len. That code is converted to an index by dropping drop
   bits off of the bottom. For codes where len is less than drop + curr,
   those top drop + curr - len bits are incremented through all values to
   fill the table with replicated entries.

   root is the number of index bits for the root table. When len exceeds
   root, sub-tables are created pointed to by the root entry with an index
   of the low root bits of huff. This is saved in low to check for when a
   new sub-table should be started. drop is zero when the root table is
   being filled, and drop is root when sub-tables are being filled.

   When a new sub-table is needed, it is necessary to look ahead in the
   code lengths to determine what size sub-table is needed. The length
   counts are used for this, and so count[] is decremented as codes are
   entered in the tables.

   used keeps track of how many table entries have been allocated from the
   provided *table space. It is checked for LENS and DIST tables against
   the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in
   the initial root table size constants. See the comments in inftrees.h
   for more information.

   sym increments through all symbols, and the loop terminates when
   all codes of length max, i.e. all codes, have been processed. This
   routine permits incomplete codes, so another loop after this one fills
   in the rest of the decoding tables with invalid code markers.
   */

  /* set up for code type */
  // poor man optimization - use if-else instead of switch,
  // to avoid deopts in old v8
  if (type === CODES) {
    base = extra = work;    /* dummy value--not used */
    end = 19;

  } else if (type === LENS) {
    base = lbase;
    // Length symbols start at 257; bias the index so base[base_index + sym]
    // addresses lbase[sym - 257].
    base_index -= 257;
    extra = lext;
    extra_index -= 257;
    end = 256;

  } else {                    /* DISTS */
    base = dbase;
    extra = dext;
    end = -1;
  }

  /* initialize opts for loop */
  huff = 0;                   /* starting code */
  sym = 0;                    /* starting code symbol */
  len = min;                  /* starting code length */
  next = table_index;         /* current table to fill in */
  curr = root;                /* current table index bits */
  drop = 0;                   /* current bits to drop from code for index */
  low = -1;                   /* trigger new sub-table when len > root */
  used = 1 << root;           /* use root table entries */
  mask = used - 1;            /* mask for comparing low */

  /* check available table space */
  if ((type === LENS && used > ENOUGH_LENS) ||
    (type === DISTS && used > ENOUGH_DISTS)) {
    return 1;
  }

  /* process all codes and make table entries */
  for (;;) {
    /* create table entry */
    here_bits = len - drop;
    if (work[sym] < end) {
      here_op = 0;            /* literal: op 0, val is the symbol itself */
      here_val = work[sym];
    }
    else if (work[sym] > end) {
      here_op = extra[extra_index + work[sym]];   /* length/distance code */
      here_val = base[base_index + work[sym]];
    }
    else {
      here_op = 32 + 64;         /* end of block */
      here_val = 0;
    }

    /* replicate for those indices with low len bits equal to huff */
    incr = 1 << (len - drop);
    fill = 1 << curr;
    min = fill;                 /* save offset to next table */
    do {
      fill -= incr;
      // Pack the entry: bits<<24 | op<<16 | val, forced to int32 with |0.
      table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0;
    } while (fill !== 0);

    /* backwards increment the len-bit code huff */
    incr = 1 << (len - 1);
    while (huff & incr) {
      incr >>= 1;
    }
    if (incr !== 0) {
      huff &= incr - 1;
      huff += incr;
    } else {
      huff = 0;
    }

    /* go to next symbol, update count, len */
    sym++;
    if (--count[len] === 0) {
      if (len === max) { break; }
      len = lens[lens_index + work[sym]];
    }

    /* create new sub-table if needed */
    if (len > root && (huff & mask) !== low) {
      /* if first time, transition to sub-tables */
      if (drop === 0) {
        drop = root;
      }

      /* increment past last table */
      next += min;            /* here min is 1 << curr */

      /* determine length of next table */
      curr = len - drop;
      left = 1 << curr;
      while (curr + drop < max) {
        left -= count[curr + drop];
        if (left <= 0) { break; }
        curr++;
        left <<= 1;
      }

      /* check for enough space */
      used += 1 << curr;
      if ((type === LENS && used > ENOUGH_LENS) ||
        (type === DISTS && used > ENOUGH_DISTS)) {
        return 1;
      }

      /* point entry in root table to sub-table */
      low = huff & mask;
      /*table.op[low] = curr;
      table.bits[low] = root;
      table.val[low] = next - opts.table_index;*/
      table[low] = (root << 24) | (curr << 16) | (next - table_index) |0;
    }
  }

  /* fill in remaining table entry if code is incomplete (guaranteed to have
     at most one remaining entry, since if the code is incomplete, the
     maximum code length that was allowed to get this far is one bit) */
  if (huff !== 0) {
    //table.op[next + huff] = 64;            /* invalid code marker */
    //table.bits[next + huff] = len - drop;
    //table.val[next + huff] = 0;
    table[next + huff] = ((len - drop) << 24) | (64 << 16) |0;
  }

  /* set return parameters */
  //opts.table_index += used;
  opts.bits = root;
  return 0;
};

// Browserify module boundary: end of module 50 (inftrees),
// start of module 51 (zlib status-code messages).
},{"../utils/common":41}],51:[function(require,module,exports){
'use strict';
10055 // (C) 1995-2013 Jean-loup Gailly and Mark Adler 10056 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin 10057 // 10058 // This software is provided 'as-is', without any express or implied 10059 // warranty. In no event will the authors be held liable for any damages 10060 // arising from the use of this software. 10061 // 10062 // Permission is granted to anyone to use this software for any purpose, 10063 // including commercial applications, and to alter it and redistribute it 10064 // freely, subject to the following restrictions: 10065 // 10066 // 1. The origin of this software must not be misrepresented; you must not 10067 // claim that you wrote the original software. If you use this software 10068 // in a product, an acknowledgment in the product documentation would be 10069 // appreciated but is not required. 10070 // 2. Altered source versions must be plainly marked as such, and must not be 10071 // misrepresented as being the original software. 10072 // 3. This notice may not be removed or altered from any source distribution. 10073 10074 module.exports = { 10075 2: 'need dictionary', /* Z_NEED_DICT 2 */ 10076 1: 'stream end', /* Z_STREAM_END 1 */ 10077 0: '', /* Z_OK 0 */ 10078 '-1': 'file error', /* Z_ERRNO (-1) */ 10079 '-2': 'stream error', /* Z_STREAM_ERROR (-2) */ 10080 '-3': 'data error', /* Z_DATA_ERROR (-3) */ 10081 '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */ 10082 '-5': 'buffer error', /* Z_BUF_ERROR (-5) */ 10083 '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */ 10084 }; 10085 10086 },{}],52:[function(require,module,exports){ 10087 'use strict'; 10088 10089 // (C) 1995-2013 Jean-loup Gailly and Mark Adler 10090 // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin 10091 // 10092 // This software is provided 'as-is', without any express or implied 10093 // warranty. In no event will the authors be held liable for any damages 10094 // arising from the use of this software. 
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//   claim that you wrote the original software. If you use this software
//   in a product, an acknowledgment in the product documentation would be
//   appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//   misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.

var utils = require('../utils/common');

/* Public constants ==========================================================*/
/* ===========================================================================*/


//var Z_FILTERED          = 1;
//var Z_HUFFMAN_ONLY      = 2;
//var Z_RLE               = 3;
var Z_FIXED               = 4;
//var Z_DEFAULT_STRATEGY  = 0;

/* Possible values of the data_type field (though see inflate()) */
var Z_BINARY              = 0;
var Z_TEXT                = 1;
//var Z_ASCII             = 1; // = Z_TEXT
var Z_UNKNOWN             = 2;

/*============================================================================*/


/* Fill the whole buffer with zeros; used to initialise the tree tables below. */
function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } }

// From zutil.h

var STORED_BLOCK = 0;
var STATIC_TREES = 1;
var DYN_TREES    = 2;
/* The three kinds of block type */

var MIN_MATCH    = 3;
var MAX_MATCH    = 258;
/* The minimum and maximum match lengths */

// From deflate.h
/* ===========================================================================
 * Internal compression state.
 */

var LENGTH_CODES  = 29;
/* number of length codes, not counting the special END_BLOCK code */

var LITERALS      = 256;
/* number of literal bytes 0..255 */

var L_CODES       = LITERALS + 1 + LENGTH_CODES;
/* number of Literal or Length codes, including the END_BLOCK code */

var D_CODES       = 30;
/* number of distance codes */

var BL_CODES      = 19;
/* number of codes used to transfer the bit lengths */

var HEAP_SIZE     = 2 * L_CODES + 1;
/* maximum heap size */

var MAX_BITS      = 15;
/* All codes must not exceed MAX_BITS bits */

var Buf_size      = 16;
/* size of bit buffer in bi_buf */


/* ===========================================================================
 * Constants
 */

var MAX_BL_BITS = 7;
/* Bit length codes must not exceed MAX_BL_BITS bits */

var END_BLOCK   = 256;
/* end of block literal code */

var REP_3_6     = 16;
/* repeat previous bit length 3-6 times (2 bits of repeat count) */

var REPZ_3_10   = 17;
/* repeat a zero length 3-10 times (3 bits of repeat count) */

var REPZ_11_138 = 18;
/* repeat a zero length 11-138 times (7 bits of repeat count) */

/* eslint-disable comma-spacing,array-bracket-spacing */
var extra_lbits =   /* extra bits for each length code */
  [0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0];

var extra_dbits =   /* extra bits for each distance code */
  [0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13];

var extra_blbits =  /* extra bits for each bit length code */
  [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7];

var bl_order =
  [16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];
/* eslint-enable comma-spacing,array-bracket-spacing */

/* The lengths of the bit length codes are sent in order of decreasing
 * probability, to avoid transmitting the lengths for unused bit length codes.
 */

/* ===========================================================================
 * Local data. These are initialized only once.
 */

// We pre-fill arrays with 0 to avoid uninitialized gaps

var DIST_CODE_LEN = 512; /* see definition of array dist_code below */

// !!!! Use flat array instead of structure: Freq = i*2, Len = i*2+1
var static_ltree  = new Array((L_CODES + 2) * 2);
zero(static_ltree);
/* The static literal tree. Since the bit lengths are imposed, there is no
 * need for the L_CODES extra codes used during heap construction. However
 * The codes 286 and 287 are needed to build a canonical tree (see _tr_init
 * below).
 */

var static_dtree  = new Array(D_CODES * 2);
zero(static_dtree);
/* The static distance tree. (Actually a trivial tree since all codes use
 * 5 bits.)
 */

var _dist_code    = new Array(DIST_CODE_LEN);
zero(_dist_code);
/* Distance codes. The first 256 values correspond to the distances
 * 3 .. 258, the last 256 values correspond to the top 8 bits of
 * the 15 bit distances.
 */

var _length_code  = new Array(MAX_MATCH - MIN_MATCH + 1);
zero(_length_code);
/* length code for each normalized match length (0 == MIN_MATCH) */

var base_length   = new Array(LENGTH_CODES);
zero(base_length);
/* First normalized length for each code (0 = MIN_MATCH) */

var base_dist     = new Array(D_CODES);
zero(base_dist);
/* First normalized distance for each code (0 = distance of 1) */


/* Descriptor of a static (fixed) Huffman tree, shared by every stream. */
function StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) {

  this.static_tree  = static_tree;  /* static tree or NULL */
  this.extra_bits   = extra_bits;   /* extra bits for each code or NULL */
  this.extra_base   = extra_base;   /* base index for extra_bits */
  this.elems        = elems;        /* max number of elements in the tree */
  this.max_length   = max_length;   /* max bit length for the codes */

  // show if `static_tree` has data or dummy - needed for monomorphic objects
  this.has_stree    = static_tree && static_tree.length;
}


var static_l_desc;
var static_d_desc;
var static_bl_desc;


/* Descriptor of a dynamic Huffman tree, rebuilt for each block. */
function TreeDesc(dyn_tree, stat_desc) {
  this.dyn_tree = dyn_tree;     /* the dynamic tree */
  this.max_code = 0;            /* largest code with non zero frequency */
  this.stat_desc = stat_desc;   /* the corresponding static tree */
}



/* Map a match distance to its distance code (0..29); distances >= 256 are
 * looked up by their top 8 bits in the second half of _dist_code. */
function d_code(dist) {
  return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)];
}


/* ===========================================================================
 * Output a short LSB first on the stream.
 * IN assertion: there is enough room in pendingBuf.
10286 */ 10287 function put_short(s, w) { 10288 // put_byte(s, (uch)((w) & 0xff)); 10289 // put_byte(s, (uch)((ush)(w) >> 8)); 10290 s.pending_buf[s.pending++] = (w) & 0xff; 10291 s.pending_buf[s.pending++] = (w >>> 8) & 0xff; 10292 } 10293 10294 10295 /* =========================================================================== 10296 * Send a value on a given number of bits. 10297 * IN assertion: length <= 16 and value fits in length bits. 10298 */ 10299 function send_bits(s, value, length) { 10300 if (s.bi_valid > (Buf_size - length)) { 10301 s.bi_buf |= (value << s.bi_valid) & 0xffff; 10302 put_short(s, s.bi_buf); 10303 s.bi_buf = value >> (Buf_size - s.bi_valid); 10304 s.bi_valid += length - Buf_size; 10305 } else { 10306 s.bi_buf |= (value << s.bi_valid) & 0xffff; 10307 s.bi_valid += length; 10308 } 10309 } 10310 10311 10312 function send_code(s, c, tree) { 10313 send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/); 10314 } 10315 10316 10317 /* =========================================================================== 10318 * Reverse the first len bits of a code, using straightforward code (a faster 10319 * method would use a table) 10320 * IN assertion: 1 <= len <= 15 10321 */ 10322 function bi_reverse(code, len) { 10323 var res = 0; 10324 do { 10325 res |= code & 1; 10326 code >>>= 1; 10327 res <<= 1; 10328 } while (--len > 0); 10329 return res >>> 1; 10330 } 10331 10332 10333 /* =========================================================================== 10334 * Flush the bit buffer, keeping at most 7 bits in it. 
 */
function bi_flush(s) {
  if (s.bi_valid === 16) {
    /* Exactly full: write both bytes. */
    put_short(s, s.bi_buf);
    s.bi_buf = 0;
    s.bi_valid = 0;

  } else if (s.bi_valid >= 8) {
    /* At least one whole byte: write the low byte, keep the rest. */
    s.pending_buf[s.pending++] = s.bi_buf & 0xff;
    s.bi_buf >>= 8;
    s.bi_valid -= 8;
  }
}


/* ===========================================================================
 * Compute the optimal bit lengths for a tree and update the total bit length
 * for the current block.
 * IN assertion: the fields freq and dad are set, heap[heap_max] and
 *    above are the tree nodes sorted by increasing frequency.
 * OUT assertions: the field len is set to the optimal bit length, the
 *     array bl_count contains the frequencies for each bit length.
 *     The length opt_len is updated; static_len is also updated if stree is
 *     not null.
 */
function gen_bitlen(s, desc)
//    deflate_state *s;
//    tree_desc *desc;    /* the tree descriptor */
{
  var tree            = desc.dyn_tree;
  var max_code        = desc.max_code;
  var stree           = desc.stat_desc.static_tree;
  var has_stree       = desc.stat_desc.has_stree;
  var extra           = desc.stat_desc.extra_bits;
  var base            = desc.stat_desc.extra_base;
  var max_length      = desc.stat_desc.max_length;
  var h;              /* heap index */
  var n, m;           /* iterate over the tree elements */
  var bits;           /* bit length */
  var xbits;          /* extra bits */
  var f;              /* frequency */
  var overflow = 0;   /* number of elements with bit length too large */

  for (bits = 0; bits <= MAX_BITS; bits++) {
    s.bl_count[bits] = 0;
  }

  /* In a first pass, compute the optimal bit lengths (which may
   * overflow in the case of the bit length tree).
   */
  tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */

  for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {
    n = s.heap[h];
    /* A node's depth is its father's depth plus one. */
    bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1;
    if (bits > max_length) {
      bits = max_length;
      overflow++;
    }
    tree[n * 2 + 1]/*.Len*/ = bits;
    /* We overwrite tree[n].Dad which is no longer needed */

    if (n > max_code) { continue; } /* not a leaf node */

    s.bl_count[bits]++;
    xbits = 0;
    if (n >= base) {
      xbits = extra[n - base];
    }
    f = tree[n * 2]/*.Freq*/;
    s.opt_len += f * (bits + xbits);
    if (has_stree) {
      s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);
    }
  }
  if (overflow === 0) { return; }

  // Trace((stderr,"\nbit length overflow\n"));
  /* This happens for example on obj2 and pic of the Calgary corpus */

  /* Find the first bit length which could increase: */
  do {
    bits = max_length - 1;
    while (s.bl_count[bits] === 0) { bits--; }
    s.bl_count[bits]--;        /* move one leaf down the tree */
    s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */
    s.bl_count[max_length]--;
    /* The brother of the overflow item also moves one step up,
     * but this does not affect bl_count[max_length]
     */
    overflow -= 2;
  } while (overflow > 0);

  /* Now recompute all bit lengths, scanning in increasing frequency.
   * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
   * lengths instead of fixing only the wrong ones. This idea is taken
   * from 'ar' written by Haruhiko Okumura.)
   */
  for (bits = max_length; bits !== 0; bits--) {
    n = s.bl_count[bits];
    while (n !== 0) {
      m = s.heap[--h];
      if (m > max_code) { continue; }
      if (tree[m * 2 + 1]/*.Len*/ !== bits) {
        // Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
        s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/;
        tree[m * 2 + 1]/*.Len*/ = bits;
      }
      n--;
    }
  }
}


/* ===========================================================================
 * Generate the codes for a given tree and bit counts (which need not be
 * optimal).
 * IN assertion: the array bl_count contains the bit length statistics for
 * the given tree and the field len is set for all tree elements.
 * OUT assertion: the field code is set for all tree elements of non
 *     zero code length.
 */
function gen_codes(tree, max_code, bl_count)
//    ct_data *tree;             /* the tree to decorate */
//    int max_code;              /* largest code with non zero frequency */
//    ushf *bl_count;            /* number of codes at each bit length */
{
  var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */
  var code = 0;              /* running code value */
  var bits;                  /* bit index */
  var n;                     /* code index */

  /* The distribution counts are first used to generate the code values
   * without bit reversal.
   */
  for (bits = 1; bits <= MAX_BITS; bits++) {
    next_code[bits] = code = (code + bl_count[bits - 1]) << 1;
  }
  /* Check that the bit counts in bl_count are consistent. The last code
   * must be all ones.
   */
  //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
  //        "inconsistent bit counts");
  //Tracev((stderr,"\ngen_codes: max_code %d ", max_code));

  for (n = 0; n <= max_code; n++) {
    var len = tree[n * 2 + 1]/*.Len*/;
    if (len === 0) { continue; }
    /* Now reverse the bits */
    tree[n * 2]/*.Code*/ = bi_reverse(next_code[len]++, len);

    //Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
    //     n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
  }
}


/* ===========================================================================
 * Initialize the various 'constant' tables.
 */
function tr_static_init() {
  var n;        /* iterates over tree elements */
  var bits;     /* bit counter */
  var length;   /* length value */
  var code;     /* code value */
  var dist;     /* distance index */
  var bl_count = new Array(MAX_BITS + 1);
  /* number of codes at each bit length for an optimal tree */

  // do check in _tr_init()
  //if (static_init_done) return;

  /* For some embedded targets, global variables are not initialized: */
/*#ifdef NO_INIT_GLOBAL_POINTERS
  static_l_desc.static_tree = static_ltree;
  static_l_desc.extra_bits = extra_lbits;
  static_d_desc.static_tree = static_dtree;
  static_d_desc.extra_bits = extra_dbits;
  static_bl_desc.extra_bits = extra_blbits;
#endif*/

  /* Initialize the mapping length (0..255) -> length code (0..28) */
  length = 0;
  for (code = 0; code < LENGTH_CODES - 1; code++) {
    base_length[code] = length;
    for (n = 0; n < (1 << extra_lbits[code]); n++) {
      _length_code[length++] = code;
    }
  }
  //Assert (length == 256, "tr_static_init: length != 256");
  /* Note that the length 255 (match length 258) can be represented
   * in two different ways: code 284 + 5 bits or code 285, so we
   * overwrite length_code[255] to use the best encoding:
   */
  _length_code[length - 1] = code;

  /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
  dist = 0;
  for (code = 0; code < 16; code++) {
    base_dist[code] = dist;
    for (n = 0; n < (1 << extra_dbits[code]); n++) {
      _dist_code[dist++] = code;
    }
  }
  //Assert (dist == 256, "tr_static_init: dist != 256");
  dist >>= 7; /* from now on, all distances are divided by 128 */
  for (; code < D_CODES; code++) {
    base_dist[code] = dist << 7;
    for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {
      _dist_code[256 + dist++] = code;
    }
  }
  //Assert (dist == 256, "tr_static_init: 256+dist != 512");

  /* Construct the codes of the static literal tree */
  for (bits = 0; bits <= MAX_BITS; bits++) {
    bl_count[bits] = 0;
  }

  n = 0;
  while (n <= 143) {
    static_ltree[n * 2 + 1]/*.Len*/ = 8;
    n++;
    bl_count[8]++;
  }
  while (n <= 255) {
    static_ltree[n * 2 + 1]/*.Len*/ = 9;
    n++;
    bl_count[9]++;
  }
  while (n <= 279) {
    static_ltree[n * 2 + 1]/*.Len*/ = 7;
    n++;
    bl_count[7]++;
  }
  while (n <= 287) {
    static_ltree[n * 2 + 1]/*.Len*/ = 8;
    n++;
    bl_count[8]++;
  }
  /* Codes 286 and 287 do not exist, but we must include them in the
   * tree construction to get a canonical Huffman tree (longest code
   * all ones)
   */
  gen_codes(static_ltree, L_CODES + 1, bl_count);

  /* The static distance tree is trivial: */
  for (n = 0; n < D_CODES; n++) {
    static_dtree[n * 2 + 1]/*.Len*/ = 5;
    static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5);
  }

  // Now data ready and we can init static trees
  static_l_desc  = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);
  static_d_desc  = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS);
  static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS);

  //static_init_done = true;
}


/* ===========================================================================
 * Initialize a new block.
 */
function init_block(s) {
  var n; /* iterates over tree elements */

  /* Initialize the trees. */
  for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; }
  for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; }
  for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; }

  /* END_BLOCK is always present exactly once per block. */
  s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;
  s.opt_len = s.static_len = 0;
  s.last_lit = s.matches = 0;
}


/* ===========================================================================
 * Flush the bit buffer and align the output on a byte boundary
 */
function bi_windup(s)
{
  if (s.bi_valid > 8) {
    put_short(s, s.bi_buf);
  } else if (s.bi_valid > 0) {
    //put_byte(s, (Byte)s->bi_buf);
    s.pending_buf[s.pending++] = s.bi_buf;
  }
  s.bi_buf = 0;
  s.bi_valid = 0;
}

/* ===========================================================================
 * Copy a stored block, storing first the length and its
 * one's complement if requested.
10631 */ 10632 function copy_block(s, buf, len, header) 10633 //DeflateState *s; 10634 //charf *buf; /* the input data */ 10635 //unsigned len; /* its length */ 10636 //int header; /* true if block header must be written */ 10637 { 10638 bi_windup(s); /* align on byte boundary */ 10639 10640 if (header) { 10641 put_short(s, len); 10642 put_short(s, ~len); 10643 } 10644 // while (len--) { 10645 // put_byte(s, *buf++); 10646 // } 10647 utils.arraySet(s.pending_buf, s.window, buf, len, s.pending); 10648 s.pending += len; 10649 } 10650 10651 /* =========================================================================== 10652 * Compares to subtrees, using the tree depth as tie breaker when 10653 * the subtrees have equal frequency. This minimizes the worst case length. 10654 */ 10655 function smaller(tree, n, m, depth) { 10656 var _n2 = n * 2; 10657 var _m2 = m * 2; 10658 return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ || 10659 (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m])); 10660 } 10661 10662 /* =========================================================================== 10663 * Restore the heap property by moving down the tree starting at node k, 10664 * exchanging a node with the smallest of its two sons if necessary, stopping 10665 * when the heap property is re-established (each father smaller than its 10666 * two sons). 
 */
function pqdownheap(s, tree, k)
//    deflate_state *s;
//    ct_data *tree;  /* the tree to restore */
//    int k;          /* node to move down */
{
  var v = s.heap[k];
  var j = k << 1;  /* left son of k */
  while (j <= s.heap_len) {
    /* Set j to the smallest of the two sons: */
    if (j < s.heap_len &&
      smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {
      j++;
    }
    /* Exit if v is smaller than both sons */
    if (smaller(tree, v, s.heap[j], s.depth)) { break; }

    /* Exchange v with the smallest son */
    s.heap[k] = s.heap[j];
    k = j;

    /* And continue down the tree, setting j to the left son of k */
    j <<= 1;
  }
  s.heap[k] = v;
}


// inlined manually
// var SMALLEST = 1;

/* ===========================================================================
 * Send the block data compressed using the given Huffman trees
 */
function compress_block(s, ltree, dtree)
//    deflate_state *s;
//    const ct_data *ltree; /* literal tree */
//    const ct_data *dtree; /* distance tree */
{
  var dist;           /* distance of matched string */
  var lc;             /* match length or unmatched char (if dist == 0) */
  var lx = 0;         /* running index in l_buf */
  var code;           /* the code to send */
  var extra;          /* number of extra bits to send */

  if (s.last_lit !== 0) {
    do {
      /* Distances are stored big-endian as two bytes in d_buf. */
      dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]);
      lc = s.pending_buf[s.l_buf + lx];
      lx++;

      if (dist === 0) {
        send_code(s, lc, ltree); /* send a literal byte */
        //Tracecv(isgraph(lc), (stderr," '%c' ", lc));
      } else {
        /* Here, lc is the match length - MIN_MATCH */
        code = _length_code[lc];
        send_code(s, code + LITERALS + 1, ltree); /* send the length code */
        extra = extra_lbits[code];
        if (extra !== 0) {
          lc -= base_length[code];
          send_bits(s, lc, extra);       /* send the extra length bits */
        }
        dist--; /* dist is now the match distance - 1 */
        code = d_code(dist);
        //Assert (code < D_CODES, "bad d_code");

        send_code(s, code, dtree);       /* send the distance code */
        extra = extra_dbits[code];
        if (extra !== 0) {
          dist -= base_dist[code];
          send_bits(s, dist, extra);   /* send the extra distance bits */
        }
      } /* literal or match pair ? */

      /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
      //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,
      //       "pendingBuf overflow");

    } while (lx < s.last_lit);
  }

  send_code(s, END_BLOCK, ltree);
}


/* ===========================================================================
 * Construct one Huffman tree and assigns the code bit strings and lengths.
 * Update the total bit length for the current block.
 * IN assertion: the field freq is set for all tree elements.
 * OUT assertions: the fields len and code are set to the optimal bit length
 *     and corresponding code. The length opt_len is updated; static_len is
 *     also updated if stree is not null. The field max_code is set.
 */
function build_tree(s, desc)
//    deflate_state *s;
//    tree_desc *desc; /* the tree descriptor */
{
  var tree      = desc.dyn_tree;
  var stree     = desc.stat_desc.static_tree;
  var has_stree = desc.stat_desc.has_stree;
  var elems     = desc.stat_desc.elems;
  var n, m;          /* iterate over heap elements */
  var max_code = -1; /* largest code with non zero frequency */
  var node;          /* new node being created */

  /* Construct the initial heap, with least frequent element in
   * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
   * heap[0] is not used.
   */
  s.heap_len = 0;
  s.heap_max = HEAP_SIZE;

  for (n = 0; n < elems; n++) {
    if (tree[n * 2]/*.Freq*/ !== 0) {
      s.heap[++s.heap_len] = max_code = n;
      s.depth[n] = 0;

    } else {
      tree[n * 2 + 1]/*.Len*/ = 0;
    }
  }

  /* The pkzip format requires that at least one distance code exists,
   * and that at least one bit should be sent even if there is only one
   * possible code. So to avoid special checks later on we force at least
   * two codes of non zero frequency.
   */
  while (s.heap_len < 2) {
    node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);
    tree[node * 2]/*.Freq*/ = 1;
    s.depth[node] = 0;
    s.opt_len--;

    if (has_stree) {
      s.static_len -= stree[node * 2 + 1]/*.Len*/;
    }
    /* node is 0 or 1 so it does not have extra bits */
  }
  desc.max_code = max_code;

  /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
   * establish sub-heaps of increasing lengths:
   */
  for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); }

  /* Construct the Huffman tree by repeatedly combining the least two
   * frequent nodes.
   */
  node = elems;              /* next internal node of the tree */
  do {
    //pqremove(s, tree, n);  /* n = node of least frequency */
    /*** pqremove ***/
    n = s.heap[1/*SMALLEST*/];
    s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--];
    pqdownheap(s, tree, 1/*SMALLEST*/);
    /***/

    m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */

    s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */
    s.heap[--s.heap_max] = m;

    /* Create a new node father of n and m */
    tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/;
    s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;
    tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node;

    /* and insert the new node in the heap */
    s.heap[1/*SMALLEST*/] = node++;
    pqdownheap(s, tree, 1/*SMALLEST*/);

  } while (s.heap_len >= 2);

  s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];

  /* At this point, the fields freq and dad are set. We can now
   * generate the bit lengths.
   */
  gen_bitlen(s, desc);

  /* The field len is now set, we can generate the bit codes */
  gen_codes(tree, max_code, s.bl_count);
}


/* ===========================================================================
 * Scan a literal or distance tree to determine the frequencies of the codes
 * in the bit length tree.
 */
function scan_tree(s, tree, max_code)
//    deflate_state *s;
//    ct_data *tree;   /* the tree to be scanned */
//    int max_code;    /* and its largest code of non zero frequency */
{
  var n;                     /* iterates over all tree elements */
  var prevlen = -1;          /* last emitted length */
  var curlen;                /* length of current code */

  var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */

  var count = 0;             /* repeat count of the current code */
  var max_count = 7;         /* max repeat count */
  var min_count = 4;         /* min repeat count */

  if (nextlen === 0) {
    max_count = 138;
    min_count = 3;
  }
  tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */

  for (n = 0; n <= max_code; n++) {
    curlen = nextlen;
    nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;

    if (++count < max_count && curlen === nextlen) {
      continue;

    } else if (count < min_count) {
      /* Run too short to encode as a repeat: count each length directly. */
      s.bl_tree[curlen * 2]/*.Freq*/ += count;

    } else if (curlen !== 0) {

      if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; }
      s.bl_tree[REP_3_6 * 2]/*.Freq*/++;

    } else if (count <= 10) {
      s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++;

    } else {
      s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++;
    }

    count = 0;
    prevlen = curlen;

    if (nextlen === 0) {
      max_count = 138;
      min_count = 3;

    } else if (curlen === nextlen) {
      max_count = 6;
      min_count = 3;

    } else {
      max_count = 7;
      min_count = 4;
    }
  }
}


/* ===========================================================================
 * Send a literal or distance tree in compressed form, using the codes in
 * bl_tree.
 */
function send_tree(s, tree, max_code)
//    deflate_state *s;
//    ct_data *tree; /* the tree to be scanned */
//    int max_code;       /* and its largest code of non zero frequency */
{
  var n;                     /* iterates over all tree elements */
  var prevlen = -1;          /* last emitted length */
  var curlen;                /* length of current code */

  var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */

  var count = 0;             /* repeat count of the current code */
  var max_count = 7;         /* max repeat count */
  var min_count = 4;         /* min repeat count */

  /* tree[max_code+1].Len = -1; */  /* guard already set */
  if (nextlen === 0) {
    max_count = 138;
    min_count = 3;
  }

  for (n = 0; n <= max_code; n++) {
    curlen = nextlen;
    nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;

    if (++count < max_count && curlen === nextlen) {
      continue;

    } else if (count < min_count) {
      /* Run too short for a repeat code: emit each length individually. */
      do { send_code(s, curlen, s.bl_tree); } while (--count !== 0);

    } else if (curlen !== 0) {
      if (curlen !== prevlen) {
        send_code(s, curlen, s.bl_tree);
        count--;
      }
      //Assert(count >= 3 && count <= 6, " 3_6?");
      send_code(s, REP_3_6, s.bl_tree);
      send_bits(s, count - 3, 2);

    } else if (count <= 10) {
      send_code(s, REPZ_3_10, s.bl_tree);
      send_bits(s, count - 3, 3);

    } else {
      send_code(s, REPZ_11_138, s.bl_tree);
      send_bits(s, count - 11, 7);
    }

    count = 0;
    prevlen = curlen;
    if (nextlen === 0) {
      max_count = 138;
      min_count = 3;

    } else if (curlen === nextlen) {
      max_count = 6;
      min_count = 3;

    } else {
      max_count = 7;
      min_count = 4;
    }
  }
}


/* ===========================================================================
 * Construct the Huffman tree for the bit lengths and return the index in
 * bl_order of the last bit length code to send.
 */
function build_bl_tree(s) {
  var max_blindex;  /* index of last bit length code of non zero freq */

  /* Determine the bit length frequencies for literal and distance trees */
  scan_tree(s, s.dyn_ltree, s.l_desc.max_code);
  scan_tree(s, s.dyn_dtree, s.d_desc.max_code);

  /* Build the bit length tree: */
  build_tree(s, s.bl_desc);
  /* opt_len now includes the length of the tree representations, except
   * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
   */

  /* Determine the number of bit length codes to send. The pkzip format
   * requires that at least 4 bit length codes be sent. (appnote.txt says
   * 3 but the actual value used is 4.)
   */
  for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {
    if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) {
      break;
    }
  }
  /* Update opt_len to include the bit length tree and counts */
  s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
  //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
  //        s->opt_len, s->static_len));

  return max_blindex;
}


/* ===========================================================================
 * Send the header for a block using dynamic Huffman trees: the counts, the
 * lengths of the bit length codes, the literal tree and the distance tree.
 * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
 */
function send_all_trees(s, lcodes, dcodes, blcodes)
//    deflate_state *s;
//    int lcodes, dcodes, blcodes; /* number of codes for each tree */
{
  var rank;                    /* index in bl_order */

  //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
  //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
  //        "too many codes");
  //Tracev((stderr, "\nbl counts: "));
  send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */
  send_bits(s, dcodes - 1, 5);
  send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */
  for (rank = 0; rank < blcodes; rank++) {
    //Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
    /* Bit-length-code lengths are sent in bl_order, 3 bits each. */
    send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);
  }
  //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));

  send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */
  //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));

  send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */
  //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
}


/*
=========================================================================== 11058 * Check if the data type is TEXT or BINARY, using the following algorithm: 11059 * - TEXT if the two conditions below are satisfied: 11060 * a) There are no non-portable control characters belonging to the 11061 * "black list" (0..6, 14..25, 28..31). 11062 * b) There is at least one printable character belonging to the 11063 * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). 11064 * - BINARY otherwise. 11065 * - The following partially-portable control characters form a 11066 * "gray list" that is ignored in this detection algorithm: 11067 * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). 11068 * IN assertion: the fields Freq of dyn_ltree are set. 11069 */ 11070 function detect_data_type(s) { 11071 /* black_mask is the bit mask of black-listed bytes 11072 * set bits 0..6, 14..25, and 28..31 11073 * 0xf3ffc07f = binary 11110011111111111100000001111111 11074 */ 11075 var black_mask = 0xf3ffc07f; 11076 var n; 11077 11078 /* Check for non-textual ("black-listed") bytes. */ 11079 for (n = 0; n <= 31; n++, black_mask >>>= 1) { 11080 if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { 11081 return Z_BINARY; 11082 } 11083 } 11084 11085 /* Check for textual ("white-listed") bytes. */ 11086 if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 || 11087 s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) { 11088 return Z_TEXT; 11089 } 11090 for (n = 32; n < LITERALS; n++) { 11091 if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) { 11092 return Z_TEXT; 11093 } 11094 } 11095 11096 /* There are no "black-listed" or "white-listed" bytes: 11097 * this stream either is empty or has tolerated ("gray-listed") bytes only. 11098 */ 11099 return Z_BINARY; 11100 } 11101 11102 11103 var static_init_done = false; 11104 11105 /* =========================================================================== 11106 * Initialize the tree data structures for a new zlib stream. 
11107 */ 11108 function _tr_init(s) 11109 { 11110 11111 if (!static_init_done) { 11112 tr_static_init(); 11113 static_init_done = true; 11114 } 11115 11116 s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc); 11117 s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc); 11118 s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc); 11119 11120 s.bi_buf = 0; 11121 s.bi_valid = 0; 11122 11123 /* Initialize the first block of the first file: */ 11124 init_block(s); 11125 } 11126 11127 11128 /* =========================================================================== 11129 * Send a stored block 11130 */ 11131 function _tr_stored_block(s, buf, stored_len, last) 11132 //DeflateState *s; 11133 //charf *buf; /* input block */ 11134 //ulg stored_len; /* length of input block */ 11135 //int last; /* one if this is the last block for a file */ 11136 { 11137 send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */ 11138 copy_block(s, buf, stored_len, true); /* with header */ 11139 } 11140 11141 11142 /* =========================================================================== 11143 * Send one empty static block to give enough lookahead for inflate. 11144 * This takes 10 bits, of which 7 may remain in the bit buffer. 11145 */ 11146 function _tr_align(s) { 11147 send_bits(s, STATIC_TREES << 1, 3); 11148 send_code(s, END_BLOCK, static_ltree); 11149 bi_flush(s); 11150 } 11151 11152 11153 /* =========================================================================== 11154 * Determine the best encoding for the current block: dynamic trees, static 11155 * trees or store, and output the encoded block to the zip file. 
 */
function _tr_flush_block(s, buf, stored_len, last)
//DeflateState *s;
//charf *buf;       /* input block, or NULL if too old */
//ulg stored_len;   /* length of input block */
//int last;         /* one if this is the last block for a file */
{
  var opt_lenb, static_lenb;  /* opt_len and static_len in bytes */
  var max_blindex = 0;        /* index of last bit length code of non zero freq */

  /* Build the Huffman trees unless a stored block is forced */
  if (s.level > 0) {

    /* Check if the file is binary or text */
    if (s.strm.data_type === Z_UNKNOWN) {
      s.strm.data_type = detect_data_type(s);
    }

    /* Construct the literal and distance trees */
    build_tree(s, s.l_desc);
    // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
    //        s->static_len));

    build_tree(s, s.d_desc);
    // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
    //        s->static_len));
    /* At this point, opt_len and static_len are the total bit lengths of
     * the compressed block data, excluding the tree representations.
     */

    /* Build the bit length tree for the above two trees, and get the index
     * in bl_order of the last bit length code to send.
     */
    max_blindex = build_bl_tree(s);

    /* Determine the best encoding. Compute the block lengths in bytes:
     * +3 for the block type bits, +7 to round up to a whole byte. */
    opt_lenb = (s.opt_len + 3 + 7) >>> 3;
    static_lenb = (s.static_len + 3 + 7) >>> 3;

    // Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
    //        opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
    //        s->last_lit));

    if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }

  } else {
    // Assert(buf != (char*)0, "lost buf");
    opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
  }

  /* NOTE: in this JS port, buf === -1 stands in for C's NULL buf. */
  if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {
    /* 4: two words for the lengths */

    /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
     * Otherwise we can't have processed more than WSIZE input bytes since
     * the last block flush, because compression would have been
     * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
     * transform a block into a stored block.
     */
    _tr_stored_block(s, buf, stored_len, last);

  } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) {

    send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);
    compress_block(s, static_ltree, static_dtree);

  } else {
    send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);
    send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);
    compress_block(s, s.dyn_ltree, s.dyn_dtree);
  }
  // Assert (s->compressed_len == s->bits_sent, "bad compressed size");
  /* The above check is made mod 2^32, for files larger than 512 MB
   * and uLong implemented on 32 bits.
   */
  init_block(s);

  if (last) {
    bi_windup(s);
  }
  // Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
  //       s->compressed_len-7*last));
}

/* ===========================================================================
 * Save the match info and tally the frequency counts.
Return true if
 * the current block must be flushed.
 */
function _tr_tally(s, dist, lc)
//    deflate_state *s;
//    unsigned dist;  /* distance of matched string */
//    unsigned lc;    /* match length-MIN_MATCH or unmatched char (if dist==0) */
{
  //var out_length, in_length, dcode;

  /* Record the pair in pending_buf: the distance goes into the d_buf area
   * as two big-endian bytes, the length/literal into the l_buf area. */
  s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff;
  s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff;

  s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff;
  s.last_lit++;

  if (dist === 0) {
    /* lc is the unmatched char */
    s.dyn_ltree[lc * 2]/*.Freq*/++;
  } else {
    s.matches++;
    /* Here, lc is the match length - MIN_MATCH */
    dist--;             /* dist = match distance - 1 */
    //Assert((ush)dist < (ush)MAX_DIST(s) &&
    //       (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
    //       (ush)d_code(dist) < (ush)D_CODES,  "_tr_tally: bad match");

    s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++;
    s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;
  }

// (!) This block is disabled in zlib defaults,
// don't enable it for binary compatibility

//#ifdef TRUNCATE_BLOCK
//  /* Try to guess if it is profitable to stop the current block here */
//  if ((s.last_lit & 0x1fff) === 0 && s.level > 2) {
//    /* Compute an upper bound for the compressed length */
//    out_length = s.last_lit*8;
//    in_length = s.strstart - s.block_start;
//
//    for (dcode = 0; dcode < D_CODES; dcode++) {
//      out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]);
//    }
//    out_length >>>= 3;
//    //Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
//    //       s->last_lit, in_length, out_length,
//    //       100L - out_length*100L/in_length));
//    if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) {
//      return true;
//    }
//  }
//#endif

  return (s.last_lit === s.lit_bufsize - 1);
  /* We avoid equality with lit_bufsize because of wraparound at 64K
   * on 16 bit machines and because stored blocks are restricted to
   * 64K-1 bytes.
   */
}

exports._tr_init = _tr_init;
exports._tr_stored_block = _tr_stored_block;
exports._tr_flush_block = _tr_flush_block;
exports._tr_tally = _tr_tally;
exports._tr_align = _tr_align;

},{"../utils/common":41}],53:[function(require,module,exports){
'use strict';

// (C) 1995-2013 Jean-loup Gailly and Mark Adler
// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
11317 // 11318 // Permission is granted to anyone to use this software for any purpose, 11319 // including commercial applications, and to alter it and redistribute it 11320 // freely, subject to the following restrictions: 11321 // 11322 // 1. The origin of this software must not be misrepresented; you must not 11323 // claim that you wrote the original software. If you use this software 11324 // in a product, an acknowledgment in the product documentation would be 11325 // appreciated but is not required. 11326 // 2. Altered source versions must be plainly marked as such, and must not be 11327 // misrepresented as being the original software. 11328 // 3. This notice may not be removed or altered from any source distribution. 11329 11330 function ZStream() { 11331 /* next input byte */ 11332 this.input = null; // JS specific, because we have no pointers 11333 this.next_in = 0; 11334 /* number of bytes available at input */ 11335 this.avail_in = 0; 11336 /* total number of input bytes read so far */ 11337 this.total_in = 0; 11338 /* next output byte should be put there */ 11339 this.output = null; // JS specific, because we have no pointers 11340 this.next_out = 0; 11341 /* remaining free space at output */ 11342 this.avail_out = 0; 11343 /* total number of bytes output so far */ 11344 this.total_out = 0; 11345 /* last error message, NULL if no error */ 11346 this.msg = ''/*Z_NULL*/; 11347 /* not visible by applications */ 11348 this.state = null; 11349 /* best guess about the data type: binary or text */ 11350 this.data_type = 2/*Z_UNKNOWN*/; 11351 /* adler32 value of the uncompressed data */ 11352 this.adler = 0; 11353 } 11354 11355 module.exports = ZStream; 11356 11357 },{}],54:[function(require,module,exports){ 11358 'use strict'; 11359 module.exports = typeof setImmediate === 'function' ? 
setImmediate :
  // Fallback for environments without a native setImmediate: delegate to
  // setTimeout, preserving any extra arguments passed after the callback.
  function setImmediate() {
    var args = [].slice.apply(arguments);
    // setTimeout expects (fn, delay, ...args): insert the 0 ms delay
    // between the callback and its arguments.
    args.splice(1, 0, 0);
    setTimeout.apply(null, args);
  };

},{}]},{},[10])(10)
});