1 /* 2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. Oracle designates this 7 * particular file as subject to the "Classpath" exception as provided 8 * by Oracle in the LICENSE file that accompanied this code. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 */ 24 25 /* deflate.c -- compress data using the deflation algorithm 26 * Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler 27 * For conditions of distribution and use, see copyright notice in zlib.h 28 */ 29 30 /* 31 * ALGORITHM 32 * 33 * The "deflation" process depends on being able to identify portions 34 * of the input text which are identical to earlier input (within a 35 * sliding window trailing behind the input currently being processed). 36 * 37 * The most straightforward technique turns out to be the fastest for 38 * most input files: try all possible matches and select the longest. 39 * The key feature of this algorithm is that insertions into the string 40 * dictionary are very simple and thus fast, and deletions are avoided 41 * completely. 
Insertions are performed at each input character, whereas 42 * string matches are performed only when the previous match ends. So it 43 * is preferable to spend more time in matches to allow very fast string 44 * insertions and avoid deletions. The matching algorithm for small 45 * strings is inspired from that of Rabin & Karp. A brute force approach 46 * is used to find longer strings when a small match has been found. 47 * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze 48 * (by Leonid Broukhis). 49 * A previous version of this file used a more sophisticated algorithm 50 * (by Fiala and Greene) which is guaranteed to run in linear amortized 51 * time, but has a larger average cost, uses more memory and is patented. 52 * However the F&G algorithm may be faster for some highly redundant 53 * files if the parameter max_chain_length (described below) is too large. 54 * 55 * ACKNOWLEDGEMENTS 56 * 57 * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and 58 * I found it in 'freeze' written by Leonid Broukhis. 59 * Thanks to many people for bug reports and testing. 60 * 61 * REFERENCES 62 * 63 * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". 64 * Available in http://tools.ietf.org/html/rfc1951 65 * 66 * A description of the Rabin and Karp algorithm is given in the book 67 * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. 68 * 69 * Fiala,E.R., and Greene,D.H. 70 * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 71 * 72 */ 73 74 /* @(#) $Id$ */ 75 76 #include "deflate.h" 77 78 const char deflate_copyright[] = 79 " deflate 1.2.11 Copyright 1995-2017 Jean-loup Gailly and Mark Adler "; 80 /* 81 If you use the zlib library in a product, an acknowledgment is welcome 82 in the documentation of your product. If for some reason you cannot 83 include such an acknowledgment, I would appreciate that you keep this 84 copyright string in the executable of your product. 
 */

/* ===========================================================================
 *  Function prototypes.
 */
typedef enum {
    need_more,      /* block not completed, need more input or more output */
    block_done,     /* block flush performed */
    finish_started, /* finish started, need only more output at next deflate */
    finish_done     /* finish done, accept no more input or output */
} block_state;

typedef block_state (*compress_func) OF((deflate_state *s, int flush));
/* Compression function. Returns the block state after the call. */

local int deflateStateCheck      OF((z_streamp strm));
local void slide_hash     OF((deflate_state *s));
local void fill_window    OF((deflate_state *s));
local block_state deflate_stored OF((deflate_state *s, int flush));
local block_state deflate_fast   OF((deflate_state *s, int flush));
#ifndef FASTEST
local block_state deflate_slow   OF((deflate_state *s, int flush));
#endif
local block_state deflate_rle    OF((deflate_state *s, int flush));
local block_state deflate_huff   OF((deflate_state *s, int flush));
local void lm_init        OF((deflate_state *s));
local void putShortMSB    OF((deflate_state *s, uInt b));
local void flush_pending  OF((z_streamp strm));
local unsigned read_buf   OF((z_streamp strm, Bytef *buf, unsigned size));
#ifdef ASMV
#  pragma message("Assembler code may have bugs -- use at your own risk")
      void match_init OF((void)); /* asm code initialization */
      uInt longest_match  OF((deflate_state *s, IPos cur_match));
#else
local uInt longest_match  OF((deflate_state *s, IPos cur_match));
#endif

#ifdef ZLIB_DEBUG
local  void check_match OF((deflate_state *s, IPos start, IPos match,
                            int length));
#endif

/* ===========================================================================
 * Local data
 */

#define NIL 0
/* Tail of hash chains */

#ifndef TOO_FAR
#  define TOO_FAR 4096
#endif
/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */

/* Values for max_lazy_match, good_match and max_chain_length, depending on
 * the desired pack level (0..9). The values given below have been tuned to
 * exclude worst case performance for pathological files. Better values may be
 * found for specific files.
 */
typedef struct config_s {
   ush good_length; /* reduce lazy search above this match length */
   ush max_lazy;    /* do not perform lazy search above this match length */
   ush nice_length; /* quit search above this match length */
   ush max_chain;   /* maximum hash-chain links to follow for one match */
   compress_func func; /* compression routine used at this level */
} config;

#ifdef FASTEST
local const config configuration_table[2] = {
/*      good lazy nice chain */
/* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
/* 1 */ {4,    4,  8,    4, deflate_fast}};   /* max speed, no lazy matches */
#else
local const config configuration_table[10] = {
/*      good lazy nice chain */
/* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
/* 1 */ {4,    4,  8,    4, deflate_fast},    /* max speed, no lazy matches */
/* 2 */ {4,    5, 16,    8, deflate_fast},
/* 3 */ {4,    6, 32,   32, deflate_fast},

/* 4 */ {4,    4, 16,   16, deflate_slow},    /* lazy matches */
/* 5 */ {8,   16, 32,   32, deflate_slow},
/* 6 */ {8,   16, 128, 128, deflate_slow},
/* 7 */ {8,   32, 128, 256, deflate_slow},
/* 8 */ {32, 128, 258, 1024, deflate_slow},
/* 9 */ {32, 258, 258, 4096, deflate_slow}};  /* max compression */
#endif

/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
 * meaning.
 */

/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */
#define RANK(f) (((f) * 2) - ((f) > 4 ? 9 : 0))

/* ===========================================================================
 * Update a hash value with the given input byte
 * IN  assertion: all calls to UPDATE_HASH are made with consecutive input
 *    characters, so that a running hash key can be computed from the previous
 *    key instead of complete recalculation each time.
 */
#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)


/* ===========================================================================
 * Insert string str in the dictionary and set match_head to the previous head
 * of the hash chain (the most recent string with same hash key). Return
 * the previous length of the hash chain.
 * If this file is compiled with -DFASTEST, the compression level is forced
 * to 1, and no hash chains are maintained.
 * IN  assertion: all calls to INSERT_STRING are made with consecutive input
 *    characters and the first MIN_MATCH bytes of str are valid (except for
 *    the last MIN_MATCH-1 bytes of the input file).
 */
#ifdef FASTEST
#define INSERT_STRING(s, str, match_head) \
   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
    match_head = s->head[s->ins_h], \
    s->head[s->ins_h] = (Pos)(str))
#else
#define INSERT_STRING(s, str, match_head) \
   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
    match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
    s->head[s->ins_h] = (Pos)(str))
#endif

/* ===========================================================================
 * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
 * prev[] will be initialized on the fly.
215 */ 216 #define CLEAR_HASH(s) \ 217 s->head[s->hash_size-1] = NIL; \ 218 zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); 219 220 /* =========================================================================== 221 * Slide the hash table when sliding the window down (could be avoided with 32 222 * bit values at the expense of memory usage). We slide even when level == 0 to 223 * keep the hash table consistent if we switch back to level > 0 later. 224 */ 225 local void slide_hash(s) 226 deflate_state *s; 227 { 228 unsigned n, m; 229 Posf *p; 230 uInt wsize = s->w_size; 231 232 n = s->hash_size; 233 p = &s->head[n]; 234 do { 235 m = *--p; 236 *p = (Pos)(m >= wsize ? m - wsize : NIL); 237 } while (--n); 238 n = wsize; 239 #ifndef FASTEST 240 p = &s->prev[n]; 241 do { 242 m = *--p; 243 *p = (Pos)(m >= wsize ? m - wsize : NIL); 244 /* If n is not on any hash chain, prev[n] is garbage but 245 * its value will never be used. 246 */ 247 } while (--n); 248 #endif 249 } 250 251 /* ========================================================================= */ 252 int ZEXPORT deflateInit_(strm, level, version, stream_size) 253 z_streamp strm; 254 int level; 255 const char *version; 256 int stream_size; 257 { 258 return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, 259 Z_DEFAULT_STRATEGY, version, stream_size); 260 /* To do: ignore strm->next_in if we use it as window */ 261 } 262 263 /* ========================================================================= */ 264 int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, 265 version, stream_size) 266 z_streamp strm; 267 int level; 268 int method; 269 int windowBits; 270 int memLevel; 271 int strategy; 272 const char *version; 273 int stream_size; 274 { 275 deflate_state *s; 276 int wrap = 1; 277 static const char my_version[] = ZLIB_VERSION; 278 279 ushf *overlay; 280 /* We overlay pending_buf and d_buf+l_buf. 
This works since the average 281 * output size for (length,distance) codes is <= 24 bits. 282 */ 283 284 if (version == Z_NULL || version[0] != my_version[0] || 285 stream_size != sizeof(z_stream)) { 286 return Z_VERSION_ERROR; 287 } 288 if (strm == Z_NULL) return Z_STREAM_ERROR; 289 290 strm->msg = Z_NULL; 291 if (strm->zalloc == (alloc_func)0) { 292 #ifdef Z_SOLO 293 return Z_STREAM_ERROR; 294 #else 295 strm->zalloc = zcalloc; 296 strm->opaque = (voidpf)0; 297 #endif 298 } 299 if (strm->zfree == (free_func)0) 300 #ifdef Z_SOLO 301 return Z_STREAM_ERROR; 302 #else 303 strm->zfree = zcfree; 304 #endif 305 306 #ifdef FASTEST 307 if (level != 0) level = 1; 308 #else 309 if (level == Z_DEFAULT_COMPRESSION) level = 6; 310 #endif 311 312 if (windowBits < 0) { /* suppress zlib wrapper */ 313 wrap = 0; 314 windowBits = -windowBits; 315 } 316 #ifdef GZIP 317 else if (windowBits > 15) { 318 wrap = 2; /* write gzip wrapper instead */ 319 windowBits -= 16; 320 } 321 #endif 322 if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || 323 windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || 324 strategy < 0 || strategy > Z_FIXED || (windowBits == 8 && wrap != 1)) { 325 return Z_STREAM_ERROR; 326 } 327 if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */ 328 s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); 329 if (s == Z_NULL) return Z_MEM_ERROR; 330 strm->state = (struct internal_state FAR *)s; 331 s->strm = strm; 332 s->status = INIT_STATE; /* to pass state test in deflateReset() */ 333 334 s->wrap = wrap; 335 s->gzhead = Z_NULL; 336 s->w_bits = (uInt)windowBits; 337 s->w_size = 1 << s->w_bits; 338 s->w_mask = s->w_size - 1; 339 340 s->hash_bits = (uInt)memLevel + 7; 341 s->hash_size = 1 << s->hash_bits; 342 s->hash_mask = s->hash_size - 1; 343 s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); 344 345 s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); 346 s->prev = (Posf *) ZALLOC(strm, s->w_size, 
sizeof(Pos)); 347 s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); 348 349 s->high_water = 0; /* nothing written to s->window yet */ 350 351 s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ 352 353 overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); 354 s->pending_buf = (uchf *) overlay; 355 s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); 356 357 if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || 358 s->pending_buf == Z_NULL) { 359 s->status = FINISH_STATE; 360 strm->msg = ERR_MSG(Z_MEM_ERROR); 361 deflateEnd (strm); 362 return Z_MEM_ERROR; 363 } 364 s->d_buf = overlay + s->lit_bufsize/sizeof(ush); 365 s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; 366 367 s->level = level; 368 s->strategy = strategy; 369 s->method = (Byte)method; 370 371 return deflateReset(strm); 372 } 373 374 /* ========================================================================= 375 * Check for a valid deflate stream state. Return 0 if ok, 1 if not. 
 */
local int deflateStateCheck (strm)
    z_streamp strm;
{
    deflate_state *s;
    /* The stream itself must exist and carry an allocator pair. */
    if (strm == Z_NULL ||
        strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
        return 1;
    /* The state must point back at this stream and be in a known status. */
    s = strm->state;
    if (s == Z_NULL || s->strm != strm || (s->status != INIT_STATE &&
#ifdef GZIP
                                           s->status != GZIP_STATE &&
#endif
                                           s->status != EXTRA_STATE &&
                                           s->status != NAME_STATE &&
                                           s->status != COMMENT_STATE &&
                                           s->status != HCRC_STATE &&
                                           s->status != BUSY_STATE &&
                                           s->status != FINISH_STATE))
        return 1;
    return 0;
}

/* ========================================================================= */
int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
    z_streamp strm;
    const Bytef *dictionary;
    uInt  dictLength;
{
    deflate_state *s;
    uInt str, n;
    int wrap;
    unsigned avail;
    z_const unsigned char *next;

    if (deflateStateCheck(strm) || dictionary == Z_NULL)
        return Z_STREAM_ERROR;
    s = strm->state;
    wrap = s->wrap;
    /* A dictionary is only legal for raw deflate, or for zlib before any
     * compression has started and before any input has been consumed. */
    if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead)
        return Z_STREAM_ERROR;

    /* when using zlib wrappers, compute Adler-32 for provided dictionary */
    if (wrap == 1)
        strm->adler = adler32(strm->adler, dictionary, dictLength);
    s->wrap = 0;                    /* avoid computing Adler-32 in read_buf */

    /* if dictionary would fill window, just replace the history */
    if (dictLength >= s->w_size) {
        if (wrap == 0) {            /* already empty otherwise */
            CLEAR_HASH(s);
            s->strstart = 0;
            s->block_start = 0L;
            s->insert = 0;
        }
        dictionary += dictLength - s->w_size;  /* use the tail */
        dictLength = s->w_size;
    }

    /* insert dictionary into window and hash; the dictionary is fed to
     * fill_window() by temporarily pointing next_in/avail_in at it. */
    avail = strm->avail_in;
    next = strm->next_in;
    strm->avail_in = dictLength;
    strm->next_in = (z_const Bytef *)dictionary;
    fill_window(s);
    while (s->lookahead >= MIN_MATCH) {
        str = s->strstart;
        n = s->lookahead - (MIN_MATCH-1);
        do {
            UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
#ifndef FASTEST
            s->prev[str & s->w_mask] = s->head[s->ins_h];
#endif
            s->head[s->ins_h] = (Pos)str;
            str++;
        } while (--n);
        s->strstart = str;
        s->lookahead = MIN_MATCH-1;
        fill_window(s);
    }
    s->strstart += s->lookahead;
    s->block_start = (long)s->strstart;
    s->insert = s->lookahead;
    s->lookahead = 0;
    s->match_length = s->prev_length = MIN_MATCH-1;
    s->match_available = 0;
    /* restore the caller's input and the saved wrap mode */
    strm->next_in = next;
    strm->avail_in = avail;
    s->wrap = wrap;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
    z_streamp strm;
    Bytef *dictionary;
    uInt  *dictLength;
{
    deflate_state *s;
    uInt len;

    if (deflateStateCheck(strm))
        return Z_STREAM_ERROR;
    s = strm->state;
    /* Return up to one window's worth of the most recent history. */
    len = s->strstart + s->lookahead;
    if (len > s->w_size)
        len = s->w_size;
    if (dictionary != Z_NULL && len)
        zmemcpy(dictionary, s->window + s->strstart + s->lookahead - len, len);
    if (dictLength != Z_NULL)
        *dictLength = len;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateResetKeep (strm)
    z_streamp strm;
{
    deflate_state *s;

    if (deflateStateCheck(strm)) {
        return Z_STREAM_ERROR;
    }

    strm->total_in = strm->total_out = 0;
    strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
    strm->data_type = Z_UNKNOWN;

    s = (deflate_state *)strm->state;
    s->pending = 0;
    s->pending_out = s->pending_buf;

    s->high_water = 0;      /* reset to its inital value 0 */

    if (s->wrap < 0) {
        s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
    }
    /* restart in the state matching the wrapper that will be emitted */
    s->status =
#ifdef GZIP
        s->wrap == 2 ? GZIP_STATE :
#endif
        s->wrap ? INIT_STATE : BUSY_STATE;
    strm->adler =
#ifdef GZIP
        s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
#endif
        adler32(0L, Z_NULL, 0);
    s->last_flush = Z_NO_FLUSH;

    _tr_init(s);

    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateReset (strm)
    z_streamp strm;
{
    int ret;

    /* Full reset = header/trailer reset plus match-finder reset. */
    ret = deflateResetKeep(strm);
    if (ret == Z_OK)
        lm_init(strm->state);
    return ret;
}

/* ========================================================================= */
int ZEXPORT deflateSetHeader (strm, head)
    z_streamp strm;
    gz_headerp head;
{
    /* Only meaningful when a gzip wrapper (wrap == 2) will be written. */
    if (deflateStateCheck(strm) || strm->state->wrap != 2)
        return Z_STREAM_ERROR;
    strm->state->gzhead = head;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflatePending (strm, pending, bits)
    unsigned *pending;
    int *bits;
    z_streamp strm;
{
    /* Report bytes and bits buffered but not yet delivered to next_out.
     * (K&R declarations are matched by name, so the order above is fine.) */
    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
    if (pending != Z_NULL)
        *pending = strm->state->pending;
    if (bits != Z_NULL)
        *bits = strm->state->bi_valid;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflatePrime (strm, bits, value)
    z_streamp strm;
    int bits;
    int value;
{
    deflate_state *s;
    int put;

    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
    s = strm->state;
    /* Ensure flushing the primed bits cannot run into the symbol buffers. */
    if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
        return Z_BUF_ERROR;
    /* Insert value's low 'bits' bits, LSB first, flushing as buf fills. */
    do {
        put = Buf_size - s->bi_valid;
        if (put > bits)
            put = bits;
        s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid);
        s->bi_valid += put;
        _tr_flush_bits(s);
        value >>= put;
        bits -= put;
    } while (bits);
    return Z_OK;
}

/*
========================================================================= */ 594 int ZEXPORT deflateParams(strm, level, strategy) 595 z_streamp strm; 596 int level; 597 int strategy; 598 { 599 deflate_state *s; 600 compress_func func; 601 602 if (deflateStateCheck(strm)) return Z_STREAM_ERROR; 603 s = strm->state; 604 605 #ifdef FASTEST 606 if (level != 0) level = 1; 607 #else 608 if (level == Z_DEFAULT_COMPRESSION) level = 6; 609 #endif 610 if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) { 611 return Z_STREAM_ERROR; 612 } 613 func = configuration_table[s->level].func; 614 615 if ((strategy != s->strategy || func != configuration_table[level].func) && 616 s->high_water) { 617 /* Flush the last buffer: */ 618 int err = deflate(strm, Z_BLOCK); 619 if (err == Z_STREAM_ERROR) 620 return err; 621 if (strm->avail_out == 0) 622 return Z_BUF_ERROR; 623 } 624 if (s->level != level) { 625 if (s->level == 0 && s->matches != 0) { 626 if (s->matches == 1) 627 slide_hash(s); 628 else 629 CLEAR_HASH(s); 630 s->matches = 0; 631 } 632 s->level = level; 633 s->max_lazy_match = configuration_table[level].max_lazy; 634 s->good_match = configuration_table[level].good_length; 635 s->nice_match = configuration_table[level].nice_length; 636 s->max_chain_length = configuration_table[level].max_chain; 637 } 638 s->strategy = strategy; 639 return Z_OK; 640 } 641 642 /* ========================================================================= */ 643 int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain) 644 z_streamp strm; 645 int good_length; 646 int max_lazy; 647 int nice_length; 648 int max_chain; 649 { 650 deflate_state *s; 651 652 if (deflateStateCheck(strm)) return Z_STREAM_ERROR; 653 s = strm->state; 654 s->good_match = (uInt)good_length; 655 s->max_lazy_match = (uInt)max_lazy; 656 s->nice_match = nice_length; 657 s->max_chain_length = (uInt)max_chain; 658 return Z_OK; 659 } 660 661 /* 
=========================================================================
 * For the default windowBits of 15 and memLevel of 8, this function returns
 * a close to exact, as well as small, upper bound on the compressed size.
 * They are coded as constants here for a reason--if the #define's are
 * changed, then this function needs to be changed as well. The return
 * value for 15 and 8 only works for those exact settings.
 *
 * For any setting other than those defaults for windowBits and memLevel,
 * the value returned is a conservative worst case for the maximum expansion
 * resulting from using fixed blocks instead of stored blocks, which deflate
 * can emit on compressed data for some combinations of the parameters.
 *
 * This function could be more sophisticated to provide closer upper bounds for
 * every combination of windowBits and memLevel.  But even the conservative
 * upper bound of about 14% expansion does not seem onerous for output buffer
 * allocation.
 */
uLong ZEXPORT deflateBound(strm, sourceLen)
    z_streamp strm;
    uLong sourceLen;
{
    deflate_state *s;
    uLong complen, wraplen;

    /* conservative upper bound for compressed data */
    complen = sourceLen +
              ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;

    /* if can't get parameters, return conservative bound plus zlib wrapper */
    if (deflateStateCheck(strm))
        return complen + 6;

    /* compute wrapper length */
    s = strm->state;
    switch (s->wrap) {
    case 0:                                 /* raw deflate */
        wraplen = 0;
        break;
    case 1:                                 /* zlib wrapper */
        /* 2-byte header + 4-byte Adler-32, plus 4 bytes of dictionary id
         * if a preset dictionary is in use (strstart != 0). */
        wraplen = 6 + (s->strstart ? 4 : 0);
        break;
#ifdef GZIP
    case 2:                                 /* gzip wrapper */
        /* 10-byte header + 8-byte trailer, plus optional header fields */
        wraplen = 18;
        if (s->gzhead != Z_NULL) {          /* user-supplied gzip header */
            Bytef *str;
            if (s->gzhead->extra != Z_NULL)
                wraplen += 2 + s->gzhead->extra_len;
            str = s->gzhead->name;
            if (str != Z_NULL)
                do {
                    wraplen++;              /* name bytes incl. terminator */
                } while (*str++);
            str = s->gzhead->comment;
            if (str != Z_NULL)
                do {
                    wraplen++;              /* comment bytes incl. terminator */
                } while (*str++);
            if (s->gzhead->hcrc)
                wraplen += 2;
        }
        break;
#endif
    default:                                /* for compiler happiness */
        wraplen = 6;
    }

    /* if not default parameters, return conservative bound */
    if (s->w_bits != 15 || s->hash_bits != 8 + 7)
        return complen + wraplen;

    /* default settings: return tight bound for that case */
    return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
           (sourceLen >> 25) + 13 - 6 + wraplen;
}

/* =========================================================================
 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
 * IN  assertion: the stream state is correct and there is enough room in
 * pending_buf.
 */
local void putShortMSB (s, b)
    deflate_state *s;
    uInt b;
{
    put_byte(s, (Byte)(b >> 8));
    put_byte(s, (Byte)(b & 0xff));
}

/* =========================================================================
 * Flush as much pending output as possible. All deflate() output, except for
 * some deflate_stored() output, goes through this function so some
 * applications may wish to modify it to avoid allocating a large
 * strm->next_out buffer and copying into it. (See also read_buf()).
 */
local void flush_pending(strm)
    z_streamp strm;
{
    unsigned len;
    deflate_state *s = strm->state;

    /* Complete any pending bit-level output first, then copy as many whole
     * bytes as avail_out allows. */
    _tr_flush_bits(s);
    len = s->pending;
    if (len > strm->avail_out) len = strm->avail_out;
    if (len == 0) return;

    zmemcpy(strm->next_out, s->pending_out, len);
    strm->next_out  += len;
    s->pending_out  += len;
    strm->total_out += len;
    strm->avail_out -= len;
    s->pending      -= len;
    if (s->pending == 0) {
        /* buffer drained: rewind so new output starts at the beginning */
        s->pending_out = s->pending_buf;
    }
}

/* ===========================================================================
 * Update the header CRC with the bytes s->pending_buf[beg..s->pending - 1].
 */
#define HCRC_UPDATE(beg) \
    do { \
        if (s->gzhead->hcrc && s->pending > (beg)) \
            strm->adler = crc32(strm->adler, s->pending_buf + (beg), \
                                s->pending - (beg)); \
    } while (0)

/* ========================================================================= */
int ZEXPORT deflate (strm, flush)
    z_streamp strm;
    int flush;
{
    int old_flush; /* value of flush param for previous deflate call */
    deflate_state *s;

    if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) {
        return Z_STREAM_ERROR;
    }
    s = strm->state;

    if (strm->next_out == Z_NULL ||
        (strm->avail_in != 0 && strm->next_in == Z_NULL) ||
        (s->status == FINISH_STATE && flush != Z_FINISH)) {
        ERR_RETURN(strm, Z_STREAM_ERROR);
    }
    if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);

    old_flush = s->last_flush;
    s->last_flush = flush;

    /* Flush as much pending output as possible */
    if (s->pending != 0) {
        flush_pending(strm);
        if (strm->avail_out == 0) {
            /* Since avail_out is 0, deflate will be called again with
             * more output space, but possibly with both pending and
             * avail_in equal to zero. There won't be anything to do,
             * but this is not an error situation so make sure we
             * return OK instead of BUF_ERROR at next call of deflate:
             */
            s->last_flush = -1;
            return Z_OK;
        }

    /* Make sure there is something to do and avoid duplicate consecutive
     * flushes. For repeated and useless calls with Z_FINISH, we keep
     * returning Z_STREAM_END instead of Z_BUF_ERROR.
     */
    } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) &&
               flush != Z_FINISH) {
        ERR_RETURN(strm, Z_BUF_ERROR);
    }

    /* User must not provide more input after the first FINISH: */
    if (s->status == FINISH_STATE && strm->avail_in != 0) {
        ERR_RETURN(strm, Z_BUF_ERROR);
    }

    /* Write the header */
    if (s->status == INIT_STATE) {
        /* zlib header: CMF byte (method + window size) ... */
        uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
        uInt level_flags;

        /* ... plus FLG byte encoding an approximation of the level */
        if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
            level_flags = 0;
        else if (s->level < 6)
            level_flags = 1;
        else if (s->level == 6)
            level_flags = 2;
        else
            level_flags = 3;
        header |= (level_flags << 6);
        if (s->strstart != 0) header |= PRESET_DICT;
        header += 31 - (header % 31);   /* make the 16-bit value % 31 == 0 */

        putShortMSB(s, header);

        /* Save the adler32 of the preset dictionary: */
        if (s->strstart != 0) {
            putShortMSB(s, (uInt)(strm->adler >> 16));
            putShortMSB(s, (uInt)(strm->adler & 0xffff));
        }
        strm->adler = adler32(0L, Z_NULL, 0);
        s->status = BUSY_STATE;

        /* Compression must start with an empty pending buffer */
        flush_pending(strm);
        if (s->pending != 0) {
            s->last_flush = -1;
            return Z_OK;
        }
    }
#ifdef GZIP
    if (s->status == GZIP_STATE) {
        /* gzip header: magic bytes 31,139, then CM = 8 (deflate) */
        strm->adler = crc32(0L, Z_NULL, 0);
        put_byte(s, 31);
        put_byte(s, 139);
        put_byte(s, 8);
        if (s->gzhead == Z_NULL) {
            /* no user header: zero flags/mtime, default XFL and OS */
            put_byte(s, 0);
            put_byte(s, 0);
            put_byte(s, 0);
            put_byte(s, 0);
            put_byte(s, 0);
            put_byte(s, s->level == 9 ? 2 :
                     (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
                      4 : 0));
            put_byte(s, OS_CODE);
            s->status = BUSY_STATE;

            /* Compression must start with an empty pending buffer */
            flush_pending(strm);
            if (s->pending != 0) {
                s->last_flush = -1;
                return Z_OK;
            }
        }
        else {
            /* user header: FLG bits for text/hcrc/extra/name/comment */
            put_byte(s, (s->gzhead->text ? 1 : 0) +
                     (s->gzhead->hcrc ? 2 : 0) +
                     (s->gzhead->extra == Z_NULL ? 0 : 4) +
                     (s->gzhead->name == Z_NULL ? 0 : 8) +
                     (s->gzhead->comment == Z_NULL ? 0 : 16)
                     );
            put_byte(s, (Byte)(s->gzhead->time & 0xff));
            put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
            put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
            put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
            put_byte(s, s->level == 9 ? 2 :
                     (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
                      4 : 0));
            put_byte(s, s->gzhead->os & 0xff);
            if (s->gzhead->extra != Z_NULL) {
                put_byte(s, s->gzhead->extra_len & 0xff);
                put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
            }
            if (s->gzhead->hcrc)
                strm->adler = crc32(strm->adler, s->pending_buf,
                                    s->pending);
            s->gzindex = 0;
            s->status = EXTRA_STATE;
        }
    }
    if (s->status == EXTRA_STATE) {
        /* emit the extra field, flushing whenever pending_buf fills;
         * gzindex remembers how far we got across calls */
        if (s->gzhead->extra != Z_NULL) {
            ulg beg = s->pending;   /* start of bytes to update crc */
            uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex;
            while (s->pending + left > s->pending_buf_size) {
                uInt copy = s->pending_buf_size - s->pending;
                zmemcpy(s->pending_buf + s->pending,
                        s->gzhead->extra + s->gzindex, copy);
                s->pending = s->pending_buf_size;
                HCRC_UPDATE(beg);
                s->gzindex += copy;
                flush_pending(strm);
                if (s->pending != 0) {
                    s->last_flush = -1;
                    return Z_OK;
                }
                beg = 0;
                left -= copy;
            }
            zmemcpy(s->pending_buf + s->pending,
                    s->gzhead->extra + s->gzindex, left);
            s->pending += left;
            HCRC_UPDATE(beg);
            s->gzindex = 0;
        }
        s->status = NAME_STATE;
    }
    if (s->status == NAME_STATE) {
        /* emit the zero-terminated file name, flushing as needed */
        if (s->gzhead->name != Z_NULL) {
            ulg beg = s->pending;   /* start of bytes to update crc */
            int val;
            do {
                if (s->pending == s->pending_buf_size) {
                    HCRC_UPDATE(beg);
                    flush_pending(strm);
                    if (s->pending != 0) {
                        s->last_flush = -1;
                        return Z_OK;
                    }
                    beg = 0;
                }
                val = s->gzhead->name[s->gzindex++];
                put_byte(s, val);
            } while (val != 0);
            HCRC_UPDATE(beg);
            s->gzindex = 0;
        }
        s->status = COMMENT_STATE;
    }
    if (s->status == COMMENT_STATE) {
        /* emit the zero-terminated comment, flushing as needed */
        if (s->gzhead->comment != Z_NULL) {
            ulg beg = s->pending;   /* start of bytes to update crc */
            int val;
            do {
                if (s->pending == s->pending_buf_size) {
                    HCRC_UPDATE(beg);
                    flush_pending(strm);
                    if (s->pending != 0) {
                        s->last_flush = -1;
                        return Z_OK;
                    }
                    beg = 0;
                }
                val = s->gzhead->comment[s->gzindex++];
                put_byte(s, val);
            } while (val != 0);
            HCRC_UPDATE(beg);
        }
        s->status = HCRC_STATE;
    }
    if (s->status == HCRC_STATE) {
        /* emit the 16-bit header CRC if requested, then reset the CRC
         * so it covers only the compressed data from here on */
        if (s->gzhead->hcrc) {
            if (s->pending + 2 > s->pending_buf_size) {
                flush_pending(strm);
                if (s->pending != 0) {
                    s->last_flush = -1;
                    return Z_OK;
                }
            }
            put_byte(s, (Byte)(strm->adler & 0xff));
            put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
            strm->adler = crc32(0L, Z_NULL, 0);
        }
        s->status = BUSY_STATE;

        /* Compression must start with an empty pending buffer */
        flush_pending(strm);
        if (s->pending != 0) {
            s->last_flush = -1;
            return Z_OK;
        }
    }
#endif

    /* Start a new block or continue the current one.
     */
    if (strm->avail_in != 0 || s->lookahead != 0 ||
        (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
        block_state bstate;

        /* dispatch to the compression routine for the level/strategy */
        bstate = s->level == 0 ? deflate_stored(s, flush) :
                 s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
                 s->strategy == Z_RLE ? deflate_rle(s, flush) :
                 (*(configuration_table[s->level].func))(s, flush);

        if (bstate == finish_started || bstate == finish_done) {
            s->status = FINISH_STATE;
        }
        if (bstate == need_more || bstate == finish_started) {
            if (strm->avail_out == 0) {
                s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
            }
            return Z_OK;
            /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
             * of deflate should use the same flush parameter to make sure
             * that the flush is complete. So we don't have to output an
             * empty block here, this will be done at next call. This also
             * ensures that for a very small output buffer, we emit at most
             * one empty block.
             */
        }
        if (bstate == block_done) {
            if (flush == Z_PARTIAL_FLUSH) {
                _tr_align(s);
            } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
                _tr_stored_block(s, (char*)0, 0L, 0);
                /* For a full flush, this empty block will be recognized
                 * as a special marker by inflate_sync().
1054 */ 1055 if (flush == Z_FULL_FLUSH) { 1056 CLEAR_HASH(s); /* forget history */ 1057 if (s->lookahead == 0) { 1058 s->strstart = 0; 1059 s->block_start = 0L; 1060 s->insert = 0; 1061 } 1062 } 1063 } 1064 flush_pending(strm); 1065 if (strm->avail_out == 0) { 1066 s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ 1067 return Z_OK; 1068 } 1069 } 1070 } 1071 1072 if (flush != Z_FINISH) return Z_OK; 1073 if (s->wrap <= 0) return Z_STREAM_END; 1074 1075 /* Write the trailer */ 1076 #ifdef GZIP 1077 if (s->wrap == 2) { 1078 put_byte(s, (Byte)(strm->adler & 0xff)); 1079 put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); 1080 put_byte(s, (Byte)((strm->adler >> 16) & 0xff)); 1081 put_byte(s, (Byte)((strm->adler >> 24) & 0xff)); 1082 put_byte(s, (Byte)(strm->total_in & 0xff)); 1083 put_byte(s, (Byte)((strm->total_in >> 8) & 0xff)); 1084 put_byte(s, (Byte)((strm->total_in >> 16) & 0xff)); 1085 put_byte(s, (Byte)((strm->total_in >> 24) & 0xff)); 1086 } 1087 else 1088 #endif 1089 { 1090 putShortMSB(s, (uInt)(strm->adler >> 16)); 1091 putShortMSB(s, (uInt)(strm->adler & 0xffff)); 1092 } 1093 flush_pending(strm); 1094 /* If avail_out is zero, the application will call deflate again 1095 * to flush the rest. 1096 */ 1097 if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */ 1098 return s->pending != 0 ? Z_OK : Z_STREAM_END; 1099 } 1100 1101 /* ========================================================================= */ 1102 int ZEXPORT deflateEnd (strm) 1103 z_streamp strm; 1104 { 1105 int status; 1106 1107 if (deflateStateCheck(strm)) return Z_STREAM_ERROR; 1108 1109 status = strm->state->status; 1110 1111 /* Deallocate in reverse order of allocations: */ 1112 TRY_FREE(strm, strm->state->pending_buf); 1113 TRY_FREE(strm, strm->state->head); 1114 TRY_FREE(strm, strm->state->prev); 1115 TRY_FREE(strm, strm->state->window); 1116 1117 ZFREE(strm, strm->state); 1118 strm->state = Z_NULL; 1119 1120 return status == BUSY_STATE ? 
        Z_DATA_ERROR : Z_OK;  /* freed while mid-stream: report as data error */
}

/* =========================================================================
 * Copy the source state to the destination state.
 * To simplify the source, this is not supported for 16-bit MSDOS (which
 * doesn't have enough memory anyway to duplicate compression states).
 */
int ZEXPORT deflateCopy (dest, source)
    z_streamp dest;
    z_streamp source;
{
#ifdef MAXSEG_64K
    return Z_STREAM_ERROR;
#else
    deflate_state *ds;
    deflate_state *ss;
    ushf *overlay;


    if (deflateStateCheck(source) || dest == Z_NULL) {
        return Z_STREAM_ERROR;
    }

    ss = source->state;

    /* Shallow-copy the stream struct first; the state pointer is replaced
     * with a fresh allocation below.
     */
    zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));

    ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
    if (ds == Z_NULL) return Z_MEM_ERROR;
    dest->state = (struct internal_state FAR *) ds;
    zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
    ds->strm = dest;

    /* pending_buf is an overlay: the same allocation also backs d_buf and
     * l_buf, whose interior pointers are re-derived below.
     */
    ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
    ds->prev   = (Posf *)  ZALLOC(dest, ds->w_size, sizeof(Pos));
    ds->head   = (Posf *)  ZALLOC(dest, ds->hash_size, sizeof(Pos));
    overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
    ds->pending_buf = (uchf *) overlay;

    if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
        ds->pending_buf == Z_NULL) {
        /* deflateEnd frees whichever of the buffers did get allocated */
        deflateEnd (dest);
        return Z_MEM_ERROR;
    }
    /* following zmemcpy do not work for 16-bit MSDOS */
    zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
    zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
    zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
    zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);

    /* Re-base the pointers that point into the copied buffers */
    ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;

    ds->l_desc.dyn_tree = ds->dyn_ltree;
    ds->d_desc.dyn_tree = ds->dyn_dtree;
    ds->bl_desc.dyn_tree = ds->bl_tree;

    return Z_OK;
#endif /* MAXSEG_64K */
}

/* ===========================================================================
 * Read a new buffer from the current input stream, update the adler32
 * and total number of bytes read.  All deflate() input goes through
 * this function so some applications may wish to modify it to avoid
 * allocating a large strm->next_in buffer and copying from it.
 * (See also flush_pending()).
 * Returns the number of bytes actually copied (0 if no input is available).
 */
local unsigned read_buf(strm, buf, size)
    z_streamp strm;
    Bytef *buf;
    unsigned size;
{
    unsigned len = strm->avail_in;

    if (len > size) len = size;
    if (len == 0) return 0;

    strm->avail_in  -= len;

    zmemcpy(buf, strm->next_in, len);
    /* wrap == 1: zlib wrapper, running adler32 check;
     * wrap == 2: gzip wrapper, running crc32 check.
     */
    if (strm->state->wrap == 1) {
        strm->adler = adler32(strm->adler, buf, len);
    }
#ifdef GZIP
    else if (strm->state->wrap == 2) {
        strm->adler = crc32(strm->adler, buf, len);
    }
#endif
    strm->next_in  += len;
    strm->total_in += len;

    return len;
}

/* ===========================================================================
 * Initialize the "longest match" routines for a new zlib stream
 */
local void lm_init (s)
    deflate_state *s;
{
    s->window_size = (ulg)2L*s->w_size;

    CLEAR_HASH(s);

    /* Set the default configuration parameters:
     */
    s->max_lazy_match   = configuration_table[s->level].max_lazy;
    s->good_match       = configuration_table[s->level].good_length;
    s->nice_match       = configuration_table[s->level].nice_length;
    s->max_chain_length = configuration_table[s->level].max_chain;

    s->strstart = 0;
    s->block_start = 0L;
    s->lookahead = 0;
    s->insert = 0;
    s->match_length =
s->prev_length = MIN_MATCH-1;
    s->match_available = 0;
    s->ins_h = 0;
#ifndef FASTEST
#ifdef ASMV
    match_init(); /* initialize the asm code */
#endif
#endif
}

#ifndef FASTEST
/* ===========================================================================
 * Set match_start to the longest match starting at the given string and
 * return its length. Matches shorter or equal to prev_length are discarded,
 * in which case the result is equal to prev_length and match_start is
 * garbage.
 * IN assertions: cur_match is the head of the hash chain for the current
 *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
 * OUT assertion: the match length is not greater than s->lookahead.
 */
#ifndef ASMV
/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
 * match.S. The code will be functionally equivalent.
 */
local uInt longest_match(s, cur_match)
    deflate_state *s;
    IPos cur_match;                             /* current match */
{
    unsigned chain_length = s->max_chain_length;/* max hash chain length */
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                      /* matched string */
    register int len;                           /* length of current match */
    int best_len = (int)s->prev_length;         /* best match length so far */
    int nice_match = s->nice_match;             /* stop if match long enough */
    IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
        s->strstart - (IPos)MAX_DIST(s) : NIL;
    /* Stop when cur_match becomes <= limit. To simplify the code,
     * we prevent matches with the string of window index 0.
     */
    Posf *prev = s->prev;
    uInt wmask = s->w_mask;

#ifdef UNALIGNED_OK
    /* Compare two bytes at a time. Note: this is not always beneficial.
     * Try with and without -DUNALIGNED_OK to check.
     */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
    register ush scan_start = *(ushf*)scan;
    register ush scan_end   = *(ushf*)(scan+best_len-1);
#else
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;
    /* scan_end1/scan_end cache the last two bytes of the best match so far,
     * so most chain entries are rejected with two byte compares.
     */
    register Byte scan_end1  = scan[best_len-1];
    register Byte scan_end   = scan[best_len];
#endif

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    /* Do not waste too much time if we already have a good match: */
    if (s->prev_length >= s->good_match) {
        chain_length >>= 2;
    }
    /* Do not look for matches beyond the end of the input. This is necessary
     * to make deflate deterministic.
     */
    if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    do {
        Assert(cur_match < s->strstart, "no future");
        match = s->window + cur_match;

        /* Skip to next match if the match length cannot increase
         * or if the match length is less than 2.  Note that the checks below
         * for insufficient lookahead only occur occasionally for performance
         * reasons.  Therefore uninitialized memory will be accessed, and
         * conditional jumps will be made that depend on those values.
         * However the length of the match is limited to the lookahead, so
         * the output of deflate is not affected by the uninitialized values.
         */
#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
        /* This code assumes sizeof(unsigned short) == 2. Do not use
         * UNALIGNED_OK if your compiler uses a different size.
         */
        if (*(ushf*)(match+best_len-1) != scan_end ||
            *(ushf*)match != scan_start) continue;

        /* It is not necessary to compare scan[2] and match[2] since they are
         * always equal when the other bytes match, given that the hash keys
         * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
         * strstart+3, +5, ... up to strstart+257. We check for insufficient
         * lookahead only every 4th comparison; the 128th check will be made
         * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
         * necessary to put more guard bytes at the end of the window, or
         * to check more often for insufficient lookahead.
         */
        Assert(scan[2] == match[2], "scan[2]?");
        scan++, match++;
        do {
        } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 scan < strend);
        /* The funny "do {}" generates better code on most compilers */

        /* Here, scan <= window+strstart+257 */
        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
        if (*scan == *match) scan++;

        len = (MAX_MATCH - 1) - (int)(strend-scan);
        scan = strend - (MAX_MATCH-1);

#else /* UNALIGNED_OK */

        if (match[best_len]   != scan_end  ||
            match[best_len-1] != scan_end1 ||
            *match            != *scan     ||
            *++match          != scan[1])      continue;

        /* The check at best_len-1 can be removed because it will be made
         * again later. (This heuristic is not always a win.)
         * It is not necessary to compare scan[2] and match[2] since they
         * are always equal when the other bytes match, given that
         * the hash keys are equal and that HASH_BITS >= 8.
         */
        scan += 2, match++;
        Assert(*scan == *match, "match[2]?");

        /* We check for insufficient lookahead only every 8th comparison;
         * the 256th check will be made at strstart+258.
         */
        do {
        } while (*++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 scan < strend);

        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

        len = MAX_MATCH - (int)(strend - scan);
        scan = strend - MAX_MATCH;

#endif /* UNALIGNED_OK */

        if (len > best_len) {
            s->match_start = cur_match;
            best_len = len;
            if (len >= nice_match) break;
#ifdef UNALIGNED_OK
            scan_end = *(ushf*)(scan+best_len-1);
#else
            scan_end1  = scan[best_len-1];
            scan_end   = scan[best_len];
#endif
        }
    } while ((cur_match = prev[cur_match & wmask]) > limit
             && --chain_length != 0);

    /* Clamp the result to the available lookahead */
    if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
    return s->lookahead;
}
#endif /* ASMV */

#else /* FASTEST */

/* ---------------------------------------------------------------------------
 * Optimized version for FASTEST only
 */
local uInt longest_match(s, cur_match)
    deflate_state *s;
    IPos cur_match;                             /* current match */
{
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                      /* matched string */
    register int len;                           /* length of current match */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    Assert(cur_match < s->strstart, "no future");

    match = s->window + cur_match;

    /* Return failure if the match length is less than 2:
     */
    if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;

    /* The check at best_len-1 can be removed because it will be made
     * again later. (This heuristic is not always a win.)
     * It is not necessary to compare scan[2] and match[2] since they
     * are always equal when the other bytes match, given that
     * the hash keys are equal and that HASH_BITS >= 8.
     */
    scan += 2, match += 2;
    Assert(*scan == *match, "match[2]?");

    /* We check for insufficient lookahead only every 8th comparison;
     * the 256th check will be made at strstart+258.
     */
    do {
    } while (*++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             scan < strend);

    Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

    len = MAX_MATCH - (int)(strend - scan);

    if (len < MIN_MATCH) return MIN_MATCH - 1;

    s->match_start = cur_match;
    /* never report more than the available lookahead */
    return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
}

#endif /* FASTEST */

#ifdef ZLIB_DEBUG

#define EQUAL 0
/* result of memcmp for equal strings */

/* ===========================================================================
 * Check that the match at match_start is indeed a match.
 * Debug-only sanity check; compiled out unless ZLIB_DEBUG is defined.
 */
local void check_match(s, start, match, length)
    deflate_state *s;
    IPos start, match;
    int length;
{
    /* check that the match is indeed a match */
    if (zmemcmp(s->window + match,
                s->window + start, length) != EQUAL) {
        fprintf(stderr, " start %u, match %u, length %d\n",
                start, match, length);
        do {
            fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
        } while (--length != 0);
        z_error("invalid match");
    }
    if (z_verbose > 1) {
        fprintf(stderr,"\\[%d,%d]", start-match, length);
        do { putc(s->window[start++], stderr); } while (--length != 0);
    }
}
#else
#  define check_match(s, start, match, length)
#endif /* ZLIB_DEBUG */

/* ===========================================================================
 * Fill the window when the lookahead becomes insufficient.
 * Updates strstart and lookahead.
 *
 * IN assertion: lookahead < MIN_LOOKAHEAD
 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
 *    At least one byte has been read, or avail_in == 0; reads are
 *    performed for at least two bytes (required for the zip translate_eol
 *    option -- not supported here).
 */
local void fill_window(s)
    deflate_state *s;
{
    unsigned n;
    unsigned more;    /* Amount of free space at the end of the window.
                       */
    uInt wsize = s->w_size;

    Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");

    do {
        more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);

        /* Deal with !@#$% 64K limit: */
        if (sizeof(int) <= 2) {
            if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
                more = wsize;

            } else if (more == (unsigned)(-1)) {
                /* Very unlikely, but possible on 16 bit machine if
                 * strstart == 0 && lookahead == 1 (input done a byte at time)
                 */
                more--;
            }
        }

        /* If the window is almost full and there is insufficient lookahead,
         * move the upper half to the lower one to make room in the upper half.
         */
        if (s->strstart >= wsize+MAX_DIST(s)) {

            zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more);
            s->match_start -= wsize;
            s->strstart    -= wsize; /* we now have strstart >= MAX_DIST */
            s->block_start -= (long) wsize;
            slide_hash(s);
            more += wsize;
        }
        if (s->strm->avail_in == 0) break;

        /* If there was no sliding:
         *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
         *    more == window_size - lookahead - strstart
         * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
         * => more >= window_size - 2*WSIZE + 2
         * In the BIG_MEM or MMAP case (not yet supported),
         *   window_size == input_size + MIN_LOOKAHEAD  &&
         *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
         * Otherwise, window_size == 2*WSIZE so more >= 2.
         * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
         */
        Assert(more >= 2, "more < 2");

        n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
        s->lookahead += n;

        /* Initialize the hash value now that we have some input: */
        if (s->lookahead + s->insert >= MIN_MATCH) {
            uInt str = s->strstart - s->insert;
            s->ins_h = s->window[str];
            UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
#if MIN_MATCH != 3
            Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
            /* Insert the s->insert strings that were deferred (e.g. from a
             * prior deflate_stored run) into the hash table.
             */
            while (s->insert) {
                UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
#ifndef FASTEST
                s->prev[str & s->w_mask] = s->head[s->ins_h];
#endif
                s->head[s->ins_h] = (Pos)str;
                str++;
                s->insert--;
                if (s->lookahead + s->insert < MIN_MATCH)
                    break;
            }
        }
        /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
         * but this is not important since only literal bytes will be emitted.
         */

    } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);

    /* If the WIN_INIT bytes after the end of the current data have never been
     * written, then zero those bytes in order to avoid memory check reports of
     * the use of uninitialized (or uninitialised as Julian writes) bytes by
     * the longest match routines.  Update the high water mark for the next
     * time through here.  WIN_INIT is set to MAX_MATCH since the longest match
     * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
     */
    if (s->high_water < s->window_size) {
        ulg curr = s->strstart + (ulg)(s->lookahead);
        ulg init;

        if (s->high_water < curr) {
            /* Previous high water mark below current data -- zero WIN_INIT
             * bytes or up to end of window, whichever is less.
             */
            init = s->window_size - curr;
            if (init > WIN_INIT)
                init = WIN_INIT;
            zmemzero(s->window + curr, (unsigned)init);
            s->high_water = curr + init;
        }
        else if (s->high_water < (ulg)curr + WIN_INIT) {
            /* High water mark at or above current data, but below current data
             * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
             * to end of window, whichever is less.
             */
            init = (ulg)curr + WIN_INIT - s->high_water;
            if (init > s->window_size - s->high_water)
                init = s->window_size - s->high_water;
            zmemzero(s->window + s->high_water, (unsigned)init);
            s->high_water += init;
        }
    }

    Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
           "not enough room for search");
}

/* ===========================================================================
 * Flush the current block, with given end-of-file flag.
 * IN assertion: strstart is set to the end of the current match.
 */
#define FLUSH_BLOCK_ONLY(s, last) { \
   _tr_flush_block(s, (s->block_start >= 0L ? \
                   (charf *)&s->window[(unsigned)s->block_start] : \
                   (charf *)Z_NULL), \
                (ulg)((long)s->strstart - s->block_start), \
                (last)); \
   s->block_start = s->strstart; \
   flush_pending(s->strm); \
   Tracev((stderr,"[FLUSH]")); \
}

/* Same but force premature exit if necessary. */
#define FLUSH_BLOCK(s, last) { \
   FLUSH_BLOCK_ONLY(s, last); \
   if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
}

/* Maximum stored block length in deflate format (not including header). */
#define MAX_STORED 65535

/* Minimum of a and b. */
#define MIN(a, b) ((a) > (b) ? (b) : (a))

/* ===========================================================================
 * Copy without compression as much as possible from the input stream, return
 * the current block state.
 *
 * In case deflateParams() is used to later switch to a non-zero compression
 * level, s->matches (otherwise unused when storing) keeps track of the number
 * of hash table slides to perform. If s->matches is 1, then one hash table
 * slide will be done when switching. If s->matches is 2, the maximum value
 * allowed here, then the hash table will be cleared, since two or more slides
 * is the same as a clear.
 *
 * deflate_stored() is written to minimize the number of times an input byte is
 * copied. It is most efficient with large input and output buffers, which
 * maximizes the opportunites to have a single copy from next_in to next_out.
 */
local block_state deflate_stored(s, flush)
    deflate_state *s;
    int flush;
{
    /* Smallest worthy block size when not flushing or finishing. By default
     * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
     * large input and output buffers, the stored block size will be larger.
     */
    unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size);

    /* Copy as many min_block or larger stored blocks directly to next_out as
     * possible. If flushing, copy the remaining available input to next_out as
     * stored blocks, if there is enough space.
     */
    unsigned len, left, have, last = 0;
    unsigned used = s->strm->avail_in;
    do {
        /* Set len to the maximum size block that we can copy directly with the
         * available input data and output space. Set left to how much of that
         * would be copied from what's left in the window.
         */
        len = MAX_STORED;       /* maximum deflate stored block length */
        have = (s->bi_valid + 42) >> 3;         /* number of header bytes */
        if (s->strm->avail_out < have)          /* need room for header */
            break;
            /* maximum stored block length that will fit in avail_out: */
        have = s->strm->avail_out - have;
        left = s->strstart - s->block_start;    /* bytes left in window */
        if (len > (ulg)left + s->strm->avail_in)
            len = left + s->strm->avail_in;     /* limit len to the input */
        if (len > have)
            len = have;                         /* limit len to the output */

        /* If the stored block would be less than min_block in length, or if
         * unable to copy all of the available input when flushing, then try
         * copying to the window and the pending buffer instead. Also don't
         * write an empty block when flushing -- deflate() does that.
         */
        if (len < min_block && ((len == 0 && flush != Z_FINISH) ||
                                flush == Z_NO_FLUSH ||
                                len != left + s->strm->avail_in))
            break;

        /* Make a dummy stored block in pending to get the header bytes,
         * including any pending bits. This also updates the debugging counts.
         */
        last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0;
        _tr_stored_block(s, (char *)0, 0L, last);

        /* Replace the lengths in the dummy stored block with len.
         * The stored-block header carries LEN and its one's complement NLEN
         * (see RFC 1951), hence the ~len bytes.
         */
        s->pending_buf[s->pending - 4] = len;
        s->pending_buf[s->pending - 3] = len >> 8;
        s->pending_buf[s->pending - 2] = ~len;
        s->pending_buf[s->pending - 1] = ~len >> 8;

        /* Write the stored block header bytes. */
        flush_pending(s->strm);

#ifdef ZLIB_DEBUG
        /* Update debugging counts for the data about to be copied. */
        s->compressed_len += len << 3;
        s->bits_sent += len << 3;
#endif

        /* Copy uncompressed bytes from the window to next_out. */
        if (left) {
            if (left > len)
                left = len;
            zmemcpy(s->strm->next_out, s->window + s->block_start, left);
            s->strm->next_out += left;
            s->strm->avail_out -= left;
            s->strm->total_out += left;
            s->block_start += left;
            len -= left;
        }

        /* Copy uncompressed bytes directly from next_in to next_out, updating
         * the check value.
         */
        if (len) {
            read_buf(s->strm, s->strm->next_out, len);
            s->strm->next_out += len;
            s->strm->avail_out -= len;
            s->strm->total_out += len;
        }
    } while (last == 0);

    /* Update the sliding window with the last s->w_size bytes of the copied
     * data, or append all of the copied data to the existing window if less
     * than s->w_size bytes were copied. Also update the number of bytes to
     * insert in the hash tables, in the event that deflateParams() switches to
     * a non-zero compression level.
     */
    used -= s->strm->avail_in;      /* number of input bytes directly copied */
    if (used) {
        /* If any input was used, then no unused input remains in the window,
         * therefore s->block_start == s->strstart.
         */
        if (used >= s->w_size) {    /* supplant the previous history */
            s->matches = 2;         /* clear hash */
            zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
            s->strstart = s->w_size;
        }
        else {
            if (s->window_size - s->strstart <= used) {
                /* Slide the window down. */
                s->strstart -= s->w_size;
                zmemcpy(s->window, s->window + s->w_size, s->strstart);
                if (s->matches < 2)
                    s->matches++;   /* add a pending slide_hash() */
            }
            zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
            s->strstart += used;
        }
        s->block_start = s->strstart;
        s->insert += MIN(used, s->w_size - s->insert);
    }
    if (s->high_water < s->strstart)
        s->high_water = s->strstart;

    /* If the last block was written to next_out, then done. */
    if (last)
        return finish_done;

    /* If flushing and all input has been consumed, then done. */
    if (flush != Z_NO_FLUSH && flush != Z_FINISH &&
        s->strm->avail_in == 0 && (long)s->strstart == s->block_start)
        return block_done;

    /* Fill the window with any remaining input. */
    have = s->window_size - s->strstart - 1;
    if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
        /* Slide the window down. */
        s->block_start -= s->w_size;
        s->strstart -= s->w_size;
        zmemcpy(s->window, s->window + s->w_size, s->strstart);
        if (s->matches < 2)
            s->matches++;           /* add a pending slide_hash() */
        have += s->w_size;          /* more space now */
    }
    if (have > s->strm->avail_in)
        have = s->strm->avail_in;
    if (have) {
        read_buf(s->strm, s->window + s->strstart, have);
        s->strstart += have;
    }
    if (s->high_water < s->strstart)
        s->high_water = s->strstart;

    /* There was not enough avail_out to write a complete worthy or flushed
     * stored block to next_out. Write a stored block to pending instead, if we
     * have enough input for a worthy block, or if flushing and there is enough
     * room for the remaining input as a stored block in the pending buffer.
     */
    have = (s->bi_valid + 42) >> 3;         /* number of header bytes */
        /* maximum stored block length that will fit in pending: */
    have = MIN(s->pending_buf_size - have, MAX_STORED);
    min_block = MIN(have, s->w_size);
    left = s->strstart - s->block_start;
    if (left >= min_block ||
        ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH &&
         s->strm->avail_in == 0 && left <= have)) {
        len = MIN(left, have);
        last = flush == Z_FINISH && s->strm->avail_in == 0 &&
               len == left ? 1 : 0;
        _tr_stored_block(s, (charf *)s->window + s->block_start, len, last);
        s->block_start += len;
        flush_pending(s->strm);
    }

    /* We've done all we can with the available input and output. */
    return last ? finish_started : need_more;
}

/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 */
local block_state deflate_fast(s, flush)
    deflate_state *s;
    int flush;
{
    IPos hash_head;       /* head of the hash chain */
    int bflush;           /* set if current block must be flushed */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         * At this point we have always match_length < MIN_MATCH
         */
        if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head);
            /* longest_match() sets match_start */
        }
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->match_start, s->match_length);

            /* Emit a (distance, length) pair for the match */
            _tr_tally_dist(s, s->strstart - s->match_start,
                           s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;

            /* Insert new strings in the hash table only if the match length
             * is not too large. This saves time but degrades compression.
             */
#ifndef FASTEST
            if (s->match_length <= s->max_insert_length &&
                s->lookahead >= MIN_MATCH) {
                s->match_length--; /* string at strstart already in table */
                do {
                    s->strstart++;
                    INSERT_STRING(s, s->strstart, hash_head);
                    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
                     * always MIN_MATCH bytes ahead.
                     */
                } while (--s->match_length != 0);
                s->strstart++;
            } else
#endif
            {
                s->strstart += s->match_length;
                s->match_length = 0;
                s->ins_h = s->window[s->strstart];
                UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
                Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
                /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
                 * matter since it will be recomputed at next deflate call.
                 */
            }
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit (s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    s->insert = s->strstart < MIN_MATCH-1 ?
s->strstart : MIN_MATCH-1; 1937 if (flush == Z_FINISH) { 1938 FLUSH_BLOCK(s, 1); 1939 return finish_done; 1940 } 1941 if (s->last_lit) 1942 FLUSH_BLOCK(s, 0); 1943 return block_done; 1944 } 1945 1946 #ifndef FASTEST 1947 /* =========================================================================== 1948 * Same as above, but achieves better compression. We use a lazy 1949 * evaluation for matches: a match is finally adopted only if there is 1950 * no better match at the next window position. 1951 */ 1952 local block_state deflate_slow(s, flush) 1953 deflate_state *s; 1954 int flush; 1955 { 1956 IPos hash_head; /* head of hash chain */ 1957 int bflush; /* set if current block must be flushed */ 1958 1959 /* Process the input block. */ 1960 for (;;) { 1961 /* Make sure that we always have enough lookahead, except 1962 * at the end of the input file. We need MAX_MATCH bytes 1963 * for the next match, plus MIN_MATCH bytes to insert the 1964 * string following the next match. 1965 */ 1966 if (s->lookahead < MIN_LOOKAHEAD) { 1967 fill_window(s); 1968 if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { 1969 return need_more; 1970 } 1971 if (s->lookahead == 0) break; /* flush the current block */ 1972 } 1973 1974 /* Insert the string window[strstart .. strstart+2] in the 1975 * dictionary, and set hash_head to the head of the hash chain: 1976 */ 1977 hash_head = NIL; 1978 if (s->lookahead >= MIN_MATCH) { 1979 INSERT_STRING(s, s->strstart, hash_head); 1980 } 1981 1982 /* Find the longest match, discarding those <= prev_length. 1983 */ 1984 s->prev_length = s->match_length, s->prev_match = s->match_start; 1985 s->match_length = MIN_MATCH-1; 1986 1987 if (hash_head != NIL && s->prev_length < s->max_lazy_match && 1988 s->strstart - hash_head <= MAX_DIST(s)) { 1989 /* To simplify the code, we prevent matches with the string 1990 * of window index 0 (in particular we have to avoid a match 1991 * of the string with itself at the start of the input file). 
1992 */ 1993 s->match_length = longest_match (s, hash_head); 1994 /* longest_match() sets match_start */ 1995 1996 if (s->match_length <= 5 && (s->strategy == Z_FILTERED 1997 #if TOO_FAR <= 32767 1998 || (s->match_length == MIN_MATCH && 1999 s->strstart - s->match_start > TOO_FAR) 2000 #endif 2001 )) { 2002 2003 /* If prev_match is also MIN_MATCH, match_start is garbage 2004 * but we will ignore the current match anyway. 2005 */ 2006 s->match_length = MIN_MATCH-1; 2007 } 2008 } 2009 /* If there was a match at the previous step and the current 2010 * match is not better, output the previous match: 2011 */ 2012 if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { 2013 uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; 2014 /* Do not insert strings in hash table beyond this. */ 2015 2016 check_match(s, s->strstart-1, s->prev_match, s->prev_length); 2017 2018 _tr_tally_dist(s, s->strstart -1 - s->prev_match, 2019 s->prev_length - MIN_MATCH, bflush); 2020 2021 /* Insert in hash table all strings up to the end of the match. 2022 * strstart-1 and strstart are already inserted. If there is not 2023 * enough lookahead, the last two strings are not inserted in 2024 * the hash table. 2025 */ 2026 s->lookahead -= s->prev_length-1; 2027 s->prev_length -= 2; 2028 do { 2029 if (++s->strstart <= max_insert) { 2030 INSERT_STRING(s, s->strstart, hash_head); 2031 } 2032 } while (--s->prev_length != 0); 2033 s->match_available = 0; 2034 s->match_length = MIN_MATCH-1; 2035 s->strstart++; 2036 2037 if (bflush) FLUSH_BLOCK(s, 0); 2038 2039 } else if (s->match_available) { 2040 /* If there was no match at the previous position, output a 2041 * single literal. If there was a match but the current match 2042 * is longer, truncate the previous match to a single literal. 
2043 */ 2044 Tracevv((stderr,"%c", s->window[s->strstart-1])); 2045 _tr_tally_lit(s, s->window[s->strstart-1], bflush); 2046 if (bflush) { 2047 FLUSH_BLOCK_ONLY(s, 0); 2048 } 2049 s->strstart++; 2050 s->lookahead--; 2051 if (s->strm->avail_out == 0) return need_more; 2052 } else { 2053 /* There is no previous match to compare with, wait for 2054 * the next step to decide. 2055 */ 2056 s->match_available = 1; 2057 s->strstart++; 2058 s->lookahead--; 2059 } 2060 } 2061 Assert (flush != Z_NO_FLUSH, "no flush?"); 2062 if (s->match_available) { 2063 Tracevv((stderr,"%c", s->window[s->strstart-1])); 2064 _tr_tally_lit(s, s->window[s->strstart-1], bflush); 2065 s->match_available = 0; 2066 } 2067 s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; 2068 if (flush == Z_FINISH) { 2069 FLUSH_BLOCK(s, 1); 2070 return finish_done; 2071 } 2072 if (s->last_lit) 2073 FLUSH_BLOCK(s, 0); 2074 return block_done; 2075 } 2076 #endif /* FASTEST */ 2077 2078 /* =========================================================================== 2079 * For Z_RLE, simply look for runs of bytes, generate matches only of distance 2080 * one. Do not maintain a hash table. (It will be regenerated if this run of 2081 * deflate switches away from Z_RLE.) 2082 */ 2083 local block_state deflate_rle(s, flush) 2084 deflate_state *s; 2085 int flush; 2086 { 2087 int bflush; /* set if current block must be flushed */ 2088 uInt prev; /* byte at distance one to match */ 2089 Bytef *scan, *strend; /* scan goes up to strend for length of run */ 2090 2091 for (;;) { 2092 /* Make sure that we always have enough lookahead, except 2093 * at the end of the input file. We need MAX_MATCH bytes 2094 * for the longest run, plus one for the unrolled loop. 
2095 */ 2096 if (s->lookahead <= MAX_MATCH) { 2097 fill_window(s); 2098 if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) { 2099 return need_more; 2100 } 2101 if (s->lookahead == 0) break; /* flush the current block */ 2102 } 2103 2104 /* See how many times the previous byte repeats */ 2105 s->match_length = 0; 2106 if (s->lookahead >= MIN_MATCH && s->strstart > 0) { 2107 scan = s->window + s->strstart - 1; 2108 prev = *scan; 2109 if (prev == *++scan && prev == *++scan && prev == *++scan) { 2110 strend = s->window + s->strstart + MAX_MATCH; 2111 do { 2112 } while (prev == *++scan && prev == *++scan && 2113 prev == *++scan && prev == *++scan && 2114 prev == *++scan && prev == *++scan && 2115 prev == *++scan && prev == *++scan && 2116 scan < strend); 2117 s->match_length = MAX_MATCH - (uInt)(strend - scan); 2118 if (s->match_length > s->lookahead) 2119 s->match_length = s->lookahead; 2120 } 2121 Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); 2122 } 2123 2124 /* Emit match if have run of MIN_MATCH or longer, else emit literal */ 2125 if (s->match_length >= MIN_MATCH) { 2126 check_match(s, s->strstart, s->strstart - 1, s->match_length); 2127 2128 _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush); 2129 2130 s->lookahead -= s->match_length; 2131 s->strstart += s->match_length; 2132 s->match_length = 0; 2133 } else { 2134 /* No match, output a literal byte */ 2135 Tracevv((stderr,"%c", s->window[s->strstart])); 2136 _tr_tally_lit (s, s->window[s->strstart], bflush); 2137 s->lookahead--; 2138 s->strstart++; 2139 } 2140 if (bflush) FLUSH_BLOCK(s, 0); 2141 } 2142 s->insert = 0; 2143 if (flush == Z_FINISH) { 2144 FLUSH_BLOCK(s, 1); 2145 return finish_done; 2146 } 2147 if (s->last_lit) 2148 FLUSH_BLOCK(s, 0); 2149 return block_done; 2150 } 2151 2152 /* =========================================================================== 2153 * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. 
2154 * (It will be regenerated if this run of deflate switches away from Huffman.) 2155 */ 2156 local block_state deflate_huff(s, flush) 2157 deflate_state *s; 2158 int flush; 2159 { 2160 int bflush; /* set if current block must be flushed */ 2161 2162 for (;;) { 2163 /* Make sure that we have a literal to write. */ 2164 if (s->lookahead == 0) { 2165 fill_window(s); 2166 if (s->lookahead == 0) { 2167 if (flush == Z_NO_FLUSH) 2168 return need_more; 2169 break; /* flush the current block */ 2170 } 2171 } 2172 2173 /* Output a literal byte */ 2174 s->match_length = 0; 2175 Tracevv((stderr,"%c", s->window[s->strstart])); 2176 _tr_tally_lit (s, s->window[s->strstart], bflush); 2177 s->lookahead--; 2178 s->strstart++; 2179 if (bflush) FLUSH_BLOCK(s, 0); 2180 } 2181 s->insert = 0; 2182 if (flush == Z_FINISH) { 2183 FLUSH_BLOCK(s, 1); 2184 return finish_done; 2185 } 2186 if (s->last_lit) 2187 FLUSH_BLOCK(s, 0); 2188 return block_done; 2189 }