1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
 */

package jdk.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.NonWritableChannelException;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.SeekableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.*;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.FileTime;
import java.nio.file.attribute.UserPrincipalLookupService;
import java.nio.file.spi.FileSystemProvider;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.ZipException;

import static java.lang.Boolean.TRUE;
import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
import static java.nio.file.StandardOpenOption.APPEND;
import static java.nio.file.StandardOpenOption.CREATE;
import static java.nio.file.StandardOpenOption.CREATE_NEW;
import static java.nio.file.StandardOpenOption.READ;
import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
import static java.nio.file.StandardOpenOption.WRITE;
import static jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;

/**
 * A FileSystem built on a zip file.
 *
 * <p>The central directory (CEN) is read into memory up front; entry data is
 * read lazily from the underlying channel, and pending updates are committed
 * back to the zip file when the file system is closed (see sync()).
 *
 * @author Xueming Shen
 */
class ZipFileSystem extends FileSystem {
    private final ZipFileSystemProvider provider;
    private final Path zfpath;              // path of the underlying zip file
    final ZipCoder zc;                      // encoder/decoder for entry names/comments
    private final ZipPath rootdir;
    private boolean readOnly = false;       // readonly file system

    // configurable by env map
    private final boolean noExtt;           // see readExtra()
    private final boolean useTempFile;      // use a temp file for newOS, default
                                            // is to use BAOS for better performance
    private static final boolean isWindows = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> System.getProperty("os.name")
                                                    .startsWith("Windows"));
    private final boolean forceEnd64;       // always write a ZIP64 END record
    private final int defaultMethod;        // METHOD_STORED if "noCompression=true"
                                            // METHOD_DEFLATED otherwise

    /**
     * Creates a zip file system over {@code zfpath}, creating the zip file
     * itself first if env contains "create"=true and the file does not exist.
     *
     * @throws FileSystemNotFoundException if the zip does not exist and
     *         creation was not requested
     */
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = isTrue(env, "useTempFile");
        this.forceEnd64 = isTrue(env, "forceZIP64End");
        this.defaultMethod = isTrue(env, "noCompression") ?
METHOD_STORED : METHOD_DEFLATED;
        if (Files.notExists(zfpath)) {
            // create a new zip if not exists
            if (isTrue(env, "create")) {
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            // don't leak the channel if the CEN cannot be parsed
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;
    }

    // returns true if there is a name=true/"true" setting in env
    private static boolean isTrue(Map<String, ?> env, String name) {
        return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // throws if this file system (or the underlying zip file) is read-only
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }

    void setReadOnly() {
        this.readOnly = true;
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        return List.of(rootdir);
    }

    ZipPath getRootDir() {
        return rootdir;
    }

    @Override
    public ZipPath getPath(String first, String... more) {
        if (more.length == 0) {
            return new ZipPath(this, first);
        }
        // join non-empty components with '/'
        StringBuilder sb = new StringBuilder();
        sb.append(first);
        for (String path : more) {
            if (path.length() > 0) {
                if (sb.length() > 0) {
                    sb.append('/');
                }
                sb.append(path);
            }
        }
        return new ZipPath(this, sb.toString());
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        return List.of(new ZipFileStore(rootdir));
    }

    private static final Set<String> supportedFileAttributeViews =
        Set.of("basic", "zip");

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);
        } else {
            if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new
PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    /**
     * Closes this file system: marks it closed, closes outstanding input
     * streams, syncs pending updates back to the zip file, releases
     * (in|de)flaters and deletes temp files.  The first tmp-file deletion
     * failure is thrown; later ones are attached as suppressed exceptions.
     */
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is : copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> {
                sync(); return null;
            });
            ch.close();                 // close the ch just in case no update
                                        // and sync didn't close the ch
        } catch (PrivilegedActionException e) {
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p : tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
            throw ioe;
    }

    /**
     * Returns the attributes of the given entry, a synthesized entry for a
     * "pseudo directory" (a parent that exists only implicitly), or null
     * if the path denotes nothing in this zip.
     */
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry(path);
            if (e == null) {
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                // pseudo directory, uses METHOD_STORED
                e = new Entry(inode.name, inode.isdir, METHOD_STORED);
                e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp;
            }
        } finally {
            endRead();
        }
        return e;
    }

    // throws NoSuchFileException if the entry does not exist
    void checkAccess(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            // is it necessary to readCEN as a sanity check?
            if (getInode(path) == null) {
                throw new NoSuchFileException(toString());
            }

        } finally {
            endRead();
        }
    }

    // Updates the mtime/atime/ctime of an existing entry; null arguments
    // leave the corresponding timestamp untouched.
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;     // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(ZipPath dir,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            byte[] path = dir.getResolvedPath();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                // (1) Assume each path from the zip file itself is "normalized"
                // (2) IndexNode.name is absolute.
// see IndexNode(byte[],int,int)
                // (3) If parent "dir" is relative when ZipDirectoryStream
                //     is created, the returned child path needs to be relative
                //     as well.
                byte[] cname = child.name;
                ZipPath childPath = new ZipPath(this, cname, true);
                ZipPath childFileName = childPath.getFileName();
                ZipPath zpath = dir.resolve(childFileName);
                if (filter == null || filter.accept(zpath))
                    list.add(zpath);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    // Creates a new directory entry (METHOD_STORED, zero-length) after
    // verifying all parent directories exist.
    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        //  dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW, true, METHOD_STORED);
            update(e);
        } finally {
            endWrite();
        }
    }

    /**
     * Copies (or renames, when deletesrc is true) entry src to dst.
     * For directory sources only the destination directory is created,
     * per the Files.copy specification.
     */
    void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry(src);    // ensureOpen checked

            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime= u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption...
options) 528 throws IOException 529 { 530 checkWritable(); 531 boolean hasCreateNew = false; 532 boolean hasCreate = false; 533 boolean hasAppend = false; 534 boolean hasTruncate = false; 535 for (OpenOption opt : options) { 536 if (opt == READ) 537 throw new IllegalArgumentException("READ not allowed"); 538 if (opt == CREATE_NEW) 539 hasCreateNew = true; 540 if (opt == CREATE) 541 hasCreate = true; 542 if (opt == APPEND) 543 hasAppend = true; 544 if (opt == TRUNCATE_EXISTING) 545 hasTruncate = true; 546 } 547 if (hasAppend && hasTruncate) 548 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 549 beginRead(); // only need a readlock, the "update()" will 550 try { // try to obtain a writelock when the os is 551 ensureOpen(); // being closed. 552 Entry e = getEntry(path); 553 if (e != null) { 554 if (e.isDir() || hasCreateNew) 555 throw new FileAlreadyExistsException(getString(path)); 556 if (hasAppend) { 557 InputStream is = getInputStream(e); 558 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 559 is.transferTo(os); 560 is.close(); 561 return os; 562 } 563 return getOutputStream(new Entry(e, Entry.NEW)); 564 } else { 565 if (!hasCreate && !hasCreateNew) 566 throw new NoSuchFileException(getString(path)); 567 checkParents(path); 568 return getOutputStream(new Entry(path, Entry.NEW, false, defaultMethod)); 569 } 570 } finally { 571 endRead(); 572 } 573 } 574 575 // Returns an input stream for reading the contents of the specified 576 // file entry. 577 InputStream newInputStream(byte[] path) throws IOException { 578 beginRead(); 579 try { 580 ensureOpen(); 581 Entry e = getEntry(path); 582 if (e == null) 583 throw new NoSuchFileException(getString(path)); 584 if (e.isDir()) 585 throw new FileSystemException(getString(path), "is a directory", null); 586 return getInputStream(e); 587 } finally { 588 endRead(); 589 } 590 } 591 592 private void checkOptions(Set<? 
extends OpenOption> options) { 593 // check for options of null type and option is an intance of StandardOpenOption 594 for (OpenOption option : options) { 595 if (option == null) 596 throw new NullPointerException(); 597 if (!(option instanceof StandardOpenOption)) 598 throw new IllegalArgumentException(); 599 } 600 if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING)) 601 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 602 } 603 604 // Returns an output SeekableByteChannel for either 605 // (1) writing the contents of a new entry, if the entry doesn't exit, or 606 // (2) updating/replacing the contents of an existing entry. 607 // Note: The content of the channel is not compressed until the 608 // channel is closed 609 private class EntryOutputChannel extends ByteArrayChannel { 610 Entry e; 611 612 EntryOutputChannel(Entry e) throws IOException { 613 super(e.size > 0? (int)e.size : 8192, false); 614 this.e = e; 615 if (e.mtime == -1) 616 e.mtime = System.currentTimeMillis(); 617 if (e.method == -1) 618 e.method = defaultMethod; 619 // store size, compressed size, and crc-32 in datadescriptor 620 e.flag = FLAG_DATADESCR; 621 if (zc.isUTF8()) 622 e.flag |= FLAG_USE_UTF8; 623 } 624 625 @Override 626 public void close() throws IOException { 627 OutputStream os = getOutputStream(e); 628 os.write(toByteArray()); 629 os.close(); // will update the entry 630 super.close(); 631 } 632 } 633 634 private int getCompressMethod(FileAttribute<?>... attrs) { 635 return defaultMethod; 636 } 637 638 // Returns a Writable/ReadByteChannel for now. Might consider to use 639 // newFileChannel() instead, which dump the entry data into a regular 640 // file on the default file system and create a FileChannel on top of 641 // it. 642 SeekableByteChannel newByteChannel(byte[] path, 643 Set<? extends OpenOption> options, 644 FileAttribute<?>... 
attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();    // only need a read lock, the "update()" will obtain
                            // the write lock when the channel is closed
            try {
                Entry e = getEntry(path);
                if (e != null) {
                    if (e.isDir() || options.contains(CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    SeekableByteChannel sbc =
                        new EntryOutputChannel(new Entry(e, Entry.NEW));
                    if (options.contains(APPEND)) {
                        try (InputStream is = getInputStream(e)) {    // copyover
                            byte[] buf = new byte[8192];
                            ByteBuffer bb = ByteBuffer.wrap(buf);
                            int n;
                            while ((n = is.read(buf)) != -1) {
                                bb.position(0);
                                bb.limit(n);
                                sbc.write(bb);
                            }
                        }
                    }
                    return sbc;
                }
                if (!options.contains(CREATE) && !options.contains(CREATE_NEW))
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return new EntryOutputChannel(
                    new Entry(path, Entry.NEW, false, getCompressMethod(attrs)));
            } finally {
                endRead();
            }
        } else {
            // read-only channel: snapshot the whole entry into memory
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                try (InputStream is = getInputStream(e)) {
                    // TBD: if (e.size < NNNNN);
                    return new ByteArrayChannel(is.readAllBytes(), true);
                }
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = getCompressMethod();
            }
            // is there a better way to hook into the FileChannel's close method?
            // delegate everything to fch; only implCloseChannel is special-cased
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    // mapping a temp-file-backed entry is not supported
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        // commit the temp file contents as the entry data
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private Set<ExChannelCloser> exChClosers = new HashSet<>();

    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());

    // Creates a temp file next to the zip file; when path is non-null the
    // entry's contents are copied into it first.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
873 private void checkParents(byte[] path) throws IOException { 874 beginRead(); 875 try { 876 while ((path = getParent(path)) != null && 877 path != ROOTPATH) { 878 if (!inodes.containsKey(IndexNode.keyOf(path))) { 879 throw new NoSuchFileException(getString(path)); 880 } 881 } 882 } finally { 883 endRead(); 884 } 885 } 886 887 private static byte[] ROOTPATH = new byte[] { '/' }; 888 private static byte[] getParent(byte[] path) { 889 int off = getParentOff(path); 890 if (off <= 1) 891 return ROOTPATH; 892 return Arrays.copyOf(path, off); 893 } 894 895 private static int getParentOff(byte[] path) { 896 int off = path.length - 1; 897 if (off > 0 && path[off] == '/') // isDirectory 898 off--; 899 while (off > 0 && path[off] != '/') { off--; } 900 return off; 901 } 902 903 private final void beginWrite() { 904 rwlock.writeLock().lock(); 905 } 906 907 private final void endWrite() { 908 rwlock.writeLock().unlock(); 909 } 910 911 private final void beginRead() { 912 rwlock.readLock().lock(); 913 } 914 915 private final void endRead() { 916 rwlock.readLock().unlock(); 917 } 918 919 /////////////////////////////////////////////////////////////////// 920 921 private volatile boolean isOpen = true; 922 private final SeekableByteChannel ch; // channel to the zipfile 923 final byte[] cen; // CEN & ENDHDR 924 private END end; 925 private long locpos; // position of first LOC header (usually 0) 926 927 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 928 929 // name -> pos (in cen), IndexNode itself can be used as a "key" 930 private LinkedHashMap<IndexNode, IndexNode> inodes; 931 932 final byte[] getBytes(String name) { 933 return zc.getBytes(name); 934 } 935 936 final String getString(byte[] name) { 937 return zc.toString(name); 938 } 939 940 @SuppressWarnings("deprecation") 941 protected void finalize() throws IOException { 942 close(); 943 } 944 945 // Reads len bytes of data from the specified offset into buf. 
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    // Positioned read; synchronized on ch because SeekableByteChannel's
    // position is shared mutable state.
    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards from the end of the file, block by block
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P'    &&
                    buf[i+1] == (byte)'K'    &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    // try if there is zip64 end;
                    byte[] loc64 = new byte[ZIP64_LOCHDR];
                    if (end.endpos < ZIP64_LOCHDR ||
                        readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                        != loc64.length ||
                        !locator64SigAt(loc64, 0)) {
                        return end;    // no ZIP64 locator -> plain END
                    }
                    long end64pos = ZIP64_LOCOFF(loc64);
                    byte[] end64buf = new byte[ZIP64_ENDHDR];
                    if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                        != end64buf.length ||
                        !end64SigAt(end64buf, 0)) {
                        return end;
                    }
                    // end64 found,
                    long cenlen64 = ZIP64_ENDSIZ(end64buf);
                    long cenoff64 = ZIP64_ENDOFF(end64buf);
                    long centot64 = ZIP64_ENDTOT(end64buf);
                    // double-check: the 32-bit fields must either agree with
                    // the 64-bit ones or carry the ZIP64 sentinel value
                    if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL ||
                        cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL ||
                        centot64 != end.centot && end.centot != ZIP64_MINVAL32) {
                        return end;
                    }
                    // to use the end64 values
                    end.cenlen = cenlen64;
                    end.cenoff = cenoff64;
                    end.centot = (int)centot64; // assume total < 2g
                    end.endpos = end64pos;
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }

    // Reads zip file central directory. Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;    // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;    // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0) {
                zerror("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                zerror("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos, nlen);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);    // remembered so close() can delete it
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // Removes the entry from both the inode map and the directory tree.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Inserts/replaces the entry and, for new/copied entries, links it into
    // its parent's child list.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
    // Returns the total number of bytes written to "os" for this entry.
    // "written" is the current offset in the new zip (becomes e.locoff).
    private long copyLOCEntry(Entry e, boolean updateHeader,
                              OutputStream os,
                              long written, byte[] buf)
        throws IOException
    {
        long locoff = e.locoff;  // where to read
        e.locoff = written;      // update the e.locoff with new value

        // calculate the size need to write out
        long size = 0;
        // if there is an EXT (data descriptor) record after the data
        if ((e.flag & FLAG_DATADESCR) != 0) {
            if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL)
                size = 24;       // zip64 descriptor: sig + crc + 8-byte sizes
            else
                size = 16;       // standard descriptor: sig + crc + 4-byte sizes
        }
        // read loc, use the original loc.elen/nlen
        //
        // an extra byte after loc is read, which should be the first byte of the
        // 'name' field of the loc. if this byte is '/', which means the original
        // entry has an absolute path in original zip/jar file, the e.writeLOC()
        // is used to output the loc, in which the leading "/" will be removed
        if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1)
            throw new ZipException("loc: reading failed");

        if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') {
            // rewrite the header (renamed entry, or leading '/' to strip)
            locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header
            size += e.csize;
            written = e.writeLOC(os) + size;
        } else {
            os.write(buf, 0, LOCHDR);    // write out the loc header
            locoff += LOCHDR;
            // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on
            // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf);
            size += LOCNAM(buf) + LOCEXT(buf) + e.csize;
            written = LOCHDR + size;
        }
        // bulk-copy name/ext/data/descriptor bytes from the old zip
        int n;
        while (size > 0 &&
            (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1)
        {
            if (size < n)
                n = (int)size;
            os.write(buf, 0, n);
            size -= n;
            locoff += n;
        }
        return written;
    }

    // Writes the (possibly freshly compressed) data of an updated entry to
    // "os", followed by the data descriptor when FLAG_DATADESCR is set.
    // Returns the number of bytes written (csize + descriptor length).
    private long writeEntry(Entry e, OutputStream os)
        throws IOException {

        if (e.bytes == null && e.file == null)    // dir, 0-length data
            return 0;

        long written = 0;
        if (e.crc != 0 && e.csize > 0) {
            // pre-compressed entry, write directly to output stream
            writeTo(e, os);
        } else {
            // wrap "os" so size/csize/crc are computed while writing
            try (OutputStream os2 = (e.method == METHOD_STORED) ?
                new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) {
                writeTo(e, os2);
            }
        }
        written += e.csize;
        if ((e.flag & FLAG_DATADESCR) != 0) {
            written += e.writeEXT(os);
        }
        return written;
    }

    // Streams the entry's pending content (in-memory bytes or temp file)
    // into "os"; a consumed temp file is deleted and untracked.
    private void writeTo(Entry e, OutputStream os) throws IOException {
        if (e.bytes != null) {
            os.write(e.bytes, 0, e.bytes.length);
        } else if (e.file != null) {
            if (e.type == Entry.NEW || e.type == Entry.FILECH) {
                try (InputStream is = Files.newInputStream(e.file)) {
                    is.transferTo(os);
                }
            }
            Files.delete(e.file);
            tmppaths.remove(e.file);
        }
    }

    // Syncs the zip file system, if there is any update: writes all entries
    // (updated and unchanged) plus CEN/END tables to a temp file, then moves
    // it over the original zip file.
    private void sync() throws IOException {
        // close any ex-channel-closer whose outstanding streams are all gone
        if (!exChClosers.isEmpty()) {
            for (ExChannelCloser ecc : exChClosers) {
                if (ecc.streams.isEmpty()) {
                    ecc.ch.close();
                    Files.delete(ecc.path);
                    // NOTE(review): removing from the collection while
                    // iterating it can throw ConcurrentModificationException
                    // when more than one closer is eligible -- consider an
                    // explicit Iterator with remove().
                    exChClosers.remove(ecc);
                }
            }
        }
        if (!hasUpdate)
            return;
        Path tmpFile = createTempFileInSameDirectoryAs(zfpath);
        try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) {
            ArrayList<Entry> elist = new ArrayList<>(inodes.size());
            long written = 0;
            byte[] buf = null;    // lazily allocated copy buffer
            Entry e;

            // write loc
            for (IndexNode inode : inodes.values()) {
                if (inode instanceof Entry) {    // an updated inode
                    e = (Entry)inode;
                    try {
                        if (e.type == Entry.COPY) {
                            // entry copy: the only thing changed is the "name"
                            // and "nlen" in LOC header, so we update/rewrite the
                            // LOC in new file and simply copy the rest (data and
                            // ext) without inflating/deflating from the old zip
                            // file LOC entry.
                            if (buf == null)
                                buf = new byte[8192];
                            written += copyLOCEntry(e, true, os, written, buf);
                        } else {                          // NEW, FILECH or CEN
                            e.locoff = written;
                            written += e.writeLOC(os);    // write loc header
                            written += writeEntry(e, os);
                        }
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();    // skip any inaccurate entry
                    }
                } else {                        // unchanged inode
                    if (inode.pos == -1) {
                        continue;               // pseudo directory node
                    }
                    if (inode.name.length == 1 && inode.name[0] == '/') {
                        continue;               // no root '/' directory even if it
                                                // exists in original zip/jar file.
                    }
                    e = Entry.readCEN(this, inode);
                    try {
                        if (buf == null)
                            buf = new byte[8192];
                        written += copyLOCEntry(e, false, os, written, buf);
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();    // skip any wrong entry
                    }
                }
            }

            // now write back the cen and end table
            end.cenoff = written;
            for (Entry entry : elist) {
                written += entry.writeCEN(os);
            }
            end.centot = elist.size();
            end.cenlen = written - end.cenoff;
            end.write(os, written, forceEnd64);
        }
        if (!streams.isEmpty()) {
            //
            // There are outstanding input streams open on existing "ch",
            // so, don't close the "ch" and delete the file for now, let
            // the "ex-channel-closer" handle them
            ExChannelCloser ecc = new ExChannelCloser(
                                      createTempFileInSameDirectoryAs(zfpath),
                                      ch,
                                      streams);
            Files.move(zfpath, ecc.path, REPLACE_EXISTING);
            exChClosers.add(ecc);
            streams = Collections.synchronizedSet(new HashSet<InputStream>());
        } else {
            ch.close();
            Files.delete(zfpath);
        }

        Files.move(tmpFile, zfpath, REPLACE_EXISTING);
        hasUpdate = false;    // clear
    }

    // Looks up the IndexNode for the given entry name, or null if absent.
    IndexNode getInode(byte[] path) {
        if (path == null)
            throw new NullPointerException("path");
        return inodes.get(IndexNode.keyOf(path));
    }

    // Returns the full Entry for the given name, materializing it from the
    // CEN table if necessary; null when the name is absent or is a pseudo
    // directory (pos == -1).
    Entry getEntry(byte[] path) throws IOException {
        IndexNode inode = getInode(path);
        if (inode instanceof Entry)
            return (Entry)inode;
        if (inode == null || inode.pos == -1)
            return null;
        return Entry.readCEN(this, inode);
    }

    // Deletes the named entry. Throws DirectoryNotEmptyException for a
    // non-empty directory, and NoSuchFileException when failIfNotExists.
    public void deleteFile(byte[] path, boolean failIfNotExists)
        throws IOException
    {
        checkWritable();

        IndexNode inode = getInode(path);
        if (inode == null) {
            if (path != null && path.length == 0)
                // NOTE(review): message wording is garbled ("can't not be
                // delete") -- fix the text in a follow-up change.
                throw new ZipException("root directory </> can't not be delete");
            if (failIfNotExists)
                throw new NoSuchFileException(getString(path));
        } else {
            if (inode.isDir() && inode.child != null)
                throw new DirectoryNotEmptyException(getString(path));
            updateDelete(inode);
        }
    }

    // Returns an out stream for either
    // (1) writing the contents of a new entry, if the entry exists, or
    // (2) updating/replacing the contents of the specified existing entry.
    private OutputStream getOutputStream(Entry e) throws IOException {

        if (e.mtime == -1)
            e.mtime = System.currentTimeMillis();
        if (e.method == -1)
            e.method = defaultMethod;
        // store size, compressed size, and crc-32 in datadescr
        e.flag = FLAG_DATADESCR;
        if (zc.isUTF8())
            e.flag |= FLAG_USE_UTF8;
        OutputStream os;
        if (useTempFile) {
            // buffer the new content in a temp file
            e.file = getTempPathForEntry(null);
            os = Files.newOutputStream(e.file, WRITE);
        } else {
            // buffer the new content in memory
            os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192);
        }
        if (e.method == METHOD_DEFLATED) {
            return new DeflatingEntryOutputStream(e, os);
        } else {
            return new EntryOutputStream(e, os);
        }
    }

    // Output stream for a "stored" entry written outside of sync():
    // counts bytes to fill in e.size and registers the update on close.
    private class EntryOutputStream extends FilterOutputStream {
        private final Entry e;
        private long written;        // bytes written so far (= e.size on close)
        private boolean isClosed;

        EntryOutputStream(Entry e, OutputStream os) throws IOException {
            super(os);
            this.e = Objects.requireNonNull(e, "Zip entry is null");
        }

        @Override
        public synchronized void write(int b) throws IOException {
            out.write(b);
            written += 1;
        }

        @Override
        public synchronized void write(byte b[], int off, int len)
            throws IOException {
            out.write(b, off, len);
            written += len;
        }

        @Override
        public synchronized void close() throws IOException {
            if (isClosed) {
                return;    // idempotent
            }
            isClosed = true;
            e.size = written;
            // capture in-memory content for the pending update
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();
            super.close();
            update(e);    // register the finished entry
        }
    }

    // Output stream returned when writing "deflated" entries into memory,
    // to enable eager (possibly parallel) deflation and reduce memory required.
1439 private class DeflatingEntryOutputStream extends DeflaterOutputStream { 1440 private final CRC32 crc; 1441 private final Entry e; 1442 private boolean isClosed; 1443 1444 DeflatingEntryOutputStream(Entry e, OutputStream os) throws IOException { 1445 super(os, getDeflater()); 1446 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1447 this.crc = new CRC32(); 1448 } 1449 1450 @Override 1451 public synchronized void write(int b) throws IOException { 1452 super.write(b); 1453 crc.update(b); 1454 } 1455 1456 @Override 1457 public synchronized void write(byte b[], int off, int len) 1458 throws IOException { 1459 super.write(b, off, len); 1460 crc.update(b, off, len); 1461 } 1462 1463 @Override 1464 public synchronized void close() throws IOException { 1465 if (isClosed) 1466 return; 1467 isClosed = true; 1468 finish(); 1469 e.size = def.getBytesRead(); 1470 e.csize = def.getBytesWritten(); 1471 e.crc = crc.getValue(); 1472 if (out instanceof ByteArrayOutputStream) 1473 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1474 super.close(); 1475 update(e); 1476 releaseDeflater(def); 1477 } 1478 } 1479 1480 // Wrapper output stream class to write out a "stored" entry. 1481 // (1) this class does not close the underlying out stream when 1482 // being closed. 
1483 // (2) no need to be "synchronized", only used by sync() 1484 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1485 private final CRC32 crc; 1486 private final Entry e; 1487 private long written; 1488 private boolean isClosed; 1489 1490 EntryOutputStreamCRC32(Entry e, OutputStream os) throws IOException { 1491 super(os); 1492 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1493 this.crc = new CRC32(); 1494 } 1495 1496 @Override 1497 public void write(int b) throws IOException { 1498 out.write(b); 1499 crc.update(b); 1500 written += 1; 1501 } 1502 1503 @Override 1504 public void write(byte b[], int off, int len) 1505 throws IOException { 1506 out.write(b, off, len); 1507 crc.update(b, off, len); 1508 written += len; 1509 } 1510 1511 @Override 1512 public void close() throws IOException { 1513 if (isClosed) 1514 return; 1515 isClosed = true; 1516 e.size = e.csize = written; 1517 e.crc = crc.getValue(); 1518 } 1519 } 1520 1521 // Wrapper output stream class to write out a "deflated" entry. 1522 // (1) this class does not close the underlying out stream when 1523 // being closed. 
    // (2) no need to be "synchronized", only used by sync()
    private class EntryOutputStreamDef extends DeflaterOutputStream {
        private final CRC32 crc;    // running crc-32 of the uncompressed data
        private final Entry e;      // entry being written
        private boolean isClosed;

        EntryOutputStreamDef(Entry e, OutputStream os) throws IOException {
            super(os, getDeflater());
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public void write(byte b[], int off, int len)
            throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        @Override
        public void close() throws IOException {
            if (isClosed)
                return;    // idempotent
            isClosed = true;
            finish();      // flush remaining deflated output (stream stays open)
            // record sizes/crc gathered during deflation
            e.size  = def.getBytesRead();
            e.csize = def.getBytesWritten();
            e.crc = crc.getValue();
            releaseDeflater(def);    // return deflater to the pool
        }
    }

    // Returns an input stream over the entry's data, decompressing when the
    // entry is deflated. The returned stream is tracked in "streams" so that
    // sync() knows when the old channel can be closed.
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis;

        if (e.type == Entry.NEW) {
            // pending update: serve the not-yet-synced content
            if (e.bytes != null)
                eis = new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                eis = Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {  // untouched CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
                private boolean isClosed = false;
                public void close() throws IOException {
                    if (!isClosed) {
                        releaseInflater(inf);    // return inflater to the pool
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;    // the dummy byte
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    // remaining uncompressed bytes, clamped to int range
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            pos = e.locoff;
            if (pos == -1) {
                // locoff unknown: re-read the entry from the CEN table
                Entry e2 = getEntry(e.name);
                if (e2 == null) {
                    throw new ZipException("invalid loc for entry <" + e.name + ">");
                }
                pos = e2.locoff;
            }
            pos = -pos; // lazy initialize the real data offset:
                        // a non-positive pos means "LOC header not parsed yet"
        }

        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            initDataPos();    // resolve the real data offset on first use
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // the channel position is shared; serialize seek+read
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();    // fully consumed: untrack this stream
            }
            return (int)n;
        }

        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }

        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }

        public int available() {
            // remaining bytes, clamped to int range
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }

        public void close() {
            rem = 0;
            streams.remove(this);
        }

        // Parses the LOC header (once) to find where the entry data really
        // starts; until then "pos" holds the negated LOC header offset.
        private void initDataPos() throws IOException {
            if (pos <= 0) {
                pos = -pos + locpos;    // locpos accounts for a possible stub prefix
                byte[] buf = new byte[LOCHDR];
                if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                    throw new ZipException("invalid loc " + pos + " for entry reading");
                }
                pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
            }
        }
    }

    // Throws a ZipException with the given message.
    static void zerror(String msg) throws ZipException {
        throw new ZipException(msg);
    }

    // Maximum number of de/inflaters we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                // "nowrap": raw deflate data, no zlib header/trailer
                return new Inflater(true);
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();    // cache full: free native resources
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
1764 private Deflater getDeflater() { 1765 synchronized (deflaters) { 1766 int size = deflaters.size(); 1767 if (size > 0) { 1768 Deflater def = deflaters.remove(size - 1); 1769 return def; 1770 } else { 1771 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1772 } 1773 } 1774 } 1775 1776 // Releases the specified inflater to the list of available inflaters. 1777 private void releaseDeflater(Deflater def) { 1778 synchronized (deflaters) { 1779 if (inflaters.size() < MAX_FLATER) { 1780 def.reset(); 1781 deflaters.add(def); 1782 } else { 1783 def.end(); 1784 } 1785 } 1786 } 1787 1788 // End of central directory record 1789 static class END { 1790 // these 2 fields are not used by anyone and write() uses "0" 1791 // int disknum; 1792 // int sdisknum; 1793 int endsub; // endsub 1794 int centot; // 4 bytes 1795 long cenlen; // 4 bytes 1796 long cenoff; // 4 bytes 1797 int comlen; // comment length 1798 byte[] comment; 1799 1800 /* members of Zip64 end of central directory locator */ 1801 // int diskNum; 1802 long endpos; 1803 // int disktot; 1804 1805 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 1806 boolean hasZip64 = forceEnd64; // false; 1807 long xlen = cenlen; 1808 long xoff = cenoff; 1809 if (xlen >= ZIP64_MINVAL) { 1810 xlen = ZIP64_MINVAL; 1811 hasZip64 = true; 1812 } 1813 if (xoff >= ZIP64_MINVAL) { 1814 xoff = ZIP64_MINVAL; 1815 hasZip64 = true; 1816 } 1817 int count = centot; 1818 if (count >= ZIP64_MINVAL32) { 1819 count = ZIP64_MINVAL32; 1820 hasZip64 = true; 1821 } 1822 if (hasZip64) { 1823 long off64 = offset; 1824 //zip64 end of central directory record 1825 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1826 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1827 writeShort(os, 45); // version made by 1828 writeShort(os, 45); // version needed to extract 1829 writeInt(os, 0); // number of this disk 1830 writeInt(os, 0); // central directory start disk 1831 writeLong(os, centot); // number of 
directory entries on disk 1832 writeLong(os, centot); // number of directory entries 1833 writeLong(os, cenlen); // length of central directory 1834 writeLong(os, cenoff); // offset of central directory 1835 1836 //zip64 end of central directory locator 1837 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1838 writeInt(os, 0); // zip64 END start disk 1839 writeLong(os, off64); // offset of zip64 END 1840 writeInt(os, 1); // total number of disks (?) 1841 } 1842 writeInt(os, ENDSIG); // END record signature 1843 writeShort(os, 0); // number of this disk 1844 writeShort(os, 0); // central directory start disk 1845 writeShort(os, count); // number of directory entries on disk 1846 writeShort(os, count); // total number of directory entries 1847 writeInt(os, xlen); // length of central directory 1848 writeInt(os, xoff); // offset of central directory 1849 if (comment != null) { // zip file comment 1850 writeShort(os, comment.length); 1851 writeBytes(os, comment); 1852 } else { 1853 writeShort(os, 0); 1854 } 1855 } 1856 } 1857 1858 // Internal node that links a "name" to its pos in cen table. 1859 // The node itself can be used as a "key" to lookup itself in 1860 // the HashMap inodes. 
    static class IndexNode {
        byte[] name;
        int hashcode;    // node is hashable/hashed by its name
        int pos = -1;    // position in cen table, -1 means the
                         // entry does not exist in the zip file
        boolean isdir;

        IndexNode(byte[] name, boolean isdir) {
            name(name);
            this.isdir = isdir;
            this.pos = -1;
        }

        IndexNode(byte[] name, int pos) {
            name(name);
            this.pos = pos;
        }

        // constructor for cenInit() (1) remove trailing '/' (2) pad leading '/'
        IndexNode(byte[] cen, int pos, int nlen) {
            int noff = pos + CENHDR;
            if (cen[noff + nlen - 1] == '/') {
                isdir = true;
                nlen--;    // drop the trailing slash from the stored name
            }
            if (nlen > 0 && cen[noff] == '/') {
                // name is already absolute
                name = Arrays.copyOfRange(cen, noff, noff + nlen);
            } else {
                // prepend a '/' so all names are absolute
                name = new byte[nlen + 1];
                System.arraycopy(cen, noff, name, 1, nlen);
                name[0] = '/';
            }
            name(name);
            this.pos = pos;
        }

        // per-thread reusable lookup key (avoids allocating per lookup)
        private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>();

        final static IndexNode keyOf(byte[] name) { // get a lookup key;
            IndexNode key = cachedKey.get();
            if (key == null) {
                key = new IndexNode(name, -1);
                cachedKey.set(key);
            }
            return key.as(name);
        }

        // Sets the name and (re)computes the cached hash code.
        final void name(byte[] name) {
            this.name = name;
            this.hashcode = Arrays.hashCode(name);
        }

        final IndexNode as(byte[] name) {           // reuse the node, mostly
            name(name);                             // as a lookup "key"
            return this;
        }

        boolean isDir() {
            return isdir;
        }

        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // delegate to ParentLookup so its prefix-based comparison is used
            if (other instanceof ParentLookup) {
                return ((ParentLookup)other).equals(this);
            }
            return Arrays.equals(name, ((IndexNode)other).name);
        }

        public int hashCode() {
            return hashcode;    // cached; recomputed by name()
        }

        IndexNode() {}
        IndexNode sibling;    // next node in the parent's child list
        IndexNode child;      // 1st child
    }
1940 1941 static class Entry extends IndexNode implements ZipFileAttributes { 1942 1943 static final int CEN = 1; // entry read from cen 1944 static final int NEW = 2; // updated contents in bytes or file 1945 static final int FILECH = 3; // fch update in "file" 1946 static final int COPY = 4; // copy of a CEN entry 1947 1948 byte[] bytes; // updated content bytes 1949 Path file; // use tmp file to store bytes; 1950 int type = CEN; // default is the entry read from cen 1951 1952 // entry attributes 1953 int version; 1954 int flag; 1955 int method = -1; // compression method 1956 long mtime = -1; // last modification time (in DOS time) 1957 long atime = -1; // last access time 1958 long ctime = -1; // create time 1959 long crc = -1; // crc-32 of entry data 1960 long csize = -1; // compressed size of entry data 1961 long size = -1; // uncompressed size of entry data 1962 byte[] extra; 1963 1964 // cen 1965 1966 // these fields are not used by anyone and writeCEN uses "0" 1967 // int versionMade; 1968 // int disk; 1969 // int attrs; 1970 // long attrsEx; 1971 long locoff; 1972 byte[] comment; 1973 1974 Entry() {} 1975 1976 Entry(byte[] name, boolean isdir, int method) { 1977 name(name); 1978 this.isdir = isdir; 1979 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1980 this.crc = 0; 1981 this.size = 0; 1982 this.csize = 0; 1983 this.method = method; 1984 } 1985 1986 Entry(byte[] name, int type, boolean isdir, int method) { 1987 this(name, isdir, method); 1988 this.type = type; 1989 } 1990 1991 Entry(Entry e, int type) { 1992 name(e.name); 1993 this.isdir = e.isdir; 1994 this.version = e.version; 1995 this.ctime = e.ctime; 1996 this.atime = e.atime; 1997 this.mtime = e.mtime; 1998 this.crc = e.crc; 1999 this.size = e.size; 2000 this.csize = e.csize; 2001 this.method = e.method; 2002 this.extra = e.extra; 2003 /* 2004 this.versionMade = e.versionMade; 2005 this.disk = e.disk; 2006 this.attrs = e.attrs; 2007 this.attrsEx = e.attrsEx; 2008 */ 2009 
this.locoff = e.locoff; 2010 this.comment = e.comment; 2011 this.type = type; 2012 } 2013 2014 Entry(byte[] name, Path file, int type) { 2015 this(name, type, false, METHOD_STORED); 2016 this.file = file; 2017 } 2018 2019 int version() throws ZipException { 2020 if (method == METHOD_DEFLATED) 2021 return 20; 2022 else if (method == METHOD_STORED) 2023 return 10; 2024 throw new ZipException("unsupported compression method"); 2025 } 2026 2027 ///////////////////// CEN ////////////////////// 2028 static Entry readCEN(ZipFileSystem zipfs, IndexNode inode) 2029 throws IOException 2030 { 2031 return new Entry().cen(zipfs, inode); 2032 } 2033 2034 private Entry cen(ZipFileSystem zipfs, IndexNode inode) 2035 throws IOException 2036 { 2037 byte[] cen = zipfs.cen; 2038 int pos = inode.pos; 2039 if (!cenSigAt(cen, pos)) 2040 zerror("invalid CEN header (bad signature)"); 2041 version = CENVER(cen, pos); 2042 flag = CENFLG(cen, pos); 2043 method = CENHOW(cen, pos); 2044 mtime = dosToJavaTime(CENTIM(cen, pos)); 2045 crc = CENCRC(cen, pos); 2046 csize = CENSIZ(cen, pos); 2047 size = CENLEN(cen, pos); 2048 int nlen = CENNAM(cen, pos); 2049 int elen = CENEXT(cen, pos); 2050 int clen = CENCOM(cen, pos); 2051 /* 2052 versionMade = CENVEM(cen, pos); 2053 disk = CENDSK(cen, pos); 2054 attrs = CENATT(cen, pos); 2055 attrsEx = CENATX(cen, pos); 2056 */ 2057 locoff = CENOFF(cen, pos); 2058 pos += CENHDR; 2059 this.name = inode.name; 2060 this.isdir = inode.isdir; 2061 this.hashcode = inode.hashcode; 2062 2063 pos += nlen; 2064 if (elen > 0) { 2065 extra = Arrays.copyOfRange(cen, pos, pos + elen); 2066 pos += elen; 2067 readExtra(zipfs); 2068 } 2069 if (clen > 0) { 2070 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2071 } 2072 return this; 2073 } 2074 2075 int writeCEN(OutputStream os) throws IOException { 2076 int version0 = version(); 2077 long csize0 = csize; 2078 long size0 = size; 2079 long locoff0 = locoff; 2080 int elen64 = 0; // extra for ZIP64 2081 int elenNTFS = 0; // extra 
for NTFS (a/c/mtime)
            int elenEXTT = 0;               // extra for Extended Timestamp
            boolean foundExtraTime = false; // true if an NTFS or EXTT timestamp tag already exists in 'extra'

            byte[] zname = isdir ? toDirectoryPath(name) : name;

            // confirm size/length
            int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash"
            int elen = (extra != null) ? extra.length : 0;
            int eoff = 0;
            int clen = (comment != null) ? comment.length : 0;
            // Any field >= 0xFFFFFFFF must be stored in a Zip64 extra block;
            // the 32-bit CEN field is then set to the ZIP64_MINVAL marker.
            if (csize >= ZIP64_MINVAL) {
                csize0 = ZIP64_MINVAL;
                elen64 += 8;                 // csize(8)
            }
            if (size >= ZIP64_MINVAL) {
                size0 = ZIP64_MINVAL;        // size(8)
                elen64 += 8;
            }
            if (locoff >= ZIP64_MINVAL) {
                locoff0 = ZIP64_MINVAL;
                elen64 += 8;                 // offset(8)
            }
            if (elen64 != 0) {
                elen64 += 4;                 // header and data sz 4 bytes
            }
            // scan existing extra data for a timestamp tag so we don't add a duplicate
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {             // use NTFS
                    elenNTFS = 36;           // total 36 bytes
                } else {                     // Extended Timestamp otherwise
                    elenEXTT = 9;            // only mtime in cen
                }
            }
            writeInt(os, CENSIG);            // CEN header signature
            if (elen64 != 0) {
                writeShort(os, 45);          // ver 4.5 for zip64
                writeShort(os, 45);
            } else {
                writeShort(os, version0);    // version made by
                writeShort(os, version0);    // version needed to extract
            }
            writeShort(os, flag);            // general purpose bit flag
            writeShort(os, method);          // compression method
            // last modification time
            writeInt(os, (int)javaToDosTime(mtime));
            writeInt(os, crc);               // crc-32
            writeInt(os, csize0);            // compressed size
            writeInt(os, size0);             // uncompressed size
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);

            if (comment != null) {
                // comment length field is 16 bits; clamp to 0xffff
                writeShort(os, Math.min(clen, 0xffff));
            } else {
                writeShort(os, 0);
            }
            writeShort(os, 0);               // starting disk number
            writeShort(os, 0);               // internal file attributes (unused)
            writeInt(os, 0);                 // external file attributes (unused)
            writeInt(os, locoff0);           // relative offset of local header
            writeBytes(os, zname, 1, nlen);  // skip zname[0] ("slash" marker byte)
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64); // Zip64 extra
                writeShort(os, elen64 - 4);  // size of "this" extra block
                // fields appear in spec order: size, csize, locoff — only those
                // whose 32-bit CEN field was set to the ZIP64_MINVAL marker
                if (size0 == ZIP64_MINVAL)
                    writeLong(os, size);
                if (csize0 == ZIP64_MINVAL)
                    writeLong(os, csize);
                if (locoff0 == ZIP64_MINVAL)
                    writeLong(os, locoff);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);             // reserved
                writeShort(os, 0x0001);      // NTFS attr tag
                writeShort(os, 24);          // 3 x 8-byte Windows FILETIME values
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);
                // flags byte advertises which times exist in the LOC copy;
                // the CEN copy itself carries only mtime (spec behavior)
                if (ctime == -1)
                    os.write(0x3);           // mtime and atime
                else
                    os.write(0x7);           // mtime, atime and ctime
                writeInt(os, javaToUnixTime(mtime));
            }
            if (extra != null)               // whatever not recognized
                writeBytes(os, extra);
            if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff));
                writeBytes(os, comment);
            return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT;
        }

        ///////////////////// LOC //////////////////////

        // Writes this entry's LOC (local file header) to os and returns the
        // number of bytes written. When FLAG_DATADESCR is set, size/csize/crc
        // are zeroed here and supplied later by the data descriptor (writeEXT).
        int writeLOC(OutputStream os) throws IOException {
            int version0 = version();
            byte[] zname = isdir ? toDirectoryPath(name) : name;
            int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash
            int elen = (extra != null) ? extra.length : 0;
            boolean foundExtraTime = false; // if extra timestamp present
            int eoff = 0;
            int elen64 = 0;
            int elenEXTT = 0;
            int elenNTFS = 0;
            writeInt(os, LOCSIG);            // LOC header signature
            if ((flag & FLAG_DATADESCR) != 0) {
                writeShort(os, version0);    // version needed to extract
                writeShort(os, flag);        // general purpose bit flag
                writeShort(os, method);      // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;             //headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);      // ver 4.5 for zip64
                } else {
                    writeShort(os, version0); // version needed to extract
                }
                writeShort(os, flag);        // general purpose bit flag
                writeShort(os, method);      // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);           // crc-32
                if (elen64 != 0) {
                    // real values go into the Zip64 extra block below
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);     // compressed size
                    writeInt(os, size);      // uncompressed size
                }
            }
            // scan existing extra data for a timestamp tag so we don't add a duplicate
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;           // NTFS, total 36 bytes
                } else {                     // on unix use "ext time"
                    elenEXTT = 9;            // flags(1) + mtime(4), plus 4-byte header
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, zname, 1, nlen);  // skip zname[0] ("slash" marker byte)
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);          // LOC Zip64 extra always holds size + csize
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);             // reserved
                writeShort(os, 0x0001);      // NTFS attr tag
                writeShort(os, 24);          // 3 x 8-byte Windows FILETIME values
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);// size for the following data block
                int fbyte = 0x1;
                if (atime != -1)             // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)             // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);             // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor: written after the entry data when FLAG_DATADESCR is
        // set. Returns bytes written (24 with Zip64 8-byte sizes, else 16).
        int writeEXT(OutputStream os) throws IOException {
            writeInt(os, EXTSIG);            // EXT header signature
            writeInt(os, crc);               // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);         // compressed size
                writeInt(os, size);          // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        // Recognized blocks are consumed; unrecognized blocks are compacted
        // to the front of 'extra', which is trimmed (or nulled) afterwards.
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;     // write cursor for retained (unrecognized) blocks
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)         // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // 8-byte fields appear in spec order (size, csize, locoff)
                    // and only for those whose 32-bit CEN field is the marker
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)  // invalid zip64 extra
                            break;           // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)             // reserved(4) + tag(2) + sz(2) + times(24)
                        break;
                    pos += 4;    // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)
                        break;
                    if (SH(extra, pos + 2) != 24)
                        break;
                    // override the loc field, datatime here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime, if flag
                    // "zipinfo-time" is not specified to false;
                    // there is performance cost (move up to loc and read) to
                    // access the loc table foreach entry;
                    if (zipfs.noExtt) {
                        if (sz == 5)
                            mtime = unixToJavaTime(LG(extra, pos + 1));
                        break;
                    }
                    // read this entry's LOC header to locate its EXTT block
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (!locSigAt(buf, 0))
                        throw new ZipException("loc: wrong sig ->"
                                               + Long.toString(getSig(buf, 0), 16));
                    int locElen = LOCEXT(buf);
                    if (locElen < 9)         // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    int locPos = 0;
                    // walk the LOC extra blocks looking for EXTT
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        // flags bit 0/1/2 => mtime/atime/ctime present, in order
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:    // unknown tag: keep it (shift toward front of 'extra')
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;
        }

        ///////// basic file attributes ///////////
        @Override
        public FileTime creationTime() {
            // fall back to mtime when no ctime was recorded
            return FileTime.fromMillis(ctime == -1 ? mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        @Override
        public boolean isOther() {
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        @Override
        public FileTime lastAccessTime() {
            // fall back to mtime when no atime was recorded
            return FileTime.fromMillis(atime == -1 ? mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        @Override
        public long size() {
            return size;
        }

        @Override
        public boolean isSymbolicLink() {
            return false;
        }

        @Override
        public Object fileKey() {
            return null;
        }

        ///////// zip entry attributes ///////////
        public long compressedSize() {
            return csize;
        }

        public long crc() {
            return crc;
        }

        public int method() {
            return method;
        }

        // returns a defensive copy (or null) of the raw extra data
        public byte[] extra() {
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        // returns a defensive copy (or null) of the entry comment bytes
        public byte[] comment() {
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }

        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format(" name : %s%n", new String(name));
            fm.format(" creationTime : %tc%n", creationTime().toMillis());
            fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis());
            fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format(" isRegularFile : %b%n", isRegularFile());
            fm.format(" isDirectory : %b%n", isDirectory());
            fm.format(" isSymbolicLink : %b%n", isSymbolicLink());
            fm.format(" isOther : %b%n", isOther());
            fm.format(" fileKey : %s%n", fileKey());
            fm.format(" size : %d%n", size());
            fm.format(" compressedSize : %d%n", compressedSize());
            fm.format(" crc : %x%n", crc());
            fm.format(" method : %d%n", method());
            fm.close();
            return sb.toString();
        }
    }

    // Holds a channel (plus the streams reading from it) whose backing file
    // must be deleted once the channel and all streams are closed.
    private static class ExChannelCloser {
        Path path;
        SeekableByteChannel ch;
        Set<InputStream> streams;
        ExChannelCloser(Path path,
                        SeekableByteChannel ch,
                        Set<InputStream> streams)
        {
            this.path = path;
            this.ch = ch;
            this.streams = streams;
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourselves as
    // implemented below.

    // default time stamp for pseudo entries
    private long zfsDefaultTimeStamp = System.currentTimeMillis();

    // Unlinks inode from its parent's singly-linked child/sibling chain.
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            // inode is the head of the child list
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    // purely for parent lookup, so we don't have to copy the parent
    // name every time
    static class ParentLookup extends IndexNode {
        int len;    // number of leading bytes of 'name' that form the key
        ParentLookup() {}

        final ParentLookup as(byte[] name, int len) { // as a lookup "key"
            name(name, len);
            return this;
        }

        void name(byte[] name, int len) {
            this.name = name;
            this.len = len;
            // calculate the hashcode the same way as Arrays.hashCode() does
            int result = 1;
            for (int i = 0; i < len; i++)
                result = 31 * result + name[i];
            this.hashcode = result;
        }

        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // compare only the first 'len' bytes of our name
            byte[] oname = ((IndexNode)other).name;
            return Arrays.equals(name, 0, len,
                                 oname, 0, oname.length);
        }

    }

    // Links every inode into a parent/child tree rooted at ROOTPATH,
    // creating pseudo directory entries for parents that have no entry
    // of their own. Runs under the write lock.
    private void buildNodeTree() throws IOException {
        beginWrite();
        try {
            IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH));
            if (root == null) {
                root = new IndexNode(ROOTPATH, true);
            } else {
                inodes.remove(root);
            }
            IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]);
            inodes.put(root, root);
            ParentLookup lookup = new ParentLookup();
            for (IndexNode node : nodes) {
                IndexNode parent;
                // walk up the path, creating pseudo parents until we reach
                // an existing inode or the root
                while (true) {
                    int off = getParentOff(node.name);
                    if (off <= 1) {    // parent is root
                        node.sibling = root.child;
                        root.child = node;
                        break;
                    }
                    lookup = lookup.as(node.name, off);
                    if (inodes.containsKey(lookup)) {
                        parent = inodes.get(lookup);
                        node.sibling = parent.child;
                        parent.child = node;
                        break;
                    }
                    // add new pseudo directory entry
                    parent = new IndexNode(Arrays.copyOf(node.name, off), true);
                    inodes.put(parent, parent);
                    node.sibling = parent.child;
                    parent.child = node;
                    node = parent;
                }
            }
        } finally {
            endWrite();
        }
    }
}