/*
 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package jdk.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.Runtime.Version;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.SeekableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.nio.file.spi.FileSystemProvider;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.jar.Attributes;
import java.util.jar.Manifest;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.ZipException;

import static java.lang.Boolean.TRUE;
import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
import static java.nio.file.StandardOpenOption.APPEND;
import static java.nio.file.StandardOpenOption.CREATE;
import static java.nio.file.StandardOpenOption.CREATE_NEW;
import static java.nio.file.StandardOpenOption.READ;
import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
import static java.nio.file.StandardOpenOption.WRITE;
import static jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;
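
// Usage sketch (illustrative): this file system is normally obtained through
// the java.nio.file API with a "jar:" URI. The env keys shown ("create",
// "compressionMethod") are properties handled by this class; the zip path is
// a made-up example.
//
//   Map<String, Object> env = Map.of("create", "true",
//                                    "compressionMethod", "STORED");
//   try (FileSystem zipfs = FileSystems.newFileSystem(
//           URI.create("jar:file:/tmp/example.zip"), env)) {
//       Files.writeString(zipfs.getPath("/hello.txt"), "hello");
//   }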

/**
 * A FileSystem built on a zip file
 *
 * @author Xueming Shen
 */
class ZipFileSystem extends FileSystem {
    // statics
    private static final boolean isWindows = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)()->System.getProperty("os.name")
                                                 .startsWith("Windows"));
    private static final byte[] ROOTPATH = new byte[] { '/' };
    private static final String PROPERTY_POSIX = "enablePosixFileAttributes";
    private static final String PROPERTY_DEFAULT_OWNER = "defaultOwner";
    private static final String PROPERTY_DEFAULT_GROUP = "defaultGroup";
    private static final String PROPERTY_DEFAULT_PERMISSIONS = "defaultPermissions";
    // Property used to specify the entry version to use for a multi-release JAR
    private static final String PROPERTY_RELEASE_VERSION = "releaseVersion";
    // Original property used to specify the entry version to use for a
    // multi-release JAR which is kept for backwards compatibility.
    private static final String PROPERTY_MULTI_RELEASE = "multi-release";

    private static final Set<PosixFilePermission> DEFAULT_PERMISSIONS =
        PosixFilePermissions.fromString("rwxrwxrwx");
    // Property used to specify the compression mode to use
    private static final String PROPERTY_COMPRESSION_METHOD = "compressionMethod";
    // Value specified for compressionMethod property to compress Zip entries
    private static final String COMPRESSION_METHOD_DEFLATED = "DEFLATED";
    // Value specified for compressionMethod property to not compress Zip entries
    private static final String COMPRESSION_METHOD_STORED = "STORED";

    private final ZipFileSystemProvider provider;
    private final Path zfpath;
    final ZipCoder zc;
    private final ZipPath rootdir;
    private boolean readOnly; // readonly file system, false by default

    // default time stamp for pseudo entries
    private final long zfsDefaultTimeStamp = System.currentTimeMillis();

    // configurable by env map
    private final boolean noExtt;        // see readExtra()
    private final boolean useTempFile;   // use a temp file for newOS, default
                                         // is to use BAOS for better performance
    private final boolean forceEnd64;
    private final int defaultCompressionMethod; // METHOD_STORED if "noCompression=true"
                                                // METHOD_DEFLATED otherwise

    // entryLookup is identity by default, will be overridden for multi-release jars
    private Function<byte[], byte[]> entryLookup = Function.identity();

    // POSIX support
    final boolean supportPosix;
    private final UserPrincipal defaultOwner;
    private final GroupPrincipal defaultGroup;
    private final Set<PosixFilePermission> defaultPermissions;

    private final Set<String> supportedFileAttributeViews;

    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = isTrue(env, "useTempFile");
        this.forceEnd64 = isTrue(env, "forceZIP64End");
        this.defaultCompressionMethod = getDefaultCompressionMethod(env);
        this.supportPosix = isTrue(env, PROPERTY_POSIX);
        this.defaultOwner = initOwner(zfpath, env);
        this.defaultGroup = initGroup(zfpath, env);
        this.defaultPermissions = initPermissions(env);
        this.supportedFileAttributeViews = supportPosix ?
            Set.of("basic", "posix", "zip") : Set.of("basic", "zip");
        if (Files.notExists(zfpath)) {
            // create a new zip if it doesn't exist
            if (isTrue(env, "create")) {
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new NoSuchFileException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)()->Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;

        initializeReleaseVersion(env);
    }

    /**
     * Return the compression method to use (STORED or DEFLATED). If the
     * property {@code compressionMethod} is set, use its value to determine
     * the compression method to use. If the property is not set, then the
     * default compression is DEFLATED unless the property {@code noCompression}
     * is set, which is supported for backwards compatibility.
     * @param env Zip FS map of properties
     * @return The compression method to use
     */
    private int getDefaultCompressionMethod(Map<String, ?> env) {
        int result =
            isTrue(env, "noCompression") ? METHOD_STORED : METHOD_DEFLATED;
        if (env.containsKey(PROPERTY_COMPRESSION_METHOD)) {
            Object compressionMethod = env.get(PROPERTY_COMPRESSION_METHOD);
            if (compressionMethod != null) {
                if (compressionMethod instanceof String) {
                    switch (((String) compressionMethod).toUpperCase()) {
                        case COMPRESSION_METHOD_STORED:
                            result = METHOD_STORED;
                            break;
                        case COMPRESSION_METHOD_DEFLATED:
                            result = METHOD_DEFLATED;
                            break;
                        default:
                            throw new IllegalArgumentException(String.format(
                                    "The value for the %s property must be %s or %s",
                                    PROPERTY_COMPRESSION_METHOD, COMPRESSION_METHOD_STORED,
                                    COMPRESSION_METHOD_DEFLATED));
                    }
                } else {
                    throw new IllegalArgumentException(String.format(
                            "The Object type for the %s property must be a String",
                            PROPERTY_COMPRESSION_METHOD));
                }
            } else {
                throw new IllegalArgumentException(String.format(
                        "The value for the %s property must be %s or %s",
                        PROPERTY_COMPRESSION_METHOD, COMPRESSION_METHOD_STORED,
                        COMPRESSION_METHOD_DEFLATED));
            }
        }
        return result;
    }

    // returns true if there is a name=true/"true" setting in env
    private static boolean isTrue(Map<String, ?> env, String name) {
        return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
    }
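
    // For illustration: boolean env properties are accepted either as the
    // string "true" or as Boolean.TRUE, so both of these (hypothetical)
    // maps enable temp-file backed output streams:
    //
    //   Map<String, Object> env1 = Map.of("useTempFile", "true");
    //   Map<String, Object> env2 = Map.of("useTempFile", true);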

    // Initialize the default owner for files inside the zip archive.
    // If not specified in env, it is the owner of the archive. If no owner can
    // be determined, we try to go with system property "user.name". If that's not
    // accessible, we return "<zipfs_default>".
    private UserPrincipal initOwner(Path zfpath, Map<String, ?> env) throws IOException {
        Object o = env.get(PROPERTY_DEFAULT_OWNER);
        if (o == null) {
            try {
                PrivilegedExceptionAction<UserPrincipal> pa = ()->Files.getOwner(zfpath);
                return AccessController.doPrivileged(pa);
            } catch (UnsupportedOperationException | PrivilegedActionException e) {
                if (e instanceof UnsupportedOperationException ||
                    e.getCause() instanceof NoSuchFileException)
                {
                    PrivilegedAction<String> pa = ()->System.getProperty("user.name");
                    String userName = AccessController.doPrivileged(pa);
                    return ()->userName;
                } else {
                    throw new IOException(e);
                }
            }
        }
        if (o instanceof String) {
            if (((String)o).isEmpty()) {
                throw new IllegalArgumentException("Value for property " +
                    PROPERTY_DEFAULT_OWNER + " must not be empty.");
            }
            return ()->(String)o;
        }
        if (o instanceof UserPrincipal) {
            return (UserPrincipal)o;
        }
        throw new IllegalArgumentException("Value for property " +
            PROPERTY_DEFAULT_OWNER + " must be of type " + String.class +
            " or " + UserPrincipal.class);
    }

    // Initialize the default group for files inside the zip archive.
    // If not specified in env, we try to determine the group of the zip archive itself.
    // If this is not possible/unsupported, we will return a group principal going by
    // the same name as the default owner.
    private GroupPrincipal initGroup(Path zfpath, Map<String, ?> env) throws IOException {
        Object o = env.get(PROPERTY_DEFAULT_GROUP);
        if (o == null) {
            try {
                PosixFileAttributeView zfpv = Files.getFileAttributeView(zfpath, PosixFileAttributeView.class);
                if (zfpv == null) {
                    return defaultOwner::getName;
                }
                PrivilegedExceptionAction<GroupPrincipal> pa = ()->zfpv.readAttributes().group();
                return AccessController.doPrivileged(pa);
            } catch (UnsupportedOperationException | PrivilegedActionException e) {
                if (e instanceof UnsupportedOperationException ||
                    e.getCause() instanceof NoSuchFileException)
                {
                    return defaultOwner::getName;
                } else {
                    throw new IOException(e);
                }
            }
        }
        if (o instanceof String) {
            if (((String)o).isEmpty()) {
                throw new IllegalArgumentException("Value for property " +
                    PROPERTY_DEFAULT_GROUP + " must not be empty.");
            }
            return ()->(String)o;
        }
        if (o instanceof GroupPrincipal) {
            return (GroupPrincipal)o;
        }
        throw new IllegalArgumentException("Value for property " +
            PROPERTY_DEFAULT_GROUP + " must be of type " + String.class +
            " or " + GroupPrincipal.class);
    }
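
    // For illustration only: defaultOwner/defaultGroup accept either a
    // non-empty String or a UserPrincipal/GroupPrincipal, and
    // defaultPermissions accepts a permission string or a Set of
    // PosixFilePermission. The values below are made-up examples:
    //
    //   Map<String, Object> env = Map.of(
    //       "enablePosixFileAttributes", "true",
    //       "defaultOwner", "alice",
    //       "defaultGroup", "staff",
    //       "defaultPermissions", "rw-r--r--");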

    // Initialize the default permissions for files inside the zip archive.
    // If not specified in env, it will return 777.
    private Set<PosixFilePermission> initPermissions(Map<String, ?> env) {
        Object o = env.get(PROPERTY_DEFAULT_PERMISSIONS);
        if (o == null) {
            return DEFAULT_PERMISSIONS;
        }
        if (o instanceof String) {
            return PosixFilePermissions.fromString((String)o);
        }
        if (!(o instanceof Set)) {
            throw new IllegalArgumentException("Value for property " +
                PROPERTY_DEFAULT_PERMISSIONS + " must be of type " + String.class +
                " or " + Set.class);
        }
        Set<PosixFilePermission> perms = new HashSet<>();
        for (Object o2 : (Set<?>)o) {
            if (o2 instanceof PosixFilePermission) {
                perms.add((PosixFilePermission)o2);
            } else {
                throw new IllegalArgumentException(PROPERTY_DEFAULT_PERMISSIONS +
                    " must only contain objects of type " + PosixFilePermission.class);
            }
        }
        return perms;
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    private void checkWritable() {
        if (readOnly) {
            throw new ReadOnlyFileSystemException();
        }
    }

    void setReadOnly() {
        this.readOnly = true;
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        return List.of(rootdir);
    }

    ZipPath getRootDir() {
        return rootdir;
    }

    @Override
    public ZipPath getPath(String first, String... more) {
        if (more.length == 0) {
            return new ZipPath(this, first);
        }
        StringBuilder sb = new StringBuilder();
        sb.append(first);
        for (String path : more) {
            if (path.length() > 0) {
                if (sb.length() > 0) {
                    sb.append('/');
                }
                sb.append(path);
            }
        }
        return new ZipPath(this, sb.toString());
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        return List.of(new ZipFileStore(rootdir));
    }

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);
        } else {
            if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return (path)->pattern.matcher(path.toString()).matches();
    }
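
    // For illustration: the matcher syntaxes accepted above are "glob" and
    // "regex", e.g. (zipfs and the path are hypothetical):
    //
    //   PathMatcher m = zipfs.getPathMatcher("glob:**.class");
    //   boolean matches = m.matches(zipfs.getPath("/foo/Bar.class"));  // true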

    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is : copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> {
                sync(); return null;
            });
            ch.close();                 // close the ch just in case no update
                                        // and sync didn't close the ch
        } catch (PrivilegedActionException e) {
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }
        beginWrite();                   // lock and sync
        try {
            // dereference the inodes map, since each entry in that map
            // can potentially hold on to large amounts of data (especially
            // IndexNode of type ZipFileSystem$Entry)
            inodes = null;
        } finally {
            endWrite();
        }

        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p : tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
            throw ioe;
    }

    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null) {
                return null;
            } else if (inode instanceof Entry) {
                return (Entry)inode;
            } else if (inode.pos == -1) {
                // pseudo directory, uses METHOD_STORED
                Entry e = supportPosix ?
                    new PosixEntry(inode.name, inode.isdir, METHOD_STORED) :
                    new Entry(inode.name, inode.isdir, METHOD_STORED);
                e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp;
                return e;
            } else {
                return supportPosix ? new PosixEntry(this, inode) : new Entry(this, inode);
            }
        } finally {
            endRead();
        }
    }

    void checkAccess(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            // is it necessary to readCEN as a sanity check?
            if (getInode(path) == null) {
                throw new NoSuchFileException(toString());
            }

        } finally {
            endRead();
        }
    }

    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;     // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    void setOwner(byte[] path, UserPrincipal owner) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null) {
                throw new NoSuchFileException(getString(path));
            }
            // as the owner information is not persistent, we don't need to
            // change e.type to Entry.COPY
            if (e instanceof PosixEntry) {
                ((PosixEntry)e).owner = owner;
                update(e);
            }
        } finally {
            endWrite();
        }
    }

    void setGroup(byte[] path, GroupPrincipal group) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null) {
                throw new NoSuchFileException(getString(path));
            }
            // as the group information is not persistent, we don't need to
            // change e.type to Entry.COPY
            if (e instanceof PosixEntry) {
                ((PosixEntry)e).group = group;
                update(e);
            }
        } finally {
            endWrite();
        }
    }

    void setPermissions(byte[] path, Set<PosixFilePermission> perms) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null) {
                throw new NoSuchFileException(getString(path));
            }
            if (e.type == Entry.CEN) {
                e.type = Entry.COPY;     // copy e
            }
            e.posixPerms = perms == null ? -1 : ZipUtils.permsToFlags(perms);
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path) {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path) {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(ZipPath dir,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            byte[] path = dir.getResolvedPath();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                // (1) Assume each path from the zip file itself is "normalized"
                // (2) IndexNode.name is absolute. see IndexNode(byte[],int,int)
                // (3) If parent "dir" is relative when ZipDirectoryStream
                //     is created, the returned child path needs to be relative
                //     as well.
                ZipPath childPath = new ZipPath(this, child.name, true);
                ZipPath childFileName = childPath.getFileName();
                ZipPath zpath = dir.resolve(childFileName);
                if (filter == null || filter.accept(zpath))
                    list.add(zpath);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    void createDirectory(byte[] dir, FileAttribute<?>... attrs) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = supportPosix ?
                new PosixEntry(dir, Entry.NEW, true, METHOD_STORED, attrs) :
                new Entry(dir, Entry.NEW, true, METHOD_STORED, attrs);
            update(e);
        } finally {
            endWrite();
        }
    }

    void copyFile(boolean deletesrc, byte[] src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry(src);  // ensureOpen checked

            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            // copy eSrc entry and change name
            Entry u = supportPosix ?
                new PosixEntry((PosixEntry)eSrc, Entry.COPY) :
                new Entry(eSrc, Entry.COPY);
            u.name(dst);
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            } else if (eSrc.type == Entry.CEN && eSrc.method != defaultCompressionMethod) {

                /**
                 * We are copying a file within the same Zip file using a
                 * different compression method.
                 */
                try (InputStream in = newInputStream(src);
                     OutputStream out = newOutputStream(dst,
                             CREATE, TRUNCATE_EXISTING, WRITE)) {
                    in.transferTo(out);
                }
                u = getEntry(dst);
            }

            if (!hasCopyAttrs)
                u.mtime = u.atime = u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }
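
    // For illustration: copyFile is the method behind Files.copy/Files.move
    // between two paths of the same zip file system, e.g. (entry names are
    // made-up examples):
    //
    //   Files.copy(zipfs.getPath("/a.txt"), zipfs.getPath("/b.txt"),
    //              StandardCopyOption.REPLACE_EXISTING);
    //   Files.move(zipfs.getPath("/b.txt"), zipfs.getPath("/c.txt"));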

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        boolean hasTruncate = false;
        for (OpenOption opt : options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
            if (opt == TRUNCATE_EXISTING)
                hasTruncate = true;
        }
        if (hasAppend && hasTruncate)
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed
            Entry e = getEntry(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    try (InputStream is = getInputStream(e)) {
                        is.transferTo(os);
                    }
                    return os;
                }
                return getOutputStream(supportPosix ?
                    new PosixEntry((PosixEntry)e, Entry.NEW, defaultCompressionMethod)
                    : new Entry(e, Entry.NEW, defaultCompressionMethod));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(supportPosix ?
                    new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod) :
                    new Entry(path, Entry.NEW, false, defaultCompressionMethod));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    private void checkOptions(Set<? extends OpenOption> options) {
        // check that each option is not null and is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }
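
    // For illustration: these streams are what Files.newOutputStream and
    // Files.newInputStream return for a path inside the zip, e.g. (entry
    // name is a made-up example):
    //
    //   try (OutputStream out = Files.newOutputStream(
    //           zipfs.getPath("/notes.txt"), CREATE, APPEND)) {
    //       out.write("appended line\n".getBytes(StandardCharsets.UTF_8));
    //   }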

    // Returns an output SeekableByteChannel for either
    // (1) writing the contents of a new entry, if the entry doesn't exist, or
    // (2) updating/replacing the contents of an existing entry.
    // Note: The content of the channel is not compressed until the
    // channel is closed
    private class EntryOutputChannel extends ByteArrayChannel {
        final Entry e;

        EntryOutputChannel(Entry e) {
            super(e.size > 0 ? (int)e.size : 8192, false);
            this.e = e;
            if (e.mtime == -1)
                e.mtime = System.currentTimeMillis();
            if (e.method == -1)
                e.method = defaultCompressionMethod;
            // store size, compressed size, and crc-32 in datadescriptor
            e.flag = FLAG_DATADESCR;
            if (zc.isUTF8())
                e.flag |= FLAG_USE_UTF8;
        }

        @Override
        public void close() throws IOException {
            // will update the entry
            try (OutputStream os = getOutputStream(e)) {
                os.write(toByteArray());
            }
            super.close();
        }
    }

    // Returns a Writable/ReadByteChannel for now. Might consider using
    // newFileChannel() instead, which dumps the entry data into a regular
    // file on the default file system and creates a FileChannel on top of it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();    // only need a read lock, the "update()" will obtain
                            // the write lock when the channel is closed
            try {
                Entry e = getEntry(path);
                if (e != null) {
                    if (e.isDir() || options.contains(CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    SeekableByteChannel sbc =
                        new EntryOutputChannel(supportPosix ?
                            new PosixEntry((PosixEntry)e, Entry.NEW) :
                            new Entry(e, Entry.NEW));
                    if (options.contains(APPEND)) {
                        try (InputStream is = getInputStream(e)) {  // copyover
                            byte[] buf = new byte[8192];
                            ByteBuffer bb = ByteBuffer.wrap(buf);
                            int n;
                            while ((n = is.read(buf)) != -1) {
                                bb.position(0);
                                bb.limit(n);
                                sbc.write(bb);
                            }
                        }
                    }
                    return sbc;
                }
                if (!options.contains(CREATE) && !options.contains(CREATE_NEW))
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return new EntryOutputChannel(
                    supportPosix ?
                        new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod, attrs) :
                        new Entry(path, Entry.NEW, false, defaultCompressionMethod, attrs));
            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                try (InputStream is = getInputStream(e)) {
                    // TBD: if (e.size < NNNNN);
                    return new ByteArrayChannel(is.readAllBytes(), true);
                }
            } finally {
                endRead();
            }
        }
    }
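
    // For illustration: a read-only channel obtained through the public API,
    // e.g. Files.newByteChannel(zipfs.getPath("/data.bin"), READ), is backed
    // by an in-memory copy of the decompressed entry (see the read branch
    // above), so it is seekable at the cost of holding the whole entry in
    // memory. The path shown is a made-up example.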

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copies the entry data into it if the entry exists, and then creates a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : (
                supportPosix ?
                    new PosixEntry(path, tmpfile, Entry.FILECH, attrs) :
                    new Entry(path, tmpfile, Entry.FILECH, attrs));
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = defaultCompressionMethod;
            }
            // is there a better way to hook into the FileChannel's close method?
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                {
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);
                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<>());

    // the ex-channel and ex-path that need to be closed/deleted once all of
    // their outstanding input streams have been closed by their users.
    private final Set<ExistingChannelCloser> exChClosers = new HashSet<>();

    private final Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<>());
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
    private void checkParents(byte[] path) throws IOException {
        beginRead();
        try {
            while ((path = getParent(path)) != null &&
                    path != ROOTPATH) {
                if (!inodes.containsKey(IndexNode.keyOf(path))) {
                    throw new NoSuchFileException(getString(path));
                }
            }
        } finally {
            endRead();
        }
    }

    private static byte[] getParent(byte[] path) {
        int off = getParentOff(path);
        if (off <= 1)
            return ROOTPATH;
        return Arrays.copyOf(path, off);
    }

    private static int getParentOff(byte[] path) {
        int off = path.length - 1;
        if (off > 0 && path[off] == '/')  // isDirectory
            off--;
        while (off > 0 && path[off] != '/') { off--; }
        return off;
    }

    private void beginWrite() {
        rwlock.writeLock().lock();
    }

    private void endWrite() {
        rwlock.writeLock().unlock();
    }

    private void beginRead() {
        rwlock.readLock().lock();
    }

    private void endRead() {
        rwlock.readLock().unlock();
    }

    ///////////////////////////////////////////////////////////////////

    private volatile boolean isOpen = true;
    private final SeekableByteChannel ch;   // channel to the zipfile
    final byte[] cen;                       // CEN & ENDHDR
    private END end;
    private long locpos;                    // position of first LOC header (usually 0)

    private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

    // name -> pos (in cen), IndexNode itself can be used as a "key"
    private LinkedHashMap<IndexNode, IndexNode> inodes;

    final byte[] getBytes(String name) {
        return zc.getBytes(name);
    }

    final String getString(byte[] name) {
        return zc.toString(name);
    }

    @SuppressWarnings("deprecation")
    protected void finalize() throws IOException {
        close();
    }

    // Reads len bytes of data from the specified offset into buf.
    // Returns the total number of bytes read.
    // Every byte is read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    private long readFullyAt(ByteBuffer bb, long pos) throws IOException {
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for the end of central directory (END) header. Returns the
    // parsed END record if it is found; throws ZipException if the END header
    // is missing or an error occurs while reading it.
    private END findEND() throws IOException {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                throw new ZipException("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i] == (byte)'P' &&
                    buf[i+1] == (byte)'K' &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    // end.endsub = ENDSUB(buf); // not used
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    // end.comlen = ENDCOM(buf); // not used
                    end.endpos = pos + i;
                    // try if there is zip64 end;
                    byte[] loc64 = new byte[ZIP64_LOCHDR];
                    if (end.endpos < ZIP64_LOCHDR ||
                        readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                            != loc64.length ||
                        !locator64SigAt(loc64, 0)) {
                        return end;
                    }
                    long end64pos = ZIP64_LOCOFF(loc64);
                    byte[] end64buf = new byte[ZIP64_ENDHDR];
                    if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                            != end64buf.length ||
                        !end64SigAt(end64buf, 0)) {
                        return end;
                    }
                    // end64 found,
                    long cenlen64 = ZIP64_ENDSIZ(end64buf);
                    long cenoff64 = ZIP64_ENDOFF(end64buf);
                    long centot64 = ZIP64_ENDTOT(end64buf);
                    // double-check
                    if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL ||
                        cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL ||
                        centot64 != end.centot && end.centot != ZIP64_MINVAL32) {
                        return end;
                    }
                    // to use the end64 values
                    end.cenlen = cenlen64;
                    end.cenoff = cenoff64;
                    end.centot = (int)centot64; // assume total < 2g
                    end.endpos = end64pos;
                    return end;
                }
            }
        }
        throw new ZipException("zip END header not found");
    }

    private void makeParentDirs(IndexNode node, IndexNode root) {
        IndexNode parent;
        ParentLookup lookup = new ParentLookup();
        while (true) {
            int off = getParentOff(node.name);
            // parent is root
            if (off <= 1) {
                node.sibling = root.child;
                root.child = node;
                break;
            }
            // parent exists
            lookup = lookup.as(node.name, off);
            if (inodes.containsKey(lookup)) {
                parent = inodes.get(lookup);
                node.sibling = parent.child;
                parent.child = node;
                break;
            }
            // parent does not exist, add new pseudo directory entry
            parent = new IndexNode(Arrays.copyOf(node.name, off), true);
            inodes.put(parent, parent);
            node.sibling = parent.child;
            parent.child = node;
            node = parent;
        }
    }

    // The ZIP directory has two issues:
    // (1) the ZIP spec does not require the ZIP file to include
    //     directory entries
    // (2) entries are not stored/organized in a "tree" structure.
    // A possible solution is to build the node tree ourselves, as
    // implemented below.
    private void buildNodeTree() {
        beginWrite();
        try {
            IndexNode root = inodes.remove(LOOKUPKEY.as(ROOTPATH));
            if (root == null) {
                root = new IndexNode(ROOTPATH, true);
            }
            IndexNode[] nodes = inodes.values().toArray(new IndexNode[0]);
            inodes.put(root, root);
            for (IndexNode node : nodes) {
                makeParentDirs(node, root);
            }
        } finally {
            endWrite();
        }
    }

    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    /**
     * If a version property has been specified and the file represents a multi-release JAR,
     * determine the requested runtime version and initialize the ZipFileSystem instance accordingly.
     *
     * Checks if the Zip File System property "releaseVersion" has been specified. If it has,
     * use its value to determine the requested version. If not, use the value of the "multi-release" property.
     */
    private void initializeReleaseVersion(Map<String, ?> env) throws IOException {
        Object o = env.containsKey(PROPERTY_RELEASE_VERSION) ?
            env.get(PROPERTY_RELEASE_VERSION) :
            env.get(PROPERTY_MULTI_RELEASE);

        if (o != null && isMultiReleaseJar()) {
            int version;
            if (o instanceof String) {
                String s = (String)o;
                if (s.equals("runtime")) {
                    version = Runtime.version().feature();
                } else if (s.matches("^[1-9][0-9]*$")) {
                    version = Version.parse(s).feature();
                } else {
                    throw new IllegalArgumentException("Invalid runtime version");
                }
            } else if (o instanceof Integer) {
                version = Version.parse(((Integer)o).toString()).feature();
            } else if (o instanceof Version) {
                version = ((Version)o).feature();
            } else {
                throw new IllegalArgumentException("env parameter must be String, " +
                    "Integer, or Version");
            }
            createVersionedLinks(version < 0 ? 0 : version);
            setReadOnly();
        }
    }
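
    // For illustration: for a multi-release JAR, the requested version can be
    // passed through the env map in any of the forms handled above
    // (the JAR path is a made-up example):
    //
    //   Map<String, Object> env = Map.of("releaseVersion", "runtime");
    //   try (FileSystem zipfs = FileSystems.newFileSystem(
    //           URI.create("jar:file:/tmp/mr.jar"), env)) {
    //       // entries resolve to their highest applicable versioned copies
    //   }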

    /**
     * Returns true if the Manifest main attribute "Multi-Release" is set to true; false otherwise.
     */
    private boolean isMultiReleaseJar() throws IOException {
        try (InputStream is = newInputStream(getBytes("/META-INF/MANIFEST.MF"))) {
            String multiRelease = new Manifest(is).getMainAttributes()
                .getValue(Attributes.Name.MULTI_RELEASE);
            return "true".equalsIgnoreCase(multiRelease);
        } catch (NoSuchFileException x) {
            return false;
        }
    }

    /**
     * Create a map of aliases for versioned entries, for example:
     *   version/PackagePrivate.class -> META-INF/versions/9/version/PackagePrivate.class
     *   version/PackagePrivate.java -> META-INF/versions/9/version/PackagePrivate.java
     *   version/Version.class -> META-INF/versions/10/version/Version.class
     *   version/Version.java -> META-INF/versions/10/version/Version.java
     *
     * Then wrap the map in a function that getEntry can use to override root
     * entry lookup for entries that have corresponding versioned entries.
     */
    private void createVersionedLinks(int version) {
        IndexNode verdir = getInode(getBytes("/META-INF/versions"));
        // nothing to do, if no /META-INF/versions
        if (verdir == null) {
            return;
        }
        // otherwise, create a map and for each META-INF/versions/{n} directory
        // put all the leaf inodes, i.e. entries, into the alias map
        // possibly shadowing lower versioned entries
        HashMap<IndexNode, byte[]> aliasMap = new HashMap<>();
        getVersionMap(version, verdir).values().forEach(versionNode ->
            walk(versionNode.child, entryNode ->
                aliasMap.put(
                    getOrCreateInode(getRootName(entryNode, versionNode), entryNode.isdir),
                    entryNode.name))
        );
        entryLookup = path -> {
            byte[] entry = aliasMap.get(IndexNode.keyOf(path));
            return entry == null ? path : entry;
        };
    }

    /**
     * Create a sorted version map of version -> inode, for inodes <= max version.
     *   9 -> META-INF/versions/9
     *   10 -> META-INF/versions/10
     */
    private TreeMap<Integer, IndexNode> getVersionMap(int version, IndexNode metaInfVersions) {
        TreeMap<Integer,IndexNode> map = new TreeMap<>();
        IndexNode child = metaInfVersions.child;
        while (child != null) {
            Integer key = getVersion(child, metaInfVersions);
            if (key != null && key <= version) {
                map.put(key, child);
            }
            child = child.sibling;
        }
        return map;
    }

    /**
     * Extract the integer version number -- META-INF/versions/9 returns 9.
     */
    private Integer getVersion(IndexNode inode, IndexNode metaInfVersions) {
        try {
            byte[] fullName = inode.name;
            return Integer.parseInt(getString(Arrays
                .copyOfRange(fullName, metaInfVersions.name.length + 1, fullName.length)));
        } catch (NumberFormatException x) {
            // ignore this even though it might indicate issues with the JAR structure
            return null;
        }
    }

    /**
     * Walk the IndexNode tree processing all leaf nodes.
     */
    private void walk(IndexNode inode, Consumer<IndexNode> consumer) {
        if (inode == null) return;
        if (inode.isDir()) {
            walk(inode.child, consumer);
        } else {
            consumer.accept(inode);
        }
        walk(inode.sibling, consumer);
    }

    /**
     * Extract the root name from a versioned entry name.
     * E.g. given inode 'META-INF/versions/9/foo/bar.class'
     * and prefix 'META-INF/versions/9/' returns 'foo/bar.class'.
     */
    private byte[] getRootName(IndexNode inode, IndexNode prefix) {
        byte[] fullName = inode.name;
        return Arrays.copyOfRange(fullName, prefix.name.length, fullName.length);
    }

    // Reads the zip file central directory and populates the inode map.
    // Returns the bytes of the CEN table (plus the END header), or null if
    // only the END header is present.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;         // only END header present
        }
        if (end.cenlen > end.endpos)
            throw new ZipException("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            throw new ZipException("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            throw new ZipException("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                throw new ZipException("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0) {
                throw new ZipException("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                throw new ZipException("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                throw new ZipException("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos, nlen);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            throw new ZipException("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path) throws IOException {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    private boolean hasUpdate = false;

    // shared key. The consumer must hold the write lock before using it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
    private long copyLOCEntry(Entry e, boolean updateHeader,
                              OutputStream os,
                              long written, byte[] buf)
        throws IOException
    {
        long locoff = e.locoff;  // where to read
        e.locoff = written;      // update the e.locoff with new value

        // calculate the size that needs to be written out
        long size = 0;
        // if there is an EXT (data descriptor)
        if ((e.flag & FLAG_DATADESCR) != 0) {
            if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL)
                size = 24;
            else
                size = 16;
        }
        // read loc, use the original loc.elen/nlen
        //
        // an extra byte after the LOC is read, which should be the first byte of
        // the 'name' field of the LOC. If this byte is '/', the original entry
        // has an absolute path in the original zip/jar file; in that case
        // e.writeLOC() is used to output the LOC, with the leading '/' removed.
        if (readFullyAt(buf, 0, LOCHDR + 1, locoff) != LOCHDR + 1)
            throw new ZipException("loc: reading failed");

        if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') {
            locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf);  // skip header
            size += e.csize;
            written = e.writeLOC(os) + size;
        } else {
            os.write(buf, 0, LOCHDR);    // write out the loc header
            locoff += LOCHDR;
            // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on
            // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf);
            size += LOCNAM(buf) + LOCEXT(buf) + e.csize;
            written = LOCHDR + size;
        }
        int n;
        while (size > 0 &&
            (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1)
        {
            if (size < n)
                n = (int)size;
            os.write(buf, 0, n);
            size -= n;
            locoff += n;
        }
        return written;
    }

    private long writeEntry(Entry e, OutputStream os)
        throws IOException {

        if (e.bytes == null && e.file == null)    // dir, 0-length data
            return 0;

        long written = 0;
        if (e.method != METHOD_STORED && e.csize > 0 && (e.crc != 0 || e.size == 0)) {
            // pre-compressed entry, write directly to output stream
            writeTo(e, os);
        } else {
            try (OutputStream os2 = (e.method == METHOD_STORED) ?
                new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) {
                writeTo(e, os2);
            }
        }
        written += e.csize;
        if ((e.flag & FLAG_DATADESCR) != 0) {
            written += e.writeEXT(os);
        }
        return written;
    }

    private void writeTo(Entry e, OutputStream os) throws IOException {
        if (e.bytes != null) {
            os.write(e.bytes, 0, e.bytes.length);
        } else if (e.file != null) {
            if (e.type == Entry.NEW || e.type == Entry.FILECH) {
                try (InputStream is = Files.newInputStream(e.file)) {
                    is.transferTo(os);
                }
            }
            Files.delete(e.file);
            tmppaths.remove(e.file);
        }
    }

    // sync the zip file system, if there is any update
    private void sync() throws IOException {
        // check ex-closer
        if (!exChClosers.isEmpty()) {
            for (ExistingChannelCloser ecc : exChClosers) {
                if (ecc.closeAndDeleteIfDone()) {
                    exChClosers.remove(ecc);
                }
            }
        }
        if (!hasUpdate)
            return;
        PosixFileAttributes attrs = getPosixAttributes(zfpath);
        Path tmpFile = createTempFileInSameDirectoryAs(zfpath);
        try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) {
            ArrayList<Entry> elist = new ArrayList<>(inodes.size());
            long written = 0;
            byte[] buf = null;
            Entry e;

            // write loc
            for (IndexNode inode : inodes.values()) {
                if (inode instanceof Entry) {    // an updated inode
                    e = (Entry)inode;
                    try {
                        if (e.type == Entry.COPY) {
                            // entry copy: the only thing changed is the "name"
                            // and "nlen" in LOC header, so we update/rewrite the
                            // LOC in new file and simply copy the rest (data and
                            // ext) without inflating/deflating from the old zip
                            // file LOC entry.
                            if (buf == null)
                                buf = new byte[8192];
                            written += copyLOCEntry(e, true, os, written, buf);
                        } else {                          // NEW, FILECH or CEN
                            e.locoff = written;
                            written += e.writeLOC(os);    // write loc header
                            written += writeEntry(e, os);
                        }
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();    // skip any inaccurate entry
                    }
                } else {                        // unchanged inode
                    if (inode.pos == -1) {
                        continue;               // pseudo directory node
                    }
                    if (inode.name.length == 1 && inode.name[0] == '/') {
                        continue;               // no root '/' directory even if it
                                                // exists in original zip/jar file.
                    }
                    e = supportPosix ?
                        new PosixEntry(this, inode) : new Entry(this, inode);
1765                     try {
1766                         if (buf == null)
1767                             buf = new byte[8192];
1768                         written += copyLOCEntry(e, false, os, written, buf);
1769                         elist.add(e);
1770                     } catch (IOException x) {
1771                         x.printStackTrace();    // skip any wrong entry
1772                     }
1773                 }
1774             }
1775
1776             // now write back the cen and end table
1777             end.cenoff = written;
1778             for (Entry entry : elist) {
1779                 written += entry.writeCEN(os);
1780             }
1781             end.centot = elist.size();
1782             end.cenlen = written - end.cenoff;
1783             end.write(os, written, forceEnd64);
1784         }
1785         if (!streams.isEmpty()) {
1786             //
1787             // There are outstanding input streams open on the existing "ch",
1788             // so don't close "ch" and delete the file for now; let
1789             // the "ex-channel-closer" handle them
1790             Path path = createTempFileInSameDirectoryAs(zfpath);
1791             ExistingChannelCloser ecc = new ExistingChannelCloser(path,
1792                                                                   ch,
1793                                                                   streams);
1794             Files.move(zfpath, path, REPLACE_EXISTING);
1795             exChClosers.add(ecc);
1796             streams = Collections.synchronizedSet(new HashSet<>());
1797         } else {
1798             ch.close();
1799             Files.delete(zfpath);
1800         }
1801
1802         // Set the POSIX permissions of the original Zip file, if available,
1803         // on the temp file before moving it into place
1804         if (attrs != null) {
1805             Files.setPosixFilePermissions(tmpFile, attrs.permissions());
1806         }
1807         Files.move(tmpFile, zfpath, REPLACE_EXISTING);
1808         hasUpdate = false;    // clear
1809     }
1810
1811     /**
1812      * Returns a file's POSIX file attributes.
1813      * @param path The path to the file
1814      * @return The POSIX file attributes for the specified file or
1815      *         null if the POSIX attribute view is not available
1816      * @throws IOException If an error occurs obtaining the POSIX attributes for
1817      *                     the specified file
1818      */
1819     private PosixFileAttributes getPosixAttributes(Path path) throws IOException {
1820         try {
1821             PosixFileAttributeView view =
1822                 Files.getFileAttributeView(path, PosixFileAttributeView.class);
1823             // Return if the attribute view is not supported
1824             if (view == null) {
1825                 return null;
1826             }
1827             return view.readAttributes();
1828         } catch (UnsupportedOperationException e) {
1829             // PosixFileAttributes not available
1830             return null;
1831         }
1832     }
1833
1834     private IndexNode getInode(byte[] path) {
1835         return inodes.get(IndexNode.keyOf(Objects.requireNonNull(entryLookup.apply(path), "path")));
1836     }
1837
1838     /**
1839      * Returns the IndexNode from the root tree. If it doesn't exist,
1840      * it gets created along with all parent directory IndexNodes.
1841      */
1842     private IndexNode getOrCreateInode(byte[] path, boolean isdir) {
1843         IndexNode node = getInode(path);
1844         // if the node exists, return it
1845         if (node != null) {
1846             return node;
1847         }
1848
1849         // otherwise create a new pseudo node and the parent directory hierarchy
1850         node = new IndexNode(path, isdir);
1851         beginWrite();
1852         try {
1853             makeParentDirs(node, Objects.requireNonNull(inodes.get(IndexNode.keyOf(ROOTPATH)), "no root node found"));
1854             return node;
1855         } finally {
1856             endWrite();
1857         }
1858     }
1859
1860     private Entry getEntry(byte[] path) throws IOException {
1861         IndexNode inode = getInode(path);
1862         if (inode instanceof Entry)
1863             return (Entry)inode;
1864         if (inode == null || inode.pos == -1)
1865             return null;
1866         return supportPosix ?
            new PosixEntry(this, inode) : new Entry(this, inode);
1867     }
1868
1869     public void deleteFile(byte[] path, boolean failIfNotExists)
1870         throws IOException
1871     {
1872         checkWritable();
1873         IndexNode inode = getInode(path);
1874         if (inode == null) {
1875             if (path != null && path.length == 0)
1876                 throw new ZipException("root directory </> cannot be deleted");
1877             if (failIfNotExists)
1878                 throw new NoSuchFileException(getString(path));
1879         } else {
1880             if (inode.isDir() && inode.child != null)
1881                 throw new DirectoryNotEmptyException(getString(path));
1882             updateDelete(inode);
1883         }
1884     }
1885
1886     // Returns an output stream for either
1887     // (1) writing the contents of a new entry, if the entry does not exist yet, or
1888     // (2) updating/replacing the contents of the specified existing entry.
1889     private OutputStream getOutputStream(Entry e) throws IOException {
1890         if (e.mtime == -1)
1891             e.mtime = System.currentTimeMillis();
1892         if (e.method == -1)
1893             e.method = defaultCompressionMethod;
1894         // store size, compressed size, and crc-32 in datadescr
1895         e.flag = FLAG_DATADESCR;
1896         if (zc.isUTF8())
1897             e.flag |= FLAG_USE_UTF8;
1898         OutputStream os;
1899         if (useTempFile) {
1900             e.file = getTempPathForEntry(null);
1901             os = Files.newOutputStream(e.file, WRITE);
1902         } else {
1903             os = new ByteArrayOutputStream((e.size > 0) ? (int)e.size : 8192);
1904         }
1905         if (e.method == METHOD_DEFLATED) {
1906             return new DeflatingEntryOutputStream(e, os);
1907         } else {
1908             return new EntryOutputStream(e, os);
1909         }
1910     }
1911
1912     private class EntryOutputStream extends FilterOutputStream {
1913         private final Entry e;
1914         private long written;
1915         private boolean isClosed;
1916
1917         EntryOutputStream(Entry e, OutputStream os) {
1918             super(os);
1919             this.e = Objects.requireNonNull(e, "Zip entry is null");
1920             // this.written = 0;
1921         }
1922
1923         @Override
1924         public synchronized void write(int b) throws IOException {
1925             out.write(b);
1926             written += 1;
1927         }
1928
1929         @Override
1930         public synchronized void write(byte[] b, int off, int len)
1931             throws IOException {
1932             out.write(b, off, len);
1933             written += len;
1934         }
1935
1936         @Override
1937         public synchronized void close() throws IOException {
1938             if (isClosed) {
1939                 return;
1940             }
1941             isClosed = true;
1942             e.size = written;
1943             if (out instanceof ByteArrayOutputStream)
1944                 e.bytes = ((ByteArrayOutputStream)out).toByteArray();
1945             super.close();
1946             update(e);
1947         }
1948     }
1949
1950     // Output stream returned when writing "deflated" entries into memory,
1951     // to enable eager (possibly parallel) deflation and reduce the memory required.
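    // The deflater and a CRC-32 accumulator run while the bytes are written,
    // so by the time close() is called the entry's size, csize and crc fields
    // can be filled in from def.getBytesRead()/getBytesWritten() and the CRC
    // value, before update(e) publishes the entry.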
1952 private class DeflatingEntryOutputStream extends DeflaterOutputStream { 1953 private final CRC32 crc; 1954 private final Entry e; 1955 private boolean isClosed; 1956 1957 DeflatingEntryOutputStream(Entry e, OutputStream os) { 1958 super(os, getDeflater()); 1959 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1960 this.crc = new CRC32(); 1961 } 1962 1963 @Override 1964 public synchronized void write(byte[] b, int off, int len) 1965 throws IOException { 1966 super.write(b, off, len); 1967 crc.update(b, off, len); 1968 } 1969 1970 @Override 1971 public synchronized void close() throws IOException { 1972 if (isClosed) 1973 return; 1974 isClosed = true; 1975 finish(); 1976 e.size = def.getBytesRead(); 1977 e.csize = def.getBytesWritten(); 1978 e.crc = crc.getValue(); 1979 if (out instanceof ByteArrayOutputStream) 1980 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1981 super.close(); 1982 update(e); 1983 releaseDeflater(def); 1984 } 1985 } 1986 1987 // Wrapper output stream class to write out a "stored" entry. 1988 // (1) this class does not close the underlying out stream when 1989 // being closed. 1990 // (2) no need to be "synchronized", only used by sync() 1991 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1992 private final CRC32 crc; 1993 private final Entry e; 1994 private long written; 1995 private boolean isClosed; 1996 1997 EntryOutputStreamCRC32(Entry e, OutputStream os) { 1998 super(os); 1999 this.e = Objects.requireNonNull(e, "Zip entry is null"); 2000 this.crc = new CRC32(); 2001 } 2002 2003 @Override 2004 public void write(int b) throws IOException { 2005 out.write(b); 2006 crc.update(b); 2007 written += 1; 2008 } 2009 2010 @Override 2011 public void write(byte[] b, int off, int len) 2012 throws IOException { 2013 out.write(b, off, len); 2014 crc.update(b, off, len); 2015 written += len; 2016 } 2017 2018 @Override 2019 public void close() { 2020 if (isClosed) 2021 return; 2022 isClosed = true; 2023 e.size = e.csize = written; 2024 e.crc = crc.getValue(); 2025 } 2026 } 2027 2028 // Wrapper output stream class to write out a "deflated" entry. 2029 // (1) this class does not close the underlying out stream when 2030 // being closed. 2031 // (2) no need to be "synchronized", only used by sync() 2032 private class EntryOutputStreamDef extends DeflaterOutputStream { 2033 private final CRC32 crc; 2034 private final Entry e; 2035 private boolean isClosed; 2036 2037 EntryOutputStreamDef(Entry e, OutputStream os) { 2038 super(os, getDeflater()); 2039 this.e = Objects.requireNonNull(e, "Zip entry is null"); 2040 this.crc = new CRC32(); 2041 } 2042 2043 @Override 2044 public void write(byte[] b, int off, int len) throws IOException { 2045 super.write(b, off, len); 2046 crc.update(b, off, len); 2047 } 2048 2049 @Override 2050 public void close() throws IOException { 2051 if (isClosed) 2052 return; 2053 isClosed = true; 2054 finish(); 2055 e.size = def.getBytesRead(); 2056 e.csize = def.getBytesWritten(); 2057 e.crc = crc.getValue(); 2058 releaseDeflater(def); 2059 } 2060 } 2061 2062 private InputStream getInputStream(Entry e) 2063 throws IOException 2064 { 2065 InputStream eis; 2066 if (e.type == Entry.NEW) { 2067 if (e.bytes != null) 2068 eis = new ByteArrayInputStream(e.bytes); 2069 else if (e.file != null) 2070 eis = Files.newInputStream(e.file); 2071 else 2072 throw new ZipException("update entry data is missing"); 2073 } else if (e.type == Entry.FILECH) { 2074 // FILECH result is un-compressed. 
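            // the temp file already holds the raw bytes written through the
            // file channel, so the stream is returned as-is below and is not
            // wrapped in an InflaterInputStream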
2075 eis = Files.newInputStream(e.file); 2076 // TBD: wrap to hook close() 2077 // streams.add(eis); 2078 return eis; 2079 } else { // untouched CEN or COPY 2080 eis = new EntryInputStream(e, ch); 2081 } 2082 if (e.method == METHOD_DEFLATED) { 2083 // MORE: Compute good size for inflater stream: 2084 long bufSize = e.size + 2; // Inflater likes a bit of slack 2085 if (bufSize > 65536) 2086 bufSize = 8192; 2087 final long size = e.size; 2088 eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) { 2089 private boolean isClosed = false; 2090 public void close() throws IOException { 2091 if (!isClosed) { 2092 releaseInflater(inf); 2093 this.in.close(); 2094 isClosed = true; 2095 streams.remove(this); 2096 } 2097 } 2098 // Override fill() method to provide an extra "dummy" byte 2099 // at the end of the input stream. This is required when 2100 // using the "nowrap" Inflater option. (it appears the new 2101 // zlib in 7 does not need it, but keep it for now) 2102 protected void fill() throws IOException { 2103 if (eof) { 2104 throw new EOFException( 2105 "Unexpected end of ZLIB input stream"); 2106 } 2107 len = this.in.read(buf, 0, buf.length); 2108 if (len == -1) { 2109 buf[0] = 0; 2110 len = 1; 2111 eof = true; 2112 } 2113 inf.setInput(buf, 0, len); 2114 } 2115 private boolean eof; 2116 2117 public int available() { 2118 if (isClosed) 2119 return 0; 2120 long avail = size - inf.getBytesWritten(); 2121 return avail > (long) Integer.MAX_VALUE ? 2122 Integer.MAX_VALUE : (int) avail; 2123 } 2124 }; 2125 } else if (e.method == METHOD_STORED) { 2126 // TBD: wrap/ it does not seem necessary 2127 } else { 2128 throw new ZipException("invalid compression method"); 2129 } 2130 streams.add(eis); 2131 return eis; 2132 } 2133 2134 // Inner class implementing the input stream used to read 2135 // a (possibly compressed) zip file entry. 2136 private class EntryInputStream extends InputStream { 2137 private final SeekableByteChannel zfch; // local ref to zipfs's "ch". 
zipfs.ch might 2138 // point to a new channel after sync() 2139 private long pos; // current position within entry data 2140 private long rem; // number of remaining bytes within entry 2141 2142 EntryInputStream(Entry e, SeekableByteChannel zfch) 2143 throws IOException 2144 { 2145 this.zfch = zfch; 2146 rem = e.csize; 2147 pos = e.locoff; 2148 if (pos == -1) { 2149 Entry e2 = getEntry(e.name); 2150 if (e2 == null) { 2151 throw new ZipException("invalid loc for entry <" + getString(e.name) + ">"); 2152 } 2153 pos = e2.locoff; 2154 } 2155 pos = -pos; // lazy initialize the real data offset 2156 } 2157 2158 public int read(byte[] b, int off, int len) throws IOException { 2159 ensureOpen(); 2160 initDataPos(); 2161 if (rem == 0) { 2162 return -1; 2163 } 2164 if (len <= 0) { 2165 return 0; 2166 } 2167 if (len > rem) { 2168 len = (int) rem; 2169 } 2170 // readFullyAt() 2171 long n; 2172 ByteBuffer bb = ByteBuffer.wrap(b); 2173 bb.position(off); 2174 bb.limit(off + len); 2175 synchronized(zfch) { 2176 n = zfch.position(pos).read(bb); 2177 } 2178 if (n > 0) { 2179 pos += n; 2180 rem -= n; 2181 } 2182 if (rem == 0) { 2183 close(); 2184 } 2185 return (int)n; 2186 } 2187 2188 public int read() throws IOException { 2189 byte[] b = new byte[1]; 2190 if (read(b, 0, 1) == 1) { 2191 return b[0] & 0xff; 2192 } else { 2193 return -1; 2194 } 2195 } 2196 2197 public long skip(long n) { 2198 ensureOpen(); 2199 if (n > rem) 2200 n = rem; 2201 pos += n; 2202 rem -= n; 2203 if (rem == 0) { 2204 close(); 2205 } 2206 return n; 2207 } 2208 2209 public int available() { 2210 return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem; 2211 } 2212 2213 public void close() { 2214 rem = 0; 2215 streams.remove(this); 2216 } 2217 2218 private void initDataPos() throws IOException { 2219 if (pos <= 0) { 2220 pos = -pos + locpos; 2221 byte[] buf = new byte[LOCHDR]; 2222 if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) { 2223 throw new ZipException("invalid loc " + pos + " for entry reading"); 2224 } 2225 pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf); 2226 } 2227 } 2228 } 2229 2230 // Maxmum number of de/inflater we cache 2231 private final int MAX_FLATER = 20; 2232 // List of available Inflater objects for decompression 2233 private final List<Inflater> inflaters = new ArrayList<>(); 2234 2235 // Gets an inflater from the list of available inflaters or allocates 2236 // a new one. 2237 private Inflater getInflater() { 2238 synchronized (inflaters) { 2239 int size = inflaters.size(); 2240 if (size > 0) { 2241 return inflaters.remove(size - 1); 2242 } else { 2243 return new Inflater(true); 2244 } 2245 } 2246 } 2247 2248 // Releases the specified inflater to the list of available inflaters. 2249 private void releaseInflater(Inflater inf) { 2250 synchronized (inflaters) { 2251 if (inflaters.size() < MAX_FLATER) { 2252 inf.reset(); 2253 inflaters.add(inf); 2254 } else { 2255 inf.end(); 2256 } 2257 } 2258 } 2259 2260 // List of available Deflater objects for compression 2261 private final List<Deflater> deflaters = new ArrayList<>(); 2262 2263 // Gets a deflater from the list of available deflaters or allocates 2264 // a new one. 2265 private Deflater getDeflater() { 2266 synchronized (deflaters) { 2267 int size = deflaters.size(); 2268 if (size > 0) { 2269 return deflaters.remove(size - 1); 2270 } else { 2271 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 2272 } 2273 } 2274 } 2275 2276 // Releases the specified inflater to the list of available inflaters. 
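    // Resets and caches the given deflater for reuse, or ends it once
    // MAX_FLATER deflaters are already cached.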
2277 private void releaseDeflater(Deflater def) { 2278 synchronized (deflaters) { 2279 if (deflaters.size() < MAX_FLATER) { 2280 def.reset(); 2281 deflaters.add(def); 2282 } else { 2283 def.end(); 2284 } 2285 } 2286 } 2287 2288 // End of central directory record 2289 static class END { 2290 // The fields that are commented out below are not used by anyone and write() uses "0" 2291 // int disknum; 2292 // int sdisknum; 2293 // int endsub; 2294 int centot; // 4 bytes 2295 long cenlen; // 4 bytes 2296 long cenoff; // 4 bytes 2297 // int comlen; // comment length 2298 // byte[] comment; 2299 2300 // members of Zip64 end of central directory locator 2301 // int diskNum; 2302 long endpos; 2303 // int disktot; 2304 2305 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 2306 boolean hasZip64 = forceEnd64; // false; 2307 long xlen = cenlen; 2308 long xoff = cenoff; 2309 if (xlen >= ZIP64_MINVAL) { 2310 xlen = ZIP64_MINVAL; 2311 hasZip64 = true; 2312 } 2313 if (xoff >= ZIP64_MINVAL) { 2314 xoff = ZIP64_MINVAL; 2315 hasZip64 = true; 2316 } 2317 int count = centot; 2318 if (count >= ZIP64_MINVAL32) { 2319 count = ZIP64_MINVAL32; 2320 hasZip64 = true; 2321 } 2322 if (hasZip64) { 2323 //zip64 end of central directory record 2324 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 2325 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 2326 writeShort(os, 45); // version made by 2327 writeShort(os, 45); // version needed to extract 2328 writeInt(os, 0); // number of this disk 2329 writeInt(os, 0); // central directory start disk 2330 writeLong(os, centot); // number of directory entries on disk 2331 writeLong(os, centot); // number of directory entries 2332 writeLong(os, cenlen); // length of central directory 2333 writeLong(os, cenoff); // offset of central directory 2334 2335 //zip64 end of central directory locator 2336 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 2337 writeInt(os, 0); // zip64 END start disk 2338 writeLong(os, offset); // offset of zip64 END 2339 writeInt(os, 1); // total number of disks (?) 2340 } 2341 writeInt(os, ENDSIG); // END record signature 2342 writeShort(os, 0); // number of this disk 2343 writeShort(os, 0); // central directory start disk 2344 writeShort(os, count); // number of directory entries on disk 2345 writeShort(os, count); // total number of directory entries 2346 writeInt(os, xlen); // length of central directory 2347 writeInt(os, xoff); // offset of central directory 2348 writeShort(os, 0); // zip file comment, not used 2349 } 2350 } 2351 2352 // Internal node that links a "name" to its pos in cen table. 2353 // The node itself can be used as a "key" to lookup itself in 2354 // the HashMap inodes. 
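    // Lookups normally go through IndexNode.keyOf()/LOOKUPKEY, which reuse a
    // key node instead of allocating a new IndexNode per query, e.g.
    // (illustrative):
    //
    //     IndexNode node = inodes.get(IndexNode.keyOf(path));
    //
    // The directory structure is kept via the child/sibling links rather than
    // in the map itself.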
2355 static class IndexNode { 2356 byte[] name; 2357 int hashcode; // node is hashable/hashed by its name 2358 boolean isdir; 2359 int pos = -1; // position in cen table, -1 means the 2360 // entry does not exist in zip file 2361 IndexNode child; // first child 2362 IndexNode sibling; // next sibling 2363 2364 IndexNode() {} 2365 2366 IndexNode(byte[] name, boolean isdir) { 2367 name(name); 2368 this.isdir = isdir; 2369 this.pos = -1; 2370 } 2371 2372 IndexNode(byte[] name, int pos) { 2373 name(name); 2374 this.pos = pos; 2375 } 2376 2377 // constructor for initCEN() (1) remove trailing '/' (2) pad leading '/' 2378 IndexNode(byte[] cen, int pos, int nlen) { 2379 int noff = pos + CENHDR; 2380 if (cen[noff + nlen - 1] == '/') { 2381 isdir = true; 2382 nlen--; 2383 } 2384 if (nlen > 0 && cen[noff] == '/') { 2385 name = Arrays.copyOfRange(cen, noff, noff + nlen); 2386 } else { 2387 name = new byte[nlen + 1]; 2388 System.arraycopy(cen, noff, name, 1, nlen); 2389 name[0] = '/'; 2390 } 2391 name(normalize(name)); 2392 this.pos = pos; 2393 } 2394 2395 // Normalize the IndexNode.name field. 2396 private byte[] normalize(byte[] path) { 2397 int len = path.length; 2398 if (len == 0) 2399 return path; 2400 byte prevC = 0; 2401 for (int pathPos = 0; pathPos < len; pathPos++) { 2402 byte c = path[pathPos]; 2403 if (c == '/' && prevC == '/') 2404 return normalize(path, pathPos - 1); 2405 prevC = c; 2406 } 2407 if (len > 1 && prevC == '/') { 2408 return Arrays.copyOf(path, len - 1); 2409 } 2410 return path; 2411 } 2412 2413 private byte[] normalize(byte[] path, int off) { 2414 // As we know we have at least one / to trim, we can reduce 2415 // the size of the resulting array 2416 byte[] to = new byte[path.length - 1]; 2417 int pathPos = 0; 2418 while (pathPos < off) { 2419 to[pathPos] = path[pathPos]; 2420 pathPos++; 2421 } 2422 int toPos = pathPos; 2423 byte prevC = 0; 2424 while (pathPos < path.length) { 2425 byte c = path[pathPos++]; 2426 if (c == '/' && prevC == '/') 2427 continue; 2428 to[toPos++] = c; 2429 prevC = c; 2430 } 2431 if (toPos > 1 && to[toPos - 1] == '/') 2432 toPos--; 2433 return (toPos == to.length) ? to : Arrays.copyOf(to, toPos); 2434 } 2435 2436 private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>(); 2437 2438 static final IndexNode keyOf(byte[] name) { // get a lookup key; 2439 IndexNode key = cachedKey.get(); 2440 if (key == null) { 2441 key = new IndexNode(name, -1); 2442 cachedKey.set(key); 2443 } 2444 return key.as(name); 2445 } 2446 2447 final void name(byte[] name) { 2448 this.name = name; 2449 this.hashcode = Arrays.hashCode(name); 2450 } 2451 2452 final IndexNode as(byte[] name) { // reuse the node, mostly 2453 name(name); // as a lookup "key" 2454 return this; 2455 } 2456 2457 boolean isDir() { 2458 return isdir; 2459 } 2460 2461 @Override 2462 public boolean equals(Object other) { 2463 if (!(other instanceof IndexNode)) { 2464 return false; 2465 } 2466 if (other instanceof ParentLookup) { 2467 return ((ParentLookup)other).equals(this); 2468 } 2469 return Arrays.equals(name, ((IndexNode)other).name); 2470 } 2471 2472 @Override 2473 public int hashCode() { 2474 return hashcode; 2475 } 2476 2477 @Override 2478 public String toString() { 2479 return new String(name) + (isdir ? 
" (dir)" : " ") + ", index: " + pos; 2480 } 2481 } 2482 2483 static class Entry extends IndexNode implements ZipFileAttributes { 2484 static final int CEN = 1; // entry read from cen 2485 static final int NEW = 2; // updated contents in bytes or file 2486 static final int FILECH = 3; // fch update in "file" 2487 static final int COPY = 4; // copy of a CEN entry 2488 2489 byte[] bytes; // updated content bytes 2490 Path file; // use tmp file to store bytes; 2491 int type = CEN; // default is the entry read from cen 2492 2493 // entry attributes 2494 int version; 2495 int flag; 2496 int posixPerms = -1; // posix permissions 2497 int method = -1; // compression method 2498 long mtime = -1; // last modification time (in DOS time) 2499 long atime = -1; // last access time 2500 long ctime = -1; // create time 2501 long crc = -1; // crc-32 of entry data 2502 long csize = -1; // compressed size of entry data 2503 long size = -1; // uncompressed size of entry data 2504 byte[] extra; 2505 2506 // CEN 2507 // The fields that are commented out below are not used by anyone and write() uses "0" 2508 // int versionMade; 2509 // int disk; 2510 // int attrs; 2511 // long attrsEx; 2512 long locoff; 2513 byte[] comment; 2514 2515 Entry(byte[] name, boolean isdir, int method) { 2516 name(name); 2517 this.isdir = isdir; 2518 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 2519 this.crc = 0; 2520 this.size = 0; 2521 this.csize = 0; 2522 this.method = method; 2523 } 2524 2525 @SuppressWarnings("unchecked") 2526 Entry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) { 2527 this(name, isdir, method); 2528 this.type = type; 2529 for (FileAttribute<?> attr : attrs) { 2530 String attrName = attr.name(); 2531 if (attrName.equals("posix:permissions")) { 2532 posixPerms = ZipUtils.permsToFlags((Set<PosixFilePermission>)attr.value()); 2533 } 2534 } 2535 } 2536 2537 Entry(byte[] name, Path file, int type, FileAttribute<?>... attrs) { 2538 this(name, type, false, METHOD_STORED, attrs); 2539 this.file = file; 2540 } 2541 2542 Entry(Entry e, int type, int compressionMethod) { 2543 this(e, type); 2544 this.method = compressionMethod; 2545 } 2546 2547 Entry(Entry e, int type) { 2548 name(e.name); 2549 this.isdir = e.isdir; 2550 this.version = e.version; 2551 this.ctime = e.ctime; 2552 this.atime = e.atime; 2553 this.mtime = e.mtime; 2554 this.crc = e.crc; 2555 this.size = e.size; 2556 this.csize = e.csize; 2557 this.method = e.method; 2558 this.extra = e.extra; 2559 /* 2560 this.versionMade = e.versionMade; 2561 this.disk = e.disk; 2562 this.attrs = e.attrs; 2563 this.attrsEx = e.attrsEx; 2564 */ 2565 this.locoff = e.locoff; 2566 this.comment = e.comment; 2567 this.posixPerms = e.posixPerms; 2568 this.type = type; 2569 } 2570 2571 Entry(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2572 readCEN(zipfs, inode); 2573 } 2574 2575 // Calculates a suitable base for the version number to 2576 // be used for fields version made by/version needed to extract. 
2577 // The lower bytes of these 2 byte fields hold the version number 2578 // (value/10 = major; value%10 = minor) 2579 // For different features certain minimum versions apply: 2580 // stored = 10 (1.0), deflated = 20 (2.0), zip64 = 45 (4.5) 2581 private int version(boolean zip64) throws ZipException { 2582 if (zip64) { 2583 return 45; 2584 } 2585 if (method == METHOD_DEFLATED) 2586 return 20; 2587 else if (method == METHOD_STORED) 2588 return 10; 2589 throw new ZipException("unsupported compression method"); 2590 } 2591 2592 /** 2593 * Adds information about compatibility of file attribute information 2594 * to a version value. 2595 */ 2596 private int versionMadeBy(int version) { 2597 return (posixPerms < 0) ? version : 2598 VERSION_MADE_BY_BASE_UNIX | (version & 0xff); 2599 } 2600 2601 ///////////////////// CEN ////////////////////// 2602 private void readCEN(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2603 byte[] cen = zipfs.cen; 2604 int pos = inode.pos; 2605 if (!cenSigAt(cen, pos)) 2606 throw new ZipException("invalid CEN header (bad signature)"); 2607 version = CENVER(cen, pos); 2608 flag = CENFLG(cen, pos); 2609 method = CENHOW(cen, pos); 2610 mtime = dosToJavaTime(CENTIM(cen, pos)); 2611 crc = CENCRC(cen, pos); 2612 csize = CENSIZ(cen, pos); 2613 size = CENLEN(cen, pos); 2614 int nlen = CENNAM(cen, pos); 2615 int elen = CENEXT(cen, pos); 2616 int clen = CENCOM(cen, pos); 2617 /* 2618 versionMade = CENVEM(cen, pos); 2619 disk = CENDSK(cen, pos); 2620 attrs = CENATT(cen, pos); 2621 attrsEx = CENATX(cen, pos); 2622 */ 2623 if (CENVEM_FA(cen, pos) == FILE_ATTRIBUTES_UNIX) { 2624 posixPerms = CENATX_PERMS(cen, pos) & 0xFFF; // 12 bits for setuid, setgid, sticky + perms 2625 } 2626 locoff = CENOFF(cen, pos); 2627 pos += CENHDR; 2628 this.name = inode.name; 2629 this.isdir = inode.isdir; 2630 this.hashcode = inode.hashcode; 2631 2632 pos += nlen; 2633 if (elen > 0) { 2634 extra = Arrays.copyOfRange(cen, pos, pos + elen); 2635 pos += elen; 2636 readExtra(zipfs); 2637 } 2638 if (clen > 0) { 2639 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2640 } 2641 } 2642 2643 private int writeCEN(OutputStream os) throws IOException { 2644 long csize0 = csize; 2645 long size0 = size; 2646 long locoff0 = locoff; 2647 int elen64 = 0; // extra for ZIP64 2648 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 2649 int elenEXTT = 0; // extra for Extended Timestamp 2650 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2651 2652 byte[] zname = isdir ? toDirectoryPath(name) : name; 2653 2654 // confirm size/length 2655 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2656 int elen = (extra != null) ? extra.length : 0; 2657 int eoff = 0; 2658 int clen = (comment != null) ? 
comment.length : 0; 2659 if (csize >= ZIP64_MINVAL) { 2660 csize0 = ZIP64_MINVAL; 2661 elen64 += 8; // csize(8) 2662 } 2663 if (size >= ZIP64_MINVAL) { 2664 size0 = ZIP64_MINVAL; // size(8) 2665 elen64 += 8; 2666 } 2667 if (locoff >= ZIP64_MINVAL) { 2668 locoff0 = ZIP64_MINVAL; 2669 elen64 += 8; // offset(8) 2670 } 2671 if (elen64 != 0) { 2672 elen64 += 4; // header and data sz 4 bytes 2673 } 2674 boolean zip64 = (elen64 != 0); 2675 int version0 = version(zip64); 2676 while (eoff + 4 < elen) { 2677 int tag = SH(extra, eoff); 2678 int sz = SH(extra, eoff + 2); 2679 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2680 foundExtraTime = true; 2681 } 2682 eoff += (4 + sz); 2683 } 2684 if (!foundExtraTime) { 2685 if (isWindows) { // use NTFS 2686 elenNTFS = 36; // total 36 bytes 2687 } else { // Extended Timestamp otherwise 2688 elenEXTT = 9; // only mtime in cen 2689 } 2690 } 2691 writeInt(os, CENSIG); // CEN header signature 2692 writeShort(os, versionMadeBy(version0)); // version made by 2693 writeShort(os, version0); // version needed to extract 2694 writeShort(os, flag); // general purpose bit flag 2695 writeShort(os, method); // compression method 2696 // last modification time 2697 writeInt(os, (int)javaToDosTime(mtime)); 2698 writeInt(os, crc); // crc-32 2699 writeInt(os, csize0); // compressed size 2700 writeInt(os, size0); // uncompressed size 2701 writeShort(os, nlen); 2702 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2703 2704 if (comment != null) { 2705 writeShort(os, Math.min(clen, 0xffff)); 2706 } else { 2707 writeShort(os, 0); 2708 } 2709 writeShort(os, 0); // starting disk number 2710 writeShort(os, 0); // internal file attributes (unused) 2711 writeInt(os, posixPerms > 0 ? posixPerms << 16 : 0); // external file 2712 // attributes, used for storing posix 2713 // permissions 2714 writeInt(os, locoff0); // relative offset of local header 2715 writeBytes(os, zname, 1, nlen); 2716 if (zip64) { 2717 writeShort(os, EXTID_ZIP64);// Zip64 extra 2718 writeShort(os, elen64 - 4); // size of "this" extra block 2719 if (size0 == ZIP64_MINVAL) 2720 writeLong(os, size); 2721 if (csize0 == ZIP64_MINVAL) 2722 writeLong(os, csize); 2723 if (locoff0 == ZIP64_MINVAL) 2724 writeLong(os, locoff); 2725 } 2726 if (elenNTFS != 0) { 2727 writeShort(os, EXTID_NTFS); 2728 writeShort(os, elenNTFS - 4); 2729 writeInt(os, 0); // reserved 2730 writeShort(os, 0x0001); // NTFS attr tag 2731 writeShort(os, 24); 2732 writeLong(os, javaToWinTime(mtime)); 2733 writeLong(os, javaToWinTime(atime)); 2734 writeLong(os, javaToWinTime(ctime)); 2735 } 2736 if (elenEXTT != 0) { 2737 writeShort(os, EXTID_EXTT); 2738 writeShort(os, elenEXTT - 4); 2739 if (ctime == -1) 2740 os.write(0x3); // mtime and atime 2741 else 2742 os.write(0x7); // mtime, atime and ctime 2743 writeInt(os, javaToUnixTime(mtime)); 2744 } 2745 if (extra != null) // whatever not recognized 2746 writeBytes(os, extra); 2747 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2748 writeBytes(os, comment); 2749 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2750 } 2751 2752 ///////////////////// LOC ////////////////////// 2753 2754 private int writeLOC(OutputStream os) throws IOException { 2755 byte[] zname = isdir ? toDirectoryPath(name) : name; 2756 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2757 int elen = (extra != null) ? 
extra.length : 0; 2758 boolean foundExtraTime = false; // if extra timestamp present 2759 int eoff = 0; 2760 int elen64 = 0; 2761 boolean zip64 = false; 2762 int elenEXTT = 0; 2763 int elenNTFS = 0; 2764 writeInt(os, LOCSIG); // LOC header signature 2765 if ((flag & FLAG_DATADESCR) != 0) { 2766 writeShort(os, version(false)); // version needed to extract 2767 writeShort(os, flag); // general purpose bit flag 2768 writeShort(os, method); // compression method 2769 // last modification time 2770 writeInt(os, (int)javaToDosTime(mtime)); 2771 // store size, uncompressed size, and crc-32 in data descriptor 2772 // immediately following compressed entry data 2773 writeInt(os, 0); 2774 writeInt(os, 0); 2775 writeInt(os, 0); 2776 } else { 2777 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2778 elen64 = 20; //headid(2) + size(2) + size(8) + csize(8) 2779 zip64 = true; 2780 } 2781 writeShort(os, version(zip64)); // version needed to extract 2782 writeShort(os, flag); // general purpose bit flag 2783 writeShort(os, method); // compression method 2784 // last modification time 2785 writeInt(os, (int)javaToDosTime(mtime)); 2786 writeInt(os, crc); // crc-32 2787 if (zip64) { 2788 writeInt(os, ZIP64_MINVAL); 2789 writeInt(os, ZIP64_MINVAL); 2790 } else { 2791 writeInt(os, csize); // compressed size 2792 writeInt(os, size); // uncompressed size 2793 } 2794 } 2795 while (eoff + 4 < elen) { 2796 int tag = SH(extra, eoff); 2797 int sz = SH(extra, eoff + 2); 2798 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2799 foundExtraTime = true; 2800 } 2801 eoff += (4 + sz); 2802 } 2803 if (!foundExtraTime) { 2804 if (isWindows) { 2805 elenNTFS = 36; // NTFS, total 36 bytes 2806 } else { // on unix use "ext time" 2807 elenEXTT = 9; 2808 if (atime != -1) 2809 elenEXTT += 4; 2810 if (ctime != -1) 2811 elenEXTT += 4; 2812 } 2813 } 2814 writeShort(os, nlen); 2815 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2816 writeBytes(os, zname, 1, nlen); 2817 if (zip64) { 2818 writeShort(os, EXTID_ZIP64); 2819 writeShort(os, 16); 2820 writeLong(os, size); 2821 writeLong(os, csize); 2822 } 2823 if (elenNTFS != 0) { 2824 writeShort(os, EXTID_NTFS); 2825 writeShort(os, elenNTFS - 4); 2826 writeInt(os, 0); // reserved 2827 writeShort(os, 0x0001); // NTFS attr tag 2828 writeShort(os, 24); 2829 writeLong(os, javaToWinTime(mtime)); 2830 writeLong(os, javaToWinTime(atime)); 2831 writeLong(os, javaToWinTime(ctime)); 2832 } 2833 if (elenEXTT != 0) { 2834 writeShort(os, EXTID_EXTT); 2835 writeShort(os, elenEXTT - 4);// size for the folowing data block 2836 int fbyte = 0x1; 2837 if (atime != -1) // mtime and atime 2838 fbyte |= 0x2; 2839 if (ctime != -1) // mtime, atime and ctime 2840 fbyte |= 0x4; 2841 os.write(fbyte); // flags byte 2842 writeInt(os, javaToUnixTime(mtime)); 2843 if (atime != -1) 2844 writeInt(os, javaToUnixTime(atime)); 2845 if (ctime != -1) 2846 writeInt(os, javaToUnixTime(ctime)); 2847 } 2848 if (extra != null) { 2849 writeBytes(os, extra); 2850 } 2851 return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT; 2852 } 2853 2854 // Data Descriptor 2855 private int writeEXT(OutputStream os) throws IOException { 2856 writeInt(os, EXTSIG); // EXT header signature 2857 writeInt(os, crc); // crc-32 2858 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2859 writeLong(os, csize); 2860 writeLong(os, size); 2861 return 24; 2862 } else { 2863 writeInt(os, csize); // compressed size 2864 writeInt(os, size); // uncompressed size 2865 return 16; 2866 } 2867 } 2868 2869 // read NTFS, UNIX and ZIP64 data from cen.extra 
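        // Each block in the extra field is laid out as
        //     headerId (2 bytes) | dataSize (2 bytes) | data (dataSize bytes)
        // so the parser below advances (4 + dataSize) bytes per block and
        // absorbs the ZIP64, NTFS and EXTT tags into the entry's fields.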
2870 private void readExtra(ZipFileSystem zipfs) throws IOException { 2871 if (extra == null) 2872 return; 2873 int elen = extra.length; 2874 int off = 0; 2875 int newOff = 0; 2876 while (off + 4 < elen) { 2877 // extra spec: HeaderID+DataSize+Data 2878 int pos = off; 2879 int tag = SH(extra, pos); 2880 int sz = SH(extra, pos + 2); 2881 pos += 4; 2882 if (pos + sz > elen) // invalid data 2883 break; 2884 switch (tag) { 2885 case EXTID_ZIP64 : 2886 if (size == ZIP64_MINVAL) { 2887 if (pos + 8 > elen) // invalid zip64 extra 2888 break; // fields, just skip 2889 size = LL(extra, pos); 2890 pos += 8; 2891 } 2892 if (csize == ZIP64_MINVAL) { 2893 if (pos + 8 > elen) 2894 break; 2895 csize = LL(extra, pos); 2896 pos += 8; 2897 } 2898 if (locoff == ZIP64_MINVAL) { 2899 if (pos + 8 > elen) 2900 break; 2901 locoff = LL(extra, pos); 2902 } 2903 break; 2904 case EXTID_NTFS: 2905 if (sz < 32) 2906 break; 2907 pos += 4; // reserved 4 bytes 2908 if (SH(extra, pos) != 0x0001) 2909 break; 2910 if (SH(extra, pos + 2) != 24) 2911 break; 2912 // override the loc field, datatime here is 2913 // more "accurate" 2914 mtime = winToJavaTime(LL(extra, pos + 4)); 2915 atime = winToJavaTime(LL(extra, pos + 12)); 2916 ctime = winToJavaTime(LL(extra, pos + 20)); 2917 break; 2918 case EXTID_EXTT: 2919 // spec says the Extened timestamp in cen only has mtime 2920 // need to read the loc to get the extra a/ctime, if flag 2921 // "zipinfo-time" is not specified to false; 2922 // there is performance cost (move up to loc and read) to 2923 // access the loc table foreach entry; 2924 if (zipfs.noExtt) { 2925 if (sz == 5) 2926 mtime = unixToJavaTime(LG(extra, pos + 1)); 2927 break; 2928 } 2929 byte[] buf = new byte[LOCHDR]; 2930 if (zipfs.readFullyAt(buf, 0, buf.length , locoff) 2931 != buf.length) 2932 throw new ZipException("loc: reading failed"); 2933 if (!locSigAt(buf, 0)) 2934 throw new ZipException("loc: wrong sig ->" 2935 + Long.toString(getSig(buf, 0), 16)); 2936 int locElen = LOCEXT(buf); 2937 if (locElen < 9) // EXTT is at least 9 bytes 2938 break; 2939 int locNlen = LOCNAM(buf); 2940 buf = new byte[locElen]; 2941 if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen) 2942 != buf.length) 2943 throw new ZipException("loc extra: reading failed"); 2944 int locPos = 0; 2945 while (locPos + 4 < buf.length) { 2946 int locTag = SH(buf, locPos); 2947 int locSZ = SH(buf, locPos + 2); 2948 locPos += 4; 2949 if (locTag != EXTID_EXTT) { 2950 locPos += locSZ; 2951 continue; 2952 } 2953 int end = locPos + locSZ - 4; 2954 int flag = CH(buf, locPos++); 2955 if ((flag & 0x1) != 0 && locPos <= end) { 2956 mtime = unixToJavaTime(LG(buf, locPos)); 2957 locPos += 4; 2958 } 2959 if ((flag & 0x2) != 0 && locPos <= end) { 2960 atime = unixToJavaTime(LG(buf, locPos)); 2961 locPos += 4; 2962 } 2963 if ((flag & 0x4) != 0 && locPos <= end) { 2964 ctime = unixToJavaTime(LG(buf, locPos)); 2965 } 2966 break; 2967 } 2968 break; 2969 default: // unknown tag 2970 System.arraycopy(extra, off, extra, newOff, sz + 4); 2971 newOff += (sz + 4); 2972 } 2973 off += (sz + 4); 2974 } 2975 if (newOff != 0 && newOff != extra.length) 2976 extra = Arrays.copyOf(extra, newOff); 2977 else 2978 extra = null; 2979 } 2980 2981 @Override 2982 public String toString() { 2983 StringBuilder sb = new StringBuilder(1024); 2984 Formatter fm = new Formatter(sb); 2985 fm.format(" name : %s%n", new String(name)); 2986 fm.format(" creationTime : %tc%n", creationTime().toMillis()); 2987 fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis()); 2988 
fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis()); 2989 fm.format(" isRegularFile : %b%n", isRegularFile()); 2990 fm.format(" isDirectory : %b%n", isDirectory()); 2991 fm.format(" isSymbolicLink : %b%n", isSymbolicLink()); 2992 fm.format(" isOther : %b%n", isOther()); 2993 fm.format(" fileKey : %s%n", fileKey()); 2994 fm.format(" size : %d%n", size()); 2995 fm.format(" compressedSize : %d%n", compressedSize()); 2996 fm.format(" crc : %x%n", crc()); 2997 fm.format(" method : %d%n", method()); 2998 Set<PosixFilePermission> permissions = storedPermissions().orElse(null); 2999 if (permissions != null) { 3000 fm.format(" permissions : %s%n", permissions); 3001 } 3002 fm.close(); 3003 return sb.toString(); 3004 } 3005 3006 ///////// basic file attributes /////////// 3007 @Override 3008 public FileTime creationTime() { 3009 return FileTime.fromMillis(ctime == -1 ? mtime : ctime); 3010 } 3011 3012 @Override 3013 public boolean isDirectory() { 3014 return isDir(); 3015 } 3016 3017 @Override 3018 public boolean isOther() { 3019 return false; 3020 } 3021 3022 @Override 3023 public boolean isRegularFile() { 3024 return !isDir(); 3025 } 3026 3027 @Override 3028 public FileTime lastAccessTime() { 3029 return FileTime.fromMillis(atime == -1 ? mtime : atime); 3030 } 3031 3032 @Override 3033 public FileTime lastModifiedTime() { 3034 return FileTime.fromMillis(mtime); 3035 } 3036 3037 @Override 3038 public long size() { 3039 return size; 3040 } 3041 3042 @Override 3043 public boolean isSymbolicLink() { 3044 return false; 3045 } 3046 3047 @Override 3048 public Object fileKey() { 3049 return null; 3050 } 3051 3052 ///////// zip file attributes /////////// 3053 3054 @Override 3055 public long compressedSize() { 3056 return csize; 3057 } 3058 3059 @Override 3060 public long crc() { 3061 return crc; 3062 } 3063 3064 @Override 3065 public int method() { 3066 return method; 3067 } 3068 3069 @Override 3070 public byte[] extra() { 3071 if (extra != null) 3072 return Arrays.copyOf(extra, extra.length); 3073 return null; 3074 } 3075 3076 @Override 3077 public byte[] comment() { 3078 if (comment != null) 3079 return Arrays.copyOf(comment, comment.length); 3080 return null; 3081 } 3082 3083 @Override 3084 public Optional<Set<PosixFilePermission>> storedPermissions() { 3085 Set<PosixFilePermission> perms = null; 3086 if (posixPerms != -1) { 3087 perms = new HashSet<>(PosixFilePermission.values().length); 3088 for (PosixFilePermission perm : PosixFilePermission.values()) { 3089 if ((posixPerms & ZipUtils.permToFlag(perm)) != 0) { 3090 perms.add(perm); 3091 } 3092 } 3093 } 3094 return Optional.ofNullable(perms); 3095 } 3096 } 3097 3098 final class PosixEntry extends Entry implements PosixFileAttributes { 3099 private UserPrincipal owner = defaultOwner; 3100 private GroupPrincipal group = defaultGroup; 3101 3102 PosixEntry(byte[] name, boolean isdir, int method) { 3103 super(name, isdir, method); 3104 } 3105 3106 PosixEntry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) { 3107 super(name, type, isdir, method, attrs); 3108 } 3109 3110 PosixEntry(byte[] name, Path file, int type, FileAttribute<?>... 
attrs) { 3111 super(name, file, type, attrs); 3112 } 3113 3114 PosixEntry(PosixEntry e, int type, int compressionMethod) { 3115 super(e, type); 3116 this.method = compressionMethod; 3117 } 3118 3119 PosixEntry(PosixEntry e, int type) { 3120 super(e, type); 3121 this.owner = e.owner; 3122 this.group = e.group; 3123 } 3124 3125 PosixEntry(ZipFileSystem zipfs, IndexNode inode) throws IOException { 3126 super(zipfs, inode); 3127 } 3128 3129 @Override 3130 public UserPrincipal owner() { 3131 return owner; 3132 } 3133 3134 @Override 3135 public GroupPrincipal group() { 3136 return group; 3137 } 3138 3139 @Override 3140 public Set<PosixFilePermission> permissions() { 3141 return storedPermissions().orElse(Set.copyOf(defaultPermissions)); 3142 } 3143 } 3144 3145 private static class ExistingChannelCloser { 3146 private final Path path; 3147 private final SeekableByteChannel ch; 3148 private final Set<InputStream> streams; 3149 ExistingChannelCloser(Path path, 3150 SeekableByteChannel ch, 3151 Set<InputStream> streams) { 3152 this.path = path; 3153 this.ch = ch; 3154 this.streams = streams; 3155 } 3156 3157 /** 3158 * If there are no more outstanding streams, close the channel and 3159 * delete the backing file 3160 * 3161 * @return true if we're done and closed the backing file, 3162 * otherwise false 3163 * @throws IOException 3164 */ 3165 private boolean closeAndDeleteIfDone() throws IOException { 3166 if (streams.isEmpty()) { 3167 ch.close(); 3168 Files.delete(path); 3169 return true; 3170 } 3171 return false; 3172 } 3173 } 3174 3175 // purely for parent lookup, so we don't have to copy the parent 3176 // name every time 3177 static class ParentLookup extends IndexNode { 3178 int len; 3179 ParentLookup() {} 3180 3181 final ParentLookup as(byte[] name, int len) { // as a lookup "key" 3182 name(name, len); 3183 return this; 3184 } 3185 3186 void name(byte[] name, int len) { 3187 this.name = name; 3188 this.len = len; 3189 // calculate the hashcode the same way as Arrays.hashCode() does 3190 int result = 1; 3191 for (int i = 0; i < len; i++) 3192 result = 31 * result + name[i]; 3193 this.hashcode = result; 3194 } 3195 3196 @Override 3197 public boolean equals(Object other) { 3198 if (!(other instanceof IndexNode)) { 3199 return false; 3200 } 3201 byte[] oname = ((IndexNode)other).name; 3202 return Arrays.equals(name, 0, len, 3203 oname, 0, oname.length); 3204 } 3205 } 3206 }