1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package jdk.nio.zipfs; 27 28 import java.io.BufferedOutputStream; 29 import java.io.ByteArrayInputStream; 30 import java.io.ByteArrayOutputStream; 31 import java.io.EOFException; 32 import java.io.FilterOutputStream; 33 import java.io.IOException; 34 import java.io.InputStream; 35 import java.io.OutputStream; 36 import java.nio.ByteBuffer; 37 import java.nio.MappedByteBuffer; 38 import java.nio.channels.FileChannel; 39 import java.nio.channels.FileLock; 40 import java.nio.channels.ReadableByteChannel; 41 import java.nio.channels.SeekableByteChannel; 42 import java.nio.channels.WritableByteChannel; 43 import java.nio.file.*; 44 import java.nio.file.attribute.*; 45 import java.nio.file.spi.FileSystemProvider; 46 import java.security.AccessController; 47 import java.security.PrivilegedAction; 48 import java.security.PrivilegedActionException; 49 import java.security.PrivilegedExceptionAction; 50 import java.util.*; 51 import java.util.concurrent.locks.ReadWriteLock; 52 import java.util.concurrent.locks.ReentrantReadWriteLock; 53 import java.util.regex.Pattern; 54 import java.util.zip.CRC32; 55 import java.util.zip.Deflater; 56 import java.util.zip.DeflaterOutputStream; 57 import java.util.zip.Inflater; 58 import java.util.zip.InflaterInputStream; 59 import java.util.zip.ZipException; 60 61 import static java.lang.Boolean.TRUE; 62 import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; 63 import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; 64 import static java.nio.file.StandardOpenOption.APPEND; 65 import static java.nio.file.StandardOpenOption.CREATE; 66 import static java.nio.file.StandardOpenOption.CREATE_NEW; 67 import static java.nio.file.StandardOpenOption.READ; 68 import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; 69 import static java.nio.file.StandardOpenOption.WRITE; 70 import static jdk.nio.zipfs.ZipConstants.*; 71 import static jdk.nio.zipfs.ZipUtils.*; 72 73 /** 74 * A FileSystem built on a zip file 75 * 76 
 * @author Xueming Shen
 */
class ZipFileSystem extends FileSystem {
    // statics
    private static final boolean isWindows = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)()->System.getProperty("os.name")
                                                 .startsWith("Windows"));
    private static final byte[] ROOTPATH = new byte[] { '/' };
    private static final String OPT_POSIX = "enablePosixFileAttributes";
    private static final String OPT_DEFAULT_OWNER = "defaultOwner";
    private static final String OPT_DEFAULT_GROUP = "defaultGroup";
    private static final String OPT_DEFAULT_PERMISSIONS = "defaultPermissions";

    // Permissions used when the "defaultPermissions" property is not set (777)
    private static final Set<PosixFilePermission> DEFAULT_PERMISSIONS =
        PosixFilePermissions.fromString("rwxrwxrwx");

    // Property used to specify the compression mode to use
    private static final String COMPRESSION_METHOD = "compressionMethod";
    // Value specified for compressionMethod property to compress Zip entries
    public static final String DEFLATED_COMPRESSION_METHOD = "DEFLATED";
    // Value specified for compressionMethod property to not compress Zip entries
    public static final String STORED_COMPRESSION_METHOD = "STORED";

    private final ZipFileSystemProvider provider;
    private final Path zfpath;
    final ZipCoder zc;
    private final ZipPath rootdir;
    private boolean readOnly; // readonly file system, false by default

    // default time stamp for pseudo entries
    private final long zfsDefaultTimeStamp = System.currentTimeMillis();

    // configurable by env map
    private final boolean noExtt;        // see readExtra()
    private final boolean useTempFile;   // use a temp file for newOS, default
                                         // is to use BAOS for better performance
    private final boolean forceEnd64;
    private final int defaultCompressionMethod; // METHOD_STORED if "noCompression=true"
                                                // METHOD_DEFLATED otherwise

    // POSIX support
    final boolean supportPosix;
    private final UserPrincipal defaultOwner;
    private final GroupPrincipal defaultGroup;
    private final Set<PosixFilePermission> defaultPermissions;

    private final Set<String> supportedFileAttributeViews;

    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        // "zipinfo-time=false" disables reading the extended timestamps
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = isTrue(env, "useTempFile");
        this.forceEnd64 = isTrue(env, "forceZIP64End");
        this.defaultCompressionMethod = getDefaultCompressionMethod(env);
        this.supportPosix = isTrue(env, OPT_POSIX);
        this.defaultOwner = initOwner(zfpath, env);
        this.defaultGroup = initGroup(zfpath, env);
        this.defaultPermissions = initPermissions(env);
        this.supportedFileAttributeViews = supportPosix ?
            Set.of("basic", "posix", "zip") : Set.of("basic", "zip");
        if (Files.notExists(zfpath)) {
            // create a new zip if it doesn't exist
            if (isTrue(env, "create")) {
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    // an empty zip is just an END record
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)()->Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            // close the channel so the underlying file is not leaked when
            // the central directory cannot be parsed
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;
    }

    /**
     * Return the compression method to use (STORED or DEFLATED).
If the 174 * property {@code commpressionMethod} is set use its value to determine 175 * the compression method to use. If the property is not set, then the 176 * default compression is DEFLATED unless the property {@code noCompression} 177 * is set which is supported for backwards compatability. 178 * @param env Zip FS map of properties 179 * @return The Compression method to use 180 */ 181 private int getDefaultCompressionMethod(Map<String, ?> env) { 182 int result = 183 isTrue(env, "noCompression") ? METHOD_STORED : METHOD_DEFLATED; 184 if(env.containsKey(COMPRESSION_METHOD)) { 185 Object compressionMethod = env.get(COMPRESSION_METHOD); 186 if(compressionMethod != null) { 187 if(compressionMethod instanceof String) { 188 switch(((String) compressionMethod).toUpperCase()) { 189 case STORED_COMPRESSION_METHOD: 190 result = METHOD_STORED; 191 break; 192 case DEFLATED_COMPRESSION_METHOD: 193 result = METHOD_DEFLATED; 194 break; 195 default: 196 throw new IllegalArgumentException(String.format( 197 "The value for the %s property must be %s or %s", 198 COMPRESSION_METHOD, STORED_COMPRESSION_METHOD, 199 DEFLATED_COMPRESSION_METHOD)); 200 } 201 } else { 202 throw new IllegalArgumentException(String.format( 203 "The Object type for the %s property must be a String")); 204 } 205 } else { 206 throw new IllegalArgumentException(String.format( 207 "The value for the %s property must be %s or %s", 208 COMPRESSION_METHOD, STORED_COMPRESSION_METHOD, 209 DEFLATED_COMPRESSION_METHOD)); 210 } 211 } 212 return result; 213 } 214 215 // returns true if there is a name=true/"true" setting in env 216 private static boolean isTrue(Map<String, ?> env, String name) { 217 return "true".equals(env.get(name)) || TRUE.equals(env.get(name)); 218 } 219 220 // Initialize the default owner for files inside the zip archive. 221 // If not specified in env, it is the owner of the archive. If no owner can 222 // be determined, we try to go with system property "user.name". 
If that's not 223 // accessible, we return "<zipfs_default>". 224 private UserPrincipal initOwner(Path zfpath, Map<String, ?> env) throws IOException { 225 Object o = env.get(OPT_DEFAULT_OWNER); 226 if (o == null) { 227 try { 228 PrivilegedExceptionAction<UserPrincipal> pa = ()->Files.getOwner(zfpath); 229 return AccessController.doPrivileged(pa); 230 } catch (UnsupportedOperationException | PrivilegedActionException e) { 231 if (e instanceof UnsupportedOperationException || 232 e.getCause() instanceof NoSuchFileException) 233 { 234 PrivilegedAction<String> pa = ()->System.getProperty("user.name"); 235 String userName = AccessController.doPrivileged(pa); 236 return ()->userName; 237 } else { 238 throw new IOException(e); 239 } 240 } 241 } 242 if (o instanceof String) { 243 if (((String)o).isEmpty()) { 244 throw new IllegalArgumentException("Value for property " + 245 OPT_DEFAULT_OWNER + " must not be empty."); 246 } 247 return ()->(String)o; 248 } 249 if (o instanceof UserPrincipal) { 250 return (UserPrincipal)o; 251 } 252 throw new IllegalArgumentException("Value for property " + 253 OPT_DEFAULT_OWNER + " must be of type " + String.class + 254 " or " + UserPrincipal.class); 255 } 256 257 // Initialize the default group for files inside the zip archive. 258 // If not specified in env, we try to determine the group of the zip archive itself. 259 // If this is not possible/unsupported, we will return a group principal going by 260 // the same name as the default owner. 
    private GroupPrincipal initGroup(Path zfpath, Map<String, ?> env) throws IOException {
        Object o = env.get(OPT_DEFAULT_GROUP);
        if (o == null) {
            try {
                PosixFileAttributeView zfpv = Files.getFileAttributeView(zfpath, PosixFileAttributeView.class);
                if (zfpv == null) {
                    // POSIX attributes unsupported here: reuse the default
                    // owner's name as the group name
                    return defaultOwner::getName;
                }
                PrivilegedExceptionAction<GroupPrincipal> pa = ()->zfpv.readAttributes().group();
                return AccessController.doPrivileged(pa);
            } catch (UnsupportedOperationException | PrivilegedActionException e) {
                // unsupported, or the archive vanished: fall back to the
                // owner's name; any other failure is a real error
                if (e instanceof UnsupportedOperationException ||
                    e.getCause() instanceof NoSuchFileException)
                {
                    return defaultOwner::getName;
                } else {
                    throw new IOException(e);
                }
            }
        }
        if (o instanceof String) {
            if (((String)o).isEmpty()) {
                throw new IllegalArgumentException("Value for property " +
                    OPT_DEFAULT_GROUP + " must not be empty.");
            }
            return ()->(String)o;
        }
        if (o instanceof GroupPrincipal) {
            return (GroupPrincipal)o;
        }
        // configured, but with an unsupported type
        throw new IllegalArgumentException("Value for property " +
            OPT_DEFAULT_GROUP + " must be of type " + String.class +
            " or " + GroupPrincipal.class);
    }

    // Initialize the default permissions for files inside the zip archive.
    // If not specified in env, it will return 777.
    private Set<PosixFilePermission> initPermissions(Map<String, ?> env) {
        Object o = env.get(OPT_DEFAULT_PERMISSIONS);
        if (o == null) {
            // property not set: rwxrwxrwx (777)
            return DEFAULT_PERMISSIONS;
        }
        if (o instanceof String) {
            // e.g. "rwxr-x---"; fromString rejects malformed values
            return PosixFilePermissions.fromString((String)o);
        }
        if (!(o instanceof Set)) {
            throw new IllegalArgumentException("Value for property " +
                OPT_DEFAULT_PERMISSIONS + " must be of type " + String.class +
                " or " + Set.class);
        }
        // defensive copy; also validates the element types
        Set<PosixFilePermission> perms = new HashSet<>();
        for (Object o2 : (Set<?>)o) {
            if (o2 instanceof PosixFilePermission) {
                perms.add((PosixFilePermission)o2);
            } else {
                throw new IllegalArgumentException(OPT_DEFAULT_PERMISSIONS +
                    " must only contain objects of type " + PosixFilePermission.class);
            }
        }
        return perms;
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // Throws if this file system was opened (or later marked) read-only.
    private void checkWritable() {
        if (readOnly) {
            throw new ReadOnlyFileSystemException();
        }
    }

    void setReadOnly() {
        this.readOnly = true;
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        return List.of(rootdir);
    }

    ZipPath getRootDir() {
        return rootdir;
    }

    @Override
    public ZipPath getPath(String first, String... more) {
        if (more.length == 0) {
            return new ZipPath(this, first);
        }
        // join all non-empty components with '/'
        StringBuilder sb = new StringBuilder();
        sb.append(first);
        for (String path : more) {
            if (path.length() > 0) {
                if (sb.length() > 0) {
                    sb.append('/');
                }
                sb.append(path);
            }
        }
        return new ZipPath(this, sb.toString());
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        return List.of(new ZipFileStore(rootdir));
    }

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        // NOTE(review): pos can never equal syntaxAndInput.length() since
        // indexOf returns at most length()-1, so an empty pattern ("glob:")
        // is accepted here — presumably intentional; verify.
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);
        } else {
            if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return (path)->pattern.matcher(path.toString()).matches();
    }

    @Override
    public void close() throws
            IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            // iterate over a copy: closing a stream mutates 'streams'
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is : copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> {
                sync(); return null;
            });
            ch.close();                 // close the ch just in case no update
                                        // and sync didn't close the ch
        } catch (PrivilegedActionException e) {
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        // release native resources held by pooled (de)compressors
        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        // best-effort deletion of temp files; collect failures and throw
        // the first one with the rest suppressed
        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p : tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
            throw ioe;
    }

    // Returns the attributes of the entry at the given path, or null if the
    // entry does not exist.
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null) {
                return null;
            } else if (inode instanceof Entry) {
                return (Entry)inode;
            } else if (inode.pos == -1) {
                // pseudo directory, uses METHOD_STORED
                Entry e = supportPosix ?
                    new PosixEntry(inode.name, inode.isdir, METHOD_STORED) :
                    new Entry(inode.name, inode.isdir, METHOD_STORED);
                e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp;
                return e;
            } else {
                return supportPosix ?
                    new PosixEntry(this, inode) : new Entry(this, inode);
            }
        } finally {
            endRead();
        }
    }

    void checkAccess(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            // is it necessary to readCEN as a sanity check?
            if (getInode(path) == null) {
                throw new NoSuchFileException(toString());
            }

        } finally {
            endRead();
        }
    }

    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;     // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    void setOwner(byte[] path, UserPrincipal owner) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null) {
                throw new NoSuchFileException(getString(path));
            }
            // as the owner information is not persistent, we don't need to
            // change e.type to Entry.COPY
            if (e instanceof PosixEntry) {
                ((PosixEntry)e).owner = owner;
                update(e);
            }
        } finally {
            endWrite();
        }
    }

    void setGroup(byte[] path, GroupPrincipal group) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null) {
                throw new NoSuchFileException(getString(path));
            }
            // as the group information is not persistent, we don't need to
            // change e.type to Entry.COPY
            if (e instanceof PosixEntry) {
                ((PosixEntry)e).group = group;
                update(e);
            }
        } finally {
            endWrite();
        }
    }

    void setPermissions(byte[] path, Set<PosixFilePermission> perms) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null) {
                throw new NoSuchFileException(getString(path));
            }
            if (e.type == Entry.CEN) {
                e.type = Entry.COPY;     // copy e
            }
            // null clears the permission bits (-1 == "not set")
            e.posixPerms = perms == null ? -1 : ZipUtils.permsToFlags(perms);
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path) {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path) {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(ZipPath dir,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            byte[] path = dir.getResolvedPath();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                // (1) Assume each path from the zip file itself is "normalized"
                // (2) IndexNode.name is absolute. see IndexNode(byte[],int,int)
                // (3) If parent "dir" is relative when ZipDirectoryStream
                //     is created, the returned child path needs to be relative
                //     as well.
                ZipPath childPath = new ZipPath(this, child.name, true);
                ZipPath childFileName = childPath.getFileName();
                ZipPath zpath = dir.resolve(childFileName);
                if (filter == null || filter.accept(zpath))
                    list.add(zpath);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    void createDirectory(byte[] dir, FileAttribute<?>... attrs) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            // directories carry no data, so they are always METHOD_STORED
            Entry e = supportPosix ?
                new PosixEntry(dir, Entry.NEW, true, METHOD_STORED, attrs) :
                new Entry(dir, Entry.NEW, true, METHOD_STORED, attrs);
            update(e);
        } finally {
            endWrite();
        }
    }

    // Copies (or moves, if deletesrc is true) the entry at src to dst.
    void copyFile(boolean deletesrc, byte[] src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry(src);    // ensureOpen checked

            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            // copy eSrc entry and change name
            Entry u = supportPosix ?
                new PosixEntry((PosixEntry)eSrc, Entry.COPY) :
                new Entry(eSrc, Entry.COPY);
            u.name(dst);
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime = u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        boolean hasTruncate = false;
        for (OpenOption opt : options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
            if (opt == TRUNCATE_EXISTING)
                hasTruncate = true;
        }
        if (hasAppend && hasTruncate)
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    // append: copy the existing contents into the new stream
                    // before handing it to the caller
                    // NOTE(review): this path constructs a plain Entry even
                    // when supportPosix is true — verify that is intended
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    try (InputStream is = getInputStream(e)) {
                        is.transferTo(os);
                    }
                    return os;
                }
                return getOutputStream(supportPosix ?
                    new PosixEntry((PosixEntry)e, Entry.NEW) : new Entry(e, Entry.NEW));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(supportPosix ?
                    new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod) :
                    new Entry(path, Entry.NEW, false, defaultCompressionMethod));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    private void checkOptions(Set<? extends OpenOption> options) {
        // check for options of null type and option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }

    // Returns an output SeekableByteChannel for either
    // (1) writing the contents of a new entry, if the entry doesn't exist, or
    // (2) updating/replacing the contents of an existing entry.
    // Note: The content of the channel is not compressed until the
    // channel is closed
    private class EntryOutputChannel extends ByteArrayChannel {
        final Entry e;

        EntryOutputChannel(Entry e) {
            // presize to the known entry size, else a reasonable default
            super(e.size > 0 ? (int)e.size : 8192, false);
            this.e = e;
            if (e.mtime == -1)
                e.mtime = System.currentTimeMillis();
            if (e.method == -1)
                e.method = defaultCompressionMethod;
            // store size, compressed size, and crc-32 in datadescriptor
            e.flag = FLAG_DATADESCR;
            if (zc.isUTF8())
                e.flag |= FLAG_USE_UTF8;
        }

        @Override
        public void close() throws IOException {
            // will update the entry
            try (OutputStream os = getOutputStream(e)) {
                os.write(toByteArray());
            }
            super.close();
        }
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();    // only need a read lock, the "update()" will obtain
                            // the write lock when the channel is closed
            try {
                Entry e = getEntry(path);
                if (e != null) {
                    if (e.isDir() || options.contains(CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    SeekableByteChannel sbc =
                        new EntryOutputChannel(supportPosix ?
                            new PosixEntry((PosixEntry)e, Entry.NEW) :
                            new Entry(e, Entry.NEW));
                    if (options.contains(APPEND)) {
                        try (InputStream is = getInputStream(e)) {    // copyover
                            byte[] buf = new byte[8192];
                            ByteBuffer bb = ByteBuffer.wrap(buf);
                            int n;
                            while ((n = is.read(buf)) != -1) {
                                bb.position(0);
                                bb.limit(n);
                                sbc.write(bb);
                            }
                        }
                    }
                    return sbc;
                }
                if (!options.contains(CREATE) && !options.contains(CREATE_NEW))
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return new EntryOutputChannel(
                    supportPosix ?
                        new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod, attrs) :
                        new Entry(path, Entry.NEW, false, defaultCompressionMethod, attrs));
            } finally {
                endRead();
            }
        } else {
            // read-only channel: snapshot the entry contents into memory
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                try (InputStream is = getInputStream(e)) {
                    // TBD: if (e.size < NNNNN);
                    return new ByteArrayChannel(is.readAllBytes(), true);
                }
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                // the temp file always exists, so CREATE_NEW must be dropped
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW);    // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : (
                supportPosix ?
                    new PosixEntry(path, tmpfile, Entry.FILECH, attrs) :
                    new Entry(path, tmpfile, Entry.FILECH, attrs));
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = defaultCompressionMethod;
            }
            // is there a better way to hook into the FileChannel's close method?
            // The anonymous wrapper below delegates everything to fch and uses
            // implCloseChannel() to publish the update (or clean up the temp
            // file when it was only opened for reading).
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                {
                    // mapping the temporary copy would not track the entry
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);
                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private final Set<ExistingChannelCloser> exChClosers = new HashSet<>();

    // temporary files created for entries; deleted in close()
    private final Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<>());

    // Creates a temp file next to the zip file; if path is non-null and the
    // entry exists, the entry's contents are copied into the temp file.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory."
    // Verifies that every ancestor of 'path' (up to, but excluding, the
    // root) exists in the inode map; throws NoSuchFileException for the
    // first missing one. Needed because a zip file may omit directory
    // entries entirely.
    private void checkParents(byte[] path) throws IOException {
        beginRead();
        try {
            while ((path = getParent(path)) != null &&
                    path != ROOTPATH) {
                if (!inodes.containsKey(IndexNode.keyOf(path))) {
                    throw new NoSuchFileException(getString(path));
                }
            }
        } finally {
            endRead();
        }
    }

    // Returns the parent path of 'path' (ROOTPATH for top-level names).
    private static byte[] getParent(byte[] path) {
        int off = getParentOff(path);
        if (off <= 1)
            return ROOTPATH;
        return Arrays.copyOf(path, off);
    }

    // Returns the offset of the '/' separating 'path' from its last name
    // element, ignoring a trailing '/' that marks a directory entry.
    private static int getParentOff(byte[] path) {
        int off = path.length - 1;
        if (off > 0 && path[off] == '/')  // isDirectory
            off--;
        while (off > 0 && path[off] != '/') { off--; }
        return off;
    }

    // Lock helpers guarding the in-memory directory structures (inodes tree).
    private void beginWrite() {
        rwlock.writeLock().lock();
    }

    private void endWrite() {
        rwlock.writeLock().unlock();
    }

    private void beginRead() {
        rwlock.readLock().lock();
    }

    private void endRead() {
        rwlock.readLock().unlock();
    }

    ///////////////////////////////////////////////////////////////////

    private volatile boolean isOpen = true;
    private final SeekableByteChannel ch; // channel to the zipfile
    final byte[] cen;     // CEN & ENDHDR
    private END end;
    private long locpos;  // position of first LOC header (usually 0)

    private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

    // name -> pos (in cen), IndexNode itself can be used as a "key"
    private LinkedHashMap<IndexNode, IndexNode> inodes;

    // Encodes the given entry name using the file system's zip coder.
    final byte[] getBytes(String name) {
        return zc.getBytes(name);
    }

    // Decodes the given entry name bytes using the file system's zip coder.
    final String getString(byte[] name) {
        return zc.toString(name);
    }

    // Safety net: close the file system (releasing the channel and temp
    // files) if the owner never called close().
    // NOTE(review): finalization is deprecated; a java.lang.ref.Cleaner
    // would be the modern replacement -- confirm before changing behavior.
    @SuppressWarnings("deprecation")
    protected void finalize() throws IOException {
        close();
    }

    // Reads len bytes of data from the specified offset into buf.
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    // Positioned read on the shared channel; synchronized because the
    // channel's position is shared mutable state.
    private long readFullyAt(ByteBuffer bb, long pos) throws IOException {
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // Scan backwards from the end of the file in blocks that overlap
        // by ENDHDR bytes, so a header straddling two reads is still found.
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                throw new ZipException("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i] == (byte)'P' &&
                    buf[i+1] == (byte)'K' &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    // end.endsub = ENDSUB(buf); // not used
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    // end.comlen = ENDCOM(buf); // not used
                    end.endpos = pos + i;
                    // try if there is zip64 end;
                    byte[] loc64 = new byte[ZIP64_LOCHDR];
                    if (end.endpos < ZIP64_LOCHDR ||
                        readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                        != loc64.length ||
                        !locator64SigAt(loc64, 0)) {
                        return end;
                    }
                    long end64pos = ZIP64_LOCOFF(loc64);
                    byte[] end64buf = new byte[ZIP64_ENDHDR];
                    if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                        != end64buf.length ||
                        !end64SigAt(end64buf, 0)) {
                        return end;
                    }
                    // end64 found,
                    long cenlen64 = ZIP64_ENDSIZ(end64buf);
                    long cenoff64 = ZIP64_ENDOFF(end64buf);
                    long centot64 = ZIP64_ENDTOT(end64buf);
                    // double-check: the zip64 values must agree with the
                    // 32-bit END values unless those were saturated markers
                    if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL ||
                        cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL ||
                        centot64 != end.centot && end.centot != ZIP64_MINVAL32) {
                        return end;
                    }
                    // to use the end64 values
                    end.cenlen = cenlen64;
                    end.cenoff = cenoff64;
                    end.centot = (int)centot64; // assume total < 2g
                    end.endpos = end64pos;
                    return end;
                }
            }
        }
        throw new ZipException("zip END header not found");
    }

    // Links 'node' into the directory tree, creating pseudo directory
    // IndexNodes for any missing ancestors up to the given root.
    private void makeParentDirs(IndexNode node, IndexNode root) {
        IndexNode parent;
        ParentLookup lookup = new ParentLookup();
        while (true) {
            int off = getParentOff(node.name);
            // parent is root
            if (off <= 1) {
                node.sibling = root.child;
                root.child = node;
                break;
            }
            // parent exists
            lookup = lookup.as(node.name, off);
            if (inodes.containsKey(lookup)) {
                parent = inodes.get(lookup);
                node.sibling = parent.child;
                parent.child = node;
                break;
            }
            // parent does not exist, add new pseudo directory entry
            parent = new IndexNode(Arrays.copyOf(node.name, off), true);
            inodes.put(parent, parent);
            node.sibling = parent.child;
            parent.child = node;
            node = parent;
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourself as
    // implemented below.
    private void buildNodeTree() {
        beginWrite();
        try {
            IndexNode root = inodes.remove(LOOKUPKEY.as(ROOTPATH));
            if (root == null) {
                root = new IndexNode(ROOTPATH, true);
            }
            IndexNode[] nodes = inodes.values().toArray(new IndexNode[0]);
            inodes.put(root, root);
            for (IndexNode node : nodes) {
                makeParentDirs(node, root);
            }
        } finally {
            endWrite();
        }
    }

    // Unlinks 'inode' from its parent's child/sibling chain.
    // NOTE(review): assumes the parent node exists and 'inode' is on its
    // chain -- callers hold the write lock and guarantee membership; a
    // missing parent would NPE here. Confirm before reusing elsewhere.
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    // Reads the zip file central directory and populates the inodes map
    // (initCEN below). Returns the CEN table bytes (plus trailing ENDHDR),
    // or null if only an END header is present (an empty zip). Throws
    // ZipException on any structural error in the directory.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;  // only END header present
        }
        if (end.cenlen > end.endpos)
            throw new ZipException("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            throw new ZipException("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            throw new ZipException("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                throw new ZipException("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen = CENNAM(cen, pos);
            int elen = CENEXT(cen, pos);
            int clen = CENCOM(cen, pos);
            // reject encrypted entries (general purpose bit 0)
            if ((CENFLG(cen, pos) & 1) != 0) {
                throw new ZipException("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                throw new ZipException("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                throw new ZipException("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos, nlen);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        // the CEN entries must exactly fill the table up to the END header
        if (pos + ENDHDR != cen.length) {
            throw new ZipException("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    // Throws ClosedFileSystemException if this file system has been closed.
    private void ensureOpen() {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);    // tracked so it can be cleaned up later
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    // true when the in-memory tree differs from the on-disk zip file
    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // Deletes 'inode': unlinks it from the tree and removes it from the
    // lookup map, marking the file system dirty.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Adds/replaces an entry in the lookup map; newly created entries
    // (NEW/FILECH/COPY) are also linked under their parent directory node.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
1452 private long copyLOCEntry(Entry e, boolean updateHeader, 1453 OutputStream os, 1454 long written, byte[] buf) 1455 throws IOException 1456 { 1457 long locoff = e.locoff; // where to read 1458 e.locoff = written; // update the e.locoff with new value 1459 1460 // calculate the size need to write out 1461 long size = 0; 1462 // if there is A ext 1463 if ((e.flag & FLAG_DATADESCR) != 0) { 1464 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1465 size = 24; 1466 else 1467 size = 16; 1468 } 1469 // read loc, use the original loc.elen/nlen 1470 // 1471 // an extra byte after loc is read, which should be the first byte of the 1472 // 'name' field of the loc. if this byte is '/', which means the original 1473 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1474 // is used to output the loc, in which the leading "/" will be removed 1475 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1476 throw new ZipException("loc: reading failed"); 1477 1478 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1479 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1480 size += e.csize; 1481 written = e.writeLOC(os) + size; 1482 } else { 1483 os.write(buf, 0, LOCHDR); // write out the loc header 1484 locoff += LOCHDR; 1485 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1486 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1487 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1488 written = LOCHDR + size; 1489 } 1490 int n; 1491 while (size > 0 && 1492 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1493 { 1494 if (size < n) 1495 n = (int)size; 1496 os.write(buf, 0, n); 1497 size -= n; 1498 locoff += n; 1499 } 1500 return written; 1501 } 1502 1503 private long writeEntry(Entry e, OutputStream os) 1504 throws IOException { 1505 1506 if (e.bytes == null && e.file == null) // dir, 0-length data 1507 return 0; 1508 1509 long written = 0; 1510 if (e.method != METHOD_STORED && e.csize > 0 && (e.crc != 0 || 
e.size == 0)) { 1511 // pre-compressed entry, write directly to output stream 1512 writeTo(e, os); 1513 } else { 1514 try (OutputStream os2 = (e.method == METHOD_STORED) ? 1515 new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) { 1516 writeTo(e, os2); 1517 } 1518 } 1519 written += e.csize; 1520 if ((e.flag & FLAG_DATADESCR) != 0) { 1521 written += e.writeEXT(os); 1522 } 1523 return written; 1524 } 1525 1526 private void writeTo(Entry e, OutputStream os) throws IOException { 1527 if (e.bytes != null) { 1528 os.write(e.bytes, 0, e.bytes.length); 1529 } else if (e.file != null) { 1530 if (e.type == Entry.NEW || e.type == Entry.FILECH) { 1531 try (InputStream is = Files.newInputStream(e.file)) { 1532 is.transferTo(os); 1533 } 1534 } 1535 Files.delete(e.file); 1536 tmppaths.remove(e.file); 1537 } 1538 } 1539 1540 // sync the zip file system, if there is any update 1541 private void sync() throws IOException { 1542 // check ex-closer 1543 if (!exChClosers.isEmpty()) { 1544 for (ExistingChannelCloser ecc : exChClosers) { 1545 if (ecc.closeAndDeleteIfDone()) { 1546 exChClosers.remove(ecc); 1547 } 1548 } 1549 } 1550 if (!hasUpdate) 1551 return; 1552 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1553 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) { 1554 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1555 long written = 0; 1556 byte[] buf = null; 1557 Entry e; 1558 1559 // write loc 1560 for (IndexNode inode : inodes.values()) { 1561 if (inode instanceof Entry) { // an updated inode 1562 e = (Entry)inode; 1563 try { 1564 if (e.type == Entry.COPY) { 1565 // entry copy: the only thing changed is the "name" 1566 // and "nlen" in LOC header, so we update/rewrite the 1567 // LOC in new file and simply copy the rest (data and 1568 // ext) without enflating/deflating from the old zip 1569 // file LOC entry. 
1570 if (buf == null) 1571 buf = new byte[8192]; 1572 written += copyLOCEntry(e, true, os, written, buf); 1573 } else { // NEW, FILECH or CEN 1574 e.locoff = written; 1575 written += e.writeLOC(os); // write loc header 1576 written += writeEntry(e, os); 1577 } 1578 elist.add(e); 1579 } catch (IOException x) { 1580 x.printStackTrace(); // skip any in-accurate entry 1581 } 1582 } else { // unchanged inode 1583 if (inode.pos == -1) { 1584 continue; // pseudo directory node 1585 } 1586 if (inode.name.length == 1 && inode.name[0] == '/') { 1587 continue; // no root '/' directory even if it 1588 // exists in original zip/jar file. 1589 } 1590 e = supportPosix ? new PosixEntry(this, inode) : new Entry(this, inode); 1591 try { 1592 if (buf == null) 1593 buf = new byte[8192]; 1594 written += copyLOCEntry(e, false, os, written, buf); 1595 elist.add(e); 1596 } catch (IOException x) { 1597 x.printStackTrace(); // skip any wrong entry 1598 } 1599 } 1600 } 1601 1602 // now write back the cen and end table 1603 end.cenoff = written; 1604 for (Entry entry : elist) { 1605 written += entry.writeCEN(os); 1606 } 1607 end.centot = elist.size(); 1608 end.cenlen = written - end.cenoff; 1609 end.write(os, written, forceEnd64); 1610 } 1611 if (!streams.isEmpty()) { 1612 // 1613 // There are outstanding input streams open on existing "ch", 1614 // so, don't close the "cha" and delete the "file for now, let 1615 // the "ex-channel-closer" to handle them 1616 Path path = createTempFileInSameDirectoryAs(zfpath); 1617 ExistingChannelCloser ecc = new ExistingChannelCloser(path, 1618 ch, 1619 streams); 1620 Files.move(zfpath, path, REPLACE_EXISTING); 1621 exChClosers.add(ecc); 1622 streams = Collections.synchronizedSet(new HashSet<>()); 1623 } else { 1624 ch.close(); 1625 Files.delete(zfpath); 1626 } 1627 1628 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1629 hasUpdate = false; // clear 1630 } 1631 1632 IndexNode getInode(byte[] path) { 1633 return 
inodes.get(IndexNode.keyOf(Objects.requireNonNull(path, "path"))); 1634 } 1635 1636 /** 1637 * Return the IndexNode from the root tree. If it doesn't exist, 1638 * it gets created along with all parent directory IndexNodes. 1639 */ 1640 IndexNode getOrCreateInode(byte[] path, boolean isdir) { 1641 IndexNode node = getInode(path); 1642 // if node exists, return it 1643 if (node != null) { 1644 return node; 1645 } 1646 1647 // otherwise create new pseudo node and parent directory hierarchy 1648 node = new IndexNode(path, isdir); 1649 beginWrite(); 1650 try { 1651 makeParentDirs(node, Objects.requireNonNull(inodes.get(IndexNode.keyOf(ROOTPATH)), "no root node found")); 1652 return node; 1653 } finally { 1654 endWrite(); 1655 } 1656 } 1657 1658 private Entry getEntry(byte[] path) throws IOException { 1659 IndexNode inode = getInode(path); 1660 if (inode instanceof Entry) 1661 return (Entry)inode; 1662 if (inode == null || inode.pos == -1) 1663 return null; 1664 return supportPosix ? new PosixEntry(this, inode): new Entry(this, inode); 1665 } 1666 1667 public void deleteFile(byte[] path, boolean failIfNotExists) 1668 throws IOException 1669 { 1670 checkWritable(); 1671 IndexNode inode = getInode(path); 1672 if (inode == null) { 1673 if (path != null && path.length == 0) 1674 throw new ZipException("root directory </> can't not be delete"); 1675 if (failIfNotExists) 1676 throw new NoSuchFileException(getString(path)); 1677 } else { 1678 if (inode.isDir() && inode.child != null) 1679 throw new DirectoryNotEmptyException(getString(path)); 1680 updateDelete(inode); 1681 } 1682 } 1683 1684 // Returns an out stream for either 1685 // (1) writing the contents of a new entry, if the entry exists, or 1686 // (2) updating/replacing the contents of the specified existing entry. 
1687 private OutputStream getOutputStream(Entry e) throws IOException { 1688 if (e.mtime == -1) 1689 e.mtime = System.currentTimeMillis(); 1690 if (e.method == -1) 1691 e.method = defaultCompressionMethod; 1692 // store size, compressed size, and crc-32 in datadescr 1693 e.flag = FLAG_DATADESCR; 1694 if (zc.isUTF8()) 1695 e.flag |= FLAG_USE_UTF8; 1696 OutputStream os; 1697 if (useTempFile) { 1698 e.file = getTempPathForEntry(null); 1699 os = Files.newOutputStream(e.file, WRITE); 1700 } else { 1701 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1702 } 1703 if (e.method == METHOD_DEFLATED) { 1704 return new DeflatingEntryOutputStream(e, os); 1705 } else { 1706 return new EntryOutputStream(e, os); 1707 } 1708 } 1709 1710 private class EntryOutputStream extends FilterOutputStream { 1711 private final Entry e; 1712 private long written; 1713 private boolean isClosed; 1714 1715 EntryOutputStream(Entry e, OutputStream os) { 1716 super(os); 1717 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1718 // this.written = 0; 1719 } 1720 1721 @Override 1722 public synchronized void write(int b) throws IOException { 1723 out.write(b); 1724 written += 1; 1725 } 1726 1727 @Override 1728 public synchronized void write(byte[] b, int off, int len) 1729 throws IOException { 1730 out.write(b, off, len); 1731 written += len; 1732 } 1733 1734 @Override 1735 public synchronized void close() throws IOException { 1736 if (isClosed) { 1737 return; 1738 } 1739 isClosed = true; 1740 e.size = written; 1741 if (out instanceof ByteArrayOutputStream) 1742 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1743 super.close(); 1744 update(e); 1745 } 1746 } 1747 1748 // Output stream returned when writing "deflated" entries into memory, 1749 // to enable eager (possibly parallel) deflation and reduce memory required. 
    private class DeflatingEntryOutputStream extends DeflaterOutputStream {
        private final CRC32 crc;   // running CRC-32 of the uncompressed data
        private final Entry e;     // entry whose contents are being written
        private boolean isClosed;  // guards against double-close

        DeflatingEntryOutputStream(Entry e, OutputStream os) {
            super(os, getDeflater());   // deflater comes from the shared pool
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public synchronized void write(int b) throws IOException {
            super.write(b);
            crc.update(b);
        }

        @Override
        public synchronized void write(byte[] b, int off, int len)
                throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        // Finishes deflation, records size/csize/crc on the entry, captures
        // the compressed bytes if buffered in memory, publishes the entry
        // via update(), and returns the deflater to the pool.
        @Override
        public synchronized void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            finish();
            e.size = def.getBytesRead();      // uncompressed size
            e.csize = def.getBytesWritten();  // compressed size
            e.crc = crc.getValue();
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();
            super.close();
            update(e);
            releaseDeflater(def);
        }
    }

    // Wrapper output stream class to write out a "stored" entry.
    // (1) this class does not close the underlying out stream when
    // being closed.
1794 // (2) no need to be "synchronized", only used by sync() 1795 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1796 private final CRC32 crc; 1797 private final Entry e; 1798 private long written; 1799 private boolean isClosed; 1800 1801 EntryOutputStreamCRC32(Entry e, OutputStream os) { 1802 super(os); 1803 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1804 this.crc = new CRC32(); 1805 } 1806 1807 @Override 1808 public void write(int b) throws IOException { 1809 out.write(b); 1810 crc.update(b); 1811 written += 1; 1812 } 1813 1814 @Override 1815 public void write(byte[] b, int off, int len) 1816 throws IOException { 1817 out.write(b, off, len); 1818 crc.update(b, off, len); 1819 written += len; 1820 } 1821 1822 @Override 1823 public void close() { 1824 if (isClosed) 1825 return; 1826 isClosed = true; 1827 e.size = e.csize = written; 1828 e.crc = crc.getValue(); 1829 } 1830 } 1831 1832 // Wrapper output stream class to write out a "deflated" entry. 1833 // (1) this class does not close the underlying out stream when 1834 // being closed. 
    // (2) no need to be "synchronized", only used by sync()
    private class EntryOutputStreamDef extends DeflaterOutputStream {
        private final CRC32 crc;   // running CRC-32 of the uncompressed data
        private final Entry e;
        private boolean isClosed;  // guards against double-close

        EntryOutputStreamDef(Entry e, OutputStream os) {
            super(os, getDeflater());   // deflater comes from the shared pool
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        // Finishes deflation and records size/csize/crc on the entry.
        // Does NOT close 'out' (see class comment) but does return the
        // deflater to the pool.
        @Override
        public void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            finish();
            e.size = def.getBytesRead();      // uncompressed size
            e.csize = def.getBytesWritten();  // compressed size
            e.crc = crc.getValue();
            releaseDeflater(def);
        }
    }

    // Returns an input stream over the entry's (decompressed) contents,
    // backed by in-memory bytes, a temp file, or the zip channel itself.
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis;
        if (e.type == Entry.NEW) {
            if (e.bytes != null)
                eis = new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                eis = Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {  // untouched CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
                private boolean isClosed = false;
                // Return the inflater to the pool and unregister the stream.
                public void close() throws IOException {
                    if (!isClosed) {
                        releaseInflater(inf);
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                // Remaining uncompressed bytes, clamped to int range.
                public int available() {
                    if (isClosed)
                        return 0;
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);   // track so close()/sync() can account for it
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        private long rem;                       // number of remaining bytes within entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            pos = e.locoff;
            if (pos == -1) {
                Entry e2 = getEntry(e.name);
                if (e2 == null) {
                    throw new ZipException("invalid loc for entry <" + getString(e.name) + ">");
                }
                pos = e2.locoff;
            }
            pos = -pos;  // lazy initialize the real data offset
        }

        public int read(byte[] b, int off, int len) throws IOException {
            ensureOpen();
            initDataPos();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }

        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }

        public long skip(long n) {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }

        public int available() {
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }

        public void close() {
            rem = 0;
            streams.remove(this);
        }

        // Resolves the lazily-encoded (negative) position into the real
        // data offset by reading the entry's LOC header.
        private void initDataPos() throws IOException {
            if (pos <= 0) {
                pos = -pos + locpos;
                byte[] buf = new byte[LOCHDR];
                if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                    throw new ZipException("invalid loc " + pos + " for entry reading");
                }
                pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
            }
        }
    }

    // Maximum number of de/inflater we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                return inflaters.remove(size - 1);
            } else {
                return new Inflater(true);
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
    private Deflater getDeflater() {
        synchronized (deflaters) {
            int size = deflaters.size();
            if (size > 0) {
                return deflaters.remove(size - 1);
            } else {
                return new Deflater(Deflater.DEFAULT_COMPRESSION, true);
            }
        }
    }

    // Releases the specified deflater to the list of available deflaters.
2081 private void releaseDeflater(Deflater def) { 2082 synchronized (deflaters) { 2083 if (inflaters.size() < MAX_FLATER) { 2084 def.reset(); 2085 deflaters.add(def); 2086 } else { 2087 def.end(); 2088 } 2089 } 2090 } 2091 2092 // End of central directory record 2093 static class END { 2094 // The fields that are commented out below are not used by anyone and write() uses "0" 2095 // int disknum; 2096 // int sdisknum; 2097 // int endsub; 2098 int centot; // 4 bytes 2099 long cenlen; // 4 bytes 2100 long cenoff; // 4 bytes 2101 // int comlen; // comment length 2102 // byte[] comment; 2103 2104 // members of Zip64 end of central directory locator 2105 // int diskNum; 2106 long endpos; 2107 // int disktot; 2108 2109 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 2110 boolean hasZip64 = forceEnd64; // false; 2111 long xlen = cenlen; 2112 long xoff = cenoff; 2113 if (xlen >= ZIP64_MINVAL) { 2114 xlen = ZIP64_MINVAL; 2115 hasZip64 = true; 2116 } 2117 if (xoff >= ZIP64_MINVAL) { 2118 xoff = ZIP64_MINVAL; 2119 hasZip64 = true; 2120 } 2121 int count = centot; 2122 if (count >= ZIP64_MINVAL32) { 2123 count = ZIP64_MINVAL32; 2124 hasZip64 = true; 2125 } 2126 if (hasZip64) { 2127 //zip64 end of central directory record 2128 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 2129 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 2130 writeShort(os, 45); // version made by 2131 writeShort(os, 45); // version needed to extract 2132 writeInt(os, 0); // number of this disk 2133 writeInt(os, 0); // central directory start disk 2134 writeLong(os, centot); // number of directory entries on disk 2135 writeLong(os, centot); // number of directory entries 2136 writeLong(os, cenlen); // length of central directory 2137 writeLong(os, cenoff); // offset of central directory 2138 2139 //zip64 end of central directory locator 2140 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 2141 writeInt(os, 0); // zip64 END start disk 2142 
                writeLong(os, offset);            // offset of zip64 END
                writeInt(os, 1);                  // total number of disks
            }
            // Classic END record; overflowed fields carry the ZIP64 sentinel
            // values computed above.
            writeInt(os, ENDSIG);          // END record signature
            writeShort(os, 0);             // number of this disk
            writeShort(os, 0);             // central directory start disk
            writeShort(os, count);         // number of directory entries on disk
            writeShort(os, count);         // total number of directory entries
            writeInt(os, xlen);            // length of central directory
            writeInt(os, xoff);            // offset of central directory
            writeShort(os, 0);             // zip file comment, not used
        }
    }

    // Internal node that links a "name" to its pos in cen table.
    // The node itself can be used as a "key" to lookup itself in
    // the HashMap inodes.
    static class IndexNode {
        byte[] name;       // entry name, absolute ('/'-prefixed), no trailing '/'
        int hashcode;      // node is hashable/hashed by its name
        boolean isdir;
        int pos = -1;      // position in cen table, -1 means the
                           // entry does not exist in zip file
        IndexNode child;   // first child
        IndexNode sibling; // next sibling

        IndexNode() {}

        IndexNode(byte[] name, boolean isdir) {
            name(name);
            this.isdir = isdir;
            this.pos = -1;
        }

        IndexNode(byte[] name, int pos) {
            name(name);
            this.pos = pos;
        }

        // constructor for initCEN() (1) remove trailing '/' (2) pad leading '/'
        IndexNode(byte[] cen, int pos, int nlen) {
            int noff = pos + CENHDR;      // name starts right after the CEN header
            if (cen[noff + nlen - 1] == '/') {
                isdir = true;             // trailing '/' marks a directory entry
                nlen--;
            }
            if (nlen > 0 && cen[noff] == '/') {
                name = Arrays.copyOfRange(cen, noff, noff + nlen);
            } else {
                // prepend a '/' so all names are absolute
                name = new byte[nlen + 1];
                System.arraycopy(cen, noff, name, 1, nlen);
                name[0] = '/';
            }
            name(normalize(name));
            this.pos = pos;
        }

        // Normalize the IndexNode.name field.
        // Collapses runs of '/' and drops a single trailing '/'. Returns the
        // input array unchanged when it is already normalized (no copy).
        private byte[] normalize(byte[] path) {
            int len = path.length;
            if (len == 0)
                return path;
            byte prevC = 0;
            for (int pathPos = 0; pathPos < len; pathPos++) {
                byte c = path[pathPos];
                if (c == '/' && prevC == '/')
                    // first duplicate found: switch to the copying overload
                    return normalize(path, pathPos - 1);
                prevC = c;
            }
            if (len > 1 && prevC == '/') {
                return Arrays.copyOf(path, len - 1);
            }
            return path;
        }

        // Copying normalization, used once a duplicate '/' is found at
        // index off+1; copies path[0..off) verbatim then filters the rest.
        private byte[] normalize(byte[] path, int off) {
            // As we know we have at least one / to trim, we can reduce
            // the size of the resulting array
            byte[] to = new byte[path.length - 1];
            int pathPos = 0;
            while (pathPos < off) {
                to[pathPos] = path[pathPos];
                pathPos++;
            }
            int toPos = pathPos;
            byte prevC = 0;
            while (pathPos < path.length) {
                byte c = path[pathPos++];
                if (c == '/' && prevC == '/')
                    continue;       // skip repeated separators
                to[toPos++] = c;
                prevC = c;
            }
            if (toPos > 1 && to[toPos - 1] == '/')
                toPos--;            // drop trailing '/'
            return (toPos == to.length) ?
                to : Arrays.copyOf(to, toPos);
        }

        // Per-thread reusable lookup key, to avoid allocating an IndexNode
        // for every hash-map probe.
        private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>();

        final static IndexNode keyOf(byte[] name) { // get a lookup key;
            IndexNode key = cachedKey.get();
            if (key == null) {
                key = new IndexNode(name, -1);
                cachedKey.set(key);
            }
            return key.as(name);
        }

        // Sets the name and keeps the cached hash in sync with it.
        final void name(byte[] name) {
            this.name = name;
            this.hashcode = Arrays.hashCode(name);
        }

        final IndexNode as(byte[] name) { // reuse the node, mostly
            name(name);                   // as a lookup "key"
            return this;
        }

        boolean isDir() {
            return isdir;
        }

        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            if (other instanceof ParentLookup) {
                // ParentLookup compares only a prefix; delegate to its equals
                return ((ParentLookup)other).equals(this);
            }
            return Arrays.equals(name, ((IndexNode)other).name);
        }

        @Override
        public int hashCode() {
            return hashcode;
        }

        @Override
        public String toString() {
            return new String(name) + (isdir ?
                " (dir)" : " ") + ", index: " + pos;
        }
    }

    static class Entry extends IndexNode implements ZipFileAttributes {
        static final int CEN    = 1;  // entry read from cen
        static final int NEW    = 2;  // updated contents in bytes or file
        static final int FILECH = 3;  // fch update in "file"
        static final int COPY   = 4;  // copy of a CEN entry

        byte[] bytes;      // updated content bytes
        Path file;         // use tmp file to store bytes;
        int type = CEN;    // default is the entry read from cen

        // entry attributes
        int version;
        int flag;
        int posixPerms = -1;  // posix permissions, -1 = none stored
        int method = -1;      // compression method
        long mtime = -1;      // last modification time (in DOS time)
        long atime = -1;      // last access time
        long ctime = -1;      // create time
        long crc = -1;        // crc-32 of entry data
        long csize = -1;      // compressed size of entry data
        long size = -1;       // uncompressed size of entry data
        byte[] extra;

        // CEN
        // The fields that are commented out below are not used by anyone and write() uses "0"
        // int versionMade;
        // int disk;
        // int attrs;
        // long attrsEx;
        long locoff;
        byte[] comment;

        // Creates a fresh entry with all times set to "now" and empty content.
        Entry(byte[] name, boolean isdir, int method) {
            name(name);
            this.isdir = isdir;
            this.mtime = this.ctime = this.atime = System.currentTimeMillis();
            this.crc = 0;
            this.size = 0;
            this.csize = 0;
            this.method = method;
        }

        // As above, additionally applying any "posix:permissions" attribute.
        @SuppressWarnings("unchecked")
        Entry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) {
            this(name, isdir, method);
            this.type = type;
            for (FileAttribute<?> attr : attrs) {
                String attrName = attr.name();
                if (attrName.equals("posix:permissions")) {
                    posixPerms = ZipUtils.permsToFlags((Set<PosixFilePermission>)attr.value());
                }
            }
        }

        Entry(byte[] name, Path file, int type, FileAttribute<?>...
              attrs) {
            // Entry whose (updated) content lives in a temporary file.
            this(name, type, false, METHOD_STORED, attrs);
            this.file = file;
        }

        // Copy constructor used when duplicating an entry with a new type.
        Entry(Entry e, int type) {
            name(e.name);
            this.isdir = e.isdir;
            this.version = e.version;
            this.ctime = e.ctime;
            this.atime = e.atime;
            this.mtime = e.mtime;
            this.crc = e.crc;
            this.size = e.size;
            this.csize = e.csize;
            this.method = e.method;
            this.extra = e.extra;
            /*
            this.versionMade = e.versionMade;
            this.disk = e.disk;
            this.attrs = e.attrs;
            this.attrsEx = e.attrsEx;
            */
            this.locoff = e.locoff;
            this.comment = e.comment;
            this.posixPerms = e.posixPerms;
            this.type = type;
        }

        // Populates this entry from the CEN record located by the inode.
        Entry(ZipFileSystem zipfs, IndexNode inode) throws IOException {
            readCEN(zipfs, inode);
        }

        // Calculates a suitable base for the version number to
        // be used for fields version made by/version needed to extract.
        // The lower bytes of these 2 byte fields hold the version number
        // (value/10 = major; value%10 = minor)
        // For different features certain minimum versions apply:
        // stored = 10 (1.0), deflated = 20 (2.0), zip64 = 45 (4.5)
        private int version(boolean zip64) throws ZipException {
            if (zip64) {
                return 45;
            }
            if (method == METHOD_DEFLATED)
                return 20;
            else if (method == METHOD_STORED)
                return 10;
            throw new ZipException("unsupported compression method");
        }

        /**
         * Adds information about compatibility of file attribute information
         * to a version value.
         */
        private int versionMadeBy(int version) {
            return (posixPerms < 0) ?
                    version :
                    VERSION_MADE_BY_BASE_UNIX | (version & 0xff);
        }

        ///////////////////// CEN //////////////////////
        // Parses this entry's fields out of the in-memory CEN table at the
        // inode's recorded position.
        private void readCEN(ZipFileSystem zipfs, IndexNode inode) throws IOException {
            byte[] cen = zipfs.cen;
            int pos = inode.pos;
            if (!cenSigAt(cen, pos))
                throw new ZipException("invalid CEN header (bad signature)");
            version = CENVER(cen, pos);
            flag = CENFLG(cen, pos);
            method = CENHOW(cen, pos);
            mtime = dosToJavaTime(CENTIM(cen, pos));
            crc = CENCRC(cen, pos);
            csize = CENSIZ(cen, pos);
            size = CENLEN(cen, pos);
            int nlen = CENNAM(cen, pos);
            int elen = CENEXT(cen, pos);
            int clen = CENCOM(cen, pos);
            /*
            versionMade = CENVEM(cen, pos);
            disk = CENDSK(cen, pos);
            attrs = CENATT(cen, pos);
            attrsEx = CENATX(cen, pos);
            */
            if (CENVEM_FA(cen, pos) == FILE_ATTRIBUTES_UNIX) {
                posixPerms = CENATX_PERMS(cen, pos) & 0xFFF; // 12 bits for setuid, setgid, sticky + perms
            }
            locoff = CENOFF(cen, pos);
            pos += CENHDR;
            // reuse the already-normalized name/hash from the inode
            this.name = inode.name;
            this.isdir = inode.isdir;
            this.hashcode = inode.hashcode;

            pos += nlen;
            if (elen > 0) {
                extra = Arrays.copyOfRange(cen, pos, pos + elen);
                pos += elen;
                readExtra(zipfs);
            }
            if (clen > 0) {
                comment = Arrays.copyOfRange(cen, pos, pos + clen);
            }
        }

        // Writes this entry's CEN record to os; returns the number of bytes
        // written (header + name + extra + comment).
        private int writeCEN(OutputStream os) throws IOException {
            long csize0 = csize;
            long size0 = size;
            long locoff0 = locoff;
            int elen64 = 0;   // extra for ZIP64
            int elenNTFS = 0; // extra for NTFS (a/c/mtime)
            int elenEXTT = 0; // extra for Extended Timestamp
            boolean foundExtraTime = false; // if time stamp NTFS, EXTT present

            byte[] zname = isdir ? toDirectoryPath(name) : name;

            // confirm size/length
            int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash"
            int elen = (extra != null) ?
                extra.length : 0;
            int eoff = 0;
            int clen = (comment != null) ? comment.length : 0;
            // Any 32-bit overflow forces a ZIP64 extra block; overflowed
            // fields get the ZIP64_MINVAL sentinel in the fixed record.
            if (csize >= ZIP64_MINVAL) {
                csize0 = ZIP64_MINVAL;
                elen64 += 8;              // csize(8)
            }
            if (size >= ZIP64_MINVAL) {
                size0 = ZIP64_MINVAL;     // size(8)
                elen64 += 8;
            }
            if (locoff >= ZIP64_MINVAL) {
                locoff0 = ZIP64_MINVAL;
                elen64 += 8;              // offset(8)
            }
            if (elen64 != 0) {
                elen64 += 4;              // header and data sz 4 bytes
            }
            boolean zip64 = (elen64 != 0);
            int version0 = version(zip64);
            // scan the existing extra data for a timestamp block
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {          // use NTFS
                    elenNTFS = 36;        // total 36 bytes
                } else {                  // Extended Timestamp otherwise
                    elenEXTT = 9;         // only mtime in cen
                }
            }
            writeInt(os, CENSIG);                    // CEN header signature
            writeShort(os, versionMadeBy(version0)); // version made by
            writeShort(os, version0);                // version needed to extract
            writeShort(os, flag);                    // general purpose bit flag
            writeShort(os, method);                  // compression method
            // last modification time
            writeInt(os, (int)javaToDosTime(mtime));
            writeInt(os, crc);                       // crc-32
            writeInt(os, csize0);                    // compressed size
            writeInt(os, size0);                     // uncompressed size
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);

            if (comment != null) {
                writeShort(os, Math.min(clen, 0xffff));
            } else {
                writeShort(os, 0);
            }
            writeShort(os, 0);                       // starting disk number
            writeShort(os, 0);                       // internal file attributes (unused)
            // NOTE(review): posixPerms == 0 (permissions explicitly "none")
            // is written as 0 external attrs here, while versionMadeBy()
            // still marks the entry UNIX for posixPerms == 0; a reader then
            // recovers perms 0, so the round trip works, but `> 0` vs the
            // `!= -1` used elsewhere looks inconsistent — confirm intended.
            writeInt(os, posixPerms > 0 ?
                posixPerms << 16 : 0);    // external file
                                          // attributes, used for storing posix
                                          // permissions (upper 16 bits)
            writeInt(os, locoff0);        // relative offset of local header
            writeBytes(os, zname, 1, nlen);
            if (zip64) {
                writeShort(os, EXTID_ZIP64);// Zip64 extra
                writeShort(os, elen64 - 4); // size of "this" extra block
                // only the fields that overflowed are present, in this order
                if (size0 == ZIP64_MINVAL)
                    writeLong(os, size);
                if (csize0 == ZIP64_MINVAL)
                    writeLong(os, csize);
                if (locoff0 == ZIP64_MINVAL)
                    writeLong(os, locoff);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);            // reserved
                writeShort(os, 0x0001);     // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);
                if (ctime == -1)
                    os.write(0x3);          // mtime and atime
                else
                    os.write(0x7);          // mtime, atime and ctime
                writeInt(os, javaToUnixTime(mtime));
            }
            if (extra != null)              // whatever not recognized
                writeBytes(os, extra);
            if (comment != null)            //TBD: 0, Math.min(commentBytes.length, 0xffff));
                writeBytes(os, comment);
            return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT;
        }

        ///////////////////// LOC //////////////////////

        // Writes this entry's LOC (local file header) record to os; returns
        // the number of bytes written.
        private int writeLOC(OutputStream os) throws IOException {
            byte[] zname = isdir ? toDirectoryPath(name) : name;
            int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash
            int elen = (extra != null) ?
                extra.length : 0;
            boolean foundExtraTime = false; // if extra timestamp present
            int eoff = 0;
            int elen64 = 0;
            boolean zip64 = false;
            int elenEXTT = 0;
            int elenNTFS = 0;
            writeInt(os, LOCSIG);               // LOC header signature
            if ((flag & FLAG_DATADESCR) != 0) {
                // sizes/crc are unknown up front; a data descriptor follows
                // the compressed data instead (see writeEXT)
                writeShort(os, version(false)); // version needed to extract
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;    //headid(2) + size(2) + size(8) + csize(8)
                    zip64 = true;
                }
                writeShort(os, version(zip64)); // version needed to extract
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);              // crc-32
                if (zip64) {
                    writeInt(os, ZIP64_MINVAL); // sentinels; real values in extra
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);        // compressed size
                    writeInt(os, size);         // uncompressed size
                }
            }
            // scan the existing extra data for a timestamp block
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;              // NTFS, total 36 bytes
                } else {                        // on unix use "ext time"
                    elenEXTT = 9;
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, zname, 1, nlen);
            if (zip64) {
                writeShort(os,
                    EXTID_ZIP64);
                writeShort(os, 16);
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);            // reserved
                writeShort(os, 0x0001);     // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);// size for the following data block
                int fbyte = 0x1;
                if (atime != -1)             // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)             // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);             // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor
        // Written after the compressed data when FLAG_DATADESCR is set;
        // returns the number of bytes written.
        private int writeEXT(OutputStream os) throws IOException {
            writeInt(os, EXTSIG);           // EXT header signature
            writeInt(os, crc);              // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);        // compressed size
                writeInt(os, size);         // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        private void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;   // write cursor for unrecognized blocks we keep
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)         // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64
                    :
                    // 64-bit replacements, present only for fields whose
                    // 32-bit CEN value was the ZIP64_MINVAL sentinel.
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)  // invalid zip64 extra
                            break;           // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)
                        break;
                    pos += 4;    // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)
                        break;
                    if (SH(extra, pos + 2) != 24)
                        break;
                    // override the loc field, the timestamp here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime, if flag
                    // "zipinfo-time" is not specified to false;
                    // there is performance cost (move up to loc and read) to
                    // access the loc table foreach entry;
                    if (zipfs.noExtt) {
                        if (sz == 5)
                            mtime = unixToJavaTime(LG(extra, pos + 1));
                        break;
                    }
                    // read the LOC header to locate its extra field
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length, locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (!locSigAt(buf, 0))
                        throw new ZipException("loc: wrong sig ->"
                                               + Long.toString(getSig(buf, 0), 16));
                    int locElen = LOCEXT(buf);
                    if (locElen < 9)         // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length, locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    // scan the LOC extra data for the EXTT block
                    int locPos = 0;
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ  = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag !=
                            EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        // flag bits select which timestamps are present,
                        // in mtime/atime/ctime order
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                        }
                        break;
                    }
                    break;
                default:    // unknown tag: keep it, compacting to the front
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            // keep only the unrecognized blocks (or nothing at all)
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format(" name : %s%n", new String(name));
            fm.format(" creationTime : %tc%n", creationTime().toMillis());
            fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis());
            fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format(" isRegularFile : %b%n", isRegularFile());
            fm.format(" isDirectory : %b%n", isDirectory());
            fm.format(" isSymbolicLink : %b%n", isSymbolicLink());
            fm.format(" isOther : %b%n", isOther());
            fm.format(" fileKey : %s%n", fileKey());
            fm.format(" size : %d%n", size());
            fm.format(" compressedSize : %d%n", compressedSize());
            fm.format(" crc : %x%n", crc());
            fm.format(" method : %d%n", method());
            Set<PosixFilePermission> permissions = storedPermissions().orElse(null);
            if (permissions != null) {
                fm.format(" permissions : %s%n", permissions);
            }
            fm.close();
            return sb.toString();
        }

        ///////// basic file attributes ///////////
        @Override
        public FileTime creationTime() {
            // fall back to mtime when no create time was stored
            return FileTime.fromMillis(ctime == -1 ?
                mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        @Override
        public boolean isOther() {
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        @Override
        public FileTime lastAccessTime() {
            // fall back to mtime when no access time was stored
            return FileTime.fromMillis(atime == -1 ? mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        @Override
        public long size() {
            return size;
        }

        @Override
        public boolean isSymbolicLink() {
            return false;
        }

        @Override
        public Object fileKey() {
            return null;
        }

        ///////// zip file attributes ///////////

        @Override
        public long compressedSize() {
            return csize;
        }

        @Override
        public long crc() {
            return crc;
        }

        @Override
        public int method() {
            return method;
        }

        @Override
        public byte[] extra() {
            // defensive copy; callers must not see the internal array
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        @Override
        public byte[] comment() {
            // defensive copy; callers must not see the internal array
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }

        @Override
        public Optional<Set<PosixFilePermission>> storedPermissions() {
            // Decodes the posixPerms bit mask back into a permission set;
            // empty Optional when no permissions were stored (-1).
            Set<PosixFilePermission> perms = null;
            if (posixPerms != -1) {
                perms = new HashSet<>(PosixFilePermission.values().length);
                for (PosixFilePermission perm : PosixFilePermission.values()) {
                    if ((posixPerms & ZipUtils.permToFlag(perm)) != 0) {
                        perms.add(perm);
                    }
                }
            }
            return Optional.ofNullable(perms);
        }
    }

    // Entry variant that also exposes PosixFileAttributes (owner/group come
    // from the file system defaults; only permissions are stored in the zip).
    final class PosixEntry extends Entry implements PosixFileAttributes {
        private UserPrincipal owner = defaultOwner;
        private GroupPrincipal group = defaultGroup;

        PosixEntry(byte[] name, boolean isdir,
                   int method) {
            super(name, isdir, method);
        }

        PosixEntry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) {
            super(name, type, isdir, method, attrs);
        }

        PosixEntry(byte[] name, Path file, int type, FileAttribute<?>... attrs) {
            super(name, file, type, attrs);
        }

        PosixEntry(PosixEntry e, int type) {
            super(e, type);
            this.owner = e.owner;
            this.group = e.group;
        }

        PosixEntry(ZipFileSystem zipfs, IndexNode inode) throws IOException {
            super(zipfs, inode);
        }

        @Override
        public UserPrincipal owner() {
            return owner;
        }

        @Override
        public GroupPrincipal group() {
            return group;
        }

        @Override
        public Set<PosixFilePermission> permissions() {
            // stored permissions if any, otherwise the configured defaults
            return storedPermissions().orElse(Set.copyOf(defaultPermissions));
        }
    }

    // Defers closing (and deleting) a superseded backing file until all
    // input streams opened against it have been closed.
    private static class ExistingChannelCloser {
        private final Path path;
        private final SeekableByteChannel ch;
        private final Set<InputStream> streams;
        ExistingChannelCloser(Path path,
                              SeekableByteChannel ch,
                              Set<InputStream> streams) {
            this.path = path;
            this.ch = ch;
            this.streams = streams;
        }

        /**
         * If there are no more outstanding streams, close the channel and
         * delete the backing file
         *
         * @return true if we're done and closed the backing file,
         *         otherwise false
         * @throws IOException if closing the channel or deleting the
         *         backing file fails
         */
        private boolean closeAndDeleteIfDone() throws IOException {
            if (streams.isEmpty()) {
                ch.close();
                Files.delete(path);
                return true;
            }
            return false;
        }
    }

    // purely for parent lookup, so we don't have to copy the parent
    // name every time
    static class ParentLookup extends IndexNode {
        int len;    // only name[0..len) is significant for this key
        ParentLookup() {}

        final ParentLookup as(byte[] name, int len) { // as a lookup "key"
            name(name, len);
            return this;
        }

        // Sets the (prefix) name without copying; the hash covers only the
        // first len bytes so it matches an IndexNode holding that prefix.
        void name(byte[] name, int len) {
            this.name = name;
            this.len = len;
            // calculate the hashcode the same way as Arrays.hashCode() does
            int result = 1;
            for (int i = 0; i < len; i++)
                result = 31 * result + name[i];
            this.hashcode = result;
        }

        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // compare only the first len bytes of this key's name
            byte[] oname = ((IndexNode)other).name;
            return Arrays.equals(name, 0, len,
                                 oname, 0, oname.length);
        }
    }
}