1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package jdk.nio.zipfs; 27 28 import java.io.BufferedOutputStream; 29 import java.io.ByteArrayInputStream; 30 import java.io.ByteArrayOutputStream; 31 import java.io.EOFException; 32 import java.io.FilterOutputStream; 33 import java.io.IOException; 34 import java.io.InputStream; 35 import java.io.OutputStream; 36 import java.nio.ByteBuffer; 37 import java.nio.MappedByteBuffer; 38 import java.nio.channels.FileChannel; 39 import java.nio.channels.FileLock; 40 import java.nio.channels.ReadableByteChannel; 41 import java.nio.channels.SeekableByteChannel; 42 import java.nio.channels.WritableByteChannel; 43 import java.nio.file.*; 44 import java.nio.file.attribute.*; 45 import java.nio.file.spi.FileSystemProvider; 46 import java.security.AccessController; 47 import java.security.PrivilegedAction; 48 import java.security.PrivilegedActionException; 49 import java.security.PrivilegedExceptionAction; 50 import java.util.*; 51 import java.util.concurrent.locks.ReadWriteLock; 52 import java.util.concurrent.locks.ReentrantReadWriteLock; 53 import java.util.regex.Pattern; 54 import java.util.zip.CRC32; 55 import java.util.zip.Deflater; 56 import java.util.zip.DeflaterOutputStream; 57 import java.util.zip.Inflater; 58 import java.util.zip.InflaterInputStream; 59 import java.util.zip.ZipException; 60 61 import static java.lang.Boolean.TRUE; 62 import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; 63 import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; 64 import static java.nio.file.StandardOpenOption.APPEND; 65 import static java.nio.file.StandardOpenOption.CREATE; 66 import static java.nio.file.StandardOpenOption.CREATE_NEW; 67 import static java.nio.file.StandardOpenOption.READ; 68 import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; 69 import static java.nio.file.StandardOpenOption.WRITE; 70 import static jdk.nio.zipfs.ZipConstants.*; 71 import static jdk.nio.zipfs.ZipUtils.*; 72 73 /** 74 * A FileSystem built on a zip file 75 * 76 
 * @author Xueming Shen
 */
class ZipFileSystem extends FileSystem {
    // statics
    private static final boolean isWindows = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)()->System.getProperty("os.name")
                                                 .startsWith("Windows"));
    private static final byte[] ROOTPATH = new byte[] { '/' };
    // env-map keys for configuring the POSIX attribute support
    private static final String OPT_POSIX = "enablePosixFileAttributes";
    private static final String OPT_DEFAULT_OWNER = "defaultOwner";
    private static final String OPT_DEFAULT_GROUP = "defaultGroup";
    private static final String OPT_DEFAULT_PERMISSIONS = "defaultPermissions";

    // permissions used when no explicit default is configured in env ("777")
    private static final Set<PosixFilePermission> DEFAULT_PERMISSIONS =
        PosixFilePermissions.fromString("rwxrwxrwx");

    private final ZipFileSystemProvider provider;
    private final Path zfpath;   // path of the underlying zip file
    final ZipCoder zc;           // byte[] <-> String codec for entry names/comments
    private final ZipPath rootdir;
    private boolean readOnly; // readonly file system, false by default

    // default time stamp for pseudo entries
    private final long zfsDefaultTimeStamp = System.currentTimeMillis();

    // configurable by env map
    private final boolean noExtt;       // see readExtra()
    private final boolean useTempFile;  // use a temp file for newOS, default
                                        // is to use BAOS for better performance
    private final boolean forceEnd64;
    private final int defaultCompressionMethod; // METHOD_STORED if "noCompression=true"
                                                // METHOD_DEFLATED otherwise

    // POSIX support
    final boolean supportPosix;
    private final UserPrincipal defaultOwner;
    private final GroupPrincipal defaultGroup;
    private final Set<PosixFilePermission> defaultPermissions;

    private final Set<String> supportedFileAttributeViews;

    // Builds a file system over the zip file at zfpath; behavior is tuned by
    // the env map ("create", "encoding", "noCompression", POSIX options, ...).
    // Throws FileSystemNotFoundException if the file is absent and "create"
    // was not requested; IOException if the central directory cannot be read.
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = isTrue(env, "useTempFile");
        this.forceEnd64 = isTrue(env, "forceZIP64End");
        this.defaultCompressionMethod = isTrue(env, "noCompression") ? METHOD_STORED : METHOD_DEFLATED;
        this.supportPosix = isTrue(env, OPT_POSIX);
        this.defaultOwner = initOwner(zfpath, env);
        this.defaultGroup = initGroup(zfpath, env);
        this.defaultPermissions = initPermissions(env);
        this.supportedFileAttributeViews = supportPosix ?
            Set.of("basic", "posix", "zip") : Set.of("basic", "zip");
        if (Files.notExists(zfpath)) {
            // create a new zip if it doesn't exist
            if (isTrue(env, "create")) {
                // write an empty END record so the file is a valid (empty) zip
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)()->Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            // parsing failed: close the channel before propagating, keeping
            // the parse failure as the primary exception
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;
    }

    // returns true if there is a name=true/"true" setting in env
    private static boolean isTrue(Map<String, ?> env, String name) {
        return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
    }

    // Initialize the default owner for files inside the zip archive.
    // If not specified in env, it is the owner of the archive.
    // If no owner can
    // be determined, we try to go with system property "user.name". If that's not
    // accessible, we return "<zipfs_default>".
    private UserPrincipal initOwner(Path zfpath, Map<String, ?> env) throws IOException {
        Object o = env.get(OPT_DEFAULT_OWNER);
        if (o == null) {
            // no explicit default: use the owner of the zip file itself
            try {
                PrivilegedExceptionAction<UserPrincipal> pa = ()->Files.getOwner(zfpath);
                return AccessController.doPrivileged(pa);
            } catch (UnsupportedOperationException | PrivilegedActionException e) {
                // owner lookup unsupported, or the archive vanished underneath
                // us: fall back to the current user's name as the principal
                if (e instanceof UnsupportedOperationException ||
                    e.getCause() instanceof NoSuchFileException)
                {
                    PrivilegedAction<String> pa = ()->System.getProperty("user.name");
                    String userName = AccessController.doPrivileged(pa);
                    return ()->userName;
                } else {
                    // any other privileged failure is a real I/O problem
                    throw new IOException(e);
                }
            }
        }
        if (o instanceof String) {
            if (((String)o).isEmpty()) {
                throw new IllegalArgumentException("Value for property " +
                    OPT_DEFAULT_OWNER + " must not be empty.");
            }
            // lambda serves as an ad-hoc UserPrincipal carrying just the name
            return ()->(String)o;
        }
        if (o instanceof UserPrincipal) {
            return (UserPrincipal)o;
        }
        throw new IllegalArgumentException("Value for property " +
            OPT_DEFAULT_OWNER + " must be of type " + String.class +
            " or " + UserPrincipal.class);
    }

    // Initialize the default group for files inside the zip archive.
    // If not specified in env, we try to determine the group of the zip archive itself.
    // If this is not possible/unsupported, we will return a group principal going by
    // the same name as the default owner.
212 private GroupPrincipal initGroup(Path zfpath, Map<String, ?> env) throws IOException { 213 Object o = env.get(OPT_DEFAULT_GROUP); 214 if (o == null) { 215 try { 216 PosixFileAttributeView zfpv = Files.getFileAttributeView(zfpath, PosixFileAttributeView.class); 217 if (zfpv == null) { 218 return defaultOwner::getName; 219 } 220 PrivilegedExceptionAction<GroupPrincipal> pa = ()->zfpv.readAttributes().group(); 221 return AccessController.doPrivileged(pa); 222 } catch (UnsupportedOperationException | PrivilegedActionException e) { 223 if (e instanceof UnsupportedOperationException || 224 e.getCause() instanceof NoSuchFileException) 225 { 226 return defaultOwner::getName; 227 } else { 228 throw new IOException(e); 229 } 230 } 231 } 232 if (o instanceof String) { 233 if (((String)o).isEmpty()) { 234 throw new IllegalArgumentException("Value for property " + 235 OPT_DEFAULT_GROUP + " must not be empty."); 236 } 237 return ()->(String)o; 238 } 239 if (o instanceof GroupPrincipal) { 240 return (GroupPrincipal)o; 241 } 242 throw new IllegalArgumentException("Value for property " + 243 OPT_DEFAULT_GROUP + " must be of type " + String.class + 244 " or " + GroupPrincipal.class); 245 } 246 247 // Initialize the default permissions for files inside the zip archive. 248 // If not specified in env, it will return 777. 
249 private Set<PosixFilePermission> initPermissions(Map<String, ?> env) { 250 Object o = env.get(OPT_DEFAULT_PERMISSIONS); 251 if (o == null) { 252 return DEFAULT_PERMISSIONS; 253 } 254 if (o instanceof String) { 255 return PosixFilePermissions.fromString((String)o); 256 } 257 if (!(o instanceof Set)) { 258 throw new IllegalArgumentException("Value for property " + 259 OPT_DEFAULT_PERMISSIONS + " must be of type " + String.class + 260 " or " + Set.class); 261 } 262 Set<PosixFilePermission> perms = new HashSet<>(); 263 for (Object o2 : (Set<?>)o) { 264 if (o2 instanceof PosixFilePermission) { 265 perms.add((PosixFilePermission)o2); 266 } else { 267 throw new IllegalArgumentException(OPT_DEFAULT_PERMISSIONS + 268 " must only contain objects of type " + PosixFilePermission.class); 269 } 270 } 271 return perms; 272 } 273 274 @Override 275 public FileSystemProvider provider() { 276 return provider; 277 } 278 279 @Override 280 public String getSeparator() { 281 return "/"; 282 } 283 284 @Override 285 public boolean isOpen() { 286 return isOpen; 287 } 288 289 @Override 290 public boolean isReadOnly() { 291 return readOnly; 292 } 293 294 private void checkWritable() { 295 if (readOnly) { 296 throw new ReadOnlyFileSystemException(); 297 } 298 } 299 300 void setReadOnly() { 301 this.readOnly = true; 302 } 303 304 @Override 305 public Iterable<Path> getRootDirectories() { 306 return List.of(rootdir); 307 } 308 309 ZipPath getRootDir() { 310 return rootdir; 311 } 312 313 @Override 314 public ZipPath getPath(String first, String... 
more) { 315 if (more.length == 0) { 316 return new ZipPath(this, first); 317 } 318 StringBuilder sb = new StringBuilder(); 319 sb.append(first); 320 for (String path : more) { 321 if (path.length() > 0) { 322 if (sb.length() > 0) { 323 sb.append('/'); 324 } 325 sb.append(path); 326 } 327 } 328 return new ZipPath(this, sb.toString()); 329 } 330 331 @Override 332 public UserPrincipalLookupService getUserPrincipalLookupService() { 333 throw new UnsupportedOperationException(); 334 } 335 336 @Override 337 public WatchService newWatchService() { 338 throw new UnsupportedOperationException(); 339 } 340 341 FileStore getFileStore(ZipPath path) { 342 return new ZipFileStore(path); 343 } 344 345 @Override 346 public Iterable<FileStore> getFileStores() { 347 return List.of(new ZipFileStore(rootdir)); 348 } 349 350 @Override 351 public Set<String> supportedFileAttributeViews() { 352 return supportedFileAttributeViews; 353 } 354 355 @Override 356 public String toString() { 357 return zfpath.toString(); 358 } 359 360 Path getZipFile() { 361 return zfpath; 362 } 363 364 private static final String GLOB_SYNTAX = "glob"; 365 private static final String REGEX_SYNTAX = "regex"; 366 367 @Override 368 public PathMatcher getPathMatcher(String syntaxAndInput) { 369 int pos = syntaxAndInput.indexOf(':'); 370 if (pos <= 0 || pos == syntaxAndInput.length()) { 371 throw new IllegalArgumentException(); 372 } 373 String syntax = syntaxAndInput.substring(0, pos); 374 String input = syntaxAndInput.substring(pos + 1); 375 String expr; 376 if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) { 377 expr = toRegexPattern(input); 378 } else { 379 if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) { 380 expr = input; 381 } else { 382 throw new UnsupportedOperationException("Syntax '" + syntax + 383 "' not recognized"); 384 } 385 } 386 // return matcher 387 final Pattern pattern = Pattern.compile(expr); 388 return (path)->pattern.matcher(path.toString()).matches(); 389 } 390 391 @Override 392 public void close() throws 
IOException { 393 beginWrite(); 394 try { 395 if (!isOpen) 396 return; 397 isOpen = false; // set closed 398 } finally { 399 endWrite(); 400 } 401 if (!streams.isEmpty()) { // unlock and close all remaining streams 402 Set<InputStream> copy = new HashSet<>(streams); 403 for (InputStream is : copy) 404 is.close(); 405 } 406 beginWrite(); // lock and sync 407 try { 408 AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> { 409 sync(); return null; 410 }); 411 ch.close(); // close the ch just in case no update 412 // and sync didn't close the ch 413 } catch (PrivilegedActionException e) { 414 throw (IOException)e.getException(); 415 } finally { 416 endWrite(); 417 } 418 419 synchronized (inflaters) { 420 for (Inflater inf : inflaters) 421 inf.end(); 422 } 423 synchronized (deflaters) { 424 for (Deflater def : deflaters) 425 def.end(); 426 } 427 428 IOException ioe = null; 429 synchronized (tmppaths) { 430 for (Path p : tmppaths) { 431 try { 432 AccessController.doPrivileged( 433 (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p)); 434 } catch (PrivilegedActionException e) { 435 IOException x = (IOException)e.getException(); 436 if (ioe == null) 437 ioe = x; 438 else 439 ioe.addSuppressed(x); 440 } 441 } 442 } 443 provider.removeFileSystem(zfpath, this); 444 if (ioe != null) 445 throw ioe; 446 } 447 448 ZipFileAttributes getFileAttributes(byte[] path) 449 throws IOException 450 { 451 beginRead(); 452 try { 453 ensureOpen(); 454 IndexNode inode = getInode(path); 455 if (inode == null) { 456 return null; 457 } else if (inode instanceof Entry) { 458 return (Entry)inode; 459 } else if (inode.pos == -1) { 460 // pseudo directory, uses METHOD_STORED 461 Entry e = supportPosix ? 462 new PosixEntry(inode.name, inode.isdir, METHOD_STORED) : 463 new Entry(inode.name, inode.isdir, METHOD_STORED); 464 e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp; 465 return e; 466 } else { 467 return supportPosix ? 
new PosixEntry(this, inode) : new Entry(this, inode); 468 } 469 } finally { 470 endRead(); 471 } 472 } 473 474 void checkAccess(byte[] path) throws IOException { 475 beginRead(); 476 try { 477 ensureOpen(); 478 // is it necessary to readCEN as a sanity check? 479 if (getInode(path) == null) { 480 throw new NoSuchFileException(toString()); 481 } 482 483 } finally { 484 endRead(); 485 } 486 } 487 488 void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime) 489 throws IOException 490 { 491 checkWritable(); 492 beginWrite(); 493 try { 494 ensureOpen(); 495 Entry e = getEntry(path); // ensureOpen checked 496 if (e == null) 497 throw new NoSuchFileException(getString(path)); 498 if (e.type == Entry.CEN) 499 e.type = Entry.COPY; // copy e 500 if (mtime != null) 501 e.mtime = mtime.toMillis(); 502 if (atime != null) 503 e.atime = atime.toMillis(); 504 if (ctime != null) 505 e.ctime = ctime.toMillis(); 506 update(e); 507 } finally { 508 endWrite(); 509 } 510 } 511 512 void setOwner(byte[] path, UserPrincipal owner) throws IOException { 513 checkWritable(); 514 beginWrite(); 515 try { 516 ensureOpen(); 517 Entry e = getEntry(path); // ensureOpen checked 518 if (e == null) { 519 throw new NoSuchFileException(getString(path)); 520 } 521 // as the owner information is not persistent, we don't need to 522 // change e.type to Entry.COPY 523 if (e instanceof PosixEntry) { 524 ((PosixEntry)e).owner = owner; 525 update(e); 526 } 527 } finally { 528 endWrite(); 529 } 530 } 531 532 void setGroup(byte[] path, GroupPrincipal group) throws IOException { 533 checkWritable(); 534 beginWrite(); 535 try { 536 ensureOpen(); 537 Entry e = getEntry(path); // ensureOpen checked 538 if (e == null) { 539 throw new NoSuchFileException(getString(path)); 540 } 541 // as the group information is not persistent, we don't need to 542 // change e.type to Entry.COPY 543 if (e instanceof PosixEntry) { 544 ((PosixEntry)e).group = group; 545 update(e); 546 } 547 } finally { 548 endWrite(); 
549 } 550 } 551 552 void setPermissions(byte[] path, Set<PosixFilePermission> perms) throws IOException { 553 checkWritable(); 554 beginWrite(); 555 try { 556 ensureOpen(); 557 Entry e = getEntry(path); // ensureOpen checked 558 if (e == null) { 559 throw new NoSuchFileException(getString(path)); 560 } 561 if (e.type == Entry.CEN) { 562 e.type = Entry.COPY; // copy e 563 } 564 e.posixPerms = perms == null ? -1 : ZipUtils.permsToFlags(perms); 565 update(e); 566 } finally { 567 endWrite(); 568 } 569 } 570 571 boolean exists(byte[] path) { 572 beginRead(); 573 try { 574 ensureOpen(); 575 return getInode(path) != null; 576 } finally { 577 endRead(); 578 } 579 } 580 581 boolean isDirectory(byte[] path) { 582 beginRead(); 583 try { 584 IndexNode n = getInode(path); 585 return n != null && n.isDir(); 586 } finally { 587 endRead(); 588 } 589 } 590 591 // returns the list of child paths of "path" 592 Iterator<Path> iteratorOf(ZipPath dir, 593 DirectoryStream.Filter<? super Path> filter) 594 throws IOException 595 { 596 beginWrite(); // iteration of inodes needs exclusive lock 597 try { 598 ensureOpen(); 599 byte[] path = dir.getResolvedPath(); 600 IndexNode inode = getInode(path); 601 if (inode == null) 602 throw new NotDirectoryException(getString(path)); 603 List<Path> list = new ArrayList<>(); 604 IndexNode child = inode.child; 605 while (child != null) { 606 // (1) Assume each path from the zip file itself is "normalized" 607 // (2) IndexNode.name is absolute. see IndexNode(byte[],int,int) 608 // (3) If parent "dir" is relative when ZipDirectoryStream 609 // is created, the returned child path needs to be relative 610 // as well. 
611 ZipPath childPath = new ZipPath(this, child.name, true); 612 ZipPath childFileName = childPath.getFileName(); 613 ZipPath zpath = dir.resolve(childFileName); 614 if (filter == null || filter.accept(zpath)) 615 list.add(zpath); 616 child = child.sibling; 617 } 618 return list.iterator(); 619 } finally { 620 endWrite(); 621 } 622 } 623 624 void createDirectory(byte[] dir, FileAttribute<?>... attrs) throws IOException { 625 checkWritable(); 626 beginWrite(); 627 try { 628 ensureOpen(); 629 if (dir.length == 0 || exists(dir)) // root dir, or existing dir 630 throw new FileAlreadyExistsException(getString(dir)); 631 checkParents(dir); 632 Entry e = supportPosix ? 633 new PosixEntry(dir, Entry.NEW, true, METHOD_STORED, attrs) : 634 new Entry(dir, Entry.NEW, true, METHOD_STORED, attrs); 635 update(e); 636 } finally { 637 endWrite(); 638 } 639 } 640 641 void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options) 642 throws IOException 643 { 644 checkWritable(); 645 if (Arrays.equals(src, dst)) 646 return; // do nothing, src and dst are the same 647 648 beginWrite(); 649 try { 650 ensureOpen(); 651 Entry eSrc = getEntry(src); // ensureOpen checked 652 653 if (eSrc == null) 654 throw new NoSuchFileException(getString(src)); 655 if (eSrc.isDir()) { // spec says to create dst dir 656 createDirectory(dst); 657 return; 658 } 659 boolean hasReplace = false; 660 boolean hasCopyAttrs = false; 661 for (CopyOption opt : options) { 662 if (opt == REPLACE_EXISTING) 663 hasReplace = true; 664 else if (opt == COPY_ATTRIBUTES) 665 hasCopyAttrs = true; 666 } 667 Entry eDst = getEntry(dst); 668 if (eDst != null) { 669 if (!hasReplace) 670 throw new FileAlreadyExistsException(getString(dst)); 671 } else { 672 checkParents(dst); 673 } 674 // copy eSrc entry and change name 675 Entry u = supportPosix ? 
676 new PosixEntry((PosixEntry)eSrc, Entry.COPY) : 677 new Entry(eSrc, Entry.COPY); 678 u.name(dst); 679 if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) { 680 u.type = eSrc.type; // make it the same type 681 if (deletesrc) { // if it's a "rename", take the data 682 u.bytes = eSrc.bytes; 683 u.file = eSrc.file; 684 } else { // if it's not "rename", copy the data 685 if (eSrc.bytes != null) 686 u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length); 687 else if (eSrc.file != null) { 688 u.file = getTempPathForEntry(null); 689 Files.copy(eSrc.file, u.file, REPLACE_EXISTING); 690 } 691 } 692 } 693 if (!hasCopyAttrs) 694 u.mtime = u.atime= u.ctime = System.currentTimeMillis(); 695 update(u); 696 if (deletesrc) 697 updateDelete(eSrc); 698 } finally { 699 endWrite(); 700 } 701 } 702 703 // Returns an output stream for writing the contents into the specified 704 // entry. 705 OutputStream newOutputStream(byte[] path, OpenOption... options) 706 throws IOException 707 { 708 checkWritable(); 709 boolean hasCreateNew = false; 710 boolean hasCreate = false; 711 boolean hasAppend = false; 712 boolean hasTruncate = false; 713 for (OpenOption opt : options) { 714 if (opt == READ) 715 throw new IllegalArgumentException("READ not allowed"); 716 if (opt == CREATE_NEW) 717 hasCreateNew = true; 718 if (opt == CREATE) 719 hasCreate = true; 720 if (opt == APPEND) 721 hasAppend = true; 722 if (opt == TRUNCATE_EXISTING) 723 hasTruncate = true; 724 } 725 if (hasAppend && hasTruncate) 726 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 727 beginRead(); // only need a readlock, the "update()" will 728 try { // try to obtain a writelock when the os is 729 ensureOpen(); // being closed. 
730 Entry e = getEntry(path); 731 if (e != null) { 732 if (e.isDir() || hasCreateNew) 733 throw new FileAlreadyExistsException(getString(path)); 734 if (hasAppend) { 735 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 736 try (InputStream is = getInputStream(e)) { 737 is.transferTo(os); 738 } 739 return os; 740 } 741 return getOutputStream(supportPosix ? 742 new PosixEntry((PosixEntry)e, Entry.NEW) : new Entry(e, Entry.NEW)); 743 } else { 744 if (!hasCreate && !hasCreateNew) 745 throw new NoSuchFileException(getString(path)); 746 checkParents(path); 747 return getOutputStream(supportPosix ? 748 new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod) : 749 new Entry(path, Entry.NEW, false, defaultCompressionMethod)); 750 } 751 } finally { 752 endRead(); 753 } 754 } 755 756 // Returns an input stream for reading the contents of the specified 757 // file entry. 758 InputStream newInputStream(byte[] path) throws IOException { 759 beginRead(); 760 try { 761 ensureOpen(); 762 Entry e = getEntry(path); 763 if (e == null) 764 throw new NoSuchFileException(getString(path)); 765 if (e.isDir()) 766 throw new FileSystemException(getString(path), "is a directory", null); 767 return getInputStream(e); 768 } finally { 769 endRead(); 770 } 771 } 772 773 private void checkOptions(Set<? extends OpenOption> options) { 774 // check for options of null type and option is an intance of StandardOpenOption 775 for (OpenOption option : options) { 776 if (option == null) 777 throw new NullPointerException(); 778 if (!(option instanceof StandardOpenOption)) 779 throw new IllegalArgumentException(); 780 } 781 if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING)) 782 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 783 } 784 785 // Returns an output SeekableByteChannel for either 786 // (1) writing the contents of a new entry, if the entry doesn't exist, or 787 // (2) updating/replacing the contents of an existing entry. 
    // Note: The content of the channel is not compressed until the
    // channel is closed
    private class EntryOutputChannel extends ByteArrayChannel {
        final Entry e;

        EntryOutputChannel(Entry e) {
            // presize to the known entry size where available, else 8K
            super(e.size > 0? (int)e.size : 8192, false);
            this.e = e;
            if (e.mtime == -1)
                e.mtime = System.currentTimeMillis();
            if (e.method == -1)
                e.method = defaultCompressionMethod;
            // store size, compressed size, and crc-32 in datadescriptor
            e.flag = FLAG_DATADESCR;
            if (zc.isUTF8())
                e.flag |= FLAG_USE_UTF8;
        }

        @Override
        public void close() throws IOException {
            // will update the entry
            // (compression happens here, inside getOutputStream/close)
            try (OutputStream os = getOutputStream(e)) {
                os.write(toByteArray());
            }
            super.close();
        }
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();    // only need a read lock, the "update()" will obtain
                            // the write lock when the channel is closed
            try {
                Entry e = getEntry(path);
                if (e != null) {
                    if (e.isDir() || options.contains(CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    SeekableByteChannel sbc =
                        new EntryOutputChannel(supportPosix ?
                            new PosixEntry((PosixEntry)e, Entry.NEW) :
                            new Entry(e, Entry.NEW));
                    if (options.contains(APPEND)) {
                        // copy the old contents into the channel so writes land
                        // after the existing data
                        try (InputStream is = getInputStream(e)) {  // copyover
                            byte[] buf = new byte[8192];
                            ByteBuffer bb = ByteBuffer.wrap(buf);
                            int n;
                            while ((n = is.read(buf)) != -1) {
                                bb.position(0);
                                bb.limit(n);
                                sbc.write(bb);
                            }
                        }
                    }
                    return sbc;
                }
                if (!options.contains(CREATE) && !options.contains(CREATE_NEW))
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return new EntryOutputChannel(
                    supportPosix ?
                        new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod, attrs) :
                        new Entry(path, Entry.NEW, false, defaultCompressionMethod, attrs));
            } finally {
                endRead();
            }
        } else {
            // read-only channel: decompress the whole entry into memory
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                try (InputStream is = getInputStream(e)) {
                    // TBD: if (e.size < NNNNN);
                    return new ByteArrayChannel(is.readAllBytes(), true);
                }
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                // the temp file may already exist, so CREATE_NEW must not be
                // passed through to the underlying provider
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            // FILECH entries already live in a temp file; reuse it directly
            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : (
                supportPosix ?
                    new PosixEntry(path, tmpfile, Entry.FILECH, attrs) :
                    new Entry(path, tmpfile, Entry.FILECH, attrs));
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = defaultCompressionMethod;
            }
            // is there a better way to hook into the FileChannel's close method?
            // Wrapper that forwards everything to fch; implCloseChannel is the
            // hook that flushes the temp file back into the zip (on write) or
            // deletes it (on read).
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                {
                    // mapping a temp-file-backed entry is not supported
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        // record final size and schedule the entry update
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);
                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private final Set<ExistingChannelCloser> exChClosers = new HashSet<>();

    private final Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<>());

    // Creates a temp file next to the zip file and, when "path" names an
    // existing entry, copies that entry's contents into it.
    // NOTE(review): tmppaths registration presumably happens inside
    // createTempFileInSameDirectoryAs — not visible here, confirm.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
private void checkParents(byte[] path) throws IOException {
    // Walk upward toward the root under the read lock; fail on the first
    // ancestor that has no inode (i.e. no real or pseudo directory entry).
    beginRead();
    try {
        while ((path = getParent(path)) != null &&
                path != ROOTPATH) {
            if (!inodes.containsKey(IndexNode.keyOf(path))) {
                throw new NoSuchFileException(getString(path));
            }
        }
    } finally {
        endRead();
    }
}

// Returns the parent path of the given entry path, or ROOTPATH when
// the entry sits directly under the root (or is the root itself).
private static byte[] getParent(byte[] path) {
    int off = getParentOff(path);
    if (off <= 1)
        return ROOTPATH;
    return Arrays.copyOf(path, off);
}

// Returns the offset of the '/' separating the parent from the last
// name component; a trailing '/' (directory marker) is skipped first.
private static int getParentOff(byte[] path) {
    int off = path.length - 1;
    if (off > 0 && path[off] == '/')  // isDirectory
        off--;
    while (off > 0 && path[off] != '/') { off--; }
    return off;
}

// Acquire/release helpers for the file system's single read-write lock.
private void beginWrite() {
    rwlock.writeLock().lock();
}

private void endWrite() {
    rwlock.writeLock().unlock();
}

private void beginRead() {
    rwlock.readLock().lock();
}

private void endRead() {
    rwlock.readLock().unlock();
}

///////////////////////////////////////////////////////////////////

private volatile boolean isOpen = true;
private final SeekableByteChannel ch; // channel to the zipfile
final byte[] cen;     // CEN & ENDHDR
private END end;
private long locpos;  // position of first LOC header (usually 0)

// Guards all structural state (inodes tree, update flags).
private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

// name -> pos (in cen), IndexNode itself can be used as a "key"
private LinkedHashMap<IndexNode, IndexNode> inodes;

// Encodes an entry name with this file system's zip coder.
final byte[] getBytes(String name) {
    return zc.getBytes(name);
}

// Decodes entry-name bytes with this file system's zip coder.
final String getString(byte[] name) {
    return zc.toString(name);
}

// Last-resort cleanup if the file system is garbage collected while
// still open. NOTE(review): finalize() is deprecated; a java.lang.ref
// Cleaner would be the modern replacement — confirm before changing.
@SuppressWarnings("deprecation")
protected void finalize() throws IOException {
    close();
}

// Reads len bytes of data from the specified offset into buf.
// Returns the total number of bytes read.
// Every byte is read from the channel here (except the cen, which is
// read once up front into a byte array).
final long readFullyAt(byte[] buf, int off, long len, long pos)
    throws IOException
{
    ByteBuffer bb = ByteBuffer.wrap(buf);
    bb.position(off);
    bb.limit((int)(off + len));
    return readFullyAt(bb, pos);
}

// Positions the channel and reads into bb. Synchronized on ch because
// position() + read() must be atomic with respect to other readers.
private long readFullyAt(ByteBuffer bb, long pos) throws IOException {
    synchronized(ch) {
        return ch.position(pos).read(bb);
    }
}

// Searches for end of central directory (END) header. The contents of
// the END header will be read and placed in endbuf. Returns the file
// position of the END header, otherwise returns -1 if the END header
// was not found or an error occurred.
private END findEND() throws IOException {
    byte[] buf = new byte[READBLOCKSZ];
    long ziplen = ch.size();
    // The END record must sit within the last END_MAXLEN bytes of the
    // file (header + maximum comment length).
    long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
    long minPos = minHDR - (buf.length - ENDHDR);

    // Scan backwards in overlapping blocks; blocks overlap by ENDHDR
    // bytes so a signature straddling a block boundary is still seen.
    for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) {
        int off = 0;
        if (pos < 0) {
            // Pretend there are some NUL bytes before start of file
            off = (int)-pos;
            Arrays.fill(buf, 0, off, (byte)0);
        }
        int len = buf.length - off;
        if (readFullyAt(buf, off, len, pos + off) != len)
            throw new ZipException("zip END header not found");

        // Now scan the block backwards for END header signature
        for (int i = buf.length - ENDHDR; i >= 0; i--) {
            if (buf[i] == (byte)'P' &&
                buf[i+1] == (byte)'K' &&
                buf[i+2] == (byte)'\005' &&
                buf[i+3] == (byte)'\006' &&
                (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                // Found END header: the comment-length check above
                // confirms the record really ends at EOF.
                buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                END end = new END();
                // end.endsub = ENDSUB(buf); // not used
                end.centot = ENDTOT(buf);
                end.cenlen = ENDSIZ(buf);
                end.cenoff = ENDOFF(buf);
                // end.comlen = ENDCOM(buf); // not used
                end.endpos = pos + i;
                // try if there is zip64 end;
                byte[] loc64 = new byte[ZIP64_LOCHDR];
                if (end.endpos < ZIP64_LOCHDR ||
                    readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                        != loc64.length ||
                    !locator64SigAt(loc64, 0)) {
                    return end;
                }
                long end64pos = ZIP64_LOCOFF(loc64);
                byte[] end64buf = new byte[ZIP64_ENDHDR];
                if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                        != end64buf.length ||
                    !end64SigAt(end64buf, 0)) {
                    return end;
                }
                // end64 found,
                long cenlen64 = ZIP64_ENDSIZ(end64buf);
                long cenoff64 = ZIP64_ENDOFF(end64buf);
                long centot64 = ZIP64_ENDTOT(end64buf);
                // double-check: zip64 values must agree with the 32-bit
                // END values unless those were saturated markers.
                if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL ||
                    cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL ||
                    centot64 != end.centot && end.centot != ZIP64_MINVAL32) {
                    return end;
                }
                // to use the end64 values
                end.cenlen = cenlen64;
                end.cenoff = cenoff64;
                end.centot = (int)centot64; // assume total < 2g
                end.endpos = end64pos;
                return end;
            }
        }
    }
    throw new ZipException("zip END header not found");
}

// Links node into the directory tree under root, creating pseudo
// directory IndexNodes for any missing parents on the way up.
// Caller must hold the write lock.
private void makeParentDirs(IndexNode node, IndexNode root) {
    IndexNode parent;
    ParentLookup lookup = new ParentLookup();
    while (true) {
        int off = getParentOff(node.name);
        // parent is root
        if (off <= 1) {
            node.sibling = root.child;
            root.child = node;
            break;
        }
        // parent exists
        lookup = lookup.as(node.name, off);
        if (inodes.containsKey(lookup)) {
            parent = inodes.get(lookup);
            node.sibling = parent.child;
            parent.child = node;
            break;
        }
        // parent does not exist, add new pseudo directory entry
        parent = new IndexNode(Arrays.copyOf(node.name, off), true);
        inodes.put(parent, parent);
        node.sibling = parent.child;
        parent.child = node;
        node = parent;
    }
}

// ZIP directory has two issues:
// (1) ZIP spec does not require the ZIP file to include
//     directory entry
// (2) all entries are not stored/organized in a "tree"
//     structure.
// A possible solution is to build the node tree ourself as
// implemented below.
private void buildNodeTree() {
    beginWrite();
    try {
        IndexNode root = inodes.remove(LOOKUPKEY.as(ROOTPATH));
        if (root == null) {
            root = new IndexNode(ROOTPATH, true);
        }
        IndexNode[] nodes = inodes.values().toArray(new IndexNode[0]);
        inodes.put(root, root);
        for (IndexNode node : nodes) {
            makeParentDirs(node, root);
        }
    } finally {
        endWrite();
    }
}

// Unlinks inode from its parent's child/sibling chain. Caller must
// hold the write lock and guarantee the parent exists.
private void removeFromTree(IndexNode inode) {
    IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
    IndexNode child = parent.child;
    if (child.equals(inode)) {
        parent.child = child.sibling;
    } else {
        IndexNode last = child;
        while ((child = child.sibling) != null) {
            if (child.equals(inode)) {
                last.sibling = child.sibling;
                break;
            } else {
                last = child;
            }
        }
    }
}

// Reads the zip file central directory and populates the inode map.
// NOTE(review): the original comment mentioned zip->msg and a
// knownTotal parameter; both date from the C implementation and do
// not apply to this Java port.
// Reads the central directory (CEN) table, builds the inode map and
// the directory tree. Returns the raw CEN bytes (with the END header
// appended), or null when the zip contains only an END header.
private byte[] initCEN() throws IOException {
    end = findEND();
    if (end.endpos == 0) {
        inodes = new LinkedHashMap<>(10);
        locpos = 0;
        buildNodeTree();
        return null;  // only END header present
    }
    if (end.cenlen > end.endpos)
        throw new ZipException("invalid END header (bad central directory size)");
    long cenpos = end.endpos - end.cenlen;  // position of CEN table

    // Get position of first local file (LOC) header, taking into
    // account that there may be a stub prefixed to the zip file.
    locpos = cenpos - end.cenoff;
    if (locpos < 0)
        throw new ZipException("invalid END header (bad central directory offset)");

    // read in the CEN and END
    byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
    if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
        throw new ZipException("read CEN tables failed");
    }
    // Iterate through the entries in the central directory
    inodes = new LinkedHashMap<>(end.centot + 1);
    int pos = 0;
    int limit = cen.length - ENDHDR;
    while (pos < limit) {
        if (!cenSigAt(cen, pos))
            throw new ZipException("invalid CEN header (bad signature)");
        int method = CENHOW(cen, pos);
        int nlen = CENNAM(cen, pos);
        int elen = CENEXT(cen, pos);
        int clen = CENCOM(cen, pos);
        // bit 0 of the general purpose flag marks an encrypted entry,
        // which this file system does not support
        if ((CENFLG(cen, pos) & 1) != 0) {
            throw new ZipException("invalid CEN header (encrypted entry)");
        }
        if (method != METHOD_STORED && method != METHOD_DEFLATED) {
            throw new ZipException("invalid CEN header (unsupported compression method: " + method + ")");
        }
        if (pos + CENHDR + nlen > limit) {
            throw new ZipException("invalid CEN header (bad header size)");
        }
        IndexNode inode = new IndexNode(cen, pos, nlen);
        inodes.put(inode, inode);

        // skip ext and comment
        pos += (CENHDR + nlen + elen + clen);
    }
    // the CEN table must end exactly where the END header begins
    if (pos + ENDHDR != cen.length) {
        throw new ZipException("invalid CEN header (bad header size)");
    }
    buildNodeTree();
    return cen;
}

// Throws if this file system has already been closed.
private void ensureOpen() {
    if (!isOpen)
        throw new ClosedFileSystemException();
}

// Creates a new empty temporary file in the same directory as the
// specified file. A variant of Files.createTempFile. The path is
// tracked in tmppaths so it can be cleaned up later.
private Path createTempFileInSameDirectoryAs(Path path)
    throws IOException
{
    Path parent = path.toAbsolutePath().getParent();
    Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
    Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
    tmppaths.add(tmpPath);
    return tmpPath;
}

////////////////////update & sync //////////////////////////////////////

// true when the in-memory state differs from the zip on disk and a
// sync() is required
private boolean hasUpdate = false;

// shared key. consumer guarantees the "writeLock" before use it.
private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

// Removes the given inode from both the tree and the inode map,
// marking the file system dirty.
private void updateDelete(IndexNode inode) {
    beginWrite();
    try {
        removeFromTree(inode);
        inodes.remove(inode);
        hasUpdate = true;
    } finally {
        endWrite();
    }
}

// Inserts or replaces the entry in the inode map; a NEW/FILECH/COPY
// entry is also linked under its parent in the tree.
private void update(Entry e) {
    beginWrite();
    try {
        IndexNode old = inodes.put(e, e);
        if (old != null) {
            removeFromTree(old);
        }
        if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
            IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
            e.sibling = parent.child;
            parent.child = e;
        }
        hasUpdate = true;
    } finally {
        endWrite();
    }
}

// copy over the whole LOC entry (header if necessary, data and ext) from
// old zip to the new one.
1403 private long copyLOCEntry(Entry e, boolean updateHeader, 1404 OutputStream os, 1405 long written, byte[] buf) 1406 throws IOException 1407 { 1408 long locoff = e.locoff; // where to read 1409 e.locoff = written; // update the e.locoff with new value 1410 1411 // calculate the size need to write out 1412 long size = 0; 1413 // if there is A ext 1414 if ((e.flag & FLAG_DATADESCR) != 0) { 1415 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1416 size = 24; 1417 else 1418 size = 16; 1419 } 1420 // read loc, use the original loc.elen/nlen 1421 // 1422 // an extra byte after loc is read, which should be the first byte of the 1423 // 'name' field of the loc. if this byte is '/', which means the original 1424 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1425 // is used to output the loc, in which the leading "/" will be removed 1426 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1427 throw new ZipException("loc: reading failed"); 1428 1429 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1430 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1431 size += e.csize; 1432 written = e.writeLOC(os) + size; 1433 } else { 1434 os.write(buf, 0, LOCHDR); // write out the loc header 1435 locoff += LOCHDR; 1436 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1437 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1438 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1439 written = LOCHDR + size; 1440 } 1441 int n; 1442 while (size > 0 && 1443 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1444 { 1445 if (size < n) 1446 n = (int)size; 1447 os.write(buf, 0, n); 1448 size -= n; 1449 locoff += n; 1450 } 1451 return written; 1452 } 1453 1454 private long writeEntry(Entry e, OutputStream os) 1455 throws IOException { 1456 1457 if (e.bytes == null && e.file == null) // dir, 0-length data 1458 return 0; 1459 1460 long written = 0; 1461 if (e.csize > 0 && (e.crc != 0 || e.size == 0)) { 1462 // 
pre-compressed entry, write directly to output stream 1463 writeTo(e, os); 1464 } else { 1465 try (OutputStream os2 = (e.method == METHOD_STORED) ? 1466 new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) { 1467 writeTo(e, os2); 1468 } 1469 } 1470 written += e.csize; 1471 if ((e.flag & FLAG_DATADESCR) != 0) { 1472 written += e.writeEXT(os); 1473 } 1474 return written; 1475 } 1476 1477 private void writeTo(Entry e, OutputStream os) throws IOException { 1478 if (e.bytes != null) { 1479 os.write(e.bytes, 0, e.bytes.length); 1480 } else if (e.file != null) { 1481 if (e.type == Entry.NEW || e.type == Entry.FILECH) { 1482 try (InputStream is = Files.newInputStream(e.file)) { 1483 is.transferTo(os); 1484 } 1485 } 1486 Files.delete(e.file); 1487 tmppaths.remove(e.file); 1488 } 1489 } 1490 1491 // sync the zip file system, if there is any update 1492 private void sync() throws IOException { 1493 // check ex-closer 1494 if (!exChClosers.isEmpty()) { 1495 for (ExistingChannelCloser ecc : exChClosers) { 1496 if (ecc.closeAndDeleteIfDone()) { 1497 exChClosers.remove(ecc); 1498 } 1499 } 1500 } 1501 if (!hasUpdate) 1502 return; 1503 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1504 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) { 1505 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1506 long written = 0; 1507 byte[] buf = null; 1508 Entry e; 1509 1510 // write loc 1511 for (IndexNode inode : inodes.values()) { 1512 if (inode instanceof Entry) { // an updated inode 1513 e = (Entry)inode; 1514 try { 1515 if (e.type == Entry.COPY) { 1516 // entry copy: the only thing changed is the "name" 1517 // and "nlen" in LOC header, so we update/rewrite the 1518 // LOC in new file and simply copy the rest (data and 1519 // ext) without enflating/deflating from the old zip 1520 // file LOC entry. 
1521 if (buf == null) 1522 buf = new byte[8192]; 1523 written += copyLOCEntry(e, true, os, written, buf); 1524 } else { // NEW, FILECH or CEN 1525 e.locoff = written; 1526 written += e.writeLOC(os); // write loc header 1527 written += writeEntry(e, os); 1528 } 1529 elist.add(e); 1530 } catch (IOException x) { 1531 x.printStackTrace(); // skip any in-accurate entry 1532 } 1533 } else { // unchanged inode 1534 if (inode.pos == -1) { 1535 continue; // pseudo directory node 1536 } 1537 if (inode.name.length == 1 && inode.name[0] == '/') { 1538 continue; // no root '/' directory even if it 1539 // exists in original zip/jar file. 1540 } 1541 e = supportPosix ? new PosixEntry(this, inode) : new Entry(this, inode); 1542 try { 1543 if (buf == null) 1544 buf = new byte[8192]; 1545 written += copyLOCEntry(e, false, os, written, buf); 1546 elist.add(e); 1547 } catch (IOException x) { 1548 x.printStackTrace(); // skip any wrong entry 1549 } 1550 } 1551 } 1552 1553 // now write back the cen and end table 1554 end.cenoff = written; 1555 for (Entry entry : elist) { 1556 written += entry.writeCEN(os); 1557 } 1558 end.centot = elist.size(); 1559 end.cenlen = written - end.cenoff; 1560 end.write(os, written, forceEnd64); 1561 } 1562 if (!streams.isEmpty()) { 1563 // 1564 // There are outstanding input streams open on existing "ch", 1565 // so, don't close the "cha" and delete the "file for now, let 1566 // the "ex-channel-closer" to handle them 1567 Path path = createTempFileInSameDirectoryAs(zfpath); 1568 ExistingChannelCloser ecc = new ExistingChannelCloser(path, 1569 ch, 1570 streams); 1571 Files.move(zfpath, path, REPLACE_EXISTING); 1572 exChClosers.add(ecc); 1573 streams = Collections.synchronizedSet(new HashSet<>()); 1574 } else { 1575 ch.close(); 1576 Files.delete(zfpath); 1577 } 1578 1579 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1580 hasUpdate = false; // clear 1581 } 1582 1583 IndexNode getInode(byte[] path) { 1584 return 
inodes.get(IndexNode.keyOf(Objects.requireNonNull(path, "path"))); 1585 } 1586 1587 /** 1588 * Return the IndexNode from the root tree. If it doesn't exist, 1589 * it gets created along with all parent directory IndexNodes. 1590 */ 1591 IndexNode getOrCreateInode(byte[] path, boolean isdir) { 1592 IndexNode node = getInode(path); 1593 // if node exists, return it 1594 if (node != null) { 1595 return node; 1596 } 1597 1598 // otherwise create new pseudo node and parent directory hierarchy 1599 node = new IndexNode(path, isdir); 1600 beginWrite(); 1601 try { 1602 makeParentDirs(node, Objects.requireNonNull(inodes.get(IndexNode.keyOf(ROOTPATH)), "no root node found")); 1603 return node; 1604 } finally { 1605 endWrite(); 1606 } 1607 } 1608 1609 private Entry getEntry(byte[] path) throws IOException { 1610 IndexNode inode = getInode(path); 1611 if (inode instanceof Entry) 1612 return (Entry)inode; 1613 if (inode == null || inode.pos == -1) 1614 return null; 1615 return supportPosix ? new PosixEntry(this, inode): new Entry(this, inode); 1616 } 1617 1618 public void deleteFile(byte[] path, boolean failIfNotExists) 1619 throws IOException 1620 { 1621 checkWritable(); 1622 IndexNode inode = getInode(path); 1623 if (inode == null) { 1624 if (path != null && path.length == 0) 1625 throw new ZipException("root directory </> can't not be delete"); 1626 if (failIfNotExists) 1627 throw new NoSuchFileException(getString(path)); 1628 } else { 1629 if (inode.isDir() && inode.child != null) 1630 throw new DirectoryNotEmptyException(getString(path)); 1631 updateDelete(inode); 1632 } 1633 } 1634 1635 // Returns an out stream for either 1636 // (1) writing the contents of a new entry, if the entry exists, or 1637 // (2) updating/replacing the contents of the specified existing entry. 
1638 private OutputStream getOutputStream(Entry e) throws IOException { 1639 if (e.mtime == -1) 1640 e.mtime = System.currentTimeMillis(); 1641 if (e.method == -1) 1642 e.method = defaultCompressionMethod; 1643 // store size, compressed size, and crc-32 in datadescr 1644 e.flag = FLAG_DATADESCR; 1645 if (zc.isUTF8()) 1646 e.flag |= FLAG_USE_UTF8; 1647 OutputStream os; 1648 if (useTempFile) { 1649 e.file = getTempPathForEntry(null); 1650 os = Files.newOutputStream(e.file, WRITE); 1651 } else { 1652 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1653 } 1654 if (e.method == METHOD_DEFLATED) { 1655 return new DeflatingEntryOutputStream(e, os); 1656 } else { 1657 return new EntryOutputStream(e, os); 1658 } 1659 } 1660 1661 private class EntryOutputStream extends FilterOutputStream { 1662 private final Entry e; 1663 private long written; 1664 private boolean isClosed; 1665 1666 EntryOutputStream(Entry e, OutputStream os) { 1667 super(os); 1668 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1669 // this.written = 0; 1670 } 1671 1672 @Override 1673 public synchronized void write(int b) throws IOException { 1674 out.write(b); 1675 written += 1; 1676 } 1677 1678 @Override 1679 public synchronized void write(byte[] b, int off, int len) 1680 throws IOException { 1681 out.write(b, off, len); 1682 written += len; 1683 } 1684 1685 @Override 1686 public synchronized void close() throws IOException { 1687 if (isClosed) { 1688 return; 1689 } 1690 isClosed = true; 1691 e.size = written; 1692 if (out instanceof ByteArrayOutputStream) 1693 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1694 super.close(); 1695 update(e); 1696 } 1697 } 1698 1699 // Output stream returned when writing "deflated" entries into memory, 1700 // to enable eager (possibly parallel) deflation and reduce memory required. 
// Output stream for a "deflated" entry: compresses eagerly while the
// caller writes, tracks the CRC-32 of the uncompressed data, and
// publishes the entry via update() on close.
private class DeflatingEntryOutputStream extends DeflaterOutputStream {
    private final CRC32 crc;
    private final Entry e;
    private boolean isClosed;

    DeflatingEntryOutputStream(Entry e, OutputStream os) {
        super(os, getDeflater());  // deflater comes from the shared pool
        this.e = Objects.requireNonNull(e, "Zip entry is null");
        this.crc = new CRC32();
    }

    @Override
    public synchronized void write(int b) throws IOException {
        super.write(b);
        crc.update(b);
    }

    @Override
    public synchronized void write(byte[] b, int off, int len)
        throws IOException {
        super.write(b, off, len);
        crc.update(b, off, len);
    }

    @Override
    public synchronized void close() throws IOException {
        if (isClosed)
            return;
        isClosed = true;
        // finish() flushes the deflater so getBytesRead/Written are final
        finish();
        e.size = def.getBytesRead();
        e.csize = def.getBytesWritten();
        e.crc = crc.getValue();
        if (out instanceof ByteArrayOutputStream)
            e.bytes = ((ByteArrayOutputStream)out).toByteArray();
        super.close();
        update(e);
        releaseDeflater(def);  // return the deflater to the pool
    }
}

// Wrapper output stream class to write out a "stored" entry.
// (1) this class does not close the underlying out stream when
//     being closed.
// (2) no need to be "synchronized", only used by sync()
private class EntryOutputStreamCRC32 extends FilterOutputStream {
    private final CRC32 crc;
    private final Entry e;
    private long written;    // byte count = size = csize for STORED
    private boolean isClosed;

    EntryOutputStreamCRC32(Entry e, OutputStream os) {
        super(os);
        this.e = Objects.requireNonNull(e, "Zip entry is null");
        this.crc = new CRC32();
    }

    @Override
    public void write(int b) throws IOException {
        out.write(b);
        crc.update(b);
        written += 1;
    }

    @Override
    public void write(byte[] b, int off, int len)
        throws IOException {
        out.write(b, off, len);
        crc.update(b, off, len);
        written += len;
    }

    @Override
    public void close() {
        if (isClosed)
            return;
        isClosed = true;
        // record size/crc only; deliberately does NOT close or flush
        // the underlying stream (see class comment above)
        e.size = e.csize = written;
        e.crc = crc.getValue();
    }
}

// Wrapper output stream class to write out a "deflated" entry.
// (1) this class does not close the underlying out stream when
//     being closed.
// (2) no need to be "synchronized", only used by sync()
private class EntryOutputStreamDef extends DeflaterOutputStream {
    private final CRC32 crc;
    private final Entry e;
    private boolean isClosed;

    EntryOutputStreamDef(Entry e, OutputStream os) {
        super(os, getDeflater());  // deflater comes from the shared pool
        this.e = Objects.requireNonNull(e, "Zip entry is null");
        this.crc = new CRC32();
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
        super.write(b, off, len);
        crc.update(b, off, len);
    }

    @Override
    public void close() throws IOException {
        if (isClosed)
            return;
        isClosed = true;
        // finish the deflater but do NOT close the underlying stream
        finish();
        e.size = def.getBytesRead();
        e.csize = def.getBytesWritten();
        e.crc = crc.getValue();
        releaseDeflater(def);
    }
}

// Returns an input stream over the entry's data, wrapping it in an
// inflating stream when the entry is DEFLATED. The stream is tracked
// in 'streams' so sync() knows about outstanding readers.
private InputStream getInputStream(Entry e)
    throws IOException
{
    InputStream eis;
    if (e.type == Entry.NEW) {
        if (e.bytes != null)
            eis = new ByteArrayInputStream(e.bytes);
        else if (e.file != null)
            eis = Files.newInputStream(e.file);
        else
            throw new ZipException("update entry data is missing");
    } else if (e.type == Entry.FILECH) {
        // FILECH result is un-compressed.
        eis = Files.newInputStream(e.file);
        // TBD: wrap to hook close()
        // streams.add(eis);
        return eis;
    } else {  // untouched CEN or COPY
        eis = new EntryInputStream(e, ch);
    }
    if (e.method == METHOD_DEFLATED) {
        // MORE: Compute good size for inflater stream:
        long bufSize = e.size + 2; // Inflater likes a bit of slack
        if (bufSize > 65536)
            bufSize = 8192;
        final long size = e.size;
        eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
            private boolean isClosed = false;
            public void close() throws IOException {
                if (!isClosed) {
                    releaseInflater(inf);  // return inflater to the pool
                    this.in.close();
                    isClosed = true;
                    streams.remove(this);
                }
            }
            // Override fill() method to provide an extra "dummy" byte
            // at the end of the input stream. This is required when
            // using the "nowrap" Inflater option. (it appears the new
            // zlib in 7 does not need it, but keep it for now)
            protected void fill() throws IOException {
                if (eof) {
                    throw new EOFException(
                        "Unexpected end of ZLIB input stream");
                }
                len = this.in.read(buf, 0, buf.length);
                if (len == -1) {
                    buf[0] = 0;
                    len = 1;
                    eof = true;
                }
                inf.setInput(buf, 0, len);
            }
            private boolean eof;

            public int available() {
                if (isClosed)
                    return 0;
                // remaining uncompressed bytes, clamped to int range
                long avail = size - inf.getBytesWritten();
                return avail > (long) Integer.MAX_VALUE ?
                    Integer.MAX_VALUE : (int) avail;
            }
        };
    } else if (e.method == METHOD_STORED) {
        // TBD: wrap/ it does not seem necessary
    } else {
        throw new ZipException("invalid compression method");
    }
    streams.add(eis);
    return eis;
}

// Inner class implementing the input stream used to read
// a (possibly compressed) zip file entry.
private class EntryInputStream extends InputStream {
    private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                            // point to a new channel after sync()
    private long pos;                       // current position within entry data;
                                            // negative until the LOC header is parsed
    private long rem;                       // number of remaining bytes within entry

    EntryInputStream(Entry e, SeekableByteChannel zfch)
        throws IOException
    {
        this.zfch = zfch;
        rem = e.csize;
        pos = e.locoff;
        if (pos == -1) {
            Entry e2 = getEntry(e.name);
            if (e2 == null) {
                throw new ZipException("invalid loc for entry <" + getString(e.name) + ">");
            }
            pos = e2.locoff;
        }
        pos = -pos;  // lazy initialize the real data offset
    }

    public int read(byte[] b, int off, int len) throws IOException {
        ensureOpen();
        initDataPos();
        if (rem == 0) {
            return -1;
        }
        if (len <= 0) {
            return 0;
        }
        if (len > rem) {
            len = (int) rem;
        }
        // readFullyAt()
        long n;
        ByteBuffer bb = ByteBuffer.wrap(b);
        bb.position(off);
        bb.limit(off + len);
        // position()+read() must be atomic on the shared channel
        synchronized(zfch) {
            n = zfch.position(pos).read(bb);
        }
        if (n > 0) {
            pos += n;
            rem -= n;
        }
        if (rem == 0) {
            close();
        }
        return (int)n;
    }

    public int read() throws IOException {
        byte[] b = new byte[1];
        if (read(b, 0, 1) == 1) {
            return b[0] & 0xff;
        } else {
            return -1;
        }
    }

    public long skip(long n) {
        ensureOpen();
        if (n > rem)
            n = rem;
        pos += n;
        rem -= n;
        if (rem == 0) {
            close();
        }
        return n;
    }

    public int available() {
        return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
    }

    public void close() {
        rem = 0;
        streams.remove(this);
    }

    // Resolves the lazily-stored negative LOC offset into the real
    // data position by reading the LOC header once.
    private void initDataPos() throws IOException {
        if (pos <= 0) {
            pos = -pos + locpos;
            byte[] buf = new byte[LOCHDR];
            if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                throw new ZipException("invalid loc " + pos + " for entry reading");
            }
            pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
        }
    }
}

// Maximum number of de/inflater we cache
private final int MAX_FLATER = 20;
// List of available Inflater objects for decompression
private final List<Inflater> inflaters = new ArrayList<>();

// Gets an inflater from the list of available inflaters or allocates
// a new one.
private Inflater getInflater() {
    synchronized (inflaters) {
        int size = inflaters.size();
        if (size > 0) {
            return inflaters.remove(size - 1);
        } else {
            return new Inflater(true);
        }
    }
}

// Releases the specified inflater to the list of available inflaters.
private void releaseInflater(Inflater inf) {
    synchronized (inflaters) {
        if (inflaters.size() < MAX_FLATER) {
            inf.reset();
            inflaters.add(inf);
        } else {
            inf.end();
        }
    }
}

// List of available Deflater objects for compression
private final List<Deflater> deflaters = new ArrayList<>();

// Gets a deflater from the list of available deflaters or allocates
// a new one.
private Deflater getDeflater() {
    synchronized (deflaters) {
        int size = deflaters.size();
        if (size > 0) {
            return deflaters.remove(size - 1);
        } else {
            return new Deflater(Deflater.DEFAULT_COMPRESSION, true);
        }
    }
}

// Releases the specified deflater to the list of available deflaters.
2032 private void releaseDeflater(Deflater def) { 2033 synchronized (deflaters) { 2034 if (inflaters.size() < MAX_FLATER) { 2035 def.reset(); 2036 deflaters.add(def); 2037 } else { 2038 def.end(); 2039 } 2040 } 2041 } 2042 2043 // End of central directory record 2044 static class END { 2045 // The fields that are commented out below are not used by anyone and write() uses "0" 2046 // int disknum; 2047 // int sdisknum; 2048 // int endsub; 2049 int centot; // 4 bytes 2050 long cenlen; // 4 bytes 2051 long cenoff; // 4 bytes 2052 // int comlen; // comment length 2053 // byte[] comment; 2054 2055 // members of Zip64 end of central directory locator 2056 // int diskNum; 2057 long endpos; 2058 // int disktot; 2059 2060 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 2061 boolean hasZip64 = forceEnd64; // false; 2062 long xlen = cenlen; 2063 long xoff = cenoff; 2064 if (xlen >= ZIP64_MINVAL) { 2065 xlen = ZIP64_MINVAL; 2066 hasZip64 = true; 2067 } 2068 if (xoff >= ZIP64_MINVAL) { 2069 xoff = ZIP64_MINVAL; 2070 hasZip64 = true; 2071 } 2072 int count = centot; 2073 if (count >= ZIP64_MINVAL32) { 2074 count = ZIP64_MINVAL32; 2075 hasZip64 = true; 2076 } 2077 if (hasZip64) { 2078 //zip64 end of central directory record 2079 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 2080 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 2081 writeShort(os, 45); // version made by 2082 writeShort(os, 45); // version needed to extract 2083 writeInt(os, 0); // number of this disk 2084 writeInt(os, 0); // central directory start disk 2085 writeLong(os, centot); // number of directory entries on disk 2086 writeLong(os, centot); // number of directory entries 2087 writeLong(os, cenlen); // length of central directory 2088 writeLong(os, cenoff); // offset of central directory 2089 2090 //zip64 end of central directory locator 2091 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 2092 writeInt(os, 0); // zip64 END start disk 2093 
writeLong(os, offset); // offset of zip64 END 2094 writeInt(os, 1); // total number of disks (?) 2095 } 2096 writeInt(os, ENDSIG); // END record signature 2097 writeShort(os, 0); // number of this disk 2098 writeShort(os, 0); // central directory start disk 2099 writeShort(os, count); // number of directory entries on disk 2100 writeShort(os, count); // total number of directory entries 2101 writeInt(os, xlen); // length of central directory 2102 writeInt(os, xoff); // offset of central directory 2103 writeShort(os, 0); // zip file comment, not used 2104 } 2105 } 2106 2107 // Internal node that links a "name" to its pos in cen table. 2108 // The node itself can be used as a "key" to lookup itself in 2109 // the HashMap inodes. 2110 static class IndexNode { 2111 byte[] name; 2112 int hashcode; // node is hashable/hashed by its name 2113 boolean isdir; 2114 int pos = -1; // position in cen table, -1 means the 2115 // entry does not exist in zip file 2116 IndexNode child; // first child 2117 IndexNode sibling; // next sibling 2118 2119 IndexNode() {} 2120 2121 IndexNode(byte[] name, boolean isdir) { 2122 name(name); 2123 this.isdir = isdir; 2124 this.pos = -1; 2125 } 2126 2127 IndexNode(byte[] name, int pos) { 2128 name(name); 2129 this.pos = pos; 2130 } 2131 2132 // constructor for initCEN() (1) remove trailing '/' (2) pad leading '/' 2133 IndexNode(byte[] cen, int pos, int nlen) { 2134 int noff = pos + CENHDR; 2135 if (cen[noff + nlen - 1] == '/') { 2136 isdir = true; 2137 nlen--; 2138 } 2139 if (nlen > 0 && cen[noff] == '/') { 2140 name = Arrays.copyOfRange(cen, noff, noff + nlen); 2141 } else { 2142 name = new byte[nlen + 1]; 2143 System.arraycopy(cen, noff, name, 1, nlen); 2144 name[0] = '/'; 2145 } 2146 name(normalize(name)); 2147 this.pos = pos; 2148 } 2149 2150 // Normalize the IndexNode.name field. 
2151 private byte[] normalize(byte[] path) { 2152 int len = path.length; 2153 if (len == 0) 2154 return path; 2155 byte prevC = 0; 2156 for (int pathPos = 0; pathPos < len; pathPos++) { 2157 byte c = path[pathPos]; 2158 if (c == '/' && prevC == '/') 2159 return normalize(path, pathPos - 1); 2160 prevC = c; 2161 } 2162 if (len > 1 && prevC == '/') { 2163 return Arrays.copyOf(path, len - 1); 2164 } 2165 return path; 2166 } 2167 2168 private byte[] normalize(byte[] path, int off) { 2169 // As we know we have at least one / to trim, we can reduce 2170 // the size of the resulting array 2171 byte[] to = new byte[path.length - 1]; 2172 int pathPos = 0; 2173 while (pathPos < off) { 2174 to[pathPos] = path[pathPos]; 2175 pathPos++; 2176 } 2177 int toPos = pathPos; 2178 byte prevC = 0; 2179 while (pathPos < path.length) { 2180 byte c = path[pathPos++]; 2181 if (c == '/' && prevC == '/') 2182 continue; 2183 to[toPos++] = c; 2184 prevC = c; 2185 } 2186 if (toPos > 1 && to[toPos - 1] == '/') 2187 toPos--; 2188 return (toPos == to.length) ? 
to : Arrays.copyOf(to, toPos); 2189 } 2190 2191 private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>(); 2192 2193 final static IndexNode keyOf(byte[] name) { // get a lookup key; 2194 IndexNode key = cachedKey.get(); 2195 if (key == null) { 2196 key = new IndexNode(name, -1); 2197 cachedKey.set(key); 2198 } 2199 return key.as(name); 2200 } 2201 2202 final void name(byte[] name) { 2203 this.name = name; 2204 this.hashcode = Arrays.hashCode(name); 2205 } 2206 2207 final IndexNode as(byte[] name) { // reuse the node, mostly 2208 name(name); // as a lookup "key" 2209 return this; 2210 } 2211 2212 boolean isDir() { 2213 return isdir; 2214 } 2215 2216 @Override 2217 public boolean equals(Object other) { 2218 if (!(other instanceof IndexNode)) { 2219 return false; 2220 } 2221 if (other instanceof ParentLookup) { 2222 return ((ParentLookup)other).equals(this); 2223 } 2224 return Arrays.equals(name, ((IndexNode)other).name); 2225 } 2226 2227 @Override 2228 public int hashCode() { 2229 return hashcode; 2230 } 2231 2232 @Override 2233 public String toString() { 2234 return new String(name) + (isdir ? 
" (dir)" : " ") + ", index: " + pos; 2235 } 2236 } 2237 2238 static class Entry extends IndexNode implements ZipFileAttributes { 2239 static final int CEN = 1; // entry read from cen 2240 static final int NEW = 2; // updated contents in bytes or file 2241 static final int FILECH = 3; // fch update in "file" 2242 static final int COPY = 4; // copy of a CEN entry 2243 2244 byte[] bytes; // updated content bytes 2245 Path file; // use tmp file to store bytes; 2246 int type = CEN; // default is the entry read from cen 2247 2248 // entry attributes 2249 int version; 2250 int flag; 2251 int posixPerms = -1; // posix permissions 2252 int method = -1; // compression method 2253 long mtime = -1; // last modification time (in DOS time) 2254 long atime = -1; // last access time 2255 long ctime = -1; // create time 2256 long crc = -1; // crc-32 of entry data 2257 long csize = -1; // compressed size of entry data 2258 long size = -1; // uncompressed size of entry data 2259 byte[] extra; 2260 2261 // CEN 2262 // The fields that are commented out below are not used by anyone and write() uses "0" 2263 // int versionMade; 2264 // int disk; 2265 // int attrs; 2266 // long attrsEx; 2267 long locoff; 2268 byte[] comment; 2269 2270 Entry(byte[] name, boolean isdir, int method) { 2271 name(name); 2272 this.isdir = isdir; 2273 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 2274 this.crc = 0; 2275 this.size = 0; 2276 this.csize = 0; 2277 this.method = method; 2278 } 2279 2280 @SuppressWarnings("unchecked") 2281 Entry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) { 2282 this(name, isdir, method); 2283 this.type = type; 2284 for (FileAttribute<?> attr : attrs) { 2285 String attrName = attr.name(); 2286 if (attrName.equals("posix:permissions")) { 2287 posixPerms = ZipUtils.permsToFlags((Set<PosixFilePermission>)attr.value()); 2288 } 2289 } 2290 } 2291 2292 Entry(byte[] name, Path file, int type, FileAttribute<?>... 
attrs) { 2293 this(name, type, false, METHOD_STORED, attrs); 2294 this.file = file; 2295 } 2296 2297 Entry(Entry e, int type) { 2298 name(e.name); 2299 this.isdir = e.isdir; 2300 this.version = e.version; 2301 this.ctime = e.ctime; 2302 this.atime = e.atime; 2303 this.mtime = e.mtime; 2304 this.crc = e.crc; 2305 this.size = e.size; 2306 this.csize = e.csize; 2307 this.method = e.method; 2308 this.extra = e.extra; 2309 /* 2310 this.versionMade = e.versionMade; 2311 this.disk = e.disk; 2312 this.attrs = e.attrs; 2313 this.attrsEx = e.attrsEx; 2314 */ 2315 this.locoff = e.locoff; 2316 this.comment = e.comment; 2317 this.posixPerms = e.posixPerms; 2318 this.type = type; 2319 } 2320 2321 Entry(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2322 readCEN(zipfs, inode); 2323 } 2324 2325 // Calculates a suitable base for the version number to 2326 // be used for fields version made by/version needed to extract. 2327 // The lower bytes of these 2 byte fields hold the version number 2328 // (value/10 = major; value%10 = minor) 2329 // For different features certain minimum versions apply: 2330 // stored = 10 (1.0), deflated = 20 (2.0), zip64 = 45 (4.5) 2331 private int version(boolean zip64) throws ZipException { 2332 if (zip64) { 2333 return 45; 2334 } 2335 if (method == METHOD_DEFLATED) 2336 return 20; 2337 else if (method == METHOD_STORED) 2338 return 10; 2339 throw new ZipException("unsupported compression method"); 2340 } 2341 2342 /** 2343 * Adds information about compatibility of file attribute information 2344 * to a version value. 2345 */ 2346 private int versionMadeBy(int version) { 2347 return (posixPerms < 0) ? 
version : 2348 VERSION_BASE_UNIX | (version & 0xff); 2349 } 2350 2351 ///////////////////// CEN ////////////////////// 2352 private void readCEN(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2353 byte[] cen = zipfs.cen; 2354 int pos = inode.pos; 2355 if (!cenSigAt(cen, pos)) 2356 throw new ZipException("invalid CEN header (bad signature)"); 2357 version = CENVER(cen, pos); 2358 flag = CENFLG(cen, pos); 2359 method = CENHOW(cen, pos); 2360 mtime = dosToJavaTime(CENTIM(cen, pos)); 2361 crc = CENCRC(cen, pos); 2362 csize = CENSIZ(cen, pos); 2363 size = CENLEN(cen, pos); 2364 int nlen = CENNAM(cen, pos); 2365 int elen = CENEXT(cen, pos); 2366 int clen = CENCOM(cen, pos); 2367 /* 2368 versionMade = CENVEM(cen, pos); 2369 disk = CENDSK(cen, pos); 2370 attrs = CENATT(cen, pos); 2371 attrsEx = CENATX(cen, pos); 2372 */ 2373 if (CENVEM_FA(cen, pos) == FILE_ATTRIBUTES_UNIX) { 2374 posixPerms = CENATX_PERMS(cen, pos) & 0xFFF; // 12 bits for setuid, setgid, sticky + perms 2375 } 2376 locoff = CENOFF(cen, pos); 2377 pos += CENHDR; 2378 this.name = inode.name; 2379 this.isdir = inode.isdir; 2380 this.hashcode = inode.hashcode; 2381 2382 pos += nlen; 2383 if (elen > 0) { 2384 extra = Arrays.copyOfRange(cen, pos, pos + elen); 2385 pos += elen; 2386 readExtra(zipfs); 2387 } 2388 if (clen > 0) { 2389 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2390 } 2391 } 2392 2393 private int writeCEN(OutputStream os) throws IOException { 2394 long csize0 = csize; 2395 long size0 = size; 2396 long locoff0 = locoff; 2397 int elen64 = 0; // extra for ZIP64 2398 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 2399 int elenEXTT = 0; // extra for Extended Timestamp 2400 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2401 2402 byte[] zname = isdir ? toDirectoryPath(name) : name; 2403 2404 // confirm size/length 2405 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2406 int elen = (extra != null) ? 
extra.length : 0; 2407 int eoff = 0; 2408 int clen = (comment != null) ? comment.length : 0; 2409 if (csize >= ZIP64_MINVAL) { 2410 csize0 = ZIP64_MINVAL; 2411 elen64 += 8; // csize(8) 2412 } 2413 if (size >= ZIP64_MINVAL) { 2414 size0 = ZIP64_MINVAL; // size(8) 2415 elen64 += 8; 2416 } 2417 if (locoff >= ZIP64_MINVAL) { 2418 locoff0 = ZIP64_MINVAL; 2419 elen64 += 8; // offset(8) 2420 } 2421 if (elen64 != 0) { 2422 elen64 += 4; // header and data sz 4 bytes 2423 } 2424 boolean zip64 = (elen64 != 0); 2425 int version0 = version(zip64); 2426 while (eoff + 4 < elen) { 2427 int tag = SH(extra, eoff); 2428 int sz = SH(extra, eoff + 2); 2429 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2430 foundExtraTime = true; 2431 } 2432 eoff += (4 + sz); 2433 } 2434 if (!foundExtraTime) { 2435 if (isWindows) { // use NTFS 2436 elenNTFS = 36; // total 36 bytes 2437 } else { // Extended Timestamp otherwise 2438 elenEXTT = 9; // only mtime in cen 2439 } 2440 } 2441 writeInt(os, CENSIG); // CEN header signature 2442 writeShort(os, versionMadeBy(version0)); // version made by 2443 writeShort(os, version0); // version needed to extract 2444 writeShort(os, flag); // general purpose bit flag 2445 writeShort(os, method); // compression method 2446 // last modification time 2447 writeInt(os, (int)javaToDosTime(mtime)); 2448 writeInt(os, crc); // crc-32 2449 writeInt(os, csize0); // compressed size 2450 writeInt(os, size0); // uncompressed size 2451 writeShort(os, nlen); 2452 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2453 2454 if (comment != null) { 2455 writeShort(os, Math.min(clen, 0xffff)); 2456 } else { 2457 writeShort(os, 0); 2458 } 2459 writeShort(os, 0); // starting disk number 2460 writeShort(os, 0); // internal file attributes (unused) 2461 writeInt(os, posixPerms > 0 ? 
posixPerms << 16 : 0); // external file 2462 // attributes, used for storing posix 2463 // permissions 2464 writeInt(os, locoff0); // relative offset of local header 2465 writeBytes(os, zname, 1, nlen); 2466 if (zip64) { 2467 writeShort(os, EXTID_ZIP64);// Zip64 extra 2468 writeShort(os, elen64 - 4); // size of "this" extra block 2469 if (size0 == ZIP64_MINVAL) 2470 writeLong(os, size); 2471 if (csize0 == ZIP64_MINVAL) 2472 writeLong(os, csize); 2473 if (locoff0 == ZIP64_MINVAL) 2474 writeLong(os, locoff); 2475 } 2476 if (elenNTFS != 0) { 2477 writeShort(os, EXTID_NTFS); 2478 writeShort(os, elenNTFS - 4); 2479 writeInt(os, 0); // reserved 2480 writeShort(os, 0x0001); // NTFS attr tag 2481 writeShort(os, 24); 2482 writeLong(os, javaToWinTime(mtime)); 2483 writeLong(os, javaToWinTime(atime)); 2484 writeLong(os, javaToWinTime(ctime)); 2485 } 2486 if (elenEXTT != 0) { 2487 writeShort(os, EXTID_EXTT); 2488 writeShort(os, elenEXTT - 4); 2489 if (ctime == -1) 2490 os.write(0x3); // mtime and atime 2491 else 2492 os.write(0x7); // mtime, atime and ctime 2493 writeInt(os, javaToUnixTime(mtime)); 2494 } 2495 if (extra != null) // whatever not recognized 2496 writeBytes(os, extra); 2497 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2498 writeBytes(os, comment); 2499 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2500 } 2501 2502 ///////////////////// LOC ////////////////////// 2503 2504 private int writeLOC(OutputStream os) throws IOException { 2505 byte[] zname = isdir ? toDirectoryPath(name) : name; 2506 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2507 int elen = (extra != null) ? 
extra.length : 0; 2508 boolean foundExtraTime = false; // if extra timestamp present 2509 int eoff = 0; 2510 int elen64 = 0; 2511 boolean zip64 = false; 2512 int elenEXTT = 0; 2513 int elenNTFS = 0; 2514 writeInt(os, LOCSIG); // LOC header signature 2515 if ((flag & FLAG_DATADESCR) != 0) { 2516 writeShort(os, version(false)); // version needed to extract 2517 writeShort(os, flag); // general purpose bit flag 2518 writeShort(os, method); // compression method 2519 // last modification time 2520 writeInt(os, (int)javaToDosTime(mtime)); 2521 // store size, uncompressed size, and crc-32 in data descriptor 2522 // immediately following compressed entry data 2523 writeInt(os, 0); 2524 writeInt(os, 0); 2525 writeInt(os, 0); 2526 } else { 2527 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2528 elen64 = 20; //headid(2) + size(2) + size(8) + csize(8) 2529 zip64 = true; 2530 } 2531 writeShort(os, version(zip64)); // version needed to extract 2532 writeShort(os, flag); // general purpose bit flag 2533 writeShort(os, method); // compression method 2534 // last modification time 2535 writeInt(os, (int)javaToDosTime(mtime)); 2536 writeInt(os, crc); // crc-32 2537 if (zip64) { 2538 writeInt(os, ZIP64_MINVAL); 2539 writeInt(os, ZIP64_MINVAL); 2540 } else { 2541 writeInt(os, csize); // compressed size 2542 writeInt(os, size); // uncompressed size 2543 } 2544 } 2545 while (eoff + 4 < elen) { 2546 int tag = SH(extra, eoff); 2547 int sz = SH(extra, eoff + 2); 2548 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2549 foundExtraTime = true; 2550 } 2551 eoff += (4 + sz); 2552 } 2553 if (!foundExtraTime) { 2554 if (isWindows) { 2555 elenNTFS = 36; // NTFS, total 36 bytes 2556 } else { // on unix use "ext time" 2557 elenEXTT = 9; 2558 if (atime != -1) 2559 elenEXTT += 4; 2560 if (ctime != -1) 2561 elenEXTT += 4; 2562 } 2563 } 2564 writeShort(os, nlen); 2565 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2566 writeBytes(os, zname, 1, nlen); 2567 if (zip64) { 2568 writeShort(os, 
EXTID_ZIP64); 2569 writeShort(os, 16); 2570 writeLong(os, size); 2571 writeLong(os, csize); 2572 } 2573 if (elenNTFS != 0) { 2574 writeShort(os, EXTID_NTFS); 2575 writeShort(os, elenNTFS - 4); 2576 writeInt(os, 0); // reserved 2577 writeShort(os, 0x0001); // NTFS attr tag 2578 writeShort(os, 24); 2579 writeLong(os, javaToWinTime(mtime)); 2580 writeLong(os, javaToWinTime(atime)); 2581 writeLong(os, javaToWinTime(ctime)); 2582 } 2583 if (elenEXTT != 0) { 2584 writeShort(os, EXTID_EXTT); 2585 writeShort(os, elenEXTT - 4);// size for the folowing data block 2586 int fbyte = 0x1; 2587 if (atime != -1) // mtime and atime 2588 fbyte |= 0x2; 2589 if (ctime != -1) // mtime, atime and ctime 2590 fbyte |= 0x4; 2591 os.write(fbyte); // flags byte 2592 writeInt(os, javaToUnixTime(mtime)); 2593 if (atime != -1) 2594 writeInt(os, javaToUnixTime(atime)); 2595 if (ctime != -1) 2596 writeInt(os, javaToUnixTime(ctime)); 2597 } 2598 if (extra != null) { 2599 writeBytes(os, extra); 2600 } 2601 return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT; 2602 } 2603 2604 // Data Descriptor 2605 private int writeEXT(OutputStream os) throws IOException { 2606 writeInt(os, EXTSIG); // EXT header signature 2607 writeInt(os, crc); // crc-32 2608 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2609 writeLong(os, csize); 2610 writeLong(os, size); 2611 return 24; 2612 } else { 2613 writeInt(os, csize); // compressed size 2614 writeInt(os, size); // uncompressed size 2615 return 16; 2616 } 2617 } 2618 2619 // read NTFS, UNIX and ZIP64 data from cen.extra 2620 private void readExtra(ZipFileSystem zipfs) throws IOException { 2621 if (extra == null) 2622 return; 2623 int elen = extra.length; 2624 int off = 0; 2625 int newOff = 0; 2626 while (off + 4 < elen) { 2627 // extra spec: HeaderID+DataSize+Data 2628 int pos = off; 2629 int tag = SH(extra, pos); 2630 int sz = SH(extra, pos + 2); 2631 pos += 4; 2632 if (pos + sz > elen) // invalid data 2633 break; 2634 switch (tag) { 2635 case EXTID_ZIP64 
: 2636 if (size == ZIP64_MINVAL) { 2637 if (pos + 8 > elen) // invalid zip64 extra 2638 break; // fields, just skip 2639 size = LL(extra, pos); 2640 pos += 8; 2641 } 2642 if (csize == ZIP64_MINVAL) { 2643 if (pos + 8 > elen) 2644 break; 2645 csize = LL(extra, pos); 2646 pos += 8; 2647 } 2648 if (locoff == ZIP64_MINVAL) { 2649 if (pos + 8 > elen) 2650 break; 2651 locoff = LL(extra, pos); 2652 } 2653 break; 2654 case EXTID_NTFS: 2655 if (sz < 32) 2656 break; 2657 pos += 4; // reserved 4 bytes 2658 if (SH(extra, pos) != 0x0001) 2659 break; 2660 if (SH(extra, pos + 2) != 24) 2661 break; 2662 // override the loc field, datatime here is 2663 // more "accurate" 2664 mtime = winToJavaTime(LL(extra, pos + 4)); 2665 atime = winToJavaTime(LL(extra, pos + 12)); 2666 ctime = winToJavaTime(LL(extra, pos + 20)); 2667 break; 2668 case EXTID_EXTT: 2669 // spec says the Extened timestamp in cen only has mtime 2670 // need to read the loc to get the extra a/ctime, if flag 2671 // "zipinfo-time" is not specified to false; 2672 // there is performance cost (move up to loc and read) to 2673 // access the loc table foreach entry; 2674 if (zipfs.noExtt) { 2675 if (sz == 5) 2676 mtime = unixToJavaTime(LG(extra, pos + 1)); 2677 break; 2678 } 2679 byte[] buf = new byte[LOCHDR]; 2680 if (zipfs.readFullyAt(buf, 0, buf.length , locoff) 2681 != buf.length) 2682 throw new ZipException("loc: reading failed"); 2683 if (!locSigAt(buf, 0)) 2684 throw new ZipException("loc: wrong sig ->" 2685 + Long.toString(getSig(buf, 0), 16)); 2686 int locElen = LOCEXT(buf); 2687 if (locElen < 9) // EXTT is at least 9 bytes 2688 break; 2689 int locNlen = LOCNAM(buf); 2690 buf = new byte[locElen]; 2691 if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen) 2692 != buf.length) 2693 throw new ZipException("loc extra: reading failed"); 2694 int locPos = 0; 2695 while (locPos + 4 < buf.length) { 2696 int locTag = SH(buf, locPos); 2697 int locSZ = SH(buf, locPos + 2); 2698 locPos += 4; 2699 if (locTag != 
EXTID_EXTT) { 2700 locPos += locSZ; 2701 continue; 2702 } 2703 int end = locPos + locSZ - 4; 2704 int flag = CH(buf, locPos++); 2705 if ((flag & 0x1) != 0 && locPos <= end) { 2706 mtime = unixToJavaTime(LG(buf, locPos)); 2707 locPos += 4; 2708 } 2709 if ((flag & 0x2) != 0 && locPos <= end) { 2710 atime = unixToJavaTime(LG(buf, locPos)); 2711 locPos += 4; 2712 } 2713 if ((flag & 0x4) != 0 && locPos <= end) { 2714 ctime = unixToJavaTime(LG(buf, locPos)); 2715 } 2716 break; 2717 } 2718 break; 2719 default: // unknown tag 2720 System.arraycopy(extra, off, extra, newOff, sz + 4); 2721 newOff += (sz + 4); 2722 } 2723 off += (sz + 4); 2724 } 2725 if (newOff != 0 && newOff != extra.length) 2726 extra = Arrays.copyOf(extra, newOff); 2727 else 2728 extra = null; 2729 } 2730 2731 @Override 2732 public String toString() { 2733 StringBuilder sb = new StringBuilder(1024); 2734 Formatter fm = new Formatter(sb); 2735 fm.format(" name : %s%n", new String(name)); 2736 fm.format(" creationTime : %tc%n", creationTime().toMillis()); 2737 fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis()); 2738 fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis()); 2739 fm.format(" isRegularFile : %b%n", isRegularFile()); 2740 fm.format(" isDirectory : %b%n", isDirectory()); 2741 fm.format(" isSymbolicLink : %b%n", isSymbolicLink()); 2742 fm.format(" isOther : %b%n", isOther()); 2743 fm.format(" fileKey : %s%n", fileKey()); 2744 fm.format(" size : %d%n", size()); 2745 fm.format(" compressedSize : %d%n", compressedSize()); 2746 fm.format(" crc : %x%n", crc()); 2747 fm.format(" method : %d%n", method()); 2748 Set<PosixFilePermission> permissions = storedPermissions().orElse(null); 2749 if (permissions != null) { 2750 fm.format(" permissions : %s%n", permissions); 2751 } 2752 fm.close(); 2753 return sb.toString(); 2754 } 2755 2756 ///////// basic file attributes /////////// 2757 @Override 2758 public FileTime creationTime() { 2759 return FileTime.fromMillis(ctime == -1 ? 
mtime : ctime); 2760 } 2761 2762 @Override 2763 public boolean isDirectory() { 2764 return isDir(); 2765 } 2766 2767 @Override 2768 public boolean isOther() { 2769 return false; 2770 } 2771 2772 @Override 2773 public boolean isRegularFile() { 2774 return !isDir(); 2775 } 2776 2777 @Override 2778 public FileTime lastAccessTime() { 2779 return FileTime.fromMillis(atime == -1 ? mtime : atime); 2780 } 2781 2782 @Override 2783 public FileTime lastModifiedTime() { 2784 return FileTime.fromMillis(mtime); 2785 } 2786 2787 @Override 2788 public long size() { 2789 return size; 2790 } 2791 2792 @Override 2793 public boolean isSymbolicLink() { 2794 return false; 2795 } 2796 2797 @Override 2798 public Object fileKey() { 2799 return null; 2800 } 2801 2802 ///////// zip file attributes /////////// 2803 2804 @Override 2805 public long compressedSize() { 2806 return csize; 2807 } 2808 2809 @Override 2810 public long crc() { 2811 return crc; 2812 } 2813 2814 @Override 2815 public int method() { 2816 return method; 2817 } 2818 2819 @Override 2820 public byte[] extra() { 2821 if (extra != null) 2822 return Arrays.copyOf(extra, extra.length); 2823 return null; 2824 } 2825 2826 @Override 2827 public byte[] comment() { 2828 if (comment != null) 2829 return Arrays.copyOf(comment, comment.length); 2830 return null; 2831 } 2832 2833 @Override 2834 public Optional<Set<PosixFilePermission>> storedPermissions() { 2835 Set<PosixFilePermission> perms = null; 2836 if (posixPerms != -1) { 2837 perms = new HashSet<>(PosixFilePermission.values().length); 2838 for (PosixFilePermission perm : PosixFilePermission.values()) { 2839 if ((posixPerms & ZipUtils.permToFlag(perm)) != 0) { 2840 perms.add(perm); 2841 } 2842 } 2843 } 2844 return Optional.ofNullable(perms); 2845 } 2846 } 2847 2848 final class PosixEntry extends Entry implements PosixFileAttributes { 2849 private UserPrincipal owner = defaultOwner; 2850 private GroupPrincipal group = defaultGroup; 2851 2852 PosixEntry(byte[] name, boolean isdir, 
int method) { 2853 super(name, isdir, method); 2854 } 2855 2856 PosixEntry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) { 2857 super(name, type, isdir, method, attrs); 2858 } 2859 2860 PosixEntry(byte[] name, Path file, int type, FileAttribute<?>... attrs) { 2861 super(name, file, type, attrs); 2862 } 2863 2864 PosixEntry(PosixEntry e, int type) { 2865 super(e, type); 2866 this.owner = e.owner; 2867 this.group = e.group; 2868 } 2869 2870 PosixEntry(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2871 super(zipfs, inode); 2872 } 2873 2874 @Override 2875 public UserPrincipal owner() { 2876 return owner; 2877 } 2878 2879 @Override 2880 public GroupPrincipal group() { 2881 return group; 2882 } 2883 2884 @Override 2885 public Set<PosixFilePermission> permissions() { 2886 return storedPermissions().orElse(Set.copyOf(defaultPermissions)); 2887 } 2888 } 2889 2890 private static class ExistingChannelCloser { 2891 private final Path path; 2892 private final SeekableByteChannel ch; 2893 private final Set<InputStream> streams; 2894 ExistingChannelCloser(Path path, 2895 SeekableByteChannel ch, 2896 Set<InputStream> streams) { 2897 this.path = path; 2898 this.ch = ch; 2899 this.streams = streams; 2900 } 2901 2902 /** 2903 * If there are no more outstanding streams, close the channel and 2904 * delete the backing file 2905 * 2906 * @return true if we're done and closed the backing file, 2907 * otherwise false 2908 * @throws IOException 2909 */ 2910 private boolean closeAndDeleteIfDone() throws IOException { 2911 if (streams.isEmpty()) { 2912 ch.close(); 2913 Files.delete(path); 2914 return true; 2915 } 2916 return false; 2917 } 2918 } 2919 2920 // purely for parent lookup, so we don't have to copy the parent 2921 // name every time 2922 static class ParentLookup extends IndexNode { 2923 int len; 2924 ParentLookup() {} 2925 2926 final ParentLookup as(byte[] name, int len) { // as a lookup "key" 2927 name(name, len); 2928 return this; 2929 
} 2930 2931 void name(byte[] name, int len) { 2932 this.name = name; 2933 this.len = len; 2934 // calculate the hashcode the same way as Arrays.hashCode() does 2935 int result = 1; 2936 for (int i = 0; i < len; i++) 2937 result = 31 * result + name[i]; 2938 this.hashcode = result; 2939 } 2940 2941 @Override 2942 public boolean equals(Object other) { 2943 if (!(other instanceof IndexNode)) { 2944 return false; 2945 } 2946 byte[] oname = ((IndexNode)other).name; 2947 return Arrays.equals(name, 0, len, 2948 oname, 0, oname.length); 2949 } 2950 } 2951 }