1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package jdk.nio.zipfs; 27 28 import java.io.BufferedOutputStream; 29 import java.io.ByteArrayInputStream; 30 import java.io.ByteArrayOutputStream; 31 import java.io.EOFException; 32 import java.io.FilterOutputStream; 33 import java.io.IOException; 34 import java.io.InputStream; 35 import java.io.OutputStream; 36 import java.nio.ByteBuffer; 37 import java.nio.MappedByteBuffer; 38 import java.nio.channels.FileChannel; 39 import java.nio.channels.FileLock; 40 import java.nio.channels.ReadableByteChannel; 41 import java.nio.channels.SeekableByteChannel; 42 import java.nio.channels.WritableByteChannel; 43 import java.nio.file.*; 44 import java.nio.file.attribute.*; 45 import java.nio.file.spi.FileSystemProvider; 46 import java.security.AccessController; 47 import java.security.PrivilegedAction; 48 import java.security.PrivilegedActionException; 49 import java.security.PrivilegedExceptionAction; 50 import java.util.*; 51 import java.util.concurrent.locks.ReadWriteLock; 52 import java.util.concurrent.locks.ReentrantReadWriteLock; 53 import java.util.regex.Pattern; 54 import java.util.zip.CRC32; 55 import java.util.zip.Deflater; 56 import java.util.zip.DeflaterOutputStream; 57 import java.util.zip.Inflater; 58 import java.util.zip.InflaterInputStream; 59 import java.util.zip.ZipException; 60 61 import static java.lang.Boolean.TRUE; 62 import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; 63 import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; 64 import static java.nio.file.StandardOpenOption.APPEND; 65 import static java.nio.file.StandardOpenOption.CREATE; 66 import static java.nio.file.StandardOpenOption.CREATE_NEW; 67 import static java.nio.file.StandardOpenOption.READ; 68 import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; 69 import static java.nio.file.StandardOpenOption.WRITE; 70 import static jdk.nio.zipfs.ZipConstants.*; 71 import static jdk.nio.zipfs.ZipUtils.*; 72 73 /** 74 * A FileSystem built on a zip file 75 * 76 
 * @author Xueming Shen
 */
class ZipFileSystem extends FileSystem {
    // statics
    private static final boolean isWindows = AccessController.doPrivileged(
        (PrivilegedAction<Boolean>)()->System.getProperty("os.name")
                                             .startsWith("Windows"));
    private static final byte[] ROOTPATH = new byte[] { '/' };

    // env-map property names controlling POSIX attribute support
    private static final String OPT_POSIX = "enablePosixFileAttributes";
    private static final String OPT_DEFAULT_OWNER = "defaultOwner";
    private static final String OPT_DEFAULT_GROUP = "defaultGroup";
    private static final String OPT_DEFAULT_PERMISSIONS = "defaultPermissions";

    // permissions used when none are configured in env: rwxrwxrwx (777)
    private static final Set<PosixFilePermission> DEFAULT_PERMISSIONS =
        PosixFilePermissions.fromString("rwxrwxrwx");

    private final ZipFileSystemProvider provider;
    private final Path zfpath;     // path of the underlying zip file
    final ZipCoder zc;             // encoder/decoder for entry names/comments
    private final ZipPath rootdir;
    private boolean readOnly; // readonly file system, false by default

    // default time stamp for pseudo entries
    private final long zfsDefaultTimeStamp = System.currentTimeMillis();

    // configurable by env map
    private final boolean noExtt;        // see readExtra()
    private final boolean useTempFile;   // use a temp file for newOS, default
                                         // is to use BAOS for better performance
    private final boolean forceEnd64;
    private final int defaultCompressionMethod; // METHOD_STORED if "noCompression=true"
                                                // METHOD_DEFLATED otherwise

    // POSIX support
    final boolean supportPosix;
    private final UserPrincipal defaultOwner;
    private final GroupPrincipal defaultGroup;
    private final Set<PosixFilePermission> defaultPermissions;

    private final Set<String> supportedFileAttributeViews;

    // Opens (or, with "create"=true, creates) the zip file at zfpath and
    // reads its central directory. On a CEN parse failure the channel is
    // closed before the exception propagates, so no descriptor leaks.
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = isTrue(env, "useTempFile");
        this.forceEnd64 = isTrue(env, "forceZIP64End");
        this.defaultCompressionMethod = isTrue(env, "noCompression") ? METHOD_STORED : METHOD_DEFLATED;
        this.supportPosix = isTrue(env, OPT_POSIX);
        this.defaultOwner = initOwner(zfpath, env);
        this.defaultGroup = initGroup(zfpath, env);
        this.defaultPermissions = initPermissions(env);
        this.supportedFileAttributeViews = supportPosix ?
            Set.of("basic", "posix", "zip") : Set.of("basic", "zip");
        if (Files.notExists(zfpath)) {
            // create a new zip if it doesn't exist
            if (isTrue(env, "create")) {
                // write an empty END record so the file is a valid (empty) zip
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)()->Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            // don't leak the channel if the central directory is unreadable
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;
    }

    // returns true if there is a name=true/"true" setting in env
    private static boolean isTrue(Map<String, ?> env, String name) {
        return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
    }

    // Initialize the default owner for files inside the zip archive.
    // If not specified in env, it is the owner of the archive.
If no owner can 173 // be determined, we try to go with system property "user.name". If that's not 174 // accessible, we return "<zipfs_default>". 175 private UserPrincipal initOwner(Path zfpath, Map<String, ?> env) throws IOException { 176 Object o = env.get(OPT_DEFAULT_OWNER); 177 if (o == null) { 178 try { 179 PrivilegedExceptionAction<UserPrincipal> pa = ()->Files.getOwner(zfpath); 180 return AccessController.doPrivileged(pa); 181 } catch (PrivilegedActionException e) { 182 if (e.getCause() instanceof UnsupportedOperationException || 183 e.getCause() instanceof NoSuchFileException) 184 { 185 PrivilegedAction<String> pa = ()->System.getProperty("user.name"); 186 String userName = AccessController.doPrivileged(pa); 187 return ()->userName; 188 } else { 189 throw new IOException(e); 190 } 191 } 192 } 193 if (o instanceof String) { 194 if (((String)o).isEmpty()) { 195 throw new IllegalArgumentException("Value for property " + 196 OPT_DEFAULT_OWNER + " must not be empty."); 197 } 198 return ()->(String)o; 199 } 200 if (o instanceof UserPrincipal) { 201 return (UserPrincipal)o; 202 } 203 throw new IllegalArgumentException("Value for property " + 204 OPT_DEFAULT_OWNER + " must be of type " + String.class + 205 " or " + UserPrincipal.class); 206 } 207 208 // Initialize the default group for files inside the zip archive. 209 // If not specified in env, we try to determine the group of the zip archive itself. 210 // If this is not possible/unsupported, we will return a group principal going by 211 // the same name as the default owner. 
212 private GroupPrincipal initGroup(Path zfpath, Map<String, ?> env) throws IOException { 213 Object o = env.get(OPT_DEFAULT_GROUP); 214 if (o == null) { 215 PosixFileAttributeView zfpv = Files.getFileAttributeView(zfpath, PosixFileAttributeView.class); 216 if (zfpv == null) { 217 return defaultOwner::getName; 218 } 219 PrivilegedExceptionAction<GroupPrincipal> pa = ()->zfpv.readAttributes().group(); 220 try { 221 return AccessController.doPrivileged(pa); 222 } catch (PrivilegedActionException e) { 223 if (e.getCause() instanceof NoSuchFileException) { 224 return defaultOwner::getName; 225 } else { 226 throw new IOException(e); 227 } 228 } 229 } 230 if (o instanceof String) { 231 if (((String)o).isEmpty()) { 232 throw new IllegalArgumentException("Value for property " + 233 OPT_DEFAULT_GROUP + " must not be empty."); 234 } 235 return ()->(String)o; 236 } 237 if (o instanceof GroupPrincipal) { 238 return (GroupPrincipal)o; 239 } 240 throw new IllegalArgumentException("Value for property " + 241 OPT_DEFAULT_GROUP + " must be of type " + String.class + 242 " or " + GroupPrincipal.class); 243 } 244 245 // Initialize the default permissions for files inside the zip archive. 246 // If not specified in env, it will return 777. 
247 private Set<PosixFilePermission> initPermissions(Map<String, ?> env) { 248 Object o = env.get(OPT_DEFAULT_PERMISSIONS); 249 if (o == null) { 250 return DEFAULT_PERMISSIONS; 251 } 252 if (o instanceof String) { 253 return PosixFilePermissions.fromString((String)o); 254 } 255 if (!(o instanceof Set)) { 256 throw new IllegalArgumentException("Value for property " + 257 OPT_DEFAULT_PERMISSIONS + " must be of type " + String.class + 258 " or " + Set.class); 259 } 260 Set<PosixFilePermission> perms = new HashSet<>(); 261 for (Object o2 : (Set<?>)o) { 262 if (o2 instanceof PosixFilePermission) { 263 perms.add((PosixFilePermission)o2); 264 } else { 265 throw new IllegalArgumentException(OPT_DEFAULT_PERMISSIONS + 266 " must only contain objects of type " + PosixFilePermission.class); 267 } 268 } 269 return perms; 270 } 271 272 @Override 273 public FileSystemProvider provider() { 274 return provider; 275 } 276 277 @Override 278 public String getSeparator() { 279 return "/"; 280 } 281 282 @Override 283 public boolean isOpen() { 284 return isOpen; 285 } 286 287 @Override 288 public boolean isReadOnly() { 289 return readOnly; 290 } 291 292 private void checkWritable() { 293 if (readOnly) { 294 throw new ReadOnlyFileSystemException(); 295 } 296 } 297 298 void setReadOnly() { 299 this.readOnly = true; 300 } 301 302 @Override 303 public Iterable<Path> getRootDirectories() { 304 return List.of(rootdir); 305 } 306 307 ZipPath getRootDir() { 308 return rootdir; 309 } 310 311 @Override 312 public ZipPath getPath(String first, String... 
more) { 313 if (more.length == 0) { 314 return new ZipPath(this, first); 315 } 316 StringBuilder sb = new StringBuilder(); 317 sb.append(first); 318 for (String path : more) { 319 if (path.length() > 0) { 320 if (sb.length() > 0) { 321 sb.append('/'); 322 } 323 sb.append(path); 324 } 325 } 326 return new ZipPath(this, sb.toString()); 327 } 328 329 @Override 330 public UserPrincipalLookupService getUserPrincipalLookupService() { 331 throw new UnsupportedOperationException(); 332 } 333 334 @Override 335 public WatchService newWatchService() { 336 throw new UnsupportedOperationException(); 337 } 338 339 FileStore getFileStore(ZipPath path) { 340 return new ZipFileStore(path); 341 } 342 343 @Override 344 public Iterable<FileStore> getFileStores() { 345 return List.of(new ZipFileStore(rootdir)); 346 } 347 348 @Override 349 public Set<String> supportedFileAttributeViews() { 350 return supportedFileAttributeViews; 351 } 352 353 @Override 354 public String toString() { 355 return zfpath.toString(); 356 } 357 358 Path getZipFile() { 359 return zfpath; 360 } 361 362 private static final String GLOB_SYNTAX = "glob"; 363 private static final String REGEX_SYNTAX = "regex"; 364 365 @Override 366 public PathMatcher getPathMatcher(String syntaxAndInput) { 367 int pos = syntaxAndInput.indexOf(':'); 368 if (pos <= 0 || pos == syntaxAndInput.length()) { 369 throw new IllegalArgumentException(); 370 } 371 String syntax = syntaxAndInput.substring(0, pos); 372 String input = syntaxAndInput.substring(pos + 1); 373 String expr; 374 if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) { 375 expr = toRegexPattern(input); 376 } else { 377 if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) { 378 expr = input; 379 } else { 380 throw new UnsupportedOperationException("Syntax '" + syntax + 381 "' not recognized"); 382 } 383 } 384 // return matcher 385 final Pattern pattern = Pattern.compile(expr); 386 return (path)->pattern.matcher(path.toString()).matches(); 387 } 388 389 @Override 390 public void close() throws 
IOException { 391 beginWrite(); 392 try { 393 if (!isOpen) 394 return; 395 isOpen = false; // set closed 396 } finally { 397 endWrite(); 398 } 399 if (!streams.isEmpty()) { // unlock and close all remaining streams 400 Set<InputStream> copy = new HashSet<>(streams); 401 for (InputStream is : copy) 402 is.close(); 403 } 404 beginWrite(); // lock and sync 405 try { 406 AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> { 407 sync(); return null; 408 }); 409 ch.close(); // close the ch just in case no update 410 // and sync didn't close the ch 411 } catch (PrivilegedActionException e) { 412 throw (IOException)e.getException(); 413 } finally { 414 endWrite(); 415 } 416 417 synchronized (inflaters) { 418 for (Inflater inf : inflaters) 419 inf.end(); 420 } 421 synchronized (deflaters) { 422 for (Deflater def : deflaters) 423 def.end(); 424 } 425 426 IOException ioe = null; 427 synchronized (tmppaths) { 428 for (Path p : tmppaths) { 429 try { 430 AccessController.doPrivileged( 431 (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p)); 432 } catch (PrivilegedActionException e) { 433 IOException x = (IOException)e.getException(); 434 if (ioe == null) 435 ioe = x; 436 else 437 ioe.addSuppressed(x); 438 } 439 } 440 } 441 provider.removeFileSystem(zfpath, this); 442 if (ioe != null) 443 throw ioe; 444 } 445 446 ZipFileAttributes getFileAttributes(byte[] path) 447 throws IOException 448 { 449 beginRead(); 450 try { 451 ensureOpen(); 452 IndexNode inode = getInode(path); 453 if (inode == null) { 454 return null; 455 } else if (inode instanceof Entry) { 456 return (Entry)inode; 457 } else if (inode.pos == -1) { 458 // pseudo directory, uses METHOD_STORED 459 Entry e = supportPosix ? 460 new PosixEntry(inode.name, inode.isdir, METHOD_STORED) : 461 new Entry(inode.name, inode.isdir, METHOD_STORED); 462 e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp; 463 return e; 464 } else { 465 return supportPosix ? 
new PosixEntry(this, inode) : new Entry(this, inode); 466 } 467 } finally { 468 endRead(); 469 } 470 } 471 472 void checkAccess(byte[] path) throws IOException { 473 beginRead(); 474 try { 475 ensureOpen(); 476 // is it necessary to readCEN as a sanity check? 477 if (getInode(path) == null) { 478 throw new NoSuchFileException(toString()); 479 } 480 481 } finally { 482 endRead(); 483 } 484 } 485 486 void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime) 487 throws IOException 488 { 489 checkWritable(); 490 beginWrite(); 491 try { 492 ensureOpen(); 493 Entry e = getEntry(path); // ensureOpen checked 494 if (e == null) 495 throw new NoSuchFileException(getString(path)); 496 if (e.type == Entry.CEN) 497 e.type = Entry.COPY; // copy e 498 if (mtime != null) 499 e.mtime = mtime.toMillis(); 500 if (atime != null) 501 e.atime = atime.toMillis(); 502 if (ctime != null) 503 e.ctime = ctime.toMillis(); 504 update(e); 505 } finally { 506 endWrite(); 507 } 508 } 509 510 void setOwner(byte[] path, UserPrincipal owner) throws IOException { 511 checkWritable(); 512 beginWrite(); 513 try { 514 ensureOpen(); 515 Entry e = getEntry(path); // ensureOpen checked 516 if (e == null) { 517 throw new NoSuchFileException(getString(path)); 518 } 519 // as the owner information is not persistent, we don't need to 520 // change e.type to Entry.COPY 521 if (e instanceof PosixEntry) { 522 ((PosixEntry)e).owner = owner; 523 update(e); 524 } 525 } finally { 526 endWrite(); 527 } 528 } 529 530 void setGroup(byte[] path, GroupPrincipal group) throws IOException { 531 checkWritable(); 532 beginWrite(); 533 try { 534 ensureOpen(); 535 Entry e = getEntry(path); // ensureOpen checked 536 if (e == null) { 537 throw new NoSuchFileException(getString(path)); 538 } 539 // as the group information is not persistent, we don't need to 540 // change e.type to Entry.COPY 541 if (e instanceof PosixEntry) { 542 ((PosixEntry)e).group = group; 543 update(e); 544 } 545 } finally { 546 endWrite(); 
547 } 548 } 549 550 void setPermissions(byte[] path, Set<PosixFilePermission> perms) throws IOException { 551 checkWritable(); 552 beginWrite(); 553 try { 554 ensureOpen(); 555 Entry e = getEntry(path); // ensureOpen checked 556 if (e == null) { 557 throw new NoSuchFileException(getString(path)); 558 } 559 if (e.type == Entry.CEN) { 560 e.type = Entry.COPY; // copy e 561 } 562 e.posixPerms = perms == null ? -1 : ZipUtils.permsToFlags(perms); 563 update(e); 564 } finally { 565 endWrite(); 566 } 567 } 568 569 boolean exists(byte[] path) { 570 beginRead(); 571 try { 572 ensureOpen(); 573 return getInode(path) != null; 574 } finally { 575 endRead(); 576 } 577 } 578 579 boolean isDirectory(byte[] path) { 580 beginRead(); 581 try { 582 IndexNode n = getInode(path); 583 return n != null && n.isDir(); 584 } finally { 585 endRead(); 586 } 587 } 588 589 // returns the list of child paths of "path" 590 Iterator<Path> iteratorOf(ZipPath dir, 591 DirectoryStream.Filter<? super Path> filter) 592 throws IOException 593 { 594 beginWrite(); // iteration of inodes needs exclusive lock 595 try { 596 ensureOpen(); 597 byte[] path = dir.getResolvedPath(); 598 IndexNode inode = getInode(path); 599 if (inode == null) 600 throw new NotDirectoryException(getString(path)); 601 List<Path> list = new ArrayList<>(); 602 IndexNode child = inode.child; 603 while (child != null) { 604 // (1) Assume each path from the zip file itself is "normalized" 605 // (2) IndexNode.name is absolute. see IndexNode(byte[],int,int) 606 // (3) If parent "dir" is relative when ZipDirectoryStream 607 // is created, the returned child path needs to be relative 608 // as well. 
609 ZipPath childPath = new ZipPath(this, child.name, true); 610 ZipPath childFileName = childPath.getFileName(); 611 ZipPath zpath = dir.resolve(childFileName); 612 if (filter == null || filter.accept(zpath)) 613 list.add(zpath); 614 child = child.sibling; 615 } 616 return list.iterator(); 617 } finally { 618 endWrite(); 619 } 620 } 621 622 void createDirectory(byte[] dir, FileAttribute<?>... attrs) throws IOException { 623 checkWritable(); 624 beginWrite(); 625 try { 626 ensureOpen(); 627 if (dir.length == 0 || exists(dir)) // root dir, or existing dir 628 throw new FileAlreadyExistsException(getString(dir)); 629 checkParents(dir); 630 Entry e = supportPosix ? 631 new PosixEntry(dir, Entry.NEW, true, METHOD_STORED, attrs) : 632 new Entry(dir, Entry.NEW, true, METHOD_STORED, attrs); 633 update(e); 634 } finally { 635 endWrite(); 636 } 637 } 638 639 void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options) 640 throws IOException 641 { 642 checkWritable(); 643 if (Arrays.equals(src, dst)) 644 return; // do nothing, src and dst are the same 645 646 beginWrite(); 647 try { 648 ensureOpen(); 649 Entry eSrc = getEntry(src); // ensureOpen checked 650 651 if (eSrc == null) 652 throw new NoSuchFileException(getString(src)); 653 if (eSrc.isDir()) { // spec says to create dst dir 654 createDirectory(dst); 655 return; 656 } 657 boolean hasReplace = false; 658 boolean hasCopyAttrs = false; 659 for (CopyOption opt : options) { 660 if (opt == REPLACE_EXISTING) 661 hasReplace = true; 662 else if (opt == COPY_ATTRIBUTES) 663 hasCopyAttrs = true; 664 } 665 Entry eDst = getEntry(dst); 666 if (eDst != null) { 667 if (!hasReplace) 668 throw new FileAlreadyExistsException(getString(dst)); 669 } else { 670 checkParents(dst); 671 } 672 // copy eSrc entry and change name 673 Entry u = supportPosix ? 
674 new PosixEntry((PosixEntry)eSrc, Entry.COPY) : 675 new Entry(eSrc, Entry.COPY); 676 u.name(dst); 677 if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) { 678 u.type = eSrc.type; // make it the same type 679 if (deletesrc) { // if it's a "rename", take the data 680 u.bytes = eSrc.bytes; 681 u.file = eSrc.file; 682 } else { // if it's not "rename", copy the data 683 if (eSrc.bytes != null) 684 u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length); 685 else if (eSrc.file != null) { 686 u.file = getTempPathForEntry(null); 687 Files.copy(eSrc.file, u.file, REPLACE_EXISTING); 688 } 689 } 690 } 691 if (!hasCopyAttrs) 692 u.mtime = u.atime= u.ctime = System.currentTimeMillis(); 693 update(u); 694 if (deletesrc) 695 updateDelete(eSrc); 696 } finally { 697 endWrite(); 698 } 699 } 700 701 // Returns an output stream for writing the contents into the specified 702 // entry. 703 OutputStream newOutputStream(byte[] path, OpenOption... options) 704 throws IOException 705 { 706 checkWritable(); 707 boolean hasCreateNew = false; 708 boolean hasCreate = false; 709 boolean hasAppend = false; 710 boolean hasTruncate = false; 711 for (OpenOption opt : options) { 712 if (opt == READ) 713 throw new IllegalArgumentException("READ not allowed"); 714 if (opt == CREATE_NEW) 715 hasCreateNew = true; 716 if (opt == CREATE) 717 hasCreate = true; 718 if (opt == APPEND) 719 hasAppend = true; 720 if (opt == TRUNCATE_EXISTING) 721 hasTruncate = true; 722 } 723 if (hasAppend && hasTruncate) 724 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 725 beginRead(); // only need a readlock, the "update()" will 726 try { // try to obtain a writelock when the os is 727 ensureOpen(); // being closed. 
728 Entry e = getEntry(path); 729 if (e != null) { 730 if (e.isDir() || hasCreateNew) 731 throw new FileAlreadyExistsException(getString(path)); 732 if (hasAppend) { 733 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 734 try (InputStream is = getInputStream(e)) { 735 is.transferTo(os); 736 } 737 return os; 738 } 739 return getOutputStream(supportPosix ? 740 new PosixEntry((PosixEntry)e, Entry.NEW) : new Entry(e, Entry.NEW)); 741 } else { 742 if (!hasCreate && !hasCreateNew) 743 throw new NoSuchFileException(getString(path)); 744 checkParents(path); 745 return getOutputStream(supportPosix ? 746 new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod) : 747 new Entry(path, Entry.NEW, false, defaultCompressionMethod)); 748 } 749 } finally { 750 endRead(); 751 } 752 } 753 754 // Returns an input stream for reading the contents of the specified 755 // file entry. 756 InputStream newInputStream(byte[] path) throws IOException { 757 beginRead(); 758 try { 759 ensureOpen(); 760 Entry e = getEntry(path); 761 if (e == null) 762 throw new NoSuchFileException(getString(path)); 763 if (e.isDir()) 764 throw new FileSystemException(getString(path), "is a directory", null); 765 return getInputStream(e); 766 } finally { 767 endRead(); 768 } 769 } 770 771 private void checkOptions(Set<? extends OpenOption> options) { 772 // check for options of null type and option is an intance of StandardOpenOption 773 for (OpenOption option : options) { 774 if (option == null) 775 throw new NullPointerException(); 776 if (!(option instanceof StandardOpenOption)) 777 throw new IllegalArgumentException(); 778 } 779 if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING)) 780 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 781 } 782 783 // Returns an output SeekableByteChannel for either 784 // (1) writing the contents of a new entry, if the entry doesn't exist, or 785 // (2) updating/replacing the contents of an existing entry. 
786 // Note: The content of the channel is not compressed until the 787 // channel is closed 788 private class EntryOutputChannel extends ByteArrayChannel { 789 final Entry e; 790 791 EntryOutputChannel(Entry e) { 792 super(e.size > 0? (int)e.size : 8192, false); 793 this.e = e; 794 if (e.mtime == -1) 795 e.mtime = System.currentTimeMillis(); 796 if (e.method == -1) 797 e.method = defaultCompressionMethod; 798 // store size, compressed size, and crc-32 in datadescriptor 799 e.flag = FLAG_DATADESCR; 800 if (zc.isUTF8()) 801 e.flag |= FLAG_USE_UTF8; 802 } 803 804 @Override 805 public void close() throws IOException { 806 // will update the entry 807 try (OutputStream os = getOutputStream(e)) { 808 os.write(toByteArray()); 809 } 810 super.close(); 811 } 812 } 813 814 // Returns a Writable/ReadByteChannel for now. Might consider to use 815 // newFileChannel() instead, which dump the entry data into a regular 816 // file on the default file system and create a FileChannel on top of it. 817 SeekableByteChannel newByteChannel(byte[] path, 818 Set<? extends OpenOption> options, 819 FileAttribute<?>... attrs) 820 throws IOException 821 { 822 checkOptions(options); 823 if (options.contains(StandardOpenOption.WRITE) || 824 options.contains(StandardOpenOption.APPEND)) { 825 checkWritable(); 826 beginRead(); // only need a read lock, the "update()" will obtain 827 // the write lock when the channel is closed 828 try { 829 Entry e = getEntry(path); 830 if (e != null) { 831 if (e.isDir() || options.contains(CREATE_NEW)) 832 throw new FileAlreadyExistsException(getString(path)); 833 SeekableByteChannel sbc = 834 new EntryOutputChannel(supportPosix ? 
835 new PosixEntry((PosixEntry)e, Entry.NEW) : 836 new Entry(e, Entry.NEW)); 837 if (options.contains(APPEND)) { 838 try (InputStream is = getInputStream(e)) { // copyover 839 byte[] buf = new byte[8192]; 840 ByteBuffer bb = ByteBuffer.wrap(buf); 841 int n; 842 while ((n = is.read(buf)) != -1) { 843 bb.position(0); 844 bb.limit(n); 845 sbc.write(bb); 846 } 847 } 848 } 849 return sbc; 850 } 851 if (!options.contains(CREATE) && !options.contains(CREATE_NEW)) 852 throw new NoSuchFileException(getString(path)); 853 checkParents(path); 854 return new EntryOutputChannel( 855 supportPosix ? 856 new PosixEntry(path, Entry.NEW, false, defaultCompressionMethod, attrs) : 857 new Entry(path, Entry.NEW, false, defaultCompressionMethod, attrs)); 858 } finally { 859 endRead(); 860 } 861 } else { 862 beginRead(); 863 try { 864 ensureOpen(); 865 Entry e = getEntry(path); 866 if (e == null || e.isDir()) 867 throw new NoSuchFileException(getString(path)); 868 try (InputStream is = getInputStream(e)) { 869 // TBD: if (e.size < NNNNN); 870 return new ByteArrayChannel(is.readAllBytes(), true); 871 } 872 } finally { 873 endRead(); 874 } 875 } 876 } 877 878 // Returns a FileChannel of the specified entry. 879 // 880 // This implementation creates a temporary file on the default file system, 881 // copy the entry data into it if the entry exists, and then create a 882 // FileChannel on top of it. 883 FileChannel newFileChannel(byte[] path, 884 Set<? extends OpenOption> options, 885 FileAttribute<?>... 
attrs) 886 throws IOException 887 { 888 checkOptions(options); 889 final boolean forWrite = (options.contains(StandardOpenOption.WRITE) || 890 options.contains(StandardOpenOption.APPEND)); 891 beginRead(); 892 try { 893 ensureOpen(); 894 Entry e = getEntry(path); 895 if (forWrite) { 896 checkWritable(); 897 if (e == null) { 898 if (!options.contains(StandardOpenOption.CREATE) && 899 !options.contains(StandardOpenOption.CREATE_NEW)) { 900 throw new NoSuchFileException(getString(path)); 901 } 902 } else { 903 if (options.contains(StandardOpenOption.CREATE_NEW)) { 904 throw new FileAlreadyExistsException(getString(path)); 905 } 906 if (e.isDir()) 907 throw new FileAlreadyExistsException("directory <" 908 + getString(path) + "> exists"); 909 } 910 options = new HashSet<>(options); 911 options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile 912 } else if (e == null || e.isDir()) { 913 throw new NoSuchFileException(getString(path)); 914 } 915 916 final boolean isFCH = (e != null && e.type == Entry.FILECH); 917 final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path); 918 final FileChannel fch = tmpfile.getFileSystem() 919 .provider() 920 .newFileChannel(tmpfile, options, attrs); 921 final Entry u = isFCH ? e : ( 922 supportPosix ? 923 new PosixEntry(path, tmpfile, Entry.FILECH, attrs) : 924 new Entry(path, tmpfile, Entry.FILECH, attrs)); 925 if (forWrite) { 926 u.flag = FLAG_DATADESCR; 927 u.method = defaultCompressionMethod; 928 } 929 // is there a better way to hook into the FileChannel's close method? 
930 return new FileChannel() { 931 public int write(ByteBuffer src) throws IOException { 932 return fch.write(src); 933 } 934 public long write(ByteBuffer[] srcs, int offset, int length) 935 throws IOException 936 { 937 return fch.write(srcs, offset, length); 938 } 939 public long position() throws IOException { 940 return fch.position(); 941 } 942 public FileChannel position(long newPosition) 943 throws IOException 944 { 945 fch.position(newPosition); 946 return this; 947 } 948 public long size() throws IOException { 949 return fch.size(); 950 } 951 public FileChannel truncate(long size) 952 throws IOException 953 { 954 fch.truncate(size); 955 return this; 956 } 957 public void force(boolean metaData) 958 throws IOException 959 { 960 fch.force(metaData); 961 } 962 public long transferTo(long position, long count, 963 WritableByteChannel target) 964 throws IOException 965 { 966 return fch.transferTo(position, count, target); 967 } 968 public long transferFrom(ReadableByteChannel src, 969 long position, long count) 970 throws IOException 971 { 972 return fch.transferFrom(src, position, count); 973 } 974 public int read(ByteBuffer dst) throws IOException { 975 return fch.read(dst); 976 } 977 public int read(ByteBuffer dst, long position) 978 throws IOException 979 { 980 return fch.read(dst, position); 981 } 982 public long read(ByteBuffer[] dsts, int offset, int length) 983 throws IOException 984 { 985 return fch.read(dsts, offset, length); 986 } 987 public int write(ByteBuffer src, long position) 988 throws IOException 989 { 990 return fch.write(src, position); 991 } 992 public MappedByteBuffer map(MapMode mode, 993 long position, long size) 994 { 995 throw new UnsupportedOperationException(); 996 } 997 public FileLock lock(long position, long size, boolean shared) 998 throws IOException 999 { 1000 return fch.lock(position, size, shared); 1001 } 1002 public FileLock tryLock(long position, long size, boolean shared) 1003 throws IOException 1004 { 1005 return 
fch.tryLock(position, size, shared); 1006 } 1007 protected void implCloseChannel() throws IOException { 1008 fch.close(); 1009 if (forWrite) { 1010 u.mtime = System.currentTimeMillis(); 1011 u.size = Files.size(u.file); 1012 update(u); 1013 } else { 1014 if (!isFCH) // if this is a new fch for reading 1015 removeTempPathForEntry(tmpfile); 1016 } 1017 } 1018 }; 1019 } finally { 1020 endRead(); 1021 } 1022 } 1023 1024 // the outstanding input streams that need to be closed 1025 private Set<InputStream> streams = 1026 Collections.synchronizedSet(new HashSet<>()); 1027 1028 // the ex-channel and ex-path that need to close when their outstanding 1029 // input streams are all closed by the obtainers. 1030 private final Set<ExistingChannelCloser> exChClosers = new HashSet<>(); 1031 1032 private final Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<>()); 1033 private Path getTempPathForEntry(byte[] path) throws IOException { 1034 Path tmpPath = createTempFileInSameDirectoryAs(zfpath); 1035 if (path != null) { 1036 Entry e = getEntry(path); 1037 if (e != null) { 1038 try (InputStream is = newInputStream(path)) { 1039 Files.copy(is, tmpPath, REPLACE_EXISTING); 1040 } 1041 } 1042 } 1043 return tmpPath; 1044 } 1045 1046 private void removeTempPathForEntry(Path path) throws IOException { 1047 Files.delete(path); 1048 tmppaths.remove(path); 1049 } 1050 1051 // check if all parents really exist. ZIP spec does not require 1052 // the existence of any "parent directory". 
// Checks that all "parent directories" of the given entry path exist as
// inodes. The ZIP spec does not require a ZIP file to contain an entry
// for every parent directory, so this must be verified explicitly.
private void checkParents(byte[] path) throws IOException {
    beginRead();
    try {
        // getParent() returns the shared ROOTPATH constant when the
        // parent is the root, so an identity comparison suffices here.
        while ((path = getParent(path)) != null &&
               path != ROOTPATH) {
            if (!inodes.containsKey(IndexNode.keyOf(path))) {
                throw new NoSuchFileException(getString(path));
            }
        }
    } finally {
        endRead();
    }
}

// Returns the parent path of the given entry path, or the shared
// ROOTPATH constant if the parent is the root directory.
private static byte[] getParent(byte[] path) {
    int off = getParentOff(path);
    if (off <= 1)
        return ROOTPATH;
    return Arrays.copyOf(path, off);
}

// Returns the offset of the '/' separating the parent path from the
// last name component; a trailing '/' (directory marker) is ignored.
private static int getParentOff(byte[] path) {
    int off = path.length - 1;
    if (off > 0 && path[off] == '/')  // isDirectory
        off--;
    while (off > 0 && path[off] != '/') { off--; }
    return off;
}

// Acquires the exclusive (write) lock guarding the file system state.
private void beginWrite() {
    rwlock.writeLock().lock();
}

// Releases the exclusive (write) lock.
private void endWrite() {
    rwlock.writeLock().unlock();
}

// Acquires the shared (read) lock guarding the file system state.
private void beginRead() {
    rwlock.readLock().lock();
}

// Releases the shared (read) lock.
private void endRead() {
    rwlock.readLock().unlock();
}

///////////////////////////////////////////////////////////////////

private volatile boolean isOpen = true;
private final SeekableByteChannel ch; // channel to the zipfile
final byte[] cen;     // CEN & ENDHDR
private END end;
private long locpos;  // position of first LOC header (usually 0)

// guards all mutable file system state (inodes, tree, updates)
private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

// name -> pos (in cen), IndexNode itself can be used as a "key"
private LinkedHashMap<IndexNode, IndexNode> inodes;

// Encodes the given string using this file system's name charset.
final byte[] getBytes(String name) {
    return zc.getBytes(name);
}

// Decodes the given bytes using this file system's name charset.
final String getString(byte[] name) {
    return zc.toString(name);
}

// Last-resort cleanup: closes this file system if it is garbage
// collected without having been closed explicitly.
@SuppressWarnings("deprecation")
protected void finalize() throws IOException {
    close();
}

// Reads len bytes of data from the specified offset into buf.
// Returns the total number of bytes read.
// Each/every byte is read from here (except the cen, which is mapped).
final long readFullyAt(byte[] buf, int off, long len, long pos)
    throws IOException
{
    ByteBuffer bb = ByteBuffer.wrap(buf);
    bb.position(off);
    bb.limit((int)(off + len));
    return readFullyAt(bb, pos);
}

// Reads from the underlying channel at the given absolute position.
// Synchronized on the channel: position()+read() must be atomic.
private long readFullyAt(ByteBuffer bb, long pos) throws IOException {
    synchronized(ch) {
        return ch.position(pos).read(bb);
    }
}

// Searches backwards from the end of the file for the end of central
// directory (END) header. Returns a populated END record, using the
// ZIP64 values when a valid ZIP64 locator and record are present;
// throws ZipException if no END header can be found.
private END findEND() throws IOException {
    byte[] buf = new byte[READBLOCKSZ];
    long ziplen = ch.size();
    // the END header must live within the last END_MAXLEN bytes
    long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
    long minPos = minHDR - (buf.length - ENDHDR);

    // scan backwards in blocks overlapping by ENDHDR bytes so a header
    // straddling two blocks is still found
    for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) {
        int off = 0;
        if (pos < 0) {
            // Pretend there are some NUL bytes before start of file
            off = (int)-pos;
            Arrays.fill(buf, 0, off, (byte)0);
        }
        int len = buf.length - off;
        if (readFullyAt(buf, off, len, pos + off) != len)
            throw new ZipException("zip END header not found");

        // Now scan the block backwards for END header signature
        for (int i = buf.length - ENDHDR; i >= 0; i--) {
            if (buf[i] == (byte)'P' &&
                buf[i+1] == (byte)'K' &&
                buf[i+2] == (byte)'\005' &&
                buf[i+3] == (byte)'\006' &&
                (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                // Found END header
                buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                END end = new END();
                // end.endsub = ENDSUB(buf); // not used
                end.centot = ENDTOT(buf);
                end.cenlen = ENDSIZ(buf);
                end.cenoff = ENDOFF(buf);
                // end.comlen = ENDCOM(buf); // not used
                end.endpos = pos + i;
                // try if there is zip64 end;
                byte[] loc64 = new byte[ZIP64_LOCHDR];
                if (end.endpos < ZIP64_LOCHDR ||
                    readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                    != loc64.length ||
                    !locator64SigAt(loc64, 0)) {
                    return end;
                }
                long end64pos = ZIP64_LOCOFF(loc64);
                byte[] end64buf = new byte[ZIP64_ENDHDR];
                if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                    != end64buf.length ||
                    !end64SigAt(end64buf, 0)) {
                    return end;
                }
                // end64 found,
                long cenlen64 = ZIP64_ENDSIZ(end64buf);
                long cenoff64 = ZIP64_ENDOFF(end64buf);
                long centot64 = ZIP64_ENDTOT(end64buf);
                // double-check: the zip64 values may only differ from the
                // END values where the latter hold the "use zip64" sentinels
                if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL ||
                    cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL ||
                    centot64 != end.centot && end.centot != ZIP64_MINVAL32) {
                    return end;
                }
                // to use the end64 values
                end.cenlen = cenlen64;
                end.cenoff = cenoff64;
                end.centot = (int)centot64; // assume total < 2g
                end.endpos = end64pos;
                return end;
            }
        }
    }
    throw new ZipException("zip END header not found");
}

// Links the given node into the directory tree: walks up the parent
// chain, creating pseudo directory nodes for missing parents, until an
// existing parent (or the given root) is reached.
private void makeParentDirs(IndexNode node, IndexNode root) {
    IndexNode parent;
    ParentLookup lookup = new ParentLookup();
    while (true) {
        int off = getParentOff(node.name);
        // parent is root
        if (off <= 1) {
            node.sibling = root.child;
            root.child = node;
            break;
        }
        // parent exists
        lookup = lookup.as(node.name, off);
        if (inodes.containsKey(lookup)) {
            parent = inodes.get(lookup);
            node.sibling = parent.child;
            parent.child = node;
            break;
        }
        // parent does not exist, add new pseudo directory entry
        parent = new IndexNode(Arrays.copyOf(node.name, off), true);
        inodes.put(parent, parent);
        node.sibling = parent.child;
        parent.child = node;
        node = parent;
    }
}

// ZIP directory has two issues:
// (1) ZIP spec does not require the ZIP file to include
//     directory entries
// (2) entries are not stored/organized in a "tree" structure.
// A possible solution is to build the node tree ourselves as
// implemented below.
private void buildNodeTree() {
    beginWrite();
    try {
        IndexNode root = inodes.remove(LOOKUPKEY.as(ROOTPATH));
        if (root == null) {
            root = new IndexNode(ROOTPATH, true);
        }
        // snapshot the nodes before the root is (re-)inserted
        IndexNode[] nodes = inodes.values().toArray(new IndexNode[0]);
        inodes.put(root, root);
        for (IndexNode node : nodes) {
            makeParentDirs(node, root);
        }
    } finally {
        endWrite();
    }
}

// Unlinks the given node from its parent's child/sibling chain.
// Assumes the node is present in the tree; caller holds the write lock.
private void removeFromTree(IndexNode inode) {
    IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
    IndexNode child = parent.child;
    if (child.equals(inode)) {
        parent.child = child.sibling;
    } else {
        IndexNode last = child;
        while ((child = child.sibling) != null) {
            if (child.equals(inode)) {
                last.sibling = child.sibling;
                break;
            } else {
                last = child;
            }
        }
    }
}

// Reads the zip file central directory (CEN) and populates the inodes
// map and directory tree from it. Returns the raw CEN bytes followed by
// the END header, or null if the zip file contains only an END header.
private byte[] initCEN() throws IOException {
    end = findEND();
    if (end.endpos == 0) {
        inodes = new LinkedHashMap<>(10);
        locpos = 0;
        buildNodeTree();
        return null;  // only END header present
    }
    if (end.cenlen > end.endpos)
        throw new ZipException("invalid END header (bad central directory size)");
    long cenpos = end.endpos - end.cenlen;  // position of CEN table

    // Get position of first local file (LOC) header, taking into
    // account that there may be a stub prefixed to the zip file.
    locpos = cenpos - end.cenoff;
    if (locpos < 0)
        throw new ZipException("invalid END header (bad central directory offset)");

    // read in the CEN and END
    byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
    if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
        throw new ZipException("read CEN tables failed");
    }
    // Iterate through the entries in the central directory
    inodes = new LinkedHashMap<>(end.centot + 1);
    int pos = 0;
    int limit = cen.length - ENDHDR;
    while (pos < limit) {
        if (!cenSigAt(cen, pos))
            throw new ZipException("invalid CEN header (bad signature)");
        int method = CENHOW(cen, pos);
        int nlen = CENNAM(cen, pos);
        int elen = CENEXT(cen, pos);
        int clen = CENCOM(cen, pos);
        // reject encrypted entries (general purpose flag bit 0)
        if ((CENFLG(cen, pos) & 1) != 0) {
            throw new ZipException("invalid CEN header (encrypted entry)");
        }
        if (method != METHOD_STORED && method != METHOD_DEFLATED) {
            throw new ZipException("invalid CEN header (unsupported compression method: " + method + ")");
        }
        if (pos + CENHDR + nlen > limit) {
            throw new ZipException("invalid CEN header (bad header size)");
        }
        IndexNode inode = new IndexNode(cen, pos, nlen);
        inodes.put(inode, inode);

        // skip ext and comment
        pos += (CENHDR + nlen + elen + clen);
    }
    // the CEN table must end exactly where the END header begins
    if (pos + ENDHDR != cen.length) {
        throw new ZipException("invalid CEN header (bad header size)");
    }
    buildNodeTree();
    return cen;
}

// Throws ClosedFileSystemException if this file system has been closed.
private void ensureOpen() {
    if (!isOpen)
        throw new ClosedFileSystemException();
}

// Creates a new empty temporary file in the same directory as the
// specified file. A variant of Files.createTempFile.
private Path createTempFileInSameDirectoryAs(Path path)
    throws IOException
{
    Path parent = path.toAbsolutePath().getParent();
    Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
    Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
    tmppaths.add(tmpPath);  // track for cleanup
    return tmpPath;
}

////////////////////update & sync //////////////////////////////////////

// true when there are pending updates that sync() must write back
private boolean hasUpdate = false;

// shared key. consumer guarantees the "writeLock" before use it.
private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

// Removes the given inode from the tree and the inodes map, and marks
// the file system as having pending updates.
private void updateDelete(IndexNode inode) {
    beginWrite();
    try {
        removeFromTree(inode);
        inodes.remove(inode);
        hasUpdate = true;
    } finally {
        endWrite();
    }
}

// Inserts or replaces the given entry in the inodes map and directory
// tree, and marks the file system as having pending updates.
private void update(Entry e) {
    beginWrite();
    try {
        IndexNode old = inodes.put(e, e);
        if (old != null) {
            removeFromTree(old);
        }
        if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
            IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
            e.sibling = parent.child;
            parent.child = e;
        }
        hasUpdate = true;
    } finally {
        endWrite();
    }
}

// copy over the whole LOC entry (header if necessary, data and ext) from
// old zip to the new one.
1401 private long copyLOCEntry(Entry e, boolean updateHeader, 1402 OutputStream os, 1403 long written, byte[] buf) 1404 throws IOException 1405 { 1406 long locoff = e.locoff; // where to read 1407 e.locoff = written; // update the e.locoff with new value 1408 1409 // calculate the size need to write out 1410 long size = 0; 1411 // if there is A ext 1412 if ((e.flag & FLAG_DATADESCR) != 0) { 1413 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1414 size = 24; 1415 else 1416 size = 16; 1417 } 1418 // read loc, use the original loc.elen/nlen 1419 // 1420 // an extra byte after loc is read, which should be the first byte of the 1421 // 'name' field of the loc. if this byte is '/', which means the original 1422 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1423 // is used to output the loc, in which the leading "/" will be removed 1424 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1425 throw new ZipException("loc: reading failed"); 1426 1427 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1428 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1429 size += e.csize; 1430 written = e.writeLOC(os) + size; 1431 } else { 1432 os.write(buf, 0, LOCHDR); // write out the loc header 1433 locoff += LOCHDR; 1434 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1435 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1436 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1437 written = LOCHDR + size; 1438 } 1439 int n; 1440 while (size > 0 && 1441 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1442 { 1443 if (size < n) 1444 n = (int)size; 1445 os.write(buf, 0, n); 1446 size -= n; 1447 locoff += n; 1448 } 1449 return written; 1450 } 1451 1452 private long writeEntry(Entry e, OutputStream os) 1453 throws IOException { 1454 1455 if (e.bytes == null && e.file == null) // dir, 0-length data 1456 return 0; 1457 1458 long written = 0; 1459 if (e.csize > 0 && (e.crc != 0 || e.size == 0)) { 1460 // 
pre-compressed entry, write directly to output stream 1461 writeTo(e, os); 1462 } else { 1463 try (OutputStream os2 = (e.method == METHOD_STORED) ? 1464 new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) { 1465 writeTo(e, os2); 1466 } 1467 } 1468 written += e.csize; 1469 if ((e.flag & FLAG_DATADESCR) != 0) { 1470 written += e.writeEXT(os); 1471 } 1472 return written; 1473 } 1474 1475 private void writeTo(Entry e, OutputStream os) throws IOException { 1476 if (e.bytes != null) { 1477 os.write(e.bytes, 0, e.bytes.length); 1478 } else if (e.file != null) { 1479 if (e.type == Entry.NEW || e.type == Entry.FILECH) { 1480 try (InputStream is = Files.newInputStream(e.file)) { 1481 is.transferTo(os); 1482 } 1483 } 1484 Files.delete(e.file); 1485 tmppaths.remove(e.file); 1486 } 1487 } 1488 1489 // sync the zip file system, if there is any update 1490 private void sync() throws IOException { 1491 // check ex-closer 1492 if (!exChClosers.isEmpty()) { 1493 for (ExistingChannelCloser ecc : exChClosers) { 1494 if (ecc.closeAndDeleteIfDone()) { 1495 exChClosers.remove(ecc); 1496 } 1497 } 1498 } 1499 if (!hasUpdate) 1500 return; 1501 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1502 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) { 1503 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1504 long written = 0; 1505 byte[] buf = null; 1506 Entry e; 1507 1508 // write loc 1509 for (IndexNode inode : inodes.values()) { 1510 if (inode instanceof Entry) { // an updated inode 1511 e = (Entry)inode; 1512 try { 1513 if (e.type == Entry.COPY) { 1514 // entry copy: the only thing changed is the "name" 1515 // and "nlen" in LOC header, so we update/rewrite the 1516 // LOC in new file and simply copy the rest (data and 1517 // ext) without enflating/deflating from the old zip 1518 // file LOC entry. 
1519 if (buf == null) 1520 buf = new byte[8192]; 1521 written += copyLOCEntry(e, true, os, written, buf); 1522 } else { // NEW, FILECH or CEN 1523 e.locoff = written; 1524 written += e.writeLOC(os); // write loc header 1525 written += writeEntry(e, os); 1526 } 1527 elist.add(e); 1528 } catch (IOException x) { 1529 x.printStackTrace(); // skip any in-accurate entry 1530 } 1531 } else { // unchanged inode 1532 if (inode.pos == -1) { 1533 continue; // pseudo directory node 1534 } 1535 if (inode.name.length == 1 && inode.name[0] == '/') { 1536 continue; // no root '/' directory even if it 1537 // exists in original zip/jar file. 1538 } 1539 e = supportPosix ? new PosixEntry(this, inode) : new Entry(this, inode); 1540 try { 1541 if (buf == null) 1542 buf = new byte[8192]; 1543 written += copyLOCEntry(e, false, os, written, buf); 1544 elist.add(e); 1545 } catch (IOException x) { 1546 x.printStackTrace(); // skip any wrong entry 1547 } 1548 } 1549 } 1550 1551 // now write back the cen and end table 1552 end.cenoff = written; 1553 for (Entry entry : elist) { 1554 written += entry.writeCEN(os); 1555 } 1556 end.centot = elist.size(); 1557 end.cenlen = written - end.cenoff; 1558 end.write(os, written, forceEnd64); 1559 } 1560 if (!streams.isEmpty()) { 1561 // 1562 // There are outstanding input streams open on existing "ch", 1563 // so, don't close the "cha" and delete the "file for now, let 1564 // the "ex-channel-closer" to handle them 1565 Path path = createTempFileInSameDirectoryAs(zfpath); 1566 ExistingChannelCloser ecc = new ExistingChannelCloser(path, 1567 ch, 1568 streams); 1569 Files.move(zfpath, path, REPLACE_EXISTING); 1570 exChClosers.add(ecc); 1571 streams = Collections.synchronizedSet(new HashSet<>()); 1572 } else { 1573 ch.close(); 1574 Files.delete(zfpath); 1575 } 1576 1577 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1578 hasUpdate = false; // clear 1579 } 1580 1581 IndexNode getInode(byte[] path) { 1582 return 
inodes.get(IndexNode.keyOf(Objects.requireNonNull(path, "path"))); 1583 } 1584 1585 /** 1586 * Return the IndexNode from the root tree. If it doesn't exist, 1587 * it gets created along with all parent directory IndexNodes. 1588 */ 1589 IndexNode getOrCreateInode(byte[] path, boolean isdir) { 1590 IndexNode node = getInode(path); 1591 // if node exists, return it 1592 if (node != null) { 1593 return node; 1594 } 1595 1596 // otherwise create new pseudo node and parent directory hierarchy 1597 node = new IndexNode(path, isdir); 1598 beginWrite(); 1599 try { 1600 makeParentDirs(node, Objects.requireNonNull(inodes.get(IndexNode.keyOf(ROOTPATH)), "no root node found")); 1601 return node; 1602 } finally { 1603 endWrite(); 1604 } 1605 } 1606 1607 private Entry getEntry(byte[] path) throws IOException { 1608 IndexNode inode = getInode(path); 1609 if (inode instanceof Entry) 1610 return (Entry)inode; 1611 if (inode == null || inode.pos == -1) 1612 return null; 1613 return supportPosix ? new PosixEntry(this, inode): new Entry(this, inode); 1614 } 1615 1616 public void deleteFile(byte[] path, boolean failIfNotExists) 1617 throws IOException 1618 { 1619 checkWritable(); 1620 IndexNode inode = getInode(path); 1621 if (inode == null) { 1622 if (path != null && path.length == 0) 1623 throw new ZipException("root directory </> can't not be delete"); 1624 if (failIfNotExists) 1625 throw new NoSuchFileException(getString(path)); 1626 } else { 1627 if (inode.isDir() && inode.child != null) 1628 throw new DirectoryNotEmptyException(getString(path)); 1629 updateDelete(inode); 1630 } 1631 } 1632 1633 // Returns an out stream for either 1634 // (1) writing the contents of a new entry, if the entry exists, or 1635 // (2) updating/replacing the contents of the specified existing entry. 
1636 private OutputStream getOutputStream(Entry e) throws IOException { 1637 if (e.mtime == -1) 1638 e.mtime = System.currentTimeMillis(); 1639 if (e.method == -1) 1640 e.method = defaultCompressionMethod; 1641 // store size, compressed size, and crc-32 in datadescr 1642 e.flag = FLAG_DATADESCR; 1643 if (zc.isUTF8()) 1644 e.flag |= FLAG_USE_UTF8; 1645 OutputStream os; 1646 if (useTempFile) { 1647 e.file = getTempPathForEntry(null); 1648 os = Files.newOutputStream(e.file, WRITE); 1649 } else { 1650 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1651 } 1652 if (e.method == METHOD_DEFLATED) { 1653 return new DeflatingEntryOutputStream(e, os); 1654 } else { 1655 return new EntryOutputStream(e, os); 1656 } 1657 } 1658 1659 private class EntryOutputStream extends FilterOutputStream { 1660 private final Entry e; 1661 private long written; 1662 private boolean isClosed; 1663 1664 EntryOutputStream(Entry e, OutputStream os) { 1665 super(os); 1666 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1667 // this.written = 0; 1668 } 1669 1670 @Override 1671 public synchronized void write(int b) throws IOException { 1672 out.write(b); 1673 written += 1; 1674 } 1675 1676 @Override 1677 public synchronized void write(byte[] b, int off, int len) 1678 throws IOException { 1679 out.write(b, off, len); 1680 written += len; 1681 } 1682 1683 @Override 1684 public synchronized void close() throws IOException { 1685 if (isClosed) { 1686 return; 1687 } 1688 isClosed = true; 1689 e.size = written; 1690 if (out instanceof ByteArrayOutputStream) 1691 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1692 super.close(); 1693 update(e); 1694 } 1695 } 1696 1697 // Output stream returned when writing "deflated" entries into memory, 1698 // to enable eager (possibly parallel) deflation and reduce memory required. 
1699 private class DeflatingEntryOutputStream extends DeflaterOutputStream { 1700 private final CRC32 crc; 1701 private final Entry e; 1702 private boolean isClosed; 1703 1704 DeflatingEntryOutputStream(Entry e, OutputStream os) { 1705 super(os, getDeflater()); 1706 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1707 this.crc = new CRC32(); 1708 } 1709 1710 @Override 1711 public synchronized void write(int b) throws IOException { 1712 super.write(b); 1713 crc.update(b); 1714 } 1715 1716 @Override 1717 public synchronized void write(byte[] b, int off, int len) 1718 throws IOException { 1719 super.write(b, off, len); 1720 crc.update(b, off, len); 1721 } 1722 1723 @Override 1724 public synchronized void close() throws IOException { 1725 if (isClosed) 1726 return; 1727 isClosed = true; 1728 finish(); 1729 e.size = def.getBytesRead(); 1730 e.csize = def.getBytesWritten(); 1731 e.crc = crc.getValue(); 1732 if (out instanceof ByteArrayOutputStream) 1733 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1734 super.close(); 1735 update(e); 1736 releaseDeflater(def); 1737 } 1738 } 1739 1740 // Wrapper output stream class to write out a "stored" entry. 1741 // (1) this class does not close the underlying out stream when 1742 // being closed. 
1743 // (2) no need to be "synchronized", only used by sync() 1744 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1745 private final CRC32 crc; 1746 private final Entry e; 1747 private long written; 1748 private boolean isClosed; 1749 1750 EntryOutputStreamCRC32(Entry e, OutputStream os) { 1751 super(os); 1752 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1753 this.crc = new CRC32(); 1754 } 1755 1756 @Override 1757 public void write(int b) throws IOException { 1758 out.write(b); 1759 crc.update(b); 1760 written += 1; 1761 } 1762 1763 @Override 1764 public void write(byte[] b, int off, int len) 1765 throws IOException { 1766 out.write(b, off, len); 1767 crc.update(b, off, len); 1768 written += len; 1769 } 1770 1771 @Override 1772 public void close() { 1773 if (isClosed) 1774 return; 1775 isClosed = true; 1776 e.size = e.csize = written; 1777 e.crc = crc.getValue(); 1778 } 1779 } 1780 1781 // Wrapper output stream class to write out a "deflated" entry. 1782 // (1) this class does not close the underlying out stream when 1783 // being closed. 
// (2) no need to be "synchronized", only used by sync()
private class EntryOutputStreamDef extends DeflaterOutputStream {
    private final CRC32 crc;
    private final Entry e;
    private boolean isClosed;

    EntryOutputStreamDef(Entry e, OutputStream os) {
        super(os, getDeflater());
        this.e = Objects.requireNonNull(e, "Zip entry is null");
        this.crc = new CRC32();
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
        super.write(b, off, len);
        crc.update(b, off, len);
    }

    @Override
    public void close() throws IOException {
        if (isClosed)
            return;
        isClosed = true;
        // flush the deflater without closing the wrapped stream
        finish();
        e.size = def.getBytesRead();
        e.csize = def.getBytesWritten();
        e.crc = crc.getValue();
        releaseDeflater(def);
    }
}

// Returns an input stream for reading the contents of the given entry,
// inflating on the fly for deflated entries. The stream is registered
// in "streams" so outstanding readers can be tracked until closed.
private InputStream getInputStream(Entry e)
    throws IOException
{
    InputStream eis;
    if (e.type == Entry.NEW) {
        if (e.bytes != null)
            eis = new ByteArrayInputStream(e.bytes);
        else if (e.file != null)
            eis = Files.newInputStream(e.file);
        else
            throw new ZipException("update entry data is missing");
    } else if (e.type == Entry.FILECH) {
        // FILECH result is un-compressed.
        eis = Files.newInputStream(e.file);
        // TBD: wrap to hook close()
        // streams.add(eis);
        return eis;
    } else {  // untouched CEN or COPY
        eis = new EntryInputStream(e, ch);
    }
    if (e.method == METHOD_DEFLATED) {
        // MORE: Compute good size for inflater stream:
        long bufSize = e.size + 2; // Inflater likes a bit of slack
        if (bufSize > 65536)
            bufSize = 8192;
        final long size = e.size;
        eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
            private boolean isClosed = false;
            public void close() throws IOException {
                if (!isClosed) {
                    releaseInflater(inf);
                    this.in.close();
                    isClosed = true;
                    streams.remove(this);
                }
            }
            // Override fill() method to provide an extra "dummy" byte
            // at the end of the input stream. This is required when
            // using the "nowrap" Inflater option. (it appears the new
            // zlib in 7 does not need it, but keep it for now)
            protected void fill() throws IOException {
                if (eof) {
                    throw new EOFException(
                        "Unexpected end of ZLIB input stream");
                }
                len = this.in.read(buf, 0, buf.length);
                if (len == -1) {
                    buf[0] = 0;
                    len = 1;
                    eof = true;
                }
                inf.setInput(buf, 0, len);
            }
            private boolean eof;

            public int available() {
                if (isClosed)
                    return 0;
                // remaining uncompressed bytes of this entry
                long avail = size - inf.getBytesWritten();
                return avail > (long) Integer.MAX_VALUE ?
                    Integer.MAX_VALUE : (int) avail;
            }
        };
    } else if (e.method == METHOD_STORED) {
        // TBD: wrap/ it does not seem necessary
    } else {
        throw new ZipException("invalid compression method");
    }
    streams.add(eis);
    return eis;
}

// Inner class implementing the input stream used to read
// a (possibly compressed) zip file entry.
private class EntryInputStream extends InputStream {
    private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                            // point to a new channel after sync()
    private long pos;                       // current position within entry data;
                                            // kept negated until initDataPos() runs
    private long rem;                       // number of remaining bytes within entry

    EntryInputStream(Entry e, SeekableByteChannel zfch)
        throws IOException
    {
        this.zfch = zfch;
        rem = e.csize;
        pos = e.locoff;
        if (pos == -1) {
            Entry e2 = getEntry(e.name);
            if (e2 == null) {
                throw new ZipException("invalid loc for entry <" + getString(e.name) + ">");
            }
            pos = e2.locoff;
        }
        pos = -pos; // lazy initialize the real data offset
    }

    public int read(byte[] b, int off, int len) throws IOException {
        ensureOpen();
        initDataPos();
        if (rem == 0) {
            return -1;
        }
        if (len <= 0) {
            return 0;
        }
        if (len > rem) {
            len = (int) rem;
        }
        // readFullyAt()
        long n;
        ByteBuffer bb = ByteBuffer.wrap(b);
        bb.position(off);
        bb.limit(off + len);
        // position()+read() must be atomic on the shared channel
        synchronized(zfch) {
            n = zfch.position(pos).read(bb);
        }
        if (n > 0) {
            pos += n;
            rem -= n;
        }
        if (rem == 0) {
            close();
        }
        return (int)n;
    }

    public int read() throws IOException {
        byte[] b = new byte[1];
        if (read(b, 0, 1) == 1) {
            return b[0] & 0xff;
        } else {
            return -1;
        }
    }

    public long skip(long n) {
        ensureOpen();
        if (n > rem)
            n = rem;
        pos += n;
        rem -= n;
        if (rem == 0) {
            close();
        }
        return n;
    }

    public int available() {
        return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
    }

    public void close() {
        rem = 0;
        streams.remove(this);
    }

    // Resolves the lazily-negated "pos" into the real absolute offset of
    // the entry data: reads the LOC header and skips name/extra fields.
    private void initDataPos() throws IOException {
        if (pos <= 0) {
            pos = -pos + locpos;  // absolute position of the LOC header
            byte[] buf = new byte[LOCHDR];
            if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                throw new ZipException("invalid loc " + pos + " for entry reading");
            }
            pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
        }
    }
}

// Maximum number of de/inflaters we cache
private final int MAX_FLATER = 20;
// List of available Inflater objects for decompression
private final List<Inflater> inflaters = new ArrayList<>();

// Gets an inflater from the list of available inflaters or allocates
// a new one.
private Inflater getInflater() {
    synchronized (inflaters) {
        int size = inflaters.size();
        if (size > 0) {
            return inflaters.remove(size - 1);
        } else {
            return new Inflater(true);  // "nowrap": raw deflate data
        }
    }
}

// Releases the specified inflater to the list of available inflaters.
private void releaseInflater(Inflater inf) {
    synchronized (inflaters) {
        if (inflaters.size() < MAX_FLATER) {
            inf.reset();
            inflaters.add(inf);
        } else {
            inf.end();  // pool full; free the native resources
        }
    }
}

// List of available Deflater objects for compression
private final List<Deflater> deflaters = new ArrayList<>();

// Gets a deflater from the list of available deflaters or allocates
// a new one.
private Deflater getDeflater() {
    synchronized (deflaters) {
        int size = deflaters.size();
        if (size > 0) {
            return deflaters.remove(size - 1);
        } else {
            return new Deflater(Deflater.DEFAULT_COMPRESSION, true);
        }
    }
}

// Releases the specified deflater to the list of available deflaters.
2030 private void releaseDeflater(Deflater def) { 2031 synchronized (deflaters) { 2032 if (inflaters.size() < MAX_FLATER) { 2033 def.reset(); 2034 deflaters.add(def); 2035 } else { 2036 def.end(); 2037 } 2038 } 2039 } 2040 2041 // End of central directory record 2042 static class END { 2043 // The fields that are commented out below are not used by anyone and write() uses "0" 2044 // int disknum; 2045 // int sdisknum; 2046 // int endsub; 2047 int centot; // 4 bytes 2048 long cenlen; // 4 bytes 2049 long cenoff; // 4 bytes 2050 // int comlen; // comment length 2051 // byte[] comment; 2052 2053 // members of Zip64 end of central directory locator 2054 // int diskNum; 2055 long endpos; 2056 // int disktot; 2057 2058 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 2059 boolean hasZip64 = forceEnd64; // false; 2060 long xlen = cenlen; 2061 long xoff = cenoff; 2062 if (xlen >= ZIP64_MINVAL) { 2063 xlen = ZIP64_MINVAL; 2064 hasZip64 = true; 2065 } 2066 if (xoff >= ZIP64_MINVAL) { 2067 xoff = ZIP64_MINVAL; 2068 hasZip64 = true; 2069 } 2070 int count = centot; 2071 if (count >= ZIP64_MINVAL32) { 2072 count = ZIP64_MINVAL32; 2073 hasZip64 = true; 2074 } 2075 if (hasZip64) { 2076 //zip64 end of central directory record 2077 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 2078 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 2079 writeShort(os, 45); // version made by 2080 writeShort(os, 45); // version needed to extract 2081 writeInt(os, 0); // number of this disk 2082 writeInt(os, 0); // central directory start disk 2083 writeLong(os, centot); // number of directory entries on disk 2084 writeLong(os, centot); // number of directory entries 2085 writeLong(os, cenlen); // length of central directory 2086 writeLong(os, cenoff); // offset of central directory 2087 2088 //zip64 end of central directory locator 2089 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 2090 writeInt(os, 0); // zip64 END start disk 2091 
writeLong(os, offset); // offset of zip64 END 2092 writeInt(os, 1); // total number of disks (?) 2093 } 2094 writeInt(os, ENDSIG); // END record signature 2095 writeShort(os, 0); // number of this disk 2096 writeShort(os, 0); // central directory start disk 2097 writeShort(os, count); // number of directory entries on disk 2098 writeShort(os, count); // total number of directory entries 2099 writeInt(os, xlen); // length of central directory 2100 writeInt(os, xoff); // offset of central directory 2101 writeShort(os, 0); // zip file comment, not used 2102 } 2103 } 2104 2105 // Internal node that links a "name" to its pos in cen table. 2106 // The node itself can be used as a "key" to lookup itself in 2107 // the HashMap inodes. 2108 static class IndexNode { 2109 byte[] name; 2110 int hashcode; // node is hashable/hashed by its name 2111 boolean isdir; 2112 int pos = -1; // position in cen table, -1 means the 2113 // entry does not exist in zip file 2114 IndexNode child; // first child 2115 IndexNode sibling; // next sibling 2116 2117 IndexNode() {} 2118 2119 IndexNode(byte[] name, boolean isdir) { 2120 name(name); 2121 this.isdir = isdir; 2122 this.pos = -1; 2123 } 2124 2125 IndexNode(byte[] name, int pos) { 2126 name(name); 2127 this.pos = pos; 2128 } 2129 2130 // constructor for initCEN() (1) remove trailing '/' (2) pad leading '/' 2131 IndexNode(byte[] cen, int pos, int nlen) { 2132 int noff = pos + CENHDR; 2133 if (cen[noff + nlen - 1] == '/') { 2134 isdir = true; 2135 nlen--; 2136 } 2137 if (nlen > 0 && cen[noff] == '/') { 2138 name = Arrays.copyOfRange(cen, noff, noff + nlen); 2139 } else { 2140 name = new byte[nlen + 1]; 2141 System.arraycopy(cen, noff, name, 1, nlen); 2142 name[0] = '/'; 2143 } 2144 name(normalize(name)); 2145 this.pos = pos; 2146 } 2147 2148 // Normalize the IndexNode.name field. 
2149 private byte[] normalize(byte[] path) { 2150 int len = path.length; 2151 if (len == 0) 2152 return path; 2153 byte prevC = 0; 2154 for (int pathPos = 0; pathPos < len; pathPos++) { 2155 byte c = path[pathPos]; 2156 if (c == '/' && prevC == '/') 2157 return normalize(path, pathPos - 1); 2158 prevC = c; 2159 } 2160 if (len > 1 && prevC == '/') { 2161 return Arrays.copyOf(path, len - 1); 2162 } 2163 return path; 2164 } 2165 2166 private byte[] normalize(byte[] path, int off) { 2167 // As we know we have at least one / to trim, we can reduce 2168 // the size of the resulting array 2169 byte[] to = new byte[path.length - 1]; 2170 int pathPos = 0; 2171 while (pathPos < off) { 2172 to[pathPos] = path[pathPos]; 2173 pathPos++; 2174 } 2175 int toPos = pathPos; 2176 byte prevC = 0; 2177 while (pathPos < path.length) { 2178 byte c = path[pathPos++]; 2179 if (c == '/' && prevC == '/') 2180 continue; 2181 to[toPos++] = c; 2182 prevC = c; 2183 } 2184 if (toPos > 1 && to[toPos - 1] == '/') 2185 toPos--; 2186 return (toPos == to.length) ? 
to : Arrays.copyOf(to, toPos); 2187 } 2188 2189 private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>(); 2190 2191 final static IndexNode keyOf(byte[] name) { // get a lookup key; 2192 IndexNode key = cachedKey.get(); 2193 if (key == null) { 2194 key = new IndexNode(name, -1); 2195 cachedKey.set(key); 2196 } 2197 return key.as(name); 2198 } 2199 2200 final void name(byte[] name) { 2201 this.name = name; 2202 this.hashcode = Arrays.hashCode(name); 2203 } 2204 2205 final IndexNode as(byte[] name) { // reuse the node, mostly 2206 name(name); // as a lookup "key" 2207 return this; 2208 } 2209 2210 boolean isDir() { 2211 return isdir; 2212 } 2213 2214 @Override 2215 public boolean equals(Object other) { 2216 if (!(other instanceof IndexNode)) { 2217 return false; 2218 } 2219 if (other instanceof ParentLookup) { 2220 return ((ParentLookup)other).equals(this); 2221 } 2222 return Arrays.equals(name, ((IndexNode)other).name); 2223 } 2224 2225 @Override 2226 public int hashCode() { 2227 return hashcode; 2228 } 2229 2230 @Override 2231 public String toString() { 2232 return new String(name) + (isdir ? 
" (dir)" : " ") + ", index: " + pos; 2233 } 2234 } 2235 2236 static class Entry extends IndexNode implements ZipFileAttributes { 2237 static final int CEN = 1; // entry read from cen 2238 static final int NEW = 2; // updated contents in bytes or file 2239 static final int FILECH = 3; // fch update in "file" 2240 static final int COPY = 4; // copy of a CEN entry 2241 2242 byte[] bytes; // updated content bytes 2243 Path file; // use tmp file to store bytes; 2244 int type = CEN; // default is the entry read from cen 2245 2246 // entry attributes 2247 int version; 2248 int flag; 2249 int posixPerms = -1; // posix permissions 2250 int method = -1; // compression method 2251 long mtime = -1; // last modification time (in DOS time) 2252 long atime = -1; // last access time 2253 long ctime = -1; // create time 2254 long crc = -1; // crc-32 of entry data 2255 long csize = -1; // compressed size of entry data 2256 long size = -1; // uncompressed size of entry data 2257 byte[] extra; 2258 2259 // CEN 2260 // The fields that are commented out below are not used by anyone and write() uses "0" 2261 // int versionMade; 2262 // int disk; 2263 // int attrs; 2264 // long attrsEx; 2265 long locoff; 2266 byte[] comment; 2267 2268 Entry(byte[] name, boolean isdir, int method) { 2269 name(name); 2270 this.isdir = isdir; 2271 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 2272 this.crc = 0; 2273 this.size = 0; 2274 this.csize = 0; 2275 this.method = method; 2276 } 2277 2278 @SuppressWarnings("unchecked") 2279 Entry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) { 2280 this(name, isdir, method); 2281 this.type = type; 2282 for (FileAttribute<?> attr : attrs) { 2283 String attrName = attr.name(); 2284 if (attrName.equals("posix:permissions")) { 2285 posixPerms = ZipUtils.permsToFlags((Set<PosixFilePermission>)attr.value()); 2286 } 2287 } 2288 } 2289 2290 Entry(byte[] name, Path file, int type, FileAttribute<?>... 
attrs) { 2291 this(name, type, false, METHOD_STORED, attrs); 2292 this.file = file; 2293 } 2294 2295 Entry(Entry e, int type) { 2296 name(e.name); 2297 this.isdir = e.isdir; 2298 this.version = e.version; 2299 this.ctime = e.ctime; 2300 this.atime = e.atime; 2301 this.mtime = e.mtime; 2302 this.crc = e.crc; 2303 this.size = e.size; 2304 this.csize = e.csize; 2305 this.method = e.method; 2306 this.extra = e.extra; 2307 /* 2308 this.versionMade = e.versionMade; 2309 this.disk = e.disk; 2310 this.attrs = e.attrs; 2311 this.attrsEx = e.attrsEx; 2312 */ 2313 this.locoff = e.locoff; 2314 this.comment = e.comment; 2315 this.posixPerms = e.posixPerms; 2316 this.type = type; 2317 } 2318 2319 Entry(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2320 readCEN(zipfs, inode); 2321 } 2322 2323 // Calculates a suitable base for the version number to 2324 // be used for fields version made by/version needed to extract. 2325 // The lower bytes of these 2 byte fields hold the version number 2326 // (value/10 = major; value%10 = minor) 2327 // For different features certain minimum versions apply: 2328 // stored = 10 (1.0), deflated = 20 (2.0), zip64 = 45 (4.5) 2329 private int version(boolean zip64) throws ZipException { 2330 if (zip64) { 2331 return 45; 2332 } 2333 if (method == METHOD_DEFLATED) 2334 return 20; 2335 else if (method == METHOD_STORED) 2336 return 10; 2337 throw new ZipException("unsupported compression method"); 2338 } 2339 2340 /** 2341 * Adds information about compatibility of file attribute information 2342 * to a version value. 2343 */ 2344 private int versionMadeBy(int version) { 2345 return (posixPerms < 0) ? 
version : 2346 VERSION_BASE_UNIX | (version & 0xff); 2347 } 2348 2349 ///////////////////// CEN ////////////////////// 2350 private void readCEN(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2351 byte[] cen = zipfs.cen; 2352 int pos = inode.pos; 2353 if (!cenSigAt(cen, pos)) 2354 throw new ZipException("invalid CEN header (bad signature)"); 2355 version = CENVER(cen, pos); 2356 flag = CENFLG(cen, pos); 2357 method = CENHOW(cen, pos); 2358 mtime = dosToJavaTime(CENTIM(cen, pos)); 2359 crc = CENCRC(cen, pos); 2360 csize = CENSIZ(cen, pos); 2361 size = CENLEN(cen, pos); 2362 int nlen = CENNAM(cen, pos); 2363 int elen = CENEXT(cen, pos); 2364 int clen = CENCOM(cen, pos); 2365 /* 2366 versionMade = CENVEM(cen, pos); 2367 disk = CENDSK(cen, pos); 2368 attrs = CENATT(cen, pos); 2369 attrsEx = CENATX(cen, pos); 2370 */ 2371 if (CENVEM_FA(cen, pos) == FILE_ATTRIBUTES_UNIX) { 2372 posixPerms = CENATX_PERMS(cen, pos) & 0xFFF; // 12 bits for setuid, setgid, sticky + perms 2373 } 2374 locoff = CENOFF(cen, pos); 2375 pos += CENHDR; 2376 this.name = inode.name; 2377 this.isdir = inode.isdir; 2378 this.hashcode = inode.hashcode; 2379 2380 pos += nlen; 2381 if (elen > 0) { 2382 extra = Arrays.copyOfRange(cen, pos, pos + elen); 2383 pos += elen; 2384 readExtra(zipfs); 2385 } 2386 if (clen > 0) { 2387 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2388 } 2389 } 2390 2391 private int writeCEN(OutputStream os) throws IOException { 2392 long csize0 = csize; 2393 long size0 = size; 2394 long locoff0 = locoff; 2395 int elen64 = 0; // extra for ZIP64 2396 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 2397 int elenEXTT = 0; // extra for Extended Timestamp 2398 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2399 2400 byte[] zname = isdir ? toDirectoryPath(name) : name; 2401 2402 // confirm size/length 2403 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2404 int elen = (extra != null) ? 
extra.length : 0; 2405 int eoff = 0; 2406 int clen = (comment != null) ? comment.length : 0; 2407 if (csize >= ZIP64_MINVAL) { 2408 csize0 = ZIP64_MINVAL; 2409 elen64 += 8; // csize(8) 2410 } 2411 if (size >= ZIP64_MINVAL) { 2412 size0 = ZIP64_MINVAL; // size(8) 2413 elen64 += 8; 2414 } 2415 if (locoff >= ZIP64_MINVAL) { 2416 locoff0 = ZIP64_MINVAL; 2417 elen64 += 8; // offset(8) 2418 } 2419 if (elen64 != 0) { 2420 elen64 += 4; // header and data sz 4 bytes 2421 } 2422 boolean zip64 = (elen64 != 0); 2423 int version0 = version(zip64); 2424 while (eoff + 4 < elen) { 2425 int tag = SH(extra, eoff); 2426 int sz = SH(extra, eoff + 2); 2427 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2428 foundExtraTime = true; 2429 } 2430 eoff += (4 + sz); 2431 } 2432 if (!foundExtraTime) { 2433 if (isWindows) { // use NTFS 2434 elenNTFS = 36; // total 36 bytes 2435 } else { // Extended Timestamp otherwise 2436 elenEXTT = 9; // only mtime in cen 2437 } 2438 } 2439 writeInt(os, CENSIG); // CEN header signature 2440 writeShort(os, versionMadeBy(version0)); // version made by 2441 writeShort(os, version0); // version needed to extract 2442 writeShort(os, flag); // general purpose bit flag 2443 writeShort(os, method); // compression method 2444 // last modification time 2445 writeInt(os, (int)javaToDosTime(mtime)); 2446 writeInt(os, crc); // crc-32 2447 writeInt(os, csize0); // compressed size 2448 writeInt(os, size0); // uncompressed size 2449 writeShort(os, nlen); 2450 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2451 2452 if (comment != null) { 2453 writeShort(os, Math.min(clen, 0xffff)); 2454 } else { 2455 writeShort(os, 0); 2456 } 2457 writeShort(os, 0); // starting disk number 2458 writeShort(os, 0); // internal file attributes (unused) 2459 writeInt(os, posixPerms > 0 ? 
posixPerms << 16 : 0); // external file 2460 // attributes, used for storing posix 2461 // permissions 2462 writeInt(os, locoff0); // relative offset of local header 2463 writeBytes(os, zname, 1, nlen); 2464 if (zip64) { 2465 writeShort(os, EXTID_ZIP64);// Zip64 extra 2466 writeShort(os, elen64 - 4); // size of "this" extra block 2467 if (size0 == ZIP64_MINVAL) 2468 writeLong(os, size); 2469 if (csize0 == ZIP64_MINVAL) 2470 writeLong(os, csize); 2471 if (locoff0 == ZIP64_MINVAL) 2472 writeLong(os, locoff); 2473 } 2474 if (elenNTFS != 0) { 2475 writeShort(os, EXTID_NTFS); 2476 writeShort(os, elenNTFS - 4); 2477 writeInt(os, 0); // reserved 2478 writeShort(os, 0x0001); // NTFS attr tag 2479 writeShort(os, 24); 2480 writeLong(os, javaToWinTime(mtime)); 2481 writeLong(os, javaToWinTime(atime)); 2482 writeLong(os, javaToWinTime(ctime)); 2483 } 2484 if (elenEXTT != 0) { 2485 writeShort(os, EXTID_EXTT); 2486 writeShort(os, elenEXTT - 4); 2487 if (ctime == -1) 2488 os.write(0x3); // mtime and atime 2489 else 2490 os.write(0x7); // mtime, atime and ctime 2491 writeInt(os, javaToUnixTime(mtime)); 2492 } 2493 if (extra != null) // whatever not recognized 2494 writeBytes(os, extra); 2495 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2496 writeBytes(os, comment); 2497 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2498 } 2499 2500 ///////////////////// LOC ////////////////////// 2501 2502 private int writeLOC(OutputStream os) throws IOException { 2503 byte[] zname = isdir ? toDirectoryPath(name) : name; 2504 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2505 int elen = (extra != null) ? 
extra.length : 0; 2506 boolean foundExtraTime = false; // if extra timestamp present 2507 int eoff = 0; 2508 int elen64 = 0; 2509 boolean zip64 = false; 2510 int elenEXTT = 0; 2511 int elenNTFS = 0; 2512 writeInt(os, LOCSIG); // LOC header signature 2513 if ((flag & FLAG_DATADESCR) != 0) { 2514 writeShort(os, version(false)); // version needed to extract 2515 writeShort(os, flag); // general purpose bit flag 2516 writeShort(os, method); // compression method 2517 // last modification time 2518 writeInt(os, (int)javaToDosTime(mtime)); 2519 // store size, uncompressed size, and crc-32 in data descriptor 2520 // immediately following compressed entry data 2521 writeInt(os, 0); 2522 writeInt(os, 0); 2523 writeInt(os, 0); 2524 } else { 2525 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2526 elen64 = 20; //headid(2) + size(2) + size(8) + csize(8) 2527 zip64 = true; 2528 } 2529 writeShort(os, version(zip64)); // version needed to extract 2530 writeShort(os, flag); // general purpose bit flag 2531 writeShort(os, method); // compression method 2532 // last modification time 2533 writeInt(os, (int)javaToDosTime(mtime)); 2534 writeInt(os, crc); // crc-32 2535 if (zip64) { 2536 writeInt(os, ZIP64_MINVAL); 2537 writeInt(os, ZIP64_MINVAL); 2538 } else { 2539 writeInt(os, csize); // compressed size 2540 writeInt(os, size); // uncompressed size 2541 } 2542 } 2543 while (eoff + 4 < elen) { 2544 int tag = SH(extra, eoff); 2545 int sz = SH(extra, eoff + 2); 2546 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2547 foundExtraTime = true; 2548 } 2549 eoff += (4 + sz); 2550 } 2551 if (!foundExtraTime) { 2552 if (isWindows) { 2553 elenNTFS = 36; // NTFS, total 36 bytes 2554 } else { // on unix use "ext time" 2555 elenEXTT = 9; 2556 if (atime != -1) 2557 elenEXTT += 4; 2558 if (ctime != -1) 2559 elenEXTT += 4; 2560 } 2561 } 2562 writeShort(os, nlen); 2563 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2564 writeBytes(os, zname, 1, nlen); 2565 if (zip64) { 2566 writeShort(os, 
EXTID_ZIP64); 2567 writeShort(os, 16); 2568 writeLong(os, size); 2569 writeLong(os, csize); 2570 } 2571 if (elenNTFS != 0) { 2572 writeShort(os, EXTID_NTFS); 2573 writeShort(os, elenNTFS - 4); 2574 writeInt(os, 0); // reserved 2575 writeShort(os, 0x0001); // NTFS attr tag 2576 writeShort(os, 24); 2577 writeLong(os, javaToWinTime(mtime)); 2578 writeLong(os, javaToWinTime(atime)); 2579 writeLong(os, javaToWinTime(ctime)); 2580 } 2581 if (elenEXTT != 0) { 2582 writeShort(os, EXTID_EXTT); 2583 writeShort(os, elenEXTT - 4);// size for the folowing data block 2584 int fbyte = 0x1; 2585 if (atime != -1) // mtime and atime 2586 fbyte |= 0x2; 2587 if (ctime != -1) // mtime, atime and ctime 2588 fbyte |= 0x4; 2589 os.write(fbyte); // flags byte 2590 writeInt(os, javaToUnixTime(mtime)); 2591 if (atime != -1) 2592 writeInt(os, javaToUnixTime(atime)); 2593 if (ctime != -1) 2594 writeInt(os, javaToUnixTime(ctime)); 2595 } 2596 if (extra != null) { 2597 writeBytes(os, extra); 2598 } 2599 return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT; 2600 } 2601 2602 // Data Descriptor 2603 private int writeEXT(OutputStream os) throws IOException { 2604 writeInt(os, EXTSIG); // EXT header signature 2605 writeInt(os, crc); // crc-32 2606 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2607 writeLong(os, csize); 2608 writeLong(os, size); 2609 return 24; 2610 } else { 2611 writeInt(os, csize); // compressed size 2612 writeInt(os, size); // uncompressed size 2613 return 16; 2614 } 2615 } 2616 2617 // read NTFS, UNIX and ZIP64 data from cen.extra 2618 private void readExtra(ZipFileSystem zipfs) throws IOException { 2619 if (extra == null) 2620 return; 2621 int elen = extra.length; 2622 int off = 0; 2623 int newOff = 0; 2624 while (off + 4 < elen) { 2625 // extra spec: HeaderID+DataSize+Data 2626 int pos = off; 2627 int tag = SH(extra, pos); 2628 int sz = SH(extra, pos + 2); 2629 pos += 4; 2630 if (pos + sz > elen) // invalid data 2631 break; 2632 switch (tag) { 2633 case EXTID_ZIP64 
: 2634 if (size == ZIP64_MINVAL) { 2635 if (pos + 8 > elen) // invalid zip64 extra 2636 break; // fields, just skip 2637 size = LL(extra, pos); 2638 pos += 8; 2639 } 2640 if (csize == ZIP64_MINVAL) { 2641 if (pos + 8 > elen) 2642 break; 2643 csize = LL(extra, pos); 2644 pos += 8; 2645 } 2646 if (locoff == ZIP64_MINVAL) { 2647 if (pos + 8 > elen) 2648 break; 2649 locoff = LL(extra, pos); 2650 } 2651 break; 2652 case EXTID_NTFS: 2653 if (sz < 32) 2654 break; 2655 pos += 4; // reserved 4 bytes 2656 if (SH(extra, pos) != 0x0001) 2657 break; 2658 if (SH(extra, pos + 2) != 24) 2659 break; 2660 // override the loc field, datatime here is 2661 // more "accurate" 2662 mtime = winToJavaTime(LL(extra, pos + 4)); 2663 atime = winToJavaTime(LL(extra, pos + 12)); 2664 ctime = winToJavaTime(LL(extra, pos + 20)); 2665 break; 2666 case EXTID_EXTT: 2667 // spec says the Extened timestamp in cen only has mtime 2668 // need to read the loc to get the extra a/ctime, if flag 2669 // "zipinfo-time" is not specified to false; 2670 // there is performance cost (move up to loc and read) to 2671 // access the loc table foreach entry; 2672 if (zipfs.noExtt) { 2673 if (sz == 5) 2674 mtime = unixToJavaTime(LG(extra, pos + 1)); 2675 break; 2676 } 2677 byte[] buf = new byte[LOCHDR]; 2678 if (zipfs.readFullyAt(buf, 0, buf.length , locoff) 2679 != buf.length) 2680 throw new ZipException("loc: reading failed"); 2681 if (!locSigAt(buf, 0)) 2682 throw new ZipException("loc: wrong sig ->" 2683 + Long.toString(getSig(buf, 0), 16)); 2684 int locElen = LOCEXT(buf); 2685 if (locElen < 9) // EXTT is at least 9 bytes 2686 break; 2687 int locNlen = LOCNAM(buf); 2688 buf = new byte[locElen]; 2689 if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen) 2690 != buf.length) 2691 throw new ZipException("loc extra: reading failed"); 2692 int locPos = 0; 2693 while (locPos + 4 < buf.length) { 2694 int locTag = SH(buf, locPos); 2695 int locSZ = SH(buf, locPos + 2); 2696 locPos += 4; 2697 if (locTag != 
EXTID_EXTT) { 2698 locPos += locSZ; 2699 continue; 2700 } 2701 int end = locPos + locSZ - 4; 2702 int flag = CH(buf, locPos++); 2703 if ((flag & 0x1) != 0 && locPos <= end) { 2704 mtime = unixToJavaTime(LG(buf, locPos)); 2705 locPos += 4; 2706 } 2707 if ((flag & 0x2) != 0 && locPos <= end) { 2708 atime = unixToJavaTime(LG(buf, locPos)); 2709 locPos += 4; 2710 } 2711 if ((flag & 0x4) != 0 && locPos <= end) { 2712 ctime = unixToJavaTime(LG(buf, locPos)); 2713 } 2714 break; 2715 } 2716 break; 2717 default: // unknown tag 2718 System.arraycopy(extra, off, extra, newOff, sz + 4); 2719 newOff += (sz + 4); 2720 } 2721 off += (sz + 4); 2722 } 2723 if (newOff != 0 && newOff != extra.length) 2724 extra = Arrays.copyOf(extra, newOff); 2725 else 2726 extra = null; 2727 } 2728 2729 @Override 2730 public String toString() { 2731 StringBuilder sb = new StringBuilder(1024); 2732 Formatter fm = new Formatter(sb); 2733 fm.format(" name : %s%n", new String(name)); 2734 fm.format(" creationTime : %tc%n", creationTime().toMillis()); 2735 fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis()); 2736 fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis()); 2737 fm.format(" isRegularFile : %b%n", isRegularFile()); 2738 fm.format(" isDirectory : %b%n", isDirectory()); 2739 fm.format(" isSymbolicLink : %b%n", isSymbolicLink()); 2740 fm.format(" isOther : %b%n", isOther()); 2741 fm.format(" fileKey : %s%n", fileKey()); 2742 fm.format(" size : %d%n", size()); 2743 fm.format(" compressedSize : %d%n", compressedSize()); 2744 fm.format(" crc : %x%n", crc()); 2745 fm.format(" method : %d%n", method()); 2746 Set<PosixFilePermission> permissions = storedPermissions().orElse(null); 2747 if (permissions != null) { 2748 fm.format(" permissions : %s%n", permissions); 2749 } 2750 fm.close(); 2751 return sb.toString(); 2752 } 2753 2754 ///////// basic file attributes /////////// 2755 @Override 2756 public FileTime creationTime() { 2757 return FileTime.fromMillis(ctime == -1 ? 
mtime : ctime); 2758 } 2759 2760 @Override 2761 public boolean isDirectory() { 2762 return isDir(); 2763 } 2764 2765 @Override 2766 public boolean isOther() { 2767 return false; 2768 } 2769 2770 @Override 2771 public boolean isRegularFile() { 2772 return !isDir(); 2773 } 2774 2775 @Override 2776 public FileTime lastAccessTime() { 2777 return FileTime.fromMillis(atime == -1 ? mtime : atime); 2778 } 2779 2780 @Override 2781 public FileTime lastModifiedTime() { 2782 return FileTime.fromMillis(mtime); 2783 } 2784 2785 @Override 2786 public long size() { 2787 return size; 2788 } 2789 2790 @Override 2791 public boolean isSymbolicLink() { 2792 return false; 2793 } 2794 2795 @Override 2796 public Object fileKey() { 2797 return null; 2798 } 2799 2800 ///////// zip file attributes /////////// 2801 2802 @Override 2803 public long compressedSize() { 2804 return csize; 2805 } 2806 2807 @Override 2808 public long crc() { 2809 return crc; 2810 } 2811 2812 @Override 2813 public int method() { 2814 return method; 2815 } 2816 2817 @Override 2818 public byte[] extra() { 2819 if (extra != null) 2820 return Arrays.copyOf(extra, extra.length); 2821 return null; 2822 } 2823 2824 @Override 2825 public byte[] comment() { 2826 if (comment != null) 2827 return Arrays.copyOf(comment, comment.length); 2828 return null; 2829 } 2830 2831 @Override 2832 public Optional<Set<PosixFilePermission>> storedPermissions() { 2833 Set<PosixFilePermission> perms = null; 2834 if (posixPerms != -1) { 2835 perms = new HashSet<>(PosixFilePermission.values().length); 2836 for (PosixFilePermission perm : PosixFilePermission.values()) { 2837 if ((posixPerms & ZipUtils.permToFlag(perm)) != 0) { 2838 perms.add(perm); 2839 } 2840 } 2841 } 2842 return Optional.ofNullable(perms); 2843 } 2844 } 2845 2846 final class PosixEntry extends Entry implements PosixFileAttributes { 2847 private UserPrincipal owner = defaultOwner; 2848 private GroupPrincipal group = defaultGroup; 2849 2850 PosixEntry(byte[] name, boolean isdir, 
int method) { 2851 super(name, isdir, method); 2852 } 2853 2854 PosixEntry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) { 2855 super(name, type, isdir, method, attrs); 2856 } 2857 2858 PosixEntry(byte[] name, Path file, int type, FileAttribute<?>... attrs) { 2859 super(name, file, type, attrs); 2860 } 2861 2862 PosixEntry(PosixEntry e, int type) { 2863 super(e, type); 2864 this.owner = e.owner; 2865 this.group = e.group; 2866 } 2867 2868 PosixEntry(ZipFileSystem zipfs, IndexNode inode) throws IOException { 2869 super(zipfs, inode); 2870 } 2871 2872 @Override 2873 public UserPrincipal owner() { 2874 return owner; 2875 } 2876 2877 @Override 2878 public GroupPrincipal group() { 2879 return group; 2880 } 2881 2882 @Override 2883 public Set<PosixFilePermission> permissions() { 2884 return storedPermissions().orElse(Set.copyOf(defaultPermissions)); 2885 } 2886 } 2887 2888 private static class ExistingChannelCloser { 2889 private final Path path; 2890 private final SeekableByteChannel ch; 2891 private final Set<InputStream> streams; 2892 ExistingChannelCloser(Path path, 2893 SeekableByteChannel ch, 2894 Set<InputStream> streams) { 2895 this.path = path; 2896 this.ch = ch; 2897 this.streams = streams; 2898 } 2899 2900 /** 2901 * If there are no more outstanding streams, close the channel and 2902 * delete the backing file 2903 * 2904 * @return true if we're done and closed the backing file, 2905 * otherwise false 2906 * @throws IOException 2907 */ 2908 private boolean closeAndDeleteIfDone() throws IOException { 2909 if (streams.isEmpty()) { 2910 ch.close(); 2911 Files.delete(path); 2912 return true; 2913 } 2914 return false; 2915 } 2916 } 2917 2918 // purely for parent lookup, so we don't have to copy the parent 2919 // name every time 2920 static class ParentLookup extends IndexNode { 2921 int len; 2922 ParentLookup() {} 2923 2924 final ParentLookup as(byte[] name, int len) { // as a lookup "key" 2925 name(name, len); 2926 return this; 2927 
} 2928 2929 void name(byte[] name, int len) { 2930 this.name = name; 2931 this.len = len; 2932 // calculate the hashcode the same way as Arrays.hashCode() does 2933 int result = 1; 2934 for (int i = 0; i < len; i++) 2935 result = 31 * result + name[i]; 2936 this.hashcode = result; 2937 } 2938 2939 @Override 2940 public boolean equals(Object other) { 2941 if (!(other instanceof IndexNode)) { 2942 return false; 2943 } 2944 byte[] oname = ((IndexNode)other).name; 2945 return Arrays.equals(name, 0, len, 2946 oname, 0, oname.length); 2947 } 2948 } 2949 }