1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package jdk.nio.zipfs; 27 28 import java.io.BufferedOutputStream; 29 import java.io.ByteArrayInputStream; 30 import java.io.ByteArrayOutputStream; 31 import java.io.EOFException; 32 import java.io.FilterOutputStream; 33 import java.io.IOException; 34 import java.io.InputStream; 35 import java.io.OutputStream; 36 import java.nio.ByteBuffer; 37 import java.nio.MappedByteBuffer; 38 import java.nio.channels.FileChannel; 39 import java.nio.channels.FileLock; 40 import java.nio.channels.ReadableByteChannel; 41 import java.nio.channels.SeekableByteChannel; 42 import java.nio.channels.WritableByteChannel; 43 import java.nio.file.*; 44 import java.nio.file.attribute.FileAttribute; 45 import java.nio.file.attribute.FileTime; 46 import java.nio.file.attribute.GroupPrincipal; 47 import java.nio.file.attribute.PosixFilePermission; 48 import java.nio.file.attribute.UserPrincipal; 49 import java.nio.file.attribute.UserPrincipalLookupService; 50 import java.nio.file.spi.FileSystemProvider; 51 import java.security.AccessControlException; 52 import java.security.AccessController; 53 import java.security.PrivilegedAction; 54 import java.security.PrivilegedActionException; 55 import java.security.PrivilegedExceptionAction; 56 import java.util.*; 57 import java.util.concurrent.locks.ReadWriteLock; 58 import java.util.concurrent.locks.ReentrantReadWriteLock; 59 import java.util.regex.Pattern; 60 import java.util.zip.CRC32; 61 import java.util.zip.Deflater; 62 import java.util.zip.DeflaterOutputStream; 63 import java.util.zip.Inflater; 64 import java.util.zip.InflaterInputStream; 65 import java.util.zip.ZipException; 66 67 import static java.lang.Boolean.TRUE; 68 import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; 69 import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; 70 import static java.nio.file.StandardOpenOption.APPEND; 71 import static java.nio.file.StandardOpenOption.CREATE; 72 import static java.nio.file.StandardOpenOption.CREATE_NEW; 73 
import static java.nio.file.StandardOpenOption.READ;
import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
import static java.nio.file.StandardOpenOption.WRITE;
import static jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;

/**
 * A FileSystem built on a zip file
 *
 * @author Xueming Shen
 */
class ZipFileSystem extends FileSystem {
    // statics
    private static final boolean isWindows = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)() -> System.getProperty("os.name")
                                                   .startsWith("Windows"));
    // keys recognized in the env map passed to the constructor
    private static final String POSIX_OPT = "posix";
    private static final String DEFAULT_OWNER_OPT = "defaultOwner";
    private static final String DEFAULT_GROUP_OPT = "defaultGroup";
    private static final String DEFAULT_PERMISSIONS_OPT = "defaultPermissions";
    // principal name used when no real owner/group name is available
    private static final String DEFAULT_PRINCIPAL_NAME = "<zipfs_default>";

    private static final UserPrincipal DEFAULT_OWNER = initDefaultOwner();
    private static final GroupPrincipal DEFAULT_GROUP = () -> DEFAULT_PRINCIPAL_NAME;
    private static final Set<PosixFilePermission> DEFAULT_PERMISSIONS = Set.of(
            PosixFilePermission.OWNER_READ,
            PosixFilePermission.OWNER_WRITE,
            PosixFilePermission.GROUP_READ);

    private final ZipFileSystemProvider provider;
    private final Path zfpath;            // path of the underlying zip file
    final ZipCoder zc;                    // encoder/decoder for entry names/comments
    private final ZipPath rootdir;        // the single root directory "/"
    private boolean readOnly = false;     // readonly file system

    // configurable by env map
    private final boolean noExtt;         // see readExtra()
    private final boolean useTempFile;    // use a temp file for newOS, default
                                          // is to use BAOS for better performance

    private final boolean forceEnd64;     // always write a ZIP64 END record
    private final int defaultMethod;      // METHOD_STORED if "noCompression=true"
                                          // METHOD_DEFLATED otherwise

    // POSIX support
    final boolean supportPosix;
    private final UserPrincipal defaultOwner;
    private final GroupPrincipal defaultGroup;
    private final Set<PosixFilePermission> defaultPermissions;

    private final Set<String> supportedFileAttributeViews;

    // Computes the default owner principal from the "user.name" system
    // property; falls back to DEFAULT_PRINCIPAL_NAME when the property is
    // unreadable (security manager denies access) or unset.
    private static UserPrincipal initDefaultOwner() {
        String userDotName;
        try {
            userDotName = AccessController.doPrivileged(
                (PrivilegedAction<String>)() -> System.getProperty("user.name"));
        } catch (AccessControlException e) {
            userDotName = null;
        }
        String defaultUserName = userDotName == null ? DEFAULT_PRINCIPAL_NAME : userDotName;
        return () -> defaultUserName;
    }

    // Creates a ZipFileSystem over the zip file at zfpath, configured by the
    // env map ("encoding", "create", "posix", "noCompression", "useTempFile",
    // "forceZIP64End", ...). If the file does not exist it is created when
    // "create" is set; otherwise FileSystemNotFoundException is thrown.
    // IllegalArgumentException is thrown for env values of the wrong type.
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = isTrue(env, "useTempFile");
        this.forceEnd64 = isTrue(env, "forceZIP64End");
        this.supportPosix = isTrue(env, POSIX_OPT);
        if (supportPosix) {
            // owner/group/permissions may be overridden via the env map, but
            // only with values of the expected attribute types
            Object o = env.get(DEFAULT_OWNER_OPT);
            if (o == null) {
                defaultOwner = DEFAULT_OWNER;
            } else {
                if (o instanceof UserPrincipal) {
                    defaultOwner = (UserPrincipal)o;
                } else {
                    throw new IllegalArgumentException("Value for property " +
                        DEFAULT_OWNER_OPT + " must be of type " +
                        UserPrincipal.class);
                }
            }
            o = env.get(DEFAULT_GROUP_OPT);
            if (o == null) {
                defaultGroup = DEFAULT_GROUP;
            } else {
                if (o instanceof GroupPrincipal) {
                    defaultGroup = (GroupPrincipal)o;
                } else {
                    throw new IllegalArgumentException("Value for property " +
                        DEFAULT_GROUP_OPT + " must be of type " +
                        GroupPrincipal.class);
                }
            }
            o = env.get(DEFAULT_PERMISSIONS_OPT);
            if (o == null) {
                defaultPermissions = DEFAULT_PERMISSIONS;
            } else {
                if (o instanceof Set) {
                    // copy element-by-element so a non-permission element is
                    // rejected eagerly rather than failing later
                    defaultPermissions = new HashSet<PosixFilePermission>();
                    Set<?> perms = (Set<?>)o;
                    for (Object o2 : perms) {
                        if (o2 instanceof PosixFilePermission) {
                            defaultPermissions.add((PosixFilePermission)o2);
                        } else {
                            throw new IllegalArgumentException(DEFAULT_PERMISSIONS_OPT +
                                " must only contain objects of type " +
                                PosixFilePermission.class);
                        }
                    }
                } else {
                    throw new IllegalArgumentException("Value for property " +
                        DEFAULT_PERMISSIONS_OPT + " must be of type " + Set.class);
                }
            }
            supportedFileAttributeViews = Set.of("basic", "posix", "zip");
        } else {
            defaultOwner = DEFAULT_OWNER;
            defaultGroup = DEFAULT_GROUP;
            defaultPermissions = DEFAULT_PERMISSIONS;
            supportedFileAttributeViews = Set.of("basic", "zip");
        }
        this.defaultMethod = isTrue(env, "noCompression") ? METHOD_STORED: METHOD_DEFLATED;
        if (Files.notExists(zfpath)) {
            // create a new zip if not exists
            if (isTrue(env, "create")) {
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    // an empty zip is just an END record
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)() -> Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            // don't leak the open channel when the central directory is bad
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;
    }

    // returns true if there is a name=true/"true" setting in env
    private static boolean isTrue(Map<String, ?> env, String name) {
        return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // Throws ReadOnlyFileSystemException if this file system cannot be written.
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }

    void setReadOnly() {
        this.readOnly = true;
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        return List.of(rootdir);
    }

    ZipPath getRootDir() {
        return rootdir;
    }

    // Joins "first" and "more" with '/' into a single ZipPath; empty
    // components in "more" are skipped.
    @Override
    public ZipPath getPath(String first, String... more) {
        if (more.length == 0) {
            return new ZipPath(this, first);
        }
        StringBuilder sb = new StringBuilder();
        sb.append(first);
        for (String path : more) {
            if (path.length() > 0) {
                if (sb.length() > 0) {
                    sb.append('/');
                }
                sb.append(path);
            }
        }
        return new ZipPath(this, sb.toString());
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        return List.of(new ZipFileStore(rootdir));
    }

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    // Builds a PathMatcher from "glob:<pattern>" or "regex:<pattern>".
    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos ==
            syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) {
            // translate the glob into an equivalent regex
            expr = toRegexPattern(input);
        } else {
            if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    // Closes the file system: marks it closed, closes any outstanding entry
    // input streams, syncs pending updates back to the zip file, releases
    // pooled inflaters/deflaters and deletes temp files. Idempotent: a
    // second call returns immediately.
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            // iterate over a copy: closing a stream removes it from "streams"
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is : copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> {
                sync(); return null;
            });
            ch.close();                 // close the ch just in case no update
                                        // and sync didn't close the ch
        } catch (PrivilegedActionException e) {
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        // best-effort deletion of temp files; the first failure is kept and
        // later ones attached as suppressed exceptions
        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p : tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
409 } 410 } 411 } 412 provider.removeFileSystem(zfpath, this); 413 if (ioe != null) 414 throw ioe; 415 } 416 417 ZipFileAttributes getFileAttributes(byte[] path) 418 throws IOException 419 { 420 Entry e; 421 beginRead(); 422 try { 423 ensureOpen(); 424 e = getEntry(path); 425 if (e == null) { 426 IndexNode inode = getInode(path); 427 if (inode == null) 428 return null; 429 // pseudo directory, uses METHOD_STORED 430 e = new Entry(inode.name, inode.isdir, METHOD_STORED); 431 e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp; 432 } 433 } finally { 434 endRead(); 435 } 436 return e; 437 } 438 439 void checkAccess(byte[] path) throws IOException { 440 beginRead(); 441 try { 442 ensureOpen(); 443 // is it necessary to readCEN as a sanity check? 444 if (getInode(path) == null) { 445 throw new NoSuchFileException(toString()); 446 } 447 448 } finally { 449 endRead(); 450 } 451 } 452 453 void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime) 454 throws IOException 455 { 456 checkWritable(); 457 beginWrite(); 458 try { 459 ensureOpen(); 460 Entry e = getEntry(path); // ensureOpen checked 461 if (e == null) 462 throw new NoSuchFileException(getString(path)); 463 if (e.type == Entry.CEN) 464 e.type = Entry.COPY; // copy e 465 if (mtime != null) 466 e.mtime = mtime.toMillis(); 467 if (atime != null) 468 e.atime = atime.toMillis(); 469 if (ctime != null) 470 e.ctime = ctime.toMillis(); 471 update(e); 472 } finally { 473 endWrite(); 474 } 475 } 476 477 void setOwner(byte[] path, UserPrincipal owner) throws IOException { 478 checkWritable(); 479 beginWrite(); 480 try { 481 ensureOpen(); 482 Entry e = getEntry(path); // ensureOpen checked 483 if (e == null) { 484 throw new NoSuchFileException(getString(path)); 485 } 486 // as the owner information is not persistent, we don't need to 487 // change e.type to Entry.COPY 488 e.owner = owner; 489 update(e); 490 } finally { 491 endWrite(); 492 } 493 } 494 495 void setPermissions(byte[] path, Set<PosixFilePermission> 
                                                           perms)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null) {
                throw new NoSuchFileException(getString(path));
            }
            if (e.type == Entry.CEN) {
                e.type = Entry.COPY;     // copy e
            }
            // -1 means "no permission bits stored" for this entry
            e.posixPerms = perms == null ? -1 : ZipUtils.permsToFlags(perms);
            update(e);
        } finally {
            endWrite();
        }
    }

    // Sets the (non-persistent) group of the entry at "path".
    void setGroup(byte[] path, GroupPrincipal group) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null) {
                throw new NoSuchFileException(getString(path));
            }
            // as the group information is not persistent, we don't need to
            // change e.type to Entry.COPY
            e.group = group;
            update(e);
        } finally {
            endWrite();
        }
    }

    // Returns true if an entry (or implicit directory node) exists at "path".
    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    // Returns true if "path" denotes a directory node.
    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(ZipPath dir,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            byte[] path = dir.getResolvedPath();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                // (1) Assume each path from the zip file itself is "normalized"
                // (2) IndexNode.name is absolute.
                //     see IndexNode(byte[],int,int)
                // (3) If parent "dir" is relative when ZipDirectoryStream
                //     is created, the returned child path needs to be relative
                //     as well.
                byte[] cname = child.name;
                ZipPath childPath = new ZipPath(this, cname, true);
                ZipPath childFileName = childPath.getFileName();
                ZipPath zpath = dir.resolve(childFileName);
                if (filter == null || filter.accept(zpath))
                    list.add(zpath);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    // Creates a directory entry at "dir"; all parent directories must already
    // exist (ZIP itself does not require that, but the FileSystem API does).
    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        //  dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW, true, METHOD_STORED, attrs);
            update(e);
        } finally {
            endWrite();
        }
    }

    // Copies entry "src" to "dst"; when deletesrc is true this is a move and
    // the source entry is removed afterwards.
    void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption...
                  options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry(src);  // ensureOpen checked

            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry(dst);
            if (eDst != null) {
                // overwriting requires REPLACE_EXISTING
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            // if the source is a not-yet-written entry, its data lives in
            // memory (bytes) or in a temp file (file) rather than in the zip
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime= u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption...
options) 673 throws IOException 674 { 675 checkWritable(); 676 boolean hasCreateNew = false; 677 boolean hasCreate = false; 678 boolean hasAppend = false; 679 boolean hasTruncate = false; 680 for (OpenOption opt : options) { 681 if (opt == READ) 682 throw new IllegalArgumentException("READ not allowed"); 683 if (opt == CREATE_NEW) 684 hasCreateNew = true; 685 if (opt == CREATE) 686 hasCreate = true; 687 if (opt == APPEND) 688 hasAppend = true; 689 if (opt == TRUNCATE_EXISTING) 690 hasTruncate = true; 691 } 692 if (hasAppend && hasTruncate) 693 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 694 beginRead(); // only need a readlock, the "update()" will 695 try { // try to obtain a writelock when the os is 696 ensureOpen(); // being closed. 697 Entry e = getEntry(path); 698 if (e != null) { 699 if (e.isDir() || hasCreateNew) 700 throw new FileAlreadyExistsException(getString(path)); 701 if (hasAppend) { 702 InputStream is = getInputStream(e); 703 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 704 is.transferTo(os); 705 is.close(); 706 return os; 707 } 708 return getOutputStream(new Entry(e, Entry.NEW)); 709 } else { 710 if (!hasCreate && !hasCreateNew) 711 throw new NoSuchFileException(getString(path)); 712 checkParents(path); 713 return getOutputStream(new Entry(path, Entry.NEW, false, defaultMethod)); 714 } 715 } finally { 716 endRead(); 717 } 718 } 719 720 // Returns an input stream for reading the contents of the specified 721 // file entry. 722 InputStream newInputStream(byte[] path) throws IOException { 723 beginRead(); 724 try { 725 ensureOpen(); 726 Entry e = getEntry(path); 727 if (e == null) 728 throw new NoSuchFileException(getString(path)); 729 if (e.isDir()) 730 throw new FileSystemException(getString(path), "is a directory", null); 731 return getInputStream(e); 732 } finally { 733 endRead(); 734 } 735 } 736 737 private void checkOptions(Set<? 
            extends OpenOption> options) {
        // check that no option is null and each option is an instance of
        // StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }


    // Returns an output SeekableByteChannel for either
    // (1) writing the contents of a new entry, if the entry doesn't exist, or
    // (2) updating/replacing the contents of an existing entry.
    // Note: The content is not compressed.
    private class EntryOutputChannel extends ByteArrayChannel {
        Entry e;

        EntryOutputChannel(Entry e) throws IOException {
            // size the buffer from the existing entry when known
            super(e.size > 0? (int)e.size : 8192, false);
            this.e = e;
            if (e.mtime == -1)
                e.mtime = System.currentTimeMillis();
            if (e.method == -1)
                e.method = defaultMethod;
            // store size, compressed size, and crc-32 in datadescriptor
            e.flag = FLAG_DATADESCR;
            if (zc.isUTF8())
                e.flag |= FLAG_USE_UTF8;
        }

        // Flushes the accumulated bytes into the entry and registers the
        // update with the enclosing file system.
        @Override
        public void close() throws IOException {
            e.bytes = toByteArray();
            e.size = e.bytes.length;
            e.crc = -1;
            super.close();
            update(e);
        }
    }

    // Compression method for a new entry; attrs are currently not consulted.
    private int getCompressMethod(FileAttribute<?>... attrs) {
        return defaultMethod;
    }

    // Returns a Writable/ReadByteChannel for now. Might consider using
    // newFileChannel() instead, which dumps the entry data into a regular
    // file on the default file system and creates a FileChannel on top of
    // it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>...
                                       attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();    // only need a readlock, the "update()" will obtain
                            // the writelock when the channel is closed
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e != null) {
                    if (e.isDir() || options.contains(CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    SeekableByteChannel sbc =
                        new EntryOutputChannel(new Entry(e, Entry.NEW));
                    if (options.contains(APPEND)) {
                        try (InputStream is = getInputStream(e)) {  // copyover
                            byte[] buf = new byte[8192];
                            ByteBuffer bb = ByteBuffer.wrap(buf);
                            int n;
                            while ((n = is.read(buf)) != -1) {
                                bb.position(0);
                                bb.limit(n);
                                sbc.write(bb);
                            }
                        }
                    }
                    return sbc;
                }
                if (!options.contains(CREATE) && !options.contains(CREATE_NEW))
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return new EntryOutputChannel(
                    new Entry(path, Entry.NEW, false, getCompressMethod(attrs), attrs));

            } finally {
                endRead();
            }
        } else {
            // read-only channel: snapshot the entry content into memory
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                try (InputStream is = getInputStream(e)) {
                    // TBD: if (e.size < NNNNN);
                    return new ByteArrayChannel(is.readAllBytes(), true);
                }
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>...
                               attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            // an Entry.FILECH entry already has its data in a temp file;
            // reuse it instead of creating another copy
            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH, attrs);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = getCompressMethod(attrs);
            }
            // is there a better way to hook into the FileChannel's close method?
            // Delegating wrapper: every operation forwards to fch; only
            // implCloseChannel() adds behavior (commit or cleanup).
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                // memory-mapping a temp-file-backed entry is not supported
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                // On close: a writable channel commits the temp file's bytes
                // back into the zip entry; a read-only channel just discards
                // the temp file it created.
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // temp files created for entries; deleted on close()
    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());

    // Creates a temp file next to the zip file; when "path" names an existing
    // entry, its content is copied into the temp file first.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
1017 private void checkParents(byte[] path) throws IOException { 1018 beginRead(); 1019 try { 1020 while ((path = getParent(path)) != null && 1021 path != ROOTPATH) { 1022 if (!inodes.containsKey(IndexNode.keyOf(path))) { 1023 throw new NoSuchFileException(getString(path)); 1024 } 1025 } 1026 } finally { 1027 endRead(); 1028 } 1029 } 1030 1031 private static byte[] ROOTPATH = new byte[] { '/' }; 1032 private static byte[] getParent(byte[] path) { 1033 int off = getParentOff(path); 1034 if (off <= 1) 1035 return ROOTPATH; 1036 return Arrays.copyOf(path, off); 1037 } 1038 1039 private static int getParentOff(byte[] path) { 1040 int off = path.length - 1; 1041 if (off > 0 && path[off] == '/') // isDirectory 1042 off--; 1043 while (off > 0 && path[off] != '/') { off--; } 1044 return off; 1045 } 1046 1047 private final void beginWrite() { 1048 rwlock.writeLock().lock(); 1049 } 1050 1051 private final void endWrite() { 1052 rwlock.writeLock().unlock(); 1053 } 1054 1055 private final void beginRead() { 1056 rwlock.readLock().lock(); 1057 } 1058 1059 private final void endRead() { 1060 rwlock.readLock().unlock(); 1061 } 1062 1063 /////////////////////////////////////////////////////////////////// 1064 1065 private volatile boolean isOpen = true; 1066 private final SeekableByteChannel ch; // channel to the zipfile 1067 final byte[] cen; // CEN & ENDHDR 1068 private END end; 1069 private long locpos; // position of first LOC header (usually 0) 1070 1071 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 1072 1073 // name -> pos (in cen), IndexNode itself can be used as a "key" 1074 private LinkedHashMap<IndexNode, IndexNode> inodes; 1075 1076 final byte[] getBytes(String name) { 1077 return zc.getBytes(name); 1078 } 1079 1080 final String getString(byte[] name) { 1081 return zc.toString(name); 1082 } 1083 1084 @SuppressWarnings("deprecation") 1085 protected void finalize() throws IOException { 1086 close(); 1087 } 1088 1089 // Reads len bytes of data from the 
specified offset into buf. 1090 // Returns the total number of bytes read. 1091 // Each/every byte read from here (except the cen, which is mapped). 1092 final long readFullyAt(byte[] buf, int off, long len, long pos) 1093 throws IOException 1094 { 1095 ByteBuffer bb = ByteBuffer.wrap(buf); 1096 bb.position(off); 1097 bb.limit((int)(off + len)); 1098 return readFullyAt(bb, pos); 1099 } 1100 1101 private final long readFullyAt(ByteBuffer bb, long pos) 1102 throws IOException 1103 { 1104 synchronized(ch) { 1105 return ch.position(pos).read(bb); 1106 } 1107 } 1108 1109 // Searches for end of central directory (END) header. The contents of 1110 // the END header will be read and placed in endbuf. Returns the file 1111 // position of the END header, otherwise returns -1 if the END header 1112 // was not found or an error occurred. 1113 private END findEND() throws IOException 1114 { 1115 byte[] buf = new byte[READBLOCKSZ]; 1116 long ziplen = ch.size(); 1117 long minHDR = (ziplen - END_MAXLEN) > 0 ? 
ziplen - END_MAXLEN : 0; 1118 long minPos = minHDR - (buf.length - ENDHDR); 1119 1120 for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) 1121 { 1122 int off = 0; 1123 if (pos < 0) { 1124 // Pretend there are some NUL bytes before start of file 1125 off = (int)-pos; 1126 Arrays.fill(buf, 0, off, (byte)0); 1127 } 1128 int len = buf.length - off; 1129 if (readFullyAt(buf, off, len, pos + off) != len) 1130 zerror("zip END header not found"); 1131 1132 // Now scan the block backwards for END header signature 1133 for (int i = buf.length - ENDHDR; i >= 0; i--) { 1134 if (buf[i+0] == (byte)'P' && 1135 buf[i+1] == (byte)'K' && 1136 buf[i+2] == (byte)'\005' && 1137 buf[i+3] == (byte)'\006' && 1138 (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) { 1139 // Found END header 1140 buf = Arrays.copyOfRange(buf, i, i + ENDHDR); 1141 END end = new END(); 1142 end.endsub = ENDSUB(buf); 1143 end.centot = ENDTOT(buf); 1144 end.cenlen = ENDSIZ(buf); 1145 end.cenoff = ENDOFF(buf); 1146 end.comlen = ENDCOM(buf); 1147 end.endpos = pos + i; 1148 // try if there is zip64 end; 1149 byte[] loc64 = new byte[ZIP64_LOCHDR]; 1150 if (end.endpos < ZIP64_LOCHDR || 1151 readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR) 1152 != loc64.length || 1153 !locator64SigAt(loc64, 0)) { 1154 return end; 1155 } 1156 long end64pos = ZIP64_LOCOFF(loc64); 1157 byte[] end64buf = new byte[ZIP64_ENDHDR]; 1158 if (readFullyAt(end64buf, 0, end64buf.length, end64pos) 1159 != end64buf.length || 1160 !end64SigAt(end64buf, 0)) { 1161 return end; 1162 } 1163 // end64 found, 1164 long cenlen64 = ZIP64_ENDSIZ(end64buf); 1165 long cenoff64 = ZIP64_ENDOFF(end64buf); 1166 long centot64 = ZIP64_ENDTOT(end64buf); 1167 // double-check 1168 if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL || 1169 cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL || 1170 centot64 != end.centot && end.centot != ZIP64_MINVAL32) { 1171 return end; 1172 } 1173 // to use the end64 values 1174 
end.cenlen = cenlen64; 1175 end.cenoff = cenoff64; 1176 end.centot = (int)centot64; // assume total < 2g 1177 end.endpos = end64pos; 1178 return end; 1179 } 1180 } 1181 } 1182 zerror("zip END header not found"); 1183 return null; //make compiler happy 1184 } 1185 1186 // Reads zip file central directory. Returns the file position of first 1187 // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL 1188 // then the error was a zip format error and zip->msg has the error text. 1189 // Always pass in -1 for knownTotal; it's used for a recursive call. 1190 private byte[] initCEN() throws IOException { 1191 end = findEND(); 1192 if (end.endpos == 0) { 1193 inodes = new LinkedHashMap<>(10); 1194 locpos = 0; 1195 buildNodeTree(); 1196 return null; // only END header present 1197 } 1198 if (end.cenlen > end.endpos) 1199 zerror("invalid END header (bad central directory size)"); 1200 long cenpos = end.endpos - end.cenlen; // position of CEN table 1201 1202 // Get position of first local file (LOC) header, taking into 1203 // account that there may be a stub prefixed to the zip file. 
1204 locpos = cenpos - end.cenoff; 1205 if (locpos < 0) 1206 zerror("invalid END header (bad central directory offset)"); 1207 1208 // read in the CEN and END 1209 byte[] cen = new byte[(int)(end.cenlen + ENDHDR)]; 1210 if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) { 1211 zerror("read CEN tables failed"); 1212 } 1213 // Iterate through the entries in the central directory 1214 inodes = new LinkedHashMap<>(end.centot + 1); 1215 int pos = 0; 1216 int limit = cen.length - ENDHDR; 1217 while (pos < limit) { 1218 if (!cenSigAt(cen, pos)) 1219 zerror("invalid CEN header (bad signature)"); 1220 int method = CENHOW(cen, pos); 1221 int nlen = CENNAM(cen, pos); 1222 int elen = CENEXT(cen, pos); 1223 int clen = CENCOM(cen, pos); 1224 if ((CENFLG(cen, pos) & 1) != 0) { 1225 zerror("invalid CEN header (encrypted entry)"); 1226 } 1227 if (method != METHOD_STORED && method != METHOD_DEFLATED) { 1228 zerror("invalid CEN header (unsupported compression method: " + method + ")"); 1229 } 1230 if (pos + CENHDR + nlen > limit) { 1231 zerror("invalid CEN header (bad header size)"); 1232 } 1233 IndexNode inode = new IndexNode(cen, pos, nlen); 1234 inodes.put(inode, inode); 1235 1236 // skip ext and comment 1237 pos += (CENHDR + nlen + elen + clen); 1238 } 1239 if (pos + ENDHDR != cen.length) { 1240 zerror("invalid CEN header (bad header size)"); 1241 } 1242 buildNodeTree(); 1243 return cen; 1244 } 1245 1246 private void ensureOpen() throws IOException { 1247 if (!isOpen) 1248 throw new ClosedFileSystemException(); 1249 } 1250 1251 // Creates a new empty temporary file in the same directory as the 1252 // specified file. A variant of Files.createTempFile. 1253 private Path createTempFileInSameDirectoryAs(Path path) 1254 throws IOException 1255 { 1256 Path parent = path.toAbsolutePath().getParent(); 1257 Path dir = (parent == null) ? 
path.getFileSystem().getPath(".") : parent; 1258 Path tmpPath = Files.createTempFile(dir, "zipfstmp", null); 1259 tmppaths.add(tmpPath); 1260 return tmpPath; 1261 } 1262 1263 ////////////////////update & sync ////////////////////////////////////// 1264 1265 private boolean hasUpdate = false; 1266 1267 // shared key. consumer guarantees the "writeLock" before use it. 1268 private final IndexNode LOOKUPKEY = new IndexNode(null, -1); 1269 1270 private void updateDelete(IndexNode inode) { 1271 beginWrite(); 1272 try { 1273 removeFromTree(inode); 1274 inodes.remove(inode); 1275 hasUpdate = true; 1276 } finally { 1277 endWrite(); 1278 } 1279 } 1280 1281 private void update(Entry e) { 1282 beginWrite(); 1283 try { 1284 IndexNode old = inodes.put(e, e); 1285 if (old != null) { 1286 removeFromTree(old); 1287 } 1288 if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) { 1289 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name))); 1290 e.sibling = parent.child; 1291 parent.child = e; 1292 } 1293 hasUpdate = true; 1294 } finally { 1295 endWrite(); 1296 } 1297 } 1298 1299 // copy over the whole LOC entry (header if necessary, data and ext) from 1300 // old zip to the new one. 1301 private long copyLOCEntry(Entry e, boolean updateHeader, 1302 OutputStream os, 1303 long written, byte[] buf) 1304 throws IOException 1305 { 1306 long locoff = e.locoff; // where to read 1307 e.locoff = written; // update the e.locoff with new value 1308 1309 // calculate the size need to write out 1310 long size = 0; 1311 // if there is A ext 1312 if ((e.flag & FLAG_DATADESCR) != 0) { 1313 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1314 size = 24; 1315 else 1316 size = 16; 1317 } 1318 // read loc, use the original loc.elen/nlen 1319 // 1320 // an extra byte after loc is read, which should be the first byte of the 1321 // 'name' field of the loc. 
if this byte is '/', which means the original 1322 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1323 // is used to output the loc, in which the leading "/" will be removed 1324 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1325 throw new ZipException("loc: reading failed"); 1326 1327 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1328 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1329 size += e.csize; 1330 written = e.writeLOC(os) + size; 1331 } else { 1332 os.write(buf, 0, LOCHDR); // write out the loc header 1333 locoff += LOCHDR; 1334 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1335 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1336 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1337 written = LOCHDR + size; 1338 } 1339 int n; 1340 while (size > 0 && 1341 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1342 { 1343 if (size < n) 1344 n = (int)size; 1345 os.write(buf, 0, n); 1346 size -= n; 1347 locoff += n; 1348 } 1349 return written; 1350 } 1351 1352 private long writeEntry(Entry e, OutputStream os, byte[] buf) 1353 throws IOException { 1354 1355 if (e.bytes == null && e.file == null) // dir, 0-length data 1356 return 0; 1357 1358 long written = 0; 1359 try (OutputStream os2 = e.method == METHOD_STORED ? 
1360 new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) { 1361 if (e.bytes != null) { // in-memory 1362 os2.write(e.bytes, 0, e.bytes.length); 1363 } else if (e.file != null) { // tmp file 1364 if (e.type == Entry.NEW || e.type == Entry.FILECH) { 1365 try (InputStream is = Files.newInputStream(e.file)) { 1366 is.transferTo(os2); 1367 } 1368 } 1369 Files.delete(e.file); 1370 tmppaths.remove(e.file); 1371 } 1372 } 1373 written += e.csize; 1374 if ((e.flag & FLAG_DATADESCR) != 0) { 1375 written += e.writeEXT(os); 1376 } 1377 return written; 1378 } 1379 1380 // sync the zip file system, if there is any udpate 1381 private void sync() throws IOException { 1382 1383 if (!hasUpdate) 1384 return; 1385 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1386 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1387 { 1388 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1389 long written = 0; 1390 byte[] buf = new byte[8192]; 1391 Entry e = null; 1392 1393 // write loc 1394 for (IndexNode inode : inodes.values()) { 1395 if (inode instanceof Entry) { // an updated inode 1396 e = (Entry)inode; 1397 try { 1398 if (e.type == Entry.COPY) { 1399 // entry copy: the only thing changed is the "name" 1400 // and "nlen" in LOC header, so we udpate/rewrite the 1401 // LOC in new file and simply copy the rest (data and 1402 // ext) without enflating/deflating from the old zip 1403 // file LOC entry. 
1404 written += copyLOCEntry(e, true, os, written, buf); 1405 } else { // NEW, FILECH or CEN 1406 e.locoff = written; 1407 written += e.writeLOC(os); // write loc header 1408 written += writeEntry(e, os, buf); 1409 } 1410 elist.add(e); 1411 } catch (IOException x) { 1412 x.printStackTrace(); // skip any in-accurate entry 1413 } 1414 } else { // unchanged inode 1415 if (inode.pos == -1) { 1416 continue; // pseudo directory node 1417 } 1418 if (inode.name.length == 1 && inode.name[0] == '/') { 1419 continue; // no root '/' directory even if it 1420 // exists in original zip/jar file. 1421 } 1422 e = new Entry(inode); 1423 try { 1424 written += copyLOCEntry(e, false, os, written, buf); 1425 elist.add(e); 1426 } catch (IOException x) { 1427 x.printStackTrace(); // skip any wrong entry 1428 } 1429 } 1430 } 1431 1432 // now write back the cen and end table 1433 end.cenoff = written; 1434 for (Entry entry : elist) { 1435 written += entry.writeCEN(os); 1436 } 1437 end.centot = elist.size(); 1438 end.cenlen = written - end.cenoff; 1439 end.write(os, written, forceEnd64); 1440 } 1441 1442 ch.close(); 1443 Files.delete(zfpath); 1444 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1445 hasUpdate = false; // clear 1446 } 1447 1448 IndexNode getInode(byte[] path) { 1449 if (path == null) 1450 throw new NullPointerException("path"); 1451 return inodes.get(IndexNode.keyOf(path)); 1452 } 1453 1454 Entry getEntry(byte[] path) throws IOException { 1455 IndexNode inode = getInode(path); 1456 if (inode instanceof Entry) 1457 return (Entry)inode; 1458 if (inode == null || inode.pos == -1) 1459 return null; 1460 return new Entry(inode); 1461 } 1462 1463 public void deleteFile(byte[] path, boolean failIfNotExists) 1464 throws IOException 1465 { 1466 checkWritable(); 1467 1468 IndexNode inode = getInode(path); 1469 if (inode == null) { 1470 if (path != null && path.length == 0) 1471 throw new ZipException("root directory </> can't not be delete"); 1472 if (failIfNotExists) 1473 throw new 
NoSuchFileException(getString(path)); 1474 } else { 1475 if (inode.isDir() && inode.child != null) 1476 throw new DirectoryNotEmptyException(getString(path)); 1477 updateDelete(inode); 1478 } 1479 } 1480 1481 // Returns an out stream for either 1482 // (1) writing the contents of a new entry, if the entry exits, or 1483 // (2) updating/replacing the contents of the specified existing entry. 1484 private OutputStream getOutputStream(Entry e) throws IOException { 1485 1486 if (e.mtime == -1) 1487 e.mtime = System.currentTimeMillis(); 1488 if (e.method == -1) 1489 e.method = defaultMethod; 1490 // store size, compressed size, and crc-32 in datadescr 1491 e.flag = FLAG_DATADESCR; 1492 if (zc.isUTF8()) 1493 e.flag |= FLAG_USE_UTF8; 1494 OutputStream os; 1495 if (useTempFile) { 1496 e.file = getTempPathForEntry(null); 1497 os = Files.newOutputStream(e.file, WRITE); 1498 } else { 1499 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1500 } 1501 return new EntryOutputStream(e, os); 1502 } 1503 1504 private class EntryOutputStream extends FilterOutputStream { 1505 private Entry e; 1506 private long written; 1507 private boolean isClosed; 1508 1509 EntryOutputStream(Entry e, OutputStream os) throws IOException { 1510 super(os); 1511 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1512 // this.written = 0; 1513 } 1514 1515 @Override 1516 public synchronized void write(int b) throws IOException { 1517 out.write(b); 1518 written += 1; 1519 } 1520 1521 @Override 1522 public synchronized void write(byte b[], int off, int len) 1523 throws IOException { 1524 out.write(b, off, len); 1525 written += len; 1526 } 1527 1528 @Override 1529 public synchronized void close() throws IOException { 1530 if (isClosed) { 1531 return; 1532 } 1533 isClosed = true; 1534 e.size = written; 1535 if (out instanceof ByteArrayOutputStream) 1536 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1537 super.close(); 1538 update(e); 1539 } 1540 } 1541 1542 // Wrapper output 
stream class to write out a "stored" entry.
    // (1) this class does not close the underlying out stream when
    //     being closed.
    // (2) no need to be "synchronized", only used by sync()
    private class EntryOutputStreamCRC32 extends FilterOutputStream {
        private Entry e;          // entry whose size/csize/crc get recorded
        private CRC32 crc;        // running checksum of all bytes written
        private long written;     // byte count (== size == csize for STORED)
        private boolean isClosed;

        EntryOutputStreamCRC32(Entry e, OutputStream os) throws IOException {
            super(os);
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public void write(int b) throws IOException {
            out.write(b);
            crc.update(b);
            written += 1;
        }

        @Override
        public void write(byte b[], int off, int len)
                throws IOException {
            out.write(b, off, len);
            crc.update(b, off, len);
            written += len;
        }

        // Records the final size and crc on the entry; deliberately does
        // NOT close (or flush) the wrapped stream — see note (1) above.
        @Override
        public void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            e.size = e.csize = written;
            e.crc = crc.getValue();
        }
    }

    // Wrapper output stream class to write out a "deflated" entry.
    // (1) this class does not close the underlying out stream when
    //     being closed.
    // (2) no need to be "synchronized", only used by sync()
    private class EntryOutputStreamDef extends DeflaterOutputStream {
        private CRC32 crc;        // checksum of the UNCOMPRESSED bytes
        private Entry e;          // entry whose size/csize/crc get recorded
        private boolean isClosed;

        EntryOutputStreamDef(Entry e, OutputStream os) throws IOException {
            // uses a pooled deflater; returned to the pool in close()
            super(os, getDeflater());
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public void write(byte b[], int off, int len)
                throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        // Finishes the deflate stream, records size/csize/crc on the entry
        // and releases the pooled deflater. Does not close the wrapped
        // stream — see note (1) above.
        @Override
        public void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            finish();
            e.size = def.getBytesRead();
            e.csize = def.getBytesWritten();
            e.crc = crc.getValue();
            releaseDeflater(def);
        }
    }

    // Returns an input stream over the (decompressed) contents of the given
    // entry: in-memory bytes or tmp file for updated entries, otherwise a
    // channel-backed EntryInputStream, wrapped with an inflater for
    // METHOD_DEFLATED entries. Registered in "streams" for cleanup.
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis = null;

        if (e.type == Entry.NEW) {
            // now bytes & file is uncompressed.
            if (e.bytes != null)
                return new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                return Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else { // untouched CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
                private boolean isClosed = false;
                public void close() throws IOException {
                    if (!isClosed) {
                        // return the pooled inflater before closing
                        releaseInflater(inf);
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data;
                                                // a non-positive value encodes the (negated)
                                                // LOC offset until initDataPos() runs
        protected long rem;                     // number of remaining bytes within entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            pos = e.locoff;
            if (pos == -1) {
                // locoff unknown: re-read the entry from the CEN
                Entry e2 = getEntry(e.name);
                if (e2 == null) {
                    throw new ZipException("invalid loc for entry <" + e.name + ">");
                }
                pos = e2.locoff;
            }
            pos = -pos; // lazy initialize the real data offset
        }

        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            initDataPos();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                // self-close when fully consumed, deregistering from "streams"
                close();
            }
            return (int)n;
        }

        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }

        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }

        public int available() {
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }

        public void close() {
            rem = 0;
            streams.remove(this);
        }

        // Resolves the negated LOC offset stored in "pos" into the actual
        // file position of the entry data by reading the LOC header and
        // skipping its name and extra fields.
        private void initDataPos() throws IOException {
            if (pos <= 0) {
                pos = -pos + locpos;
                byte[] buf = new byte[LOCHDR];
                if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                    throw new ZipException("invalid loc " + pos + " for entry reading");
                }
                pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
            }
        }
    }

    // Throws a ZipException with the given message (helper for structural
    // zip-format errors).
    static void zerror(String msg) throws ZipException {
        throw new ZipException(msg);
    }

    // Maximum number of de/inflaters we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                return new Inflater(true);
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
1828 private Deflater getDeflater() { 1829 synchronized (deflaters) { 1830 int size = deflaters.size(); 1831 if (size > 0) { 1832 Deflater def = deflaters.remove(size - 1); 1833 return def; 1834 } else { 1835 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1836 } 1837 } 1838 } 1839 1840 // Releases the specified inflater to the list of available inflaters. 1841 private void releaseDeflater(Deflater def) { 1842 synchronized (deflaters) { 1843 if (inflaters.size() < MAX_FLATER) { 1844 def.reset(); 1845 deflaters.add(def); 1846 } else { 1847 def.end(); 1848 } 1849 } 1850 } 1851 1852 // End of central directory record 1853 static class END { 1854 // these 2 fields are not used by anyone and write() uses "0" 1855 // int disknum; 1856 // int sdisknum; 1857 int endsub; // endsub 1858 int centot; // 4 bytes 1859 long cenlen; // 4 bytes 1860 long cenoff; // 4 bytes 1861 int comlen; // comment length 1862 byte[] comment; 1863 1864 /* members of Zip64 end of central directory locator */ 1865 // int diskNum; 1866 long endpos; 1867 // int disktot; 1868 1869 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 1870 boolean hasZip64 = forceEnd64; // false; 1871 long xlen = cenlen; 1872 long xoff = cenoff; 1873 if (xlen >= ZIP64_MINVAL) { 1874 xlen = ZIP64_MINVAL; 1875 hasZip64 = true; 1876 } 1877 if (xoff >= ZIP64_MINVAL) { 1878 xoff = ZIP64_MINVAL; 1879 hasZip64 = true; 1880 } 1881 int count = centot; 1882 if (count >= ZIP64_MINVAL32) { 1883 count = ZIP64_MINVAL32; 1884 hasZip64 = true; 1885 } 1886 if (hasZip64) { 1887 long off64 = offset; 1888 //zip64 end of central directory record 1889 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1890 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1891 writeShort(os, 45); // version made by 1892 writeShort(os, 45); // version needed to extract 1893 writeInt(os, 0); // number of this disk 1894 writeInt(os, 0); // central directory start disk 1895 writeLong(os, centot); // number of 
directory entries on disk 1896 writeLong(os, centot); // number of directory entries 1897 writeLong(os, cenlen); // length of central directory 1898 writeLong(os, cenoff); // offset of central directory 1899 1900 //zip64 end of central directory locator 1901 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1902 writeInt(os, 0); // zip64 END start disk 1903 writeLong(os, off64); // offset of zip64 END 1904 writeInt(os, 1); // total number of disks (?) 1905 } 1906 writeInt(os, ENDSIG); // END record signature 1907 writeShort(os, 0); // number of this disk 1908 writeShort(os, 0); // central directory start disk 1909 writeShort(os, count); // number of directory entries on disk 1910 writeShort(os, count); // total number of directory entries 1911 writeInt(os, xlen); // length of central directory 1912 writeInt(os, xoff); // offset of central directory 1913 if (comment != null) { // zip file comment 1914 writeShort(os, comment.length); 1915 writeBytes(os, comment); 1916 } else { 1917 writeShort(os, 0); 1918 } 1919 } 1920 } 1921 1922 // Internal node that links a "name" to its pos in cen table. 1923 // The node itself can be used as a "key" to lookup itself in 1924 // the HashMap inodes. 
    static class IndexNode {
        byte[] name;     // entry name, always with a leading '/'
        int hashcode;    // node is hashable/hashed by its name
        int pos = -1;    // position in cen table, -1 means the
                         // entry does not exist in zip file
        boolean isdir;

        IndexNode(byte[] name, boolean isdir) {
            name(name);
            this.isdir = isdir;
            this.pos = -1;
        }

        IndexNode(byte[] name, int pos) {
            name(name);
            this.pos = pos;
        }

        // constructor for cenInit() (1) remove trailing '/' (2) pad leading '/'
        IndexNode(byte[] cen, int pos, int nlen) {
            int noff = pos + CENHDR;
            if (cen[noff + nlen - 1] == '/') {
                isdir = true;
                nlen--;
            }
            if (nlen > 0 && cen[noff] == '/') {
                name = Arrays.copyOfRange(cen, noff, noff + nlen);
            } else {
                // absolute-ize the name: copy with a '/' prepended
                name = new byte[nlen + 1];
                System.arraycopy(cen, noff, name, 1, nlen);
                name[0] = '/';
            }
            name(name);
            this.pos = pos;
        }

        // Per-thread reusable lookup key, so map lookups by name do not
        // allocate a new node each time.
        private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>();

        final static IndexNode keyOf(byte[] name) { // get a lookup key;
            IndexNode key = cachedKey.get();
            if (key == null) {
                key = new IndexNode(name, -1);
                cachedKey.set(key);
            }
            return key.as(name);
        }

        // Sets the name and (re)computes the cached hash.
        final void name(byte[] name) {
            this.name = name;
            this.hashcode = Arrays.hashCode(name);
        }

        final IndexNode as(byte[] name) { // reuse the node, mostly
            name(name);                   // as a lookup "key"
            return this;
        }

        boolean isDir() {
            return isdir;
        }

        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // ParentLookup compares against a name *prefix*; delegate so the
            // relation stays symmetric for that subclass
            if (other instanceof ParentLookup) {
                return ((ParentLookup)other).equals(this);
            }
            return Arrays.equals(name, ((IndexNode)other).name);
        }

        public int hashCode() {
            return hashcode;
        }

        IndexNode() {}
        IndexNode sibling;  // next sibling in the parent's child list
        IndexNode child;    // 1st child
    }
2005 class Entry extends IndexNode implements ZipFileAttributes { 2006 2007 static final int CEN = 1; // entry read from cen 2008 static final int NEW = 2; // updated contents in bytes or file 2009 static final int FILECH = 3; // fch update in "file" 2010 static final int COPY = 4; // copy of a CEN entry 2011 2012 byte[] bytes; // updated content bytes 2013 Path file; // use tmp file to store bytes; 2014 int type = CEN; // default is the entry read from cen 2015 2016 // entry attributes 2017 int version; 2018 int flag; 2019 int posixPerms = -1; // posix permissions 2020 int method = -1; // compression method 2021 long mtime = -1; // last modification time (in DOS time) 2022 long atime = -1; // last access time 2023 long ctime = -1; // create time 2024 long crc = -1; // crc-32 of entry data 2025 long csize = -1; // compressed size of entry data 2026 long size = -1; // uncompressed size of entry data 2027 byte[] extra; 2028 2029 // cen 2030 2031 // these fields are not used by anyone and writeCEN uses "0" 2032 // int versionMade; 2033 // int disk; 2034 // int attrs; 2035 // long attrsEx; 2036 long locoff; 2037 byte[] comment; 2038 2039 // posix support 2040 private UserPrincipal owner = defaultOwner; 2041 private GroupPrincipal group = defaultGroup; 2042 2043 Entry() {} 2044 2045 Entry(byte[] name, boolean isdir, int method) { 2046 name(name); 2047 this.isdir = isdir; 2048 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 2049 this.crc = 0; 2050 this.size = 0; 2051 this.csize = 0; 2052 this.method = method; 2053 } 2054 2055 @SuppressWarnings("unchecked") 2056 Entry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... 
attrs) { 2057 this(name, isdir, method); 2058 this.type = type; 2059 for (FileAttribute<?> attr : attrs) { 2060 String attrName = attr.name(); 2061 if (attrName.equals("posix:permissions")) { 2062 posixPerms = ZipUtils.permsToFlags((Set<PosixFilePermission>)attr.value()); 2063 } 2064 } 2065 } 2066 2067 Entry(Entry e, int type) { 2068 name(e.name); 2069 this.isdir = e.isdir; 2070 this.version = e.version; 2071 this.ctime = e.ctime; 2072 this.atime = e.atime; 2073 this.mtime = e.mtime; 2074 this.crc = e.crc; 2075 this.size = e.size; 2076 this.csize = e.csize; 2077 this.method = e.method; 2078 this.extra = e.extra; 2079 /* 2080 this.versionMade = e.versionMade; 2081 this.disk = e.disk; 2082 this.attrs = e.attrs; 2083 this.attrsEx = e.attrsEx; 2084 */ 2085 this.locoff = e.locoff; 2086 this.comment = e.comment; 2087 this.posixPerms = e.posixPerms; 2088 this.owner = e.owner; 2089 this.group = e.group; 2090 this.type = type; 2091 } 2092 2093 @SuppressWarnings("unchecked") 2094 Entry(byte[] name, Path file, int type, FileAttribute<?>... 
attrs) { 2095 this(name, type, false, METHOD_STORED); 2096 this.file = file; 2097 for (FileAttribute<?> attr : attrs) { 2098 String attrName = attr.name(); 2099 if (attrName.equals("posix:permissions")) { 2100 posixPerms = ZipUtils.permsToFlags((Set<PosixFilePermission>)attr.value()); 2101 } 2102 } 2103 } 2104 2105 // reads the full entry from an IndexNode 2106 Entry(IndexNode inode) throws IOException { 2107 int pos = inode.pos; 2108 if (!cenSigAt(cen, pos)) 2109 zerror("invalid CEN header (bad signature)"); 2110 version = CENVER(cen, pos); 2111 flag = CENFLG(cen, pos); 2112 method = CENHOW(cen, pos); 2113 mtime = dosToJavaTime(CENTIM(cen, pos)); 2114 crc = CENCRC(cen, pos); 2115 csize = CENSIZ(cen, pos); 2116 size = CENLEN(cen, pos); 2117 int nlen = CENNAM(cen, pos); 2118 int elen = CENEXT(cen, pos); 2119 int clen = CENCOM(cen, pos); 2120 /* 2121 versionMade = CENVEM(cen, pos); 2122 disk = CENDSK(cen, pos); 2123 attrs = CENATT(cen, pos); 2124 attrsEx = CENATX(cen, pos); 2125 */ 2126 if (CENVEM_FA(cen, pos) == FILE_ATTRIBUTES_UNIX) { 2127 posixPerms = CENATX_PERMS(cen, pos) & 0xFFF; // 12 bits for setuid, setgid, sticky + perms 2128 } 2129 locoff = CENOFF(cen, pos); 2130 pos += CENHDR; 2131 this.name = inode.name; 2132 this.isdir = inode.isdir; 2133 this.hashcode = inode.hashcode; 2134 2135 pos += nlen; 2136 if (elen > 0) { 2137 extra = Arrays.copyOfRange(cen, pos, pos + elen); 2138 pos += elen; 2139 readExtra(ZipFileSystem.this); 2140 } 2141 if (clen > 0) { 2142 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2143 } 2144 } 2145 2146 int version(boolean zip64) throws ZipException { 2147 if (zip64) { 2148 return 45; 2149 } 2150 if (method == METHOD_DEFLATED) 2151 return 20; 2152 else if (method == METHOD_STORED) 2153 return 10; 2154 throw new ZipException("unsupported compression method"); 2155 } 2156 2157 /** 2158 * Adds information about compatibility of file attribute information 2159 * to a version value. 
2160 */ 2161 int versionMadeBy(int version) { 2162 return (posixPerms < 0) ? version : 2163 VERSION_BASE_UNIX | (version & 0xff); 2164 } 2165 2166 ///////////////////// CEN ////////////////////// 2167 int writeCEN(OutputStream os) throws IOException { 2168 long csize0 = csize; 2169 long size0 = size; 2170 long locoff0 = locoff; 2171 int elen64 = 0; // extra for ZIP64 2172 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 2173 int elenEXTT = 0; // extra for Extended Timestamp 2174 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2175 2176 byte[] zname = isdir ? toDirectoryPath(name) : name; 2177 2178 // confirm size/length 2179 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2180 int elen = (extra != null) ? extra.length : 0; 2181 int eoff = 0; 2182 int clen = (comment != null) ? comment.length : 0; 2183 if (csize >= ZIP64_MINVAL) { 2184 csize0 = ZIP64_MINVAL; 2185 elen64 += 8; // csize(8) 2186 } 2187 if (size >= ZIP64_MINVAL) { 2188 size0 = ZIP64_MINVAL; // size(8) 2189 elen64 += 8; 2190 } 2191 if (locoff >= ZIP64_MINVAL) { 2192 locoff0 = ZIP64_MINVAL; 2193 elen64 += 8; // offset(8) 2194 } 2195 if (elen64 != 0) { 2196 elen64 += 4; // header and data sz 4 bytes 2197 } 2198 boolean zip64 = (elen64 != 0); 2199 int version0 = version(zip64); 2200 while (eoff + 4 < elen) { 2201 int tag = SH(extra, eoff); 2202 int sz = SH(extra, eoff + 2); 2203 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2204 foundExtraTime = true; 2205 } 2206 eoff += (4 + sz); 2207 } 2208 if (!foundExtraTime) { 2209 if (isWindows) { // use NTFS 2210 elenNTFS = 36; // total 36 bytes 2211 } else { // Extended Timestamp otherwise 2212 elenEXTT = 9; // only mtime in cen 2213 } 2214 } 2215 writeInt(os, CENSIG); // CEN header signature 2216 writeShort(os, versionMadeBy(version0)); // version made by 2217 writeShort(os, version0); // version needed to extract 2218 writeShort(os, flag); // general purpose bit flag 2219 writeShort(os, method); // compression 
method 2220 // last modification time 2221 writeInt(os, (int)javaToDosTime(mtime)); 2222 writeInt(os, crc); // crc-32 2223 writeInt(os, csize0); // compressed size 2224 writeInt(os, size0); // uncompressed size 2225 writeShort(os, nlen); 2226 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2227 2228 if (comment != null) { 2229 writeShort(os, Math.min(clen, 0xffff)); 2230 } else { 2231 writeShort(os, 0); 2232 } 2233 writeShort(os, 0); // starting disk number 2234 writeShort(os, 0); // internal file attributes (unused) 2235 writeInt(os, posixPerms > 0 ? posixPerms << 16 : 0); // external file 2236 // attributes, used for storing posix 2237 // permissions 2238 writeInt(os, locoff0); // relative offset of local header 2239 writeBytes(os, zname, 1, nlen); 2240 if (zip64) { 2241 writeShort(os, EXTID_ZIP64);// Zip64 extra 2242 writeShort(os, elen64 - 4); // size of "this" extra block 2243 if (size0 == ZIP64_MINVAL) 2244 writeLong(os, size); 2245 if (csize0 == ZIP64_MINVAL) 2246 writeLong(os, csize); 2247 if (locoff0 == ZIP64_MINVAL) 2248 writeLong(os, locoff); 2249 } 2250 if (elenNTFS != 0) { 2251 writeShort(os, EXTID_NTFS); 2252 writeShort(os, elenNTFS - 4); 2253 writeInt(os, 0); // reserved 2254 writeShort(os, 0x0001); // NTFS attr tag 2255 writeShort(os, 24); 2256 writeLong(os, javaToWinTime(mtime)); 2257 writeLong(os, javaToWinTime(atime)); 2258 writeLong(os, javaToWinTime(ctime)); 2259 } 2260 if (elenEXTT != 0) { 2261 writeShort(os, EXTID_EXTT); 2262 writeShort(os, elenEXTT - 4); 2263 if (ctime == -1) 2264 os.write(0x3); // mtime and atime 2265 else 2266 os.write(0x7); // mtime, atime and ctime 2267 writeInt(os, javaToUnixTime(mtime)); 2268 } 2269 if (extra != null) // whatever not recognized 2270 writeBytes(os, extra); 2271 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2272 writeBytes(os, comment); 2273 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2274 } 2275 2276 ///////////////////// LOC ////////////////////// 
// Writes this entry's local file header (LOC) to os and returns the
// number of bytes written. When FLAG_DATADESCR is set, size/csize/crc
// are zeroed here and supplied later by a data descriptor (writeEXT).
        int writeLOC(OutputStream os) throws IOException {
            byte[] zname = isdir ? toDirectoryPath(name) : name;
            int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash
            int elen = (extra != null) ? extra.length : 0;
            boolean foundExtraTime = false; // if extra timestamp present
            int eoff = 0;
            int elen64 = 0;
            boolean zip64 = false;
            int elenEXTT = 0;
            int elenNTFS = 0;
            writeInt(os, LOCSIG);           // LOC header signature
            if ((flag & FLAG_DATADESCR) != 0) {
                writeShort(os, version(zip64)); // version needed to extract
                writeShort(os, flag);       // general purpose bit flag
                writeShort(os, method);     // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;            //headid(2) + size(2) + size(8) + csize(8)
                    zip64 = true;
                }
                writeShort(os, version(zip64)); // version needed to extract
                writeShort(os, flag);       // general purpose bit flag
                writeShort(os, method);     // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);          // crc-32
                if (zip64) {
                    // real sizes live in the ZIP64 extra block below
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);    // compressed size
                    writeInt(os, size);     // uncompressed size
                }
            }
            // Scan existing extra data; only synthesize a timestamp block
            // if none is already present.
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;          // NTFS, total 36 bytes
                } else {                    // on unix use "ext time"
                    // 9 = tag(2) + size(2) + flags(1) + mtime(4);
                    // a/ctime add 4 bytes each when known
                    elenEXTT = 9;
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, zname, 1, nlen);
            if (zip64) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);            // reserved
                writeShort(os, 0x0001);     // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);// size for the following data block
                int fbyte = 0x1;
                if (atime != -1)            // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)            // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);            // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor
        // Written after the entry data when FLAG_DATADESCR is set; uses
        // 8-byte sizes (24 bytes total) once either size needs ZIP64.
        int writeEXT(OutputStream os) throws IOException {
            writeInt(os, EXTSIG);           // EXT header signature
            writeInt(os, crc);              // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);        // compressed size
                writeInt(os, size);         // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        // Recognized blocks are consumed; unrecognized ones are compacted
        // to the front of extra (or extra is set to null if nothing is left).
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)        // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // Each 8-byte field is present only if the corresponding
                    // CEN field held the ZIP64 magic value.
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen) // invalid zip64 extra
                            break;          // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)
                        break;
                    pos += 4;               // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)
                        break;
                    if (SH(extra, pos + 2) != 24)
                        break;
                    // override the loc field, datetime here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime, if flag
                    // "zipinfo-time" is not specified to false;
                    // there is performance cost (move up to loc and read) to
                    // access the loc table foreach entry;
                    if (zipfs.noExtt) {
                        if (sz == 5)
                            mtime = unixToJavaTime(LG(extra, pos + 1));
                        break;
                    }
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length, locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (!locSigAt(buf, 0))
                        throw new ZipException("loc: wrong sig ->"
                            + Long.toString(getSig(buf, 0), 16));
                    int locElen = LOCEXT(buf);
                    if (locElen < 9)        // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    // Re-read just the LOC extra field and pull the full
                    // mtime/atime/ctime set out of its EXTT block.
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length, locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    int locPos = 0;
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        // flags bit 0/1/2 -> mtime/atime/ctime present,
                        // in that order
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:                    // unknown tag
                    // keep unrecognized blocks, compacted in place
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;
        }

        // Debug-style dump of all attribute values for this entry.
        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format(" name : %s%n", new String(name));
            fm.format(" creationTime : %tc%n", creationTime().toMillis());
            fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis());
            fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format(" isRegularFile : %b%n", isRegularFile());
            fm.format(" isDirectory : %b%n", isDirectory());
            fm.format(" isSymbolicLink : %b%n", isSymbolicLink());
            fm.format(" isOther : %b%n", isOther());
            fm.format(" fileKey : %s%n", fileKey());
            fm.format(" size : %d%n", size());
            fm.format(" compressedSize : %d%n", compressedSize());
            fm.format(" crc : %x%n", crc());
            fm.format(" method : %d%n", method());
            if (posixPerms != -1) {
                fm.format(" permissions : %s%n", permissions());
            }
            fm.close();
            return
sb.toString();
        }

        ///////// basic file attributes ///////////
        // Falls back to mtime when ctime was never read (-1 sentinel).
        @Override
        public FileTime creationTime() {
            return FileTime.fromMillis(ctime == -1 ? mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        @Override
        public boolean isOther() {
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        // Falls back to mtime when atime was never read (-1 sentinel).
        @Override
        public FileTime lastAccessTime() {
            return FileTime.fromMillis(atime == -1 ? mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        @Override
        public long size() {
            return size;
        }

        // Symbolic links are not represented by this file system.
        @Override
        public boolean isSymbolicLink() {
            return false;
        }

        @Override
        public Object fileKey() {
            return null;
        }

        ///////// posix file attributes ///////////

        @Override
        public UserPrincipal owner() {
            return owner;
        }

        @Override
        public GroupPrincipal group() {
            return group;
        }

        // Stored permissions if present in the entry, otherwise the
        // file system's configured defaults.
        @Override
        public Set<PosixFilePermission> permissions() {
            return storedPermissions().orElse(Set.copyOf(defaultPermissions));
        }

        ///////// zip file attributes ///////////

        @Override
        public long compressedSize() {
            return csize;
        }

        @Override
        public long crc() {
            return crc;
        }

        @Override
        public int method() {
            return method;
        }

        // Defensive copy; callers cannot mutate the entry's extra data.
        @Override
        public byte[] extra() {
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        // Defensive copy; callers cannot mutate the entry's comment.
        @Override
        public byte[] comment() {
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }

        // Decodes the posixPerms bit flags back into a permission set;
        // empty Optional when no permissions were stored (-1 sentinel).
        @Override
        public Optional<Set<PosixFilePermission>> storedPermissions() {
            Set<PosixFilePermission> perms = null;
            if (posixPerms != -1) {
                perms = new HashSet<>(PosixFilePermission.values().length);
                for (PosixFilePermission perm : PosixFilePermission.values()) {
                    if ((posixPerms & ZipUtils.permToFlag(perm)) != 0) {
                        perms.add(perm);
                    }
                }
            }
            return Optional.ofNullable(perms);
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourself as
    // implemented below.

    // default time stamp for pseudo entries
    private long zfsDefaultTimeStamp = System.currentTimeMillis();

    // Unlinks inode from its parent's child/sibling chain.
    // NOTE(review): assumes the parent node exists in inodes and has at
    // least one child; a missing parent would NPE here -- confirm callers
    // guarantee this.
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    // purely for parent lookup, so we don't have to copy the parent
    // name every time
    static class ParentLookup extends IndexNode {
        // number of leading bytes of name that form the lookup key
        int len;
        ParentLookup() {}

        final ParentLookup as(byte[] name, int len) { // as a lookup "key"
            name(name, len);
            return this;
        }

        // Reuses the first len bytes of name as the key without copying.
        void name(byte[] name, int len) {
            this.name = name;
            this.len = len;
            // calculate the hashcode the same way as Arrays.hashCode() does
            int result = 1;
            for (int i = 0; i < len; i++)
                result = 31 * result + name[i];
            this.hashcode = result;
        }

        // Compares only the first len bytes of this key against the
        // other node's full name.
        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            byte[] oname = ((IndexNode)other).name;
            return Arrays.equals(name, 0, len,
                                 oname, 0, oname.length);
        }

    }

    // Links every inode into a tree rooted at ROOTPATH, creating pseudo
    // directory entries for parents the ZIP file never stored.
    private void buildNodeTree() throws IOException {
        beginWrite();
        try {
            IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH));
            if (root == null) {
                root = new IndexNode(ROOTPATH, true);
            } else {
                inodes.remove(root);
            }
            IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]);
            inodes.put(root, root);
            ParentLookup lookup = new ParentLookup();
            for (IndexNode node : nodes) {
                IndexNode parent;
                // Walk up the path, inserting missing parents, until an
                // existing ancestor (or the root) is reached.
                while (true) {
                    int off = getParentOff(node.name);
                    if (off <= 1) {    // parent is root
                        node.sibling = root.child;
                        root.child = node;
                        break;
                    }
                    lookup = lookup.as(node.name, off);
                    if (inodes.containsKey(lookup)) {
                        parent = inodes.get(lookup);
                        node.sibling = parent.child;
                        parent.child = node;
                        break;
                    }
                    // add new pseudo directory entry
                    parent = new IndexNode(Arrays.copyOf(node.name, off), true);
                    inodes.put(parent, parent);
                    node.sibling = parent.child;
                    parent.child = node;
                    node = parent;
                }
            }
        } finally {
            endWrite();
        }
    }
}