1 /* 2 * Copyright (c) 2009, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
 */

package jdk.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.*;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.nio.file.spi.*;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Inflater;
import java.util.zip.Deflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.ZipException;
import static java.lang.Boolean.*;
import static jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;
import static java.nio.file.StandardOpenOption.*;
import static java.nio.file.StandardCopyOption.*;

/**
 * A FileSystem built on a zip file
 *
 * @author Xueming Shen
 */

class ZipFileSystem extends FileSystem {

    private final ZipFileSystemProvider provider;
    private final Path zfpath;          // path of the underlying zip file
    final ZipCoder zc;                  // encoder/decoder for entry names/comments
    private final ZipPath rootdir;      // the single root directory "/"
    private boolean readOnly = false;   // readonly file system

    // configurable by env map
    private final boolean noExtt;        // see readExtra()
    private final boolean useTempFile;   // use a temp file for newOS, default
                                         // is to use BAOS for better performance
    private static final boolean isWindows = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> System.getProperty("os.name")
                                              .startsWith("Windows"));
    private final boolean forceEnd64;    // always write a ZIP64 END record
    private final int defaultMethod;     // METHOD_STORED if "noCompression=true"
                                         // METHOD_DEFLATED otherwise

    // Builds a zip file system over the zip file at zfpath.
    // Recognized env keys: "encoding" (charset for entry names/comments,
    // default UTF-8), "zipinfo-time", "useTempFile", "forceZIP64End",
    // "noCompression" and "create" (create an empty zip if zfpath is absent).
    // Throws FileSystemNotFoundException if the file is absent and "create"
    // is not set; any IOException from parsing closes the channel first.
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = isTrue(env, "useTempFile");
        this.forceEnd64 = isTrue(env, "forceZIP64End");
        this.defaultMethod = isTrue(env, "noCompression") ? METHOD_STORED: METHOD_DEFLATED;
        if (Files.notExists(zfpath)) {
            // create a new zip if not exists
            if (isTrue(env, "create")) {
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();   // parse the central directory up front
        } catch (IOException x) {
            // close the channel before propagating; a failure while closing
            // is attached as a suppressed exception
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;
    }

    // returns true if there is a name=true/"true" setting in env
    private static boolean isTrue(Map<String, ?> env, String name) {
        return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // throws if this file system (or the underlying zip file) is read-only
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }

    void setReadOnly() {
        this.readOnly = true;
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        return List.of(rootdir);
    }

    ZipPath getRootDir() {
        return rootdir;
    }

    // Joins "first" and "more" with '/', skipping empty segments.
    @Override
    public ZipPath getPath(String first, String... more) {
        if (more.length == 0) {
            return new ZipPath(this, first);
        }
        StringBuilder sb = new StringBuilder();
        sb.append(first);
        for (String path : more) {
            if (path.length() > 0) {
                if (sb.length() > 0) {
                    sb.append('/');
                }
                sb.append(path);
            }
        }
        return new ZipPath(this, sb.toString());
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        return List.of(new ZipFileStore(rootdir));
    }

    private static final Set<String> supportedFileAttributeViews =
        Set.of("basic", "zip");

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    // Returns a matcher for a "syntax:pattern" string; supports the
    // "glob" and "regex" syntaxes only.
    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);   // translate glob into a regex
        } else {
            if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    // Closes the file system: marks it closed, closes any outstanding
    // input streams, syncs pending updates back to the zip file, releases
    // the pooled in/deflaters and deletes temporary files.
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is: copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
                sync(); return null;
            });
            ch.close();                 // close the ch just in case no update
                                        // and sync didn't close the ch
        } catch (PrivilegedActionException e) {
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        // best-effort deletion of temp files; the first failure is kept
        // and later ones attached as suppressed exceptions
        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p: tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
           throw ioe;
    }

    // Returns the attributes of the entry at "path", or null if there is
    // none. A directory that exists only implicitly (no CEN record of its
    // own) is reported as a STORED pseudo directory with default timestamps.
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry(path);
            if (e == null) {
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                // pseudo directory, uses METHOD_STORED
                e = new Entry(inode.name, inode.isdir, METHOD_STORED);
                e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp;
            }
        } finally {
            endRead();
        }
        return e;
    }

    // Throws NoSuchFileException if nothing exists at "path".
    void checkAccess(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            // is it necessary to readCEN as a sanity check?
            if (getInode(path) == null) {
                throw new NoSuchFileException(toString());
            }

        } finally {
            endRead();
        }
    }

    // Updates the mtime/atime/ctime of an existing entry; a null argument
    // leaves the corresponding time unchanged.
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;     // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }
    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(byte[] path,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            // walk the singly-linked child/sibling chain of the directory node
            IndexNode child = inode.child;
            while (child != null) {
                // assume all path from zip file itself is "normalized"
                ZipPath zp = new ZipPath(this, child.name, true);
                if (filter == null || filter.accept(zp))
                    list.add(zp);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    // Creates a directory entry; fails if it already exists (or is the
    // root) or if a parent directory is missing. "attrs" are unused here.
    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        //  dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW, true, METHOD_STORED);
            update(e);
        } finally {
            endWrite();
        }
    }

    // Copies (or moves, when deletesrc is true) entry "src" to "dst".
    // Honors REPLACE_EXISTING and COPY_ATTRIBUTES; copying a directory
    // just creates "dst" as a directory, per the Files.copy spec.
    void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry(src);  // ensureOpen checked

            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime= u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        boolean hasTruncate = false;
        for (OpenOption opt: options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
            if (opt == TRUNCATE_EXISTING)
                hasTruncate = true;
        }
        if (hasAppend && hasTruncate)
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    // APPEND: pre-copy the existing contents into the new entry
                    InputStream is = getInputStream(e);
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    is.transferTo(os);
                    is.close();
                    return os;
                }
                return getOutputStream(new Entry(e, Entry.NEW));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(new Entry(path, Entry.NEW, false, defaultMethod));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }
    private void checkOptions(Set<? extends OpenOption> options) {
        // check for options of null type and option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }


    // Returns an output SeekableByteChannel for either
    // (1) writing the contents of a new entry, if the entry doesn't exist, or
    // (2) updating/replacing the contents of an existing entry.
    // Note: The content is not compressed.
    private class EntryOutputChannel extends ByteArrayChannel {
        Entry e;

        EntryOutputChannel(Entry e) throws IOException {
            super(e.size > 0? (int)e.size : 8192, false);
            this.e = e;
            if (e.mtime == -1)
                e.mtime = System.currentTimeMillis();
            if (e.method == -1)
                e.method = defaultMethod;
            // store size, compressed size, and crc-32 in datadescriptor
            e.flag = FLAG_DATADESCR;
            if (zc.isUTF8())
                e.flag |= FLAG_USE_UTF8;
        }

        @Override
        public void close() throws IOException {
            // move the buffered bytes into the entry and register the update
            e.bytes = toByteArray();
            e.size = e.bytes.length;
            e.crc = -1;
            super.close();
            update(e);
        }
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of
    // it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();    // only need a readlock, the "update()" will obtain
                            // the writelock when the channel is closed
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e != null) {
                    if (e.isDir() || options.contains(CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    SeekableByteChannel sbc =
                        new EntryOutputChannel(new Entry(e, Entry.NEW));
                    if (options.contains(APPEND)) {
                        try (InputStream is = getInputStream(e)) {  // copyover
                            byte[] buf = new byte[8192];
                            ByteBuffer bb = ByteBuffer.wrap(buf);
                            int n;
                            while ((n = is.read(buf)) != -1) {
                                bb.position(0);
                                bb.limit(n);
                                sbc.write(bb);
                            }
                        }
                    }
                    return sbc;
                }
                if (!options.contains(CREATE) && !options.contains(CREATE_NEW))
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return new EntryOutputChannel(
                    new Entry(path, Entry.NEW, false, defaultMethod));

            } finally {
                endRead();
            }
        } else {
            // read-only channel over an in-memory copy of the entry data
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                try (InputStream is = getInputStream(e)) {
                    // TBD: if (e.size < NNNNN);
                    return new ByteArrayChannel(is.readAllBytes(), true);
                }
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            // an existing FILECH entry already has a backing temp file
            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = METHOD_DEFLATED;
            }
            // is there a better way to hook into the FileChannel's close method?
            // All operations delegate to fch; only implCloseChannel() adds the
            // update/cleanup logic.
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    // mapping a temp-file-backed entry is not supported
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // temp files created for entries; deleted on close()
    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
848 private void checkParents(byte[] path) throws IOException { 849 beginRead(); 850 try { 851 while ((path = getParent(path)) != null && 852 path != ROOTPATH) { 853 if (!inodes.containsKey(IndexNode.keyOf(path))) { 854 throw new NoSuchFileException(getString(path)); 855 } 856 } 857 } finally { 858 endRead(); 859 } 860 } 861 862 private static byte[] ROOTPATH = new byte[] { '/' }; 863 private static byte[] getParent(byte[] path) { 864 int off = getParentOff(path); 865 if (off <= 1) 866 return ROOTPATH; 867 return Arrays.copyOf(path, off); 868 } 869 870 private static int getParentOff(byte[] path) { 871 int off = path.length - 1; 872 if (off > 0 && path[off] == '/') // isDirectory 873 off--; 874 while (off > 0 && path[off] != '/') { off--; } 875 return off; 876 } 877 878 private final void beginWrite() { 879 rwlock.writeLock().lock(); 880 } 881 882 private final void endWrite() { 883 rwlock.writeLock().unlock(); 884 } 885 886 private final void beginRead() { 887 rwlock.readLock().lock(); 888 } 889 890 private final void endRead() { 891 rwlock.readLock().unlock(); 892 } 893 894 /////////////////////////////////////////////////////////////////// 895 896 private volatile boolean isOpen = true; 897 private final SeekableByteChannel ch; // channel to the zipfile 898 final byte[] cen; // CEN & ENDHDR 899 private END end; 900 private long locpos; // position of first LOC header (usually 0) 901 902 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 903 904 // name -> pos (in cen), IndexNode itself can be used as a "key" 905 private LinkedHashMap<IndexNode, IndexNode> inodes; 906 907 final byte[] getBytes(String name) { 908 return zc.getBytes(name); 909 } 910 911 final String getString(byte[] name) { 912 return zc.toString(name); 913 } 914 915 @SuppressWarnings("deprecation") 916 protected void finalize() throws IOException { 917 close(); 918 } 919 920 // Reads len bytes of data from the specified offset into buf. 
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        // the channel position is shared state, so positioned reads are
        // serialized on the channel
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards from the end of the file in overlapping blocks
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P'    &&
                    buf[i+1] == (byte)'K'    &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    // try if there is zip64 end;
                    byte[] loc64 = new byte[ZIP64_LOCHDR];
                    if (end.endpos < ZIP64_LOCHDR ||
                        readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                        != loc64.length ||
                        !locator64SigAt(loc64, 0)) {
                        return end;
                    }
                    long end64pos = ZIP64_LOCOFF(loc64);
                    byte[] end64buf = new byte[ZIP64_ENDHDR];
                    if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                        != end64buf.length ||
                        !end64SigAt(end64buf, 0)) {
                        return end;
                    }
                    // end64 found,
                    long cenlen64 = ZIP64_ENDSIZ(end64buf);
                    long cenoff64 = ZIP64_ENDOFF(end64buf);
                    long centot64 = ZIP64_ENDTOT(end64buf);
                    // double-check: the zip64 values only replace the END
                    // values that are set to the 0xFFFF.../0xFFFF markers
                    if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL ||
                        cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL ||
                        centot64 != end.centot && end.centot != ZIP64_MINVAL32) {
                        return end;
                    }
                    // to use the end64 values
                    end.cenlen = cenlen64;
                    end.cenoff = cenoff64;
                    end.centot = (int)centot64; // assume total < 2g
                    end.endpos = end64pos;
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }

    // Reads zip file central directory. Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;             // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0) {
                zerror("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                zerror("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos, nlen);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);    // remembered so close() can delete it
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // Removes "inode" from both the directory tree and the inode map and
    // marks the file system dirty.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Inserts or replaces "e" in the inode map; new/copied/tempfile entries
    // are also linked into their parent's child list.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
    // Copies one LOC entry (header if necessary, data and ext) from the old
    // zip into "os". "written" is the byte count already written to "os";
    // the new total is returned. Side effect: e.locoff is rewritten to the
    // entry's offset in the new file.
    private long copyLOCEntry(Entry e, boolean updateHeader,
                              OutputStream os,
                              long written, byte[] buf)
        throws IOException
    {
        long locoff = e.locoff;  // where to read
        e.locoff = written;      // update the e.locoff with new value

        // calculate the size need to write out
        long size = 0;
        // if there is A ext (data descriptor following the data)
        if ((e.flag & FLAG_DATADESCR) != 0) {
            if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL)
                size = 24;
            else
                size = 16;
        }
        // read loc, use the original loc.elen/nlen
        //
        // an extra byte after loc is read, which should be the first byte of the
        // 'name' field of the loc. if this byte is '/', which means the original
        // entry has an absolute path in original zip/jar file, the e.writeLOC()
        // is used to output the loc, in which the leading "/" will be removed
        if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1)
            throw new ZipException("loc: reading failed");

        if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') {
            locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf);  // skip header
            size += e.csize;
            written = e.writeLOC(os) + size;
        } else {
            os.write(buf, 0, LOCHDR);    // write out the loc header
            locoff += LOCHDR;
            // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on
            // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf);
            size += LOCNAM(buf) + LOCEXT(buf) + e.csize;
            written = LOCHDR + size;
        }
        // copy the remaining bytes (name, ext, data, data descriptor)
        int n;
        while (size > 0 &&
            (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1)
        {
            if (size < n)
                n = (int)size;
            os.write(buf, 0, n);
            size -= n;
            locoff += n;
        }
        return written;
    }

    // Writes the (possibly compressed) data of entry "e" to "os", plus the
    // EXT record when the entry uses a data descriptor. Returns the number
    // of bytes written after the LOC header.
    private long writeEntry(Entry e, OutputStream os, byte[] buf)
        throws IOException {

        if (e.bytes == null && e.file == null)    // dir, 0-length data
            return 0;

        long written = 0;
        if (e.bytes != null && e.crc != 0) {
            // pre-compressed entry (crc/csize already computed eagerly),
            // write directly to output stream
            os.write(e.bytes, 0, e.bytes.length);
        } else {
            try (OutputStream os2 = e.method == METHOD_STORED ?
                    new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) {
                if (e.bytes != null) {                 // in-memory
                    os2.write(e.bytes, 0, e.bytes.length);
                } else if (e.file != null) {           // tmp file
                    if (e.type == Entry.NEW || e.type == Entry.FILECH) {
                        try (InputStream is = Files.newInputStream(e.file)) {
                            is.transferTo(os2);
                        }
                    }
                    Files.delete(e.file);
                    tmppaths.remove(e.file);
                }
            }
        }
        written += e.csize;
        if ((e.flag & FLAG_DATADESCR) != 0) {
            written += e.writeEXT(os);
        }
        return written;
    }

    // Syncs the zip file system, if there is any update: writes all entries
    // (LOC + data), then the CEN table and END record, to a temp file in the
    // same directory, and moves it over the original zip file.
    private void sync() throws IOException {

        if (!hasUpdate)
            return;
        Path tmpFile = createTempFileInSameDirectoryAs(zfpath);
        try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE)))
        {
            ArrayList<Entry> elist = new ArrayList<>(inodes.size());
            long written = 0;
            byte[] buf = new byte[8192];
            Entry e = null;

            // write loc
            for (IndexNode inode : inodes.values()) {
                if (inode instanceof Entry) {    // an updated inode
                    e = (Entry)inode;
                    try {
                        if (e.type == Entry.COPY) {
                            // entry copy: the only thing changed is the "name"
                            // and "nlen" in LOC header, so we update/rewrite the
                            // LOC in new file and simply copy the rest (data and
                            // ext) without enflating/deflating from the old zip
                            // file LOC entry.
                            written += copyLOCEntry(e, true, os, written, buf);
                        } else {                          // NEW, FILECH or CEN
                            e.locoff = written;
                            written += e.writeLOC(os);    // write loc header
                            written += writeEntry(e, os, buf);
                        }
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();    // skip any in-accurate entry
                    }
                } else {                         // unchanged inode
                    if (inode.pos == -1) {
                        continue;                // pseudo directory node
                    }
                    if (inode.name.length == 1 && inode.name[0] == '/') {
                        continue;                // no root '/' directory even if it
                                                 // exists in original zip/jar file.
                    }
                    e = Entry.readCEN(this, inode);
                    try {
                        written += copyLOCEntry(e, false, os, written, buf);
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();    // skip any wrong entry
                    }
                }
            }

            // now write back the cen and end table
            end.cenoff = written;
            for (Entry entry : elist) {
                written += entry.writeCEN(os);
            }
            end.centot = elist.size();
            end.cenlen = written - end.cenoff;
            end.write(os, written, forceEnd64);
        }

        // replace the original zip with the freshly written one
        ch.close();
        Files.delete(zfpath);
        Files.move(tmpFile, zfpath, REPLACE_EXISTING);
        hasUpdate = false;    // clear
    }

    // Returns the IndexNode for the given entry name, or null if absent.
    IndexNode getInode(byte[] path) {
        if (path == null)
            throw new NullPointerException("path");
        return inodes.get(IndexNode.keyOf(path));
    }

    // Returns the Entry for the given name, reading it from the CEN table
    // if necessary; returns null for missing entries and pseudo directories.
    Entry getEntry(byte[] path) throws IOException {
        IndexNode inode = getInode(path);
        if (inode instanceof Entry)
            return (Entry)inode;
        if (inode == null || inode.pos == -1)
            return null;
        return Entry.readCEN(this, inode);
    }

    // Deletes the named entry.
    //
    // Throws NoSuchFileException when the entry is missing and
    // failIfNotExists is true, DirectoryNotEmptyException for a non-empty
    // directory, and ZipException for the root directory.
    public void deleteFile(byte[] path, boolean failIfNotExists)
        throws IOException
    {
        checkWritable();

        IndexNode inode = getInode(path);
        if (inode == null) {
            if (path != null && path.length == 0)
                throw new ZipException("root directory </> can't not be delete");
            if (failIfNotExists)
                throw new NoSuchFileException(getString(path));
        } else {
            if (inode.isDir() && inode.child != null)
                throw new DirectoryNotEmptyException(getString(path));
            updateDelete(inode);
        }
    }

    // Returns an out stream for either
    // (1) writing the contents of a new entry, if the entry exists, or
    // (2) updating/replacing the contents of the specified existing entry.
    private OutputStream getOutputStream(Entry e) throws IOException {

        if (e.mtime == -1)
            e.mtime = System.currentTimeMillis();
        if (e.method == -1)
            e.method = defaultMethod;
        // store size, compressed size, and crc-32 in datadescr
        e.flag = FLAG_DATADESCR;
        if (zc.isUTF8())
            e.flag |= FLAG_USE_UTF8;
        OutputStream os;
        if (useTempFile) {
            e.file = getTempPathForEntry(null);
            os = Files.newOutputStream(e.file, WRITE);
        } else {
            if (defaultMethod == METHOD_DEFLATED) {
                // deflate eagerly into memory to reduce the footprint
                // held until sync()
                return new DeflatingByteArrayEntryOutputStream(e,
                        new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192));
            }
            os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192);
        }
        return new EntryOutputStream(e, os);
    }

    // Output stream handed to clients for writing uncompressed entry data
    // into memory or a temp file; on close() the entry is registered in the
    // file system via update().
    private class EntryOutputStream extends FilterOutputStream {
        private Entry e;          // entry being written
        private long written;     // uncompressed bytes written so far
        private boolean isClosed; // guards against double close/update

        EntryOutputStream(Entry e, OutputStream os) throws IOException {
            super(os);
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            // this.written = 0;
        }

        @Override
        public synchronized void write(int b) throws IOException {
            out.write(b);
            written += 1;
        }

        @Override
        public synchronized void write(byte b[], int off, int len)
                throws IOException {
            out.write(b, off, len);
            written += len;
        }

        @Override
        public synchronized void close() throws IOException {
            if (isClosed) {
                return;
            }
            isClosed = true;
            e.size = written;
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();
            super.close();
            update(e);    // make the new contents visible in the file system
        }
    }

    // Output stream returned when writing "deflated" entries into memory,
    // to enable eager (possibly parallel) deflation and reduce memory required.
    // Deflates client-written data straight into an in-memory buffer; on
    // close() the entry's size/csize/crc are recorded and the entry is
    // registered via update(). "def" (inherited Deflater) comes from the
    // pool via getDeflater() and is returned to it in close().
    private class DeflatingByteArrayEntryOutputStream extends DeflaterOutputStream {
        private Entry e;           // entry being written
        private CRC32 crc;         // running crc-32 of the uncompressed data
        private boolean isClosed;  // guards against double close/update

        DeflatingByteArrayEntryOutputStream(Entry e, ByteArrayOutputStream os) throws IOException {
            super(os, getDeflater());
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public synchronized void write(int b) throws IOException {
            super.write(b);
            crc.update(b);
        }

        @Override
        public synchronized void write(byte b[], int off, int len)
                throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        @Override
        public synchronized void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            finish();                         // flush the deflater
            e.size = def.getBytesRead();      // uncompressed size
            e.csize = def.getBytesWritten();  // compressed size
            e.bytes = ((ByteArrayOutputStream)out).toByteArray();
            e.crc = crc.getValue();
            releaseDeflater(def);             // return deflater to the pool
            update(e);
        }
    }

    // Wrapper output stream class to write out a "stored" entry.
    // (1) this class does not close the underlying out stream when
    //     being closed.
1426 // (2) no need to be "synchronized", only used by sync() 1427 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1428 private Entry e; 1429 private CRC32 crc; 1430 private long written; 1431 private boolean isClosed; 1432 1433 EntryOutputStreamCRC32(Entry e, OutputStream os) throws IOException { 1434 super(os); 1435 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1436 this.crc = new CRC32(); 1437 } 1438 1439 @Override 1440 public void write(int b) throws IOException { 1441 out.write(b); 1442 crc.update(b); 1443 written += 1; 1444 } 1445 1446 @Override 1447 public void write(byte b[], int off, int len) 1448 throws IOException { 1449 out.write(b, off, len); 1450 crc.update(b, off, len); 1451 written += len; 1452 } 1453 1454 @Override 1455 public void close() throws IOException { 1456 if (isClosed) 1457 return; 1458 isClosed = true; 1459 e.size = e.csize = written; 1460 e.size = crc.getValue(); 1461 } 1462 } 1463 1464 // Wrapper output stream class to write out a "deflated" entry. 1465 // (1) this class does not close the underlying out stream when 1466 // being closed. 
    // (2) no need to be "synchronized", only used by sync()
    private class EntryOutputStreamDef extends DeflaterOutputStream {
        private CRC32 crc;         // running crc-32 of the uncompressed data
        private Entry e;           // entry being written
        private boolean isClosed;  // guards against double close

        EntryOutputStreamDef(Entry e, OutputStream os) throws IOException {
            super(os, getDeflater());
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public void write(byte b[], int off, int len)
                throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        @Override
        public void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            finish();    // flush the deflater, keep "out" (the zip) open
            e.size = def.getBytesRead();      // uncompressed size
            e.csize = def.getBytesWritten();  // compressed size
            e.crc = crc.getValue();
            // NOTE(review): the pooled Deflater obtained via getDeflater()
            // is not returned with releaseDeflater() here — confirm it is
            // released elsewhere, otherwise the pool is bypassed.
        }
    }

    // Returns an input stream over the (possibly decompressed) data of
    // entry "e"; deflated entries are wrapped in an InflaterInputStream.
    // Streams read from the channel are tracked in "streams" for cleanup.
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis = null;

        if (e.type == Entry.NEW) {
            // now bytes & file is uncompressed.
            if (e.bytes != null)
                return new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                return Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {    // untouched CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2;    // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
                private boolean isClosed = false;
                public void close() throws IOException {
                    if (!isClosed) {
                        releaseInflater(inf);    // return inflater to the pool
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry
        protected final long size;              // uncompressed size of this entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            size = e.size;
            pos = e.locoff;
            if (pos == -1) {
                // offset not known yet: resolve it via the CEN table
                Entry e2 = getEntry(e.name);
                if (e2 == null) {
                    throw new ZipException("invalid loc for entry <" + e.name + ">");
                }
                pos = e2.locoff;
            }
            pos = -pos;  // lazy initialize the real data offset
        }

        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            initDataPos();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // lock the channel: position() + read() must be atomic
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }

        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }

        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }

        public int available() {
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }

        public long size() {
            return size;
        }

        public void close() {
            rem = 0;
            streams.remove(this);
        }

        // Resolves the real data offset on first use: undoes the negative
        // lazy-marker and skips over the entry's LOC header (incl. name and
        // extra field).
        private void initDataPos() throws IOException {
            if (pos <= 0) {
                pos = -pos + locpos;
                byte[] buf = new byte[LOCHDR];
                if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                    throw new ZipException("invalid loc " + pos + " for entry reading");
                }
                pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
            }
        }
    }

    // Throws a ZipException with the given message.
    static void zerror(String msg) throws ZipException {
        throw new ZipException(msg);
    }

    // Maximum number of de/inflaters we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                return new Inflater(true);    // "nowrap": raw deflate data
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();    // pool full, free the native resources
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
1714 private Deflater getDeflater() { 1715 synchronized (deflaters) { 1716 int size = deflaters.size(); 1717 if (size > 0) { 1718 Deflater def = deflaters.remove(size - 1); 1719 return def; 1720 } else { 1721 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1722 } 1723 } 1724 } 1725 1726 // Releases the specified inflater to the list of available inflaters. 1727 private void releaseDeflater(Deflater def) { 1728 synchronized (deflaters) { 1729 if (inflaters.size() < MAX_FLATER) { 1730 def.reset(); 1731 deflaters.add(def); 1732 } else { 1733 def.end(); 1734 } 1735 } 1736 } 1737 1738 // End of central directory record 1739 static class END { 1740 // these 2 fields are not used by anyone and write() uses "0" 1741 // int disknum; 1742 // int sdisknum; 1743 int endsub; // endsub 1744 int centot; // 4 bytes 1745 long cenlen; // 4 bytes 1746 long cenoff; // 4 bytes 1747 int comlen; // comment length 1748 byte[] comment; 1749 1750 /* members of Zip64 end of central directory locator */ 1751 // int diskNum; 1752 long endpos; 1753 // int disktot; 1754 1755 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 1756 boolean hasZip64 = forceEnd64; // false; 1757 long xlen = cenlen; 1758 long xoff = cenoff; 1759 if (xlen >= ZIP64_MINVAL) { 1760 xlen = ZIP64_MINVAL; 1761 hasZip64 = true; 1762 } 1763 if (xoff >= ZIP64_MINVAL) { 1764 xoff = ZIP64_MINVAL; 1765 hasZip64 = true; 1766 } 1767 int count = centot; 1768 if (count >= ZIP64_MINVAL32) { 1769 count = ZIP64_MINVAL32; 1770 hasZip64 = true; 1771 } 1772 if (hasZip64) { 1773 long off64 = offset; 1774 //zip64 end of central directory record 1775 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1776 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1777 writeShort(os, 45); // version made by 1778 writeShort(os, 45); // version needed to extract 1779 writeInt(os, 0); // number of this disk 1780 writeInt(os, 0); // central directory start disk 1781 writeLong(os, centot); // number of 
directory entries on disk 1782 writeLong(os, centot); // number of directory entries 1783 writeLong(os, cenlen); // length of central directory 1784 writeLong(os, cenoff); // offset of central directory 1785 1786 //zip64 end of central directory locator 1787 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1788 writeInt(os, 0); // zip64 END start disk 1789 writeLong(os, off64); // offset of zip64 END 1790 writeInt(os, 1); // total number of disks (?) 1791 } 1792 writeInt(os, ENDSIG); // END record signature 1793 writeShort(os, 0); // number of this disk 1794 writeShort(os, 0); // central directory start disk 1795 writeShort(os, count); // number of directory entries on disk 1796 writeShort(os, count); // total number of directory entries 1797 writeInt(os, xlen); // length of central directory 1798 writeInt(os, xoff); // offset of central directory 1799 if (comment != null) { // zip file comment 1800 writeShort(os, comment.length); 1801 writeBytes(os, comment); 1802 } else { 1803 writeShort(os, 0); 1804 } 1805 } 1806 } 1807 1808 // Internal node that links a "name" to its pos in cen table. 1809 // The node itself can be used as a "key" to lookup itself in 1810 // the HashMap inodes. 
1811 static class IndexNode { 1812 byte[] name; 1813 int hashcode; // node is hashable/hashed by its name 1814 int pos = -1; // position in cen table, -1 menas the 1815 // entry does not exists in zip file 1816 boolean isdir; 1817 1818 IndexNode(byte[] name, boolean isdir) { 1819 name(name); 1820 this.isdir = isdir; 1821 this.pos = -1; 1822 } 1823 1824 IndexNode(byte[] name, int pos) { 1825 name(name); 1826 this.pos = pos; 1827 } 1828 1829 // constructor for cenInit() (1) remove tailing '/' (2) pad leading '/' 1830 IndexNode(byte[] cen, int pos, int nlen) { 1831 int noff = pos + CENHDR; 1832 if (cen[noff + nlen - 1] == '/') { 1833 isdir = true; 1834 nlen--; 1835 } 1836 if (nlen > 0 && cen[noff] == '/') { 1837 name = Arrays.copyOfRange(cen, noff, noff + nlen); 1838 } else { 1839 name = new byte[nlen + 1]; 1840 System.arraycopy(cen, noff, name, 1, nlen); 1841 name[0] = '/'; 1842 } 1843 name(name); 1844 this.pos = pos; 1845 } 1846 1847 private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>(); 1848 1849 final static IndexNode keyOf(byte[] name) { // get a lookup key; 1850 IndexNode key = cachedKey.get(); 1851 if (key == null) { 1852 key = new IndexNode(name, -1); 1853 cachedKey.set(key); 1854 } 1855 return key.as(name); 1856 } 1857 1858 final void name(byte[] name) { 1859 this.name = name; 1860 this.hashcode = Arrays.hashCode(name); 1861 } 1862 1863 final IndexNode as(byte[] name) { // reuse the node, mostly 1864 name(name); // as a lookup "key" 1865 return this; 1866 } 1867 1868 boolean isDir() { 1869 return isdir; 1870 } 1871 1872 public boolean equals(Object other) { 1873 if (!(other instanceof IndexNode)) { 1874 return false; 1875 } 1876 if (other instanceof ParentLookup) { 1877 return ((ParentLookup)other).equals(this); 1878 } 1879 return Arrays.equals(name, ((IndexNode)other).name); 1880 } 1881 1882 public int hashCode() { 1883 return hashcode; 1884 } 1885 1886 IndexNode() {} 1887 IndexNode sibling; 1888 IndexNode child; // 1st child 1889 } 
1890 1891 static class Entry extends IndexNode implements ZipFileAttributes { 1892 1893 static final int CEN = 1; // entry read from cen 1894 static final int NEW = 2; // updated contents in bytes or file 1895 static final int FILECH = 3; // fch update in "file" 1896 static final int COPY = 4; // copy of a CEN entry 1897 1898 byte[] bytes; // updated content bytes 1899 Path file; // use tmp file to store bytes; 1900 int type = CEN; // default is the entry read from cen 1901 1902 // entry attributes 1903 int version; 1904 int flag; 1905 int method = -1; // compression method 1906 long mtime = -1; // last modification time (in DOS time) 1907 long atime = -1; // last access time 1908 long ctime = -1; // create time 1909 long crc = -1; // crc-32 of entry data 1910 long csize = -1; // compressed size of entry data 1911 long size = -1; // uncompressed size of entry data 1912 byte[] extra; 1913 1914 // cen 1915 1916 // these fields are not used by anyone and writeCEN uses "0" 1917 // int versionMade; 1918 // int disk; 1919 // int attrs; 1920 // long attrsEx; 1921 long locoff; 1922 byte[] comment; 1923 1924 Entry() {} 1925 1926 Entry(byte[] name, boolean isdir, int method) { 1927 name(name); 1928 this.isdir = isdir; 1929 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1930 this.crc = 0; 1931 this.size = 0; 1932 this.csize = 0; 1933 this.method = method; 1934 } 1935 1936 Entry(byte[] name, int type, boolean isdir, int method) { 1937 this(name, isdir, method); 1938 this.type = type; 1939 } 1940 1941 Entry (Entry e, int type) { 1942 name(e.name); 1943 this.isdir = e.isdir; 1944 this.version = e.version; 1945 this.ctime = e.ctime; 1946 this.atime = e.atime; 1947 this.mtime = e.mtime; 1948 this.crc = e.crc; 1949 this.size = e.size; 1950 this.csize = e.csize; 1951 this.method = e.method; 1952 this.extra = e.extra; 1953 /* 1954 this.versionMade = e.versionMade; 1955 this.disk = e.disk; 1956 this.attrs = e.attrs; 1957 this.attrsEx = e.attrsEx; 1958 */ 1959 
this.locoff = e.locoff; 1960 this.comment = e.comment; 1961 this.type = type; 1962 } 1963 1964 Entry (byte[] name, Path file, int type) { 1965 this(name, type, false, METHOD_STORED); 1966 this.file = file; 1967 } 1968 1969 int version() throws ZipException { 1970 if (method == METHOD_DEFLATED) 1971 return 20; 1972 else if (method == METHOD_STORED) 1973 return 10; 1974 throw new ZipException("unsupported compression method"); 1975 } 1976 1977 ///////////////////// CEN ////////////////////// 1978 static Entry readCEN(ZipFileSystem zipfs, IndexNode inode) 1979 throws IOException 1980 { 1981 return new Entry().cen(zipfs, inode); 1982 } 1983 1984 private Entry cen(ZipFileSystem zipfs, IndexNode inode) 1985 throws IOException 1986 { 1987 byte[] cen = zipfs.cen; 1988 int pos = inode.pos; 1989 if (!cenSigAt(cen, pos)) 1990 zerror("invalid CEN header (bad signature)"); 1991 version = CENVER(cen, pos); 1992 flag = CENFLG(cen, pos); 1993 method = CENHOW(cen, pos); 1994 mtime = dosToJavaTime(CENTIM(cen, pos)); 1995 crc = CENCRC(cen, pos); 1996 csize = CENSIZ(cen, pos); 1997 size = CENLEN(cen, pos); 1998 int nlen = CENNAM(cen, pos); 1999 int elen = CENEXT(cen, pos); 2000 int clen = CENCOM(cen, pos); 2001 /* 2002 versionMade = CENVEM(cen, pos); 2003 disk = CENDSK(cen, pos); 2004 attrs = CENATT(cen, pos); 2005 attrsEx = CENATX(cen, pos); 2006 */ 2007 locoff = CENOFF(cen, pos); 2008 pos += CENHDR; 2009 this.name = inode.name; 2010 this.isdir = inode.isdir; 2011 this.hashcode = inode.hashcode; 2012 2013 pos += nlen; 2014 if (elen > 0) { 2015 extra = Arrays.copyOfRange(cen, pos, pos + elen); 2016 pos += elen; 2017 readExtra(zipfs); 2018 } 2019 if (clen > 0) { 2020 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2021 } 2022 return this; 2023 } 2024 2025 int writeCEN(OutputStream os) throws IOException 2026 { 2027 int written = CENHDR; 2028 int version0 = version(); 2029 long csize0 = csize; 2030 long size0 = size; 2031 long locoff0 = locoff; 2032 int elen64 = 0; // extra for 
ZIP64 2033 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 2034 int elenEXTT = 0; // extra for Extended Timestamp 2035 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2036 2037 byte[] zname = isdir ? toDirectoryPath(name) : name; 2038 2039 // confirm size/length 2040 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2041 int elen = (extra != null) ? extra.length : 0; 2042 int eoff = 0; 2043 int clen = (comment != null) ? comment.length : 0; 2044 if (csize >= ZIP64_MINVAL) { 2045 csize0 = ZIP64_MINVAL; 2046 elen64 += 8; // csize(8) 2047 } 2048 if (size >= ZIP64_MINVAL) { 2049 size0 = ZIP64_MINVAL; // size(8) 2050 elen64 += 8; 2051 } 2052 if (locoff >= ZIP64_MINVAL) { 2053 locoff0 = ZIP64_MINVAL; 2054 elen64 += 8; // offset(8) 2055 } 2056 if (elen64 != 0) { 2057 elen64 += 4; // header and data sz 4 bytes 2058 } 2059 while (eoff + 4 < elen) { 2060 int tag = SH(extra, eoff); 2061 int sz = SH(extra, eoff + 2); 2062 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2063 foundExtraTime = true; 2064 } 2065 eoff += (4 + sz); 2066 } 2067 if (!foundExtraTime) { 2068 if (isWindows) { // use NTFS 2069 elenNTFS = 36; // total 36 bytes 2070 } else { // Extended Timestamp otherwise 2071 elenEXTT = 9; // only mtime in cen 2072 } 2073 } 2074 writeInt(os, CENSIG); // CEN header signature 2075 if (elen64 != 0) { 2076 writeShort(os, 45); // ver 4.5 for zip64 2077 writeShort(os, 45); 2078 } else { 2079 writeShort(os, version0); // version made by 2080 writeShort(os, version0); // version needed to extract 2081 } 2082 writeShort(os, flag); // general purpose bit flag 2083 writeShort(os, method); // compression method 2084 // last modification time 2085 writeInt(os, (int)javaToDosTime(mtime)); 2086 writeInt(os, crc); // crc-32 2087 writeInt(os, csize0); // compressed size 2088 writeInt(os, size0); // uncompressed size 2089 writeShort(os, nlen); 2090 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2091 2092 if (comment != null) { 2093 
writeShort(os, Math.min(clen, 0xffff)); 2094 } else { 2095 writeShort(os, 0); 2096 } 2097 writeShort(os, 0); // starting disk number 2098 writeShort(os, 0); // internal file attributes (unused) 2099 writeInt(os, 0); // external file attributes (unused) 2100 writeInt(os, locoff0); // relative offset of local header 2101 writeBytes(os, zname, 1, nlen); 2102 if (elen64 != 0) { 2103 writeShort(os, EXTID_ZIP64);// Zip64 extra 2104 writeShort(os, elen64 - 4); // size of "this" extra block 2105 if (size0 == ZIP64_MINVAL) 2106 writeLong(os, size); 2107 if (csize0 == ZIP64_MINVAL) 2108 writeLong(os, csize); 2109 if (locoff0 == ZIP64_MINVAL) 2110 writeLong(os, locoff); 2111 } 2112 if (elenNTFS != 0) { 2113 writeShort(os, EXTID_NTFS); 2114 writeShort(os, elenNTFS - 4); 2115 writeInt(os, 0); // reserved 2116 writeShort(os, 0x0001); // NTFS attr tag 2117 writeShort(os, 24); 2118 writeLong(os, javaToWinTime(mtime)); 2119 writeLong(os, javaToWinTime(atime)); 2120 writeLong(os, javaToWinTime(ctime)); 2121 } 2122 if (elenEXTT != 0) { 2123 writeShort(os, EXTID_EXTT); 2124 writeShort(os, elenEXTT - 4); 2125 if (ctime == -1) 2126 os.write(0x3); // mtime and atime 2127 else 2128 os.write(0x7); // mtime, atime and ctime 2129 writeInt(os, javaToUnixTime(mtime)); 2130 } 2131 if (extra != null) // whatever not recognized 2132 writeBytes(os, extra); 2133 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2134 writeBytes(os, comment); 2135 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2136 } 2137 2138 ///////////////////// LOC ////////////////////// 2139 2140 int writeLOC(OutputStream os) throws IOException { 2141 writeInt(os, LOCSIG); // LOC header signature 2142 int version = version(); 2143 2144 byte[] zname = isdir ? toDirectoryPath(name) : name; 2145 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2146 int elen = (extra != null) ? 
extra.length : 0;
            boolean foundExtraTime = false;     // true if an extra timestamp (EXTT/NTFS) field is already present
            int eoff = 0;
            int elen64 = 0;                     // bytes of zip64 extra field to append (0 = none)
            int elenEXTT = 0;                   // bytes of Info-ZIP extended-timestamp field (unix)
            int elenNTFS = 0;                   // bytes of NTFS timestamp field (windows)
            if ((flag & FLAG_DATADESCR) != 0) {
                // sizes/crc unknown yet: real values follow the entry data
                // in a data descriptor (see writeEXT)
                writeShort(os, version());      // version needed to extract
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;    //headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);         // ver 4.5 for zip64
                } else {
                    writeShort(os, version());  // version needed to extract
                }
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);              // crc-32
                if (elen64 != 0) {
                    // real sizes go into the zip64 extra field below; the
                    // 32-bit LOC fields carry the ZIP64_MINVAL marker
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);        // compressed size
                    writeInt(os, size);         // uncompressed size
                }
            }
            // scan the existing extra data for a timestamp field so we
            // don't append a duplicate one
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;              // NTFS, total 36 bytes
                } else {                        // on unix use "ext time"
                    elenEXTT = 9;               // headid(2) + sz(2) + flag(1) + mtime(4)
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, zname, 1, nlen);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);             // data size: size(8) + csize(8)
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);                // reserved
                writeShort(os, 0x0001);         // NTFS attr tag
                writeShort(os, 24);
                // NOTE(review): atime/ctime may still be -1 here; that -1 is
                // converted via javaToWinTime as-is — confirm readers tolerate it
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);   // size for the following data block
                int fbyte = 0x1;
                if (atime != -1)                // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)                // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);                // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);          // existing extra data, unmodified
            }
            // total number of bytes written for this LOC header
            return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor: written immediately after the compressed data
        // when the sizes/crc were not known at LOC-writing time
        // (FLAG_DATADESCR). Returns the number of bytes written.
        int writeEXT(OutputStream os) throws IOException {
            writeInt(os, EXTSIG);               // EXT header signature
            writeInt(os, crc);                  // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                // zip64 form: 8-byte sizes
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);            // compressed size
                writeInt(os, size);             // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        //
        // Consumes the tags it understands (ZIP64 sizes/offset, NTFS and
        // extended timestamps), compacts any unknown tags to the front of
        // 'extra', and drops 'extra' entirely if nothing unknown remains.
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;                     // write cursor for retained (unknown) tags
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)            // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // 8-byte values present only for fields whose 32-bit CEN
                    // slot holds the ZIP64_MINVAL marker, in this fixed order
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)     // invalid zip64 extra
                            break;              // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)                // reserved(4) + tag(2) + sz(2) + 3 * time(8)
                        break;
                    pos += 4;                   // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)   // NTFS attr tag
                        break;
                    if (SH(extra, pos + 2) != 24)   // attr size must be 3 x 8-byte times
                        break;
                    // override the loc field, datatime here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime, if flag
                    // "zipinfo-time" is not specified to false;
                    // there is performance cost (move up to loc and read) to
                    // access the loc table foreach entry;
                    if (zipfs.noExtt) {
                        if (sz == 5)            // flag(1) + mtime(4): mtime-only form
                            mtime = unixToJavaTime(LG(extra, pos + 1));
                        break;
                    }
                    // read this entry's LOC header to find its extra area
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length, locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (!locSigAt(buf, 0))
                        throw new ZipException("loc: wrong sig ->"
                                               + Long.toString(getSig(buf, 0), 16));
                    int locElen = LOCEXT(buf);
                    if (locElen < 9)            // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length, locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    // scan the LOC extra area for the EXTT field and pick up
                    // whichever of mtime/atime/ctime its flag byte declares
                    int locPos = 0;
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:    // unknown tag: keep it (compacted towards the front)
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;                   // nothing unknown retained
        }

        ///////// basic file attributes ///////////
        // falls back to mtime when no creation time was recorded (-1)
        @Override
        public FileTime creationTime() {
            return FileTime.fromMillis(ctime == -1 ? mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        @Override
        public boolean isOther() {
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        // falls back to mtime when no access time was recorded (-1)
        @Override
        public FileTime lastAccessTime() {
            return FileTime.fromMillis(atime == -1 ? mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        @Override
        public long size() {
            return size;
        }

        @Override
        public boolean isSymbolicLink() {
            return false;
        }

        @Override
        public Object fileKey() {
            return null;
        }

        ///////// zip entry attributes ///////////
        public long compressedSize() {
            return csize;
        }

        public long crc() {
            return crc;
        }

        public int method() {
            return method;
        }

        // defensive copy: callers must not be able to mutate the stored bytes
        public byte[] extra() {
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        // defensive copy, see extra()
        public byte[] comment() {
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }

        // debug/diagnostic dump of all attributes
        // NOTE(review): new String(name) uses the platform default charset,
        // not this file system's ZipCoder — confirm intended for display only
        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format(" name : %s%n", new String(name));
            fm.format(" creationTime : %tc%n", creationTime().toMillis());
            fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis());
            fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format(" isRegularFile : %b%n", isRegularFile());
            fm.format(" isDirectory : %b%n", isDirectory());
            fm.format(" isSymbolicLink : %b%n", isSymbolicLink());
            fm.format(" isOther : %b%n", isOther());
            fm.format(" fileKey : %s%n", fileKey());
            fm.format(" size : %d%n", size());
            fm.format(" compressedSize : %d%n", compressedSize());
            fm.format(" crc : %x%n", crc());
            fm.format(" method : %d%n", method());
            fm.close();
            return sb.toString();
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourself as
    // implemented below.
    private IndexNode root;

    // default time stamp for pseudo entries
    private long zfsDefaultTimeStamp = System.currentTimeMillis();

    // Unlinks 'inode' from its parent's singly-linked child/sibling chain.
    // NOTE(review): assumes the parent exists in 'inodes' and has at least
    // one child — a miss would NPE on 'parent.child' — confirm callers
    // guarantee this invariant
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            // removing the head of the chain
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    // purely for parent lookup, so we don't have to copy the parent
    // name every time
    static class ParentLookup extends IndexNode {
        int len;                // number of leading bytes of 'name' that form the key
        ParentLookup() {}

        final ParentLookup as(byte[] name, int len) { // as a lookup "key"
            name(name, len);
            return this;
        }

        void name(byte[] name, int len) {
            this.name = name;
            this.len = len;
            // calculate the hashcode the same way as Arrays.hashCode() does
            // (over the first 'len' bytes only), so lookups match IndexNode keys
            int result = 1;
            for (int i = 0; i < len; i++)
                result = 31 * result + name[i];
            this.hashcode = result;
        }

        // compares only the first 'len' bytes of this key against the
        // other node's full name
        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            byte[] oname = ((IndexNode)other).name;
            return Arrays.equals(name, 0, len,
                                 oname, 0, oname.length);
        }

    }

    // Links every inode into a parent/child tree rooted at ROOTPATH,
    // creating pseudo directory entries for parents the ZIP file does
    // not itself contain (see the ZIP directory note above).
    private void buildNodeTree() throws IOException {
        beginWrite();
        try {
            IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH));
            if (root == null) {
                root = new IndexNode(ROOTPATH, true);
            } else {
                inodes.remove(root);
            }
            // snapshot before re-inserting root so root isn't re-processed
            IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]);
            inodes.put(root, root);
            ParentLookup lookup = new ParentLookup();
            for (IndexNode node : nodes) {
                IndexNode parent;
                // walk up, creating missing ancestors until an existing
                // parent (or the root) is reached
                while (true) {
                    int off = getParentOff(node.name);
                    if (off <= 1) {    // parent is root
                        node.sibling = root.child;
                        root.child = node;
                        break;
                    }
                    lookup = lookup.as(node.name, off);
                    if (inodes.containsKey(lookup)) {
                        parent = inodes.get(lookup);
                        node.sibling = parent.child;
                        parent.child = node;
                        break;
                    }
                    // add new pseudo directory entry
                    parent = new IndexNode(Arrays.copyOf(node.name, off), true);
                    inodes.put(parent, parent);
                    node.sibling = parent.child;
                    parent.child = node;
                    node = parent;    // continue linking the new parent upwards
                }
            }
        } finally {
            endWrite();
        }
    }
}