1 /* 2 * Copyright (c) 2009, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
 */

package jdk.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.*;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.nio.file.spi.*;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Inflater;
import java.util.zip.Deflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.ZipException;
import static java.lang.Boolean.*;
import static jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;
import static java.nio.file.StandardOpenOption.*;
import static java.nio.file.StandardCopyOption.*;

/**
 * A FileSystem built on a zip file
 *
 * @author Xueming Shen
 */

class ZipFileSystem extends FileSystem {

    private final ZipFileSystemProvider provider;
    private final Path zfpath;               // path of the underlying zip file
    final ZipCoder zc;                       // encoder/decoder for entry names/comments
    private final boolean noExtt;            // see readExtra()
    private final ZipPath rootdir;           // the "/" root of this file system
    // configurable by env map
    private final boolean useTempFile;       // use a temp file for newOS, default
                                             // is to use BAOS for better performance
    private boolean readOnly = false;        // readonly file system
    private static final boolean isWindows = AccessController.doPrivileged(
        (PrivilegedAction<Boolean>) () -> System.getProperty("os.name")
                                                .startsWith("Windows"));

    // Opens (or, with env "create"="true", creates) the zip file at zfpath.
    // Reads the central directory eagerly; on a parse failure the channel is
    // closed before the exception propagates so no handle is leaked.
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // create a new zip if not exists
        boolean createNew = "true".equals(env.get("create"));
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = TRUE.equals(env.get("useTempFile"));
        this.provider = provider;
        this.zfpath = zfpath;
        if (Files.notExists(zfpath)) {
            if (createNew) {
                // a brand-new zip consists of a single empty END record
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            // don't leak the channel when the central directory is unreadable
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // Throws if this file system was opened read-only (or forced read-only
    // via setReadOnly()).
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }

    // Forces this file system into read-only mode; there is no way back.
    void setReadOnly() {
        this.readOnly = true;
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        return List.of(rootdir);
    }

    // Returns the shared "/" root path of this file system.
    ZipPath getRootDir() {
        return rootdir;
    }

    @Override
    public ZipPath getPath(String first, String... more) {
        if (more.length == 0) {
            return new ZipPath(this, first);
        }
        // join non-empty components with '/'
        StringBuilder sb = new StringBuilder();
        sb.append(first);
        for (String path : more) {
            if (path.length() > 0) {
                if (sb.length() > 0) {
                    sb.append('/');
                }
                sb.append(path);
            }
        }
        return new ZipPath(this, sb.toString());
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        return List.of(new ZipFileStore(rootdir));
    }

    private static final Set<String> supportedFileAttributeViews =
        Set.of("basic", "zip");

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);
        } else {
            if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    // Closes this file system: marks it closed, closes outstanding input
    // streams, syncs pending updates back to the zip file, releases the
    // (in/de)flater pools and deletes any temporary files.
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is : copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
                sync(); return null;
            });
            ch.close();                 // close the ch just in case no update
        } catch (PrivilegedActionException e) { // and sync does not close the ch
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        // best-effort cleanup of temp files; first failure is thrown,
        // subsequent ones are attached as suppressed exceptions
        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p : tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
            throw ioe;
    }

    // Returns the attributes of the entry at "path", synthesizing a pseudo
    // directory entry for paths that exist only as parents of other entries;
    // returns null if no entry or inode exists.
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry(path);
            if (e == null) {
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                e = new Entry(inode.name, inode.isdir);  // pseudo directory
                e.method = METHOD_STORED;        // STORED for dir
                e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp;
            }
        } finally {
            endRead();
        }
        return e;
    }

    // Throws NoSuchFileException unless an inode exists for "path".
    void checkAccess(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            // is it necessary to readCEN as a sanity check?
            if (getInode(path) == null) {
                throw new NoSuchFileException(toString());
            }

        } finally {
            endRead();
        }
    }

    // Updates the timestamps of an existing entry; null arguments leave the
    // corresponding time unchanged.
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;     // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(byte[] path,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                // assume all path from zip file itself is "normalized"
                ZipPath zp = new ZipPath(this, child.name, true);
                if (filter == null || filter.accept(zp))
                    list.add(zp);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    // Creates a new directory entry; the parent chain must already exist.
    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        //  dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW, true);
            e.method = METHOD_STORED;            // STORED for dir
            update(e);
        } finally {
            endWrite();
        }
    }

    // Copies (or, with deletesrc, moves) the entry at "src" to "dst".
    // Directories are re-created at the destination rather than copied.
    void copyFile(boolean deletesrc, byte[] src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry(src);  // ensureOpen checked

            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime = u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        boolean hasTruncate = false;
        for (OpenOption opt : options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
            if (opt == TRUNCATE_EXISTING)
                hasTruncate = true;
        }
        if (hasAppend && hasTruncate)
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    // APPEND: replay the existing data into the new stream
                    InputStream is = getInputStream(e);
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    copyStream(is, os);
                    is.close();
                    return os;
                }
                return getOutputStream(new Entry(e, Entry.NEW));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(new Entry(path, Entry.NEW, false));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    private void checkOptions(Set<? extends OpenOption> options) {
        // check for options of null type and option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of
    // it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();
            try {
                final WritableByteChannel wbc = Channels.newChannel(
                    newOutputStream(path, options.toArray(new OpenOption[0])));
                long leftover = 0;
                if (options.contains(StandardOpenOption.APPEND)) {
                    Entry e = getEntry(path);
                    if (e != null && e.size >= 0)
                        leftover = e.size;
                }
                final long offset = leftover;
                // write-only channel: position tracks bytes written,
                // reads/repositioning/truncation are unsupported
                return new SeekableByteChannel() {
                    long written = offset;
                    public boolean isOpen() {
                        return wbc.isOpen();
                    }

                    public long position() throws IOException {
                        return written;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        throw new UnsupportedOperationException();
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int write(ByteBuffer src) throws IOException {
                        int n = wbc.write(src);
                        written += n;
                        return n;
                    }

                    public long size() throws IOException {
                        return written;
                    }

                    public void close() throws IOException {
                        wbc.close();
                    }
                };
            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                final ReadableByteChannel rbc =
                    Channels.newChannel(getInputStream(e));
                final long size = e.size;
                // read-only channel: position tracks bytes read,
                // writes/repositioning/truncation are unsupported
                return new SeekableByteChannel() {
                    long read = 0;
                    public boolean isOpen() {
                        return rbc.isOpen();
                    }

                    public long position() throws IOException {
                        return read;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        int n = rbc.read(dst);
                        if (n > 0) {
                            read += n;
                        }
                        return n;
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new NonWritableChannelException();
                    }

                    public int write (ByteBuffer src) throws IOException {
                        throw new NonWritableChannelException();
                    }

                    public long size() throws IOException {
                        return size;
                    }

                    public void close() throws IOException {
                        rbc.close();
                    }
                };
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = METHOD_DEFLATED;
            }
            // is there a better way to hook into the FileChannel's close method?
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        // publish the written temp file as the entry's data
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private Set<ExChannelCloser> exChClosers = new HashSet<>();

    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());

    // Creates a temp file next to the zip file; when "path" names an existing
    // entry its contents are copied into the temp file. Pass null for an
    // empty temp file.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
872 private void checkParents(byte[] path) throws IOException { 873 beginRead(); 874 try { 875 while ((path = getParent(path)) != null && 876 path != ROOTPATH) { 877 if (!inodes.containsKey(IndexNode.keyOf(path))) { 878 throw new NoSuchFileException(getString(path)); 879 } 880 } 881 } finally { 882 endRead(); 883 } 884 } 885 886 private static byte[] ROOTPATH = new byte[] { '/' }; 887 private static byte[] getParent(byte[] path) { 888 int off = getParentOff(path); 889 if (off <= 1) 890 return ROOTPATH; 891 return Arrays.copyOf(path, off); 892 } 893 894 private static int getParentOff(byte[] path) { 895 int off = path.length - 1; 896 if (off > 0 && path[off] == '/') // isDirectory 897 off--; 898 while (off > 0 && path[off] != '/') { off--; } 899 return off; 900 } 901 902 private final void beginWrite() { 903 rwlock.writeLock().lock(); 904 } 905 906 private final void endWrite() { 907 rwlock.writeLock().unlock(); 908 } 909 910 private final void beginRead() { 911 rwlock.readLock().lock(); 912 } 913 914 private final void endRead() { 915 rwlock.readLock().unlock(); 916 } 917 918 /////////////////////////////////////////////////////////////////// 919 920 private volatile boolean isOpen = true; 921 private final SeekableByteChannel ch; // channel to the zipfile 922 final byte[] cen; // CEN & ENDHDR 923 private END end; 924 private long locpos; // position of first LOC header (usually 0) 925 926 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 927 928 // name -> pos (in cen), IndexNode itself can be used as a "key" 929 private LinkedHashMap<IndexNode, IndexNode> inodes; 930 931 final byte[] getBytes(String name) { 932 return zc.getBytes(name); 933 } 934 935 final String getString(byte[] name) { 936 return zc.toString(name); 937 } 938 939 @SuppressWarnings("deprecation") 940 protected void finalize() throws IOException { 941 close(); 942 } 943 944 // Reads len bytes of data from the specified offset into buf. 
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    // Positioned read; synchronized on the channel because position()+read()
    // is not atomic on a shared SeekableByteChannel.
    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        // the END record must lie within the last END_MAXLEN bytes of the file
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards in overlapping blocks (overlap of ENDHDR bytes so a
        // header straddling two blocks is still found)
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P'    &&
                    buf[i+1] == (byte)'K'    &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    // sentinel values mean the real numbers live in the
                    // ZIP64 end-of-central-directory record
                    if (end.cenlen == ZIP64_MINVAL ||
                        end.cenoff == ZIP64_MINVAL ||
                        end.centot == ZIP64_MINVAL32)
                    {
                        // need to find the zip64 end;
                        byte[] loc64 = new byte[ZIP64_LOCHDR];
                        if (readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                            != loc64.length) {
                            return end;
                        }
                        long end64pos = ZIP64_LOCOFF(loc64);
                        byte[] end64buf = new byte[ZIP64_ENDHDR];
                        if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                            != end64buf.length) {
                            return end;
                        }
                        // end64 found, re-calculate everything.
                        end.cenlen = ZIP64_ENDSIZ(end64buf);
                        end.cenoff = ZIP64_ENDOFF(end64buf);
                        end.centot = (int)ZIP64_ENDTOT(end64buf); // assume total < 2g
                        end.endpos = end64pos;
                    }
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }

    // Reads zip file central directory. Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;  // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0) {
                zerror("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                zerror("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos + CENHDR, nlen, pos);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);    // tracked for deletion on close()
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // Removes an entry from both the directory tree and the inode map.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Inserts or replaces an entry in the inode map; newly materialized
    // entries (NEW/FILECH/COPY) are also linked into their parent's
    // child/sibling chain.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
1148 private long copyLOCEntry(Entry e, boolean updateHeader, 1149 OutputStream os, 1150 long written, byte[] buf) 1151 throws IOException 1152 { 1153 long locoff = e.locoff; // where to read 1154 e.locoff = written; // update the e.locoff with new value 1155 1156 // calculate the size need to write out 1157 long size = 0; 1158 // if there is A ext 1159 if ((e.flag & FLAG_DATADESCR) != 0) { 1160 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1161 size = 24; 1162 else 1163 size = 16; 1164 } 1165 // read loc, use the original loc.elen/nlen 1166 if (readFullyAt(buf, 0, LOCHDR , locoff) != LOCHDR) 1167 throw new ZipException("loc: reading failed"); 1168 if (updateHeader) { 1169 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1170 size += e.csize; 1171 written = e.writeLOC(os) + size; 1172 } else { 1173 os.write(buf, 0, LOCHDR); // write out the loc header 1174 locoff += LOCHDR; 1175 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1176 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1177 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1178 written = LOCHDR + size; 1179 } 1180 int n; 1181 while (size > 0 && 1182 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1183 { 1184 if (size < n) 1185 n = (int)size; 1186 os.write(buf, 0, n); 1187 size -= n; 1188 locoff += n; 1189 } 1190 return written; 1191 } 1192 1193 // sync the zip file system, if there is any udpate 1194 private void sync() throws IOException { 1195 //System.out.printf("->sync(%s) starting....!%n", toString()); 1196 // check ex-closer 1197 if (!exChClosers.isEmpty()) { 1198 for (ExChannelCloser ecc : exChClosers) { 1199 if (ecc.streams.isEmpty()) { 1200 ecc.ch.close(); 1201 Files.delete(ecc.path); 1202 exChClosers.remove(ecc); 1203 } 1204 } 1205 } 1206 if (!hasUpdate) 1207 return; 1208 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1209 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1210 { 1211 ArrayList<Entry> elist = 
new ArrayList<>(inodes.size()); 1212 long written = 0; 1213 byte[] buf = new byte[8192]; 1214 Entry e = null; 1215 1216 // write loc 1217 for (IndexNode inode : inodes.values()) { 1218 if (inode instanceof Entry) { // an updated inode 1219 e = (Entry)inode; 1220 try { 1221 if (e.type == Entry.COPY) { 1222 // entry copy: the only thing changed is the "name" 1223 // and "nlen" in LOC header, so we udpate/rewrite the 1224 // LOC in new file and simply copy the rest (data and 1225 // ext) without enflating/deflating from the old zip 1226 // file LOC entry. 1227 written += copyLOCEntry(e, true, os, written, buf); 1228 } else { // NEW, FILECH or CEN 1229 e.locoff = written; 1230 written += e.writeLOC(os); // write loc header 1231 if (e.bytes != null) { // in-memory, deflated 1232 os.write(e.bytes); // already 1233 written += e.bytes.length; 1234 } else if (e.file != null) { // tmp file 1235 try (InputStream is = Files.newInputStream(e.file)) { 1236 int n; 1237 if (e.type == Entry.NEW) { // deflated already 1238 while ((n = is.read(buf)) != -1) { 1239 os.write(buf, 0, n); 1240 written += n; 1241 } 1242 } else if (e.type == Entry.FILECH) { 1243 // the data are not deflated, use ZEOS 1244 try (OutputStream os2 = new EntryOutputStream(e, os)) { 1245 while ((n = is.read(buf)) != -1) { 1246 os2.write(buf, 0, n); 1247 } 1248 } 1249 written += e.csize; 1250 if ((e.flag & FLAG_DATADESCR) != 0) 1251 written += e.writeEXT(os); 1252 } 1253 } 1254 Files.delete(e.file); 1255 tmppaths.remove(e.file); 1256 } else { 1257 // dir, 0-length data 1258 } 1259 } 1260 elist.add(e); 1261 } catch (IOException x) { 1262 x.printStackTrace(); // skip any in-accurate entry 1263 } 1264 } else { // unchanged inode 1265 if (inode.pos == -1) { 1266 continue; // pseudo directory node 1267 } 1268 e = Entry.readCEN(this, inode); 1269 try { 1270 written += copyLOCEntry(e, false, os, written, buf); 1271 elist.add(e); 1272 } catch (IOException x) { 1273 x.printStackTrace(); // skip any wrong entry 1274 } 1275 
} 1276 } 1277 1278 // now write back the cen and end table 1279 end.cenoff = written; 1280 for (Entry entry : elist) { 1281 written += entry.writeCEN(os); 1282 } 1283 end.centot = elist.size(); 1284 end.cenlen = written - end.cenoff; 1285 end.write(os, written); 1286 } 1287 if (!streams.isEmpty()) { 1288 // 1289 // TBD: ExChannelCloser should not be necessary if we only 1290 // sync when being closed, all streams should have been 1291 // closed already. Keep the logic here for now. 1292 // 1293 // There are outstanding input streams open on existing "ch", 1294 // so, don't close the "cha" and delete the "file for now, let 1295 // the "ex-channel-closer" to handle them 1296 ExChannelCloser ecc = new ExChannelCloser( 1297 createTempFileInSameDirectoryAs(zfpath), 1298 ch, 1299 streams); 1300 Files.move(zfpath, ecc.path, REPLACE_EXISTING); 1301 exChClosers.add(ecc); 1302 streams = Collections.synchronizedSet(new HashSet<InputStream>()); 1303 } else { 1304 ch.close(); 1305 Files.delete(zfpath); 1306 } 1307 1308 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1309 hasUpdate = false; // clear 1310 } 1311 1312 IndexNode getInode(byte[] path) { 1313 if (path == null) 1314 throw new NullPointerException("path"); 1315 return inodes.get(IndexNode.keyOf(path)); 1316 } 1317 1318 Entry getEntry(byte[] path) throws IOException { 1319 IndexNode inode = getInode(path); 1320 if (inode instanceof Entry) 1321 return (Entry)inode; 1322 if (inode == null || inode.pos == -1) 1323 return null; 1324 return Entry.readCEN(this, inode); 1325 } 1326 1327 public void deleteFile(byte[] path, boolean failIfNotExists) 1328 throws IOException 1329 { 1330 checkWritable(); 1331 1332 IndexNode inode = getInode(path); 1333 if (inode == null) { 1334 if (path != null && path.length == 0) 1335 throw new ZipException("root directory </> can't not be delete"); 1336 if (failIfNotExists) 1337 throw new NoSuchFileException(getString(path)); 1338 } else { 1339 if (inode.isDir() && inode.child != null) 1340 throw 
new DirectoryNotEmptyException(getString(path)); 1341 updateDelete(inode); 1342 } 1343 } 1344 1345 private static void copyStream(InputStream is, OutputStream os) 1346 throws IOException 1347 { 1348 byte[] copyBuf = new byte[8192]; 1349 int n; 1350 while ((n = is.read(copyBuf)) != -1) { 1351 os.write(copyBuf, 0, n); 1352 } 1353 } 1354 1355 // Returns an out stream for either 1356 // (1) writing the contents of a new entry, if the entry exits, or 1357 // (2) updating/replacing the contents of the specified existing entry. 1358 private OutputStream getOutputStream(Entry e) throws IOException { 1359 1360 if (e.mtime == -1) 1361 e.mtime = System.currentTimeMillis(); 1362 if (e.method == -1) 1363 e.method = METHOD_DEFLATED; // TBD: use default method 1364 // store size, compressed size, and crc-32 in LOC header 1365 e.flag = 0; 1366 if (zc.isUTF8()) 1367 e.flag |= FLAG_EFS; 1368 OutputStream os; 1369 if (useTempFile) { 1370 e.file = getTempPathForEntry(null); 1371 os = Files.newOutputStream(e.file, WRITE); 1372 } else { 1373 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1374 } 1375 return new EntryOutputStream(e, os); 1376 } 1377 1378 private InputStream getInputStream(Entry e) 1379 throws IOException 1380 { 1381 InputStream eis = null; 1382 1383 if (e.type == Entry.NEW) { 1384 if (e.bytes != null) 1385 eis = new ByteArrayInputStream(e.bytes); 1386 else if (e.file != null) 1387 eis = Files.newInputStream(e.file); 1388 else 1389 throw new ZipException("update entry data is missing"); 1390 } else if (e.type == Entry.FILECH) { 1391 // FILECH result is un-compressed. 
1392 eis = Files.newInputStream(e.file); 1393 // TBD: wrap to hook close() 1394 // streams.add(eis); 1395 return eis; 1396 } else { // untouced CEN or COPY 1397 eis = new EntryInputStream(e, ch); 1398 } 1399 if (e.method == METHOD_DEFLATED) { 1400 // MORE: Compute good size for inflater stream: 1401 long bufSize = e.size + 2; // Inflater likes a bit of slack 1402 if (bufSize > 65536) 1403 bufSize = 8192; 1404 final long size = e.size; 1405 eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) { 1406 private boolean isClosed = false; 1407 public void close() throws IOException { 1408 if (!isClosed) { 1409 releaseInflater(inf); 1410 this.in.close(); 1411 isClosed = true; 1412 streams.remove(this); 1413 } 1414 } 1415 // Override fill() method to provide an extra "dummy" byte 1416 // at the end of the input stream. This is required when 1417 // using the "nowrap" Inflater option. (it appears the new 1418 // zlib in 7 does not need it, but keep it for now) 1419 protected void fill() throws IOException { 1420 if (eof) { 1421 throw new EOFException( 1422 "Unexpected end of ZLIB input stream"); 1423 } 1424 len = this.in.read(buf, 0, buf.length); 1425 if (len == -1) { 1426 buf[0] = 0; 1427 len = 1; 1428 eof = true; 1429 } 1430 inf.setInput(buf, 0, len); 1431 } 1432 private boolean eof; 1433 1434 public int available() throws IOException { 1435 if (isClosed) 1436 return 0; 1437 long avail = size - inf.getBytesWritten(); 1438 return avail > (long) Integer.MAX_VALUE ? 1439 Integer.MAX_VALUE : (int) avail; 1440 } 1441 }; 1442 } else if (e.method == METHOD_STORED) { 1443 // TBD: wrap/ it does not seem necessary 1444 } else { 1445 throw new ZipException("invalid compression method"); 1446 } 1447 streams.add(eis); 1448 return eis; 1449 } 1450 1451 // Inner class implementing the input stream used to read 1452 // a (possibly compressed) zip file entry. 
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry
        protected final long size;              // uncompressed size of this entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            size = e.size;
            pos = e.locoff;
            if (pos == -1) {
                // no cached LOC offset; re-read the entry from the CEN
                Entry e2 = getEntry(e.name);
                if (e2 == null) {
                    throw new ZipException("invalid loc for entry <" + e.name + ">");
                }
                pos = e2.locoff;
            }
            pos = -pos;  // lazy initialize the real data offset (see initDataPos)
        }

        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            initDataPos();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // position()+read() must be atomic w.r.t. other users of the
            // shared channel
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }

        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }

        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }

        public int available() {
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }

        public long size() {
            return size;
        }

        public void close() {
            rem = 0;
            streams.remove(this);
        }

        // A non-positive "pos" still encodes the (negated) LOC header
        // offset; read the LOC header to skip past the name and extra
        // fields to the start of the entry data.
        private void initDataPos() throws IOException {
            if (pos <= 0) {
                pos = -pos + locpos;
                byte[] buf = new byte[LOCHDR];
                if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                    throw new ZipException("invalid loc " + pos + " for entry reading");
                }
                pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
            }
        }
    }

    class EntryOutputStream extends DeflaterOutputStream
    {
        private CRC32 crc;
        private Entry e;
        private long written;              // byte count for METHOD_STORED
        private boolean isClosed = false;

        EntryOutputStream(Entry e, OutputStream os)
            throws IOException
        {
            super(os, getDeflater());
            if (e == null)
                throw new NullPointerException("Zip entry is null");
            this.e = e;
            crc = new CRC32();
        }

        @Override
        public synchronized void write(byte b[], int off, int len)
            throws IOException
        {
            if (e.type != Entry.FILECH)    // only from sync
                ensureOpen();
            if (isClosed) {
                throw new IOException("Stream closed");
            }
            if (off < 0 || len < 0 || off > b.length - len) {
                throw new IndexOutOfBoundsException();
            } else if (len == 0) {
                return;
            }
            switch (e.method) {
            case METHOD_DEFLATED:
                super.write(b, off, len);
                break;
            case METHOD_STORED:
                written += len;
                out.write(b, off, len);
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            crc.update(b, off, len);
        }

        @Override
        public synchronized void close() throws IOException {
            if (isClosed) {
                return;
            }
            isClosed = true;
            // TBD ensureOpen();
            // record the final size/csize/crc on the entry
            switch (e.method) {
            case METHOD_DEFLATED:
                finish();
                e.size  = def.getBytesRead();
                e.csize = def.getBytesWritten();
                e.crc = crc.getValue();
                break;
            case METHOD_STORED:
                // we already know that both e.size and e.csize are the same
                e.size = e.csize = written;
                e.crc = crc.getValue();
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            //crc.reset();
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();

            if (e.type == Entry.FILECH) {
                // sync() writes the LOC/CEN itself; don't re-register
                releaseDeflater(def);
                return;
            }
            super.close();
            releaseDeflater(def);
            update(e);
        }
    }

    // Throws a ZipException with the given message.
    static void zerror(String msg) throws ZipException {
        throw new ZipException(msg);
    }

    // Maximum number of de/inflaters we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                return new Inflater(true);
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
1674 private Deflater getDeflater() { 1675 synchronized (deflaters) { 1676 int size = deflaters.size(); 1677 if (size > 0) { 1678 Deflater def = deflaters.remove(size - 1); 1679 return def; 1680 } else { 1681 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1682 } 1683 } 1684 } 1685 1686 // Releases the specified inflater to the list of available inflaters. 1687 private void releaseDeflater(Deflater def) { 1688 synchronized (deflaters) { 1689 if (inflaters.size() < MAX_FLATER) { 1690 def.reset(); 1691 deflaters.add(def); 1692 } else { 1693 def.end(); 1694 } 1695 } 1696 } 1697 1698 // End of central directory record 1699 static class END { 1700 // these 2 fields are not used by anyone and write() uses "0" 1701 // int disknum; 1702 // int sdisknum; 1703 int endsub; // endsub 1704 int centot; // 4 bytes 1705 long cenlen; // 4 bytes 1706 long cenoff; // 4 bytes 1707 int comlen; // comment length 1708 byte[] comment; 1709 1710 /* members of Zip64 end of central directory locator */ 1711 // int diskNum; 1712 long endpos; 1713 // int disktot; 1714 1715 void write(OutputStream os, long offset) throws IOException { 1716 boolean hasZip64 = false; 1717 long xlen = cenlen; 1718 long xoff = cenoff; 1719 if (xlen >= ZIP64_MINVAL) { 1720 xlen = ZIP64_MINVAL; 1721 hasZip64 = true; 1722 } 1723 if (xoff >= ZIP64_MINVAL) { 1724 xoff = ZIP64_MINVAL; 1725 hasZip64 = true; 1726 } 1727 int count = centot; 1728 if (count >= ZIP64_MINVAL32) { 1729 count = ZIP64_MINVAL32; 1730 hasZip64 = true; 1731 } 1732 if (hasZip64) { 1733 long off64 = offset; 1734 //zip64 end of central directory record 1735 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1736 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1737 writeShort(os, 45); // version made by 1738 writeShort(os, 45); // version needed to extract 1739 writeInt(os, 0); // number of this disk 1740 writeInt(os, 0); // central directory start disk 1741 writeLong(os, centot); // number of directory entires on disk 1742 
writeLong(os, centot); // number of directory entires 1743 writeLong(os, cenlen); // length of central directory 1744 writeLong(os, cenoff); // offset of central directory 1745 1746 //zip64 end of central directory locator 1747 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1748 writeInt(os, 0); // zip64 END start disk 1749 writeLong(os, off64); // offset of zip64 END 1750 writeInt(os, 1); // total number of disks (?) 1751 } 1752 writeInt(os, ENDSIG); // END record signature 1753 writeShort(os, 0); // number of this disk 1754 writeShort(os, 0); // central directory start disk 1755 writeShort(os, count); // number of directory entries on disk 1756 writeShort(os, count); // total number of directory entries 1757 writeInt(os, xlen); // length of central directory 1758 writeInt(os, xoff); // offset of central directory 1759 if (comment != null) { // zip file comment 1760 writeShort(os, comment.length); 1761 writeBytes(os, comment); 1762 } else { 1763 writeShort(os, 0); 1764 } 1765 } 1766 } 1767 1768 // Internal node that links a "name" to its pos in cen table. 1769 // The node itself can be used as a "key" to lookup itself in 1770 // the HashMap inodes. 
    static class IndexNode {
        byte[] name;      // entry name (with a leading '/', see cenInit ctor)
        int hashcode;     // node is hashable/hashed by its name
        int pos = -1;     // position in cen table, -1 means the
                          // entry does not exist in zip file
        boolean isdir;

        IndexNode(byte[] name, boolean isdir) {
            name(name);
            this.isdir = isdir;
            this.pos = -1;
        }

        IndexNode(byte[] name, int pos) {
            name(name);
            this.pos = pos;
        }

        // constructor for cenInit(): builds the node straight from the CEN
        // record at "pos", prefixing the stored name with '/'
        IndexNode(byte[] cen, int noff, int nlen, int pos) {
            if (cen[noff + nlen - 1] == '/') {
                isdir = true;
                nlen--;
            }
            name = new byte[nlen + 1];
            System.arraycopy(cen, pos + CENHDR, name, 1, nlen);
            name[0] = '/';
            name(name);
            this.pos = pos;
        }

        // per-thread reusable lookup key, avoids allocating a node per lookup
        private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>();

        final static IndexNode keyOf(byte[] name) { // get a lookup key;
            IndexNode key = cachedKey.get();
            if (key == null) {
                key = new IndexNode(name, -1);
                cachedKey.set(key);
            }
            return key.as(name);
        }

        final void name(byte[] name) {
            this.name = name;
            this.hashcode = Arrays.hashCode(name);
        }

        final IndexNode as(byte[] name) {    // reuse the node, mostly
            name(name);                      // as a lookup "key"
            return this;
        }

        boolean isDir() {
            return isdir;
        }

        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // delegate to ParentLookup's (asymmetric) comparison
            if (other instanceof ParentLookup) {
                return ((ParentLookup)other).equals(this);
            }
            return Arrays.equals(name, ((IndexNode)other).name);
        }

        public int hashCode() {
            return hashcode;
        }

        IndexNode() {}
        IndexNode sibling;    // next sibling in the parent's child list
        IndexNode child;      // 1st child
    }

    static class Entry extends IndexNode implements ZipFileAttributes {

        static final int CEN    = 1;    // entry read from cen
        static final int NEW    = 2;    // updated contents in bytes or file
        static final int FILECH = 3;    // fch update in "file"
        static final int COPY   = 4;    // copy of a CEN entry

        byte[] bytes;       // updated content bytes
        Path   file;        // use tmp file to store bytes;
        int    type = CEN;  // default is the entry read from cen

        // entry attributes
        int    version;
        int    flag;
        int    method = -1; // compression method
        long   mtime  = -1; // last modification time (in DOS time)
        long   atime  = -1; // last access time
        long   ctime  = -1; // create time
        long   crc    = -1; // crc-32 of entry data
        long   csize  = -1; // compressed size of entry data
        long   size   = -1; // uncompressed size of entry data
        byte[] extra;

        // cen

        // these fields are not used by anyone and writeCEN uses "0"
        // int versionMade;
        // int disk;
        // int attrs;
        // long attrsEx;
        long locoff;        // offset of the LOC header in the zip file
        byte[] comment;

        Entry() {}

        Entry(byte[] name, boolean isdir) {
            name(name);
            this.isdir = isdir;
            this.mtime  = this.ctime = this.atime = System.currentTimeMillis();
            this.crc    = 0;
            this.size   = 0;
            this.csize  = 0;
            this.method = METHOD_DEFLATED;
        }

        Entry(byte[] name, int type, boolean isdir) {
            this(name, isdir);
            this.type = type;
        }

        // copy constructor, used for COPY/NEW entries derived from an
        // existing entry
        Entry (Entry e, int type) {
            name(e.name);
            this.isdir   = e.isdir;
            this.version = e.version;
            this.ctime   = e.ctime;
            this.atime   = e.atime;
            this.mtime   = e.mtime;
            this.crc     = e.crc;
            this.size    = e.size;
            this.csize   = e.csize;
            this.method  = e.method;
            this.extra   = e.extra;
            /*
            this.versionMade = e.versionMade;
            this.disk    = e.disk;
            this.attrs   = e.attrs;
            this.attrsEx = e.attrsEx;
            */
            this.locoff  = e.locoff;
            this.comment = e.comment;
            this.type    = type;
        }

        Entry (byte[] name, Path file, int type) {
            this(name, type, false);
            this.file = file;
            this.method = METHOD_STORED;
        }

        // Minimum "version needed to extract" for this entry's method.
        int version() throws ZipException {
            if (method == METHOD_DEFLATED)
                return 20;
            else if (method == METHOD_STORED)
                return 10;
            throw new ZipException("unsupported compression method");
        }

        ///////////////////// CEN //////////////////////
        static Entry readCEN(ZipFileSystem zipfs, IndexNode inode)
            throws IOException
        {
            return new Entry().cen(zipfs, inode);
        }

        // Populates this entry from the CEN record located at inode.pos.
        private Entry cen(ZipFileSystem zipfs, IndexNode inode)
            throws IOException
        {
            byte[] cen = zipfs.cen;
            int pos = inode.pos;
            if (!cenSigAt(cen, pos))
                zerror("invalid CEN header (bad signature)");
            version = CENVER(cen, pos);
            flag    = CENFLG(cen, pos);
            method  = CENHOW(cen, pos);
            mtime   = dosToJavaTime(CENTIM(cen, pos));
            crc     = CENCRC(cen, pos);
            csize   = CENSIZ(cen, pos);
            size    = CENLEN(cen, pos);
            int nlen = CENNAM(cen, pos);
            int elen = CENEXT(cen, pos);
            int clen = CENCOM(cen, pos);
            /*
            versionMade = CENVEM(cen, pos);
            disk    = CENDSK(cen, pos);
            attrs   = CENATT(cen, pos);
            attrsEx = CENATX(cen, pos);
            */
            locoff  = CENOFF(cen, pos);
            pos += CENHDR;
            // reuse the name/hash already computed for the IndexNode
            this.name = inode.name;
            this.isdir = inode.isdir;
            this.hashcode = inode.hashcode;

            pos += nlen;
            if (elen > 0) {
                extra = Arrays.copyOfRange(cen, pos, pos + elen);
                pos += elen;
                readExtra(zipfs);
            }
            if (clen > 0) {
                comment = Arrays.copyOfRange(cen, pos, pos + clen);
            }
            return this;
        }

        // Writes this entry's CEN record; returns the bytes written.
        int writeCEN(OutputStream os) throws IOException
        {
            int written  = CENHDR;
            int version0 = version();
            long csize0  = csize;
            long size0   = size;
            long locoff0 = locoff;
            int elen64   = 0;    // extra for ZIP64
            int elenNTFS = 0;    // extra for NTFS (a/c/mtime)
            int elenEXTT = 0;    // extra for Extended Timestamp
            boolean foundExtraTime = false;    // if time stamp NTFS, EXTT present

            byte[] zname = isdir ? toDirectoryPath(name) : name;

            // confirm size/length
            int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash"
            int elen = (extra != null) ? extra.length : 0;
            int eoff = 0;
            int clen = (comment != null) ? comment.length : 0;
            if (csize >= ZIP64_MINVAL) {
                csize0 = ZIP64_MINVAL;
                elen64 += 8;              // csize(8)
            }
            if (size >= ZIP64_MINVAL) {
                size0 = ZIP64_MINVAL;     // size(8)
                elen64 += 8;
            }
            if (locoff >= ZIP64_MINVAL) {
                locoff0 = ZIP64_MINVAL;
                elen64 += 8;              // offset(8)
            }
            if (elen64 != 0) {
                elen64 += 4;              // header and data sz 4 bytes
            }
            // scan the existing extra fields for a timestamp block
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {          // use NTFS
                    elenNTFS = 36;        // total 36 bytes
                } else {                  // Extended Timestamp otherwise
                    elenEXTT = 9;         // only mtime in cen
                }
            }
            writeInt(os, CENSIG);         // CEN header signature
            if (elen64 != 0) {
                writeShort(os, 45);       // ver 4.5 for zip64
                writeShort(os, 45);
            } else {
                writeShort(os, version0); // version made by
                writeShort(os, version0); // version needed to extract
            }
            writeShort(os, flag);         // general purpose bit flag
            writeShort(os, method);       // compression method
            // last modification time
            writeInt(os, (int)javaToDosTime(mtime));
            writeInt(os, crc);            // crc-32
            writeInt(os, csize0);         // compressed size
            writeInt(os, size0);          // uncompressed size
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);

            if (comment != null) {
                writeShort(os, Math.min(clen, 0xffff));
            } else {
                writeShort(os, 0);
            }
            writeShort(os, 0);            // starting disk number
            writeShort(os, 0);            // internal file attributes (unused)
            writeInt(os, 0);              // external file attributes (unused)
            writeInt(os, locoff0);        // relative offset of local header
            writeBytes(os, zname, 1, nlen);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);// Zip64 extra
                writeShort(os, elen64 - 4); // size of "this" extra block
                if (size0 == ZIP64_MINVAL)
                    writeLong(os, size);
                if (csize0 == ZIP64_MINVAL)
                    writeLong(os, csize);
                if (locoff0 == ZIP64_MINVAL)
                    writeLong(os, locoff);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);          // reserved
                writeShort(os, 0x0001);   // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);
                if (ctime == -1)
                    os.write(0x3);        // mtime and atime
                else
                    os.write(0x7);        // mtime, atime and ctime
                writeInt(os, javaToUnixTime(mtime));
            }
            if (extra != null)            // whatever not recognized
                writeBytes(os, extra);
            if (comment != null)          //TBD: 0, Math.min(commentBytes.length, 0xffff));
                writeBytes(os, comment);
            return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT;
        }

        ///////////////////// LOC //////////////////////

        // Writes this entry's LOC header; returns the bytes written.
        int writeLOC(OutputStream os) throws IOException {
            writeInt(os, LOCSIG);         // LOC header signature
            int version = version();

            byte[] zname = isdir ? toDirectoryPath(name) : name;
            int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash
            int elen = (extra != null) ? extra.length : 0;
            boolean foundExtraTime = false;    // if extra timestamp present
            int eoff = 0;
            int elen64 = 0;
            int elenEXTT = 0;
            int elenNTFS = 0;
            if ((flag & FLAG_DATADESCR) != 0) {
                writeShort(os, version()); // version needed to extract
                writeShort(os, flag);      // general purpose bit flag
                writeShort(os, method);    // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;    //headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);    // ver 4.5 for zip64
                } else {
                    writeShort(os, version()); // version needed to extract
                }
                writeShort(os, flag);      // general purpose bit flag
                writeShort(os, method);    // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);         // crc-32
                if (elen64 != 0) {
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);   // compressed size
                    writeInt(os, size);    // uncompressed size
                }
            }
            // scan the existing extra fields for a timestamp block
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;         // NTFS, total 36 bytes
                } else {                   // on unix use "ext time"
                    elenEXTT = 9;
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, zname, 1, nlen);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);           // reserved
                writeShort(os, 0x0001);    // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);// size for the following data block
                int fbyte = 0x1;
                if (atime != -1)           // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)           // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);           // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor
        int writeEXT(OutputStream os) throws IOException {
            writeInt(os, EXTSIG);          // EXT header signature
            writeInt(os, crc);             // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);       // compressed size
                writeInt(os, size);        // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;    // write cursor for unrecognized blocks we keep
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)       // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)    // invalid zip64 extra
                            break;             // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)
                        break;
                    pos += 4;              // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)
                        break;
                    if (SH(extra, pos + 2) != 24)
                        break;
                    // override the loc field, datatime here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime, if flag
                    // "zipinfo-time" is not specified to false;
                    // there is performance cost (move up to loc and read) to
                    // access the loc table foreach entry;
                    if (zipfs.noExtt) {
                        if (sz == 5)
                            mtime = unixToJavaTime(LG(extra, pos + 1));
                        break;
                    }
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (!locSigAt(buf, 0))
                        throw new ZipException("loc: wrong sig ->"
                                           + Long.toString(getSig(buf, 0), 16));
                    int locElen = LOCEXT(buf);
                    if (locElen < 9)       // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    int locPos = 0;
                    // find the EXTT block inside the LOC extra data
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ  = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:    // unknown tag: keep the block, compacting in place
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;
        }

        ///////// basic file attributes ///////////
        @Override
        public FileTime creationTime() {
            // fall back to mtime when no create time was recorded
            return FileTime.fromMillis(ctime == -1 ? mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        @Override
        public boolean isOther() {
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        @Override
        public FileTime lastAccessTime() {
            // fall back to mtime when no access time was recorded
            return FileTime.fromMillis(atime == -1 ? mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        @Override
        public long size() {
            return size;
        }

        @Override
        public boolean isSymbolicLink() {
            return false;
        }

        @Override
        public Object fileKey() {
            return null;
        }

        ///////// zip entry attributes ///////////
        public long compressedSize() {
            return csize;
        }

        public long crc() {
            return crc;
        }

        public int method() {
            return method;
        }

        // defensive copies: never expose the internal arrays
        public byte[] extra() {
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        public byte[] comment() {
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }

        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format(" creationTime : %tc%n", creationTime().toMillis());
            fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis());
            fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format(" isRegularFile : %b%n", isRegularFile());
            fm.format(" isDirectory : %b%n", isDirectory());
            fm.format(" isSymbolicLink : %b%n", isSymbolicLink());
            fm.format(" isOther : %b%n", isOther());
            fm.format(" fileKey : %s%n", fileKey());
            fm.format(" size : %d%n", size());
            fm.format(" compressedSize : %d%n", compressedSize());
            fm.format(" crc : %x%n", crc());
            fm.format(" method : %d%n", method());
            fm.close();
            return sb.toString();
        }
    }

    // Keeps the old channel (and its backing file) alive until all input
    // streams opened on it have been closed; see sync().
    private static class ExChannelCloser {
        Path path;
        SeekableByteChannel ch;
        Set<InputStream> streams;
        ExChannelCloser(Path path,
                        SeekableByteChannel ch,
                        Set<InputStream> streams)
        {
            this.path = path;
            this.ch = ch;
            this.streams =
streams; 2429 } 2430 } 2431 2432 // ZIP directory has two issues: 2433 // (1) ZIP spec does not require the ZIP file to include 2434 // directory entry 2435 // (2) all entries are not stored/organized in a "tree" 2436 // structure. 2437 // A possible solution is to build the node tree ourself as 2438 // implemented below. 2439 private IndexNode root; 2440 2441 // default time stamp for pseudo entries 2442 private long zfsDefaultTimeStamp = System.currentTimeMillis(); 2443 2444 private void removeFromTree(IndexNode inode) { 2445 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name))); 2446 IndexNode child = parent.child; 2447 if (child.equals(inode)) { 2448 parent.child = child.sibling; 2449 } else { 2450 IndexNode last = child; 2451 while ((child = child.sibling) != null) { 2452 if (child.equals(inode)) { 2453 last.sibling = child.sibling; 2454 break; 2455 } else { 2456 last = child; 2457 } 2458 } 2459 } 2460 } 2461 2462 // purely for parent lookup, so we don't have to copy the parent 2463 // name every time 2464 static class ParentLookup extends IndexNode { 2465 int len; 2466 ParentLookup() {} 2467 2468 final ParentLookup as(byte[] name, int len) { // as a lookup "key" 2469 name(name, len); 2470 return this; 2471 } 2472 2473 void name(byte[] name, int len) { 2474 this.name = name; 2475 this.len = len; 2476 // calculate the hashcode the same way as Arrays.hashCode() does 2477 int result = 1; 2478 for (int i = 0; i < len; i++) 2479 result = 31 * result + name[i]; 2480 this.hashcode = result; 2481 } 2482 2483 @Override 2484 public boolean equals(Object other) { 2485 if (!(other instanceof IndexNode)) { 2486 return false; 2487 } 2488 byte[] oname = ((IndexNode)other).name; 2489 return Arrays.equals(name, 0, len, 2490 oname, 0, oname.length); 2491 } 2492 2493 } 2494 2495 private void buildNodeTree() throws IOException { 2496 beginWrite(); 2497 try { 2498 IndexNode root = new IndexNode(ROOTPATH, true); 2499 IndexNode[] nodes = inodes.keySet().toArray(new 
IndexNode[0]); 2500 inodes.put(root, root); 2501 ParentLookup lookup = new ParentLookup(); 2502 for (IndexNode node : nodes) { 2503 IndexNode parent; 2504 while (true) { 2505 int off = getParentOff(node.name); 2506 if (off <= 1) { // parent is root 2507 node.sibling = root.child; 2508 root.child = node; 2509 break; 2510 } 2511 lookup = lookup.as(node.name, off); 2512 if (inodes.containsKey(lookup)) { 2513 parent = inodes.get(lookup); 2514 node.sibling = parent.child; 2515 parent.child = node; 2516 break; 2517 } 2518 // add new pseudo directory entry 2519 parent = new IndexNode(Arrays.copyOf(node.name, off), true); 2520 inodes.put(parent, parent); 2521 node.sibling = parent.child; 2522 parent.child = node; 2523 node = parent; 2524 } 2525 } 2526 } finally { 2527 endWrite(); 2528 } 2529 } 2530 }