1 /* 2 * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
 */

package jdk.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.*;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.nio.file.spi.*;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Inflater;
import java.util.zip.Deflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.ZipException;
import java.util.zip.ZipError;
import static java.lang.Boolean.*;
import static jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;
import static java.nio.file.StandardOpenOption.*;
import static java.nio.file.StandardCopyOption.*;

/**
 * A FileSystem built on a zip file
 *
 * @author Xueming Shen
 */

class ZipFileSystem extends FileSystem {

    private final ZipFileSystemProvider provider;
    private final ZipPath defaultdir;      // resolved from the "default.dir" env entry
    private boolean readOnly = false;      // set in the constructor if zfpath is not writable
    private final Path zfpath;             // the underlying zip file on the host file system
    private final ZipCoder zc;             // encoder/decoder for entry names and comments

    // configurable by env map
    private final String defaultDir;    // default dir for the file system
    private final String nameEncoding;  // default encoding for name/comment
    private final boolean useTempFile;  // use a temp file for newOS, default
                                        // is to use BAOS for better performance
    private final boolean createNew;    // create a new zip if not exists
    // NOTE: "os.name" is read in a privileged block so the provider works
    // when a SecurityManager is installed.
    private static final boolean isWindows = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> System.getProperty("os.name")
                                                    .startsWith("Windows"));

    /**
     * Creates a zip file system over {@code zfpath}.
     *
     * Recognized env entries: "create" ("true" creates an empty zip if the
     * file does not exist), "encoding" (name/comment charset, default UTF-8),
     * "useTempFile" (Boolean.TRUE buffers new entries in a temp file instead
     * of a byte array), and "default.dir" (must be absolute, default "/").
     *
     * @throws IOException if the zip file cannot be opened/created or its
     *         central directory cannot be parsed
     * @throws IllegalArgumentException if "default.dir" is not absolute
     * @throws FileSystemNotFoundException if the file is absent and
     *         "create" was not requested
     */
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env)
        throws IOException
    {
        // configurable env setup
        this.createNew    = "true".equals(env.get("create"));
        this.nameEncoding = env.containsKey("encoding") ?
                            (String)env.get("encoding") : "UTF-8";
        this.useTempFile  = TRUE.equals(env.get("useTempFile"));
        this.defaultDir   = env.containsKey("default.dir") ?
                            (String)env.get("default.dir") : "/";
        if (this.defaultDir.charAt(0) != '/')
            throw new IllegalArgumentException("default dir should be absolute");

        this.provider = provider;
        this.zfpath = zfpath;
        if (Files.notExists(zfpath)) {
            if (createNew) {
                // write an empty END record so the file is a valid empty zip
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        if (!Files.isWritable(zfpath))
            this.readOnly = true;
        this.zc = ZipCoder.get(nameEncoding);
        this.defaultdir = new ZipPath(this, getBytes(defaultDir));
        this.ch = Files.newByteChannel(zfpath, READ);
        this.cen = initCEN();   // parse the central directory and build the node tree
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // Throws if the underlying zip file was opened read-only.
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        // a zip file system has exactly one root, "/"
        ArrayList<Path> pathArr = new ArrayList<>();
        pathArr.add(new ZipPath(this, new byte[]{'/'}));
        return pathArr;
    }
    // Returns the directory that relative paths are resolved against
    // (the "default.dir" env entry, "/" by default).
    ZipPath getDefaultDir() {  // package private
        return defaultdir;
    }

    @Override
    public ZipPath getPath(String first, String... more) {
        String path;
        if (more.length == 0) {
            path = first;
        } else {
            // join the non-empty segments with '/'
            StringBuilder sb = new StringBuilder();
            sb.append(first);
            for (String segment: more) {
                if (segment.length() > 0) {
                    if (sb.length() > 0)
                        sb.append('/');
                    sb.append(segment);
                }
            }
            path = sb.toString();
        }
        return new ZipPath(this, getBytes(path));
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        // zip entries carry no owner/group information
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        // a single store, rooted at "/"
        ArrayList<FileStore> list = new ArrayList<>(1);
        list.add(new ZipFileStore(new ZipPath(this, new byte[]{'/'})));
        return list;
    }

    private static final Set<String> supportedFileAttributeViews =
        Collections.unmodifiableSet(
            new HashSet<String>(Arrays.asList("basic", "zip")));

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    // The underlying zip file on the host file system.
    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        // NOTE(review): pos can never equal length() (indexOf returns at most
        // length()-1), so "glob:" with an empty pattern is accepted — presumably
        // intentional, matches the default file system providers.
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equals(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);   // translate glob to a regex
        } else {
            if (syntax.equals(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    /**
     * Closes this file system: marks it closed, closes outstanding streams,
     * syncs any pending updates back to the zip file, releases pooled
     * (in/de)flaters, deletes temp files, and deregisters from the provider.
     * The first IOException from temp-file cleanup is thrown, with any
     * further ones attached as suppressed exceptions.
     */
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            // iterate a copy: each close() removes itself from 'streams'
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is: copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            sync();
            ch.close();                 // close the ch just in case no update
        } finally {                     // and sync does not close the ch
            endWrite();
        }

        // release pooled inflaters/deflaters back to the OS
        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p: tmppaths) {
                try {
                    Files.deleteIfExists(p);
                } catch (IOException x) {
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
           throw ioe;
    }

    /**
     * Returns the attributes of the entry at {@code path}, a pseudo-directory
     * entry if the path only exists as a parent in the index, or null if the
     * path does not exist at all.
     */
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry0(path);
            if (e == null) {
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                e = new Entry(inode.name);       // pseudo directory
                e.method = METHOD_STORED;        // STORED for dir
                e.mtime = e.atime = e.ctime = -1;// -1 for all times
            }
        } finally {
            endRead();
        }
        return new ZipFileAttributes(e);
    }

    // Sets any non-null subset of the three timestamps on an existing entry.
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry0(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;      // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    // Wraps an internal (relative) entry name into an absolute ZipPath.
    private ZipPath toZipPath(byte[] path) {
        // make it absolute
        byte[] p = new byte[path.length + 1];
        p[0] = '/';
        System.arraycopy(path, 0, p, 1, path.length);
        return new ZipPath(this, p);
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(byte[] path,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            // children are kept as a sibling-linked list off the parent node
            IndexNode child = inode.child;
            while (child != null) {
                ZipPath zp = toZipPath(child.name);
                if (filter == null || filter.accept(zp))
                    list.add(zp);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    // Creates a directory entry; attrs are currently ignored.
    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW);
            e.method = METHOD_STORED;            // STORED for dir
            update(e);
        } finally {
            endWrite();
        }
    }

    /**
     * Copies entry {@code src} to {@code dst}; when {@code deletesrc} is true
     * this is a move (src is removed afterwards). Honors REPLACE_EXISTING and
     * COPY_ATTRIBUTES; without COPY_ATTRIBUTES the target gets fresh times.
     */
    void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry0(src);  // ensureOpen checked
            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry0(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime= u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        for (OpenOption opt: options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
        }
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry0(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    // APPEND: replay the existing data into the new stream first
                    // NOTE(review): is.close() is not in a finally; if copyStream
                    // throws, the input stream leaks — confirm and consider
                    // try-with-resources here.
                    InputStream is = getInputStream(e);
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    copyStream(is, os);
                    is.close();
                    return os;
                }
                return getOutputStream(new Entry(e, Entry.NEW));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(new Entry(path, Entry.NEW));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry0(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    private void checkOptions(Set<? extends OpenOption> options) {
        // check for options of null type and option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of
    // it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();
            try {
                final WritableByteChannel wbc = Channels.newChannel(
                    newOutputStream(path, options.toArray(new OpenOption[0])));
                long leftover = 0;
                if (options.contains(StandardOpenOption.APPEND)) {
                    // position/size start after the existing data
                    Entry e = getEntry0(path);
                    if (e != null && e.size >= 0)
                        leftover = e.size;
                }
                final long offset = leftover;
                // write-only channel: position tracks bytes written, no
                // random access (position(long)/read/truncate unsupported)
                return new SeekableByteChannel() {
                    long written = offset;
                    public boolean isOpen() {
                        return wbc.isOpen();
                    }

                    public long position() throws IOException {
                        return written;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        throw new UnsupportedOperationException();
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int write(ByteBuffer src) throws IOException {
                        int n = wbc.write(src);
                        written += n;
                        return n;
                    }

                    public long size() throws IOException {
                        return written;
                    }

                    public void close() throws IOException {
                        wbc.close();
                    }
                };
            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry0(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                final ReadableByteChannel rbc =
                    Channels.newChannel(getInputStream(e));
                final long size = e.size;
                // read-only channel: sequential reads only, size is the
                // uncompressed entry size
                return new SeekableByteChannel() {
                    long read = 0;
                    public boolean isOpen() {
                        return rbc.isOpen();
                    }

                    public long position() throws IOException {
                        return read;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        int n = rbc.read(dst);
                        if (n > 0) {
                            read += n;
                        }
                        return n;
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new NonWritableChannelException();
                    }

                    public int write (ByteBuffer src) throws IOException {
                        throw new NonWritableChannelException();
                    }

                    public long size() throws IOException {
                        return size;
                    }

                    public void close() throws IOException {
                        rbc.close();
                    }
                };
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry0(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE_NEW))
                        throw new NoSuchFileException(getString(path));
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            // FILECH entries already have their data in a temp file; reuse it
            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = METHOD_DEFLATED;
            }
            // is there a better way to hook into the FileChannel's close method?
            // wrap-and-delegate so implCloseChannel can commit (forWrite) or
            // clean up (read-only) the temp file
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    // cannot meaningfully map a temp-backed zip entry
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        // commit the temp file's contents as the entry data
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private Set<ExChannelCloser> exChClosers = new HashSet<>();

    // temp files created for entry data; deleted on close()
    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());

    // Creates a temp file next to the zip; if path is non-null and exists,
    // pre-populates the temp file with that entry's data.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry0(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
    // Walks up the parent chain of "path" and throws NoSuchFileException at
    // the first parent that has no inode (the root, length 0, always exists).
    private void checkParents(byte[] path) throws IOException {
        beginRead();
        try {
            while ((path = getParent(path)) != null && path.length != 0) {
                if (!inodes.containsKey(IndexNode.keyOf(path))) {
                    throw new NoSuchFileException(getString(path));
                }
            }
        } finally {
            endRead();
        }
    }

    private static byte[] ROOTPATH = new byte[0];

    // Returns the parent of "path" (keeping the trailing '/'), or ROOTPATH
    // if the path is a direct child of the root.
    private static byte[] getParent(byte[] path) {
        int off = path.length - 1;
        if (off > 0 && path[off] == '/')  // isDirectory
            off--;
        while (off > 0 && path[off] != '/') { off--; }
        if (off <= 0)
            return ROOTPATH;
        return Arrays.copyOf(path, off + 1);
    }

    private final void beginWrite() {
        rwlock.writeLock().lock();
    }

    private final void endWrite() {
        rwlock.writeLock().unlock();
    }

    private final void beginRead() {
        rwlock.readLock().lock();
    }

    private final void endRead() {
        rwlock.readLock().unlock();
    }

    ///////////////////////////////////////////////////////////////////

    private volatile boolean isOpen = true;
    private final SeekableByteChannel ch; // channel to the zipfile
    final byte[] cen;     // CEN & ENDHDR
    private END end;
    private long locpos;  // position of first LOC header (usually 0)

    // guards the inode map and all update/sync operations
    private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

    // name -> pos (in cen), IndexNode itself can be used as a "key"
    private LinkedHashMap<IndexNode, IndexNode> inodes;

    final byte[] getBytes(String name) {
        return zc.getBytes(name);
    }

    final String getString(byte[] name) {
        return zc.toString(name);
    }

    // last-resort cleanup if the file system is never closed explicitly
    protected void finalize() throws IOException {
        close();
    }

    // Returns the absolute file position of the entry's data, resolving a
    // lazy (-1) locoff via the CEN copy and skipping the LOC header.
    private long getDataPos(Entry e) throws IOException {
        if (e.locoff == -1) {
            Entry e2 = getEntry0(e.name);
            if (e2 == null)
                throw new ZipException("invalid loc for entry <" + e.name + ">");
            e.locoff = e2.locoff;
        }
        byte[] buf = new byte[LOCHDR];
        if (readFullyAt(buf, 0, buf.length, e.locoff) != buf.length)
            throw new ZipException("invalid loc for entry <" + e.name + ">");
        return locpos + e.locoff + LOCHDR + LOCNAM(buf) + LOCEXT(buf);
    }

    // Reads len bytes of data from the specified offset into buf.
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    // synchronized on ch: position()+read() must be atomic w.r.t. other readers
    // NOTE(review): a single read() may return fewer bytes than requested
    // despite the method name — callers compare the return value to detect it.
    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        // END must lie within the last END_MAXLEN bytes of the file
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards in overlapping blocks (overlap ENDHDR bytes so a
        // signature straddling two blocks is still found)
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P' &&
                    buf[i+1] == (byte)'K' &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    // sentinel values mean the real sizes live in the ZIP64 end record
                    if (end.cenlen == ZIP64_MINVAL ||
                        end.cenoff == ZIP64_MINVAL ||
                        end.centot == ZIP64_MINVAL32)
                    {
                        // need to find the zip64 end;
                        byte[] loc64 = new byte[ZIP64_LOCHDR];
                        if (readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                            != loc64.length) {
                            return end;    // fall back to the 32-bit values
                        }
                        long end64pos = ZIP64_LOCOFF(loc64);
                        byte[] end64buf = new byte[ZIP64_ENDHDR];
                        if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                            != end64buf.length) {
                            return end;    // fall back to the 32-bit values
                        }
                        // end64 found, re-calculate everything.
                        end.cenlen = ZIP64_ENDSIZ(end64buf);
                        end.cenoff = ZIP64_ENDOFF(end64buf);
                        end.centot = (int)ZIP64_ENDTOT(end64buf); // assume total < 2g
                        end.endpos = end64pos;
                    }
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }

    // Reads zip file central directory.
    // Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;  // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;  // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (CENSIG(cen, pos) != CENSIG)
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0)
                zerror("invalid CEN header (encrypted entry)");
            if (method != METHOD_STORED && method != METHOD_DEFLATED)
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            if (pos + CENHDR + nlen > limit)
                zerror("invalid CEN header (bad header size)");
            byte[] name = Arrays.copyOfRange(cen, pos + CENHDR, pos + CENHDR + nlen);
            IndexNode inode = new IndexNode(name, pos);
            inodes.put(inode, inode);
            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    // Throws if close() has already run.
    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    // The path is registered in tmppaths so close() can delete it.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    // true once any entry has been added/changed/removed; sync() is a no-op otherwise
    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = IndexNode.keyOf(null);

    // Removes an entry from both the parent/child tree and the inode map.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Installs (or replaces) an entry in the inode map and links new entries
    // into their parent's child list.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
1125 private long copyLOCEntry(Entry e, boolean updateHeader, 1126 OutputStream os, 1127 long written, byte[] buf) 1128 throws IOException 1129 { 1130 long locoff = e.locoff; // where to read 1131 e.locoff = written; // update the e.locoff with new value 1132 1133 // calculate the size need to write out 1134 long size = 0; 1135 // if there is A ext 1136 if ((e.flag & FLAG_DATADESCR) != 0) { 1137 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1138 size = 24; 1139 else 1140 size = 16; 1141 } 1142 // read loc, use the original loc.elen/nlen 1143 if (readFullyAt(buf, 0, LOCHDR , locoff) != LOCHDR) 1144 throw new ZipException("loc: reading failed"); 1145 if (updateHeader) { 1146 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1147 size += e.csize; 1148 written = e.writeLOC(os) + size; 1149 } else { 1150 os.write(buf, 0, LOCHDR); // write out the loc header 1151 locoff += LOCHDR; 1152 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1153 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1154 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1155 written = LOCHDR + size; 1156 } 1157 int n; 1158 while (size > 0 && 1159 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1160 { 1161 if (size < n) 1162 n = (int)size; 1163 os.write(buf, 0, n); 1164 size -= n; 1165 locoff += n; 1166 } 1167 return written; 1168 } 1169 1170 // sync the zip file system, if there is any udpate 1171 private void sync() throws IOException { 1172 //System.out.printf("->sync(%s) starting....!%n", toString()); 1173 // check ex-closer 1174 if (!exChClosers.isEmpty()) { 1175 for (ExChannelCloser ecc : exChClosers) { 1176 if (ecc.streams.isEmpty()) { 1177 ecc.ch.close(); 1178 Files.delete(ecc.path); 1179 exChClosers.remove(ecc); 1180 } 1181 } 1182 } 1183 if (!hasUpdate) 1184 return; 1185 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1186 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1187 { 1188 ArrayList<Entry> elist = 
new ArrayList<>(inodes.size()); 1189 long written = 0; 1190 byte[] buf = new byte[8192]; 1191 Entry e = null; 1192 1193 // write loc 1194 for (IndexNode inode : inodes.values()) { 1195 if (inode instanceof Entry) { // an updated inode 1196 e = (Entry)inode; 1197 try { 1198 if (e.type == Entry.COPY) { 1199 // entry copy: the only thing changed is the "name" 1200 // and "nlen" in LOC header, so we udpate/rewrite the 1201 // LOC in new file and simply copy the rest (data and 1202 // ext) without enflating/deflating from the old zip 1203 // file LOC entry. 1204 written += copyLOCEntry(e, true, os, written, buf); 1205 } else { // NEW, FILECH or CEN 1206 e.locoff = written; 1207 written += e.writeLOC(os); // write loc header 1208 if (e.bytes != null) { // in-memory, deflated 1209 os.write(e.bytes); // already 1210 written += e.bytes.length; 1211 } else if (e.file != null) { // tmp file 1212 try (InputStream is = Files.newInputStream(e.file)) { 1213 int n; 1214 if (e.type == Entry.NEW) { // deflated already 1215 while ((n = is.read(buf)) != -1) { 1216 os.write(buf, 0, n); 1217 written += n; 1218 } 1219 } else if (e.type == Entry.FILECH) { 1220 // the data are not deflated, use ZEOS 1221 try (OutputStream os2 = new EntryOutputStream(e, os)) { 1222 while ((n = is.read(buf)) != -1) { 1223 os2.write(buf, 0, n); 1224 } 1225 } 1226 written += e.csize; 1227 if ((e.flag & FLAG_DATADESCR) != 0) 1228 written += e.writeEXT(os); 1229 } 1230 } 1231 Files.delete(e.file); 1232 tmppaths.remove(e.file); 1233 } else { 1234 // dir, 0-length data 1235 } 1236 } 1237 elist.add(e); 1238 } catch (IOException x) { 1239 x.printStackTrace(); // skip any in-accurate entry 1240 } 1241 } else { // unchanged inode 1242 if (inode.pos == -1) { 1243 continue; // pseudo directory node 1244 } 1245 e = Entry.readCEN(this, inode.pos); 1246 try { 1247 written += copyLOCEntry(e, false, os, written, buf); 1248 elist.add(e); 1249 } catch (IOException x) { 1250 x.printStackTrace(); // skip any wrong entry 1251 } 
1252 } 1253 } 1254 1255 // now write back the cen and end table 1256 end.cenoff = written; 1257 for (Entry entry : elist) { 1258 written += entry.writeCEN(os); 1259 } 1260 end.centot = elist.size(); 1261 end.cenlen = written - end.cenoff; 1262 end.write(os, written); 1263 } 1264 if (!streams.isEmpty()) { 1265 // 1266 // TBD: ExChannelCloser should not be necessary if we only 1267 // sync when being closed, all streams should have been 1268 // closed already. Keep the logic here for now. 1269 // 1270 // There are outstanding input streams open on existing "ch", 1271 // so, don't close the "cha" and delete the "file for now, let 1272 // the "ex-channel-closer" to handle them 1273 ExChannelCloser ecc = new ExChannelCloser( 1274 createTempFileInSameDirectoryAs(zfpath), 1275 ch, 1276 streams); 1277 Files.move(zfpath, ecc.path, REPLACE_EXISTING); 1278 exChClosers.add(ecc); 1279 streams = Collections.synchronizedSet(new HashSet<InputStream>()); 1280 } else { 1281 ch.close(); 1282 Files.delete(zfpath); 1283 } 1284 1285 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1286 hasUpdate = false; // clear 1287 /* 1288 if (isOpen) { 1289 ch = zfpath.newByteChannel(READ); // re-fresh "ch" and "cen" 1290 cen = initCEN(); 1291 } 1292 */ 1293 //System.out.printf("->sync(%s) done!%n", toString()); 1294 } 1295 1296 private IndexNode getInode(byte[] path) { 1297 if (path == null) 1298 throw new NullPointerException("path"); 1299 IndexNode key = IndexNode.keyOf(path); 1300 IndexNode inode = inodes.get(key); 1301 if (inode == null && 1302 (path.length == 0 || path[path.length -1] != '/')) { 1303 // if does not ends with a slash 1304 path = Arrays.copyOf(path, path.length + 1); 1305 path[path.length - 1] = '/'; 1306 inode = inodes.get(key.as(path)); 1307 } 1308 return inode; 1309 } 1310 1311 private Entry getEntry0(byte[] path) throws IOException { 1312 IndexNode inode = getInode(path); 1313 if (inode instanceof Entry) 1314 return (Entry)inode; 1315 if (inode == null || inode.pos == -1) 1316 
return null; 1317 return Entry.readCEN(this, inode.pos); 1318 } 1319 1320 public void deleteFile(byte[] path, boolean failIfNotExists) 1321 throws IOException 1322 { 1323 checkWritable(); 1324 1325 IndexNode inode = getInode(path); 1326 if (inode == null) { 1327 if (path != null && path.length == 0) 1328 throw new ZipException("root directory </> can't not be delete"); 1329 if (failIfNotExists) 1330 throw new NoSuchFileException(getString(path)); 1331 } else { 1332 if (inode.isDir() && inode.child != null) 1333 throw new DirectoryNotEmptyException(getString(path)); 1334 updateDelete(inode); 1335 } 1336 } 1337 1338 private static void copyStream(InputStream is, OutputStream os) 1339 throws IOException 1340 { 1341 byte[] copyBuf = new byte[8192]; 1342 int n; 1343 while ((n = is.read(copyBuf)) != -1) { 1344 os.write(copyBuf, 0, n); 1345 } 1346 } 1347 1348 // Returns an out stream for either 1349 // (1) writing the contents of a new entry, if the entry exits, or 1350 // (2) updating/replacing the contents of the specified existing entry. 1351 private OutputStream getOutputStream(Entry e) throws IOException { 1352 1353 if (e.mtime == -1) 1354 e.mtime = System.currentTimeMillis(); 1355 if (e.method == -1) 1356 e.method = METHOD_DEFLATED; // TBD: use default method 1357 // store size, compressed size, and crc-32 in LOC header 1358 e.flag = 0; 1359 if (zc.isUTF8()) 1360 e.flag |= FLAG_EFS; 1361 OutputStream os; 1362 if (useTempFile) { 1363 e.file = getTempPathForEntry(null); 1364 os = Files.newOutputStream(e.file, WRITE); 1365 } else { 1366 os = new ByteArrayOutputStream((e.size > 0)? 
(int)e.size : 8192); 1367 } 1368 return new EntryOutputStream(e, os); 1369 } 1370 1371 private InputStream getInputStream(Entry e) 1372 throws IOException 1373 { 1374 InputStream eis = null; 1375 1376 if (e.type == Entry.NEW) { 1377 if (e.bytes != null) 1378 eis = new ByteArrayInputStream(e.bytes); 1379 else if (e.file != null) 1380 eis = Files.newInputStream(e.file); 1381 else 1382 throw new ZipException("update entry data is missing"); 1383 } else if (e.type == Entry.FILECH) { 1384 // FILECH result is un-compressed. 1385 eis = Files.newInputStream(e.file); 1386 // TBD: wrap to hook close() 1387 // streams.add(eis); 1388 return eis; 1389 } else { // untouced CEN or COPY 1390 eis = new EntryInputStream(e, ch); 1391 } 1392 if (e.method == METHOD_DEFLATED) { 1393 // MORE: Compute good size for inflater stream: 1394 long bufSize = e.size + 2; // Inflater likes a bit of slack 1395 if (bufSize > 65536) 1396 bufSize = 8192; 1397 final long size = e.size; 1398 eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) { 1399 1400 private boolean isClosed = false; 1401 public void close() throws IOException { 1402 if (!isClosed) { 1403 releaseInflater(inf); 1404 this.in.close(); 1405 isClosed = true; 1406 streams.remove(this); 1407 } 1408 } 1409 // Override fill() method to provide an extra "dummy" byte 1410 // at the end of the input stream. This is required when 1411 // using the "nowrap" Inflater option. 
                // (it appears the new zlib in 7 does not need it, but keep
                // it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;   // supply the extra "dummy" byte
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;  // underlying stream exhausted

                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    // bytes of uncompressed data not yet produced
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);   // track for sync()/close()
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry
        protected final long size;              // uncompressed size of this entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;        // raw (compressed) bytes remaining
            size = e.size;
            pos = getDataPos(e);  // file offset of the entry's data
        }
        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // position+read must be atomic w.r.t. other readers of the
            // shared channel
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();   // auto-close once fully consumed
            }
            return (int)n;
        }
        public int read() throws IOException {
            // single-byte read implemented via the array form
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }
        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }
        public int available() {
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }
        public long size() {
            return size;
        }
        public void close() {
            rem = 0;               // mark exhausted; channel stays open
            streams.remove(this);
        }
    }

    // Output stream that deflates (or stores) entry data and records
    // size/csize/crc back into the Entry on close().
    class EntryOutputStream extends DeflaterOutputStream
    {
        private CRC32 crc;     // running crc-32 of the uncompressed data
        private Entry e;       // entry being written
        private long written;  // bytes written for METHOD_STORED

        EntryOutputStream(Entry e, OutputStream os)
            throws IOException
        {
            super(os, getDeflater());
            if (e == null)
                throw new NullPointerException("Zip entry is null");
            this.e = e;
            crc = new CRC32();
        }

        @Override
        public void write(byte b[], int off, int len) throws IOException {
            if (e.type != Entry.FILECH)    // only from sync
                ensureOpen();
            if (off < 0 || len < 0 || off > b.length - len) {
                throw new IndexOutOfBoundsException();
            } else if (len == 0) {
                return;
            }
            switch (e.method) {
            case METHOD_DEFLATED:
                super.write(b, off, len);
                break;
            case METHOD_STORED:
                written += len;
                out.write(b, off, len);   // bypass the deflater
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            crc.update(b, off, len);
        }

        @Override
        public void close() throws IOException {
            // TBD ensureOpen();
            switch (e.method) {
            case METHOD_DEFLATED:
                finish();
                e.size = def.getBytesRead();     // uncompressed size
                e.csize = def.getBytesWritten(); // compressed size
                e.crc = crc.getValue();
                break;
            case METHOD_STORED:
                // we already know that both e.size and e.csize are the same
                e.size = e.csize = written;
                e.crc = crc.getValue();
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            //crc.reset();
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();

            if (e.type == Entry.FILECH) {
                // from sync(): the underlying os is the zip being written,
                // so do not close it and do not re-register the entry
                releaseDeflater(def);
                return;
            }
            super.close();
            releaseDeflater(def);
            update(e);   // publish the finished entry
        }
    }

    static void
    zerror(String msg) {
        // report a structural zip-format error
        throw new ZipError(msg);
    }

    // Maximum number of de/inflaters we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                return new Inflater(true);   // "nowrap": raw deflate data
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();        // recycle
                inflaters.add(inf);
            } else {
                inf.end();          // cache full: free native memory now
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
    private Deflater getDeflater() {
        synchronized (deflaters) {
            int size = deflaters.size();
            if (size > 0) {
                Deflater def = deflaters.remove(size - 1);
                return def;
            } else {
                return new Deflater(Deflater.DEFAULT_COMPRESSION, true);
            }
        }
    }

    // Releases the specified deflater to the list of available deflaters.
1645 private void releaseDeflater(Deflater def) { 1646 synchronized (deflaters) { 1647 if (inflaters.size() < MAX_FLATER) { 1648 def.reset(); 1649 deflaters.add(def); 1650 } else { 1651 def.end(); 1652 } 1653 } 1654 } 1655 1656 // End of central directory record 1657 static class END { 1658 int disknum; 1659 int sdisknum; 1660 int endsub; // endsub 1661 int centot; // 4 bytes 1662 long cenlen; // 4 bytes 1663 long cenoff; // 4 bytes 1664 int comlen; // comment length 1665 byte[] comment; 1666 1667 /* members of Zip64 end of central directory locator */ 1668 int diskNum; 1669 long endpos; 1670 int disktot; 1671 1672 void write(OutputStream os, long offset) throws IOException { 1673 boolean hasZip64 = false; 1674 long xlen = cenlen; 1675 long xoff = cenoff; 1676 if (xlen >= ZIP64_MINVAL) { 1677 xlen = ZIP64_MINVAL; 1678 hasZip64 = true; 1679 } 1680 if (xoff >= ZIP64_MINVAL) { 1681 xoff = ZIP64_MINVAL; 1682 hasZip64 = true; 1683 } 1684 int count = centot; 1685 if (count >= ZIP64_MINVAL32) { 1686 count = ZIP64_MINVAL32; 1687 hasZip64 = true; 1688 } 1689 if (hasZip64) { 1690 long off64 = offset; 1691 //zip64 end of central directory record 1692 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1693 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1694 writeShort(os, 45); // version made by 1695 writeShort(os, 45); // version needed to extract 1696 writeInt(os, 0); // number of this disk 1697 writeInt(os, 0); // central directory start disk 1698 writeLong(os, centot); // number of directory entires on disk 1699 writeLong(os, centot); // number of directory entires 1700 writeLong(os, cenlen); // length of central directory 1701 writeLong(os, cenoff); // offset of central directory 1702 1703 //zip64 end of central directory locator 1704 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1705 writeInt(os, 0); // zip64 END start disk 1706 writeLong(os, off64); // offset of zip64 END 1707 writeInt(os, 1); // total number of disks (?) 
1708 } 1709 writeInt(os, ENDSIG); // END record signature 1710 writeShort(os, 0); // number of this disk 1711 writeShort(os, 0); // central directory start disk 1712 writeShort(os, count); // number of directory entries on disk 1713 writeShort(os, count); // total number of directory entries 1714 writeInt(os, xlen); // length of central directory 1715 writeInt(os, xoff); // offset of central directory 1716 if (comment != null) { // zip file comment 1717 writeShort(os, comment.length); 1718 writeBytes(os, comment); 1719 } else { 1720 writeShort(os, 0); 1721 } 1722 } 1723 } 1724 1725 // Internal node that links a "name" to its pos in cen table. 1726 // The node itself can be used as a "key" to lookup itself in 1727 // the HashMap inodes. 1728 static class IndexNode { 1729 byte[] name; 1730 int hashcode; // node is hashable/hashed by its name 1731 int pos = -1; // position in cen table, -1 menas the 1732 // entry does not exists in zip file 1733 IndexNode(byte[] name, int pos) { 1734 name(name); 1735 this.pos = pos; 1736 } 1737 1738 final static IndexNode keyOf(byte[] name) { // get a lookup key; 1739 return new IndexNode(name, -1); 1740 } 1741 1742 final void name(byte[] name) { 1743 this.name = name; 1744 this.hashcode = Arrays.hashCode(name); 1745 } 1746 1747 final IndexNode as(byte[] name) { // reuse the node, mostly 1748 name(name); // as a lookup "key" 1749 return this; 1750 } 1751 1752 boolean isDir() { 1753 return name != null && 1754 (name.length == 0 || name[name.length - 1] == '/'); 1755 } 1756 1757 public boolean equals(Object other) { 1758 if (!(other instanceof IndexNode)) { 1759 return false; 1760 } 1761 return Arrays.equals(name, ((IndexNode)other).name); 1762 } 1763 1764 public int hashCode() { 1765 return hashcode; 1766 } 1767 1768 IndexNode() {} 1769 IndexNode sibling; 1770 IndexNode child; // 1st child 1771 } 1772 1773 static class Entry extends IndexNode { 1774 1775 static final int CEN = 1; // entry read from cen 1776 static final int NEW = 2; // 
updated contents in bytes or file 1777 static final int FILECH = 3; // fch update in "file" 1778 static final int COPY = 4; // copy of a CEN entry 1779 1780 1781 byte[] bytes; // updated content bytes 1782 Path file; // use tmp file to store bytes; 1783 int type = CEN; // default is the entry read from cen 1784 1785 // entry attributes 1786 int version; 1787 int flag; 1788 int method = -1; // compression method 1789 long mtime = -1; // last modification time (in DOS time) 1790 long atime = -1; // last access time 1791 long ctime = -1; // create time 1792 long crc = -1; // crc-32 of entry data 1793 long csize = -1; // compressed size of entry data 1794 long size = -1; // uncompressed size of entry data 1795 byte[] extra; 1796 1797 // cen 1798 int versionMade; 1799 int disk; 1800 int attrs; 1801 long attrsEx; 1802 long locoff; 1803 byte[] comment; 1804 1805 Entry() {} 1806 1807 Entry(byte[] name) { 1808 name(name); 1809 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1810 this.crc = 0; 1811 this.size = 0; 1812 this.csize = 0; 1813 this.method = METHOD_DEFLATED; 1814 } 1815 1816 Entry(byte[] name, int type) { 1817 this(name); 1818 this.type = type; 1819 } 1820 1821 Entry (Entry e, int type) { 1822 name(e.name); 1823 this.version = e.version; 1824 this.ctime = e.ctime; 1825 this.atime = e.atime; 1826 this.mtime = e.mtime; 1827 this.crc = e.crc; 1828 this.size = e.size; 1829 this.csize = e.csize; 1830 this.method = e.method; 1831 this.extra = e.extra; 1832 this.versionMade = e.versionMade; 1833 this.disk = e.disk; 1834 this.attrs = e.attrs; 1835 this.attrsEx = e.attrsEx; 1836 this.locoff = e.locoff; 1837 this.comment = e.comment; 1838 this.type = type; 1839 } 1840 1841 Entry (byte[] name, Path file, int type) { 1842 this(name, type); 1843 this.file = file; 1844 this.method = METHOD_STORED; 1845 } 1846 1847 int version() throws ZipException { 1848 if (method == METHOD_DEFLATED) 1849 return 20; 1850 else if (method == METHOD_STORED) 1851 return 10; 1852 
throw new ZipException("unsupported compression method"); 1853 } 1854 1855 ///////////////////// CEN ////////////////////// 1856 static Entry readCEN(ZipFileSystem zipfs, int pos) 1857 throws IOException 1858 { 1859 return new Entry().cen(zipfs, pos); 1860 } 1861 1862 private Entry cen(ZipFileSystem zipfs, int pos) 1863 throws IOException 1864 { 1865 byte[] cen = zipfs.cen; 1866 if (CENSIG(cen, pos) != CENSIG) 1867 zerror("invalid CEN header (bad signature)"); 1868 versionMade = CENVEM(cen, pos); 1869 version = CENVER(cen, pos); 1870 flag = CENFLG(cen, pos); 1871 method = CENHOW(cen, pos); 1872 mtime = dosToJavaTime(CENTIM(cen, pos)); 1873 crc = CENCRC(cen, pos); 1874 csize = CENSIZ(cen, pos); 1875 size = CENLEN(cen, pos); 1876 int nlen = CENNAM(cen, pos); 1877 int elen = CENEXT(cen, pos); 1878 int clen = CENCOM(cen, pos); 1879 disk = CENDSK(cen, pos); 1880 attrs = CENATT(cen, pos); 1881 attrsEx = CENATX(cen, pos); 1882 locoff = CENOFF(cen, pos); 1883 1884 pos += CENHDR; 1885 name(Arrays.copyOfRange(cen, pos, pos + nlen)); 1886 1887 pos += nlen; 1888 if (elen > 0) { 1889 extra = Arrays.copyOfRange(cen, pos, pos + elen); 1890 pos += elen; 1891 readExtra(zipfs); 1892 } 1893 if (clen > 0) { 1894 comment = Arrays.copyOfRange(cen, pos, pos + clen); 1895 } 1896 return this; 1897 } 1898 1899 int writeCEN(OutputStream os) throws IOException 1900 { 1901 int written = CENHDR; 1902 int version0 = version(); 1903 long csize0 = csize; 1904 long size0 = size; 1905 long locoff0 = locoff; 1906 int elen64 = 0; // extra for ZIP64 1907 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 1908 int elenEXTT = 0; // extra for Extended Timestamp 1909 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 1910 1911 // confirm size/length 1912 int nlen = (name != null) ? name.length : 0; 1913 int elen = (extra != null) ? extra.length : 0; 1914 int eoff = 0; 1915 int clen = (comment != null) ? 
comment.length : 0; 1916 if (csize >= ZIP64_MINVAL) { 1917 csize0 = ZIP64_MINVAL; 1918 elen64 += 8; // csize(8) 1919 } 1920 if (size >= ZIP64_MINVAL) { 1921 size0 = ZIP64_MINVAL; // size(8) 1922 elen64 += 8; 1923 } 1924 if (locoff >= ZIP64_MINVAL) { 1925 locoff0 = ZIP64_MINVAL; 1926 elen64 += 8; // offset(8) 1927 } 1928 if (elen64 != 0) { 1929 elen64 += 4; // header and data sz 4 bytes 1930 } 1931 while (eoff + 4 < elen) { 1932 int tag = SH(extra, eoff); 1933 int sz = SH(extra, eoff + 2); 1934 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 1935 foundExtraTime = true; 1936 } 1937 eoff += (4 + sz); 1938 } 1939 if (!foundExtraTime) { 1940 if (isWindows) { // use NTFS 1941 elenNTFS = 36; // total 36 bytes 1942 } else { // Extended Timestamp otherwise 1943 elenEXTT = 9; // only mtime in cen 1944 } 1945 } 1946 writeInt(os, CENSIG); // CEN header signature 1947 if (elen64 != 0) { 1948 writeShort(os, 45); // ver 4.5 for zip64 1949 writeShort(os, 45); 1950 } else { 1951 writeShort(os, version0); // version made by 1952 writeShort(os, version0); // version needed to extract 1953 } 1954 writeShort(os, flag); // general purpose bit flag 1955 writeShort(os, method); // compression method 1956 // last modification time 1957 writeInt(os, (int)javaToDosTime(mtime)); 1958 writeInt(os, crc); // crc-32 1959 writeInt(os, csize0); // compressed size 1960 writeInt(os, size0); // uncompressed size 1961 writeShort(os, name.length); 1962 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 1963 1964 if (comment != null) { 1965 writeShort(os, Math.min(clen, 0xffff)); 1966 } else { 1967 writeShort(os, 0); 1968 } 1969 writeShort(os, 0); // starting disk number 1970 writeShort(os, 0); // internal file attributes (unused) 1971 writeInt(os, 0); // external file attributes (unused) 1972 writeInt(os, locoff0); // relative offset of local header 1973 writeBytes(os, name); 1974 if (elen64 != 0) { 1975 writeShort(os, EXTID_ZIP64);// Zip64 extra 1976 writeShort(os, elen64 - 4); // size of "this" extra 
block 1977 if (size0 == ZIP64_MINVAL) 1978 writeLong(os, size); 1979 if (csize0 == ZIP64_MINVAL) 1980 writeLong(os, csize); 1981 if (locoff0 == ZIP64_MINVAL) 1982 writeLong(os, locoff); 1983 } 1984 if (elenNTFS != 0) { 1985 writeShort(os, EXTID_NTFS); 1986 writeShort(os, elenNTFS - 4); 1987 writeInt(os, 0); // reserved 1988 writeShort(os, 0x0001); // NTFS attr tag 1989 writeShort(os, 24); 1990 writeLong(os, javaToWinTime(mtime)); 1991 writeLong(os, javaToWinTime(atime)); 1992 writeLong(os, javaToWinTime(ctime)); 1993 } 1994 if (elenEXTT != 0) { 1995 writeShort(os, EXTID_EXTT); 1996 writeShort(os, elenEXTT - 4); 1997 if (ctime == -1) 1998 os.write(0x3); // mtime and atime 1999 else 2000 os.write(0x7); // mtime, atime and ctime 2001 writeInt(os, javaToUnixTime(mtime)); 2002 } 2003 if (extra != null) // whatever not recognized 2004 writeBytes(os, extra); 2005 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2006 writeBytes(os, comment); 2007 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2008 } 2009 2010 ///////////////////// LOC ////////////////////// 2011 static Entry readLOC(ZipFileSystem zipfs, long pos) 2012 throws IOException 2013 { 2014 return readLOC(zipfs, pos, new byte[1024]); 2015 } 2016 2017 static Entry readLOC(ZipFileSystem zipfs, long pos, byte[] buf) 2018 throws IOException 2019 { 2020 return new Entry().loc(zipfs, pos, buf); 2021 } 2022 2023 Entry loc(ZipFileSystem zipfs, long pos, byte[] buf) 2024 throws IOException 2025 { 2026 assert (buf.length >= LOCHDR); 2027 if (zipfs.readFullyAt(buf, 0, LOCHDR , pos) != LOCHDR) 2028 throw new ZipException("loc: reading failed"); 2029 if (LOCSIG(buf) != LOCSIG) 2030 throw new ZipException("loc: wrong sig ->" 2031 + Long.toString(LOCSIG(buf), 16)); 2032 //startPos = pos; 2033 version = LOCVER(buf); 2034 flag = LOCFLG(buf); 2035 method = LOCHOW(buf); 2036 mtime = dosToJavaTime(LOCTIM(buf)); 2037 crc = LOCCRC(buf); 2038 csize = LOCSIZ(buf); 2039 size = LOCLEN(buf); 2040 int 
nlen = LOCNAM(buf); 2041 int elen = LOCEXT(buf); 2042 2043 name = new byte[nlen]; 2044 if (zipfs.readFullyAt(name, 0, nlen, pos + LOCHDR) != nlen) { 2045 throw new ZipException("loc: name reading failed"); 2046 } 2047 if (elen > 0) { 2048 extra = new byte[elen]; 2049 if (zipfs.readFullyAt(extra, 0, elen, pos + LOCHDR + nlen) 2050 != elen) { 2051 throw new ZipException("loc: ext reading failed"); 2052 } 2053 } 2054 pos += (LOCHDR + nlen + elen); 2055 if ((flag & FLAG_DATADESCR) != 0) { 2056 // Data Descriptor 2057 Entry e = zipfs.getEntry0(name); // get the size/csize from cen 2058 if (e == null) 2059 throw new ZipException("loc: name not found in cen"); 2060 size = e.size; 2061 csize = e.csize; 2062 pos += (method == METHOD_STORED ? size : csize); 2063 if (size >= ZIP64_MINVAL || csize >= ZIP64_MINVAL) 2064 pos += 24; 2065 else 2066 pos += 16; 2067 } else { 2068 if (extra != null && 2069 (size == ZIP64_MINVAL || csize == ZIP64_MINVAL)) { 2070 // zip64 ext: must include both size and csize 2071 int off = 0; 2072 while (off + 20 < elen) { // HeaderID+DataSize+Data 2073 int sz = SH(extra, off + 2); 2074 if (SH(extra, off) == EXTID_ZIP64 && sz == 16) { 2075 size = LL(extra, off + 4); 2076 csize = LL(extra, off + 12); 2077 break; 2078 } 2079 off += (sz + 4); 2080 } 2081 } 2082 pos += (method == METHOD_STORED ? size : csize); 2083 } 2084 return this; 2085 } 2086 2087 int writeLOC(OutputStream os) 2088 throws IOException 2089 { 2090 writeInt(os, LOCSIG); // LOC header signature 2091 int version = version(); 2092 int nlen = (name != null) ? name.length : 0; 2093 int elen = (extra != null) ? 
extra.length : 0; 2094 boolean foundExtraTime = false; // if extra timestamp present 2095 int eoff = 0; 2096 int elen64 = 0; 2097 int elenEXTT = 0; 2098 int elenNTFS = 0; 2099 if ((flag & FLAG_DATADESCR) != 0) { 2100 writeShort(os, version()); // version needed to extract 2101 writeShort(os, flag); // general purpose bit flag 2102 writeShort(os, method); // compression method 2103 // last modification time 2104 writeInt(os, (int)javaToDosTime(mtime)); 2105 // store size, uncompressed size, and crc-32 in data descriptor 2106 // immediately following compressed entry data 2107 writeInt(os, 0); 2108 writeInt(os, 0); 2109 writeInt(os, 0); 2110 } else { 2111 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2112 elen64 = 20; //headid(2) + size(2) + size(8) + csize(8) 2113 writeShort(os, 45); // ver 4.5 for zip64 2114 } else { 2115 writeShort(os, version()); // version needed to extract 2116 } 2117 writeShort(os, flag); // general purpose bit flag 2118 writeShort(os, method); // compression method 2119 // last modification time 2120 writeInt(os, (int)javaToDosTime(mtime)); 2121 writeInt(os, crc); // crc-32 2122 if (elen64 != 0) { 2123 writeInt(os, ZIP64_MINVAL); 2124 writeInt(os, ZIP64_MINVAL); 2125 } else { 2126 writeInt(os, csize); // compressed size 2127 writeInt(os, size); // uncompressed size 2128 } 2129 } 2130 while (eoff + 4 < elen) { 2131 int tag = SH(extra, eoff); 2132 int sz = SH(extra, eoff + 2); 2133 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2134 foundExtraTime = true; 2135 } 2136 eoff += (4 + sz); 2137 } 2138 if (!foundExtraTime) { 2139 if (isWindows) { 2140 elenNTFS = 36; // NTFS, total 36 bytes 2141 } else { // on unix use "ext time" 2142 elenEXTT = 9; 2143 if (atime != -1) 2144 elenEXTT += 4; 2145 if (ctime != -1) 2146 elenEXTT += 4; 2147 } 2148 } 2149 writeShort(os, name.length); 2150 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2151 writeBytes(os, name); 2152 if (elen64 != 0) { 2153 writeShort(os, EXTID_ZIP64); 2154 writeShort(os, 16); 
2155 writeLong(os, size); 2156 writeLong(os, csize); 2157 } 2158 if (elenNTFS != 0) { 2159 writeShort(os, EXTID_NTFS); 2160 writeShort(os, elenNTFS - 4); 2161 writeInt(os, 0); // reserved 2162 writeShort(os, 0x0001); // NTFS attr tag 2163 writeShort(os, 24); 2164 writeLong(os, javaToWinTime(mtime)); 2165 writeLong(os, javaToWinTime(atime)); 2166 writeLong(os, javaToWinTime(ctime)); 2167 } 2168 if (elenEXTT != 0) { 2169 writeShort(os, EXTID_EXTT); 2170 writeShort(os, elenEXTT - 4);// size for the folowing data block 2171 int fbyte = 0x1; 2172 if (atime != -1) // mtime and atime 2173 fbyte |= 0x2; 2174 if (ctime != -1) // mtime, atime and ctime 2175 fbyte |= 0x4; 2176 os.write(fbyte); // flags byte 2177 writeInt(os, javaToUnixTime(mtime)); 2178 if (atime != -1) 2179 writeInt(os, javaToUnixTime(atime)); 2180 if (ctime != -1) 2181 writeInt(os, javaToUnixTime(ctime)); 2182 } 2183 if (extra != null) { 2184 writeBytes(os, extra); 2185 } 2186 return LOCHDR + name.length + elen + elen64 + elenNTFS + elenEXTT; 2187 } 2188 2189 // Data Descriptior 2190 int writeEXT(OutputStream os) 2191 throws IOException 2192 { 2193 writeInt(os, EXTSIG); // EXT header signature 2194 writeInt(os, crc); // crc-32 2195 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2196 writeLong(os, csize); 2197 writeLong(os, size); 2198 return 24; 2199 } else { 2200 writeInt(os, csize); // compressed size 2201 writeInt(os, size); // uncompressed size 2202 return 16; 2203 } 2204 } 2205 2206 // read NTFS, UNIX and ZIP64 data from cen.extra 2207 void readExtra(ZipFileSystem zipfs) throws IOException { 2208 if (extra == null) 2209 return; 2210 int elen = extra.length; 2211 int off = 0; 2212 int newOff = 0; 2213 while (off + 4 < elen) { 2214 // extra spec: HeaderID+DataSize+Data 2215 int pos = off; 2216 int tag = SH(extra, pos); 2217 int sz = SH(extra, pos + 2); 2218 pos += 4; 2219 if (pos + sz > elen) // invalid data 2220 break; 2221 switch (tag) { 2222 case EXTID_ZIP64 : 2223 if (size == ZIP64_MINVAL) { 
2224 if (pos + 8 > elen) // invalid zip64 extra 2225 break; // fields, just skip 2226 size = LL(extra, pos); 2227 pos += 8; 2228 } 2229 if (csize == ZIP64_MINVAL) { 2230 if (pos + 8 > elen) 2231 break; 2232 csize = LL(extra, pos); 2233 pos += 8; 2234 } 2235 if (locoff == ZIP64_MINVAL) { 2236 if (pos + 8 > elen) 2237 break; 2238 locoff = LL(extra, pos); 2239 pos += 8; 2240 } 2241 break; 2242 case EXTID_NTFS: 2243 pos += 4; // reserved 4 bytes 2244 if (SH(extra, pos) != 0x0001) 2245 break; 2246 if (SH(extra, pos + 2) != 24) 2247 break; 2248 // override the loc field, datatime here is 2249 // more "accurate" 2250 mtime = winToJavaTime(LL(extra, pos + 4)); 2251 atime = winToJavaTime(LL(extra, pos + 12)); 2252 ctime = winToJavaTime(LL(extra, pos + 20)); 2253 break; 2254 case EXTID_EXTT: 2255 // spec says the Extened timestamp in cen only has mtime 2256 // need to read the loc to get the extra a/ctime 2257 byte[] buf = new byte[LOCHDR]; 2258 if (zipfs.readFullyAt(buf, 0, buf.length , locoff) 2259 != buf.length) 2260 throw new ZipException("loc: reading failed"); 2261 if (LOCSIG(buf) != LOCSIG) 2262 throw new ZipException("loc: wrong sig ->" 2263 + Long.toString(LOCSIG(buf), 16)); 2264 2265 int locElen = LOCEXT(buf); 2266 if (locElen < 9) // EXTT is at lease 9 bytes 2267 break; 2268 int locNlen = LOCNAM(buf); 2269 buf = new byte[locElen]; 2270 if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen) 2271 != buf.length) 2272 throw new ZipException("loc extra: reading failed"); 2273 int locPos = 0; 2274 while (locPos + 4 < buf.length) { 2275 int locTag = SH(buf, locPos); 2276 int locSZ = SH(buf, locPos + 2); 2277 locPos += 4; 2278 if (locTag != EXTID_EXTT) { 2279 locPos += locSZ; 2280 continue; 2281 } 2282 int flag = CH(buf, locPos++); 2283 if ((flag & 0x1) != 0) { 2284 mtime = unixToJavaTime(LG(buf, locPos)); 2285 locPos += 4; 2286 } 2287 if ((flag & 0x2) != 0) { 2288 atime = unixToJavaTime(LG(buf, locPos)); 2289 locPos += 4; 2290 } 2291 if ((flag & 0x4) != 0) 
{ 2292 ctime = unixToJavaTime(LG(buf, locPos)); 2293 locPos += 4; 2294 } 2295 break; 2296 } 2297 break; 2298 default: // unknown tag 2299 System.arraycopy(extra, off, extra, newOff, sz + 4); 2300 newOff += (sz + 4); 2301 } 2302 off += (sz + 4); 2303 } 2304 if (newOff != 0 && newOff != extra.length) 2305 extra = Arrays.copyOf(extra, newOff); 2306 else 2307 extra = null; 2308 } 2309 } 2310 2311 private static class ExChannelCloser { 2312 Path path; 2313 SeekableByteChannel ch; 2314 Set<InputStream> streams; 2315 ExChannelCloser(Path path, 2316 SeekableByteChannel ch, 2317 Set<InputStream> streams) 2318 { 2319 this.path = path; 2320 this.ch = ch; 2321 this.streams = streams; 2322 } 2323 } 2324 2325 // ZIP directory has two issues: 2326 // (1) ZIP spec does not require the ZIP file to include 2327 // directory entry 2328 // (2) all entries are not stored/organized in a "tree" 2329 // structure. 2330 // A possible solution is to build the node tree ourself as 2331 // implemented below. 2332 private IndexNode root; 2333 2334 private void addToTree(IndexNode inode, HashSet<IndexNode> dirs) { 2335 if (dirs.contains(inode)) { 2336 return; 2337 } 2338 IndexNode parent; 2339 byte[] name = inode.name; 2340 byte[] pname = getParent(name); 2341 if (inodes.containsKey(LOOKUPKEY.as(pname))) { 2342 parent = inodes.get(LOOKUPKEY); 2343 } else { // pseudo directory entry 2344 parent = new IndexNode(pname, -1); 2345 inodes.put(parent, parent); 2346 } 2347 addToTree(parent, dirs); 2348 inode.sibling = parent.child; 2349 parent.child = inode; 2350 if (name[name.length -1] == '/') 2351 dirs.add(inode); 2352 } 2353 2354 private void removeFromTree(IndexNode inode) { 2355 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name))); 2356 IndexNode child = parent.child; 2357 if (child.equals(inode)) { 2358 parent.child = child.sibling; 2359 } else { 2360 IndexNode last = child; 2361 while ((child = child.sibling) != null) { 2362 if (child.equals(inode)) { 2363 last.sibling = 
child.sibling; 2364 break; 2365 } else { 2366 last = child; 2367 } 2368 } 2369 } 2370 } 2371 2372 private void buildNodeTree() throws IOException { 2373 beginWrite(); 2374 try { 2375 HashSet<IndexNode> dirs = new HashSet<>(); 2376 IndexNode root = new IndexNode(ROOTPATH, -1); 2377 inodes.put(root, root); 2378 dirs.add(root); 2379 for (IndexNode node : inodes.keySet().toArray(new IndexNode[0])) { 2380 addToTree(node, dirs); 2381 } 2382 } finally { 2383 endWrite(); 2384 } 2385 } 2386 }