1 /* 2 * Copyright (c) 2009, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
 */

package jdk.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.*;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.nio.file.spi.*;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Inflater;
import java.util.zip.Deflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.ZipException;
import static java.lang.Boolean.*;
import static jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;
import static java.nio.file.StandardOpenOption.*;
import static java.nio.file.StandardCopyOption.*;

/**
 * A FileSystem built on a zip file
 *
 * @author Xueming Shen
 */

class ZipFileSystem extends FileSystem {

    private final ZipFileSystemProvider provider;
    private final Path zfpath;              // path of the underlying zip file
    final ZipCoder zc;                      // coder for entry names/comments
    private final boolean noExtt;           // see readExtra()
    private final ZipPath rootdir;          // the single root directory "/"
    // configurable by env map
    private final boolean useTempFile;      // use a temp file for newOS, default
                                            // is to use BAOS for better performance
    private boolean readOnly = false;       // readonly file system
    // true when running on Windows; read once under doPrivileged
    private static final boolean isWindows = AccessController.doPrivileged(
        (PrivilegedAction<Boolean>) () -> System.getProperty("os.name")
                                                .startsWith("Windows"));
    private final boolean forceEnd64;       // always write a ZIP64 END record

    /**
     * Creates a zip file system over {@code zfpath}.
     *
     * Recognized env keys: "create" ("true" creates a new empty zip if the
     * file does not exist), "encoding" (entry name/comment charset, default
     * UTF-8), "zipinfo-time", "useTempFile", "forceZIP64End".
     *
     * @throws FileSystemNotFoundException if the file is absent and
     *         "create" was not requested
     * @throws IOException on any I/O failure; the channel is closed before
     *         rethrowing if reading the central directory fails
     */
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // create a new zip if not exists
        boolean createNew = "true".equals(env.get("create"));
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = TRUE.equals(env.get("useTempFile"));
        this.forceEnd64 = "true".equals(env.get("forceZIP64End"));
        this.provider = provider;
        this.zfpath = zfpath;
        if (Files.notExists(zfpath)) {
            if (createNew) {
                // write a bare END record so the file is a valid empty zip
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            // don't leak the open channel if CEN parsing fails
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // Throws if the file system was opened (or later marked) read-only.
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }

    void setReadOnly() {
        this.readOnly = true;
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        return List.of(rootdir);
    }

    ZipPath getRootDir() {
        return rootdir;
    }

    // Joins "first" and any non-empty "more" components with '/'.
    @Override
    public ZipPath getPath(String first, String... more) {
        if (more.length == 0) {
            return new ZipPath(this, first);
        }
        StringBuilder sb = new StringBuilder();
        sb.append(first);
        for (String path : more) {
            if (path.length() > 0) {
                if (sb.length() > 0) {
                    sb.append('/');
                }
                sb.append(path);
            }
        }
        return new ZipPath(this, sb.toString());
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        return List.of(new ZipFileStore(rootdir));
    }

    private static final Set<String> supportedFileAttributeViews =
        Set.of("basic", "zip");

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    // Supports "glob:<pattern>" and "regex:<pattern>" syntaxes only.
    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);
        } else {
            if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    // Marks the file system closed, closes outstanding streams, syncs any
    // pending updates back to the zip file, releases (de|in)flaters and
    // deletes temp files.  The first temp-file deletion failure is thrown,
    // later ones are attached as suppressed exceptions.
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is: copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
                sync(); return null;
            });
            ch.close();                 // close the ch just in case no update
        } catch (PrivilegedActionException e) {  // and sync does not close the ch
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p: tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
            throw ioe;
    }

    // Returns the attributes of the entry, synthesizing a pseudo directory
    // entry when only an IndexNode (e.g. implicit parent dir) exists;
    // returns null if the path does not exist at all.
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry(path);
            if (e == null) {
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                e = new Entry(inode.name, inode.isdir);  // pseudo directory
                e.method = METHOD_STORED;        // STORED for dir
                e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp;
            }
        } finally {
            endRead();
        }
        return e;
    }

    void checkAccess(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            // is it necessary to readCEN as a sanity check?
            if (getInode(path) == null) {
                throw new NoSuchFileException(toString());
            }

        } finally {
            endRead();
        }
    }

    // Sets any non-null timestamps on an existing entry; a CEN entry is
    // first promoted to a COPY entry so the change is written out on sync.
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;     // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(byte[] path,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                // assume all path from zip file itself is "normalized"
                ZipPath zp = new ZipPath(this, child.name, true);
                if (filter == null || filter.accept(zp))
                    list.add(zp);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        //  dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW, true);
            e.method = METHOD_STORED;            // STORED for dir
            update(e);
        } finally {
            endWrite();
        }
    }

    // Copies (or, with deletesrc, renames) src to dst inside this zip.
    // A directory source just creates the destination directory per the
    // Files.copy specification.
    void copyFile(boolean deletesrc, byte[] src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry(src);  // ensureOpen checked

            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime = u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        boolean hasTruncate = false;
        for (OpenOption opt: options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
            if (opt == TRUNCATE_EXISTING)
                hasTruncate = true;
        }
        if (hasAppend && hasTruncate)
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    // APPEND: replay the existing data into the new stream
                    InputStream is = getInputStream(e);
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    copyStream(is, os);
                    is.close();
                    return os;
                }
                return getOutputStream(new Entry(e, Entry.NEW));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(new Entry(path, Entry.NEW, false));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    // check that each option is non-null and an instance of StandardOpenOption
    private void checkOptions(Set<? extends OpenOption> options) {
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of
    // it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();
            try {
                final WritableByteChannel wbc = Channels.newChannel(
                    newOutputStream(path, options.toArray(new OpenOption[0])));
                long leftover = 0;
                if (options.contains(StandardOpenOption.APPEND)) {
                    Entry e = getEntry(path);
                    if (e != null && e.size >= 0)
                        leftover = e.size;
                }
                final long offset = leftover;
                // write-only channel: position/size track bytes written,
                // read/position(pos)/truncate are unsupported
                return new SeekableByteChannel() {
                    long written = offset;
                    public boolean isOpen() {
                        return wbc.isOpen();
                    }

                    public long position() throws IOException {
                        return written;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        throw new UnsupportedOperationException();
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int write(ByteBuffer src) throws IOException {
                        int n = wbc.write(src);
                        written += n;
                        return n;
                    }

                    public long size() throws IOException {
                        return written;
                    }

                    public void close() throws IOException {
                        wbc.close();
                    }
                };
            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                final ReadableByteChannel rbc =
                    Channels.newChannel(getInputStream(e));
                final long size = e.size;
                // read-only channel: position tracks bytes consumed,
                // write/truncate throw NonWritableChannelException
                return new SeekableByteChannel() {
                    long read = 0;
                    public boolean isOpen() {
                        return rbc.isOpen();
                    }

                    public long position() throws IOException {
                        return read;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        int n = rbc.read(dst);
                        if (n > 0) {
                            read += n;
                        }
                        return n;
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new NonWritableChannelException();
                    }

                    public int write (ByteBuffer src) throws IOException {
                        throw new NonWritableChannelException();
                    }

                    public long size() throws IOException {
                        return size;
                    }

                    public void close() throws IOException {
                        rbc.close();
                    }
                };
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = METHOD_DEFLATED;
            }
            // is there a better way to hook into the FileChannel's close method?
            // All operations delegate to fch; implCloseChannel() is the hook
            // that records the update (write) or removes the tmp file (read).
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private Set<ExChannelCloser> exChClosers = new HashSet<>();

    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());

    // Creates a temp file (registered in tmppaths) and, if path is given
    // and exists, copies that entry's data into it.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
874 private void checkParents(byte[] path) throws IOException { 875 beginRead(); 876 try { 877 while ((path = getParent(path)) != null && 878 path != ROOTPATH) { 879 if (!inodes.containsKey(IndexNode.keyOf(path))) { 880 throw new NoSuchFileException(getString(path)); 881 } 882 } 883 } finally { 884 endRead(); 885 } 886 } 887 888 private static byte[] ROOTPATH = new byte[] { '/' }; 889 private static byte[] getParent(byte[] path) { 890 int off = getParentOff(path); 891 if (off <= 1) 892 return ROOTPATH; 893 return Arrays.copyOf(path, off); 894 } 895 896 private static int getParentOff(byte[] path) { 897 int off = path.length - 1; 898 if (off > 0 && path[off] == '/') // isDirectory 899 off--; 900 while (off > 0 && path[off] != '/') { off--; } 901 return off; 902 } 903 904 private final void beginWrite() { 905 rwlock.writeLock().lock(); 906 } 907 908 private final void endWrite() { 909 rwlock.writeLock().unlock(); 910 } 911 912 private final void beginRead() { 913 rwlock.readLock().lock(); 914 } 915 916 private final void endRead() { 917 rwlock.readLock().unlock(); 918 } 919 920 /////////////////////////////////////////////////////////////////// 921 922 private volatile boolean isOpen = true; 923 private final SeekableByteChannel ch; // channel to the zipfile 924 final byte[] cen; // CEN & ENDHDR 925 private END end; 926 private long locpos; // position of first LOC header (usually 0) 927 928 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 929 930 // name -> pos (in cen), IndexNode itself can be used as a "key" 931 private LinkedHashMap<IndexNode, IndexNode> inodes; 932 933 final byte[] getBytes(String name) { 934 return zc.getBytes(name); 935 } 936 937 final String getString(byte[] name) { 938 return zc.toString(name); 939 } 940 941 @SuppressWarnings("deprecation") 942 protected void finalize() throws IOException { 943 close(); 944 } 945 946 // Reads len bytes of data from the specified offset into buf. 
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    // Positioned read; synchronized on ch because position()+read() on the
    // shared channel must be atomic with respect to other readers.
    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        // END can be at most END_MAXLEN (ENDHDR + 64k comment) from the end
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards in overlapping blocks (overlap = ENDHDR so a
        // signature straddling two blocks is still found)
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P'    &&
                    buf[i+1] == (byte)'K'    &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    // try if there is zip64 end;
                    byte[] loc64 = new byte[ZIP64_LOCHDR];
                    if (end.endpos < ZIP64_LOCHDR ||
                        readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                        != loc64.length ||
                        !locator64SigAt(loc64, 0)) {
                        return end;    // no ZIP64 locator, plain END
                    }
                    long end64pos = ZIP64_LOCOFF(loc64);
                    byte[] end64buf = new byte[ZIP64_ENDHDR];
                    if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                        != end64buf.length ||
                        !end64SigAt(end64buf, 0)) {
                        return end;    // locator present but no valid ZIP64 END
                    }
                    // end64 found,
                    long cenlen64 = ZIP64_ENDSIZ(end64buf);
                    long cenoff64 = ZIP64_ENDOFF(end64buf);
                    long centot64 = ZIP64_ENDTOT(end64buf);
                    // double-check: each ZIP64 value must match the END value
                    // unless that END field was saturated at its ZIP64 marker
                    if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL ||
                        cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL ||
                        centot64 != end.centot && end.centot != ZIP64_MINVAL32) {
                        return end;
                    }
                    // to use the end64 values
                    end.cenlen = cenlen64;
                    end.cenoff = cenoff64;
                    end.centot = (int)centot64;    // assume total < 2g
                    end.endpos = end64pos;
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }

    // Reads zip file central directory. Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    // Parses the central directory into the inodes map and returns the raw
    // CEN bytes (plus END header); returns null when only an END header is
    // present (empty zip).  Rejects encrypted entries and any compression
    // method other than STORED/DEFLATED.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;    // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;    // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0) {
                zerror("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                zerror("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, nlen, pos);
            inodes.put(inode, inode);
            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.  The file is
    // registered in tmppaths so close() can delete it.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // Removes an entry from both the directory tree and the inode map and
    // marks the file system dirty so sync() rewrites the zip.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Installs (or replaces) an entry in the inode map; freshly created
    // entries (NEW/FILECH/COPY) are also linked into their parent's child
    // list.  Marks the file system dirty.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
1157 private long copyLOCEntry(Entry e, boolean updateHeader, 1158 OutputStream os, 1159 long written, byte[] buf) 1160 throws IOException 1161 { 1162 long locoff = e.locoff; // where to read 1163 e.locoff = written; // update the e.locoff with new value 1164 1165 // calculate the size need to write out 1166 long size = 0; 1167 // if there is A ext 1168 if ((e.flag & FLAG_DATADESCR) != 0) { 1169 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1170 size = 24; 1171 else 1172 size = 16; 1173 } 1174 // read loc, use the original loc.elen/nlen 1175 // 1176 // an extra byte after loc is read, which should be the first byte of the 1177 // 'name' field of the loc. if this byte is '/', which means the original 1178 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1179 // is used to output the loc, in which the leading "/" will be removed 1180 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1181 throw new ZipException("loc: reading failed"); 1182 1183 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1184 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1185 size += e.csize; 1186 written = e.writeLOC(os) + size; 1187 } else { 1188 os.write(buf, 0, LOCHDR); // write out the loc header 1189 locoff += LOCHDR; 1190 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1191 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1192 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1193 written = LOCHDR + size; 1194 } 1195 int n; 1196 while (size > 0 && 1197 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1198 { 1199 if (size < n) 1200 n = (int)size; 1201 os.write(buf, 0, n); 1202 size -= n; 1203 locoff += n; 1204 } 1205 return written; 1206 } 1207 1208 // sync the zip file system, if there is any udpate 1209 private void sync() throws IOException { 1210 // System.out.printf("->sync(%s) starting....!%n", toString()); 1211 // check ex-closer 1212 if (!exChClosers.isEmpty()) { 1213 for (ExChannelCloser ecc : 
exChClosers) { 1214 if (ecc.streams.isEmpty()) { 1215 ecc.ch.close(); 1216 Files.delete(ecc.path); 1217 exChClosers.remove(ecc); 1218 } 1219 } 1220 } 1221 if (!hasUpdate) 1222 return; 1223 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1224 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1225 { 1226 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1227 long written = 0; 1228 byte[] buf = new byte[8192]; 1229 Entry e = null; 1230 1231 // write loc 1232 for (IndexNode inode : inodes.values()) { 1233 if (inode instanceof Entry) { // an updated inode 1234 e = (Entry)inode; 1235 try { 1236 if (e.type == Entry.COPY) { 1237 // entry copy: the only thing changed is the "name" 1238 // and "nlen" in LOC header, so we udpate/rewrite the 1239 // LOC in new file and simply copy the rest (data and 1240 // ext) without enflating/deflating from the old zip 1241 // file LOC entry. 1242 written += copyLOCEntry(e, true, os, written, buf); 1243 } else { // NEW, FILECH or CEN 1244 e.locoff = written; 1245 written += e.writeLOC(os); // write loc header 1246 if (e.bytes != null) { // in-memory, deflated 1247 os.write(e.bytes); // already 1248 written += e.bytes.length; 1249 } else if (e.file != null) { // tmp file 1250 try (InputStream is = Files.newInputStream(e.file)) { 1251 int n; 1252 if (e.type == Entry.NEW) { // deflated already 1253 while ((n = is.read(buf)) != -1) { 1254 os.write(buf, 0, n); 1255 written += n; 1256 } 1257 } else if (e.type == Entry.FILECH) { 1258 // the data are not deflated, use ZEOS 1259 try (OutputStream os2 = new EntryOutputStream(e, os)) { 1260 while ((n = is.read(buf)) != -1) { 1261 os2.write(buf, 0, n); 1262 } 1263 } 1264 written += e.csize; 1265 if ((e.flag & FLAG_DATADESCR) != 0) 1266 written += e.writeEXT(os); 1267 } 1268 } 1269 Files.delete(e.file); 1270 tmppaths.remove(e.file); 1271 } else { 1272 // dir, 0-length data 1273 } 1274 } 1275 elist.add(e); 1276 } catch (IOException x) { 1277 
x.printStackTrace(); // skip any in-accurate entry 1278 } 1279 } else { // unchanged inode 1280 if (inode.pos == -1) { 1281 continue; // pseudo directory node 1282 } 1283 if (inode.name.length == 1 && inode.name[0] == '/') { 1284 continue; // no root '/' directory even it 1285 // exits in original zip/jar file. 1286 } 1287 e = Entry.readCEN(this, inode); 1288 try { 1289 written += copyLOCEntry(e, false, os, written, buf); 1290 elist.add(e); 1291 } catch (IOException x) { 1292 x.printStackTrace(); // skip any wrong entry 1293 } 1294 } 1295 } 1296 1297 // now write back the cen and end table 1298 end.cenoff = written; 1299 for (Entry entry : elist) { 1300 written += entry.writeCEN(os); 1301 } 1302 end.centot = elist.size(); 1303 end.cenlen = written - end.cenoff; 1304 end.write(os, written, forceEnd64); 1305 } 1306 if (!streams.isEmpty()) { 1307 // 1308 // TBD: ExChannelCloser should not be necessary if we only 1309 // sync when being closed, all streams should have been 1310 // closed already. Keep the logic here for now. 
1311 // 1312 // There are outstanding input streams open on existing "ch", 1313 // so, don't close the "cha" and delete the "file for now, let 1314 // the "ex-channel-closer" to handle them 1315 ExChannelCloser ecc = new ExChannelCloser( 1316 createTempFileInSameDirectoryAs(zfpath), 1317 ch, 1318 streams); 1319 Files.move(zfpath, ecc.path, REPLACE_EXISTING); 1320 exChClosers.add(ecc); 1321 streams = Collections.synchronizedSet(new HashSet<InputStream>()); 1322 } else { 1323 ch.close(); 1324 Files.delete(zfpath); 1325 } 1326 1327 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1328 hasUpdate = false; // clear 1329 } 1330 1331 IndexNode getInode(byte[] path) { 1332 if (path == null) 1333 throw new NullPointerException("path"); 1334 return inodes.get(IndexNode.keyOf(path)); 1335 } 1336 1337 Entry getEntry(byte[] path) throws IOException { 1338 IndexNode inode = getInode(path); 1339 if (inode instanceof Entry) 1340 return (Entry)inode; 1341 if (inode == null || inode.pos == -1) 1342 return null; 1343 return Entry.readCEN(this, inode); 1344 } 1345 1346 public void deleteFile(byte[] path, boolean failIfNotExists) 1347 throws IOException 1348 { 1349 checkWritable(); 1350 1351 IndexNode inode = getInode(path); 1352 if (inode == null) { 1353 if (path != null && path.length == 0) 1354 throw new ZipException("root directory </> can't not be delete"); 1355 if (failIfNotExists) 1356 throw new NoSuchFileException(getString(path)); 1357 } else { 1358 if (inode.isDir() && inode.child != null) 1359 throw new DirectoryNotEmptyException(getString(path)); 1360 updateDelete(inode); 1361 } 1362 } 1363 1364 private static void copyStream(InputStream is, OutputStream os) 1365 throws IOException 1366 { 1367 byte[] copyBuf = new byte[8192]; 1368 int n; 1369 while ((n = is.read(copyBuf)) != -1) { 1370 os.write(copyBuf, 0, n); 1371 } 1372 } 1373 1374 // Returns an out stream for either 1375 // (1) writing the contents of a new entry, if the entry exits, or 1376 // (2) updating/replacing the 
    // Returns an output stream for either
    // (1) writing the contents of a new entry, if the entry exists, or
    // (2) updating/replacing the contents of the specified existing entry.
    private OutputStream getOutputStream(Entry e) throws IOException {

        if (e.mtime == -1)
            e.mtime = System.currentTimeMillis();
        if (e.method == -1)
            e.method = METHOD_DEFLATED;  // TBD: use default method
        // store size, compressed size, and crc-32 in LOC header
        e.flag = 0;
        if (zc.isUTF8())
            e.flag |= FLAG_USE_UTF8;
        OutputStream os;
        if (useTempFile) {
            // spill the (possibly large) entry data to a temp file
            e.file = getTempPathForEntry(null);
            os = Files.newOutputStream(e.file, WRITE);
        } else {
            // buffer in memory for better performance
            os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192);
        }
        return new EntryOutputStream(e, os);
    }

    // Returns an input stream for reading the (decompressed) contents of
    // the given entry; the stream is registered in 'streams' so sync()
    // knows when readers are still outstanding.
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis = null;

        if (e.type == Entry.NEW) {
            if (e.bytes != null)
                eis = new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                eis = Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {  // untouched CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
                private boolean isClosed = false;
                // return the cached inflater on close (instead of ending it)
                public void close() throws IOException {
                    if (!isClosed) {
                        releaseInflater(inf);
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                // uncompressed bytes still to be produced (clamped to int)
                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry
        protected final long size;              // uncompressed size of this entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            size = e.size;
            pos = e.locoff;
            if (pos == -1) {
                // LOC offset unknown; re-read the entry from the CEN
                Entry e2 = getEntry(e.name);
                if (e2 == null) {
                    throw new ZipException("invalid loc for entry <" + e.name + ">");
                }
                pos = e2.locoff;
            }
            pos = -pos; // lazy initialize the real data offset
        }

        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            initDataPos();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // synchronize on the channel: position() + read() must be atomic
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }

        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }

        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }

        public int available() {
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }

        public long size() {
            return size;
        }

        public void close() {
            rem = 0;
            streams.remove(this);
        }

        // Resolves the lazily-encoded (negative) position into the real
        // offset of the entry data by reading the LOC header.
        private void initDataPos() throws IOException {
            if (pos <= 0) {
                pos = -pos + locpos;
                byte[] buf = new byte[LOCHDR];
                if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                    throw new ZipException("invalid loc " + pos + " for entry reading");
                }
                pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
            }
        }
    }

    // Output stream that deflates (or stores) entry data into the backing
    // stream and records size/csize/crc on the entry when closed.
    class EntryOutputStream extends DeflaterOutputStream
    {
        private CRC32 crc;
        private Entry e;
        private long written;    // byte count for METHOD_STORED entries
        private boolean isClosed = false;

        EntryOutputStream(Entry e, OutputStream os)
            throws IOException
        {
            super(os, getDeflater());
            if (e == null)
                throw new NullPointerException("Zip entry is null");
            this.e = e;
            crc = new CRC32();
        }

        @Override
        public synchronized void write(byte b[], int off, int len)
            throws IOException
        {
            if (e.type != Entry.FILECH)  // only from sync
                ensureOpen();
            if (isClosed) {
                throw new IOException("Stream closed");
            }
            if (off < 0 || len < 0 || off > b.length - len) {
                throw new IndexOutOfBoundsException();
            } else if (len == 0) {
                return;
            }
            switch (e.method) {
            case METHOD_DEFLATED:
                super.write(b, off, len);
                break;
            case METHOD_STORED:
                written += len;
                out.write(b, off, len);
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            crc.update(b, off, len);
        }

        @Override
        public synchronized void close() throws IOException {
            if (isClosed) {
                return;
            }
            isClosed = true;
            // TBD ensureOpen();
            switch (e.method) {
            case METHOD_DEFLATED:
                finish();
                e.size = def.getBytesRead();
                e.csize = def.getBytesWritten();
                e.crc = crc.getValue();
                break;
            case METHOD_STORED:
                // we already know that both e.size and e.csize are the same
                e.size = e.csize = written;
                e.crc = crc.getValue();
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();

            if (e.type == Entry.FILECH) {
                // called from sync(): do not re-register the entry
                releaseDeflater(def);
                return;
            }
            super.close();
            releaseDeflater(def);
            update(e);
        }
    }

    // Convenience: throws a ZipException with the given message.
    static void zerror(String msg) throws ZipException {
        throw new ZipException(msg);
    }

    // Maximum number of de/inflaters we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                return new Inflater(true);
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();
1693 private Deflater getDeflater() { 1694 synchronized (deflaters) { 1695 int size = deflaters.size(); 1696 if (size > 0) { 1697 Deflater def = deflaters.remove(size - 1); 1698 return def; 1699 } else { 1700 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1701 } 1702 } 1703 } 1704 1705 // Releases the specified inflater to the list of available inflaters. 1706 private void releaseDeflater(Deflater def) { 1707 synchronized (deflaters) { 1708 if (inflaters.size() < MAX_FLATER) { 1709 def.reset(); 1710 deflaters.add(def); 1711 } else { 1712 def.end(); 1713 } 1714 } 1715 } 1716 1717 // End of central directory record 1718 static class END { 1719 // these 2 fields are not used by anyone and write() uses "0" 1720 // int disknum; 1721 // int sdisknum; 1722 int endsub; // endsub 1723 int centot; // 4 bytes 1724 long cenlen; // 4 bytes 1725 long cenoff; // 4 bytes 1726 int comlen; // comment length 1727 byte[] comment; 1728 1729 /* members of Zip64 end of central directory locator */ 1730 // int diskNum; 1731 long endpos; 1732 // int disktot; 1733 1734 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 1735 boolean hasZip64 = forceEnd64; // false; 1736 long xlen = cenlen; 1737 long xoff = cenoff; 1738 if (xlen >= ZIP64_MINVAL) { 1739 xlen = ZIP64_MINVAL; 1740 hasZip64 = true; 1741 } 1742 if (xoff >= ZIP64_MINVAL) { 1743 xoff = ZIP64_MINVAL; 1744 hasZip64 = true; 1745 } 1746 int count = centot; 1747 if (count >= ZIP64_MINVAL32) { 1748 count = ZIP64_MINVAL32; 1749 hasZip64 = true; 1750 } 1751 if (hasZip64) { 1752 long off64 = offset; 1753 //zip64 end of central directory record 1754 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1755 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1756 writeShort(os, 45); // version made by 1757 writeShort(os, 45); // version needed to extract 1758 writeInt(os, 0); // number of this disk 1759 writeInt(os, 0); // central directory start disk 1760 writeLong(os, centot); // number of 
directory entries on disk 1761 writeLong(os, centot); // number of directory entries 1762 writeLong(os, cenlen); // length of central directory 1763 writeLong(os, cenoff); // offset of central directory 1764 1765 //zip64 end of central directory locator 1766 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1767 writeInt(os, 0); // zip64 END start disk 1768 writeLong(os, off64); // offset of zip64 END 1769 writeInt(os, 1); // total number of disks (?) 1770 } 1771 writeInt(os, ENDSIG); // END record signature 1772 writeShort(os, 0); // number of this disk 1773 writeShort(os, 0); // central directory start disk 1774 writeShort(os, count); // number of directory entries on disk 1775 writeShort(os, count); // total number of directory entries 1776 writeInt(os, xlen); // length of central directory 1777 writeInt(os, xoff); // offset of central directory 1778 if (comment != null) { // zip file comment 1779 writeShort(os, comment.length); 1780 writeBytes(os, comment); 1781 } else { 1782 writeShort(os, 0); 1783 } 1784 } 1785 } 1786 1787 // Internal node that links a "name" to its pos in cen table. 1788 // The node itself can be used as a "key" to lookup itself in 1789 // the HashMap inodes. 
1790 static class IndexNode { 1791 byte[] name; 1792 int hashcode; // node is hashable/hashed by its name 1793 int pos = -1; // position in cen table, -1 menas the 1794 // entry does not exists in zip file 1795 boolean isdir; 1796 1797 IndexNode(byte[] name, boolean isdir) { 1798 name(name); 1799 this.isdir = isdir; 1800 this.pos = -1; 1801 } 1802 1803 IndexNode(byte[] name, int pos) { 1804 name(name); 1805 this.pos = pos; 1806 } 1807 1808 // constructor for cenInit() (1) remove tailing '/' (2) pad leading '/' 1809 IndexNode(byte[] cen, int nlen, int pos) { 1810 int noff = pos + CENHDR; 1811 if (cen[noff + nlen - 1] == '/') { 1812 isdir = true; 1813 nlen--; 1814 } 1815 if (nlen > 0 && cen[noff] == '/') { 1816 name = Arrays.copyOfRange(cen, noff, noff + nlen); 1817 } else { 1818 name = new byte[nlen + 1]; 1819 System.arraycopy(cen, noff, name, 1, nlen); 1820 name[0] = '/'; 1821 } 1822 name(name); 1823 this.pos = pos; 1824 } 1825 1826 private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>(); 1827 1828 final static IndexNode keyOf(byte[] name) { // get a lookup key; 1829 IndexNode key = cachedKey.get(); 1830 if (key == null) { 1831 key = new IndexNode(name, -1); 1832 cachedKey.set(key); 1833 } 1834 return key.as(name); 1835 } 1836 1837 final void name(byte[] name) { 1838 this.name = name; 1839 this.hashcode = Arrays.hashCode(name); 1840 } 1841 1842 final IndexNode as(byte[] name) { // reuse the node, mostly 1843 name(name); // as a lookup "key" 1844 return this; 1845 } 1846 1847 boolean isDir() { 1848 return isdir; 1849 } 1850 1851 public boolean equals(Object other) { 1852 if (!(other instanceof IndexNode)) { 1853 return false; 1854 } 1855 if (other instanceof ParentLookup) { 1856 return ((ParentLookup)other).equals(this); 1857 } 1858 return Arrays.equals(name, ((IndexNode)other).name); 1859 } 1860 1861 public int hashCode() { 1862 return hashcode; 1863 } 1864 1865 IndexNode() {} 1866 IndexNode sibling; 1867 IndexNode child; // 1st child 1868 } 
1869 1870 static class Entry extends IndexNode implements ZipFileAttributes { 1871 1872 static final int CEN = 1; // entry read from cen 1873 static final int NEW = 2; // updated contents in bytes or file 1874 static final int FILECH = 3; // fch update in "file" 1875 static final int COPY = 4; // copy of a CEN entry 1876 1877 byte[] bytes; // updated content bytes 1878 Path file; // use tmp file to store bytes; 1879 int type = CEN; // default is the entry read from cen 1880 1881 // entry attributes 1882 int version; 1883 int flag; 1884 int method = -1; // compression method 1885 long mtime = -1; // last modification time (in DOS time) 1886 long atime = -1; // last access time 1887 long ctime = -1; // create time 1888 long crc = -1; // crc-32 of entry data 1889 long csize = -1; // compressed size of entry data 1890 long size = -1; // uncompressed size of entry data 1891 byte[] extra; 1892 1893 // cen 1894 1895 // these fields are not used by anyone and writeCEN uses "0" 1896 // int versionMade; 1897 // int disk; 1898 // int attrs; 1899 // long attrsEx; 1900 long locoff; 1901 byte[] comment; 1902 1903 Entry() {} 1904 1905 Entry(byte[] name, boolean isdir) { 1906 name(name); 1907 this.isdir = isdir; 1908 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1909 this.crc = 0; 1910 this.size = 0; 1911 this.csize = 0; 1912 this.method = METHOD_DEFLATED; 1913 } 1914 1915 Entry(byte[] name, int type, boolean isdir) { 1916 this(name, isdir); 1917 this.type = type; 1918 } 1919 1920 Entry (Entry e, int type) { 1921 name(e.name); 1922 this.isdir = e.isdir; 1923 this.version = e.version; 1924 this.ctime = e.ctime; 1925 this.atime = e.atime; 1926 this.mtime = e.mtime; 1927 this.crc = e.crc; 1928 this.size = e.size; 1929 this.csize = e.csize; 1930 this.method = e.method; 1931 this.extra = e.extra; 1932 /* 1933 this.versionMade = e.versionMade; 1934 this.disk = e.disk; 1935 this.attrs = e.attrs; 1936 this.attrsEx = e.attrsEx; 1937 */ 1938 this.locoff = e.locoff; 1939 
this.comment = e.comment; 1940 this.type = type; 1941 } 1942 1943 Entry (byte[] name, Path file, int type) { 1944 this(name, type, false); 1945 this.file = file; 1946 this.method = METHOD_STORED; 1947 } 1948 1949 int version() throws ZipException { 1950 if (method == METHOD_DEFLATED) 1951 return 20; 1952 else if (method == METHOD_STORED) 1953 return 10; 1954 throw new ZipException("unsupported compression method"); 1955 } 1956 1957 ///////////////////// CEN ////////////////////// 1958 static Entry readCEN(ZipFileSystem zipfs, IndexNode inode) 1959 throws IOException 1960 { 1961 return new Entry().cen(zipfs, inode); 1962 } 1963 1964 private Entry cen(ZipFileSystem zipfs, IndexNode inode) 1965 throws IOException 1966 { 1967 byte[] cen = zipfs.cen; 1968 int pos = inode.pos; 1969 if (!cenSigAt(cen, pos)) 1970 zerror("invalid CEN header (bad signature)"); 1971 version = CENVER(cen, pos); 1972 flag = CENFLG(cen, pos); 1973 method = CENHOW(cen, pos); 1974 mtime = dosToJavaTime(CENTIM(cen, pos)); 1975 crc = CENCRC(cen, pos); 1976 csize = CENSIZ(cen, pos); 1977 size = CENLEN(cen, pos); 1978 int nlen = CENNAM(cen, pos); 1979 int elen = CENEXT(cen, pos); 1980 int clen = CENCOM(cen, pos); 1981 /* 1982 versionMade = CENVEM(cen, pos); 1983 disk = CENDSK(cen, pos); 1984 attrs = CENATT(cen, pos); 1985 attrsEx = CENATX(cen, pos); 1986 */ 1987 locoff = CENOFF(cen, pos); 1988 pos += CENHDR; 1989 this.name = inode.name; 1990 this.isdir = inode.isdir; 1991 this.hashcode = inode.hashcode; 1992 1993 pos += nlen; 1994 if (elen > 0) { 1995 extra = Arrays.copyOfRange(cen, pos, pos + elen); 1996 pos += elen; 1997 readExtra(zipfs); 1998 } 1999 if (clen > 0) { 2000 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2001 } 2002 return this; 2003 } 2004 2005 int writeCEN(OutputStream os) throws IOException 2006 { 2007 int written = CENHDR; 2008 int version0 = version(); 2009 long csize0 = csize; 2010 long size0 = size; 2011 long locoff0 = locoff; 2012 int elen64 = 0; // extra for ZIP64 2013 int 
elenNTFS = 0; // extra for NTFS (a/c/mtime) 2014 int elenEXTT = 0; // extra for Extended Timestamp 2015 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2016 2017 byte[] zname = isdir ? toDirectoryPath(name) : name; 2018 2019 // confirm size/length 2020 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2021 int elen = (extra != null) ? extra.length : 0; 2022 int eoff = 0; 2023 int clen = (comment != null) ? comment.length : 0; 2024 if (csize >= ZIP64_MINVAL) { 2025 csize0 = ZIP64_MINVAL; 2026 elen64 += 8; // csize(8) 2027 } 2028 if (size >= ZIP64_MINVAL) { 2029 size0 = ZIP64_MINVAL; // size(8) 2030 elen64 += 8; 2031 } 2032 if (locoff >= ZIP64_MINVAL) { 2033 locoff0 = ZIP64_MINVAL; 2034 elen64 += 8; // offset(8) 2035 } 2036 if (elen64 != 0) { 2037 elen64 += 4; // header and data sz 4 bytes 2038 } 2039 while (eoff + 4 < elen) { 2040 int tag = SH(extra, eoff); 2041 int sz = SH(extra, eoff + 2); 2042 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2043 foundExtraTime = true; 2044 } 2045 eoff += (4 + sz); 2046 } 2047 if (!foundExtraTime) { 2048 if (isWindows) { // use NTFS 2049 elenNTFS = 36; // total 36 bytes 2050 } else { // Extended Timestamp otherwise 2051 elenEXTT = 9; // only mtime in cen 2052 } 2053 } 2054 writeInt(os, CENSIG); // CEN header signature 2055 if (elen64 != 0) { 2056 writeShort(os, 45); // ver 4.5 for zip64 2057 writeShort(os, 45); 2058 } else { 2059 writeShort(os, version0); // version made by 2060 writeShort(os, version0); // version needed to extract 2061 } 2062 writeShort(os, flag); // general purpose bit flag 2063 writeShort(os, method); // compression method 2064 // last modification time 2065 writeInt(os, (int)javaToDosTime(mtime)); 2066 writeInt(os, crc); // crc-32 2067 writeInt(os, csize0); // compressed size 2068 writeInt(os, size0); // uncompressed size 2069 writeShort(os, nlen); 2070 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2071 2072 if (comment != null) { 2073 writeShort(os, 
Math.min(clen, 0xffff)); 2074 } else { 2075 writeShort(os, 0); 2076 } 2077 writeShort(os, 0); // starting disk number 2078 writeShort(os, 0); // internal file attributes (unused) 2079 writeInt(os, 0); // external file attributes (unused) 2080 writeInt(os, locoff0); // relative offset of local header 2081 writeBytes(os, zname, 1, nlen); 2082 if (elen64 != 0) { 2083 writeShort(os, EXTID_ZIP64);// Zip64 extra 2084 writeShort(os, elen64 - 4); // size of "this" extra block 2085 if (size0 == ZIP64_MINVAL) 2086 writeLong(os, size); 2087 if (csize0 == ZIP64_MINVAL) 2088 writeLong(os, csize); 2089 if (locoff0 == ZIP64_MINVAL) 2090 writeLong(os, locoff); 2091 } 2092 if (elenNTFS != 0) { 2093 writeShort(os, EXTID_NTFS); 2094 writeShort(os, elenNTFS - 4); 2095 writeInt(os, 0); // reserved 2096 writeShort(os, 0x0001); // NTFS attr tag 2097 writeShort(os, 24); 2098 writeLong(os, javaToWinTime(mtime)); 2099 writeLong(os, javaToWinTime(atime)); 2100 writeLong(os, javaToWinTime(ctime)); 2101 } 2102 if (elenEXTT != 0) { 2103 writeShort(os, EXTID_EXTT); 2104 writeShort(os, elenEXTT - 4); 2105 if (ctime == -1) 2106 os.write(0x3); // mtime and atime 2107 else 2108 os.write(0x7); // mtime, atime and ctime 2109 writeInt(os, javaToUnixTime(mtime)); 2110 } 2111 if (extra != null) // whatever not recognized 2112 writeBytes(os, extra); 2113 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2114 writeBytes(os, comment); 2115 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2116 } 2117 2118 ///////////////////// LOC ////////////////////// 2119 2120 int writeLOC(OutputStream os) throws IOException { 2121 writeInt(os, LOCSIG); // LOC header signature 2122 int version = version(); 2123 2124 byte[] zname = isdir ? toDirectoryPath(name) : name; 2125 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2126 int elen = (extra != null) ? 
                                                 extra.length : 0;
            boolean foundExtraTime = false;     // true if an extra timestamp
                                                // (EXTT or NTFS) block is already present
            int eoff = 0;
            int elen64 = 0;                     // size of appended ZIP64 extra block
            int elenEXTT = 0;                   // size of appended "ext time" block
            int elenNTFS = 0;                   // size of appended NTFS time block
            if ((flag & FLAG_DATADESCR) != 0) {
                // streaming mode: crc and sizes are not known yet; write zeros
                // here, the real values follow the compressed data in the
                // data descriptor (see writeEXT())
                writeShort(os, version());      // version needed to extract
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;                //headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);         // ver 4.5 for zip64
                } else {
                    writeShort(os, version());  // version needed to extract
                }
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);              // crc-32
                if (elen64 != 0) {
                    // true 64-bit sizes go into the ZIP64 extra block below
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);        // compressed size
                    writeInt(os, size);         // uncompressed size
                }
            }
            // scan the existing extra field for a timestamp block so we don't
            // append a duplicate one
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;              // NTFS, total 36 bytes
                } else {                        // on unix use "ext time"
                    elenEXTT = 9;               // headid(2) + sz(2) + flag(1) + mtime(4)
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, zname, 1, nlen);     // skip leading '/' of the internal name
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);             // data size of the zip64 block
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);                // reserved
                writeShort(os, 0x0001);         // NTFS attr tag
                writeShort(os, 24);             // attr size: 3 * 8 bytes
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);   // size for the following data block
                int fbyte = 0x1;
                if (atime != -1)                // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)                // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);                // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor: written after the compressed data when the entry
        // was written in streaming mode (FLAG_DATADESCR set in writeLOC).
        // Returns the number of bytes written: 24 with 8-byte zip64 sizes,
        // otherwise 16.
        int writeEXT(OutputStream os) throws IOException {
            writeInt(os, EXTSIG);               // EXT header signature
            writeInt(os, crc);                  // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);            // compressed size
                writeInt(os, size);             // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        //
        // Parses the CEN extra field: a ZIP64 block supplies the real
        // size/csize/locoff for fields stored as ZIP64_MINVAL; NTFS and EXTT
        // blocks supply timestamps. Recognized blocks are consumed; unknown
        // blocks are compacted to the front of 'extra', which is then
        // truncated (or set to null when nothing was kept).
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;                     // write position for kept (unknown) blocks
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)            // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // fields appear in fixed order; each is present only when
                    // the corresponding CEN field was stored as ZIP64_MINVAL
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)     // invalid zip64 extra
                            break;              // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)                // reserved(4) + tag(2) + sz(2) + 3 * time(8)
                        break;
                    pos += 4;                   // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)       // must be the NTFS attr tag
                        break;
                    if (SH(extra, pos + 2) != 24)       // three 8-byte times
                        break;
                    // override the loc field, date-time here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime, if flag
                    // "zipinfo-time" is not specified to false;
                    // there is performance cost (move up to loc and read) to
                    // access the loc table foreach entry;
                    if (zipfs.noExtt) {
                        if (sz == 5)            // flag(1) + mtime(4) only
                            mtime = unixToJavaTime(LG(extra, pos + 1));
                        break;
                    }
                    // read this entry's LOC header to locate its extra field
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length, locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (!locSigAt(buf, 0))
                        throw new ZipException("loc: wrong sig ->"
                                               + Long.toString(getSig(buf, 0), 16));
                    int locElen = LOCEXT(buf);
                    if (locElen < 9)            // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length, locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    int locPos = 0;
                    // scan the loc extra field for its EXTT block
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        // each timestamp is present only if its flag bit is set
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:    // unknown tag, keep it
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            // NOTE(review): when every block carries an unknown tag, newOff ends
            // up equal to extra.length and the else branch below nulls 'extra',
            // discarding the unknown blocks; confirm against upstream whether
            // that case should keep 'extra' unchanged instead.
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;
        }

        ///////// basic file attributes ///////////
        @Override
        public FileTime creationTime() {
            // fall back to mtime when no creation time was recorded
            return FileTime.fromMillis(ctime == -1 ? mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        @Override
        public boolean isOther() {
            // entries are always either directories or regular files
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        @Override
        public FileTime lastAccessTime() {
            // fall back to mtime when no access time was recorded
            return FileTime.fromMillis(atime == -1 ?
                                       mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        @Override
        public long size() {
            // uncompressed size of the entry
            return size;
        }

        @Override
        public boolean isSymbolicLink() {
            // symbolic links are not supported by this file system
            return false;
        }

        @Override
        public Object fileKey() {
            return null;
        }

        ///////// zip entry attributes ///////////
        public long compressedSize() {
            return csize;
        }

        public long crc() {
            return crc;
        }

        public int method() {
            // the entry's compression method
            return method;
        }

        public byte[] extra() {
            // defensive copy so callers cannot mutate the internal array
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        public byte[] comment() {
            // defensive copy so callers cannot mutate the internal array
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }

        // human-readable dump of all attributes, for debugging
        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format(" creationTime : %tc%n", creationTime().toMillis());
            fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis());
            fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format(" isRegularFile : %b%n", isRegularFile());
            fm.format(" isDirectory : %b%n", isDirectory());
            fm.format(" isSymbolicLink : %b%n", isSymbolicLink());
            fm.format(" isOther : %b%n", isOther());
            fm.format(" fileKey : %s%n", fileKey());
            fm.format(" size : %d%n", size());
            fm.format(" compressedSize : %d%n", compressedSize());
            fm.format(" crc : %x%n", crc());
            fm.format(" method : %d%n", method());
            fm.close();
            return sb.toString();
        }
    }

    // Holds a superseded channel of the zip file together with the input
    // streams still open on it. NOTE(review): presumably the channel is
    // closed once 'streams' drains -- the closing logic lives outside this
    // chunk, verify at the usage site.
    private static class ExChannelCloser {
        Path path;                      // path of the zip file
        SeekableByteChannel ch;         // the old channel, to be closed later
        Set<InputStream> streams;       // streams still reading from 'ch'
        ExChannelCloser(Path path,
                        SeekableByteChannel ch,
                        Set<InputStream> streams)
        {
            this.path = path;
            this.ch = ch;
            this.streams = streams;
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourself as
    // implemented below.
    private IndexNode root;

    // default time stamp for pseudo entries
    private long zfsDefaultTimeStamp = System.currentTimeMillis();

    // Unlinks 'inode' from its parent's singly-linked child list.
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            // inode is the first child
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    // purely for parent lookup, so we don't have to copy the parent
    // name every time
    static class ParentLookup extends IndexNode {
        int len;        // number of leading bytes of 'name' that form the key
        ParentLookup() {}

        final ParentLookup as(byte[] name, int len) { // as a lookup "key"
            name(name, len);
            return this;
        }

        void name(byte[] name, int len) {
            this.name = name;
            this.len = len;
            // calculate the hashcode the same way as Arrays.hashCode() does,
            // but over the first 'len' bytes only
            int result = 1;
            for (int i = 0; i < len; i++)
                result = 31 * result + name[i];
            this.hashcode = result;
        }

        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // compare only the first 'len' bytes of our name with the
            // other node's full name
            byte[] oname = ((IndexNode)other).name;
            return Arrays.equals(name, 0, len,
                                 oname, 0, oname.length);
        }

    }

    // Builds the in-memory directory tree (child/sibling links) over all
    // entries in 'inodes', creating pseudo directory entries for parents
    // that have no entry of their own in the ZIP directory.
    private void buildNodeTree() throws IOException {
        beginWrite();
        try {
            IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH));
            if (root == null) {
                root = new
                           IndexNode(ROOTPATH, true);
            } else {
                inodes.remove(root);
            }
            // snapshot the nodes before pseudo parents are added below
            IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]);
            inodes.put(root, root);
            ParentLookup lookup = new ParentLookup();
            for (IndexNode node : nodes) {
                IndexNode parent;
                // walk up the path, linking each node under its parent and
                // creating missing pseudo directory entries on the way
                while (true) {
                    int off = getParentOff(node.name);
                    if (off <= 1) {    // parent is root
                        node.sibling = root.child;
                        root.child = node;
                        break;
                    }
                    lookup = lookup.as(node.name, off);
                    if (inodes.containsKey(lookup)) {    // parent exists
                        parent = inodes.get(lookup);
                        node.sibling = parent.child;
                        parent.child = node;
                        break;
                    }
                    // add new pseudo directory entry
                    parent = new IndexNode(Arrays.copyOf(node.name, off), true);
                    inodes.put(parent, parent);
                    node.sibling = parent.child;
                    parent.child = node;
                    node = parent;    // continue linking the new parent upward
                }
            }
        } finally {
            endWrite();
        }
    }
}