/*
 * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package jdk.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.*;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.nio.file.spi.*;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Inflater;
import java.util.zip.Deflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.ZipException;
import static java.lang.Boolean.*;
import static jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;
import static java.nio.file.StandardOpenOption.*;
import static java.nio.file.StandardCopyOption.*;

/**
 * A FileSystem built on a zip file.
 *
 * @author Xueming Shen
 */

class ZipFileSystem extends FileSystem {

    private final ZipFileSystemProvider provider;
    private final ZipPath defaultdir;      // resolved default directory as a ZipPath
    private boolean readOnly = false;      // set when the underlying zip file is not writable
    private final Path zfpath;             // path of the zip file on the hosting file system
    private final ZipCoder zc;             // encoder/decoder for entry names and comments

    // configurable by env map
    private final String  defaultDir;      // default dir for the file system
    private final String  nameEncoding;    // default encoding for name/comment
    private final boolean useTempFile;     // use a temp file for newOS, default
                                           // is to use BAOS for better performance
    private final boolean createNew;       // create a new zip if not exists
    private static final boolean isWindows = AccessController.doPrivileged(
        (PrivilegedAction<Boolean>) () -> System.getProperty("os.name")
                                                .startsWith("Windows"));

    /**
     * Creates a zip file system over {@code zfpath}. Behavior is configured
     * via the env map keys "create", "encoding", "useTempFile" and
     * "default.dir" (which must be absolute).
     *
     * @throws FileSystemNotFoundException if the zip file does not exist and
     *         "create" was not requested
     * @throws IOException if the zip file cannot be opened or its central
     *         directory cannot be parsed
     */
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env)
        throws IOException
    {
        // configurable env setup
        this.createNew    = "true".equals(env.get("create"));
        this.nameEncoding = env.containsKey("encoding") ?
                            (String)env.get("encoding") : "UTF-8";
        this.useTempFile  = TRUE.equals(env.get("useTempFile"));
        this.defaultDir   = env.containsKey("default.dir") ?
                            (String)env.get("default.dir") : "/";
        if (this.defaultDir.charAt(0) != '/')
            throw new IllegalArgumentException("default dir should be absolute");

        this.provider = provider;
        this.zfpath = zfpath;
        if (Files.notExists(zfpath)) {
            // create a fresh zip consisting of just an END record if requested
            if (createNew) {
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath));
        if (!writeable)
            this.readOnly = true;
        this.zc = ZipCoder.get(nameEncoding);
        this.defaultdir = new ZipPath(this, getBytes(defaultDir));
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();    // read the whole central directory up front
        } catch (IOException x) {
            // release the channel before propagating the failure
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

private void checkWritable() throws IOException { 154 if (readOnly) 155 throw new ReadOnlyFileSystemException(); 156 } 157 158 @Override 159 public Iterable<Path> getRootDirectories() { 160 ArrayList<Path> pathArr = new ArrayList<>(); 161 pathArr.add(new ZipPath(this, new byte[]{'/'})); 162 return pathArr; 163 } 164 165 ZipPath getDefaultDir() { // package private 166 return defaultdir; 167 } 168 169 @Override 170 public ZipPath getPath(String first, String... more) { 171 String path; 172 if (more.length == 0) { 173 path = first; 174 } else { 175 StringBuilder sb = new StringBuilder(); 176 sb.append(first); 177 for (String segment: more) { 178 if (segment.length() > 0) { 179 if (sb.length() > 0) 180 sb.append('/'); 181 sb.append(segment); 182 } 183 } 184 path = sb.toString(); 185 } 186 return new ZipPath(this, getBytes(path)); 187 } 188 189 @Override 190 public UserPrincipalLookupService getUserPrincipalLookupService() { 191 throw new UnsupportedOperationException(); 192 } 193 194 @Override 195 public WatchService newWatchService() { 196 throw new UnsupportedOperationException(); 197 } 198 199 FileStore getFileStore(ZipPath path) { 200 return new ZipFileStore(path); 201 } 202 203 @Override 204 public Iterable<FileStore> getFileStores() { 205 ArrayList<FileStore> list = new ArrayList<>(1); 206 list.add(new ZipFileStore(new ZipPath(this, new byte[]{'/'}))); 207 return list; 208 } 209 210 private static final Set<String> supportedFileAttributeViews = 211 Collections.unmodifiableSet( 212 new HashSet<String>(Arrays.asList("basic", "zip"))); 213 214 @Override 215 public Set<String> supportedFileAttributeViews() { 216 return supportedFileAttributeViews; 217 } 218 219 @Override 220 public String toString() { 221 return zfpath.toString(); 222 } 223 224 Path getZipFile() { 225 return zfpath; 226 } 227 228 private static final String GLOB_SYNTAX = "glob"; 229 private static final String REGEX_SYNTAX = "regex"; 230 231 @Override 232 public PathMatcher getPathMatcher(String 
syntaxAndInput) { 233 int pos = syntaxAndInput.indexOf(':'); 234 if (pos <= 0 || pos == syntaxAndInput.length()) { 235 throw new IllegalArgumentException(); 236 } 237 String syntax = syntaxAndInput.substring(0, pos); 238 String input = syntaxAndInput.substring(pos + 1); 239 String expr; 240 if (syntax.equals(GLOB_SYNTAX)) { 241 expr = toRegexPattern(input); 242 } else { 243 if (syntax.equals(REGEX_SYNTAX)) { 244 expr = input; 245 } else { 246 throw new UnsupportedOperationException("Syntax '" + syntax + 247 "' not recognized"); 248 } 249 } 250 // return matcher 251 final Pattern pattern = Pattern.compile(expr); 252 return new PathMatcher() { 253 @Override 254 public boolean matches(Path path) { 255 return pattern.matcher(path.toString()).matches(); 256 } 257 }; 258 } 259 260 @Override 261 public void close() throws IOException { 262 beginWrite(); 263 try { 264 if (!isOpen) 265 return; 266 isOpen = false; // set closed 267 } finally { 268 endWrite(); 269 } 270 if (!streams.isEmpty()) { // unlock and close all remaining streams 271 Set<InputStream> copy = new HashSet<>(streams); 272 for (InputStream is: copy) 273 is.close(); 274 } 275 beginWrite(); // lock and sync 276 try { 277 AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> { 278 sync(); return null; 279 }); 280 ch.close(); // close the ch just in case no update 281 } catch (PrivilegedActionException e) { // and sync dose not close the ch 282 throw (IOException)e.getException(); 283 } finally { 284 endWrite(); 285 } 286 287 synchronized (inflaters) { 288 for (Inflater inf : inflaters) 289 inf.end(); 290 } 291 synchronized (deflaters) { 292 for (Deflater def : deflaters) 293 def.end(); 294 } 295 296 IOException ioe = null; 297 synchronized (tmppaths) { 298 for (Path p: tmppaths) { 299 try { 300 AccessController.doPrivileged( 301 (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p)); 302 } catch (PrivilegedActionException e) { 303 IOException x = (IOException)e.getException(); 304 
if (ioe == null) 305 ioe = x; 306 else 307 ioe.addSuppressed(x); 308 } 309 } 310 } 311 provider.removeFileSystem(zfpath, this); 312 if (ioe != null) 313 throw ioe; 314 } 315 316 ZipFileAttributes getFileAttributes(byte[] path) 317 throws IOException 318 { 319 Entry e; 320 beginRead(); 321 try { 322 ensureOpen(); 323 e = getEntry0(path); 324 if (e == null) { 325 IndexNode inode = getInode(path); 326 if (inode == null) 327 return null; 328 e = new Entry(inode.name); // pseudo directory 329 e.method = METHOD_STORED; // STORED for dir 330 e.mtime = e.atime = e.ctime = -1;// -1 for all times 331 } 332 } finally { 333 endRead(); 334 } 335 return new ZipFileAttributes(e); 336 } 337 338 void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime) 339 throws IOException 340 { 341 checkWritable(); 342 beginWrite(); 343 try { 344 ensureOpen(); 345 Entry e = getEntry0(path); // ensureOpen checked 346 if (e == null) 347 throw new NoSuchFileException(getString(path)); 348 if (e.type == Entry.CEN) 349 e.type = Entry.COPY; // copy e 350 if (mtime != null) 351 e.mtime = mtime.toMillis(); 352 if (atime != null) 353 e.atime = atime.toMillis(); 354 if (ctime != null) 355 e.ctime = ctime.toMillis(); 356 update(e); 357 } finally { 358 endWrite(); 359 } 360 } 361 362 boolean exists(byte[] path) 363 throws IOException 364 { 365 beginRead(); 366 try { 367 ensureOpen(); 368 return getInode(path) != null; 369 } finally { 370 endRead(); 371 } 372 } 373 374 boolean isDirectory(byte[] path) 375 throws IOException 376 { 377 beginRead(); 378 try { 379 IndexNode n = getInode(path); 380 return n != null && n.isDir(); 381 } finally { 382 endRead(); 383 } 384 } 385 386 private ZipPath toZipPath(byte[] path) { 387 // make it absolute 388 byte[] p = new byte[path.length + 1]; 389 p[0] = '/'; 390 System.arraycopy(path, 0, p, 1, path.length); 391 return new ZipPath(this, p); 392 } 393 394 // returns the list of child paths of "path" 395 Iterator<Path> iteratorOf(byte[] path, 396 
DirectoryStream.Filter<? super Path> filter) 397 throws IOException 398 { 399 beginWrite(); // iteration of inodes needs exclusive lock 400 try { 401 ensureOpen(); 402 IndexNode inode = getInode(path); 403 if (inode == null) 404 throw new NotDirectoryException(getString(path)); 405 List<Path> list = new ArrayList<>(); 406 IndexNode child = inode.child; 407 while (child != null) { 408 ZipPath zp = toZipPath(child.name); 409 if (filter == null || filter.accept(zp)) 410 list.add(zp); 411 child = child.sibling; 412 } 413 return list.iterator(); 414 } finally { 415 endWrite(); 416 } 417 } 418 419 void createDirectory(byte[] dir, FileAttribute<?>... attrs) 420 throws IOException 421 { 422 checkWritable(); 423 dir = toDirectoryPath(dir); 424 beginWrite(); 425 try { 426 ensureOpen(); 427 if (dir.length == 0 || exists(dir)) // root dir, or exiting dir 428 throw new FileAlreadyExistsException(getString(dir)); 429 checkParents(dir); 430 Entry e = new Entry(dir, Entry.NEW); 431 e.method = METHOD_STORED; // STORED for dir 432 update(e); 433 } finally { 434 endWrite(); 435 } 436 } 437 438 void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... 
options) 439 throws IOException 440 { 441 checkWritable(); 442 if (Arrays.equals(src, dst)) 443 return; // do nothing, src and dst are the same 444 445 beginWrite(); 446 try { 447 ensureOpen(); 448 Entry eSrc = getEntry0(src); // ensureOpen checked 449 if (eSrc == null) 450 throw new NoSuchFileException(getString(src)); 451 if (eSrc.isDir()) { // spec says to create dst dir 452 createDirectory(dst); 453 return; 454 } 455 boolean hasReplace = false; 456 boolean hasCopyAttrs = false; 457 for (CopyOption opt : options) { 458 if (opt == REPLACE_EXISTING) 459 hasReplace = true; 460 else if (opt == COPY_ATTRIBUTES) 461 hasCopyAttrs = true; 462 } 463 Entry eDst = getEntry0(dst); 464 if (eDst != null) { 465 if (!hasReplace) 466 throw new FileAlreadyExistsException(getString(dst)); 467 } else { 468 checkParents(dst); 469 } 470 Entry u = new Entry(eSrc, Entry.COPY); // copy eSrc entry 471 u.name(dst); // change name 472 if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) 473 { 474 u.type = eSrc.type; // make it the same type 475 if (deletesrc) { // if it's a "rename", take the data 476 u.bytes = eSrc.bytes; 477 u.file = eSrc.file; 478 } else { // if it's not "rename", copy the data 479 if (eSrc.bytes != null) 480 u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length); 481 else if (eSrc.file != null) { 482 u.file = getTempPathForEntry(null); 483 Files.copy(eSrc.file, u.file, REPLACE_EXISTING); 484 } 485 } 486 } 487 if (!hasCopyAttrs) 488 u.mtime = u.atime= u.ctime = System.currentTimeMillis(); 489 update(u); 490 if (deletesrc) 491 updateDelete(eSrc); 492 } finally { 493 endWrite(); 494 } 495 } 496 497 // Returns an output stream for writing the contents into the specified 498 // entry. 499 OutputStream newOutputStream(byte[] path, OpenOption... 
options) 500 throws IOException 501 { 502 checkWritable(); 503 boolean hasCreateNew = false; 504 boolean hasCreate = false; 505 boolean hasAppend = false; 506 for (OpenOption opt: options) { 507 if (opt == READ) 508 throw new IllegalArgumentException("READ not allowed"); 509 if (opt == CREATE_NEW) 510 hasCreateNew = true; 511 if (opt == CREATE) 512 hasCreate = true; 513 if (opt == APPEND) 514 hasAppend = true; 515 } 516 beginRead(); // only need a readlock, the "update()" will 517 try { // try to obtain a writelock when the os is 518 ensureOpen(); // being closed. 519 Entry e = getEntry0(path); 520 if (e != null) { 521 if (e.isDir() || hasCreateNew) 522 throw new FileAlreadyExistsException(getString(path)); 523 if (hasAppend) { 524 InputStream is = getInputStream(e); 525 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 526 copyStream(is, os); 527 is.close(); 528 return os; 529 } 530 return getOutputStream(new Entry(e, Entry.NEW)); 531 } else { 532 if (!hasCreate && !hasCreateNew) 533 throw new NoSuchFileException(getString(path)); 534 checkParents(path); 535 return getOutputStream(new Entry(path, Entry.NEW)); 536 } 537 } finally { 538 endRead(); 539 } 540 } 541 542 // Returns an input stream for reading the contents of the specified 543 // file entry. 544 InputStream newInputStream(byte[] path) throws IOException { 545 beginRead(); 546 try { 547 ensureOpen(); 548 Entry e = getEntry0(path); 549 if (e == null) 550 throw new NoSuchFileException(getString(path)); 551 if (e.isDir()) 552 throw new FileSystemException(getString(path), "is a directory", null); 553 return getInputStream(e); 554 } finally { 555 endRead(); 556 } 557 } 558 559 private void checkOptions(Set<? 
extends OpenOption> options) { 560 // check for options of null type and option is an intance of StandardOpenOption 561 for (OpenOption option : options) { 562 if (option == null) 563 throw new NullPointerException(); 564 if (!(option instanceof StandardOpenOption)) 565 throw new IllegalArgumentException(); 566 } 567 } 568 569 // Returns a Writable/ReadByteChannel for now. Might consdier to use 570 // newFileChannel() instead, which dump the entry data into a regular 571 // file on the default file system and create a FileChannel on top of 572 // it. 573 SeekableByteChannel newByteChannel(byte[] path, 574 Set<? extends OpenOption> options, 575 FileAttribute<?>... attrs) 576 throws IOException 577 { 578 checkOptions(options); 579 if (options.contains(StandardOpenOption.WRITE) || 580 options.contains(StandardOpenOption.APPEND)) { 581 checkWritable(); 582 beginRead(); 583 try { 584 final WritableByteChannel wbc = Channels.newChannel( 585 newOutputStream(path, options.toArray(new OpenOption[0]))); 586 long leftover = 0; 587 if (options.contains(StandardOpenOption.APPEND)) { 588 Entry e = getEntry0(path); 589 if (e != null && e.size >= 0) 590 leftover = e.size; 591 } 592 final long offset = leftover; 593 return new SeekableByteChannel() { 594 long written = offset; 595 public boolean isOpen() { 596 return wbc.isOpen(); 597 } 598 599 public long position() throws IOException { 600 return written; 601 } 602 603 public SeekableByteChannel position(long pos) 604 throws IOException 605 { 606 throw new UnsupportedOperationException(); 607 } 608 609 public int read(ByteBuffer dst) throws IOException { 610 throw new UnsupportedOperationException(); 611 } 612 613 public SeekableByteChannel truncate(long size) 614 throws IOException 615 { 616 throw new UnsupportedOperationException(); 617 } 618 619 public int write(ByteBuffer src) throws IOException { 620 int n = wbc.write(src); 621 written += n; 622 return n; 623 } 624 625 public long size() throws IOException { 626 return 
written; 627 } 628 629 public void close() throws IOException { 630 wbc.close(); 631 } 632 }; 633 } finally { 634 endRead(); 635 } 636 } else { 637 beginRead(); 638 try { 639 ensureOpen(); 640 Entry e = getEntry0(path); 641 if (e == null || e.isDir()) 642 throw new NoSuchFileException(getString(path)); 643 final ReadableByteChannel rbc = 644 Channels.newChannel(getInputStream(e)); 645 final long size = e.size; 646 return new SeekableByteChannel() { 647 long read = 0; 648 public boolean isOpen() { 649 return rbc.isOpen(); 650 } 651 652 public long position() throws IOException { 653 return read; 654 } 655 656 public SeekableByteChannel position(long pos) 657 throws IOException 658 { 659 throw new UnsupportedOperationException(); 660 } 661 662 public int read(ByteBuffer dst) throws IOException { 663 int n = rbc.read(dst); 664 if (n > 0) { 665 read += n; 666 } 667 return n; 668 } 669 670 public SeekableByteChannel truncate(long size) 671 throws IOException 672 { 673 throw new NonWritableChannelException(); 674 } 675 676 public int write (ByteBuffer src) throws IOException { 677 throw new NonWritableChannelException(); 678 } 679 680 public long size() throws IOException { 681 return size; 682 } 683 684 public void close() throws IOException { 685 rbc.close(); 686 } 687 }; 688 } finally { 689 endRead(); 690 } 691 } 692 } 693 694 // Returns a FileChannel of the specified entry. 695 // 696 // This implementation creates a temporary file on the default file system, 697 // copy the entry data into it if the entry exists, and then create a 698 // FileChannel on top of it. 699 FileChannel newFileChannel(byte[] path, 700 Set<? extends OpenOption> options, 701 FileAttribute<?>... 
attrs) 702 throws IOException 703 { 704 checkOptions(options); 705 final boolean forWrite = (options.contains(StandardOpenOption.WRITE) || 706 options.contains(StandardOpenOption.APPEND)); 707 beginRead(); 708 try { 709 ensureOpen(); 710 Entry e = getEntry0(path); 711 if (forWrite) { 712 checkWritable(); 713 if (e == null) { 714 if (!options.contains(StandardOpenOption.CREATE_NEW)) 715 throw new NoSuchFileException(getString(path)); 716 } else { 717 if (options.contains(StandardOpenOption.CREATE_NEW)) 718 throw new FileAlreadyExistsException(getString(path)); 719 if (e.isDir()) 720 throw new FileAlreadyExistsException("directory <" 721 + getString(path) + "> exists"); 722 } 723 options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile 724 } else if (e == null || e.isDir()) { 725 throw new NoSuchFileException(getString(path)); 726 } 727 728 final boolean isFCH = (e != null && e.type == Entry.FILECH); 729 final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path); 730 final FileChannel fch = tmpfile.getFileSystem() 731 .provider() 732 .newFileChannel(tmpfile, options, attrs); 733 final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH); 734 if (forWrite) { 735 u.flag = FLAG_DATADESCR; 736 u.method = METHOD_DEFLATED; 737 } 738 // is there a better way to hook into the FileChannel's close method? 
739 return new FileChannel() { 740 public int write(ByteBuffer src) throws IOException { 741 return fch.write(src); 742 } 743 public long write(ByteBuffer[] srcs, int offset, int length) 744 throws IOException 745 { 746 return fch.write(srcs, offset, length); 747 } 748 public long position() throws IOException { 749 return fch.position(); 750 } 751 public FileChannel position(long newPosition) 752 throws IOException 753 { 754 fch.position(newPosition); 755 return this; 756 } 757 public long size() throws IOException { 758 return fch.size(); 759 } 760 public FileChannel truncate(long size) 761 throws IOException 762 { 763 fch.truncate(size); 764 return this; 765 } 766 public void force(boolean metaData) 767 throws IOException 768 { 769 fch.force(metaData); 770 } 771 public long transferTo(long position, long count, 772 WritableByteChannel target) 773 throws IOException 774 { 775 return fch.transferTo(position, count, target); 776 } 777 public long transferFrom(ReadableByteChannel src, 778 long position, long count) 779 throws IOException 780 { 781 return fch.transferFrom(src, position, count); 782 } 783 public int read(ByteBuffer dst) throws IOException { 784 return fch.read(dst); 785 } 786 public int read(ByteBuffer dst, long position) 787 throws IOException 788 { 789 return fch.read(dst, position); 790 } 791 public long read(ByteBuffer[] dsts, int offset, int length) 792 throws IOException 793 { 794 return fch.read(dsts, offset, length); 795 } 796 public int write(ByteBuffer src, long position) 797 throws IOException 798 { 799 return fch.write(src, position); 800 } 801 public MappedByteBuffer map(MapMode mode, 802 long position, long size) 803 throws IOException 804 { 805 throw new UnsupportedOperationException(); 806 } 807 public FileLock lock(long position, long size, boolean shared) 808 throws IOException 809 { 810 return fch.lock(position, size, shared); 811 } 812 public FileLock tryLock(long position, long size, boolean shared) 813 throws IOException 814 { 
815 return fch.tryLock(position, size, shared); 816 } 817 protected void implCloseChannel() throws IOException { 818 fch.close(); 819 if (forWrite) { 820 u.mtime = System.currentTimeMillis(); 821 u.size = Files.size(u.file); 822 823 update(u); 824 } else { 825 if (!isFCH) // if this is a new fch for reading 826 removeTempPathForEntry(tmpfile); 827 } 828 } 829 }; 830 } finally { 831 endRead(); 832 } 833 } 834 835 // the outstanding input streams that need to be closed 836 private Set<InputStream> streams = 837 Collections.synchronizedSet(new HashSet<InputStream>()); 838 839 // the ex-channel and ex-path that need to close when their outstanding 840 // input streams are all closed by the obtainers. 841 private Set<ExChannelCloser> exChClosers = new HashSet<>(); 842 843 private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>()); 844 private Path getTempPathForEntry(byte[] path) throws IOException { 845 Path tmpPath = createTempFileInSameDirectoryAs(zfpath); 846 if (path != null) { 847 Entry e = getEntry0(path); 848 if (e != null) { 849 try (InputStream is = newInputStream(path)) { 850 Files.copy(is, tmpPath, REPLACE_EXISTING); 851 } 852 } 853 } 854 return tmpPath; 855 } 856 857 private void removeTempPathForEntry(Path path) throws IOException { 858 Files.delete(path); 859 tmppaths.remove(path); 860 } 861 862 // check if all parents really exit. ZIP spec does not require 863 // the existence of any "parent directory". 
    private void checkParents(byte[] path) throws IOException {
        beginRead();
        try {
            // walk up the parent chain; the root (empty path) always exists
            while ((path = getParent(path)) != null && path.length != 0) {
                if (!inodes.containsKey(IndexNode.keyOf(path))) {
                    throw new NoSuchFileException(getString(path));
                }
            }
        } finally {
            endRead();
        }
    }

    private static byte[] ROOTPATH = new byte[0];

    // Returns the parent path (including its trailing '/') of the given
    // entry path, or ROOTPATH when the entry sits directly under the root.
    private static byte[] getParent(byte[] path) {
        int off = path.length - 1;
        if (off > 0 && path[off] == '/')  // isDirectory
            off--;
        while (off > 0 && path[off] != '/') { off--; }
        if (off <= 0)
            return ROOTPATH;
        return Arrays.copyOf(path, off + 1);
    }

    private final void beginWrite() {
        rwlock.writeLock().lock();
    }

    private final void endWrite() {
        rwlock.writeLock().unlock();
    }

    private final void beginRead() {
        rwlock.readLock().lock();
    }

    private final void endRead() {
        rwlock.readLock().unlock();
    }

    ///////////////////////////////////////////////////////////////////

    private volatile boolean isOpen = true;
    private final SeekableByteChannel ch;   // channel to the zipfile
    final byte[] cen;                       // CEN & ENDHDR
    private END end;
    private long locpos;                    // position of first LOC header (usually 0)

    private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

    // name -> pos (in cen), IndexNode itself can be used as a "key"
    private LinkedHashMap<IndexNode, IndexNode> inodes;

    final byte[] getBytes(String name) {
        return zc.getBytes(name);
    }

    final String getString(byte[] name) {
        return zc.toString(name);
    }

    // NOTE(review): finalization is a deprecated mechanism; close() is invoked
    // here only as a last-resort safety net for never-closed file systems.
    protected void finalize() throws IOException {
        close();
    }

    // Returns the position of the entry's data, first resolving a
    // lazily-filled locoff (-1) from the CEN copy of the entry.
    private long getDataPos(Entry e) throws IOException {
        if (e.locoff == -1) {
            Entry e2 = getEntry0(e.name);
            if (e2 == null)
                throw new ZipException("invalid loc for entry <" + e.name + ">");
            e.locoff = e2.locoff;
        }
        byte[] buf = new byte[LOCHDR];
        if (readFullyAt(buf, 0, buf.length, e.locoff) != buf.length)
            throw new ZipException("invalid loc for entry <" + e.name + ">");
        return locpos + e.locoff + LOCHDR + LOCNAM(buf) + LOCEXT(buf);
    }

    // Reads len bytes of data from the specified offset into buf.
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        synchronized(ch) {  // ch is shared: position() + read() must be atomic
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards from the end of the file in overlapping blocks
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P' &&
                    buf[i+1] == (byte)'K' &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    // a ZIP64_MINVAL marker means the real values live in the
                    // zip64 END record; fall back to the 32-bit END otherwise
                    if (end.cenlen == ZIP64_MINVAL ||
                        end.cenoff == ZIP64_MINVAL ||
                        end.centot == ZIP64_MINVAL32)
                    {
                        // need to find the zip64 end;
                        byte[] loc64 = new byte[ZIP64_LOCHDR];
                        if (readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                            != loc64.length) {
                            return end;
                        }
                        long end64pos = ZIP64_LOCOFF(loc64);
                        byte[] end64buf = new byte[ZIP64_ENDHDR];
                        if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                            != end64buf.length) {
                            return end;
                        }
                        // end64 found, re-calculate everything.
                        end.cenlen = ZIP64_ENDSIZ(end64buf);
                        end.cenoff = ZIP64_ENDOFF(end64buf);
                        end.centot = (int)ZIP64_ENDTOT(end64buf); // assume total < 2g
                        end.endpos = end64pos;
                    }
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }

    // Reads zip file central directory.
    // Returns the file position of first CEN header, otherwise returns -1 if
    // an error occurred. If zip->msg != NULL then the error was a zip format
    // error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;             // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (CENSIG(cen, pos) != CENSIG)
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0) {
                zerror("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                zerror("invalid CEN header (bad header size)");
            }
            byte[] name = Arrays.copyOfRange(cen, pos + CENHDR, pos + CENHDR + nlen);
            IndexNode inode = new IndexNode(name, pos);
            inodes.put(inode, inode);
            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);    // remembered so close() can clean it up
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    // set whenever an entry is added/changed/removed; sync() is a no-op otherwise
    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = IndexNode.keyOf(null);

    // Removes the entry from both the directory tree and the inode map.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Installs (or replaces) an entry in the inode map and, for new/changed
    // entries, links it into its parent's child list.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
    // Copies one LOC entry (header, data and optional data descriptor) from
    // the old zip file into "os" at offset "written".  If updateHeader is
    // true the LOC header is re-generated (e.g. the name changed for a COPY
    // entry); otherwise the original header bytes are copied verbatim.
    // Returns the number of bytes written; "buf" is a caller-supplied
    // scratch buffer (at least LOCHDR bytes).
    private long copyLOCEntry(Entry e, boolean updateHeader,
                              OutputStream os,
                              long written, byte[] buf)
        throws IOException
    {
        long locoff = e.locoff;    // where to read
        e.locoff = written;        // update the e.locoff with new value

        // calculate the size need to write out
        long size = 0;
        // if there is A ext (data descriptor): 24 bytes with zip64 sizes,
        // 16 bytes otherwise
        if ((e.flag & FLAG_DATADESCR) != 0) {
            if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL)
                size = 24;
            else
                size = 16;
        }
        // read loc, use the original loc.elen/nlen
        if (readFullyAt(buf, 0, LOCHDR , locoff) != LOCHDR)
            throw new ZipException("loc: reading failed");
        if (updateHeader) {
            locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header
            size += e.csize;
            written = e.writeLOC(os) + size;
        } else {
            os.write(buf, 0, LOCHDR);    // write out the loc header
            locoff += LOCHDR;
            // use e.csize,  LOCSIZ(buf) is zero if FLAG_DATADESCR is on
            // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf);
            size += LOCNAM(buf) + LOCEXT(buf) + e.csize;
            written = LOCHDR + size;
        }
        // stream the remaining "size" bytes from the old file to "os"
        int n;
        while (size > 0 &&
            (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1)
        {
            if (size < n)
                n = (int)size;
            os.write(buf, 0, n);
            size -= n;
            locoff += n;
        }
        return written;
    }

    // sync the zip file system, if there is any update: rewrites the whole
    // zip into a temp file (copying unchanged entries, emitting updated
    // ones), then moves the temp file over zfpath.
    private void sync() throws IOException {
        //System.out.printf("->sync(%s) starting....!%n", toString());
        // check ex-closer
        if (!exChClosers.isEmpty()) {
            // NOTE(review): removing from exChClosers while iterating it
            // will throw ConcurrentModificationException for most List
            // implementations — verify exChClosers' type tolerates this,
            // or switch to an explicit Iterator with it.remove().
            for (ExChannelCloser ecc : exChClosers) {
                if (ecc.streams.isEmpty()) {
                    ecc.ch.close();
                    Files.delete(ecc.path);
                    exChClosers.remove(ecc);
                }
            }
        }
        if (!hasUpdate)
            return;
        Path tmpFile = createTempFileInSameDirectoryAs(zfpath);
        try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE)))
        {
            ArrayList<Entry> elist = new ArrayList<>(inodes.size());
            long written = 0;
            byte[] buf = new byte[8192];
            Entry e = null;

            // write loc
            for (IndexNode inode : inodes.values()) {
                if (inode instanceof Entry) {    // an updated inode
                    e = (Entry)inode;
                    try {
                        if (e.type == Entry.COPY) {
                            // entry copy: the only thing changed is the "name"
                            // and "nlen" in LOC header, so we update/rewrite the
                            // LOC in new file and simply copy the rest (data and
                            // ext) without enflating/deflating from the old zip
                            // file LOC entry.
                            written += copyLOCEntry(e, true, os, written, buf);
                        } else {                          // NEW, FILECH or CEN
                            e.locoff = written;
                            written += e.writeLOC(os);    // write loc header
                            if (e.bytes != null) {        // in-memory, deflated
                                os.write(e.bytes);        // already
                                written += e.bytes.length;
                            } else if (e.file != null) {  // tmp file
                                try (InputStream is = Files.newInputStream(e.file)) {
                                    int n;
                                    if (e.type == Entry.NEW) {  // deflated already
                                        while ((n = is.read(buf)) != -1) {
                                            os.write(buf, 0, n);
                                            written += n;
                                        }
                                    } else if (e.type == Entry.FILECH) {
                                        // the data are not deflated, use ZEOS
                                        // (EntryOutputStream) to compress them
                                        try (OutputStream os2 = new EntryOutputStream(e, os)) {
                                            while ((n = is.read(buf)) != -1) {
                                                os2.write(buf, 0, n);
                                            }
                                        }
                                        written += e.csize;
                                        if ((e.flag & FLAG_DATADESCR) != 0)
                                            written += e.writeEXT(os);
                                    }
                                }
                                Files.delete(e.file);
                                tmppaths.remove(e.file);
                            } else {
                                // dir, 0-length data
                            }
                        }
                        elist.add(e);
                    } catch (IOException x) {
                        // NOTE(review): swallowing the exception silently
                        // drops the entry from the rewritten zip; consider
                        // proper logging/propagation.
                        x.printStackTrace();    // skip any in-accurate entry
                    }
                } else {                        // unchanged inode
                    if (inode.pos == -1) {
                        continue;               // pseudo directory node
                    }
                    e = Entry.readCEN(this, inode.pos);
                    try {
                        written += copyLOCEntry(e, false, os, written, buf);
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();    // skip any wrong entry
                    }
                }
            }

            // now write back the cen and end table
            end.cenoff = written;
            for (Entry entry : elist) {
                written += entry.writeCEN(os);
            }
            end.centot = elist.size();
            end.cenlen = written - end.cenoff;
            end.write(os, written);
        }
        if (!streams.isEmpty()) {
            //
            // TBD: ExChannelCloser should not be necessary if we only
            // sync when being closed, all streams should have been
            // closed already. Keep the logic here for now.
            //
            // There are outstanding input streams open on existing "ch",
            // so, don't close the "cha" and delete the "file for now, let
            // the "ex-channel-closer" to handle them
            ExChannelCloser ecc = new ExChannelCloser(
                                      createTempFileInSameDirectoryAs(zfpath),
                                      ch,
                                      streams);
            Files.move(zfpath, ecc.path, REPLACE_EXISTING);
            exChClosers.add(ecc);
            streams = Collections.synchronizedSet(new HashSet<InputStream>());
        } else {
            ch.close();
            Files.delete(zfpath);
        }

        // atomically (best effort) replace the old zip with the new one
        Files.move(tmpFile, zfpath, REPLACE_EXISTING);
        hasUpdate = false;    // clear
        /*
        if (isOpen) {
            ch = zfpath.newByteChannel(READ); // re-fresh "ch" and "cen"
            cen = initCEN();
        }
        */
        //System.out.printf("->sync(%s) done!%n", toString());
    }

    // Looks up the IndexNode for "path"; if not found and the path does not
    // already end with '/', retries with a trailing slash appended so that
    // directory entries stored as "dir/" are found too. Returns null when
    // the path does not exist.
    private IndexNode getInode(byte[] path) {
        if (path == null)
            throw new NullPointerException("path");
        IndexNode key = IndexNode.keyOf(path);
        IndexNode inode = inodes.get(key);
        if (inode == null &&
            (path.length == 0 || path[path.length -1] != '/')) {
            // if does not ends with a slash
            path = Arrays.copyOf(path, path.length + 1);
            path[path.length - 1] = '/';
            inode = inodes.get(key.as(path));
        }
        return inode;
    }

    // Returns the full Entry for "path": either the in-memory updated Entry,
    // or one freshly parsed from the CEN table. Returns null for missing
    // paths and for pseudo directory nodes (pos == -1).
    private Entry getEntry0(byte[] path) throws IOException {
        IndexNode inode = getInode(path);
        if (inode instanceof Entry)
            return (Entry)inode;
        if (inode == null || inode.pos == -1)
            return null;
        return Entry.readCEN(this, inode.pos);
    }

    // Deletes the entry at "path". Throws NoSuchFileException when absent
    // (if failIfNotExists), DirectoryNotEmptyException for non-empty dirs,
    // and ZipException when attempting to delete the root directory.
    public void deleteFile(byte[] path, boolean failIfNotExists)
        throws IOException
    {
        checkWritable();

        IndexNode inode = getInode(path);
        if (inode == null) {
            // NOTE(review): the message below is garbled English
            // ("can't not be delete"); consider fixing it to
            // "root directory </> can't be deleted" in a code change.
            if (path != null && path.length == 0)
                throw new ZipException("root directory </> can't not be delete");
            if (failIfNotExists)
                throw new NoSuchFileException(getString(path));
        } else {
            if (inode.isDir() && inode.child != null)
                throw new DirectoryNotEmptyException(getString(path));
            updateDelete(inode);
        }
    }

    // Copies all remaining bytes from "is" to "os" using an 8K buffer.
    private static void copyStream(InputStream is, OutputStream os)
        throws IOException
    {
        byte[] copyBuf = new byte[8192];
        int n;
        while ((n = is.read(copyBuf)) != -1) {
            os.write(copyBuf, 0, n);
        }
    }

    // Returns an out stream for either
    // (1) writing the contents of a new entry, if the entry exits, or
    // (2) updating/replacing the contents of the specified existing entry.
    // The data are buffered either in a temp file (useTempFile) or in a
    // ByteArrayOutputStream, wrapped by an EntryOutputStream that
    // compresses and tracks size/crc.
    private OutputStream getOutputStream(Entry e) throws IOException {

        if (e.mtime == -1)
            e.mtime = System.currentTimeMillis();
        if (e.method == -1)
            e.method = METHOD_DEFLATED; // TBD: use default method
        // store size, compressed size, and crc-32 in LOC header
        e.flag = 0;
        if (zc.isUTF8())
            e.flag |= FLAG_EFS;    // names/comments are UTF-8 encoded
        OutputStream os;
        if (useTempFile) {
            e.file = getTempPathForEntry(null);
            os = Files.newOutputStream(e.file, WRITE);
        } else {
            os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192);
        }
        return new EntryOutputStream(e, os);
    }

    // Returns an input stream over the (decompressed) data of entry "e",
    // reading from in-memory bytes, a temp file, or the zip channel
    // depending on the entry's type. Deflated data are wrapped in an
    // InflaterInputStream. Open streams are tracked in "streams".
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis = null;

        if (e.type == Entry.NEW) {
            if (e.bytes != null)
                eis = new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                eis = Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {  // untouced CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {

                private boolean isClosed = false;
                // returns the cached inflater to the pool exactly once
                public void close() throws IOException {
                    if (!isClosed) {
                        releaseInflater(inf);
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                // remaining uncompressed bytes, clamped to int range
                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry
        protected final long size;              // uncompressed size of this entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            size = e.size;
            pos = getDataPos(e);
        }
        // Reads up to len raw (possibly compressed) bytes into b at off.
        // Auto-closes when the entry is exhausted.
        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // serialize position+read: the channel position is shared state
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }
        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }
        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }
        public int available() {
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }
        public long size() {
            return size;
        }
        public void close() {
            rem = 0;
            streams.remove(this);
        }
    }

    // Output stream for writing entry data: deflates (or stores) the bytes,
    // tracks size/csize/crc, and on close() records the result into the
    // Entry and registers the update with the file system.
    class EntryOutputStream extends DeflaterOutputStream
    {
        private CRC32 crc;
        private Entry e;
        private long written;          // bytes written for STORED entries
        private boolean isClosed = false;

        EntryOutputStream(Entry e, OutputStream os)
            throws IOException
        {
            super(os, getDeflater());
            if (e == null)
                throw new NullPointerException("Zip entry is null");
            this.e = e;
            crc = new CRC32();
        }

        @Override
        public synchronized void write(byte b[], int off, int len)
            throws IOException
        {
            if (e.type != Entry.FILECH)    // only from sync
                ensureOpen();
            if (isClosed) {
                throw new IOException("Stream closed");
            }
            if (off < 0 || len < 0 || off > b.length - len) {
                throw new IndexOutOfBoundsException();
            } else if (len == 0) {
                return;
            }
            switch (e.method) {
            case METHOD_DEFLATED:
                super.write(b, off, len);
                break;
            case METHOD_STORED:
                written += len;
                out.write(b, off, len);
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            crc.update(b, off, len);
        }

        @Override
        public synchronized void close() throws IOException {
            if (isClosed) {
                return;
            }
            isClosed = true;
            // TBD ensureOpen();
            switch (e.method) {
            case METHOD_DEFLATED:
                finish();
                e.size  = def.getBytesRead();
                e.csize = def.getBytesWritten();
                e.crc = crc.getValue();
                break;
            case METHOD_STORED:
                // we already know that both e.size and e.csize are the same
                e.size = e.csize = written;
                e.crc = crc.getValue();
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            //crc.reset();
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();

            if (e.type == Entry.FILECH) {
                // sync()-driven write: don't close the underlying zip stream
                releaseDeflater(def);
                return;
            }
            super.close();
            releaseDeflater(def);
            update(e);
        }
    }

    // Convenience: throws a ZipException with the given message.
    static void zerror(String msg) throws ZipException {
        throw new ZipException(msg);
    }

    // Maximum number of de/inflater we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                return new Inflater(true);    // true => raw ("nowrap") mode
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
    private Deflater getDeflater() {
        synchronized (deflaters) {
            int size = deflaters.size();
            if (size > 0) {
                Deflater def = deflaters.remove(size - 1);
                return def;
            } else {
                return new Deflater(Deflater.DEFAULT_COMPRESSION, true);
            }
        }
    }

    // Releases the specified deflater to the list of available deflaters.
1676 private void releaseDeflater(Deflater def) { 1677 synchronized (deflaters) { 1678 if (inflaters.size() < MAX_FLATER) { 1679 def.reset(); 1680 deflaters.add(def); 1681 } else { 1682 def.end(); 1683 } 1684 } 1685 } 1686 1687 // End of central directory record 1688 static class END { 1689 int disknum; 1690 int sdisknum; 1691 int endsub; // endsub 1692 int centot; // 4 bytes 1693 long cenlen; // 4 bytes 1694 long cenoff; // 4 bytes 1695 int comlen; // comment length 1696 byte[] comment; 1697 1698 /* members of Zip64 end of central directory locator */ 1699 int diskNum; 1700 long endpos; 1701 int disktot; 1702 1703 void write(OutputStream os, long offset) throws IOException { 1704 boolean hasZip64 = false; 1705 long xlen = cenlen; 1706 long xoff = cenoff; 1707 if (xlen >= ZIP64_MINVAL) { 1708 xlen = ZIP64_MINVAL; 1709 hasZip64 = true; 1710 } 1711 if (xoff >= ZIP64_MINVAL) { 1712 xoff = ZIP64_MINVAL; 1713 hasZip64 = true; 1714 } 1715 int count = centot; 1716 if (count >= ZIP64_MINVAL32) { 1717 count = ZIP64_MINVAL32; 1718 hasZip64 = true; 1719 } 1720 if (hasZip64) { 1721 long off64 = offset; 1722 //zip64 end of central directory record 1723 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1724 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1725 writeShort(os, 45); // version made by 1726 writeShort(os, 45); // version needed to extract 1727 writeInt(os, 0); // number of this disk 1728 writeInt(os, 0); // central directory start disk 1729 writeLong(os, centot); // number of directory entires on disk 1730 writeLong(os, centot); // number of directory entires 1731 writeLong(os, cenlen); // length of central directory 1732 writeLong(os, cenoff); // offset of central directory 1733 1734 //zip64 end of central directory locator 1735 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1736 writeInt(os, 0); // zip64 END start disk 1737 writeLong(os, off64); // offset of zip64 END 1738 writeInt(os, 1); // total number of disks (?) 
            }
            writeInt(os, ENDSIG);          // END record signature
            writeShort(os, 0);             // number of this disk
            writeShort(os, 0);             // central directory start disk
            writeShort(os, count);         // number of directory entries on disk
            writeShort(os, count);         // total number of directory entries
            writeInt(os, xlen);            // length of central directory
            writeInt(os, xoff);            // offset of central directory
            if (comment != null) {         // zip file comment
                writeShort(os, comment.length);
                writeBytes(os, comment);
            } else {
                writeShort(os, 0);
            }
        }
    }

    // Internal node that links a "name" to its pos in cen table.
    // The node itself can be used as a "key" to lookup itself in
    // the HashMap inodes (hashed/compared by name bytes only).
    static class IndexNode {
        byte[] name;      // entry name bytes
        int hashcode;     // node is hashable/hashed by its name
        int pos = -1;     // position in cen table, -1 means the
                          // entry does not exist in the zip file
        IndexNode(byte[] name, int pos) {
            name(name);
            this.pos = pos;
        }

        // Returns a fresh node usable purely as a map lookup key.
        final static IndexNode keyOf(byte[] name) { // get a lookup key;
            return new IndexNode(name, -1);
        }

        // Sets the name and (re)computes the cached hash code.
        final void name(byte[] name) {
            this.name = name;
            this.hashcode = Arrays.hashCode(name);
        }

        final IndexNode as(byte[] name) {  // reuse the node, mostly
            name(name);                    // as a lookup "key"
            return this;
        }

        // A node is a directory if its name is empty (root) or
        // ends with '/'.
        boolean isDir() {
            return name != null &&
                   (name.length == 0 || name[name.length - 1] == '/');
        }

        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            return Arrays.equals(name, ((IndexNode)other).name);
        }

        public int hashCode() {
            return hashcode;
        }

        IndexNode() {}
        // links for the in-memory directory tree built by buildNodeTree()
        IndexNode sibling;   // next sibling under the same parent
        IndexNode child;     // 1st child

    }

    static class Entry extends IndexNode {

        static final int CEN = 1;    // entry read from cen
        static final int NEW = 2;    //
updated contents in bytes or file 1808 static final int FILECH = 3; // fch update in "file" 1809 static final int COPY = 4; // copy of a CEN entry 1810 1811 1812 byte[] bytes; // updated content bytes 1813 Path file; // use tmp file to store bytes; 1814 int type = CEN; // default is the entry read from cen 1815 1816 // entry attributes 1817 int version; 1818 int flag; 1819 int method = -1; // compression method 1820 long mtime = -1; // last modification time (in DOS time) 1821 long atime = -1; // last access time 1822 long ctime = -1; // create time 1823 long crc = -1; // crc-32 of entry data 1824 long csize = -1; // compressed size of entry data 1825 long size = -1; // uncompressed size of entry data 1826 byte[] extra; 1827 1828 // cen 1829 int versionMade; 1830 int disk; 1831 int attrs; 1832 long attrsEx; 1833 long locoff; 1834 byte[] comment; 1835 1836 Entry() {} 1837 1838 Entry(byte[] name) { 1839 name(name); 1840 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1841 this.crc = 0; 1842 this.size = 0; 1843 this.csize = 0; 1844 this.method = METHOD_DEFLATED; 1845 } 1846 1847 Entry(byte[] name, int type) { 1848 this(name); 1849 this.type = type; 1850 } 1851 1852 Entry (Entry e, int type) { 1853 name(e.name); 1854 this.version = e.version; 1855 this.ctime = e.ctime; 1856 this.atime = e.atime; 1857 this.mtime = e.mtime; 1858 this.crc = e.crc; 1859 this.size = e.size; 1860 this.csize = e.csize; 1861 this.method = e.method; 1862 this.extra = e.extra; 1863 this.versionMade = e.versionMade; 1864 this.disk = e.disk; 1865 this.attrs = e.attrs; 1866 this.attrsEx = e.attrsEx; 1867 this.locoff = e.locoff; 1868 this.comment = e.comment; 1869 this.type = type; 1870 } 1871 1872 Entry (byte[] name, Path file, int type) { 1873 this(name, type); 1874 this.file = file; 1875 this.method = METHOD_STORED; 1876 } 1877 1878 int version() throws ZipException { 1879 if (method == METHOD_DEFLATED) 1880 return 20; 1881 else if (method == METHOD_STORED) 1882 return 10; 1883 
throw new ZipException("unsupported compression method"); 1884 } 1885 1886 ///////////////////// CEN ////////////////////// 1887 static Entry readCEN(ZipFileSystem zipfs, int pos) 1888 throws IOException 1889 { 1890 return new Entry().cen(zipfs, pos); 1891 } 1892 1893 private Entry cen(ZipFileSystem zipfs, int pos) 1894 throws IOException 1895 { 1896 byte[] cen = zipfs.cen; 1897 if (CENSIG(cen, pos) != CENSIG) 1898 zerror("invalid CEN header (bad signature)"); 1899 versionMade = CENVEM(cen, pos); 1900 version = CENVER(cen, pos); 1901 flag = CENFLG(cen, pos); 1902 method = CENHOW(cen, pos); 1903 mtime = dosToJavaTime(CENTIM(cen, pos)); 1904 crc = CENCRC(cen, pos); 1905 csize = CENSIZ(cen, pos); 1906 size = CENLEN(cen, pos); 1907 int nlen = CENNAM(cen, pos); 1908 int elen = CENEXT(cen, pos); 1909 int clen = CENCOM(cen, pos); 1910 disk = CENDSK(cen, pos); 1911 attrs = CENATT(cen, pos); 1912 attrsEx = CENATX(cen, pos); 1913 locoff = CENOFF(cen, pos); 1914 1915 pos += CENHDR; 1916 name(Arrays.copyOfRange(cen, pos, pos + nlen)); 1917 1918 pos += nlen; 1919 if (elen > 0) { 1920 extra = Arrays.copyOfRange(cen, pos, pos + elen); 1921 pos += elen; 1922 readExtra(zipfs); 1923 } 1924 if (clen > 0) { 1925 comment = Arrays.copyOfRange(cen, pos, pos + clen); 1926 } 1927 return this; 1928 } 1929 1930 int writeCEN(OutputStream os) throws IOException 1931 { 1932 int written = CENHDR; 1933 int version0 = version(); 1934 long csize0 = csize; 1935 long size0 = size; 1936 long locoff0 = locoff; 1937 int elen64 = 0; // extra for ZIP64 1938 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 1939 int elenEXTT = 0; // extra for Extended Timestamp 1940 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 1941 1942 // confirm size/length 1943 int nlen = (name != null) ? name.length : 0; 1944 int elen = (extra != null) ? extra.length : 0; 1945 int eoff = 0; 1946 int clen = (comment != null) ? 
comment.length : 0; 1947 if (csize >= ZIP64_MINVAL) { 1948 csize0 = ZIP64_MINVAL; 1949 elen64 += 8; // csize(8) 1950 } 1951 if (size >= ZIP64_MINVAL) { 1952 size0 = ZIP64_MINVAL; // size(8) 1953 elen64 += 8; 1954 } 1955 if (locoff >= ZIP64_MINVAL) { 1956 locoff0 = ZIP64_MINVAL; 1957 elen64 += 8; // offset(8) 1958 } 1959 if (elen64 != 0) { 1960 elen64 += 4; // header and data sz 4 bytes 1961 } 1962 while (eoff + 4 < elen) { 1963 int tag = SH(extra, eoff); 1964 int sz = SH(extra, eoff + 2); 1965 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 1966 foundExtraTime = true; 1967 } 1968 eoff += (4 + sz); 1969 } 1970 if (!foundExtraTime) { 1971 if (isWindows) { // use NTFS 1972 elenNTFS = 36; // total 36 bytes 1973 } else { // Extended Timestamp otherwise 1974 elenEXTT = 9; // only mtime in cen 1975 } 1976 } 1977 writeInt(os, CENSIG); // CEN header signature 1978 if (elen64 != 0) { 1979 writeShort(os, 45); // ver 4.5 for zip64 1980 writeShort(os, 45); 1981 } else { 1982 writeShort(os, version0); // version made by 1983 writeShort(os, version0); // version needed to extract 1984 } 1985 writeShort(os, flag); // general purpose bit flag 1986 writeShort(os, method); // compression method 1987 // last modification time 1988 writeInt(os, (int)javaToDosTime(mtime)); 1989 writeInt(os, crc); // crc-32 1990 writeInt(os, csize0); // compressed size 1991 writeInt(os, size0); // uncompressed size 1992 writeShort(os, name.length); 1993 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 1994 1995 if (comment != null) { 1996 writeShort(os, Math.min(clen, 0xffff)); 1997 } else { 1998 writeShort(os, 0); 1999 } 2000 writeShort(os, 0); // starting disk number 2001 writeShort(os, 0); // internal file attributes (unused) 2002 writeInt(os, 0); // external file attributes (unused) 2003 writeInt(os, locoff0); // relative offset of local header 2004 writeBytes(os, name); 2005 if (elen64 != 0) { 2006 writeShort(os, EXTID_ZIP64);// Zip64 extra 2007 writeShort(os, elen64 - 4); // size of "this" extra 
block 2008 if (size0 == ZIP64_MINVAL) 2009 writeLong(os, size); 2010 if (csize0 == ZIP64_MINVAL) 2011 writeLong(os, csize); 2012 if (locoff0 == ZIP64_MINVAL) 2013 writeLong(os, locoff); 2014 } 2015 if (elenNTFS != 0) { 2016 writeShort(os, EXTID_NTFS); 2017 writeShort(os, elenNTFS - 4); 2018 writeInt(os, 0); // reserved 2019 writeShort(os, 0x0001); // NTFS attr tag 2020 writeShort(os, 24); 2021 writeLong(os, javaToWinTime(mtime)); 2022 writeLong(os, javaToWinTime(atime)); 2023 writeLong(os, javaToWinTime(ctime)); 2024 } 2025 if (elenEXTT != 0) { 2026 writeShort(os, EXTID_EXTT); 2027 writeShort(os, elenEXTT - 4); 2028 if (ctime == -1) 2029 os.write(0x3); // mtime and atime 2030 else 2031 os.write(0x7); // mtime, atime and ctime 2032 writeInt(os, javaToUnixTime(mtime)); 2033 } 2034 if (extra != null) // whatever not recognized 2035 writeBytes(os, extra); 2036 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2037 writeBytes(os, comment); 2038 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2039 } 2040 2041 ///////////////////// LOC ////////////////////// 2042 static Entry readLOC(ZipFileSystem zipfs, long pos) 2043 throws IOException 2044 { 2045 return readLOC(zipfs, pos, new byte[1024]); 2046 } 2047 2048 static Entry readLOC(ZipFileSystem zipfs, long pos, byte[] buf) 2049 throws IOException 2050 { 2051 return new Entry().loc(zipfs, pos, buf); 2052 } 2053 2054 Entry loc(ZipFileSystem zipfs, long pos, byte[] buf) 2055 throws IOException 2056 { 2057 assert (buf.length >= LOCHDR); 2058 if (zipfs.readFullyAt(buf, 0, LOCHDR , pos) != LOCHDR) 2059 throw new ZipException("loc: reading failed"); 2060 if (LOCSIG(buf) != LOCSIG) 2061 throw new ZipException("loc: wrong sig ->" 2062 + Long.toString(LOCSIG(buf), 16)); 2063 //startPos = pos; 2064 version = LOCVER(buf); 2065 flag = LOCFLG(buf); 2066 method = LOCHOW(buf); 2067 mtime = dosToJavaTime(LOCTIM(buf)); 2068 crc = LOCCRC(buf); 2069 csize = LOCSIZ(buf); 2070 size = LOCLEN(buf); 2071 int 
nlen = LOCNAM(buf); 2072 int elen = LOCEXT(buf); 2073 2074 name = new byte[nlen]; 2075 if (zipfs.readFullyAt(name, 0, nlen, pos + LOCHDR) != nlen) { 2076 throw new ZipException("loc: name reading failed"); 2077 } 2078 if (elen > 0) { 2079 extra = new byte[elen]; 2080 if (zipfs.readFullyAt(extra, 0, elen, pos + LOCHDR + nlen) 2081 != elen) { 2082 throw new ZipException("loc: ext reading failed"); 2083 } 2084 } 2085 pos += (LOCHDR + nlen + elen); 2086 if ((flag & FLAG_DATADESCR) != 0) { 2087 // Data Descriptor 2088 Entry e = zipfs.getEntry0(name); // get the size/csize from cen 2089 if (e == null) 2090 throw new ZipException("loc: name not found in cen"); 2091 size = e.size; 2092 csize = e.csize; 2093 pos += (method == METHOD_STORED ? size : csize); 2094 if (size >= ZIP64_MINVAL || csize >= ZIP64_MINVAL) 2095 pos += 24; 2096 else 2097 pos += 16; 2098 } else { 2099 if (extra != null && 2100 (size == ZIP64_MINVAL || csize == ZIP64_MINVAL)) { 2101 // zip64 ext: must include both size and csize 2102 int off = 0; 2103 while (off + 20 < elen) { // HeaderID+DataSize+Data 2104 int sz = SH(extra, off + 2); 2105 if (SH(extra, off) == EXTID_ZIP64 && sz == 16) { 2106 size = LL(extra, off + 4); 2107 csize = LL(extra, off + 12); 2108 break; 2109 } 2110 off += (sz + 4); 2111 } 2112 } 2113 pos += (method == METHOD_STORED ? size : csize); 2114 } 2115 return this; 2116 } 2117 2118 int writeLOC(OutputStream os) 2119 throws IOException 2120 { 2121 writeInt(os, LOCSIG); // LOC header signature 2122 int version = version(); 2123 int nlen = (name != null) ? name.length : 0; 2124 int elen = (extra != null) ? 
extra.length : 0; 2125 boolean foundExtraTime = false; // if extra timestamp present 2126 int eoff = 0; 2127 int elen64 = 0; 2128 int elenEXTT = 0; 2129 int elenNTFS = 0; 2130 if ((flag & FLAG_DATADESCR) != 0) { 2131 writeShort(os, version()); // version needed to extract 2132 writeShort(os, flag); // general purpose bit flag 2133 writeShort(os, method); // compression method 2134 // last modification time 2135 writeInt(os, (int)javaToDosTime(mtime)); 2136 // store size, uncompressed size, and crc-32 in data descriptor 2137 // immediately following compressed entry data 2138 writeInt(os, 0); 2139 writeInt(os, 0); 2140 writeInt(os, 0); 2141 } else { 2142 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2143 elen64 = 20; //headid(2) + size(2) + size(8) + csize(8) 2144 writeShort(os, 45); // ver 4.5 for zip64 2145 } else { 2146 writeShort(os, version()); // version needed to extract 2147 } 2148 writeShort(os, flag); // general purpose bit flag 2149 writeShort(os, method); // compression method 2150 // last modification time 2151 writeInt(os, (int)javaToDosTime(mtime)); 2152 writeInt(os, crc); // crc-32 2153 if (elen64 != 0) { 2154 writeInt(os, ZIP64_MINVAL); 2155 writeInt(os, ZIP64_MINVAL); 2156 } else { 2157 writeInt(os, csize); // compressed size 2158 writeInt(os, size); // uncompressed size 2159 } 2160 } 2161 while (eoff + 4 < elen) { 2162 int tag = SH(extra, eoff); 2163 int sz = SH(extra, eoff + 2); 2164 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2165 foundExtraTime = true; 2166 } 2167 eoff += (4 + sz); 2168 } 2169 if (!foundExtraTime) { 2170 if (isWindows) { 2171 elenNTFS = 36; // NTFS, total 36 bytes 2172 } else { // on unix use "ext time" 2173 elenEXTT = 9; 2174 if (atime != -1) 2175 elenEXTT += 4; 2176 if (ctime != -1) 2177 elenEXTT += 4; 2178 } 2179 } 2180 writeShort(os, name.length); 2181 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2182 writeBytes(os, name); 2183 if (elen64 != 0) { 2184 writeShort(os, EXTID_ZIP64); 2185 writeShort(os, 16); 
2186 writeLong(os, size); 2187 writeLong(os, csize); 2188 } 2189 if (elenNTFS != 0) { 2190 writeShort(os, EXTID_NTFS); 2191 writeShort(os, elenNTFS - 4); 2192 writeInt(os, 0); // reserved 2193 writeShort(os, 0x0001); // NTFS attr tag 2194 writeShort(os, 24); 2195 writeLong(os, javaToWinTime(mtime)); 2196 writeLong(os, javaToWinTime(atime)); 2197 writeLong(os, javaToWinTime(ctime)); 2198 } 2199 if (elenEXTT != 0) { 2200 writeShort(os, EXTID_EXTT); 2201 writeShort(os, elenEXTT - 4);// size for the folowing data block 2202 int fbyte = 0x1; 2203 if (atime != -1) // mtime and atime 2204 fbyte |= 0x2; 2205 if (ctime != -1) // mtime, atime and ctime 2206 fbyte |= 0x4; 2207 os.write(fbyte); // flags byte 2208 writeInt(os, javaToUnixTime(mtime)); 2209 if (atime != -1) 2210 writeInt(os, javaToUnixTime(atime)); 2211 if (ctime != -1) 2212 writeInt(os, javaToUnixTime(ctime)); 2213 } 2214 if (extra != null) { 2215 writeBytes(os, extra); 2216 } 2217 return LOCHDR + name.length + elen + elen64 + elenNTFS + elenEXTT; 2218 } 2219 2220 // Data Descriptior 2221 int writeEXT(OutputStream os) 2222 throws IOException 2223 { 2224 writeInt(os, EXTSIG); // EXT header signature 2225 writeInt(os, crc); // crc-32 2226 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2227 writeLong(os, csize); 2228 writeLong(os, size); 2229 return 24; 2230 } else { 2231 writeInt(os, csize); // compressed size 2232 writeInt(os, size); // uncompressed size 2233 return 16; 2234 } 2235 } 2236 2237 // read NTFS, UNIX and ZIP64 data from cen.extra 2238 void readExtra(ZipFileSystem zipfs) throws IOException { 2239 if (extra == null) 2240 return; 2241 int elen = extra.length; 2242 int off = 0; 2243 int newOff = 0; 2244 while (off + 4 < elen) { 2245 // extra spec: HeaderID+DataSize+Data 2246 int pos = off; 2247 int tag = SH(extra, pos); 2248 int sz = SH(extra, pos + 2); 2249 pos += 4; 2250 if (pos + sz > elen) // invalid data 2251 break; 2252 switch (tag) { 2253 case EXTID_ZIP64 : 2254 if (size == ZIP64_MINVAL) { 
2255 if (pos + 8 > elen) // invalid zip64 extra 2256 break; // fields, just skip 2257 size = LL(extra, pos); 2258 pos += 8; 2259 } 2260 if (csize == ZIP64_MINVAL) { 2261 if (pos + 8 > elen) 2262 break; 2263 csize = LL(extra, pos); 2264 pos += 8; 2265 } 2266 if (locoff == ZIP64_MINVAL) { 2267 if (pos + 8 > elen) 2268 break; 2269 locoff = LL(extra, pos); 2270 pos += 8; 2271 } 2272 break; 2273 case EXTID_NTFS: 2274 pos += 4; // reserved 4 bytes 2275 if (SH(extra, pos) != 0x0001) 2276 break; 2277 if (SH(extra, pos + 2) != 24) 2278 break; 2279 // override the loc field, datatime here is 2280 // more "accurate" 2281 mtime = winToJavaTime(LL(extra, pos + 4)); 2282 atime = winToJavaTime(LL(extra, pos + 12)); 2283 ctime = winToJavaTime(LL(extra, pos + 20)); 2284 break; 2285 case EXTID_EXTT: 2286 // spec says the Extened timestamp in cen only has mtime 2287 // need to read the loc to get the extra a/ctime 2288 byte[] buf = new byte[LOCHDR]; 2289 if (zipfs.readFullyAt(buf, 0, buf.length , locoff) 2290 != buf.length) 2291 throw new ZipException("loc: reading failed"); 2292 if (LOCSIG(buf) != LOCSIG) 2293 throw new ZipException("loc: wrong sig ->" 2294 + Long.toString(LOCSIG(buf), 16)); 2295 2296 int locElen = LOCEXT(buf); 2297 if (locElen < 9) // EXTT is at lease 9 bytes 2298 break; 2299 int locNlen = LOCNAM(buf); 2300 buf = new byte[locElen]; 2301 if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen) 2302 != buf.length) 2303 throw new ZipException("loc extra: reading failed"); 2304 int locPos = 0; 2305 while (locPos + 4 < buf.length) { 2306 int locTag = SH(buf, locPos); 2307 int locSZ = SH(buf, locPos + 2); 2308 locPos += 4; 2309 if (locTag != EXTID_EXTT) { 2310 locPos += locSZ; 2311 continue; 2312 } 2313 int flag = CH(buf, locPos++); 2314 if ((flag & 0x1) != 0) { 2315 mtime = unixToJavaTime(LG(buf, locPos)); 2316 locPos += 4; 2317 } 2318 if ((flag & 0x2) != 0) { 2319 atime = unixToJavaTime(LG(buf, locPos)); 2320 locPos += 4; 2321 } 2322 if ((flag & 0x4) != 0) 
{ 2323 ctime = unixToJavaTime(LG(buf, locPos)); 2324 locPos += 4; 2325 } 2326 break; 2327 } 2328 break; 2329 default: // unknown tag 2330 System.arraycopy(extra, off, extra, newOff, sz + 4); 2331 newOff += (sz + 4); 2332 } 2333 off += (sz + 4); 2334 } 2335 if (newOff != 0 && newOff != extra.length) 2336 extra = Arrays.copyOf(extra, newOff); 2337 else 2338 extra = null; 2339 } 2340 } 2341 2342 private static class ExChannelCloser { 2343 Path path; 2344 SeekableByteChannel ch; 2345 Set<InputStream> streams; 2346 ExChannelCloser(Path path, 2347 SeekableByteChannel ch, 2348 Set<InputStream> streams) 2349 { 2350 this.path = path; 2351 this.ch = ch; 2352 this.streams = streams; 2353 } 2354 } 2355 2356 // ZIP directory has two issues: 2357 // (1) ZIP spec does not require the ZIP file to include 2358 // directory entry 2359 // (2) all entries are not stored/organized in a "tree" 2360 // structure. 2361 // A possible solution is to build the node tree ourself as 2362 // implemented below. 2363 private IndexNode root; 2364 2365 private void addToTree(IndexNode inode, HashSet<IndexNode> dirs) { 2366 if (dirs.contains(inode)) { 2367 return; 2368 } 2369 IndexNode parent; 2370 byte[] name = inode.name; 2371 byte[] pname = getParent(name); 2372 if (inodes.containsKey(LOOKUPKEY.as(pname))) { 2373 parent = inodes.get(LOOKUPKEY); 2374 } else { // pseudo directory entry 2375 parent = new IndexNode(pname, -1); 2376 inodes.put(parent, parent); 2377 } 2378 addToTree(parent, dirs); 2379 inode.sibling = parent.child; 2380 parent.child = inode; 2381 if (name[name.length -1] == '/') 2382 dirs.add(inode); 2383 } 2384 2385 private void removeFromTree(IndexNode inode) { 2386 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name))); 2387 IndexNode child = parent.child; 2388 if (child.equals(inode)) { 2389 parent.child = child.sibling; 2390 } else { 2391 IndexNode last = child; 2392 while ((child = child.sibling) != null) { 2393 if (child.equals(inode)) { 2394 last.sibling = 
child.sibling; 2395 break; 2396 } else { 2397 last = child; 2398 } 2399 } 2400 } 2401 } 2402 2403 private void buildNodeTree() throws IOException { 2404 beginWrite(); 2405 try { 2406 HashSet<IndexNode> dirs = new HashSet<>(); 2407 IndexNode root = new IndexNode(ROOTPATH, -1); 2408 inodes.put(root, root); 2409 dirs.add(root); 2410 for (IndexNode node : inodes.keySet().toArray(new IndexNode[0])) { 2411 addToTree(node, dirs); 2412 } 2413 } finally { 2414 endWrite(); 2415 } 2416 } 2417 }