1 /* 2 * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 8 * - Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 11 * - Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * - Neither the name of Oracle nor the names of its 16 * contributors may be used to endorse or promote products derived 17 * from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 20 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 26 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 27 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 28 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * This source code is provided to illustrate the usage of a given feature 34 * or technique and has been deliberately simplified. Additional steps 35 * required for a production-quality application, such as security checks, 36 * input validation and proper error handling, might not be present in 37 * this sample code. 
 */


package com.sun.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.*;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.nio.file.spi.*;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Inflater;
import java.util.zip.Deflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.ZipException;
import java.util.zip.ZipError;
import static java.lang.Boolean.*;
import static com.sun.nio.zipfs.ZipConstants.*;
import static com.sun.nio.zipfs.ZipUtils.*;
import static java.nio.file.StandardOpenOption.*;
import static java.nio.file.StandardCopyOption.*;

/**
 * A FileSystem built on a zip file
 *
 * @author Xueming Shen
 */

public class ZipFileSystem extends FileSystem {

    private final ZipFileSystemProvider provider; // provider that created this fs
    private final ZipPath defaultdir;             // default directory as a ZipPath
    private boolean readOnly = false;             // true when the underlying zip is not writable
    private final Path zfpath;                    // path of the underlying zip file
    private final ZipCoder zc;                    // encoder/decoder for entry names/comments

    // configurable by env map
    private final String defaultDir;    // default dir for the file system
    private final String nameEncoding;  // default encoding for name/comment
    private final boolean useTempFile;  // use a temp file for newOS, default
                                        // is to use BAOS for better performance
    private final boolean createNew;    // create a new zip if not exists
    private static final boolean isWindows =
        System.getProperty("os.name").startsWith("Windows");

    /**
     * Creates a zip file system over {@code zfpath}.
     *
     * Recognized env keys: "create" ("true" creates a new zip holding only an
     * END record if the file does not exist), "encoding" (name/comment charset,
     * default UTF-8), "useTempFile" (buffer new entries in a temp file instead
     * of memory), "default.dir" (must be absolute, default "/").
     *
     * @throws IOException if the zip cannot be opened/created or its CEN read
     */
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env)
        throws IOException
    {
        // configurable env setup
        this.createNew    = "true".equals(env.get("create"));
        this.nameEncoding = env.containsKey("encoding") ?
                            (String)env.get("encoding") : "UTF-8";
        this.useTempFile  = TRUE.equals(env.get("useTempFile"));
        this.defaultDir   = env.containsKey("default.dir") ?
                            (String)env.get("default.dir") : "/";
        if (this.defaultDir.charAt(0) != '/')
            throw new IllegalArgumentException("default dir should be absolute");

        this.provider = provider;
        this.zfpath = zfpath;
        if (Files.notExists(zfpath)) {
            if (createNew) {
                // write an empty zip (END record only) so the channel open below succeeds
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        if (!Files.isWritable(zfpath))
            this.readOnly = true;
        this.zc = ZipCoder.get(nameEncoding);
        this.defaultdir = new ZipPath(this, getBytes(defaultDir));
        this.ch = Files.newByteChannel(zfpath, READ);
        this.cen = initCEN();  // parse the central directory and build the inode tree
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // Throws if this file system was opened over a non-writable zip.
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        // a zip file system has exactly one root, "/"
        ArrayList<Path> pathArr = new ArrayList<>();
        pathArr.add(new ZipPath(this, new byte[]{'/'}));
        return pathArr;
    }

    ZipPath getDefaultDir() {  // package private
        return defaultdir;
    }

    @Override
    public ZipPath getPath(String first, String... more) {
        String path;
        if (more.length == 0) {
            path = first;
        } else {
            // join non-empty segments with '/'
            StringBuilder sb = new StringBuilder();
            sb.append(first);
            for (String segment: more) {
                if (segment.length() > 0) {
                    if (sb.length() > 0)
                        sb.append('/');
                    sb.append(segment);
                }
            }
            path = sb.toString();
        }
        return new ZipPath(this, getBytes(path));
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        // single store backing the single root
        ArrayList<FileStore> list = new ArrayList<>(1);
        list.add(new ZipFileStore(new ZipPath(this, new byte[]{'/'})));
        return list;
    }

    private static final Set<String> supportedFileAttributeViews =
        Collections.unmodifiableSet(
            new HashSet<String>(Arrays.asList("basic", "zip")));

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equals(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);   // translate glob to a regex
        } else {
            if (syntax.equals(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    /**
     * Closes this file system: marks it closed, closes outstanding input
     * streams, syncs pending updates back to the zip file, releases pooled
     * inflaters/deflaters and deletes temp files. The first suppressed
     * temp-file deletion failure (if any) is rethrown.
     */
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;          // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {    // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is: copy)
                is.close();
        }
        beginWrite();                // lock and sync
        try {
            sync();
            ch.close();              // close the ch just in case no update
        } finally {                  // and sync does not close the ch
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p: tmppaths) {
                try {
                    Files.deleteIfExists(p);
                } catch (IOException x) {
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
           throw ioe;
    }

    // Returns the attributes of the entry at "path", or null if absent.
    // A parent-only inode (no CEN entry) is reported as a pseudo directory.
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry0(path);
            if (e == null) {
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                e = new Entry(inode.name);       // pseudo directory
                e.method = METHOD_STORED;        // STORED for dir
                e.mtime = e.atime = e.ctime = -1;// -1 for all times
            }
        } finally {
            endRead();
        }
        return new ZipFileAttributes(e);
    }

    // Updates the modification/access/creation times of an existing entry;
    // null arguments leave the corresponding time unchanged.
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry0(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;      // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    private ZipPath toZipPath(byte[] path) {
        // make it absolute
        byte[] p = new byte[path.length + 1];
        p[0] = '/';
        System.arraycopy(path, 0, p, 1, path.length);
        return new ZipPath(this, p);
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(byte[] path,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                ZipPath zp = toZipPath(child.name);
                if (filter == null || filter.accept(zp))
                    list.add(zp);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    // Creates a new directory entry; parents must already exist.
    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW);
            e.method = METHOD_STORED;            // STORED for dir
            update(e);
        } finally {
            endWrite();
        }
    }

    // Copies (or moves, when deletesrc is true) entry "src" to "dst".
    // Supports REPLACE_EXISTING and COPY_ATTRIBUTES.
    void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry0(src);  // ensureOpen checked
            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry0(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime= u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        for (OpenOption opt: options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
        }
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry0(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    // APPEND: pre-copy the existing data into the new stream
                    InputStream is = getInputStream(e);
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    copyStream(is, os);
                    is.close();
                    return os;
                }
                return getOutputStream(new Entry(e, Entry.NEW));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(new Entry(path, Entry.NEW));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry0(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    private void checkOptions(Set<? extends OpenOption> options) {
        // check for options of null type and option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
    }

    // Returns a Writable/ReadByteChannel for now. Might consider using
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of
    // it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();
            try {
                final WritableByteChannel wbc = Channels.newChannel(
                    newOutputStream(path, options.toArray(new OpenOption[0])));
                long leftover = 0;
                if (options.contains(StandardOpenOption.APPEND)) {
                    Entry e = getEntry0(path);
                    if (e != null && e.size >= 0)
                        leftover = e.size;    // position starts after existing data
                }
                final long offset = leftover;
                // write-only channel; position/read/truncate are unsupported
                return new SeekableByteChannel() {
                    long written = offset;
                    public boolean isOpen() {
                        return wbc.isOpen();
                    }

                    public long position() throws IOException {
                        return written;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        throw new UnsupportedOperationException();
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int write(ByteBuffer src) throws IOException {
                        int n = wbc.write(src);
                        written += n;
                        return n;
                    }

                    public long size() throws IOException {
                        return written;
                    }

                    public void close() throws IOException {
                        wbc.close();
                    }
                };
            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry0(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                final ReadableByteChannel rbc =
                    Channels.newChannel(getInputStream(e));
                final long size = e.size;
                // read-only channel; position(long)/write/truncate are unsupported
                return new SeekableByteChannel() {
                    long read = 0;
                    public boolean isOpen() {
                        return rbc.isOpen();
                    }

                    public long position() throws IOException {
                        return read;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        int n = rbc.read(dst);
                        if (n > 0) {
                            read += n;
                        }
                        return n;
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new NonWritableChannelException();
                    }

                    public int write (ByteBuffer src) throws IOException {
                        throw new NonWritableChannelException();
                    }

                    public long size() throws IOException {
                        return size;
                    }

                    public void close() throws IOException {
                        rbc.close();
                    }
                };
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry0(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE_NEW))
                        throw new NoSuchFileException(getString(path));
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = METHOD_DEFLATED;
            }
            // is there a better way to hook into the FileChannel's close method?
            // All operations delegate to fch; implCloseChannel commits the
            // temp file back into the zip (write) or deletes it (read).
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private Set<ExChannelCloser> exChClosers = new HashSet<>();

    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());

    // Creates a temp file next to the zip; when "path" names an existing
    // entry, its data is copied into the temp file.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry0(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
    // Walks up the parent chain of "path" and throws NoSuchFileException
    // for the first parent that has no inode.
    private void checkParents(byte[] path) throws IOException {
        beginRead();
        try {
            while ((path = getParent(path)) != null && path.length != 0) {
                if (!inodes.containsKey(IndexNode.keyOf(path))) {
                    throw new NoSuchFileException(getString(path));
                }
            }
        } finally {
            endRead();
        }
    }

    private static byte[] ROOTPATH = new byte[0];  // empty name denotes the root

    // Returns the parent path (with trailing '/'), or ROOTPATH at the top.
    private static byte[] getParent(byte[] path) {
        int off = path.length - 1;
        if (off > 0 && path[off] == '/')  // isDirectory
            off--;
        while (off > 0 && path[off] != '/') { off--; }
        if (off <= 0)
            return ROOTPATH;
        return Arrays.copyOf(path, off + 1);
    }

    private final void beginWrite() {
        rwlock.writeLock().lock();
    }

    private final void endWrite() {
        rwlock.writeLock().unlock();
    }

    private final void beginRead() {
        rwlock.readLock().lock();
    }

    private final void endRead() {
        rwlock.readLock().unlock();
    }

    ///////////////////////////////////////////////////////////////////

    private volatile boolean isOpen = true;
    private final SeekableByteChannel ch; // channel to the zipfile
    final byte[] cen;     // CEN & ENDHDR
    private END  end;     // parsed END record of the zip
    private long locpos;  // position of first LOC header (usually 0)

    private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

    // name -> pos (in cen), IndexNode itself can be used as a "key"
    private LinkedHashMap<IndexNode, IndexNode> inodes;

    final byte[] getBytes(String name) {
        return zc.getBytes(name);
    }

    final String getString(byte[] name) {
        return zc.toString(name);
    }

    protected void finalize() throws IOException {
        close();
    }

    // Returns the file position of the entry's data, resolving a
    // not-yet-known locoff (-1) via the CEN entry and reading the LOC
    // header to skip its variable-length name/extra fields.
    private long getDataPos(Entry e) throws IOException {
        if (e.locoff == -1) {
            Entry e2 = getEntry0(e.name);
            if (e2 == null)
                throw new ZipException("invalid loc for entry <" + e.name + ">");
            e.locoff = e2.locoff;
        }
        byte[] buf = new byte[LOCHDR];
        if (readFullyAt(buf, 0, buf.length, e.locoff) != buf.length)
            throw new ZipException("invalid loc for entry <" + e.name + ">");
        return locpos + e.locoff + LOCHDR + LOCNAM(buf) + LOCEXT(buf);
    }

    // Reads len bytes of data from the specified offset into buf.
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        // serialize positioned reads on the shared channel
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards in overlapping blocks; blocks overlap by ENDHDR
        // bytes so a header straddling a block boundary is still found
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P'    &&
                    buf[i+1] == (byte)'K'    &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    if (end.cenlen == ZIP64_MINVAL ||
                        end.cenoff == ZIP64_MINVAL ||
                        end.centot == ZIP64_MINVAL32)
                    {
                        // need to find the zip64 end;
                        byte[] loc64 = new byte[ZIP64_LOCHDR];
                        if (readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                            != loc64.length) {
                            return end;    // zip64 locator unreadable, fall back
                        }
                        long end64pos = ZIP64_LOCOFF(loc64);
                        byte[] end64buf = new byte[ZIP64_ENDHDR];
                        if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                            != end64buf.length) {
                            return end;
                        }
                        // end64 found, recalculate everything.
                        end.cenlen = ZIP64_ENDSIZ(end64buf);
                        end.cenoff = ZIP64_ENDOFF(end64buf);
                        end.centot = (int)ZIP64_ENDTOT(end64buf); // assume total < 2g
                        end.endpos = end64pos;
                    }
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }

    // Reads zip file central directory. Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;             // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (CENSIG(cen, pos) != CENSIG)
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0)
                zerror("invalid CEN header (encrypted entry)");
            if (method != METHOD_STORED && method != METHOD_DEFLATED)
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            if (pos + CENHDR + nlen > limit)
                zerror("invalid CEN header (bad header size)");
            int startIndex = pos + CENHDR;
            int endIndex = startIndex + nlen;
            // strip a leading '/' from the stored name, if present
            startIndex = ((nlen > 0) && (char)cen[startIndex] == '/') ? startIndex + 1 : startIndex;
            byte[] name = Arrays.copyOfRange(cen, startIndex, endIndex);
            if(name.length > 0){
                IndexNode inode = new IndexNode(name, pos);
                inodes.put(inode, inode);
            }
            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);    // tracked so close() can clean up
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    private boolean hasUpdate = false;  // true when in-memory state differs from the zip on disk

    // shared key. consumer guarantees the "writeLock" before using it.
1109 private final IndexNode LOOKUPKEY = IndexNode.keyOf(null); 1110 1111 private void updateDelete(IndexNode inode) { 1112 beginWrite(); 1113 try { 1114 removeFromTree(inode); 1115 inodes.remove(inode); 1116 hasUpdate = true; 1117 } finally { 1118 endWrite(); 1119 } 1120 } 1121 1122 private void update(Entry e) { 1123 beginWrite(); 1124 try { 1125 IndexNode old = inodes.put(e, e); 1126 if (old != null) { 1127 removeFromTree(old); 1128 } 1129 if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) { 1130 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name))); 1131 e.sibling = parent.child; 1132 parent.child = e; 1133 } 1134 hasUpdate = true; 1135 } finally { 1136 endWrite(); 1137 } 1138 } 1139 1140 // copy over the whole LOC entry (header if necessary, data and ext) from 1141 // old zip to the new one. 1142 private long copyLOCEntry(Entry e, boolean updateHeader, 1143 OutputStream os, 1144 long written, byte[] buf) 1145 throws IOException 1146 { 1147 long locoff = e.locoff; // where to read 1148 e.locoff = written; // update the e.locoff with new value 1149 1150 // calculate the size need to write out 1151 long size = 0; 1152 // if there is A ext 1153 if ((e.flag & FLAG_DATADESCR) != 0) { 1154 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1155 size = 24; 1156 else 1157 size = 16; 1158 } 1159 // read loc, use the original loc.elen/nlen 1160 if (readFullyAt(buf, 0, LOCHDR , locoff) != LOCHDR) 1161 throw new ZipException("loc: reading failed"); 1162 if (updateHeader) { 1163 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1164 size += e.csize; 1165 written = e.writeLOC(os) + size; 1166 } else { 1167 os.write(buf, 0, LOCHDR); // write out the loc header 1168 locoff += LOCHDR; 1169 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1170 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1171 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1172 written = LOCHDR + size; 1173 } 1174 int n; 1175 while (size > 0 && 1176 
(n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1177 { 1178 if (size < n) 1179 n = (int)size; 1180 os.write(buf, 0, n); 1181 size -= n; 1182 locoff += n; 1183 } 1184 return written; 1185 } 1186 1187 // sync the zip file system, if there is any udpate 1188 private void sync() throws IOException { 1189 //System.out.printf("->sync(%s) starting....!%n", toString()); 1190 // check ex-closer 1191 if (!exChClosers.isEmpty()) { 1192 for (ExChannelCloser ecc : exChClosers) { 1193 if (ecc.streams.isEmpty()) { 1194 ecc.ch.close(); 1195 Files.delete(ecc.path); 1196 exChClosers.remove(ecc); 1197 } 1198 } 1199 } 1200 if (!hasUpdate) 1201 return; 1202 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1203 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1204 { 1205 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1206 long written = 0; 1207 byte[] buf = new byte[8192]; 1208 Entry e = null; 1209 1210 // write loc 1211 for (IndexNode inode : inodes.values()) { 1212 if (inode instanceof Entry) { // an updated inode 1213 e = (Entry)inode; 1214 try { 1215 if (e.type == Entry.COPY) { 1216 // entry copy: the only thing changed is the "name" 1217 // and "nlen" in LOC header, so we udpate/rewrite the 1218 // LOC in new file and simply copy the rest (data and 1219 // ext) without enflating/deflating from the old zip 1220 // file LOC entry. 
1221 written += copyLOCEntry(e, true, os, written, buf); 1222 } else { // NEW, FILECH or CEN 1223 e.locoff = written; 1224 written += e.writeLOC(os); // write loc header 1225 if (e.bytes != null) { // in-memory, deflated 1226 os.write(e.bytes); // already 1227 written += e.bytes.length; 1228 } else if (e.file != null) { // tmp file 1229 try (InputStream is = Files.newInputStream(e.file)) { 1230 int n; 1231 if (e.type == Entry.NEW) { // deflated already 1232 while ((n = is.read(buf)) != -1) { 1233 os.write(buf, 0, n); 1234 written += n; 1235 } 1236 } else if (e.type == Entry.FILECH) { 1237 // the data are not deflated, use ZEOS 1238 try (OutputStream os2 = new EntryOutputStream(e, os)) { 1239 while ((n = is.read(buf)) != -1) { 1240 os2.write(buf, 0, n); 1241 } 1242 } 1243 written += e.csize; 1244 if ((e.flag & FLAG_DATADESCR) != 0) 1245 written += e.writeEXT(os); 1246 } 1247 } 1248 Files.delete(e.file); 1249 tmppaths.remove(e.file); 1250 } else { 1251 // dir, 0-length data 1252 } 1253 } 1254 elist.add(e); 1255 } catch (IOException x) { 1256 x.printStackTrace(); // skip any in-accurate entry 1257 } 1258 } else { // unchanged inode 1259 if (inode.pos == -1) { 1260 continue; // pseudo directory node 1261 } 1262 e = Entry.readCEN(this, inode.pos); 1263 try { 1264 written += copyLOCEntry(e, false, os, written, buf); 1265 elist.add(e); 1266 } catch (IOException x) { 1267 x.printStackTrace(); // skip any wrong entry 1268 } 1269 } 1270 } 1271 1272 // now write back the cen and end table 1273 end.cenoff = written; 1274 for (Entry entry : elist) { 1275 written += entry.writeCEN(os); 1276 } 1277 end.centot = elist.size(); 1278 end.cenlen = written - end.cenoff; 1279 end.write(os, written); 1280 } 1281 if (!streams.isEmpty()) { 1282 // 1283 // TBD: ExChannelCloser should not be necessary if we only 1284 // sync when being closed, all streams should have been 1285 // closed already. Keep the logic here for now. 
1286 // 1287 // There are outstanding input streams open on existing "ch", 1288 // so, don't close the "cha" and delete the "file for now, let 1289 // the "ex-channel-closer" to handle them 1290 ExChannelCloser ecc = new ExChannelCloser( 1291 createTempFileInSameDirectoryAs(zfpath), 1292 ch, 1293 streams); 1294 Files.move(zfpath, ecc.path, REPLACE_EXISTING); 1295 exChClosers.add(ecc); 1296 streams = Collections.synchronizedSet(new HashSet<InputStream>()); 1297 } else { 1298 ch.close(); 1299 Files.delete(zfpath); 1300 } 1301 1302 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1303 hasUpdate = false; // clear 1304 /* 1305 if (isOpen) { 1306 ch = zfpath.newByteChannel(READ); // re-fresh "ch" and "cen" 1307 cen = initCEN(); 1308 } 1309 */ 1310 //System.out.printf("->sync(%s) done!%n", toString()); 1311 } 1312 1313 private IndexNode getInode(byte[] path) { 1314 if (path == null) 1315 throw new NullPointerException("path"); 1316 IndexNode key = IndexNode.keyOf(path); 1317 IndexNode inode = inodes.get(key); 1318 if (inode == null && 1319 (path.length == 0 || path[path.length -1] != '/')) { 1320 // if does not ends with a slash 1321 path = Arrays.copyOf(path, path.length + 1); 1322 path[path.length - 1] = '/'; 1323 inode = inodes.get(key.as(path)); 1324 } 1325 return inode; 1326 } 1327 1328 private Entry getEntry0(byte[] path) throws IOException { 1329 IndexNode inode = getInode(path); 1330 if (inode instanceof Entry) 1331 return (Entry)inode; 1332 if (inode == null || inode.pos == -1) 1333 return null; 1334 return Entry.readCEN(this, inode.pos); 1335 } 1336 1337 public void deleteFile(byte[] path, boolean failIfNotExists) 1338 throws IOException 1339 { 1340 checkWritable(); 1341 1342 IndexNode inode = getInode(path); 1343 if (inode == null) { 1344 if (path != null && path.length == 0) 1345 throw new ZipException("root directory </> can't not be delete"); 1346 if (failIfNotExists) 1347 throw new NoSuchFileException(getString(path)); 1348 } else { 1349 if (inode.isDir() && 
inode.child != null) 1350 throw new DirectoryNotEmptyException(getString(path)); 1351 updateDelete(inode); 1352 } 1353 } 1354 1355 private static void copyStream(InputStream is, OutputStream os) 1356 throws IOException 1357 { 1358 byte[] copyBuf = new byte[8192]; 1359 int n; 1360 while ((n = is.read(copyBuf)) != -1) { 1361 os.write(copyBuf, 0, n); 1362 } 1363 } 1364 1365 // Returns an out stream for either 1366 // (1) writing the contents of a new entry, if the entry exits, or 1367 // (2) updating/replacing the contents of the specified existing entry. 1368 private OutputStream getOutputStream(Entry e) throws IOException { 1369 1370 if (e.mtime == -1) 1371 e.mtime = System.currentTimeMillis(); 1372 if (e.method == -1) 1373 e.method = METHOD_DEFLATED; // TBD: use default method 1374 // store size, compressed size, and crc-32 in LOC header 1375 e.flag = 0; 1376 if (zc.isUTF8()) 1377 e.flag |= FLAG_EFS; 1378 OutputStream os; 1379 if (useTempFile) { 1380 e.file = getTempPathForEntry(null); 1381 os = Files.newOutputStream(e.file, WRITE); 1382 } else { 1383 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1384 } 1385 return new EntryOutputStream(e, os); 1386 } 1387 1388 private InputStream getInputStream(Entry e) 1389 throws IOException 1390 { 1391 InputStream eis = null; 1392 1393 if (e.type == Entry.NEW) { 1394 if (e.bytes != null) 1395 eis = new ByteArrayInputStream(e.bytes); 1396 else if (e.file != null) 1397 eis = Files.newInputStream(e.file); 1398 else 1399 throw new ZipException("update entry data is missing"); 1400 } else if (e.type == Entry.FILECH) { 1401 // FILECH result is un-compressed. 
1402 eis = Files.newInputStream(e.file); 1403 // TBD: wrap to hook close() 1404 // streams.add(eis); 1405 return eis; 1406 } else { // untouced CEN or COPY 1407 eis = new EntryInputStream(e, ch); 1408 } 1409 if (e.method == METHOD_DEFLATED) { 1410 // MORE: Compute good size for inflater stream: 1411 long bufSize = e.size + 2; // Inflater likes a bit of slack 1412 if (bufSize > 65536) 1413 bufSize = 8192; 1414 final long size = e.size; 1415 eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) { 1416 1417 private boolean isClosed = false; 1418 public void close() throws IOException { 1419 if (!isClosed) { 1420 releaseInflater(inf); 1421 this.in.close(); 1422 isClosed = true; 1423 streams.remove(this); 1424 } 1425 } 1426 // Override fill() method to provide an extra "dummy" byte 1427 // at the end of the input stream. This is required when 1428 // using the "nowrap" Inflater option. (it appears the new 1429 // zlib in 7 does not need it, but keep it for now) 1430 protected void fill() throws IOException { 1431 if (eof) { 1432 throw new EOFException( 1433 "Unexpected end of ZLIB input stream"); 1434 } 1435 len = this.in.read(buf, 0, buf.length); 1436 if (len == -1) { 1437 buf[0] = 0; 1438 len = 1; 1439 eof = true; 1440 } 1441 inf.setInput(buf, 0, len); 1442 } 1443 private boolean eof; 1444 1445 public int available() throws IOException { 1446 if (isClosed) 1447 return 0; 1448 long avail = size - inf.getBytesWritten(); 1449 return avail > (long) Integer.MAX_VALUE ? 1450 Integer.MAX_VALUE : (int) avail; 1451 } 1452 }; 1453 } else if (e.method == METHOD_STORED) { 1454 // TBD: wrap/ it does not seem necessary 1455 } else { 1456 throw new ZipException("invalid compression method"); 1457 } 1458 streams.add(eis); 1459 return eis; 1460 } 1461 1462 // Inner class implementing the input stream used to read 1463 // a (possibly compressed) zip file entry. 
1464 private class EntryInputStream extends InputStream { 1465 private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might 1466 // point to a new channel after sync() 1467 private long pos; // current position within entry data 1468 protected long rem; // number of remaining bytes within entry 1469 protected final long size; // uncompressed size of this entry 1470 1471 EntryInputStream(Entry e, SeekableByteChannel zfch) 1472 throws IOException 1473 { 1474 this.zfch = zfch; 1475 rem = e.csize; 1476 size = e.size; 1477 pos = getDataPos(e); 1478 } 1479 public int read(byte b[], int off, int len) throws IOException { 1480 ensureOpen(); 1481 if (rem == 0) { 1482 return -1; 1483 } 1484 if (len <= 0) { 1485 return 0; 1486 } 1487 if (len > rem) { 1488 len = (int) rem; 1489 } 1490 // readFullyAt() 1491 long n = 0; 1492 ByteBuffer bb = ByteBuffer.wrap(b); 1493 bb.position(off); 1494 bb.limit(off + len); 1495 synchronized(zfch) { 1496 n = zfch.position(pos).read(bb); 1497 } 1498 if (n > 0) { 1499 pos += n; 1500 rem -= n; 1501 } 1502 if (rem == 0) { 1503 close(); 1504 } 1505 return (int)n; 1506 } 1507 public int read() throws IOException { 1508 byte[] b = new byte[1]; 1509 if (read(b, 0, 1) == 1) { 1510 return b[0] & 0xff; 1511 } else { 1512 return -1; 1513 } 1514 } 1515 public long skip(long n) throws IOException { 1516 ensureOpen(); 1517 if (n > rem) 1518 n = rem; 1519 pos += n; 1520 rem -= n; 1521 if (rem == 0) { 1522 close(); 1523 } 1524 return n; 1525 } 1526 public int available() { 1527 return rem > Integer.MAX_VALUE ? 
Integer.MAX_VALUE : (int) rem; 1528 } 1529 public long size() { 1530 return size; 1531 } 1532 public void close() { 1533 rem = 0; 1534 streams.remove(this); 1535 } 1536 } 1537 1538 class EntryOutputStream extends DeflaterOutputStream 1539 { 1540 private CRC32 crc; 1541 private Entry e; 1542 private long written; 1543 1544 EntryOutputStream(Entry e, OutputStream os) 1545 throws IOException 1546 { 1547 super(os, getDeflater()); 1548 if (e == null) 1549 throw new NullPointerException("Zip entry is null"); 1550 this.e = e; 1551 crc = new CRC32(); 1552 } 1553 1554 @Override 1555 public void write(byte b[], int off, int len) throws IOException { 1556 if (e.type != Entry.FILECH) // only from sync 1557 ensureOpen(); 1558 if (off < 0 || len < 0 || off > b.length - len) { 1559 throw new IndexOutOfBoundsException(); 1560 } else if (len == 0) { 1561 return; 1562 } 1563 switch (e.method) { 1564 case METHOD_DEFLATED: 1565 super.write(b, off, len); 1566 break; 1567 case METHOD_STORED: 1568 written += len; 1569 out.write(b, off, len); 1570 break; 1571 default: 1572 throw new ZipException("invalid compression method"); 1573 } 1574 crc.update(b, off, len); 1575 } 1576 1577 @Override 1578 public void close() throws IOException { 1579 // TBD ensureOpen(); 1580 switch (e.method) { 1581 case METHOD_DEFLATED: 1582 finish(); 1583 e.size = def.getBytesRead(); 1584 e.csize = def.getBytesWritten(); 1585 e.crc = crc.getValue(); 1586 break; 1587 case METHOD_STORED: 1588 // we already know that both e.size and e.csize are the same 1589 e.size = e.csize = written; 1590 e.crc = crc.getValue(); 1591 break; 1592 default: 1593 throw new ZipException("invalid compression method"); 1594 } 1595 //crc.reset(); 1596 if (out instanceof ByteArrayOutputStream) 1597 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1598 1599 if (e.type == Entry.FILECH) { 1600 releaseDeflater(def); 1601 return; 1602 } 1603 super.close(); 1604 releaseDeflater(def); 1605 update(e); 1606 } 1607 } 1608 1609 static void 
zerror(String msg) { 1610 throw new ZipError(msg); 1611 } 1612 1613 // Maxmum number of de/inflater we cache 1614 private final int MAX_FLATER = 20; 1615 // List of available Inflater objects for decompression 1616 private final List<Inflater> inflaters = new ArrayList<>(); 1617 1618 // Gets an inflater from the list of available inflaters or allocates 1619 // a new one. 1620 private Inflater getInflater() { 1621 synchronized (inflaters) { 1622 int size = inflaters.size(); 1623 if (size > 0) { 1624 Inflater inf = inflaters.remove(size - 1); 1625 return inf; 1626 } else { 1627 return new Inflater(true); 1628 } 1629 } 1630 } 1631 1632 // Releases the specified inflater to the list of available inflaters. 1633 private void releaseInflater(Inflater inf) { 1634 synchronized (inflaters) { 1635 if (inflaters.size() < MAX_FLATER) { 1636 inf.reset(); 1637 inflaters.add(inf); 1638 } else { 1639 inf.end(); 1640 } 1641 } 1642 } 1643 1644 // List of available Deflater objects for compression 1645 private final List<Deflater> deflaters = new ArrayList<>(); 1646 1647 // Gets an deflater from the list of available deflaters or allocates 1648 // a new one. 1649 private Deflater getDeflater() { 1650 synchronized (deflaters) { 1651 int size = deflaters.size(); 1652 if (size > 0) { 1653 Deflater def = deflaters.remove(size - 1); 1654 return def; 1655 } else { 1656 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1657 } 1658 } 1659 } 1660 1661 // Releases the specified inflater to the list of available inflaters. 
1662 private void releaseDeflater(Deflater def) { 1663 synchronized (deflaters) { 1664 if (inflaters.size() < MAX_FLATER) { 1665 def.reset(); 1666 deflaters.add(def); 1667 } else { 1668 def.end(); 1669 } 1670 } 1671 } 1672 1673 // End of central directory record 1674 static class END { 1675 int disknum; 1676 int sdisknum; 1677 int endsub; // endsub 1678 int centot; // 4 bytes 1679 long cenlen; // 4 bytes 1680 long cenoff; // 4 bytes 1681 int comlen; // comment length 1682 byte[] comment; 1683 1684 /* members of Zip64 end of central directory locator */ 1685 int diskNum; 1686 long endpos; 1687 int disktot; 1688 1689 void write(OutputStream os, long offset) throws IOException { 1690 boolean hasZip64 = false; 1691 long xlen = cenlen; 1692 long xoff = cenoff; 1693 if (xlen >= ZIP64_MINVAL) { 1694 xlen = ZIP64_MINVAL; 1695 hasZip64 = true; 1696 } 1697 if (xoff >= ZIP64_MINVAL) { 1698 xoff = ZIP64_MINVAL; 1699 hasZip64 = true; 1700 } 1701 int count = centot; 1702 if (count >= ZIP64_MINVAL32) { 1703 count = ZIP64_MINVAL32; 1704 hasZip64 = true; 1705 } 1706 if (hasZip64) { 1707 long off64 = offset; 1708 //zip64 end of central directory record 1709 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1710 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1711 writeShort(os, 45); // version made by 1712 writeShort(os, 45); // version needed to extract 1713 writeInt(os, 0); // number of this disk 1714 writeInt(os, 0); // central directory start disk 1715 writeLong(os, centot); // number of directory entires on disk 1716 writeLong(os, centot); // number of directory entires 1717 writeLong(os, cenlen); // length of central directory 1718 writeLong(os, cenoff); // offset of central directory 1719 1720 //zip64 end of central directory locator 1721 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1722 writeInt(os, 0); // zip64 END start disk 1723 writeLong(os, off64); // offset of zip64 END 1724 writeInt(os, 1); // total number of disks (?) 
1725 } 1726 writeInt(os, ENDSIG); // END record signature 1727 writeShort(os, 0); // number of this disk 1728 writeShort(os, 0); // central directory start disk 1729 writeShort(os, count); // number of directory entries on disk 1730 writeShort(os, count); // total number of directory entries 1731 writeInt(os, xlen); // length of central directory 1732 writeInt(os, xoff); // offset of central directory 1733 if (comment != null) { // zip file comment 1734 writeShort(os, comment.length); 1735 writeBytes(os, comment); 1736 } else { 1737 writeShort(os, 0); 1738 } 1739 } 1740 } 1741 1742 // Internal node that links a "name" to its pos in cen table. 1743 // The node itself can be used as a "key" to lookup itself in 1744 // the HashMap inodes. 1745 static class IndexNode { 1746 byte[] name; 1747 int hashcode; // node is hashable/hashed by its name 1748 int pos = -1; // position in cen table, -1 menas the 1749 // entry does not exists in zip file 1750 IndexNode(byte[] name, int pos) { 1751 name(name); 1752 this.pos = pos; 1753 } 1754 1755 final static IndexNode keyOf(byte[] name) { // get a lookup key; 1756 return new IndexNode(name, -1); 1757 } 1758 1759 final void name(byte[] name) { 1760 this.name = name; 1761 this.hashcode = Arrays.hashCode(name); 1762 } 1763 1764 final IndexNode as(byte[] name) { // reuse the node, mostly 1765 name(name); // as a lookup "key" 1766 return this; 1767 } 1768 1769 boolean isDir() { 1770 return name != null && 1771 (name.length == 0 || name[name.length - 1] == '/'); 1772 } 1773 1774 public boolean equals(Object other) { 1775 if (!(other instanceof IndexNode)) { 1776 return false; 1777 } 1778 return Arrays.equals(name, ((IndexNode)other).name); 1779 } 1780 1781 public int hashCode() { 1782 return hashcode; 1783 } 1784 1785 IndexNode() {} 1786 IndexNode sibling; 1787 IndexNode child; // 1st child 1788 } 1789 1790 static class Entry extends IndexNode { 1791 1792 static final int CEN = 1; // entry read from cen 1793 static final int NEW = 2; // 
// updated contents in bytes or file
        static final int FILECH = 3;    // fch update in "file"
        static final int COPY   = 4;    // copy of a CEN entry

        byte[] bytes;                   // updated content bytes
        Path   file;                    // use tmp file to store bytes
        int    type = CEN;              // default is the entry read from cen

        // entry attributes
        int    version;
        int    flag;
        int    method = -1;             // compression method
        long   mtime  = -1;             // last modification time (in DOS time)
        long   atime  = -1;             // last access time
        long   ctime  = -1;             // create time
        long   crc    = -1;             // crc-32 of entry data
        long   csize  = -1;             // compressed size of entry data
        long   size   = -1;             // uncompressed size of entry data
        byte[] extra;

        // cen
        int    versionMade;
        int    disk;
        int    attrs;
        long   attrsEx;
        long   locoff;
        byte[] comment;

        Entry() {}

        Entry(byte[] name) {
            name(name);
            this.mtime = this.ctime = this.atime = System.currentTimeMillis();
            this.crc    = 0;
            this.size   = 0;
            this.csize  = 0;
            this.method = METHOD_DEFLATED;
        }

        Entry(byte[] name, int type) {
            this(name);
            this.type = type;
        }

        // Copy constructor: clones all attributes of e, tagging the copy
        // with the given type.
        Entry(Entry e, int type) {
            name(e.name);
            this.version     = e.version;
            this.ctime       = e.ctime;
            this.atime       = e.atime;
            this.mtime       = e.mtime;
            this.crc         = e.crc;
            this.size        = e.size;
            this.csize       = e.csize;
            this.method      = e.method;
            this.extra       = e.extra;
            this.versionMade = e.versionMade;
            this.disk        = e.disk;
            this.attrs       = e.attrs;
            this.attrsEx     = e.attrsEx;
            this.locoff      = e.locoff;
            this.comment     = e.comment;
            this.type        = type;
        }

        Entry(byte[] name, Path file, int type) {
            this(name, type);
            this.file = file;
            this.method = METHOD_STORED;
        }

        // Minimum "version needed to extract" for this entry's method.
        int version() throws ZipException {
            if (method == METHOD_DEFLATED)
                return 20;
            else if (method == METHOD_STORED)
                return 10;
            throw new ZipException("unsupported compression method");
        }

        ///////////////////// CEN //////////////////////
        static Entry readCEN(ZipFileSystem zipfs, int pos)
            throws IOException
        {
            return new Entry().cen(zipfs, pos);
        }

        // Populates this entry from the CEN record at pos in zipfs.cen.
        private Entry cen(ZipFileSystem zipfs, int pos)
            throws IOException
        {
            byte[] cen = zipfs.cen;
            if (CENSIG(cen, pos) != CENSIG)
                zerror("invalid CEN header (bad signature)");
            versionMade = CENVEM(cen, pos);
            version     = CENVER(cen, pos);
            flag        = CENFLG(cen, pos);
            method      = CENHOW(cen, pos);
            mtime       = dosToJavaTime(CENTIM(cen, pos));
            crc         = CENCRC(cen, pos);
            csize       = CENSIZ(cen, pos);
            size        = CENLEN(cen, pos);
            int nlen    = CENNAM(cen, pos);
            int elen    = CENEXT(cen, pos);
            int clen    = CENCOM(cen, pos);
            disk        = CENDSK(cen, pos);
            attrs       = CENATT(cen, pos);
            attrsEx     = CENATX(cen, pos);
            locoff      = CENOFF(cen, pos);

            pos += CENHDR;
            name(Arrays.copyOfRange(cen, pos, pos + nlen));

            pos += nlen;
            if (elen > 0) {
                extra = Arrays.copyOfRange(cen, pos, pos + elen);
                pos += elen;
                readExtra(zipfs);
            }
            if (clen > 0) {
                comment = Arrays.copyOfRange(cen, pos, pos + clen);
            }
            return this;
        }

        // Writes this entry's central-directory record to os; returns the
        // number of bytes written (header + name + extras + comment).
        int writeCEN(OutputStream os) throws IOException
        {
            int written  = CENHDR;
            int version0 = version();
            long csize0  = csize;
            long size0   = size;
            long locoff0 = locoff;
            int elen64   = 0;    // extra for ZIP64
            int elenNTFS = 0;    // extra for NTFS (a/c/mtime)
            int elenEXTT = 0;    // extra for Extended Timestamp
            boolean hasTimeExtra = false;    // NTFS or EXTT extra already present?

            // confirm size/length
            int nlen = (name != null) ? name.length : 0;
            int elen = (extra != null) ? extra.length : 0;
            int exOff = 0;
            int clen = (comment != null) ? comment.length : 0;
            if (csize >= ZIP64_MINVAL) {
                csize0 = ZIP64_MINVAL;
                elen64 += 8;                 // csize(8)
            }
            if (size >= ZIP64_MINVAL) {
                size0 = ZIP64_MINVAL;        // size(8)
                elen64 += 8;
            }
            if (locoff >= ZIP64_MINVAL) {
                locoff0 = ZIP64_MINVAL;
                elen64 += 8;                 // offset(8)
            }
            if (elen64 != 0) {
                elen64 += 4;                 // header and data sz 4 bytes
            }
            // scan the existing extra fields for a timestamp record
            while (exOff + 4 < elen) {
                int id   = SH(extra, exOff);
                int dlen = SH(extra, exOff + 2);
                if (id == EXTID_EXTT || id == EXTID_NTFS) {
                    hasTimeExtra = true;
                }
                exOff += (4 + dlen);
            }
            if (!hasTimeExtra) {
                if (isWindows) {             // use NTFS
                    elenNTFS = 36;           // total 36 bytes
                } else {                     // Extended Timestamp otherwise
                    elenEXTT = 9;            // only mtime in cen
                }
            }
            writeInt(os, CENSIG);            // CEN header signature
            if (elen64 != 0) {
                writeShort(os, 45);          // ver 4.5 for zip64
                writeShort(os, 45);
            } else {
                writeShort(os, version0);    // version made by
                writeShort(os, version0);    // version needed to extract
            }
            writeShort(os, flag);            // general purpose bit flag
            writeShort(os, method);          // compression method
            writeInt(os, (int)javaToDosTime(mtime));    // last modification time
            writeInt(os, crc);               // crc-32
            writeInt(os, csize0);            // compressed size
            writeInt(os, size0);             // uncompressed size
            writeShort(os, name.length);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);

            if (comment != null) {
                writeShort(os, Math.min(clen, 0xffff));
            } else {
                writeShort(os, 0);
            }
            writeShort(os, 0);               // starting disk number
            writeShort(os, 0);               // internal file attributes (unused)
            writeInt(os, 0);                 // external file attributes (unused)
            writeInt(os, locoff0);           // relative offset of local header
            writeBytes(os, name);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64); // Zip64 extra
                writeShort(os, elen64 - 4);  // size of "this" extra block
                if (size0 == ZIP64_MINVAL)
                    writeLong(os, size);
                if (csize0 == ZIP64_MINVAL)
                    writeLong(os, csize);
                if (locoff0 == ZIP64_MINVAL)
                    writeLong(os, locoff);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);             // reserved
                writeShort(os, 0x0001);      // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);
                if (ctime == -1)
                    os.write(0x3);           // mtime and atime
                else
                    os.write(0x7);           // mtime, atime and ctime
                writeInt(os, javaToUnixTime(mtime));
            }
            if (extra != null)               // whatever not recognized
                writeBytes(os, extra);
            if (comment != null)             //TBD: 0, Math.min(commentBytes.length, 0xffff));
                writeBytes(os, comment);
            return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT;
        }

        ///////////////////// LOC //////////////////////
        static Entry readLOC(ZipFileSystem zipfs, long pos)
            throws IOException
        {
            return readLOC(zipfs, pos, new byte[1024]);
        }

        static Entry readLOC(ZipFileSystem zipfs, long pos, byte[] buf)
            throws IOException
        {
            return new Entry().loc(zipfs, pos, buf);
        }

        // Populates this entry from the LOC record at pos.
        Entry loc(ZipFileSystem zipfs, long pos, byte[] buf)
            throws IOException
        {
            assert (buf.length >= LOCHDR);
            if (zipfs.readFullyAt(buf, 0, LOCHDR, pos) != LOCHDR)
                throw new ZipException("loc: reading failed");
            if (LOCSIG(buf) != LOCSIG)
                throw new ZipException("loc: wrong sig ->"
                                       + Long.toString(LOCSIG(buf), 16));
            //startPos = pos;
            version = LOCVER(buf);
            flag    = LOCFLG(buf);
            method  = LOCHOW(buf);
            mtime   = dosToJavaTime(LOCTIM(buf));
            crc     = LOCCRC(buf);
            csize   = LOCSIZ(buf);
            size    = LOCLEN(buf);
            int
nlen = LOCNAM(buf); 2058 int elen = LOCEXT(buf); 2059 2060 name = new byte[nlen]; 2061 if (zipfs.readFullyAt(name, 0, nlen, pos + LOCHDR) != nlen) { 2062 throw new ZipException("loc: name reading failed"); 2063 } 2064 if (elen > 0) { 2065 extra = new byte[elen]; 2066 if (zipfs.readFullyAt(extra, 0, elen, pos + LOCHDR + nlen) 2067 != elen) { 2068 throw new ZipException("loc: ext reading failed"); 2069 } 2070 } 2071 pos += (LOCHDR + nlen + elen); 2072 if ((flag & FLAG_DATADESCR) != 0) { 2073 // Data Descriptor 2074 Entry e = zipfs.getEntry0(name); // get the size/csize from cen 2075 if (e == null) 2076 throw new ZipException("loc: name not found in cen"); 2077 size = e.size; 2078 csize = e.csize; 2079 pos += (method == METHOD_STORED ? size : csize); 2080 if (size >= ZIP64_MINVAL || csize >= ZIP64_MINVAL) 2081 pos += 24; 2082 else 2083 pos += 16; 2084 } else { 2085 if (extra != null && 2086 (size == ZIP64_MINVAL || csize == ZIP64_MINVAL)) { 2087 // zip64 ext: must include both size and csize 2088 int off = 0; 2089 while (off + 20 < elen) { // HeaderID+DataSize+Data 2090 int sz = SH(extra, off + 2); 2091 if (SH(extra, off) == EXTID_ZIP64 && sz == 16) { 2092 size = LL(extra, off + 4); 2093 csize = LL(extra, off + 12); 2094 break; 2095 } 2096 off += (sz + 4); 2097 } 2098 } 2099 pos += (method == METHOD_STORED ? size : csize); 2100 } 2101 return this; 2102 } 2103 2104 int writeLOC(OutputStream os) 2105 throws IOException 2106 { 2107 writeInt(os, LOCSIG); // LOC header signature 2108 int version = version(); 2109 int nlen = (name != null) ? name.length : 0; 2110 int elen = (extra != null) ? 
            // ---- continuation of writeLOC(OutputStream): elen is the length
            // of this entry's existing "extra" field data, or 0 if none ----
                extra.length : 0;
            boolean foundExtraTime = false;     // true if extra already has a timestamp block
            int eoff = 0;                       // scan offset into extra[]
            int elen64 = 0;                     // bytes of ZIP64 extra block to append, if any
            int elenEXTT = 0;                   // bytes of extended-timestamp block (unix)
            int elenNTFS = 0;                   // bytes of NTFS timestamp block (windows)
            if ((flag & FLAG_DATADESCR) != 0) {
                // Sizes and CRC are not known yet; they will follow the entry
                // data in a data descriptor (see writeEXT), so write zeros here.
                writeShort(os, version());          // version needed to extract
                writeShort(os, flag);               // general purpose bit flag
                writeShort(os, method);             // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;    //headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);         // ver 4.5 for zip64
                } else {
                    writeShort(os, version());  // version needed to extract
                }
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);              // crc-32
                if (elen64 != 0) {
                    // real values go into the ZIP64 extra block written below
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);        // compressed size
                    writeInt(os, size);         // uncompressed size
                }
            }
            // Scan the existing extra data for a timestamp block so we do not
            // append a duplicate one.
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;  // NTFS, total 36 bytes
                } else {            // on unix use "ext time"
                    elenEXTT = 9;   // headid(2) + size(2) + flags(1) + mtime(4)
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, name.length);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, name);
            if (elen64 != 0) {
                // ZIP64 extra block: uncompressed size first, then compressed
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);            // reserved
                writeShort(os, 0x0001);     // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);// size of the following data block
                int fbyte = 0x1;           // bit 0x1 -> mtime present
                if (atime != -1)           // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)           // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);           // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);      // pass through the original extra data
            }
            return LOCHDR + name.length + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor
        /**
         * Writes this entry's data descriptor (EXT) record to {@code os}:
         * signature, crc-32, then the sizes — 8-byte ZIP64 form when either
         * size needs it, 4-byte form otherwise.
         *
         * @return the number of bytes written (24 for ZIP64 sizes, else 16)
         */
        int writeEXT(OutputStream os)
            throws IOException
        {
            writeInt(os, EXTSIG);           // EXT header signature
            writeInt(os, crc);              // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                // sizes do not fit in 32 bits -> ZIP64 (8-byte) form
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);        // compressed size
                writeInt(os, size);         // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        /**
         * Parses this entry's CEN extra data: fills in ZIP64 size/csize/locoff
         * values, and NTFS or extended (unix) timestamps.  Unknown blocks are
         * compacted towards the front of {@code extra}; {@code extra} is then
         * truncated to just those blocks, or set to null when no unknown block
         * was retained (or when they occupy the entire array).
         */
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;     // write position for retained (unknown) blocks
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)        // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // Only the fields whose 32-bit CEN value was the ZIP64
                    // sentinel are present, in the order size, csize, locoff.
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)  // invalid zip64 extra
                            break;           // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)     // reserved(4) + tag(2) + size(2) + 3*time(8)
                        break;
                    pos += 4;    // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)   // attribute tag 1 = times
                        break;
                    if (SH(extra, pos + 2) != 24)
                        break;
                    // override the loc field, datetime here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (LOCSIG(buf) != LOCSIG)
                        throw new ZipException("loc: wrong sig ->"
                                           + Long.toString(LOCSIG(buf), 16));

                    int locElen = LOCEXT(buf);
                    if (locElen < 9)    // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    // the LOC extra field follows the LOC header and the name
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    int locPos = 0;
                    // find the EXTT block inside the LOC extra data
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        // flags byte tells which of m/a/ctime follow
                        int flag = CH(buf, locPos++);
                        if ((flag & 0x1) != 0) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:    // unknown tag
                    // retain unrecognized blocks, compacted to the front
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;
        }
    }

    // Holder grouping a zip file's path with its open channel and the
    // input streams associated with it.  NOTE(review): presumably used so
    // channel and streams can be closed together later -- the uses of this
    // class are elsewhere in the file; confirm against them.
    private static class ExChannelCloser {
        Path path;
        SeekableByteChannel ch;
        Set<InputStream> streams;
        ExChannelCloser(Path path,
                        SeekableByteChannel ch,
                        Set<InputStream> streams)
        {
            this.path = path;
            this.ch = ch;
            this.streams = streams;
        }
    }

    // ZIP directory has two issues:
    // (1) the ZIP spec does not require the ZIP file to include
    //     directory entries
    // (2) entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourselves, as
    // implemented below.
2351 private IndexNode root; 2352 2353 private void addToTree(IndexNode inode, HashSet<IndexNode> dirs) { 2354 if (dirs.contains(inode)) { 2355 return; 2356 } 2357 IndexNode parent; 2358 byte[] name = inode.name; 2359 byte[] pname = getParent(name); 2360 if (inodes.containsKey(LOOKUPKEY.as(pname))) { 2361 parent = inodes.get(LOOKUPKEY); 2362 } else { // pseudo directory entry 2363 parent = new IndexNode(pname, -1); 2364 inodes.put(parent, parent); 2365 } 2366 addToTree(parent, dirs); 2367 inode.sibling = parent.child; 2368 parent.child = inode; 2369 if (name[name.length -1] == '/') 2370 dirs.add(inode); 2371 } 2372 2373 private void removeFromTree(IndexNode inode) { 2374 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name))); 2375 IndexNode child = parent.child; 2376 if (child.equals(inode)) { 2377 parent.child = child.sibling; 2378 } else { 2379 IndexNode last = child; 2380 while ((child = child.sibling) != null) { 2381 if (child.equals(inode)) { 2382 last.sibling = child.sibling; 2383 break; 2384 } else { 2385 last = child; 2386 } 2387 } 2388 } 2389 } 2390 2391 private void buildNodeTree() throws IOException { 2392 beginWrite(); 2393 try { 2394 HashSet<IndexNode> dirs = new HashSet<>(); 2395 IndexNode root = new IndexNode(ROOTPATH, -1); 2396 inodes.put(root, root); 2397 dirs.add(root); 2398 for (IndexNode node : inodes.keySet().toArray(new IndexNode[0])) { 2399 addToTree(node, dirs); 2400 } 2401 } finally { 2402 endWrite(); 2403 } 2404 } 2405 }