1 /* 2 * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 8 * - Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 11 * - Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * - Neither the name of Oracle nor the names of its 16 * contributors may be used to endorse or promote products derived 17 * from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 20 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 26 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 27 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 28 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * This source code is provided to illustrate the usage of a given feature 34 * or technique and has been deliberately simplified. Additional steps 35 * required for a production-quality application, such as security checks, 36 * input validation and proper error handling, might not be present in 37 * this sample code. 
38 */ 39 40 41 package com.sun.nio.zipfs; 42 43 import java.io.BufferedOutputStream; 44 import java.io.ByteArrayInputStream; 45 import java.io.ByteArrayOutputStream; 46 import java.io.EOFException; 47 import java.io.File; 48 import java.io.IOException; 49 import java.io.InputStream; 50 import java.io.OutputStream; 51 import java.nio.ByteBuffer; 52 import java.nio.MappedByteBuffer; 53 import java.nio.channels.*; 54 import java.nio.file.*; 55 import java.nio.file.attribute.*; 56 import java.nio.file.spi.*; 57 import java.util.*; 58 import java.util.concurrent.locks.ReadWriteLock; 59 import java.util.concurrent.locks.ReentrantReadWriteLock; 60 import java.util.regex.Pattern; 61 import java.util.zip.CRC32; 62 import java.util.zip.Inflater; 63 import java.util.zip.Deflater; 64 import java.util.zip.InflaterInputStream; 65 import java.util.zip.DeflaterOutputStream; 66 import java.util.zip.ZipException; 67 import java.util.zip.ZipError; 68 import static java.lang.Boolean.*; 69 import static com.sun.nio.zipfs.ZipConstants.*; 70 import static com.sun.nio.zipfs.ZipUtils.*; 71 import static java.nio.file.StandardOpenOption.*; 72 import static java.nio.file.StandardCopyOption.*; 73 74 /** 75 * A FileSystem built on a zip file 76 * 77 * @author Xueming Shen 78 */ 79 80 public class ZipFileSystem extends FileSystem { 81 82 private final ZipFileSystemProvider provider; 83 private final ZipPath defaultdir; 84 private boolean readOnly = false; 85 private final Path zfpath; 86 private final ZipCoder zc; 87 88 // configurable by env map 89 private final String defaultDir; // default dir for the file system 90 private final String nameEncoding; // default encoding for name/comment 91 private final boolean useTempFile; // use a temp file for newOS, default 92 // is to use BAOS for better performance 93 private final boolean createNew; // create a new zip if not exists 94 private static final boolean isWindows = 95 System.getProperty("os.name").startsWith("Windows"); 96 97 
    // Constructs a zip file system over the given zip/JAR file.
    // Recognized env keys: "create" ("true" creates an empty zip when absent),
    // "encoding" (entry name/comment charset, default UTF-8), "useTempFile",
    // and "default.dir" (must be absolute, default "/").
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env)
        throws IOException
    {
        // configurable env setup
        this.createNew = "true".equals(env.get("create"));
        this.nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.useTempFile = TRUE.equals(env.get("useTempFile"));
        this.defaultDir = env.containsKey("default.dir") ?
            (String)env.get("default.dir") : "/";
        if (this.defaultDir.charAt(0) != '/')
            throw new IllegalArgumentException("default dir should be absolute");

        this.provider = provider;
        this.zfpath = zfpath;
        if (Files.notExists(zfpath)) {
            if (createNew) {
                // write a zip containing only an empty END record
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        if (!Files.isWritable(zfpath))
            this.readOnly = true;
        this.zc = ZipCoder.get(nameEncoding);
        this.defaultdir = new ZipPath(this, getBytes(defaultDir));
        this.ch = Files.newByteChannel(zfpath, READ);
        this.cen = initCEN();   // parse the central directory into inodes
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // Throws ReadOnlyFileSystemException if this file system was opened read-only.
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        // a zip file system always has exactly one root, "/"
        ArrayList<Path> pathArr = new ArrayList<>();
        pathArr.add(new ZipPath(this, new byte[]{'/'}));
        return pathArr;
    }

    ZipPath getDefaultDir() {  // package private
        return defaultdir;
    }

    @Override
    public ZipPath getPath(String first, String... more) {
        String path;
        if (more.length == 0) {
            path = first;
        } else {
            // join the non-empty segments with '/'
            StringBuilder sb = new StringBuilder();
            sb.append(first);
            for (String segment: more) {
                if (segment.length() > 0) {
                    if (sb.length() > 0)
                        sb.append('/');
                    sb.append(segment);
                }
            }
            path = sb.toString();
        }
        return new ZipPath(this, getBytes(path));
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        // single store backing the single root
        ArrayList<FileStore> list = new ArrayList<>(1);
        list.add(new ZipFileStore(new ZipPath(this, new byte[]{'/'})));
        return list;
    }

    private static final Set<String> supportedFileAttributeViews =
        Collections.unmodifiableSet(
            new HashSet<String>(Arrays.asList("basic", "zip")));

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        // expected form: "glob:<pattern>" or "regex:<pattern>"
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equals(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);
        } else {
            if (syntax.equals(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    // Closes the file system: marks it closed, closes outstanding streams,
    // syncs any pending updates back to the zip file, releases pooled
    // inflaters/deflaters, and deletes temp files. The first IOException
    // from temp-file cleanup is thrown; later ones are suppressed onto it.
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;          // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {    // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is: copy)
                is.close();
        }
        beginWrite();                // lock and sync
        try {
            sync();
            ch.close();              // close the ch just in case no update
        } finally {                  // and sync does not close the ch
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p: tmppaths) {
                try {
                    Files.deleteIfExists(p);
                } catch (IOException x) {
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
            throw ioe;
    }

    // Returns the attributes of the entry at "path", or null if there is
    // neither an entry nor a pseudo directory (parent of other entries).
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry0(path);
            if (e == null) {
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                e = new Entry(inode.name);        // pseudo directory
                e.method = METHOD_STORED;         // STORED for dir
                e.mtime = e.atime = e.ctime = -1; // -1 for all times
            }
        } finally {
            endRead();
        }
        return new ZipFileAttributes(e);
    }

    // Sets any non-null time on the entry. A CEN entry is converted to a
    // COPY entry first so the change is written out on the next sync.
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry0(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;      // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    private ZipPath toZipPath(byte[] path) {
        // make it absolute
        byte[] p = new byte[path.length + 1];
        p[0] = '/';
        System.arraycopy(path, 0, p, 1, path.length);
        return new ZipPath(this, p);
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(byte[] path,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                ZipPath zp = toZipPath(child.name);
                if (filter == null || filter.accept(zp))
                    list.add(zp);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    // Creates a directory entry. NOTE(review): attrs is accepted but not
    // applied here — confirm whether that is intentional for this sample.
    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW);
            e.method = METHOD_STORED;            // STORED for dir
            update(e);
        } finally {
            endWrite();
        }
    }

    // Copies (or moves, when deletesrc is true) the entry "src" to "dst".
    // Honors REPLACE_EXISTING and COPY_ATTRIBUTES; without COPY_ATTRIBUTES
    // the destination gets fresh timestamps.
    void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry0(src);  // ensureOpen checked
            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry0(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime= u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        boolean hasTruncate = false;
        for (OpenOption opt: options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
            if (opt == TRUNCATE_EXISTING)
                hasTruncate = true;
        }
        if (hasAppend && hasTruncate)
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry0(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    // APPEND: pre-copy the existing contents into the new entry
                    InputStream is = getInputStream(e);
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    copyStream(is, os);
                    is.close();
                    return os;
                }
                return getOutputStream(new Entry(e, Entry.NEW));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(new Entry(path, Entry.NEW));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry0(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    // Validates that every option is a non-null StandardOpenOption and that
    // APPEND and TRUNCATE_EXISTING are not combined.
    private void checkOptions(Set<? extends OpenOption> options) {
        // check for options of null type and option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of
    // it. The returned channels are sequential only: position(long), and
    // read (write side) / write (read side) are unsupported.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();
            try {
                final WritableByteChannel wbc = Channels.newChannel(
                    newOutputStream(path, options.toArray(new OpenOption[0])));
                long leftover = 0;
                if (options.contains(StandardOpenOption.APPEND)) {
                    // start the position counter after the existing data
                    Entry e = getEntry0(path);
                    if (e != null && e.size >= 0)
                        leftover = e.size;
                }
                final long offset = leftover;
                return new SeekableByteChannel() {
                    long written = offset;
                    public boolean isOpen() {
                        return wbc.isOpen();
                    }

                    public long position() throws IOException {
                        return written;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        throw new UnsupportedOperationException();
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int write(ByteBuffer src) throws IOException {
                        int n = wbc.write(src);
                        written += n;
                        return n;
                    }

                    public long size() throws IOException {
                        return written;
                    }

                    public void close() throws IOException {
                        wbc.close();
                    }
                };
            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry0(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                final ReadableByteChannel rbc =
                    Channels.newChannel(getInputStream(e));
                final long size = e.size;
                return new SeekableByteChannel() {
                    long read = 0;
                    public boolean isOpen() {
                        return rbc.isOpen();
                    }

                    public long position() throws IOException {
                        return read;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        int n = rbc.read(dst);
                        if (n > 0) {
                            read += n;
                        }
                        return n;
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new NonWritableChannelException();
                    }

                    public int write (ByteBuffer src) throws IOException {
                        throw new NonWritableChannelException();
                    }

                    public long size() throws IOException {
                        return size;
                    }

                    public void close() throws IOException {
                        rbc.close();
                    }
                };
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry0(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = METHOD_DEFLATED;
            }
            // is there a better way to hook into the FileChannel's close method?
            // All operations below simply delegate to the temp-file channel;
            // implCloseChannel commits the result back into the zip on close.
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        // record the final size/time and queue the entry update
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private Set<ExChannelCloser> exChClosers = new HashSet<>();

    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());

    // Creates a temp file next to the zip file; when "path" names an existing
    // entry, its contents are copied into the temp file.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry0(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
    private void checkParents(byte[] path) throws IOException {
        beginRead();
        try {
            // walk up the parent chain; every ancestor must have an inode
            while ((path = getParent(path)) != null && path.length != 0) {
                if (!inodes.containsKey(IndexNode.keyOf(path))) {
                    throw new NoSuchFileException(getString(path));
                }
            }
        } finally {
            endRead();
        }
    }

    private static byte[] ROOTPATH = new byte[0];

    // Returns the parent path of "path" (including the trailing '/'),
    // or ROOTPATH when there is no parent.
    private static byte[] getParent(byte[] path) {
        int off = path.length - 1;
        if (off > 0 && path[off] == '/')  // isDirectory
            off--;
        while (off > 0 && path[off] != '/') { off--; }
        if (off <= 0)
            return ROOTPATH;
        return Arrays.copyOf(path, off + 1);
    }

    private final void beginWrite() {
        rwlock.writeLock().lock();
    }

    private final void endWrite() {
        rwlock.writeLock().unlock();
    }

    private final void beginRead() {
        rwlock.readLock().lock();
    }

    private final void endRead() {
        rwlock.readLock().unlock();
    }

    ///////////////////////////////////////////////////////////////////

    private volatile boolean isOpen = true;
    private final SeekableByteChannel ch; // channel to the zipfile
    final byte[] cen;     // CEN & ENDHDR
    private END end;
    private long locpos;  // position of first LOC header (usually 0)

    private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

    // name -> pos (in cen), IndexNode itself can be used as a "key"
    private LinkedHashMap<IndexNode, IndexNode> inodes;

    final byte[] getBytes(String name) {
        return zc.getBytes(name);
    }

    final String getString(byte[] name) {
        return zc.toString(name);
    }

    // NOTE(review): finalizers are deprecated in modern Java; close() should
    // be invoked explicitly by users of this sample.
    protected void finalize() throws IOException {
        close();
    }

    // Returns the absolute file position of the entry's data, resolving a
    // lazily-initialized locoff (-1) via the central-directory entry.
    private long getDataPos(Entry e) throws IOException {
        if (e.locoff == -1) {
            Entry e2 = getEntry0(e.name);
            if (e2 == null)
                throw new ZipException("invalid loc for entry <" + e.name + ">");
            e.locoff = e2.locoff;
        }
        byte[] buf = new byte[LOCHDR];
        if (readFullyAt(buf, 0, buf.length, e.locoff) != buf.length)
            throw new ZipException("invalid loc for entry <" + e.name + ">");
        return locpos + e.locoff + LOCHDR + LOCNAM(buf) + LOCEXT(buf);
    }

    // Reads len bytes of data from the specified offset into buf.
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        // serialize positioned reads on the shared channel
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards, block by block, from the end of the file
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P' &&
                    buf[i+1] == (byte)'K' &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    if (end.cenlen == ZIP64_MINVAL ||
                        end.cenoff == ZIP64_MINVAL ||
                        end.centot == ZIP64_MINVAL32)
                    {
                        // need to find the zip64 end;
                        byte[] loc64 = new byte[ZIP64_LOCHDR];
                        if (readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                            != loc64.length) {
                            return end;
                        }
                        long end64pos = ZIP64_LOCOFF(loc64);
                        byte[] end64buf = new byte[ZIP64_ENDHDR];
                        if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                            != end64buf.length) {
                            return end;
                        }
                        // end64 found, re-calculate everything.
                        end.cenlen = ZIP64_ENDSIZ(end64buf);
                        end.cenoff = ZIP64_ENDOFF(end64buf);
                        end.centot = (int)ZIP64_ENDTOT(end64buf); // assume total < 2g
                        end.endpos = end64pos;
                    }
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }

    // Reads zip file central directory. Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;    // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;    // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (CENSIG(cen, pos) != CENSIG)
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen = CENNAM(cen, pos);
            int elen = CENEXT(cen, pos);
            int clen = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0)
                zerror("invalid CEN header (encrypted entry)");
            if (method != METHOD_STORED && method != METHOD_DEFLATED)
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            if (pos + CENHDR + nlen > limit)
                zerror("invalid CEN header (bad header size)");
            byte[] name = Arrays.copyOfRange(cen, pos + CENHDR, pos + CENHDR + nlen);
            IndexNode inode = new IndexNode(name, pos);
            inodes.put(inode, inode);
            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = IndexNode.keyOf(null);

    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Inserts/replaces "e" in the inode map; new/copied entries are also
    // linked into their parent's child list.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
1148 private long copyLOCEntry(Entry e, boolean updateHeader, 1149 OutputStream os, 1150 long written, byte[] buf) 1151 throws IOException 1152 { 1153 long locoff = e.locoff; // where to read 1154 e.locoff = written; // update the e.locoff with new value 1155 1156 // calculate the size need to write out 1157 long size = 0; 1158 // if there is A ext 1159 if ((e.flag & FLAG_DATADESCR) != 0) { 1160 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1161 size = 24; 1162 else 1163 size = 16; 1164 } 1165 // read loc, use the original loc.elen/nlen 1166 if (readFullyAt(buf, 0, LOCHDR , locoff) != LOCHDR) 1167 throw new ZipException("loc: reading failed"); 1168 if (updateHeader) { 1169 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1170 size += e.csize; 1171 written = e.writeLOC(os) + size; 1172 } else { 1173 os.write(buf, 0, LOCHDR); // write out the loc header 1174 locoff += LOCHDR; 1175 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1176 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1177 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1178 written = LOCHDR + size; 1179 } 1180 int n; 1181 while (size > 0 && 1182 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1183 { 1184 if (size < n) 1185 n = (int)size; 1186 os.write(buf, 0, n); 1187 size -= n; 1188 locoff += n; 1189 } 1190 return written; 1191 } 1192 1193 // sync the zip file system, if there is any udpate 1194 private void sync() throws IOException { 1195 //System.out.printf("->sync(%s) starting....!%n", toString()); 1196 // check ex-closer 1197 if (!exChClosers.isEmpty()) { 1198 for (ExChannelCloser ecc : exChClosers) { 1199 if (ecc.streams.isEmpty()) { 1200 ecc.ch.close(); 1201 Files.delete(ecc.path); 1202 exChClosers.remove(ecc); 1203 } 1204 } 1205 } 1206 if (!hasUpdate) 1207 return; 1208 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1209 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1210 { 1211 ArrayList<Entry> elist = 
new ArrayList<>(inodes.size()); 1212 long written = 0; 1213 byte[] buf = new byte[8192]; 1214 Entry e = null; 1215 1216 // write loc 1217 for (IndexNode inode : inodes.values()) { 1218 if (inode instanceof Entry) { // an updated inode 1219 e = (Entry)inode; 1220 try { 1221 if (e.type == Entry.COPY) { 1222 // entry copy: the only thing changed is the "name" 1223 // and "nlen" in LOC header, so we udpate/rewrite the 1224 // LOC in new file and simply copy the rest (data and 1225 // ext) without enflating/deflating from the old zip 1226 // file LOC entry. 1227 written += copyLOCEntry(e, true, os, written, buf); 1228 } else { // NEW, FILECH or CEN 1229 e.locoff = written; 1230 written += e.writeLOC(os); // write loc header 1231 if (e.bytes != null) { // in-memory, deflated 1232 os.write(e.bytes); // already 1233 written += e.bytes.length; 1234 } else if (e.file != null) { // tmp file 1235 try (InputStream is = Files.newInputStream(e.file)) { 1236 int n; 1237 if (e.type == Entry.NEW) { // deflated already 1238 while ((n = is.read(buf)) != -1) { 1239 os.write(buf, 0, n); 1240 written += n; 1241 } 1242 } else if (e.type == Entry.FILECH) { 1243 // the data are not deflated, use ZEOS 1244 try (OutputStream os2 = new EntryOutputStream(e, os)) { 1245 while ((n = is.read(buf)) != -1) { 1246 os2.write(buf, 0, n); 1247 } 1248 } 1249 written += e.csize; 1250 if ((e.flag & FLAG_DATADESCR) != 0) 1251 written += e.writeEXT(os); 1252 } 1253 } 1254 Files.delete(e.file); 1255 tmppaths.remove(e.file); 1256 } else { 1257 // dir, 0-length data 1258 } 1259 } 1260 elist.add(e); 1261 } catch (IOException x) { 1262 x.printStackTrace(); // skip any in-accurate entry 1263 } 1264 } else { // unchanged inode 1265 if (inode.pos == -1) { 1266 continue; // pseudo directory node 1267 } 1268 e = Entry.readCEN(this, inode.pos); 1269 try { 1270 written += copyLOCEntry(e, false, os, written, buf); 1271 elist.add(e); 1272 } catch (IOException x) { 1273 x.printStackTrace(); // skip any wrong entry 1274 } 
1275 } 1276 } 1277 1278 // now write back the cen and end table 1279 end.cenoff = written; 1280 for (Entry entry : elist) { 1281 written += entry.writeCEN(os); 1282 } 1283 end.centot = elist.size(); 1284 end.cenlen = written - end.cenoff; 1285 end.write(os, written); 1286 } 1287 if (!streams.isEmpty()) { 1288 // 1289 // TBD: ExChannelCloser should not be necessary if we only 1290 // sync when being closed, all streams should have been 1291 // closed already. Keep the logic here for now. 1292 // 1293 // There are outstanding input streams open on existing "ch", 1294 // so, don't close the "cha" and delete the "file for now, let 1295 // the "ex-channel-closer" to handle them 1296 ExChannelCloser ecc = new ExChannelCloser( 1297 createTempFileInSameDirectoryAs(zfpath), 1298 ch, 1299 streams); 1300 Files.move(zfpath, ecc.path, REPLACE_EXISTING); 1301 exChClosers.add(ecc); 1302 streams = Collections.synchronizedSet(new HashSet<InputStream>()); 1303 } else { 1304 ch.close(); 1305 Files.delete(zfpath); 1306 } 1307 1308 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1309 hasUpdate = false; // clear 1310 /* 1311 if (isOpen) { 1312 ch = zfpath.newByteChannel(READ); // re-fresh "ch" and "cen" 1313 cen = initCEN(); 1314 } 1315 */ 1316 //System.out.printf("->sync(%s) done!%n", toString()); 1317 } 1318 1319 private IndexNode getInode(byte[] path) { 1320 if (path == null) 1321 throw new NullPointerException("path"); 1322 IndexNode key = IndexNode.keyOf(path); 1323 IndexNode inode = inodes.get(key); 1324 if (inode == null && 1325 (path.length == 0 || path[path.length -1] != '/')) { 1326 // if does not ends with a slash 1327 path = Arrays.copyOf(path, path.length + 1); 1328 path[path.length - 1] = '/'; 1329 inode = inodes.get(key.as(path)); 1330 } 1331 return inode; 1332 } 1333 1334 private Entry getEntry0(byte[] path) throws IOException { 1335 IndexNode inode = getInode(path); 1336 if (inode instanceof Entry) 1337 return (Entry)inode; 1338 if (inode == null || inode.pos == -1) 1339 
return null; 1340 return Entry.readCEN(this, inode.pos); 1341 } 1342 1343 public void deleteFile(byte[] path, boolean failIfNotExists) 1344 throws IOException 1345 { 1346 checkWritable(); 1347 1348 IndexNode inode = getInode(path); 1349 if (inode == null) { 1350 if (path != null && path.length == 0) 1351 throw new ZipException("root directory </> can't not be delete"); 1352 if (failIfNotExists) 1353 throw new NoSuchFileException(getString(path)); 1354 } else { 1355 if (inode.isDir() && inode.child != null) 1356 throw new DirectoryNotEmptyException(getString(path)); 1357 updateDelete(inode); 1358 } 1359 } 1360 1361 private static void copyStream(InputStream is, OutputStream os) 1362 throws IOException 1363 { 1364 byte[] copyBuf = new byte[8192]; 1365 int n; 1366 while ((n = is.read(copyBuf)) != -1) { 1367 os.write(copyBuf, 0, n); 1368 } 1369 } 1370 1371 // Returns an out stream for either 1372 // (1) writing the contents of a new entry, if the entry exits, or 1373 // (2) updating/replacing the contents of the specified existing entry. 1374 private OutputStream getOutputStream(Entry e) throws IOException { 1375 1376 if (e.mtime == -1) 1377 e.mtime = System.currentTimeMillis(); 1378 if (e.method == -1) 1379 e.method = METHOD_DEFLATED; // TBD: use default method 1380 // store size, compressed size, and crc-32 in LOC header 1381 e.flag = 0; 1382 if (zc.isUTF8()) 1383 e.flag |= FLAG_EFS; 1384 OutputStream os; 1385 if (useTempFile) { 1386 e.file = getTempPathForEntry(null); 1387 os = Files.newOutputStream(e.file, WRITE); 1388 } else { 1389 os = new ByteArrayOutputStream((e.size > 0)? 
(int)e.size : 8192); 1390 } 1391 return new EntryOutputStream(e, os); 1392 } 1393 1394 private InputStream getInputStream(Entry e) 1395 throws IOException 1396 { 1397 InputStream eis = null; 1398 1399 if (e.type == Entry.NEW) { 1400 if (e.bytes != null) 1401 eis = new ByteArrayInputStream(e.bytes); 1402 else if (e.file != null) 1403 eis = Files.newInputStream(e.file); 1404 else 1405 throw new ZipException("update entry data is missing"); 1406 } else if (e.type == Entry.FILECH) { 1407 // FILECH result is un-compressed. 1408 eis = Files.newInputStream(e.file); 1409 // TBD: wrap to hook close() 1410 // streams.add(eis); 1411 return eis; 1412 } else { // untouced CEN or COPY 1413 eis = new EntryInputStream(e, ch); 1414 } 1415 if (e.method == METHOD_DEFLATED) { 1416 // MORE: Compute good size for inflater stream: 1417 long bufSize = e.size + 2; // Inflater likes a bit of slack 1418 if (bufSize > 65536) 1419 bufSize = 8192; 1420 final long size = e.size; 1421 eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) { 1422 1423 private boolean isClosed = false; 1424 public void close() throws IOException { 1425 if (!isClosed) { 1426 releaseInflater(inf); 1427 this.in.close(); 1428 isClosed = true; 1429 streams.remove(this); 1430 } 1431 } 1432 // Override fill() method to provide an extra "dummy" byte 1433 // at the end of the input stream. This is required when 1434 // using the "nowrap" Inflater option. 
(it appears the new 1435 // zlib in 7 does not need it, but keep it for now) 1436 protected void fill() throws IOException { 1437 if (eof) { 1438 throw new EOFException( 1439 "Unexpected end of ZLIB input stream"); 1440 } 1441 len = this.in.read(buf, 0, buf.length); 1442 if (len == -1) { 1443 buf[0] = 0; 1444 len = 1; 1445 eof = true; 1446 } 1447 inf.setInput(buf, 0, len); 1448 } 1449 private boolean eof; 1450 1451 public int available() throws IOException { 1452 if (isClosed) 1453 return 0; 1454 long avail = size - inf.getBytesWritten(); 1455 return avail > (long) Integer.MAX_VALUE ? 1456 Integer.MAX_VALUE : (int) avail; 1457 } 1458 }; 1459 } else if (e.method == METHOD_STORED) { 1460 // TBD: wrap/ it does not seem necessary 1461 } else { 1462 throw new ZipException("invalid compression method"); 1463 } 1464 streams.add(eis); 1465 return eis; 1466 } 1467 1468 // Inner class implementing the input stream used to read 1469 // a (possibly compressed) zip file entry. 1470 private class EntryInputStream extends InputStream { 1471 private final SeekableByteChannel zfch; // local ref to zipfs's "ch". 
zipfs.ch might 1472 // point to a new channel after sync() 1473 private long pos; // current position within entry data 1474 protected long rem; // number of remaining bytes within entry 1475 protected final long size; // uncompressed size of this entry 1476 1477 EntryInputStream(Entry e, SeekableByteChannel zfch) 1478 throws IOException 1479 { 1480 this.zfch = zfch; 1481 rem = e.csize; 1482 size = e.size; 1483 pos = getDataPos(e); 1484 } 1485 public int read(byte b[], int off, int len) throws IOException { 1486 ensureOpen(); 1487 if (rem == 0) { 1488 return -1; 1489 } 1490 if (len <= 0) { 1491 return 0; 1492 } 1493 if (len > rem) { 1494 len = (int) rem; 1495 } 1496 // readFullyAt() 1497 long n = 0; 1498 ByteBuffer bb = ByteBuffer.wrap(b); 1499 bb.position(off); 1500 bb.limit(off + len); 1501 synchronized(zfch) { 1502 n = zfch.position(pos).read(bb); 1503 } 1504 if (n > 0) { 1505 pos += n; 1506 rem -= n; 1507 } 1508 if (rem == 0) { 1509 close(); 1510 } 1511 return (int)n; 1512 } 1513 public int read() throws IOException { 1514 byte[] b = new byte[1]; 1515 if (read(b, 0, 1) == 1) { 1516 return b[0] & 0xff; 1517 } else { 1518 return -1; 1519 } 1520 } 1521 public long skip(long n) throws IOException { 1522 ensureOpen(); 1523 if (n > rem) 1524 n = rem; 1525 pos += n; 1526 rem -= n; 1527 if (rem == 0) { 1528 close(); 1529 } 1530 return n; 1531 } 1532 public int available() { 1533 return rem > Integer.MAX_VALUE ? 
Integer.MAX_VALUE : (int) rem; 1534 } 1535 public long size() { 1536 return size; 1537 } 1538 public void close() { 1539 rem = 0; 1540 streams.remove(this); 1541 } 1542 } 1543 1544 class EntryOutputStream extends DeflaterOutputStream 1545 { 1546 private CRC32 crc; 1547 private Entry e; 1548 private long written; 1549 1550 EntryOutputStream(Entry e, OutputStream os) 1551 throws IOException 1552 { 1553 super(os, getDeflater()); 1554 if (e == null) 1555 throw new NullPointerException("Zip entry is null"); 1556 this.e = e; 1557 crc = new CRC32(); 1558 } 1559 1560 @Override 1561 public void write(byte b[], int off, int len) throws IOException { 1562 if (e.type != Entry.FILECH) // only from sync 1563 ensureOpen(); 1564 if (off < 0 || len < 0 || off > b.length - len) { 1565 throw new IndexOutOfBoundsException(); 1566 } else if (len == 0) { 1567 return; 1568 } 1569 switch (e.method) { 1570 case METHOD_DEFLATED: 1571 super.write(b, off, len); 1572 break; 1573 case METHOD_STORED: 1574 written += len; 1575 out.write(b, off, len); 1576 break; 1577 default: 1578 throw new ZipException("invalid compression method"); 1579 } 1580 crc.update(b, off, len); 1581 } 1582 1583 @Override 1584 public void close() throws IOException { 1585 // TBD ensureOpen(); 1586 switch (e.method) { 1587 case METHOD_DEFLATED: 1588 finish(); 1589 e.size = def.getBytesRead(); 1590 e.csize = def.getBytesWritten(); 1591 e.crc = crc.getValue(); 1592 break; 1593 case METHOD_STORED: 1594 // we already know that both e.size and e.csize are the same 1595 e.size = e.csize = written; 1596 e.crc = crc.getValue(); 1597 break; 1598 default: 1599 throw new ZipException("invalid compression method"); 1600 } 1601 //crc.reset(); 1602 if (out instanceof ByteArrayOutputStream) 1603 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1604 1605 if (e.type == Entry.FILECH) { 1606 releaseDeflater(def); 1607 return; 1608 } 1609 super.close(); 1610 releaseDeflater(def); 1611 update(e); 1612 } 1613 } 1614 1615 static void 
zerror(String msg) { 1616 throw new ZipError(msg); 1617 } 1618 1619 // Maxmum number of de/inflater we cache 1620 private final int MAX_FLATER = 20; 1621 // List of available Inflater objects for decompression 1622 private final List<Inflater> inflaters = new ArrayList<>(); 1623 1624 // Gets an inflater from the list of available inflaters or allocates 1625 // a new one. 1626 private Inflater getInflater() { 1627 synchronized (inflaters) { 1628 int size = inflaters.size(); 1629 if (size > 0) { 1630 Inflater inf = inflaters.remove(size - 1); 1631 return inf; 1632 } else { 1633 return new Inflater(true); 1634 } 1635 } 1636 } 1637 1638 // Releases the specified inflater to the list of available inflaters. 1639 private void releaseInflater(Inflater inf) { 1640 synchronized (inflaters) { 1641 if (inflaters.size() < MAX_FLATER) { 1642 inf.reset(); 1643 inflaters.add(inf); 1644 } else { 1645 inf.end(); 1646 } 1647 } 1648 } 1649 1650 // List of available Deflater objects for compression 1651 private final List<Deflater> deflaters = new ArrayList<>(); 1652 1653 // Gets an deflater from the list of available deflaters or allocates 1654 // a new one. 1655 private Deflater getDeflater() { 1656 synchronized (deflaters) { 1657 int size = deflaters.size(); 1658 if (size > 0) { 1659 Deflater def = deflaters.remove(size - 1); 1660 return def; 1661 } else { 1662 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1663 } 1664 } 1665 } 1666 1667 // Releases the specified inflater to the list of available inflaters. 
1668 private void releaseDeflater(Deflater def) { 1669 synchronized (deflaters) { 1670 if (inflaters.size() < MAX_FLATER) { 1671 def.reset(); 1672 deflaters.add(def); 1673 } else { 1674 def.end(); 1675 } 1676 } 1677 } 1678 1679 // End of central directory record 1680 static class END { 1681 int disknum; 1682 int sdisknum; 1683 int endsub; // endsub 1684 int centot; // 4 bytes 1685 long cenlen; // 4 bytes 1686 long cenoff; // 4 bytes 1687 int comlen; // comment length 1688 byte[] comment; 1689 1690 /* members of Zip64 end of central directory locator */ 1691 int diskNum; 1692 long endpos; 1693 int disktot; 1694 1695 void write(OutputStream os, long offset) throws IOException { 1696 boolean hasZip64 = false; 1697 long xlen = cenlen; 1698 long xoff = cenoff; 1699 if (xlen >= ZIP64_MINVAL) { 1700 xlen = ZIP64_MINVAL; 1701 hasZip64 = true; 1702 } 1703 if (xoff >= ZIP64_MINVAL) { 1704 xoff = ZIP64_MINVAL; 1705 hasZip64 = true; 1706 } 1707 int count = centot; 1708 if (count >= ZIP64_MINVAL32) { 1709 count = ZIP64_MINVAL32; 1710 hasZip64 = true; 1711 } 1712 if (hasZip64) { 1713 long off64 = offset; 1714 //zip64 end of central directory record 1715 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1716 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1717 writeShort(os, 45); // version made by 1718 writeShort(os, 45); // version needed to extract 1719 writeInt(os, 0); // number of this disk 1720 writeInt(os, 0); // central directory start disk 1721 writeLong(os, centot); // number of directory entires on disk 1722 writeLong(os, centot); // number of directory entires 1723 writeLong(os, cenlen); // length of central directory 1724 writeLong(os, cenoff); // offset of central directory 1725 1726 //zip64 end of central directory locator 1727 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1728 writeInt(os, 0); // zip64 END start disk 1729 writeLong(os, off64); // offset of zip64 END 1730 writeInt(os, 1); // total number of disks (?) 
1731 } 1732 writeInt(os, ENDSIG); // END record signature 1733 writeShort(os, 0); // number of this disk 1734 writeShort(os, 0); // central directory start disk 1735 writeShort(os, count); // number of directory entries on disk 1736 writeShort(os, count); // total number of directory entries 1737 writeInt(os, xlen); // length of central directory 1738 writeInt(os, xoff); // offset of central directory 1739 if (comment != null) { // zip file comment 1740 writeShort(os, comment.length); 1741 writeBytes(os, comment); 1742 } else { 1743 writeShort(os, 0); 1744 } 1745 } 1746 } 1747 1748 // Internal node that links a "name" to its pos in cen table. 1749 // The node itself can be used as a "key" to lookup itself in 1750 // the HashMap inodes. 1751 static class IndexNode { 1752 byte[] name; 1753 int hashcode; // node is hashable/hashed by its name 1754 int pos = -1; // position in cen table, -1 menas the 1755 // entry does not exists in zip file 1756 IndexNode(byte[] name, int pos) { 1757 name(name); 1758 this.pos = pos; 1759 } 1760 1761 final static IndexNode keyOf(byte[] name) { // get a lookup key; 1762 return new IndexNode(name, -1); 1763 } 1764 1765 final void name(byte[] name) { 1766 this.name = name; 1767 this.hashcode = Arrays.hashCode(name); 1768 } 1769 1770 final IndexNode as(byte[] name) { // reuse the node, mostly 1771 name(name); // as a lookup "key" 1772 return this; 1773 } 1774 1775 boolean isDir() { 1776 return name != null && 1777 (name.length == 0 || name[name.length - 1] == '/'); 1778 } 1779 1780 public boolean equals(Object other) { 1781 if (!(other instanceof IndexNode)) { 1782 return false; 1783 } 1784 return Arrays.equals(name, ((IndexNode)other).name); 1785 } 1786 1787 public int hashCode() { 1788 return hashcode; 1789 } 1790 1791 IndexNode() {} 1792 IndexNode sibling; 1793 IndexNode child; // 1st child 1794 } 1795 1796 static class Entry extends IndexNode { 1797 1798 static final int CEN = 1; // entry read from cen 1799 static final int NEW = 2; // 
updated contents in bytes or file 1800 static final int FILECH = 3; // fch update in "file" 1801 static final int COPY = 4; // copy of a CEN entry 1802 1803 1804 byte[] bytes; // updated content bytes 1805 Path file; // use tmp file to store bytes; 1806 int type = CEN; // default is the entry read from cen 1807 1808 // entry attributes 1809 int version; 1810 int flag; 1811 int method = -1; // compression method 1812 long mtime = -1; // last modification time (in DOS time) 1813 long atime = -1; // last access time 1814 long ctime = -1; // create time 1815 long crc = -1; // crc-32 of entry data 1816 long csize = -1; // compressed size of entry data 1817 long size = -1; // uncompressed size of entry data 1818 byte[] extra; 1819 1820 // cen 1821 int versionMade; 1822 int disk; 1823 int attrs; 1824 long attrsEx; 1825 long locoff; 1826 byte[] comment; 1827 1828 Entry() {} 1829 1830 Entry(byte[] name) { 1831 name(name); 1832 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1833 this.crc = 0; 1834 this.size = 0; 1835 this.csize = 0; 1836 this.method = METHOD_DEFLATED; 1837 } 1838 1839 Entry(byte[] name, int type) { 1840 this(name); 1841 this.type = type; 1842 } 1843 1844 Entry (Entry e, int type) { 1845 name(e.name); 1846 this.version = e.version; 1847 this.ctime = e.ctime; 1848 this.atime = e.atime; 1849 this.mtime = e.mtime; 1850 this.crc = e.crc; 1851 this.size = e.size; 1852 this.csize = e.csize; 1853 this.method = e.method; 1854 this.extra = e.extra; 1855 this.versionMade = e.versionMade; 1856 this.disk = e.disk; 1857 this.attrs = e.attrs; 1858 this.attrsEx = e.attrsEx; 1859 this.locoff = e.locoff; 1860 this.comment = e.comment; 1861 this.type = type; 1862 } 1863 1864 Entry (byte[] name, Path file, int type) { 1865 this(name, type); 1866 this.file = file; 1867 this.method = METHOD_STORED; 1868 } 1869 1870 int version() throws ZipException { 1871 if (method == METHOD_DEFLATED) 1872 return 20; 1873 else if (method == METHOD_STORED) 1874 return 10; 1875 
throw new ZipException("unsupported compression method"); 1876 } 1877 1878 ///////////////////// CEN ////////////////////// 1879 static Entry readCEN(ZipFileSystem zipfs, int pos) 1880 throws IOException 1881 { 1882 return new Entry().cen(zipfs, pos); 1883 } 1884 1885 private Entry cen(ZipFileSystem zipfs, int pos) 1886 throws IOException 1887 { 1888 byte[] cen = zipfs.cen; 1889 if (CENSIG(cen, pos) != CENSIG) 1890 zerror("invalid CEN header (bad signature)"); 1891 versionMade = CENVEM(cen, pos); 1892 version = CENVER(cen, pos); 1893 flag = CENFLG(cen, pos); 1894 method = CENHOW(cen, pos); 1895 mtime = dosToJavaTime(CENTIM(cen, pos)); 1896 crc = CENCRC(cen, pos); 1897 csize = CENSIZ(cen, pos); 1898 size = CENLEN(cen, pos); 1899 int nlen = CENNAM(cen, pos); 1900 int elen = CENEXT(cen, pos); 1901 int clen = CENCOM(cen, pos); 1902 disk = CENDSK(cen, pos); 1903 attrs = CENATT(cen, pos); 1904 attrsEx = CENATX(cen, pos); 1905 locoff = CENOFF(cen, pos); 1906 1907 pos += CENHDR; 1908 name(Arrays.copyOfRange(cen, pos, pos + nlen)); 1909 1910 pos += nlen; 1911 if (elen > 0) { 1912 extra = Arrays.copyOfRange(cen, pos, pos + elen); 1913 pos += elen; 1914 readExtra(zipfs); 1915 } 1916 if (clen > 0) { 1917 comment = Arrays.copyOfRange(cen, pos, pos + clen); 1918 } 1919 return this; 1920 } 1921 1922 int writeCEN(OutputStream os) throws IOException 1923 { 1924 int written = CENHDR; 1925 int version0 = version(); 1926 long csize0 = csize; 1927 long size0 = size; 1928 long locoff0 = locoff; 1929 int elen64 = 0; // extra for ZIP64 1930 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 1931 int elenEXTT = 0; // extra for Extended Timestamp 1932 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 1933 1934 // confirm size/length 1935 int nlen = (name != null) ? name.length : 0; 1936 int elen = (extra != null) ? extra.length : 0; 1937 int eoff = 0; 1938 int clen = (comment != null) ? 
comment.length : 0; 1939 if (csize >= ZIP64_MINVAL) { 1940 csize0 = ZIP64_MINVAL; 1941 elen64 += 8; // csize(8) 1942 } 1943 if (size >= ZIP64_MINVAL) { 1944 size0 = ZIP64_MINVAL; // size(8) 1945 elen64 += 8; 1946 } 1947 if (locoff >= ZIP64_MINVAL) { 1948 locoff0 = ZIP64_MINVAL; 1949 elen64 += 8; // offset(8) 1950 } 1951 if (elen64 != 0) { 1952 elen64 += 4; // header and data sz 4 bytes 1953 } 1954 while (eoff + 4 < elen) { 1955 int tag = SH(extra, eoff); 1956 int sz = SH(extra, eoff + 2); 1957 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 1958 foundExtraTime = true; 1959 } 1960 eoff += (4 + sz); 1961 } 1962 if (!foundExtraTime) { 1963 if (isWindows) { // use NTFS 1964 elenNTFS = 36; // total 36 bytes 1965 } else { // Extended Timestamp otherwise 1966 elenEXTT = 9; // only mtime in cen 1967 } 1968 } 1969 writeInt(os, CENSIG); // CEN header signature 1970 if (elen64 != 0) { 1971 writeShort(os, 45); // ver 4.5 for zip64 1972 writeShort(os, 45); 1973 } else { 1974 writeShort(os, version0); // version made by 1975 writeShort(os, version0); // version needed to extract 1976 } 1977 writeShort(os, flag); // general purpose bit flag 1978 writeShort(os, method); // compression method 1979 // last modification time 1980 writeInt(os, (int)javaToDosTime(mtime)); 1981 writeInt(os, crc); // crc-32 1982 writeInt(os, csize0); // compressed size 1983 writeInt(os, size0); // uncompressed size 1984 writeShort(os, name.length); 1985 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 1986 1987 if (comment != null) { 1988 writeShort(os, Math.min(clen, 0xffff)); 1989 } else { 1990 writeShort(os, 0); 1991 } 1992 writeShort(os, 0); // starting disk number 1993 writeShort(os, 0); // internal file attributes (unused) 1994 writeInt(os, 0); // external file attributes (unused) 1995 writeInt(os, locoff0); // relative offset of local header 1996 writeBytes(os, name); 1997 if (elen64 != 0) { 1998 writeShort(os, EXTID_ZIP64);// Zip64 extra 1999 writeShort(os, elen64 - 4); // size of "this" extra 
block 2000 if (size0 == ZIP64_MINVAL) 2001 writeLong(os, size); 2002 if (csize0 == ZIP64_MINVAL) 2003 writeLong(os, csize); 2004 if (locoff0 == ZIP64_MINVAL) 2005 writeLong(os, locoff); 2006 } 2007 if (elenNTFS != 0) { 2008 writeShort(os, EXTID_NTFS); 2009 writeShort(os, elenNTFS - 4); 2010 writeInt(os, 0); // reserved 2011 writeShort(os, 0x0001); // NTFS attr tag 2012 writeShort(os, 24); 2013 writeLong(os, javaToWinTime(mtime)); 2014 writeLong(os, javaToWinTime(atime)); 2015 writeLong(os, javaToWinTime(ctime)); 2016 } 2017 if (elenEXTT != 0) { 2018 writeShort(os, EXTID_EXTT); 2019 writeShort(os, elenEXTT - 4); 2020 if (ctime == -1) 2021 os.write(0x3); // mtime and atime 2022 else 2023 os.write(0x7); // mtime, atime and ctime 2024 writeInt(os, javaToUnixTime(mtime)); 2025 } 2026 if (extra != null) // whatever not recognized 2027 writeBytes(os, extra); 2028 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2029 writeBytes(os, comment); 2030 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2031 } 2032 2033 ///////////////////// LOC ////////////////////// 2034 static Entry readLOC(ZipFileSystem zipfs, long pos) 2035 throws IOException 2036 { 2037 return readLOC(zipfs, pos, new byte[1024]); 2038 } 2039 2040 static Entry readLOC(ZipFileSystem zipfs, long pos, byte[] buf) 2041 throws IOException 2042 { 2043 return new Entry().loc(zipfs, pos, buf); 2044 } 2045 2046 Entry loc(ZipFileSystem zipfs, long pos, byte[] buf) 2047 throws IOException 2048 { 2049 assert (buf.length >= LOCHDR); 2050 if (zipfs.readFullyAt(buf, 0, LOCHDR , pos) != LOCHDR) 2051 throw new ZipException("loc: reading failed"); 2052 if (LOCSIG(buf) != LOCSIG) 2053 throw new ZipException("loc: wrong sig ->" 2054 + Long.toString(LOCSIG(buf), 16)); 2055 //startPos = pos; 2056 version = LOCVER(buf); 2057 flag = LOCFLG(buf); 2058 method = LOCHOW(buf); 2059 mtime = dosToJavaTime(LOCTIM(buf)); 2060 crc = LOCCRC(buf); 2061 csize = LOCSIZ(buf); 2062 size = LOCLEN(buf); 2063 int 
nlen = LOCNAM(buf); 2064 int elen = LOCEXT(buf); 2065 2066 name = new byte[nlen]; 2067 if (zipfs.readFullyAt(name, 0, nlen, pos + LOCHDR) != nlen) { 2068 throw new ZipException("loc: name reading failed"); 2069 } 2070 if (elen > 0) { 2071 extra = new byte[elen]; 2072 if (zipfs.readFullyAt(extra, 0, elen, pos + LOCHDR + nlen) 2073 != elen) { 2074 throw new ZipException("loc: ext reading failed"); 2075 } 2076 } 2077 pos += (LOCHDR + nlen + elen); 2078 if ((flag & FLAG_DATADESCR) != 0) { 2079 // Data Descriptor 2080 Entry e = zipfs.getEntry0(name); // get the size/csize from cen 2081 if (e == null) 2082 throw new ZipException("loc: name not found in cen"); 2083 size = e.size; 2084 csize = e.csize; 2085 pos += (method == METHOD_STORED ? size : csize); 2086 if (size >= ZIP64_MINVAL || csize >= ZIP64_MINVAL) 2087 pos += 24; 2088 else 2089 pos += 16; 2090 } else { 2091 if (extra != null && 2092 (size == ZIP64_MINVAL || csize == ZIP64_MINVAL)) { 2093 // zip64 ext: must include both size and csize 2094 int off = 0; 2095 while (off + 20 < elen) { // HeaderID+DataSize+Data 2096 int sz = SH(extra, off + 2); 2097 if (SH(extra, off) == EXTID_ZIP64 && sz == 16) { 2098 size = LL(extra, off + 4); 2099 csize = LL(extra, off + 12); 2100 break; 2101 } 2102 off += (sz + 4); 2103 } 2104 } 2105 pos += (method == METHOD_STORED ? size : csize); 2106 } 2107 return this; 2108 } 2109 2110 int writeLOC(OutputStream os) 2111 throws IOException 2112 { 2113 writeInt(os, LOCSIG); // LOC header signature 2114 int version = version(); 2115 int nlen = (name != null) ? name.length : 0; 2116 int elen = (extra != null) ? 
                extra.length : 0;
            boolean foundExtraTime = false;     // true if extra data already carries a timestamp block
            int eoff = 0;
            int elen64 = 0;                     // bytes added for the ZIP64 extra block
            int elenEXTT = 0;                   // bytes added for the unix "extended timestamp" block
            int elenNTFS = 0;                   // bytes added for the NTFS timestamp block
            if ((flag & FLAG_DATADESCR) != 0) {
                // Sizes/CRC are not known yet: write zeros here, the real
                // values follow the entry data in a data descriptor (writeEXT).
                writeShort(os, version());      // version needed to extract
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;                // headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);         // ver 4.5 for zip64
                } else {
                    writeShort(os, version());  // version needed to extract
                }
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);              // crc-32
                if (elen64 != 0) {
                    // 32-bit slots are saturated; real values go into the
                    // ZIP64 extra field written below.
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);        // compressed size
                    writeInt(os, size);         // uncompressed size
                }
            }
            // Scan existing extra blocks for a timestamp field (EXTT or NTFS);
            // if one is already present we must not append another.
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;              // NTFS, total 36 bytes
                } else {                        // on unix use "ext time"
                    elenEXTT = 9;               // headid(2) + size(2) + flags(1) + mtime(4)
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, name.length);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, name);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);                // reserved
                writeShort(os, 0x0001);         // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);   // size for the following data block
                int fbyte = 0x1;
                if (atime != -1)                // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)                // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);                // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            return LOCHDR + name.length + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor

        /**
         * Writes the data descriptor (EXT record) that follows the entry data
         * when sizes/crc were unknown at LOC-header time.
         *
         * @param os stream to write to
         * @return number of bytes written: 24 when ZIP64 sizes are needed,
         *         16 otherwise
         * @throws IOException if the underlying stream fails
         */
        int writeEXT(OutputStream os)
            throws IOException
        {
            writeInt(os, EXTSIG);               // EXT header signature
            writeInt(os, crc);                  // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                // 8-byte sizes when either value overflows the 32-bit slots
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);            // compressed size
                writeInt(os, size);             // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra

        /**
         * Parses the CEN extra data of this entry: ZIP64 values replace the
         * saturated size/csize/locoff fields, and NTFS/"extended timestamp"
         * blocks replace the (less precise) DOS times. Recognized blocks are
         * consumed; unrecognized blocks are compacted and kept in
         * {@code extra}, which becomes {@code null} when nothing is kept.
         *
         * @param zipfs file system used to read the LOC header (the CEN EXTT
         *              block only carries mtime, so a/ctime come from the LOC)
         * @throws IOException  if reading the LOC record fails
         * @throws ZipException if the LOC record is short or has a bad signature
         */
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;                     // write cursor for kept (unknown) blocks
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)            // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // 64-bit values are present only for the fields whose
                    // 32-bit CEN slot holds ZIP64_MINVAL, in this fixed order.
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)     // invalid zip64 extra
                            break;              // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)                // reserved(4) + tag(2) + size(2) + 3 times(24)
                        break;
                    pos += 4;                   // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)   // NTFS attr tag #1 (times)
                        break;
                    if (SH(extra, pos + 2) != 24)   // expect exactly m/a/ctime
                        break;
                    // override the loc field, datetime here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (LOCSIG(buf) != LOCSIG)
                        throw new ZipException("loc: wrong sig ->"
                                               + Long.toString(LOCSIG(buf), 16));

                    int locElen = LOCEXT(buf);
                    if (locElen < 9)            // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    // re-read: just the LOC extra data, located after the name
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    int locPos = 0;
                    // find the EXTT block inside the LOC extra data
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        // flags byte says which of m/a/ctime follow, in order
                        int flag = CH(buf, locPos++);
                        if ((flag & 0x1) != 0) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:    // unknown tag
                    // keep unrecognized blocks, compacting them to the front
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            // NOTE(review): when every block is unrecognized
            // (newOff == extra.length) this branch also nulls out extra,
            // discarding the data just compacted — confirm that is intended.
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;
        }
    }

    /**
     * Bundles a zip file's path, its open channel and the set of streams
     * reading from it, so the channel can be closed (and the file deleted)
     * once all streams are done.
     */
    private static class ExChannelCloser {
        Path path;                      // path of the (temporary) zip file
        SeekableByteChannel ch;         // open channel to be closed later
        Set<InputStream> streams;       // streams still reading from ch
        ExChannelCloser(Path path,
                        SeekableByteChannel ch,
                        Set<InputStream> streams)
        {
            this.path = path;
            this.ch = ch;
            this.streams = streams;
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourselves as
    // implemented below.
    // Root of the in-memory directory tree.
    // NOTE(review): never assigned in this chunk; buildNodeTree() builds a
    // local root node and stores it in 'inodes' — confirm this field is used.
    private IndexNode root;

    /**
     * Links {@code inode} into the parent/child/sibling tree, recursively
     * creating pseudo directory entries (offset -1) for parents that have
     * no entry of their own in the zip.
     *
     * @param inode node to link
     * @param dirs  directory nodes already linked; guards against linking
     *              the same directory twice
     */
    private void addToTree(IndexNode inode, HashSet<IndexNode> dirs) {
        if (dirs.contains(inode)) {
            return;                     // directory already linked
        }
        IndexNode parent;
        byte[] name = inode.name;
        byte[] pname = getParent(name);
        // LOOKUPKEY.as(pname) mutates the shared lookup key in place, so the
        // get(LOOKUPKEY) below queries the same parent name just tested.
        // NOTE(review): presumably safe only single-threaded / under the
        // write lock — confirm against callers.
        if (inodes.containsKey(LOOKUPKEY.as(pname))) {
            parent = inodes.get(LOOKUPKEY);
        } else { // pseudo directory entry
            parent = new IndexNode(pname, -1);
            inodes.put(parent, parent);
        }
        // link the (possibly new) parent first, then push this node onto
        // the head of the parent's child list
        addToTree(parent, dirs);
        inode.sibling = parent.child;
        parent.child = inode;
        // trailing '/' marks a directory entry; remember it so it is
        // linked at most once
        if (name[name.length -1] == '/')
            dirs.add(inode);
    }

    /**
     * Unlinks {@code inode} from its parent's singly-linked child list.
     * Assumes the parent exists in {@code inodes} and {@code inode} is
     * present in its child list (otherwise this throws NPE).
     */
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            // removing the head of the child list
            parent.child = child.sibling;
        } else {
            // walk the sibling chain keeping a trailing pointer
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    /**
     * Builds the whole directory tree from the flat {@code inodes} map,
     * under the write lock. A fresh root node is inserted, then every
     * existing node is linked under its (possibly pseudo) parent.
     *
     * @throws IOException declared for symmetry with callers; the visible
     *         body does not itself perform I/O
     */
    private void buildNodeTree() throws IOException {
        beginWrite();
        try {
            HashSet<IndexNode> dirs = new HashSet<>();
            IndexNode root = new IndexNode(ROOTPATH, -1);
            inodes.put(root, root);
            dirs.add(root);
            // snapshot the key set: addToTree inserts pseudo parents into
            // 'inodes' while we iterate
            for (IndexNode node : inodes.keySet().toArray(new IndexNode[0])) {
                addToTree(node, dirs);
            }
        } finally {
            endWrite();
        }
    }
}