/*
 * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package jdk.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.*;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.nio.file.spi.*;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Inflater;
import java.util.zip.Deflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.ZipException;
import java.util.zip.ZipError;
import static java.lang.Boolean.*;
import static jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;
import static java.nio.file.StandardOpenOption.*;
import static java.nio.file.StandardCopyOption.*;

/**
 * A FileSystem built on a zip file
 *
 * @author Xueming Shen
 */

class ZipFileSystem extends FileSystem {

    private final ZipFileSystemProvider provider;
    private final ZipPath defaultdir;   // default directory, as a ZipPath
    private boolean readOnly = false;   // set when the underlying file is not writable
    private final Path zfpath;          // path of the underlying zip file
    private final ZipCoder zc;          // coder used to encode/decode entry names

    // configurable by env map
    private final String  defaultDir;    // default dir for the file system
    private final String  nameEncoding;  // default encoding for name/comment
    private final boolean useTempFile;   // use a temp file for newOS, default
                                         // is to use BAOS for better performance
    private final boolean createNew;     // create a new zip if not exists
    private static final boolean isWindows = AccessController.doPrivileged(
        (PrivilegedAction<Boolean>) () -> System.getProperty("os.name")
                                                .startsWith("Windows"));

    /**
     * Creates a zip file system over {@code zfpath}.
     *
     * Recognized {@code env} keys:
     *   "create"      - "true" creates a new, empty zip when the file is absent
     *   "encoding"    - charset for entry names/comments (default "UTF-8")
     *   "useTempFile" - Boolean.TRUE buffers new entry data in a temp file
     *   "default.dir" - default directory, must be absolute (default "/")
     *
     * @throws FileSystemNotFoundException if the zip file does not exist and
     *         "create" was not requested
     * @throws IllegalArgumentException if "default.dir" is not absolute
     */
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env)
        throws IOException
    {
        // configurable env setup
        this.createNew    = "true".equals(env.get("create"));
        this.nameEncoding = env.containsKey("encoding") ?
                            (String)env.get("encoding") : "UTF-8";
        this.useTempFile  = TRUE.equals(env.get("useTempFile"));
        this.defaultDir   = env.containsKey("default.dir") ?
                            (String)env.get("default.dir") : "/";
        if (this.defaultDir.charAt(0) != '/')
            throw new IllegalArgumentException("default dir should be absolute");

        this.provider = provider;
        this.zfpath = zfpath;
        if (Files.notExists(zfpath)) {
            if (createNew) {
                // write an empty END record so the result is a valid zip file
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath));
        if (!writeable)
            this.readOnly = true;   // fall back to a read-only file system
        this.zc = ZipCoder.get(nameEncoding);
        this.defaultdir = new ZipPath(this, getBytes(defaultDir));
        this.ch = Files.newByteChannel(zfpath, READ);
        this.cen = initCEN();       // reads the central directory, builds inodes
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // Throws if this file system was opened read-only.
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }
    @Override
    public Iterable<Path> getRootDirectories() {
        ArrayList<Path> pathArr = new ArrayList<>();
        pathArr.add(new ZipPath(this, new byte[]{'/'}));  // "/" is the only root
        return pathArr;
    }

    ZipPath getDefaultDir() {  // package private
        return defaultdir;
    }

    @Override
    public ZipPath getPath(String first, String... more) {
        String path;
        if (more.length == 0) {
            path = first;
        } else {
            // join the non-empty segments with '/'
            StringBuilder sb = new StringBuilder();
            sb.append(first);
            for (String segment: more) {
                if (segment.length() > 0) {
                    if (sb.length() > 0)
                        sb.append('/');
                    sb.append(segment);
                }
            }
            path = sb.toString();
        }
        return new ZipPath(this, getBytes(path));
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        ArrayList<FileStore> list = new ArrayList<>(1);
        list.add(new ZipFileStore(new ZipPath(this, new byte[]{'/'})));
        return list;
    }

    private static final Set<String> supportedFileAttributeViews =
        Collections.unmodifiableSet(
            new HashSet<String>(Arrays.asList("basic", "zip")));

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equals(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);  // translate glob to a regex
        } else {
            if (syntax.equals(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    // Closes the file system: marks it closed, closes outstanding streams,
    // syncs any pending updates back to the zip file, releases the
    // (in|de)flater pools and deletes temp files.  The first error deleting
    // a temp file is rethrown; later ones are added as suppressed.
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;          // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {    // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is: copy)
                is.close();
        }
        beginWrite();                // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
                sync(); return null;
            });
            ch.close();              // close the ch just in case no update
        } catch (PrivilegedActionException e) { // and sync does not close the ch
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p: tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath,
            this);
        if (ioe != null)
            throw ioe;
    }

    // Returns the attributes of the entry at "path"; for a "pseudo directory"
    // (a parent that only exists implicitly) a STORED Entry with -1 times is
    // synthesized.  Returns null if no such entry/inode exists.
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry0(path);
            if (e == null) {
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                e = new Entry(inode.name);        // pseudo directory
                e.method = METHOD_STORED;         // STORED for dir
                e.mtime = e.atime = e.ctime = -1; // -1 for all times
            }
        } finally {
            endRead();
        }
        return new ZipFileAttributes(e);
    }

    // Updates the modification/access/creation times of an existing entry;
    // a null FileTime leaves the corresponding field unchanged.
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry0(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;      // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    private ZipPath toZipPath(byte[] path) {
        // make it absolute
        byte[] p = new byte[path.length + 1];
        p[0] = '/';
        System.arraycopy(path, 0, p, 1, path.length);
        return new ZipPath(this, p);
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(byte[] path,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            // walk the child/sibling chain built by buildNodeTree()/update()
            while (child != null) {
                ZipPath zp = toZipPath(child.name);
                if (filter == null || filter.accept(zp))
                    list.add(zp);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW);
            e.method = METHOD_STORED;            // STORED for dir
            update(e);
        } finally {
            endWrite();
        }
    }

    // Copies (or renames, when deletesrc) the entry "src" to "dst".
    void copyFile(boolean deletesrc, byte[] src, byte[] dst, CopyOption...
options) 431 throws IOException 432 { 433 checkWritable(); 434 if (Arrays.equals(src, dst)) 435 return; // do nothing, src and dst are the same 436 437 beginWrite(); 438 try { 439 ensureOpen(); 440 Entry eSrc = getEntry0(src); // ensureOpen checked 441 if (eSrc == null) 442 throw new NoSuchFileException(getString(src)); 443 if (eSrc.isDir()) { // spec says to create dst dir 444 createDirectory(dst); 445 return; 446 } 447 boolean hasReplace = false; 448 boolean hasCopyAttrs = false; 449 for (CopyOption opt : options) { 450 if (opt == REPLACE_EXISTING) 451 hasReplace = true; 452 else if (opt == COPY_ATTRIBUTES) 453 hasCopyAttrs = true; 454 } 455 Entry eDst = getEntry0(dst); 456 if (eDst != null) { 457 if (!hasReplace) 458 throw new FileAlreadyExistsException(getString(dst)); 459 } else { 460 checkParents(dst); 461 } 462 Entry u = new Entry(eSrc, Entry.COPY); // copy eSrc entry 463 u.name(dst); // change name 464 if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) 465 { 466 u.type = eSrc.type; // make it the same type 467 if (deletesrc) { // if it's a "rename", take the data 468 u.bytes = eSrc.bytes; 469 u.file = eSrc.file; 470 } else { // if it's not "rename", copy the data 471 if (eSrc.bytes != null) 472 u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length); 473 else if (eSrc.file != null) { 474 u.file = getTempPathForEntry(null); 475 Files.copy(eSrc.file, u.file, REPLACE_EXISTING); 476 } 477 } 478 } 479 if (!hasCopyAttrs) 480 u.mtime = u.atime= u.ctime = System.currentTimeMillis(); 481 update(u); 482 if (deletesrc) 483 updateDelete(eSrc); 484 } finally { 485 endWrite(); 486 } 487 } 488 489 // Returns an output stream for writing the contents into the specified 490 // entry. 491 OutputStream newOutputStream(byte[] path, OpenOption... 
options) 492 throws IOException 493 { 494 checkWritable(); 495 boolean hasCreateNew = false; 496 boolean hasCreate = false; 497 boolean hasAppend = false; 498 for (OpenOption opt: options) { 499 if (opt == READ) 500 throw new IllegalArgumentException("READ not allowed"); 501 if (opt == CREATE_NEW) 502 hasCreateNew = true; 503 if (opt == CREATE) 504 hasCreate = true; 505 if (opt == APPEND) 506 hasAppend = true; 507 } 508 beginRead(); // only need a readlock, the "update()" will 509 try { // try to obtain a writelock when the os is 510 ensureOpen(); // being closed. 511 Entry e = getEntry0(path); 512 if (e != null) { 513 if (e.isDir() || hasCreateNew) 514 throw new FileAlreadyExistsException(getString(path)); 515 if (hasAppend) { 516 InputStream is = getInputStream(e); 517 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 518 copyStream(is, os); 519 is.close(); 520 return os; 521 } 522 return getOutputStream(new Entry(e, Entry.NEW)); 523 } else { 524 if (!hasCreate && !hasCreateNew) 525 throw new NoSuchFileException(getString(path)); 526 checkParents(path); 527 return getOutputStream(new Entry(path, Entry.NEW)); 528 } 529 } finally { 530 endRead(); 531 } 532 } 533 534 // Returns an input stream for reading the contents of the specified 535 // file entry. 536 InputStream newInputStream(byte[] path) throws IOException { 537 beginRead(); 538 try { 539 ensureOpen(); 540 Entry e = getEntry0(path); 541 if (e == null) 542 throw new NoSuchFileException(getString(path)); 543 if (e.isDir()) 544 throw new FileSystemException(getString(path), "is a directory", null); 545 return getInputStream(e); 546 } finally { 547 endRead(); 548 } 549 } 550 551 private void checkOptions(Set<? 
 extends OpenOption> options) {
        // check for options of null type and option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of
    // it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();
            try {
                final WritableByteChannel wbc = Channels.newChannel(
                    newOutputStream(path, options.toArray(new OpenOption[0])));
                long leftover = 0;
                // for APPEND, start the position count at the existing size
                if (options.contains(StandardOpenOption.APPEND)) {
                    Entry e = getEntry0(path);
                    if (e != null && e.size >= 0)
                        leftover = e.size;
                }
                final long offset = leftover;
                return new SeekableByteChannel() {
                    long written = offset;   // running count of bytes written
                    public boolean isOpen() {
                        return wbc.isOpen();
                    }

                    public long position() throws IOException {
                        return written;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        throw new UnsupportedOperationException();
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int write(ByteBuffer src) throws IOException {
                        int n = wbc.write(src);
                        written += n;
                        return n;
                    }

                    public long size() throws IOException {
                        return written;
                    }

                    public void close() throws IOException {
                        wbc.close();
                    }
                };
            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry0(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                final ReadableByteChannel rbc =
                    Channels.newChannel(getInputStream(e));
                final long size = e.size;
                return new SeekableByteChannel() {
                    long read = 0;   // running count of bytes read
                    public boolean isOpen() {
                        return rbc.isOpen();
                    }

                    public long position() throws IOException {
                        return read;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        int n = rbc.read(dst);
                        if (n > 0) {
                            read += n;
                        }
                        return n;
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new NonWritableChannelException();
                    }

                    public int write (ByteBuffer src) throws IOException {
                        throw new NonWritableChannelException();
                    }

                    public long size() throws IOException {
                        return size;
                    }

                    public void close() throws IOException {
                        rbc.close();
                    }
                };
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry0(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE_NEW))
                        throw new NoSuchFileException(getString(path));
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            // FILECH entries already have their data in a temp file; reuse it
            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = METHOD_DEFLATED;
            }
            // is there a better way to hook into the FileChannel's close method?
            // Delegating wrapper whose implCloseChannel() commits the write
            // (or removes the temp file for a read-only channel).
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        // commit the temp-file contents as the entry's data
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private Set<ExChannelCloser> exChClosers = new HashSet<>();

    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());

    // Creates a temp file; when "path" names an existing entry, the entry's
    // data is copied into it.  A null path yields an empty temp file.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry0(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
856 private void checkParents(byte[] path) throws IOException { 857 beginRead(); 858 try { 859 while ((path = getParent(path)) != null && path.length != 0) { 860 if (!inodes.containsKey(IndexNode.keyOf(path))) { 861 throw new NoSuchFileException(getString(path)); 862 } 863 } 864 } finally { 865 endRead(); 866 } 867 } 868 869 private static byte[] ROOTPATH = new byte[0]; 870 private static byte[] getParent(byte[] path) { 871 int off = path.length - 1; 872 if (off > 0 && path[off] == '/') // isDirectory 873 off--; 874 while (off > 0 && path[off] != '/') { off--; } 875 if (off <= 0) 876 return ROOTPATH; 877 return Arrays.copyOf(path, off + 1); 878 } 879 880 private final void beginWrite() { 881 rwlock.writeLock().lock(); 882 } 883 884 private final void endWrite() { 885 rwlock.writeLock().unlock(); 886 } 887 888 private final void beginRead() { 889 rwlock.readLock().lock(); 890 } 891 892 private final void endRead() { 893 rwlock.readLock().unlock(); 894 } 895 896 /////////////////////////////////////////////////////////////////// 897 898 private volatile boolean isOpen = true; 899 private final SeekableByteChannel ch; // channel to the zipfile 900 final byte[] cen; // CEN & ENDHDR 901 private END end; 902 private long locpos; // position of first LOC header (usually 0) 903 904 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 905 906 // name -> pos (in cen), IndexNode itself can be used as a "key" 907 private LinkedHashMap<IndexNode, IndexNode> inodes; 908 909 final byte[] getBytes(String name) { 910 return zc.getBytes(name); 911 } 912 913 final String getString(byte[] name) { 914 return zc.toString(name); 915 } 916 917 protected void finalize() throws IOException { 918 close(); 919 } 920 921 private long getDataPos(Entry e) throws IOException { 922 if (e.locoff == -1) { 923 Entry e2 = getEntry0(e.name); 924 if (e2 == null) 925 throw new ZipException("invalid loc for entry <" + e.name + ">"); 926 e.locoff = e2.locoff; 927 } 928 byte[] buf = new 
byte[LOCHDR]; 929 if (readFullyAt(buf, 0, buf.length, e.locoff) != buf.length) 930 throw new ZipException("invalid loc for entry <" + e.name + ">"); 931 return locpos + e.locoff + LOCHDR + LOCNAM(buf) + LOCEXT(buf); 932 } 933 934 // Reads len bytes of data from the specified offset into buf. 935 // Returns the total number of bytes read. 936 // Each/every byte read from here (except the cen, which is mapped). 937 final long readFullyAt(byte[] buf, int off, long len, long pos) 938 throws IOException 939 { 940 ByteBuffer bb = ByteBuffer.wrap(buf); 941 bb.position(off); 942 bb.limit((int)(off + len)); 943 return readFullyAt(bb, pos); 944 } 945 946 private final long readFullyAt(ByteBuffer bb, long pos) 947 throws IOException 948 { 949 synchronized(ch) { 950 return ch.position(pos).read(bb); 951 } 952 } 953 954 // Searches for end of central directory (END) header. The contents of 955 // the END header will be read and placed in endbuf. Returns the file 956 // position of the END header, otherwise returns -1 if the END header 957 // was not found or an error occurred. 958 private END findEND() throws IOException 959 { 960 byte[] buf = new byte[READBLOCKSZ]; 961 long ziplen = ch.size(); 962 long minHDR = (ziplen - END_MAXLEN) > 0 ? 
ziplen - END_MAXLEN : 0; 963 long minPos = minHDR - (buf.length - ENDHDR); 964 965 for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) 966 { 967 int off = 0; 968 if (pos < 0) { 969 // Pretend there are some NUL bytes before start of file 970 off = (int)-pos; 971 Arrays.fill(buf, 0, off, (byte)0); 972 } 973 int len = buf.length - off; 974 if (readFullyAt(buf, off, len, pos + off) != len) 975 zerror("zip END header not found"); 976 977 // Now scan the block backwards for END header signature 978 for (int i = buf.length - ENDHDR; i >= 0; i--) { 979 if (buf[i+0] == (byte)'P' && 980 buf[i+1] == (byte)'K' && 981 buf[i+2] == (byte)'\005' && 982 buf[i+3] == (byte)'\006' && 983 (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) { 984 // Found END header 985 buf = Arrays.copyOfRange(buf, i, i + ENDHDR); 986 END end = new END(); 987 end.endsub = ENDSUB(buf); 988 end.centot = ENDTOT(buf); 989 end.cenlen = ENDSIZ(buf); 990 end.cenoff = ENDOFF(buf); 991 end.comlen = ENDCOM(buf); 992 end.endpos = pos + i; 993 if (end.cenlen == ZIP64_MINVAL || 994 end.cenoff == ZIP64_MINVAL || 995 end.centot == ZIP64_MINVAL32) 996 { 997 // need to find the zip64 end; 998 byte[] loc64 = new byte[ZIP64_LOCHDR]; 999 if (readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR) 1000 != loc64.length) { 1001 return end; 1002 } 1003 long end64pos = ZIP64_LOCOFF(loc64); 1004 byte[] end64buf = new byte[ZIP64_ENDHDR]; 1005 if (readFullyAt(end64buf, 0, end64buf.length, end64pos) 1006 != end64buf.length) { 1007 return end; 1008 } 1009 // end64 found, re-calcualte everything. 1010 end.cenlen = ZIP64_ENDSIZ(end64buf); 1011 end.cenoff = ZIP64_ENDOFF(end64buf); 1012 end.centot = (int)ZIP64_ENDTOT(end64buf); // assume total < 2g 1013 end.endpos = end64pos; 1014 } 1015 return end; 1016 } 1017 } 1018 } 1019 zerror("zip END header not found"); 1020 return null; //make compiler happy 1021 } 1022 1023 // Reads zip file central directory. 
    // Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;  // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;  // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (CENSIG(cen, pos) != CENSIG)
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0)
                zerror("invalid CEN header (encrypted entry)");
            if (method != METHOD_STORED && method != METHOD_DEFLATED)
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            if (pos + CENHDR + nlen > limit)
                zerror("invalid CEN header (bad header size)");
            byte[] name = Arrays.copyOfRange(cen, pos + CENHDR, pos + CENHDR + nlen);
            IndexNode inode = new IndexNode(name, pos);
            inodes.put(inode, inode);
            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);  // tracked so close() can delete it
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = IndexNode.keyOf(null);

    // Removes "inode" from the directory tree and the inode map.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Inserts/replaces "e" in the inode map and, for new/copied entries,
    // links it into its parent's child/sibling chain.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
1135 private long copyLOCEntry(Entry e, boolean updateHeader, 1136 OutputStream os, 1137 long written, byte[] buf) 1138 throws IOException 1139 { 1140 long locoff = e.locoff; // where to read 1141 e.locoff = written; // update the e.locoff with new value 1142 1143 // calculate the size need to write out 1144 long size = 0; 1145 // if there is A ext 1146 if ((e.flag & FLAG_DATADESCR) != 0) { 1147 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1148 size = 24; 1149 else 1150 size = 16; 1151 } 1152 // read loc, use the original loc.elen/nlen 1153 if (readFullyAt(buf, 0, LOCHDR , locoff) != LOCHDR) 1154 throw new ZipException("loc: reading failed"); 1155 if (updateHeader) { 1156 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1157 size += e.csize; 1158 written = e.writeLOC(os) + size; 1159 } else { 1160 os.write(buf, 0, LOCHDR); // write out the loc header 1161 locoff += LOCHDR; 1162 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1163 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1164 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1165 written = LOCHDR + size; 1166 } 1167 int n; 1168 while (size > 0 && 1169 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1170 { 1171 if (size < n) 1172 n = (int)size; 1173 os.write(buf, 0, n); 1174 size -= n; 1175 locoff += n; 1176 } 1177 return written; 1178 } 1179 1180 // sync the zip file system, if there is any udpate 1181 private void sync() throws IOException { 1182 //System.out.printf("->sync(%s) starting....!%n", toString()); 1183 // check ex-closer 1184 if (!exChClosers.isEmpty()) { 1185 for (ExChannelCloser ecc : exChClosers) { 1186 if (ecc.streams.isEmpty()) { 1187 ecc.ch.close(); 1188 Files.delete(ecc.path); 1189 exChClosers.remove(ecc); 1190 } 1191 } 1192 } 1193 if (!hasUpdate) 1194 return; 1195 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1196 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1197 { 1198 ArrayList<Entry> elist = 
new ArrayList<>(inodes.size()); 1199 long written = 0; 1200 byte[] buf = new byte[8192]; 1201 Entry e = null; 1202 1203 // write loc 1204 for (IndexNode inode : inodes.values()) { 1205 if (inode instanceof Entry) { // an updated inode 1206 e = (Entry)inode; 1207 try { 1208 if (e.type == Entry.COPY) { 1209 // entry copy: the only thing changed is the "name" 1210 // and "nlen" in LOC header, so we udpate/rewrite the 1211 // LOC in new file and simply copy the rest (data and 1212 // ext) without enflating/deflating from the old zip 1213 // file LOC entry. 1214 written += copyLOCEntry(e, true, os, written, buf); 1215 } else { // NEW, FILECH or CEN 1216 e.locoff = written; 1217 written += e.writeLOC(os); // write loc header 1218 if (e.bytes != null) { // in-memory, deflated 1219 os.write(e.bytes); // already 1220 written += e.bytes.length; 1221 } else if (e.file != null) { // tmp file 1222 try (InputStream is = Files.newInputStream(e.file)) { 1223 int n; 1224 if (e.type == Entry.NEW) { // deflated already 1225 while ((n = is.read(buf)) != -1) { 1226 os.write(buf, 0, n); 1227 written += n; 1228 } 1229 } else if (e.type == Entry.FILECH) { 1230 // the data are not deflated, use ZEOS 1231 try (OutputStream os2 = new EntryOutputStream(e, os)) { 1232 while ((n = is.read(buf)) != -1) { 1233 os2.write(buf, 0, n); 1234 } 1235 } 1236 written += e.csize; 1237 if ((e.flag & FLAG_DATADESCR) != 0) 1238 written += e.writeEXT(os); 1239 } 1240 } 1241 Files.delete(e.file); 1242 tmppaths.remove(e.file); 1243 } else { 1244 // dir, 0-length data 1245 } 1246 } 1247 elist.add(e); 1248 } catch (IOException x) { 1249 x.printStackTrace(); // skip any in-accurate entry 1250 } 1251 } else { // unchanged inode 1252 if (inode.pos == -1) { 1253 continue; // pseudo directory node 1254 } 1255 e = Entry.readCEN(this, inode.pos); 1256 try { 1257 written += copyLOCEntry(e, false, os, written, buf); 1258 elist.add(e); 1259 } catch (IOException x) { 1260 x.printStackTrace(); // skip any wrong entry 1261 } 
1262 } 1263 } 1264 1265 // now write back the cen and end table 1266 end.cenoff = written; 1267 for (Entry entry : elist) { 1268 written += entry.writeCEN(os); 1269 } 1270 end.centot = elist.size(); 1271 end.cenlen = written - end.cenoff; 1272 end.write(os, written); 1273 } 1274 if (!streams.isEmpty()) { 1275 // 1276 // TBD: ExChannelCloser should not be necessary if we only 1277 // sync when being closed, all streams should have been 1278 // closed already. Keep the logic here for now. 1279 // 1280 // There are outstanding input streams open on existing "ch", 1281 // so, don't close the "cha" and delete the "file for now, let 1282 // the "ex-channel-closer" to handle them 1283 ExChannelCloser ecc = new ExChannelCloser( 1284 createTempFileInSameDirectoryAs(zfpath), 1285 ch, 1286 streams); 1287 Files.move(zfpath, ecc.path, REPLACE_EXISTING); 1288 exChClosers.add(ecc); 1289 streams = Collections.synchronizedSet(new HashSet<InputStream>()); 1290 } else { 1291 ch.close(); 1292 Files.delete(zfpath); 1293 } 1294 1295 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1296 hasUpdate = false; // clear 1297 /* 1298 if (isOpen) { 1299 ch = zfpath.newByteChannel(READ); // re-fresh "ch" and "cen" 1300 cen = initCEN(); 1301 } 1302 */ 1303 //System.out.printf("->sync(%s) done!%n", toString()); 1304 } 1305 1306 private IndexNode getInode(byte[] path) { 1307 if (path == null) 1308 throw new NullPointerException("path"); 1309 IndexNode key = IndexNode.keyOf(path); 1310 IndexNode inode = inodes.get(key); 1311 if (inode == null && 1312 (path.length == 0 || path[path.length -1] != '/')) { 1313 // if does not ends with a slash 1314 path = Arrays.copyOf(path, path.length + 1); 1315 path[path.length - 1] = '/'; 1316 inode = inodes.get(key.as(path)); 1317 } 1318 return inode; 1319 } 1320 1321 private Entry getEntry0(byte[] path) throws IOException { 1322 IndexNode inode = getInode(path); 1323 if (inode instanceof Entry) 1324 return (Entry)inode; 1325 if (inode == null || inode.pos == -1) 1326 
return null; 1327 return Entry.readCEN(this, inode.pos); 1328 } 1329 1330 public void deleteFile(byte[] path, boolean failIfNotExists) 1331 throws IOException 1332 { 1333 checkWritable(); 1334 1335 IndexNode inode = getInode(path); 1336 if (inode == null) { 1337 if (path != null && path.length == 0) 1338 throw new ZipException("root directory </> can't not be delete"); 1339 if (failIfNotExists) 1340 throw new NoSuchFileException(getString(path)); 1341 } else { 1342 if (inode.isDir() && inode.child != null) 1343 throw new DirectoryNotEmptyException(getString(path)); 1344 updateDelete(inode); 1345 } 1346 } 1347 1348 private static void copyStream(InputStream is, OutputStream os) 1349 throws IOException 1350 { 1351 byte[] copyBuf = new byte[8192]; 1352 int n; 1353 while ((n = is.read(copyBuf)) != -1) { 1354 os.write(copyBuf, 0, n); 1355 } 1356 } 1357 1358 // Returns an out stream for either 1359 // (1) writing the contents of a new entry, if the entry exits, or 1360 // (2) updating/replacing the contents of the specified existing entry. 1361 private OutputStream getOutputStream(Entry e) throws IOException { 1362 1363 if (e.mtime == -1) 1364 e.mtime = System.currentTimeMillis(); 1365 if (e.method == -1) 1366 e.method = METHOD_DEFLATED; // TBD: use default method 1367 // store size, compressed size, and crc-32 in LOC header 1368 e.flag = 0; 1369 if (zc.isUTF8()) 1370 e.flag |= FLAG_EFS; 1371 OutputStream os; 1372 if (useTempFile) { 1373 e.file = getTempPathForEntry(null); 1374 os = Files.newOutputStream(e.file, WRITE); 1375 } else { 1376 os = new ByteArrayOutputStream((e.size > 0)? 
(int)e.size : 8192); 1377 } 1378 return new EntryOutputStream(e, os); 1379 } 1380 1381 private InputStream getInputStream(Entry e) 1382 throws IOException 1383 { 1384 InputStream eis = null; 1385 1386 if (e.type == Entry.NEW) { 1387 if (e.bytes != null) 1388 eis = new ByteArrayInputStream(e.bytes); 1389 else if (e.file != null) 1390 eis = Files.newInputStream(e.file); 1391 else 1392 throw new ZipException("update entry data is missing"); 1393 } else if (e.type == Entry.FILECH) { 1394 // FILECH result is un-compressed. 1395 eis = Files.newInputStream(e.file); 1396 // TBD: wrap to hook close() 1397 // streams.add(eis); 1398 return eis; 1399 } else { // untouced CEN or COPY 1400 eis = new EntryInputStream(e, ch); 1401 } 1402 if (e.method == METHOD_DEFLATED) { 1403 // MORE: Compute good size for inflater stream: 1404 long bufSize = e.size + 2; // Inflater likes a bit of slack 1405 if (bufSize > 65536) 1406 bufSize = 8192; 1407 final long size = e.size; 1408 eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) { 1409 1410 private boolean isClosed = false; 1411 public void close() throws IOException { 1412 if (!isClosed) { 1413 releaseInflater(inf); 1414 this.in.close(); 1415 isClosed = true; 1416 streams.remove(this); 1417 } 1418 } 1419 // Override fill() method to provide an extra "dummy" byte 1420 // at the end of the input stream. This is required when 1421 // using the "nowrap" Inflater option. 
                // (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        // supply the single dummy byte for nowrap mode
                        buf[0] = 0;
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;    // set once underlying stream is drained

                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    // remaining uncompressed bytes, clamped to int range
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);    // tracked so sync()/close() can account for readers
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry
        protected final long size;              // uncompressed size of this entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;       // raw (possibly compressed) bytes to read
            size = e.size;
            pos = getDataPos(e); // absolute offset of entry data in the zip
        }
        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // channel position is shared state; serialize positioned reads
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }
        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }
        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }
        public int available() {
            return rem > Integer.MAX_VALUE ?
                Integer.MAX_VALUE : (int) rem;
        }
        public long size() {
            return size;
        }
        public void close() {
            rem = 0;
            streams.remove(this);
        }
    }

    // Output stream used to write a (possibly deflated) entry; on close()
    // it finalizes size/csize/crc and registers the entry via update().
    class EntryOutputStream extends DeflaterOutputStream
    {
        private CRC32 crc;
        private Entry e;
        private long written;    // byte count for METHOD_STORED entries

        EntryOutputStream(Entry e, OutputStream os)
            throws IOException
        {
            super(os, getDeflater());
            if (e == null)
                throw new NullPointerException("Zip entry is null");
            this.e = e;
            crc = new CRC32();
        }

        @Override
        public void write(byte b[], int off, int len) throws IOException {
            if (e.type != Entry.FILECH)    // only from sync
                ensureOpen();
            if (off < 0 || len < 0 || off > b.length - len) {
                throw new IndexOutOfBoundsException();
            } else if (len == 0) {
                return;
            }
            switch (e.method) {
            case METHOD_DEFLATED:
                super.write(b, off, len);
                break;
            case METHOD_STORED:
                written += len;
                out.write(b, off, len);
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            crc.update(b, off, len);    // CRC is over the uncompressed data
        }

        @Override
        public void close() throws IOException {
            // TBD ensureOpen();
            switch (e.method) {
            case METHOD_DEFLATED:
                finish();
                e.size = def.getBytesRead();
                e.csize = def.getBytesWritten();
                e.crc = crc.getValue();
                break;
            case METHOD_STORED:
                // we already know that both e.size and e.csize are the same
                e.size = e.csize = written;
                e.crc = crc.getValue();
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            //crc.reset();
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();

            if (e.type == Entry.FILECH) {
                // sync() owns the underlying stream; don't close it here
                releaseDeflater(def);
                return;
            }
            super.close();
            releaseDeflater(def);
            update(e);
        }
    }

    // Reports a fatal zip-format error.
    static void zerror(String msg) {
        throw new ZipError(msg);
    }

    // Maximum number of de/inflater we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                return new Inflater(true);    // nowrap: raw deflate data
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();    // cache full: free native resources now
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
    private Deflater getDeflater() {
        synchronized (deflaters) {
            int size = deflaters.size();
            if (size > 0) {
                Deflater def = deflaters.remove(size - 1);
                return def;
            } else {
                return new Deflater(Deflater.DEFAULT_COMPRESSION, true);
            }
        }
    }

    // Releases the specified deflater to the list of available deflaters.
1655 private void releaseDeflater(Deflater def) { 1656 synchronized (deflaters) { 1657 if (inflaters.size() < MAX_FLATER) { 1658 def.reset(); 1659 deflaters.add(def); 1660 } else { 1661 def.end(); 1662 } 1663 } 1664 } 1665 1666 // End of central directory record 1667 static class END { 1668 int disknum; 1669 int sdisknum; 1670 int endsub; // endsub 1671 int centot; // 4 bytes 1672 long cenlen; // 4 bytes 1673 long cenoff; // 4 bytes 1674 int comlen; // comment length 1675 byte[] comment; 1676 1677 /* members of Zip64 end of central directory locator */ 1678 int diskNum; 1679 long endpos; 1680 int disktot; 1681 1682 void write(OutputStream os, long offset) throws IOException { 1683 boolean hasZip64 = false; 1684 long xlen = cenlen; 1685 long xoff = cenoff; 1686 if (xlen >= ZIP64_MINVAL) { 1687 xlen = ZIP64_MINVAL; 1688 hasZip64 = true; 1689 } 1690 if (xoff >= ZIP64_MINVAL) { 1691 xoff = ZIP64_MINVAL; 1692 hasZip64 = true; 1693 } 1694 int count = centot; 1695 if (count >= ZIP64_MINVAL32) { 1696 count = ZIP64_MINVAL32; 1697 hasZip64 = true; 1698 } 1699 if (hasZip64) { 1700 long off64 = offset; 1701 //zip64 end of central directory record 1702 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1703 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1704 writeShort(os, 45); // version made by 1705 writeShort(os, 45); // version needed to extract 1706 writeInt(os, 0); // number of this disk 1707 writeInt(os, 0); // central directory start disk 1708 writeLong(os, centot); // number of directory entires on disk 1709 writeLong(os, centot); // number of directory entires 1710 writeLong(os, cenlen); // length of central directory 1711 writeLong(os, cenoff); // offset of central directory 1712 1713 //zip64 end of central directory locator 1714 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1715 writeInt(os, 0); // zip64 END start disk 1716 writeLong(os, off64); // offset of zip64 END 1717 writeInt(os, 1); // total number of disks (?) 
            }
            writeInt(os, ENDSIG);          // END record signature
            writeShort(os, 0);             // number of this disk
            writeShort(os, 0);             // central directory start disk
            writeShort(os, count);         // number of directory entries on disk
            writeShort(os, count);         // total number of directory entries
            writeInt(os, xlen);            // length of central directory
            writeInt(os, xoff);            // offset of central directory
            if (comment != null) {         // zip file comment
                writeShort(os, comment.length);
                writeBytes(os, comment);
            } else {
                writeShort(os, 0);
            }
        }
    }

    // Internal node that links a "name" to its pos in cen table.
    // The node itself can be used as a "key" to lookup itself in
    // the HashMap inodes.
    static class IndexNode {
        byte[] name;
        int hashcode;    // node is hashable/hashed by its name
        int pos = -1;    // position in cen table, -1 means the
                         // entry does not exist in zip file
        IndexNode(byte[] name, int pos) {
            name(name);
            this.pos = pos;
        }

        final static IndexNode keyOf(byte[] name) { // get a lookup key;
            return new IndexNode(name, -1);
        }

        // Sets the name and recomputes the cached hash.
        final void name(byte[] name) {
            this.name = name;
            this.hashcode = Arrays.hashCode(name);
        }

        final IndexNode as(byte[] name) {           // reuse the node, mostly
            name(name);                             // as a lookup "key"
            return this;
        }

        // A node is a directory if its name is empty (root) or ends in '/'.
        boolean isDir() {
            return name != null &&
                   (name.length == 0 || name[name.length - 1] == '/');
        }

        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            return Arrays.equals(name, ((IndexNode)other).name);
        }

        public int hashCode() {
            return hashcode;    // precomputed in name()
        }

        IndexNode() {}
        IndexNode sibling;    // next entry in the same parent directory
        IndexNode child;      // 1st child
    }

    // Full zip entry: IndexNode plus all LOC/CEN metadata and, for updated
    // entries, the pending content (bytes or temp file).
    static class Entry extends IndexNode {

        static final int CEN    = 1;  // entry read from cen
        static final int NEW    = 2;  // updated contents in bytes or file
        static final int FILECH = 3;  // fch update in "file"
        static final int COPY   = 4;  // copy of a CEN entry


        byte[] bytes;        // updated content bytes
        Path   file;         // use tmp file to store bytes;
        int    type = CEN;   // default is the entry read from cen

        // entry attributes
        int    version;
        int    flag;
        int    method = -1;  // compression method
        long   mtime  = -1;  // last modification time (in DOS time)
        long   atime  = -1;  // last access time
        long   ctime  = -1;  // create time
        long   crc    = -1;  // crc-32 of entry data
        long   csize  = -1;  // compressed size of entry data
        long   size   = -1;  // uncompressed size of entry data
        byte[] extra;

        // cen
        int    versionMade;
        int    disk;
        int    attrs;
        long   attrsEx;
        long   locoff;
        byte[] comment;

        Entry() {}

        // New, empty, deflated entry stamped with the current time.
        Entry(byte[] name) {
            name(name);
            this.mtime = this.ctime = this.atime = System.currentTimeMillis();
            this.crc = 0;
            this.size = 0;
            this.csize = 0;
            this.method = METHOD_DEFLATED;
        }

        Entry(byte[] name, int type) {
            this(name);
            this.type = type;
        }

        // Copy constructor: clones all metadata of 'e' under a new type.
        Entry (Entry e, int type) {
            name(e.name);
            this.version   = e.version;
            this.ctime     = e.ctime;
            this.atime     = e.atime;
            this.mtime     = e.mtime;
            this.crc       = e.crc;
            this.size      = e.size;
            this.csize     = e.csize;
            this.method    = e.method;
            this.extra     = e.extra;
            this.versionMade = e.versionMade;
            this.disk      = e.disk;
            this.attrs     = e.attrs;
            this.attrsEx   = e.attrsEx;
            this.locoff    = e.locoff;
            this.comment   = e.comment;
            this.type      = type;
        }

        // Entry whose (uncompressed) content lives in an external file.
        Entry (byte[] name, Path file, int type) {
            this(name, type);
            this.file = file;
            this.method = METHOD_STORED;
        }

        // Minimum "version needed to extract" for this entry's method.
        int version() throws ZipException {
            if (method == METHOD_DEFLATED)
                return 20;
            else if (method == METHOD_STORED)
                return 10;
            throw new ZipException("unsupported compression method");
        }

        ///////////////////// CEN //////////////////////
        // Reads the CEN record at 'pos' into a fresh Entry.
        static Entry readCEN(ZipFileSystem zipfs, int pos)
            throws IOException
        {
            return new Entry().cen(zipfs, pos);
        }

        // Populates this entry from the CEN record at 'pos' in zipfs.cen.
        private Entry cen(ZipFileSystem zipfs, int pos)
            throws IOException
        {
            byte[] cen = zipfs.cen;
            if (CENSIG(cen, pos) != CENSIG)
                zerror("invalid CEN header (bad signature)");
            versionMade = CENVEM(cen, pos);
            version     = CENVER(cen, pos);
            flag        = CENFLG(cen, pos);
            method      = CENHOW(cen, pos);
            mtime       = dosToJavaTime(CENTIM(cen, pos));
            crc         = CENCRC(cen, pos);
            csize       = CENSIZ(cen, pos);
            size        = CENLEN(cen, pos);
            int nlen    = CENNAM(cen, pos);
            int elen    = CENEXT(cen, pos);
            int clen    = CENCOM(cen, pos);
            disk        = CENDSK(cen, pos);
            attrs       = CENATT(cen, pos);
            attrsEx     = CENATX(cen, pos);
            locoff      = CENOFF(cen, pos);

            pos += CENHDR;
            name(Arrays.copyOfRange(cen, pos, pos + nlen));

            pos += nlen;
            if (elen > 0) {
                extra = Arrays.copyOfRange(cen, pos, pos + elen);
                pos += elen;
                readExtra(zipfs);    // may refine sizes/times from extra fields
            }
            if (clen > 0) {
                comment = Arrays.copyOfRange(cen, pos, pos + clen);
            }
            return this;
        }

        // Writes this entry's CEN record (with Zip64/NTFS/EXTT extra fields
        // as needed); returns the number of bytes written.
        int writeCEN(OutputStream os) throws IOException
        {
            int written  = CENHDR;
            int version0 = version();
            long csize0  = csize;
            long size0   = size;
            long locoff0 = locoff;
            int elen64   = 0;  // extra for ZIP64
            int elenNTFS = 0;  // extra for NTFS (a/c/mtime)
            int elenEXTT = 0;  // extra for Extended Timestamp
            boolean foundExtraTime = false;  // if time stamp NTFS, EXTT present

            // confirm size/length
            int nlen = (name != null) ? name.length : 0;
            int elen = (extra != null) ? extra.length : 0;
            int eoff = 0;
            int clen = (comment != null) ? comment.length : 0;
            if (csize >= ZIP64_MINVAL) {
                csize0 = ZIP64_MINVAL;
                elen64 += 8;               // csize(8)
            }
            if (size >= ZIP64_MINVAL) {
                size0 = ZIP64_MINVAL;      // size(8)
                elen64 += 8;
            }
            if (locoff >= ZIP64_MINVAL) {
                locoff0 = ZIP64_MINVAL;
                elen64 += 8;               // offset(8)
            }
            if (elen64 != 0) {
                elen64 += 4;               // header and data sz 4 bytes
            }
            // scan the existing extra data for a timestamp field
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {           // use NTFS
                    elenNTFS = 36;         // total 36 bytes
                } else {                   // Extended Timestamp otherwise
                    elenEXTT = 9;          // only mtime in cen
                }
            }
            writeInt(os, CENSIG);          // CEN header signature
            if (elen64 != 0) {
                writeShort(os, 45);        // ver 4.5 for zip64
                writeShort(os, 45);
            } else {
                writeShort(os, version0);  // version made by
                writeShort(os, version0);  // version needed to extract
            }
            writeShort(os, flag);          // general purpose bit flag
            writeShort(os, method);        // compression method
            // last modification time
            writeInt(os, (int)javaToDosTime(mtime));
            writeInt(os, crc);             // crc-32
            writeInt(os, csize0);          // compressed size
            writeInt(os, size0);           // uncompressed size
            writeShort(os, name.length);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);

            if (comment != null) {
                writeShort(os, Math.min(clen, 0xffff));
            } else {
                writeShort(os, 0);
            }
            writeShort(os, 0);             // starting disk number
            writeShort(os, 0);             // internal file attributes (unused)
            writeInt(os, 0);               // external file attributes (unused)
            writeInt(os, locoff0);         // relative offset of local header
            writeBytes(os, name);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);  // Zip64 extra
                writeShort(os, elen64 - 4);   // size of "this" extra block
                if (size0 == ZIP64_MINVAL)
                    writeLong(os, size);
                if (csize0 == ZIP64_MINVAL)
                    writeLong(os, csize);
                if (locoff0 == ZIP64_MINVAL)
                    writeLong(os, locoff);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);            // reserved
                writeShort(os, 0x0001);     // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);
                if (ctime == -1)
                    os.write(0x3);          // mtime and atime
                else
                    os.write(0x7);          // mtime, atime and ctime
                writeInt(os, javaToUnixTime(mtime));
            }
            if (extra != null)              // whatever not recognized
                writeBytes(os, extra);
            if (comment != null)            //TBD: 0, Math.min(commentBytes.length, 0xffff));
                writeBytes(os, comment);
            return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT;
        }

        ///////////////////// LOC //////////////////////
        // Reads the LOC record at 'pos' into a fresh Entry.
        static Entry readLOC(ZipFileSystem zipfs, long pos)
            throws IOException
        {
            return readLOC(zipfs, pos, new byte[1024]);
        }

        static Entry readLOC(ZipFileSystem zipfs, long pos, byte[] buf)
            throws IOException
        {
            return new Entry().loc(zipfs, pos, buf);
        }

        // Populates this entry from the LOC record at file position 'pos',
        // using 'buf' as scratch space.
        Entry loc(ZipFileSystem zipfs, long pos, byte[] buf)
            throws IOException
        {
            assert (buf.length >= LOCHDR);
            if (zipfs.readFullyAt(buf, 0, LOCHDR , pos) != LOCHDR)
                throw new ZipException("loc: reading failed");
            if (LOCSIG(buf) != LOCSIG)
                throw new ZipException("loc: wrong sig ->"
                                       + Long.toString(LOCSIG(buf), 16));
            //startPos = pos;
            version  = LOCVER(buf);
            flag     = LOCFLG(buf);
            method   = LOCHOW(buf);
            mtime    = dosToJavaTime(LOCTIM(buf));
            crc      = LOCCRC(buf);
            csize    = LOCSIZ(buf);
            size     = LOCLEN(buf);
            int
                nlen = LOCNAM(buf);
            int elen = LOCEXT(buf);

            name = new byte[nlen];
            if (zipfs.readFullyAt(name, 0, nlen, pos + LOCHDR) != nlen) {
                throw new ZipException("loc: name reading failed");
            }
            if (elen > 0) {
                extra = new byte[elen];
                if (zipfs.readFullyAt(extra, 0, elen, pos + LOCHDR + nlen)
                    != elen) {
                    throw new ZipException("loc: ext reading failed");
                }
            }
            pos += (LOCHDR + nlen + elen);
            if ((flag & FLAG_DATADESCR) != 0) {
                // Data Descriptor: LOC sizes are zero, so consult the CEN
                Entry e = zipfs.getEntry0(name);  // get the size/csize from cen
                if (e == null)
                    throw new ZipException("loc: name not found in cen");
                size = e.size;
                csize = e.csize;
                pos += (method == METHOD_STORED ? size : csize);
                // account for the trailing data descriptor itself
                if (size >= ZIP64_MINVAL || csize >= ZIP64_MINVAL)
                    pos += 24;
                else
                    pos += 16;
            } else {
                if (extra != null &&
                    (size == ZIP64_MINVAL || csize == ZIP64_MINVAL)) {
                    // zip64 ext: must include both size and csize
                    int off = 0;
                    while (off + 20 < elen) {    // HeaderID+DataSize+Data
                        int sz = SH(extra, off + 2);
                        if (SH(extra, off) == EXTID_ZIP64 && sz == 16) {
                            size = LL(extra, off + 4);
                            csize = LL(extra, off + 12);
                            break;
                        }
                        off += (sz + 4);
                    }
                }
                pos += (method == METHOD_STORED ? size : csize);
            }
            return this;
        }

        // Writes this entry's LOC record (with Zip64/NTFS/EXTT extra fields
        // as needed); returns the number of bytes written.
        int writeLOC(OutputStream os)
            throws IOException
        {
            writeInt(os, LOCSIG);              // LOC header signature
            int version = version();
            int nlen = (name != null) ? name.length : 0;
            int elen = (extra != null) ? extra.length : 0;
            boolean foundExtraTime = false;    // if extra timestamp present
            int eoff = 0;
            int elen64 = 0;
            int elenEXTT = 0;
            int elenNTFS = 0;
            if ((flag & FLAG_DATADESCR) != 0) {
                writeShort(os, version());     // version needed to extract
                writeShort(os, flag);          // general purpose bit flag
                writeShort(os, method);        // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;               //headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);        // ver 4.5 for zip64
                } else {
                    writeShort(os, version()); // version needed to extract
                }
                writeShort(os, flag);          // general purpose bit flag
                writeShort(os, method);        // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);             // crc-32
                if (elen64 != 0) {
                    // real sizes go in the Zip64 extra field below
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);       // compressed size
                    writeInt(os, size);        // uncompressed size
                }
            }
            // scan the existing extra data for a timestamp field
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;             // NTFS, total 36 bytes
                } else {                       // on unix use "ext time"
                    elenEXTT = 9;
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, name.length);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, name);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);               // reserved
                writeShort(os, 0x0001);        // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);  // size for the following data block
                int fbyte = 0x1;
                if (atime != -1)               // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)               // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);               // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            return LOCHDR + name.length + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor: written after the data when FLAG_DATADESCR is set;
        // returns the number of bytes written (16, or 24 for zip64 sizes).
        int writeEXT(OutputStream os)
            throws IOException
        {
            writeInt(os, EXTSIG);              // EXT header signature
            writeInt(os, crc);                 // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);           // compressed size
                writeInt(os, size);            // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)           // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // 64-bit fields appear in size/csize/locoff order, but
                    // only for those set to the ZIP64_MINVAL sentinel
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)    // invalid zip64 extra
                            break;             // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    pos += 4;                  // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)
                        break;
                    if (SH(extra, pos + 2) != 24)
                        break;
                    // override the loc field, datatime here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (LOCSIG(buf) != LOCSIG)
                        throw new ZipException("loc: wrong sig ->"
                                               + Long.toString(LOCSIG(buf), 16));

                    int locElen = LOCEXT(buf);
                    if (locElen < 9)           // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    int locPos = 0;
                    // find the EXTT block inside the LOC extra data
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ  = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int flag = CH(buf, locPos++);
                        if ((flag & 0x1) != 0) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0)
                    {                               // ctime present
                        ctime = unixToJavaTime(LG(buf, locPos));
                        locPos += 4;
                    }
                    break;      // EXTT block consumed; stop scanning loc extra
                }
                break;
            default:    // unknown tag: retain it by compacting toward the front
                System.arraycopy(extra, off, extra, newOff, sz + 4);
                newOff += (sz + 4);
            }
            off += (sz + 4);
        }
        // keep only the unrecognized tags; drop extra when nothing was kept.
        // NOTE(review): when every tag was unknown (newOff == extra.length)
        // extra is still set to null, discarding the retained data --
        // confirm this is intended
        if (newOff != 0 && newOff != extra.length)
            extra = Arrays.copyOf(extra, newOff);
        else
            extra = null;
    }
    }

    // Bundles what is needed to close an "exposed" channel later: the zip
    // path, its open channel, and the input streams still reading from it.
    private static class ExChannelCloser {
        Path path;
        SeekableByteChannel ch;
        Set<InputStream> streams;
        ExChannelCloser(Path path,
                        SeekableByteChannel ch,
                        Set<InputStream> streams)
        {
            this.path = path;
            this.ch = ch;
            this.streams = streams;
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entries
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourselves as
    // implemented below.
    private IndexNode root;

    // Links inode into the tree under its parent directory, creating pseudo
    // directory entries for any missing ancestors; dirs records the
    // directories already wired up so each node is processed only once.
    private void addToTree(IndexNode inode, HashSet<IndexNode> dirs) {
        if (dirs.contains(inode)) {
            return;
        }
        IndexNode parent;
        byte[] name = inode.name;
        byte[] pname = getParent(name);
        if (inodes.containsKey(LOOKUPKEY.as(pname))) {
            // LOOKUPKEY still holds pname from the containsKey call above
            parent = inodes.get(LOOKUPKEY);
        } else {    // pseudo directory entry
            parent = new IndexNode(pname, -1);
            inodes.put(parent, parent);
        }
        addToTree(parent, dirs);
        // prepend inode to the parent's singly-linked child list
        inode.sibling = parent.child;
        parent.child = inode;
        if (name[name.length -1] == '/')
            dirs.add(inode);
    }

    // Unlinks inode from its parent's child list.
    // NOTE(review): assumes the parent is present in inodes and has at
    // least one child; a missing parent would NPE here -- confirm callers
    // guarantee this.
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    // Builds the directory tree from the flat inodes map, holding the
    // write lock for the duration (beginWrite/endWrite).
    private void buildNodeTree() throws IOException {
        beginWrite();
        try {
            HashSet<IndexNode> dirs = new HashSet<>();
            // NOTE(review): this local shadows the 'root' field declared
            // above, which is never assigned here -- confirm intended
            IndexNode root = new IndexNode(ROOTPATH, -1);
            inodes.put(root, root);
            dirs.add(root);
            // snapshot the keys: addToTree may insert pseudo directories
            // into inodes while we iterate
            for (IndexNode node : inodes.keySet().toArray(new IndexNode[0])) {
                addToTree(node, dirs);
            }
        } finally {
            endWrite();
        }
    }
}