1 /* 2 * Copyright (c) 2009, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package jdk.nio.zipfs; 27 28 import java.io.BufferedOutputStream; 29 import java.io.ByteArrayInputStream; 30 import java.io.ByteArrayOutputStream; 31 import java.io.EOFException; 32 import java.io.File; 33 import java.io.FilterOutputStream; 34 import java.io.IOException; 35 import java.io.InputStream; 36 import java.io.OutputStream; 37 import java.nio.ByteBuffer; 38 import java.nio.MappedByteBuffer; 39 import java.nio.channels.*; 40 import java.nio.file.*; 41 import java.nio.file.attribute.*; 42 import java.nio.file.spi.*; 43 import java.security.AccessController; 44 import java.security.PrivilegedAction; 45 import java.security.PrivilegedActionException; 46 import java.security.PrivilegedExceptionAction; 47 import java.util.*; 48 import java.util.concurrent.locks.ReadWriteLock; 49 import java.util.concurrent.locks.ReentrantReadWriteLock; 50 import java.util.regex.Pattern; 51 import java.util.zip.CRC32; 52 import java.util.zip.Inflater; 53 import java.util.zip.Deflater; 54 import java.util.zip.InflaterInputStream; 55 import java.util.zip.DeflaterOutputStream; 56 import java.util.zip.ZipException; 57 import static java.lang.Boolean.*; 58 import static jdk.nio.zipfs.ZipConstants.*; 59 import static jdk.nio.zipfs.ZipUtils.*; 60 import static java.nio.file.StandardOpenOption.*; 61 import static java.nio.file.StandardCopyOption.*; 62 63 /** 64 * A FileSystem built on a zip file 65 * 66 * @author Xueming Shen 67 */ 68 69 class ZipFileSystem extends FileSystem { 70 71 private final ZipFileSystemProvider provider; 72 private final Path zfpath; 73 final ZipCoder zc; 74 private final ZipPath rootdir; 75 private boolean readOnly = false; // readonly file system 76 77 // configurable by env map 78 private final boolean noExtt; // see readExtra() 79 private final boolean useTempFile; // use a temp file for newOS, default 80 // is to use BAOS for better performance 81 private static final boolean isWindows = AccessController.doPrivileged( 82 
(PrivilegedAction<Boolean>) () -> System.getProperty("os.name") 83 .startsWith("Windows")); 84 private final boolean forceEnd64; 85 private final int defaultMethod; // METHOD_STORED if "noCompression=true" 86 // METHOD_DEFLATED otherwise 87 88 ZipFileSystem(ZipFileSystemProvider provider, 89 Path zfpath, 90 Map<String, ?> env) throws IOException 91 { 92 // default encoding for name/comment 93 String nameEncoding = env.containsKey("encoding") ? 94 (String)env.get("encoding") : "UTF-8"; 95 this.noExtt = "false".equals(env.get("zipinfo-time")); 96 this.useTempFile = isTrue(env, "useTempFile"); 97 this.forceEnd64 = isTrue(env, "forceZIP64End"); 98 this.defaultMethod = isTrue(env, "noCompression") ? METHOD_STORED: METHOD_DEFLATED; 99 if (Files.notExists(zfpath)) { 100 // create a new zip if not exists 101 if (isTrue(env, "create")) { 102 try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) { 103 new END().write(os, 0, forceEnd64); 104 } 105 } else { 106 throw new FileSystemNotFoundException(zfpath.toString()); 107 } 108 } 109 // sm and existence check 110 zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ); 111 boolean writeable = AccessController.doPrivileged( 112 (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath)); 113 this.readOnly = !writeable; 114 this.zc = ZipCoder.get(nameEncoding); 115 this.rootdir = new ZipPath(this, new byte[]{'/'}); 116 this.ch = Files.newByteChannel(zfpath, READ); 117 try { 118 this.cen = initCEN(); 119 } catch (IOException x) { 120 try { 121 this.ch.close(); 122 } catch (IOException xx) { 123 x.addSuppressed(xx); 124 } 125 throw x; 126 } 127 this.provider = provider; 128 this.zfpath = zfpath; 129 } 130 131 // returns true if there is a name=true/"true" setting in env 132 private static boolean isTrue(Map<String, ?> env, String name) { 133 return "true".equals(env.get(name)) || TRUE.equals(env.get(name)); 134 } 135 136 @Override 137 public FileSystemProvider provider() { 138 return provider; 139 
} 140 141 @Override 142 public String getSeparator() { 143 return "/"; 144 } 145 146 @Override 147 public boolean isOpen() { 148 return isOpen; 149 } 150 151 @Override 152 public boolean isReadOnly() { 153 return readOnly; 154 } 155 156 private void checkWritable() throws IOException { 157 if (readOnly) 158 throw new ReadOnlyFileSystemException(); 159 } 160 161 void setReadOnly() { 162 this.readOnly = true; 163 } 164 165 @Override 166 public Iterable<Path> getRootDirectories() { 167 return List.of(rootdir); 168 } 169 170 ZipPath getRootDir() { 171 return rootdir; 172 } 173 174 @Override 175 public ZipPath getPath(String first, String... more) { 176 if (more.length == 0) { 177 return new ZipPath(this, first); 178 } 179 StringBuilder sb = new StringBuilder(); 180 sb.append(first); 181 for (String path : more) { 182 if (path.length() > 0) { 183 if (sb.length() > 0) { 184 sb.append('/'); 185 } 186 sb.append(path); 187 } 188 } 189 return new ZipPath(this, sb.toString()); 190 } 191 192 @Override 193 public UserPrincipalLookupService getUserPrincipalLookupService() { 194 throw new UnsupportedOperationException(); 195 } 196 197 @Override 198 public WatchService newWatchService() { 199 throw new UnsupportedOperationException(); 200 } 201 202 FileStore getFileStore(ZipPath path) { 203 return new ZipFileStore(path); 204 } 205 206 @Override 207 public Iterable<FileStore> getFileStores() { 208 return List.of(new ZipFileStore(rootdir)); 209 } 210 211 private static final Set<String> supportedFileAttributeViews = 212 Set.of("basic", "zip"); 213 214 @Override 215 public Set<String> supportedFileAttributeViews() { 216 return supportedFileAttributeViews; 217 } 218 219 @Override 220 public String toString() { 221 return zfpath.toString(); 222 } 223 224 Path getZipFile() { 225 return zfpath; 226 } 227 228 private static final String GLOB_SYNTAX = "glob"; 229 private static final String REGEX_SYNTAX = "regex"; 230 231 @Override 232 public PathMatcher getPathMatcher(String 
syntaxAndInput) { 233 int pos = syntaxAndInput.indexOf(':'); 234 if (pos <= 0 || pos == syntaxAndInput.length()) { 235 throw new IllegalArgumentException(); 236 } 237 String syntax = syntaxAndInput.substring(0, pos); 238 String input = syntaxAndInput.substring(pos + 1); 239 String expr; 240 if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) { 241 expr = toRegexPattern(input); 242 } else { 243 if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) { 244 expr = input; 245 } else { 246 throw new UnsupportedOperationException("Syntax '" + syntax + 247 "' not recognized"); 248 } 249 } 250 // return matcher 251 final Pattern pattern = Pattern.compile(expr); 252 return new PathMatcher() { 253 @Override 254 public boolean matches(Path path) { 255 return pattern.matcher(path.toString()).matches(); 256 } 257 }; 258 } 259 260 @Override 261 public void close() throws IOException { 262 beginWrite(); 263 try { 264 if (!isOpen) 265 return; 266 isOpen = false; // set closed 267 } finally { 268 endWrite(); 269 } 270 if (!streams.isEmpty()) { // unlock and close all remaining streams 271 Set<InputStream> copy = new HashSet<>(streams); 272 for (InputStream is: copy) 273 is.close(); 274 } 275 beginWrite(); // lock and sync 276 try { 277 AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> { 278 sync(); return null; 279 }); 280 ch.close(); // close the ch just in case no update 281 // and sync didn't close the ch 282 } catch (PrivilegedActionException e) { 283 throw (IOException)e.getException(); 284 } finally { 285 endWrite(); 286 } 287 288 synchronized (inflaters) { 289 for (Inflater inf : inflaters) 290 inf.end(); 291 } 292 synchronized (deflaters) { 293 for (Deflater def : deflaters) 294 def.end(); 295 } 296 297 IOException ioe = null; 298 synchronized (tmppaths) { 299 for (Path p: tmppaths) { 300 try { 301 AccessController.doPrivileged( 302 (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p)); 303 } catch (PrivilegedActionException e) { 304 IOException x = 
(IOException)e.getException(); 305 if (ioe == null) 306 ioe = x; 307 else 308 ioe.addSuppressed(x); 309 } 310 } 311 } 312 provider.removeFileSystem(zfpath, this); 313 if (ioe != null) 314 throw ioe; 315 } 316 317 ZipFileAttributes getFileAttributes(byte[] path) 318 throws IOException 319 { 320 Entry e; 321 beginRead(); 322 try { 323 ensureOpen(); 324 e = getEntry(path); 325 if (e == null) { 326 IndexNode inode = getInode(path); 327 if (inode == null) 328 return null; 329 // pseudo directory, uses METHOD_STORED 330 e = new Entry(inode.name, inode.isdir, METHOD_STORED); 331 e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp; 332 } 333 } finally { 334 endRead(); 335 } 336 return e; 337 } 338 339 void checkAccess(byte[] path) throws IOException { 340 beginRead(); 341 try { 342 ensureOpen(); 343 // is it necessary to readCEN as a sanity check? 344 if (getInode(path) == null) { 345 throw new NoSuchFileException(toString()); 346 } 347 348 } finally { 349 endRead(); 350 } 351 } 352 353 void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime) 354 throws IOException 355 { 356 checkWritable(); 357 beginWrite(); 358 try { 359 ensureOpen(); 360 Entry e = getEntry(path); // ensureOpen checked 361 if (e == null) 362 throw new NoSuchFileException(getString(path)); 363 if (e.type == Entry.CEN) 364 e.type = Entry.COPY; // copy e 365 if (mtime != null) 366 e.mtime = mtime.toMillis(); 367 if (atime != null) 368 e.atime = atime.toMillis(); 369 if (ctime != null) 370 e.ctime = ctime.toMillis(); 371 update(e); 372 } finally { 373 endWrite(); 374 } 375 } 376 377 boolean exists(byte[] path) 378 throws IOException 379 { 380 beginRead(); 381 try { 382 ensureOpen(); 383 return getInode(path) != null; 384 } finally { 385 endRead(); 386 } 387 } 388 389 boolean isDirectory(byte[] path) 390 throws IOException 391 { 392 beginRead(); 393 try { 394 IndexNode n = getInode(path); 395 return n != null && n.isDir(); 396 } finally { 397 endRead(); 398 } 399 } 400 401 // returns the list 
of child paths of "path" 402 Iterator<Path> iteratorOf(ZipPath dir, 403 DirectoryStream.Filter<? super Path> filter) 404 throws IOException 405 { 406 beginWrite(); // iteration of inodes needs exclusive lock 407 try { 408 ensureOpen(); 409 byte[] path = dir.getResolvedPath(); 410 IndexNode inode = getInode(path); 411 if (inode == null) 412 throw new NotDirectoryException(getString(path)); 413 List<Path> list = new ArrayList<>(); 414 IndexNode child = inode.child; 415 while (child != null) { 416 // (1) assume all path from zip file itself is "normalized" 417 // (2) IndexNode.name is absolute. see IndexNode(byte[],int,int) 418 // (3) if parent "dir" is relative when ZipDirectoryStream 419 // is created, the returned child path needs to be relative 420 // as well. 421 byte[] cname = child.name; 422 if (!dir.isAbsolute()) { 423 cname = Arrays.copyOfRange(cname, 1, cname.length); 424 } 425 ZipPath zpath = new ZipPath(this, cname, true); 426 if (filter == null || filter.accept(zpath)) 427 list.add(zpath); 428 child = child.sibling; 429 } 430 return list.iterator(); 431 } finally { 432 endWrite(); 433 } 434 } 435 436 void createDirectory(byte[] dir, FileAttribute<?>... attrs) 437 throws IOException 438 { 439 checkWritable(); 440 // dir = toDirectoryPath(dir); 441 beginWrite(); 442 try { 443 ensureOpen(); 444 if (dir.length == 0 || exists(dir)) // root dir, or exiting dir 445 throw new FileAlreadyExistsException(getString(dir)); 446 checkParents(dir); 447 Entry e = new Entry(dir, Entry.NEW, true, METHOD_STORED); 448 update(e); 449 } finally { 450 endWrite(); 451 } 452 } 453 454 void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... 
options) 455 throws IOException 456 { 457 checkWritable(); 458 if (Arrays.equals(src, dst)) 459 return; // do nothing, src and dst are the same 460 461 beginWrite(); 462 try { 463 ensureOpen(); 464 Entry eSrc = getEntry(src); // ensureOpen checked 465 466 if (eSrc == null) 467 throw new NoSuchFileException(getString(src)); 468 if (eSrc.isDir()) { // spec says to create dst dir 469 createDirectory(dst); 470 return; 471 } 472 boolean hasReplace = false; 473 boolean hasCopyAttrs = false; 474 for (CopyOption opt : options) { 475 if (opt == REPLACE_EXISTING) 476 hasReplace = true; 477 else if (opt == COPY_ATTRIBUTES) 478 hasCopyAttrs = true; 479 } 480 Entry eDst = getEntry(dst); 481 if (eDst != null) { 482 if (!hasReplace) 483 throw new FileAlreadyExistsException(getString(dst)); 484 } else { 485 checkParents(dst); 486 } 487 Entry u = new Entry(eSrc, Entry.COPY); // copy eSrc entry 488 u.name(dst); // change name 489 if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) 490 { 491 u.type = eSrc.type; // make it the same type 492 if (deletesrc) { // if it's a "rename", take the data 493 u.bytes = eSrc.bytes; 494 u.file = eSrc.file; 495 } else { // if it's not "rename", copy the data 496 if (eSrc.bytes != null) 497 u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length); 498 else if (eSrc.file != null) { 499 u.file = getTempPathForEntry(null); 500 Files.copy(eSrc.file, u.file, REPLACE_EXISTING); 501 } 502 } 503 } 504 if (!hasCopyAttrs) 505 u.mtime = u.atime= u.ctime = System.currentTimeMillis(); 506 update(u); 507 if (deletesrc) 508 updateDelete(eSrc); 509 } finally { 510 endWrite(); 511 } 512 } 513 514 // Returns an output stream for writing the contents into the specified 515 // entry. 516 OutputStream newOutputStream(byte[] path, OpenOption... 
options) 517 throws IOException 518 { 519 checkWritable(); 520 boolean hasCreateNew = false; 521 boolean hasCreate = false; 522 boolean hasAppend = false; 523 boolean hasTruncate = false; 524 for (OpenOption opt: options) { 525 if (opt == READ) 526 throw new IllegalArgumentException("READ not allowed"); 527 if (opt == CREATE_NEW) 528 hasCreateNew = true; 529 if (opt == CREATE) 530 hasCreate = true; 531 if (opt == APPEND) 532 hasAppend = true; 533 if (opt == TRUNCATE_EXISTING) 534 hasTruncate = true; 535 } 536 if (hasAppend && hasTruncate) 537 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 538 beginRead(); // only need a readlock, the "update()" will 539 try { // try to obtain a writelock when the os is 540 ensureOpen(); // being closed. 541 Entry e = getEntry(path); 542 if (e != null) { 543 if (e.isDir() || hasCreateNew) 544 throw new FileAlreadyExistsException(getString(path)); 545 if (hasAppend) { 546 InputStream is = getInputStream(e); 547 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 548 is.transferTo(os); 549 is.close(); 550 return os; 551 } 552 return getOutputStream(new Entry(e, Entry.NEW)); 553 } else { 554 if (!hasCreate && !hasCreateNew) 555 throw new NoSuchFileException(getString(path)); 556 checkParents(path); 557 return getOutputStream(new Entry(path, Entry.NEW, false, defaultMethod)); 558 } 559 } finally { 560 endRead(); 561 } 562 } 563 564 // Returns an input stream for reading the contents of the specified 565 // file entry. 566 InputStream newInputStream(byte[] path) throws IOException { 567 beginRead(); 568 try { 569 ensureOpen(); 570 Entry e = getEntry(path); 571 if (e == null) 572 throw new NoSuchFileException(getString(path)); 573 if (e.isDir()) 574 throw new FileSystemException(getString(path), "is a directory", null); 575 return getInputStream(e); 576 } finally { 577 endRead(); 578 } 579 } 580 581 private void checkOptions(Set<? 
extends OpenOption> options) { 582 // check for options of null type and option is an intance of StandardOpenOption 583 for (OpenOption option : options) { 584 if (option == null) 585 throw new NullPointerException(); 586 if (!(option instanceof StandardOpenOption)) 587 throw new IllegalArgumentException(); 588 } 589 if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING)) 590 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 591 } 592 593 594 // Returns an output SeekableByteChannel for either 595 // (1) writing the contents of a new entry, if the entry doesn't exit, or 596 // (2) updating/replacing the contents of an existing entry. 597 // Note: The content is not compressed. 598 private class EntryOutputChannel extends ByteArrayChannel { 599 Entry e; 600 601 EntryOutputChannel(Entry e) throws IOException { 602 super(e.size > 0? (int)e.size : 8192, false); 603 this.e = e; 604 if (e.mtime == -1) 605 e.mtime = System.currentTimeMillis(); 606 if (e.method == -1) 607 e.method = defaultMethod; 608 // store size, compressed size, and crc-32 in datadescriptor 609 e.flag = FLAG_DATADESCR; 610 if (zc.isUTF8()) 611 e.flag |= FLAG_USE_UTF8; 612 } 613 614 @Override 615 public void close() throws IOException { 616 e.bytes = toByteArray(); 617 e.size = e.bytes.length; 618 e.crc = -1; 619 super.close(); 620 update(e); 621 } 622 } 623 624 private int getCompressMethod(FileAttribute<?>... attrs) { 625 return defaultMethod; 626 } 627 628 // Returns a Writable/ReadByteChannel for now. Might consdier to use 629 // newFileChannel() instead, which dump the entry data into a regular 630 // file on the default file system and create a FileChannel on top of 631 // it. 632 SeekableByteChannel newByteChannel(byte[] path, 633 Set<? extends OpenOption> options, 634 FileAttribute<?>... 
attrs) 635 throws IOException 636 { 637 checkOptions(options); 638 if (options.contains(StandardOpenOption.WRITE) || 639 options.contains(StandardOpenOption.APPEND)) { 640 checkWritable(); 641 beginRead(); // only need a readlock, the "update()" will obtain 642 // thewritelock when the channel is closed 643 try { 644 ensureOpen(); 645 Entry e = getEntry(path); 646 if (e != null) { 647 if (e.isDir() || options.contains(CREATE_NEW)) 648 throw new FileAlreadyExistsException(getString(path)); 649 SeekableByteChannel sbc = 650 new EntryOutputChannel(new Entry(e, Entry.NEW)); 651 if (options.contains(APPEND)) { 652 try (InputStream is = getInputStream(e)) { // copyover 653 byte[] buf = new byte[8192]; 654 ByteBuffer bb = ByteBuffer.wrap(buf); 655 int n; 656 while ((n = is.read(buf)) != -1) { 657 bb.position(0); 658 bb.limit(n); 659 sbc.write(bb); 660 } 661 } 662 } 663 return sbc; 664 } 665 if (!options.contains(CREATE) && !options.contains(CREATE_NEW)) 666 throw new NoSuchFileException(getString(path)); 667 checkParents(path); 668 return new EntryOutputChannel( 669 new Entry(path, Entry.NEW, false, getCompressMethod(attrs))); 670 671 } finally { 672 endRead(); 673 } 674 } else { 675 beginRead(); 676 try { 677 ensureOpen(); 678 Entry e = getEntry(path); 679 if (e == null || e.isDir()) 680 throw new NoSuchFileException(getString(path)); 681 try (InputStream is = getInputStream(e)) { 682 // TBD: if (e.size < NNNNN); 683 return new ByteArrayChannel(is.readAllBytes(), true); 684 } 685 } finally { 686 endRead(); 687 } 688 } 689 } 690 691 // Returns a FileChannel of the specified entry. 692 // 693 // This implementation creates a temporary file on the default file system, 694 // copy the entry data into it if the entry exists, and then create a 695 // FileChannel on top of it. 696 FileChannel newFileChannel(byte[] path, 697 Set<? extends OpenOption> options, 698 FileAttribute<?>... 
attrs) 699 throws IOException 700 { 701 checkOptions(options); 702 final boolean forWrite = (options.contains(StandardOpenOption.WRITE) || 703 options.contains(StandardOpenOption.APPEND)); 704 beginRead(); 705 try { 706 ensureOpen(); 707 Entry e = getEntry(path); 708 if (forWrite) { 709 checkWritable(); 710 if (e == null) { 711 if (!options.contains(StandardOpenOption.CREATE) && 712 !options.contains(StandardOpenOption.CREATE_NEW)) { 713 throw new NoSuchFileException(getString(path)); 714 } 715 } else { 716 if (options.contains(StandardOpenOption.CREATE_NEW)) { 717 throw new FileAlreadyExistsException(getString(path)); 718 } 719 if (e.isDir()) 720 throw new FileAlreadyExistsException("directory <" 721 + getString(path) + "> exists"); 722 } 723 options = new HashSet<>(options); 724 options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile 725 } else if (e == null || e.isDir()) { 726 throw new NoSuchFileException(getString(path)); 727 } 728 729 final boolean isFCH = (e != null && e.type == Entry.FILECH); 730 final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path); 731 final FileChannel fch = tmpfile.getFileSystem() 732 .provider() 733 .newFileChannel(tmpfile, options, attrs); 734 final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH); 735 if (forWrite) { 736 u.flag = FLAG_DATADESCR; 737 u.method = getCompressMethod(attrs); 738 } 739 // is there a better way to hook into the FileChannel's close method? 
740 return new FileChannel() { 741 public int write(ByteBuffer src) throws IOException { 742 return fch.write(src); 743 } 744 public long write(ByteBuffer[] srcs, int offset, int length) 745 throws IOException 746 { 747 return fch.write(srcs, offset, length); 748 } 749 public long position() throws IOException { 750 return fch.position(); 751 } 752 public FileChannel position(long newPosition) 753 throws IOException 754 { 755 fch.position(newPosition); 756 return this; 757 } 758 public long size() throws IOException { 759 return fch.size(); 760 } 761 public FileChannel truncate(long size) 762 throws IOException 763 { 764 fch.truncate(size); 765 return this; 766 } 767 public void force(boolean metaData) 768 throws IOException 769 { 770 fch.force(metaData); 771 } 772 public long transferTo(long position, long count, 773 WritableByteChannel target) 774 throws IOException 775 { 776 return fch.transferTo(position, count, target); 777 } 778 public long transferFrom(ReadableByteChannel src, 779 long position, long count) 780 throws IOException 781 { 782 return fch.transferFrom(src, position, count); 783 } 784 public int read(ByteBuffer dst) throws IOException { 785 return fch.read(dst); 786 } 787 public int read(ByteBuffer dst, long position) 788 throws IOException 789 { 790 return fch.read(dst, position); 791 } 792 public long read(ByteBuffer[] dsts, int offset, int length) 793 throws IOException 794 { 795 return fch.read(dsts, offset, length); 796 } 797 public int write(ByteBuffer src, long position) 798 throws IOException 799 { 800 return fch.write(src, position); 801 } 802 public MappedByteBuffer map(MapMode mode, 803 long position, long size) 804 throws IOException 805 { 806 throw new UnsupportedOperationException(); 807 } 808 public FileLock lock(long position, long size, boolean shared) 809 throws IOException 810 { 811 return fch.lock(position, size, shared); 812 } 813 public FileLock tryLock(long position, long size, boolean shared) 814 throws IOException 815 { 
816 return fch.tryLock(position, size, shared); 817 } 818 protected void implCloseChannel() throws IOException { 819 fch.close(); 820 if (forWrite) { 821 u.mtime = System.currentTimeMillis(); 822 u.size = Files.size(u.file); 823 824 update(u); 825 } else { 826 if (!isFCH) // if this is a new fch for reading 827 removeTempPathForEntry(tmpfile); 828 } 829 } 830 }; 831 } finally { 832 endRead(); 833 } 834 } 835 836 // the outstanding input streams that need to be closed 837 private Set<InputStream> streams = 838 Collections.synchronizedSet(new HashSet<InputStream>()); 839 840 private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>()); 841 private Path getTempPathForEntry(byte[] path) throws IOException { 842 Path tmpPath = createTempFileInSameDirectoryAs(zfpath); 843 if (path != null) { 844 Entry e = getEntry(path); 845 if (e != null) { 846 try (InputStream is = newInputStream(path)) { 847 Files.copy(is, tmpPath, REPLACE_EXISTING); 848 } 849 } 850 } 851 return tmpPath; 852 } 853 854 private void removeTempPathForEntry(Path path) throws IOException { 855 Files.delete(path); 856 tmppaths.remove(path); 857 } 858 859 // check if all parents really exit. ZIP spec does not require 860 // the existence of any "parent directory". 
861 private void checkParents(byte[] path) throws IOException { 862 beginRead(); 863 try { 864 while ((path = getParent(path)) != null && 865 path != ROOTPATH) { 866 if (!inodes.containsKey(IndexNode.keyOf(path))) { 867 throw new NoSuchFileException(getString(path)); 868 } 869 } 870 } finally { 871 endRead(); 872 } 873 } 874 875 private static byte[] ROOTPATH = new byte[] { '/' }; 876 private static byte[] getParent(byte[] path) { 877 int off = getParentOff(path); 878 if (off <= 1) 879 return ROOTPATH; 880 return Arrays.copyOf(path, off); 881 } 882 883 private static int getParentOff(byte[] path) { 884 int off = path.length - 1; 885 if (off > 0 && path[off] == '/') // isDirectory 886 off--; 887 while (off > 0 && path[off] != '/') { off--; } 888 return off; 889 } 890 891 private final void beginWrite() { 892 rwlock.writeLock().lock(); 893 } 894 895 private final void endWrite() { 896 rwlock.writeLock().unlock(); 897 } 898 899 private final void beginRead() { 900 rwlock.readLock().lock(); 901 } 902 903 private final void endRead() { 904 rwlock.readLock().unlock(); 905 } 906 907 /////////////////////////////////////////////////////////////////// 908 909 private volatile boolean isOpen = true; 910 private final SeekableByteChannel ch; // channel to the zipfile 911 final byte[] cen; // CEN & ENDHDR 912 private END end; 913 private long locpos; // position of first LOC header (usually 0) 914 915 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 916 917 // name -> pos (in cen), IndexNode itself can be used as a "key" 918 private LinkedHashMap<IndexNode, IndexNode> inodes; 919 920 final byte[] getBytes(String name) { 921 return zc.getBytes(name); 922 } 923 924 final String getString(byte[] name) { 925 return zc.toString(name); 926 } 927 928 @SuppressWarnings("deprecation") 929 protected void finalize() throws IOException { 930 close(); 931 } 932 933 // Reads len bytes of data from the specified offset into buf. 
934 // Returns the total number of bytes read. 935 // Each/every byte read from here (except the cen, which is mapped). 936 final long readFullyAt(byte[] buf, int off, long len, long pos) 937 throws IOException 938 { 939 ByteBuffer bb = ByteBuffer.wrap(buf); 940 bb.position(off); 941 bb.limit((int)(off + len)); 942 return readFullyAt(bb, pos); 943 } 944 945 private final long readFullyAt(ByteBuffer bb, long pos) 946 throws IOException 947 { 948 synchronized(ch) { 949 return ch.position(pos).read(bb); 950 } 951 } 952 953 // Searches for end of central directory (END) header. The contents of 954 // the END header will be read and placed in endbuf. Returns the file 955 // position of the END header, otherwise returns -1 if the END header 956 // was not found or an error occurred. 957 private END findEND() throws IOException 958 { 959 byte[] buf = new byte[READBLOCKSZ]; 960 long ziplen = ch.size(); 961 long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0; 962 long minPos = minHDR - (buf.length - ENDHDR); 963 964 for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) 965 { 966 int off = 0; 967 if (pos < 0) { 968 // Pretend there are some NUL bytes before start of file 969 off = (int)-pos; 970 Arrays.fill(buf, 0, off, (byte)0); 971 } 972 int len = buf.length - off; 973 if (readFullyAt(buf, off, len, pos + off) != len) 974 zerror("zip END header not found"); 975 976 // Now scan the block backwards for END header signature 977 for (int i = buf.length - ENDHDR; i >= 0; i--) { 978 if (buf[i+0] == (byte)'P' && 979 buf[i+1] == (byte)'K' && 980 buf[i+2] == (byte)'\005' && 981 buf[i+3] == (byte)'\006' && 982 (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) { 983 // Found END header 984 buf = Arrays.copyOfRange(buf, i, i + ENDHDR); 985 END end = new END(); 986 end.endsub = ENDSUB(buf); 987 end.centot = ENDTOT(buf); 988 end.cenlen = ENDSIZ(buf); 989 end.cenoff = ENDOFF(buf); 990 end.comlen = ENDCOM(buf); 991 end.endpos = pos + i; 992 // try if 
there is zip64 end; 993 byte[] loc64 = new byte[ZIP64_LOCHDR]; 994 if (end.endpos < ZIP64_LOCHDR || 995 readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR) 996 != loc64.length || 997 !locator64SigAt(loc64, 0)) { 998 return end; 999 } 1000 long end64pos = ZIP64_LOCOFF(loc64); 1001 byte[] end64buf = new byte[ZIP64_ENDHDR]; 1002 if (readFullyAt(end64buf, 0, end64buf.length, end64pos) 1003 != end64buf.length || 1004 !end64SigAt(end64buf, 0)) { 1005 return end; 1006 } 1007 // end64 found, 1008 long cenlen64 = ZIP64_ENDSIZ(end64buf); 1009 long cenoff64 = ZIP64_ENDOFF(end64buf); 1010 long centot64 = ZIP64_ENDTOT(end64buf); 1011 // double-check 1012 if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL || 1013 cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL || 1014 centot64 != end.centot && end.centot != ZIP64_MINVAL32) { 1015 return end; 1016 } 1017 // to use the end64 values 1018 end.cenlen = cenlen64; 1019 end.cenoff = cenoff64; 1020 end.centot = (int)centot64; // assume total < 2g 1021 end.endpos = end64pos; 1022 return end; 1023 } 1024 } 1025 } 1026 zerror("zip END header not found"); 1027 return null; //make compiler happy 1028 } 1029 1030 // Reads zip file central directory. Returns the file position of first 1031 // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL 1032 // then the error was a zip format error and zip->msg has the error text. 1033 // Always pass in -1 for knownTotal; it's used for a recursive call. 
    // Reads the whole central directory (CEN) plus END record into memory
    // and populates the inode map / directory tree from it.
    // Returns the raw CEN bytes, or null when the zip contains only an
    // END header (an empty archive).
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;        // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen = CENNAM(cen, pos);
            int elen = CENEXT(cen, pos);
            int clen = CENCOM(cen, pos);
            // Reject encrypted entries and unknown compression methods up
            // front rather than failing later on read.
            if ((CENFLG(cen, pos) & 1) != 0) {
                zerror("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                zerror("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos, nlen);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        // After the loop pos must land exactly on the END record.
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    // Verifies the file system has not been closed; operations on a
    // closed file system must fail fast.
    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        // Tracked so pending temp files can be cleaned up later.
        tmppaths.add(tmpPath);
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    // true when the in-memory state differs from the zip file on disk;
    // checked by sync() to decide whether a rewrite is needed.
    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // Removes a deleted entry's inode from both the directory tree and
    // the lookup map, and marks the file system dirty.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Installs (or replaces) an updated entry in the lookup map and, for
    // NEW/FILECH/COPY entries, links it into its parent's child list.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                // NOTE(review): assumes the parent directory inode already
                // exists in "inodes"; a missing parent would NPE here --
                // confirm callers always create parent directories first.
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }
1145 private long copyLOCEntry(Entry e, boolean updateHeader, 1146 OutputStream os, 1147 long written, byte[] buf) 1148 throws IOException 1149 { 1150 long locoff = e.locoff; // where to read 1151 e.locoff = written; // update the e.locoff with new value 1152 1153 // calculate the size need to write out 1154 long size = 0; 1155 // if there is A ext 1156 if ((e.flag & FLAG_DATADESCR) != 0) { 1157 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1158 size = 24; 1159 else 1160 size = 16; 1161 } 1162 // read loc, use the original loc.elen/nlen 1163 // 1164 // an extra byte after loc is read, which should be the first byte of the 1165 // 'name' field of the loc. if this byte is '/', which means the original 1166 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1167 // is used to output the loc, in which the leading "/" will be removed 1168 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1169 throw new ZipException("loc: reading failed"); 1170 1171 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1172 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1173 size += e.csize; 1174 written = e.writeLOC(os) + size; 1175 } else { 1176 os.write(buf, 0, LOCHDR); // write out the loc header 1177 locoff += LOCHDR; 1178 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1179 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1180 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1181 written = LOCHDR + size; 1182 } 1183 int n; 1184 while (size > 0 && 1185 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1186 { 1187 if (size < n) 1188 n = (int)size; 1189 os.write(buf, 0, n); 1190 size -= n; 1191 locoff += n; 1192 } 1193 return written; 1194 } 1195 1196 private long writeEntry(Entry e, OutputStream os, byte[] buf) 1197 throws IOException { 1198 1199 if (e.bytes == null && e.file == null) // dir, 0-length data 1200 return 0; 1201 1202 long written = 0; 1203 try (OutputStream os2 = e.method == METHOD_STORED ? 
1204 new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) { 1205 if (e.bytes != null) { // in-memory 1206 os2.write(e.bytes, 0, e.bytes.length); 1207 } else if (e.file != null) { // tmp file 1208 if (e.type == Entry.NEW || e.type == Entry.FILECH) { 1209 try (InputStream is = Files.newInputStream(e.file)) { 1210 is.transferTo(os2); 1211 } 1212 } 1213 Files.delete(e.file); 1214 tmppaths.remove(e.file); 1215 } 1216 } 1217 written += e.csize; 1218 if ((e.flag & FLAG_DATADESCR) != 0) { 1219 written += e.writeEXT(os); 1220 } 1221 return written; 1222 } 1223 1224 // sync the zip file system, if there is any udpate 1225 private void sync() throws IOException { 1226 1227 if (!hasUpdate) 1228 return; 1229 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1230 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1231 { 1232 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1233 long written = 0; 1234 byte[] buf = new byte[8192]; 1235 Entry e = null; 1236 1237 // write loc 1238 for (IndexNode inode : inodes.values()) { 1239 if (inode instanceof Entry) { // an updated inode 1240 e = (Entry)inode; 1241 try { 1242 if (e.type == Entry.COPY) { 1243 // entry copy: the only thing changed is the "name" 1244 // and "nlen" in LOC header, so we udpate/rewrite the 1245 // LOC in new file and simply copy the rest (data and 1246 // ext) without enflating/deflating from the old zip 1247 // file LOC entry. 
1248 written += copyLOCEntry(e, true, os, written, buf); 1249 } else { // NEW, FILECH or CEN 1250 e.locoff = written; 1251 written += e.writeLOC(os); // write loc header 1252 written += writeEntry(e, os, buf); 1253 } 1254 elist.add(e); 1255 } catch (IOException x) { 1256 x.printStackTrace(); // skip any in-accurate entry 1257 } 1258 } else { // unchanged inode 1259 if (inode.pos == -1) { 1260 continue; // pseudo directory node 1261 } 1262 if (inode.name.length == 1 && inode.name[0] == '/') { 1263 continue; // no root '/' directory even it 1264 // exits in original zip/jar file. 1265 } 1266 e = Entry.readCEN(this, inode); 1267 try { 1268 written += copyLOCEntry(e, false, os, written, buf); 1269 elist.add(e); 1270 } catch (IOException x) { 1271 x.printStackTrace(); // skip any wrong entry 1272 } 1273 } 1274 } 1275 1276 // now write back the cen and end table 1277 end.cenoff = written; 1278 for (Entry entry : elist) { 1279 written += entry.writeCEN(os); 1280 } 1281 end.centot = elist.size(); 1282 end.cenlen = written - end.cenoff; 1283 end.write(os, written, forceEnd64); 1284 } 1285 1286 ch.close(); 1287 Files.delete(zfpath); 1288 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1289 hasUpdate = false; // clear 1290 } 1291 1292 IndexNode getInode(byte[] path) { 1293 if (path == null) 1294 throw new NullPointerException("path"); 1295 return inodes.get(IndexNode.keyOf(path)); 1296 } 1297 1298 Entry getEntry(byte[] path) throws IOException { 1299 IndexNode inode = getInode(path); 1300 if (inode instanceof Entry) 1301 return (Entry)inode; 1302 if (inode == null || inode.pos == -1) 1303 return null; 1304 return Entry.readCEN(this, inode); 1305 } 1306 1307 public void deleteFile(byte[] path, boolean failIfNotExists) 1308 throws IOException 1309 { 1310 checkWritable(); 1311 1312 IndexNode inode = getInode(path); 1313 if (inode == null) { 1314 if (path != null && path.length == 0) 1315 throw new ZipException("root directory </> can't not be delete"); 1316 if (failIfNotExists) 
1317 throw new NoSuchFileException(getString(path)); 1318 } else { 1319 if (inode.isDir() && inode.child != null) 1320 throw new DirectoryNotEmptyException(getString(path)); 1321 updateDelete(inode); 1322 } 1323 } 1324 1325 // Returns an out stream for either 1326 // (1) writing the contents of a new entry, if the entry exits, or 1327 // (2) updating/replacing the contents of the specified existing entry. 1328 private OutputStream getOutputStream(Entry e) throws IOException { 1329 1330 if (e.mtime == -1) 1331 e.mtime = System.currentTimeMillis(); 1332 if (e.method == -1) 1333 e.method = defaultMethod; 1334 // store size, compressed size, and crc-32 in datadescr 1335 e.flag = FLAG_DATADESCR; 1336 if (zc.isUTF8()) 1337 e.flag |= FLAG_USE_UTF8; 1338 OutputStream os; 1339 if (useTempFile) { 1340 e.file = getTempPathForEntry(null); 1341 os = Files.newOutputStream(e.file, WRITE); 1342 } else { 1343 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1344 } 1345 return new EntryOutputStream(e, os); 1346 } 1347 1348 private class EntryOutputStream extends FilterOutputStream { 1349 private Entry e; 1350 private long written; 1351 private boolean isClosed; 1352 1353 EntryOutputStream(Entry e, OutputStream os) throws IOException { 1354 super(os); 1355 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1356 // this.written = 0; 1357 } 1358 1359 @Override 1360 public synchronized void write(int b) throws IOException { 1361 out.write(b); 1362 written += 1; 1363 } 1364 1365 @Override 1366 public synchronized void write(byte b[], int off, int len) 1367 throws IOException { 1368 out.write(b, off, len); 1369 written += len; 1370 } 1371 1372 @Override 1373 public synchronized void close() throws IOException { 1374 if (isClosed) { 1375 return; 1376 } 1377 isClosed = true; 1378 e.size = written; 1379 if (out instanceof ByteArrayOutputStream) 1380 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1381 super.close(); 1382 update(e); 1383 } 1384 } 1385 1386 // 
Wrapper output stream class to write out a "stored" entry. 1387 // (1) this class does not close the underlying out stream when 1388 // being closed. 1389 // (2) no need to be "synchronized", only used by sync() 1390 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1391 private Entry e; 1392 private CRC32 crc; 1393 private long written; 1394 private boolean isClosed; 1395 1396 EntryOutputStreamCRC32(Entry e, OutputStream os) throws IOException { 1397 super(os); 1398 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1399 this.crc = new CRC32(); 1400 } 1401 1402 @Override 1403 public void write(int b) throws IOException { 1404 out.write(b); 1405 crc.update(b); 1406 written += 1; 1407 } 1408 1409 @Override 1410 public void write(byte b[], int off, int len) 1411 throws IOException { 1412 out.write(b, off, len); 1413 crc.update(b, off, len); 1414 written += len; 1415 } 1416 1417 @Override 1418 public void close() throws IOException { 1419 if (isClosed) 1420 return; 1421 isClosed = true; 1422 e.size = e.csize = written; 1423 e.crc = crc.getValue(); 1424 } 1425 } 1426 1427 // Wrapper output stream class to write out a "deflated" entry. 1428 // (1) this class does not close the underlying out stream when 1429 // being closed. 
    // Wrapper output stream class to write out a "deflated" entry.
    // (1) this class does not close the underlying out stream when
    //     being closed.
    // (2) no need to be "synchronized", only used by sync()
    private class EntryOutputStreamDef extends DeflaterOutputStream {
        private CRC32 crc;
        private Entry e;
        private boolean isClosed;

        EntryOutputStreamDef(Entry e, OutputStream os) throws IOException {
            super(os, getDeflater());    // deflater comes from the shared pool
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public void write(byte b[], int off, int len)
            throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        // Finishes the deflater and records size/csize/crc on the entry.
        // Deliberately does not close the underlying stream.
        // NOTE(review): the pooled deflater is not returned via
        // releaseDeflater() here -- confirm the caller handles that.
        @Override
        public void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            finish();
            e.size = def.getBytesRead();
            e.csize = def.getBytesWritten();
            e.crc = crc.getValue();
        }
    }

    // Returns an input stream delivering the (decompressed) content of
    // the given entry, dispatching on how the entry is currently stored
    // (in-memory bytes, temp file, or inside the zip file itself).
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis = null;

        if (e.type == Entry.NEW) {
            // now bytes & file is uncompressed.
            if (e.bytes != null)
                return new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                return Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {  // untouched CEN or COPY: read raw data from the channel
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
                private boolean isClosed = false;
                // Returns the pooled inflater on close and deregisters
                // this stream from the open-stream set.
                public void close() throws IOException {
                    if (!isClosed) {
                        releaseInflater(inf);
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                // available() is an estimate based on uncompressed size
                // minus what the inflater has produced so far.
                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);
        return eis;
    }
    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry
        protected final long size;              // uncompressed size of this entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            size = e.size;
            pos = e.locoff;
            if (pos == -1) {
                // locoff unknown: re-read the entry from the CEN
                Entry e2 = getEntry(e.name);
                if (e2 == null) {
                    throw new ZipException("invalid loc for entry <" + e.name + ">");
                }
                pos = e2.locoff;
            }
            // Negative pos marks "data offset not yet resolved"; see
            // initDataPos(), which flips the sign and skips the LOC header.
            pos = -pos; // lazy initialize the real data offset
        }

        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            initDataPos();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // position()+read() must be atomic w.r.t. other users of the
            // shared channel, hence the lock on zfch.
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }

        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }

        // NOTE(review): a negative n is not clamped to 0 here, which
        // would move pos backwards and grow rem -- confirm callers
        // never pass a negative skip count.
        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }

        public int available() {
            return rem > Integer.MAX_VALUE ?
                Integer.MAX_VALUE : (int) rem;
        }

        public long size() {
            return size;
        }

        public void close() {
            rem = 0;
            streams.remove(this);
        }

        // Resolves the lazily-stored (negated) LOC offset into the real
        // data position by reading the LOC header and skipping past the
        // name and extra fields.
        private void initDataPos() throws IOException {
            if (pos <= 0) {
                pos = -pos + locpos;
                byte[] buf = new byte[LOCHDR];
                if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                    throw new ZipException("invalid loc " + pos + " for entry reading");
                }
                pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
            }
        }
    }

    // Convenience helper: every CEN/END format error is raised as a
    // ZipException with the given message.
    static void zerror(String msg) throws ZipException {
        throw new ZipException(msg);
    }

    // Maximum number of de/inflater we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                return new Inflater(true);    // "nowrap": raw deflate data
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();    // pool full: free the native resources
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();
1677 private Deflater getDeflater() { 1678 synchronized (deflaters) { 1679 int size = deflaters.size(); 1680 if (size > 0) { 1681 Deflater def = deflaters.remove(size - 1); 1682 return def; 1683 } else { 1684 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1685 } 1686 } 1687 } 1688 1689 // Releases the specified inflater to the list of available inflaters. 1690 private void releaseDeflater(Deflater def) { 1691 synchronized (deflaters) { 1692 if (inflaters.size() < MAX_FLATER) { 1693 def.reset(); 1694 deflaters.add(def); 1695 } else { 1696 def.end(); 1697 } 1698 } 1699 } 1700 1701 // End of central directory record 1702 static class END { 1703 // these 2 fields are not used by anyone and write() uses "0" 1704 // int disknum; 1705 // int sdisknum; 1706 int endsub; // endsub 1707 int centot; // 4 bytes 1708 long cenlen; // 4 bytes 1709 long cenoff; // 4 bytes 1710 int comlen; // comment length 1711 byte[] comment; 1712 1713 /* members of Zip64 end of central directory locator */ 1714 // int diskNum; 1715 long endpos; 1716 // int disktot; 1717 1718 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 1719 boolean hasZip64 = forceEnd64; // false; 1720 long xlen = cenlen; 1721 long xoff = cenoff; 1722 if (xlen >= ZIP64_MINVAL) { 1723 xlen = ZIP64_MINVAL; 1724 hasZip64 = true; 1725 } 1726 if (xoff >= ZIP64_MINVAL) { 1727 xoff = ZIP64_MINVAL; 1728 hasZip64 = true; 1729 } 1730 int count = centot; 1731 if (count >= ZIP64_MINVAL32) { 1732 count = ZIP64_MINVAL32; 1733 hasZip64 = true; 1734 } 1735 if (hasZip64) { 1736 long off64 = offset; 1737 //zip64 end of central directory record 1738 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1739 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1740 writeShort(os, 45); // version made by 1741 writeShort(os, 45); // version needed to extract 1742 writeInt(os, 0); // number of this disk 1743 writeInt(os, 0); // central directory start disk 1744 writeLong(os, centot); // number of 
directory entries on disk 1745 writeLong(os, centot); // number of directory entries 1746 writeLong(os, cenlen); // length of central directory 1747 writeLong(os, cenoff); // offset of central directory 1748 1749 //zip64 end of central directory locator 1750 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1751 writeInt(os, 0); // zip64 END start disk 1752 writeLong(os, off64); // offset of zip64 END 1753 writeInt(os, 1); // total number of disks (?) 1754 } 1755 writeInt(os, ENDSIG); // END record signature 1756 writeShort(os, 0); // number of this disk 1757 writeShort(os, 0); // central directory start disk 1758 writeShort(os, count); // number of directory entries on disk 1759 writeShort(os, count); // total number of directory entries 1760 writeInt(os, xlen); // length of central directory 1761 writeInt(os, xoff); // offset of central directory 1762 if (comment != null) { // zip file comment 1763 writeShort(os, comment.length); 1764 writeBytes(os, comment); 1765 } else { 1766 writeShort(os, 0); 1767 } 1768 } 1769 } 1770 1771 // Internal node that links a "name" to its pos in cen table. 1772 // The node itself can be used as a "key" to lookup itself in 1773 // the HashMap inodes. 
1774 static class IndexNode { 1775 byte[] name; 1776 int hashcode; // node is hashable/hashed by its name 1777 int pos = -1; // position in cen table, -1 menas the 1778 // entry does not exists in zip file 1779 boolean isdir; 1780 1781 IndexNode(byte[] name, boolean isdir) { 1782 name(name); 1783 this.isdir = isdir; 1784 this.pos = -1; 1785 } 1786 1787 IndexNode(byte[] name, int pos) { 1788 name(name); 1789 this.pos = pos; 1790 } 1791 1792 // constructor for cenInit() (1) remove tailing '/' (2) pad leading '/' 1793 IndexNode(byte[] cen, int pos, int nlen) { 1794 int noff = pos + CENHDR; 1795 if (cen[noff + nlen - 1] == '/') { 1796 isdir = true; 1797 nlen--; 1798 } 1799 if (nlen > 0 && cen[noff] == '/') { 1800 name = Arrays.copyOfRange(cen, noff, noff + nlen); 1801 } else { 1802 name = new byte[nlen + 1]; 1803 System.arraycopy(cen, noff, name, 1, nlen); 1804 name[0] = '/'; 1805 } 1806 name(name); 1807 this.pos = pos; 1808 } 1809 1810 private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>(); 1811 1812 final static IndexNode keyOf(byte[] name) { // get a lookup key; 1813 IndexNode key = cachedKey.get(); 1814 if (key == null) { 1815 key = new IndexNode(name, -1); 1816 cachedKey.set(key); 1817 } 1818 return key.as(name); 1819 } 1820 1821 final void name(byte[] name) { 1822 this.name = name; 1823 this.hashcode = Arrays.hashCode(name); 1824 } 1825 1826 final IndexNode as(byte[] name) { // reuse the node, mostly 1827 name(name); // as a lookup "key" 1828 return this; 1829 } 1830 1831 boolean isDir() { 1832 return isdir; 1833 } 1834 1835 public boolean equals(Object other) { 1836 if (!(other instanceof IndexNode)) { 1837 return false; 1838 } 1839 if (other instanceof ParentLookup) { 1840 return ((ParentLookup)other).equals(this); 1841 } 1842 return Arrays.equals(name, ((IndexNode)other).name); 1843 } 1844 1845 public int hashCode() { 1846 return hashcode; 1847 } 1848 1849 IndexNode() {} 1850 IndexNode sibling; 1851 IndexNode child; // 1st child 1852 } 
1853 1854 static class Entry extends IndexNode implements ZipFileAttributes { 1855 1856 static final int CEN = 1; // entry read from cen 1857 static final int NEW = 2; // updated contents in bytes or file 1858 static final int FILECH = 3; // fch update in "file" 1859 static final int COPY = 4; // copy of a CEN entry 1860 1861 byte[] bytes; // updated content bytes 1862 Path file; // use tmp file to store bytes; 1863 int type = CEN; // default is the entry read from cen 1864 1865 // entry attributes 1866 int version; 1867 int flag; 1868 int method = -1; // compression method 1869 long mtime = -1; // last modification time (in DOS time) 1870 long atime = -1; // last access time 1871 long ctime = -1; // create time 1872 long crc = -1; // crc-32 of entry data 1873 long csize = -1; // compressed size of entry data 1874 long size = -1; // uncompressed size of entry data 1875 byte[] extra; 1876 1877 // cen 1878 1879 // these fields are not used by anyone and writeCEN uses "0" 1880 // int versionMade; 1881 // int disk; 1882 // int attrs; 1883 // long attrsEx; 1884 long locoff; 1885 byte[] comment; 1886 1887 Entry() {} 1888 1889 Entry(byte[] name, boolean isdir, int method) { 1890 name(name); 1891 this.isdir = isdir; 1892 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1893 this.crc = 0; 1894 this.size = 0; 1895 this.csize = 0; 1896 this.method = method; 1897 } 1898 1899 Entry(byte[] name, int type, boolean isdir, int method) { 1900 this(name, isdir, method); 1901 this.type = type; 1902 } 1903 1904 Entry (Entry e, int type) { 1905 name(e.name); 1906 this.isdir = e.isdir; 1907 this.version = e.version; 1908 this.ctime = e.ctime; 1909 this.atime = e.atime; 1910 this.mtime = e.mtime; 1911 this.crc = e.crc; 1912 this.size = e.size; 1913 this.csize = e.csize; 1914 this.method = e.method; 1915 this.extra = e.extra; 1916 /* 1917 this.versionMade = e.versionMade; 1918 this.disk = e.disk; 1919 this.attrs = e.attrs; 1920 this.attrsEx = e.attrsEx; 1921 */ 1922 
this.locoff = e.locoff; 1923 this.comment = e.comment; 1924 this.type = type; 1925 } 1926 1927 Entry (byte[] name, Path file, int type) { 1928 this(name, type, false, METHOD_STORED); 1929 this.file = file; 1930 } 1931 1932 int version() throws ZipException { 1933 if (method == METHOD_DEFLATED) 1934 return 20; 1935 else if (method == METHOD_STORED) 1936 return 10; 1937 throw new ZipException("unsupported compression method"); 1938 } 1939 1940 ///////////////////// CEN ////////////////////// 1941 static Entry readCEN(ZipFileSystem zipfs, IndexNode inode) 1942 throws IOException 1943 { 1944 return new Entry().cen(zipfs, inode); 1945 } 1946 1947 private Entry cen(ZipFileSystem zipfs, IndexNode inode) 1948 throws IOException 1949 { 1950 byte[] cen = zipfs.cen; 1951 int pos = inode.pos; 1952 if (!cenSigAt(cen, pos)) 1953 zerror("invalid CEN header (bad signature)"); 1954 version = CENVER(cen, pos); 1955 flag = CENFLG(cen, pos); 1956 method = CENHOW(cen, pos); 1957 mtime = dosToJavaTime(CENTIM(cen, pos)); 1958 crc = CENCRC(cen, pos); 1959 csize = CENSIZ(cen, pos); 1960 size = CENLEN(cen, pos); 1961 int nlen = CENNAM(cen, pos); 1962 int elen = CENEXT(cen, pos); 1963 int clen = CENCOM(cen, pos); 1964 /* 1965 versionMade = CENVEM(cen, pos); 1966 disk = CENDSK(cen, pos); 1967 attrs = CENATT(cen, pos); 1968 attrsEx = CENATX(cen, pos); 1969 */ 1970 locoff = CENOFF(cen, pos); 1971 pos += CENHDR; 1972 this.name = inode.name; 1973 this.isdir = inode.isdir; 1974 this.hashcode = inode.hashcode; 1975 1976 pos += nlen; 1977 if (elen > 0) { 1978 extra = Arrays.copyOfRange(cen, pos, pos + elen); 1979 pos += elen; 1980 readExtra(zipfs); 1981 } 1982 if (clen > 0) { 1983 comment = Arrays.copyOfRange(cen, pos, pos + clen); 1984 } 1985 return this; 1986 } 1987 1988 int writeCEN(OutputStream os) throws IOException 1989 { 1990 int written = CENHDR; 1991 int version0 = version(); 1992 long csize0 = csize; 1993 long size0 = size; 1994 long locoff0 = locoff; 1995 int elen64 = 0; // extra for 
ZIP64 1996 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 1997 int elenEXTT = 0; // extra for Extended Timestamp 1998 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 1999 2000 byte[] zname = isdir ? toDirectoryPath(name) : name; 2001 2002 // confirm size/length 2003 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2004 int elen = (extra != null) ? extra.length : 0; 2005 int eoff = 0; 2006 int clen = (comment != null) ? comment.length : 0; 2007 if (csize >= ZIP64_MINVAL) { 2008 csize0 = ZIP64_MINVAL; 2009 elen64 += 8; // csize(8) 2010 } 2011 if (size >= ZIP64_MINVAL) { 2012 size0 = ZIP64_MINVAL; // size(8) 2013 elen64 += 8; 2014 } 2015 if (locoff >= ZIP64_MINVAL) { 2016 locoff0 = ZIP64_MINVAL; 2017 elen64 += 8; // offset(8) 2018 } 2019 if (elen64 != 0) { 2020 elen64 += 4; // header and data sz 4 bytes 2021 } 2022 while (eoff + 4 < elen) { 2023 int tag = SH(extra, eoff); 2024 int sz = SH(extra, eoff + 2); 2025 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2026 foundExtraTime = true; 2027 } 2028 eoff += (4 + sz); 2029 } 2030 if (!foundExtraTime) { 2031 if (isWindows) { // use NTFS 2032 elenNTFS = 36; // total 36 bytes 2033 } else { // Extended Timestamp otherwise 2034 elenEXTT = 9; // only mtime in cen 2035 } 2036 } 2037 writeInt(os, CENSIG); // CEN header signature 2038 if (elen64 != 0) { 2039 writeShort(os, 45); // ver 4.5 for zip64 2040 writeShort(os, 45); 2041 } else { 2042 writeShort(os, version0); // version made by 2043 writeShort(os, version0); // version needed to extract 2044 } 2045 writeShort(os, flag); // general purpose bit flag 2046 writeShort(os, method); // compression method 2047 // last modification time 2048 writeInt(os, (int)javaToDosTime(mtime)); 2049 writeInt(os, crc); // crc-32 2050 writeInt(os, csize0); // compressed size 2051 writeInt(os, size0); // uncompressed size 2052 writeShort(os, nlen); 2053 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2054 2055 if (comment != null) { 2056 
writeShort(os, Math.min(clen, 0xffff)); 2057 } else { 2058 writeShort(os, 0); 2059 } 2060 writeShort(os, 0); // starting disk number 2061 writeShort(os, 0); // internal file attributes (unused) 2062 writeInt(os, 0); // external file attributes (unused) 2063 writeInt(os, locoff0); // relative offset of local header 2064 writeBytes(os, zname, 1, nlen); 2065 if (elen64 != 0) { 2066 writeShort(os, EXTID_ZIP64);// Zip64 extra 2067 writeShort(os, elen64 - 4); // size of "this" extra block 2068 if (size0 == ZIP64_MINVAL) 2069 writeLong(os, size); 2070 if (csize0 == ZIP64_MINVAL) 2071 writeLong(os, csize); 2072 if (locoff0 == ZIP64_MINVAL) 2073 writeLong(os, locoff); 2074 } 2075 if (elenNTFS != 0) { 2076 writeShort(os, EXTID_NTFS); 2077 writeShort(os, elenNTFS - 4); 2078 writeInt(os, 0); // reserved 2079 writeShort(os, 0x0001); // NTFS attr tag 2080 writeShort(os, 24); 2081 writeLong(os, javaToWinTime(mtime)); 2082 writeLong(os, javaToWinTime(atime)); 2083 writeLong(os, javaToWinTime(ctime)); 2084 } 2085 if (elenEXTT != 0) { 2086 writeShort(os, EXTID_EXTT); 2087 writeShort(os, elenEXTT - 4); 2088 if (ctime == -1) 2089 os.write(0x3); // mtime and atime 2090 else 2091 os.write(0x7); // mtime, atime and ctime 2092 writeInt(os, javaToUnixTime(mtime)); 2093 } 2094 if (extra != null) // whatever not recognized 2095 writeBytes(os, extra); 2096 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2097 writeBytes(os, comment); 2098 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2099 } 2100 2101 ///////////////////// LOC ////////////////////// 2102 2103 int writeLOC(OutputStream os) throws IOException { 2104 writeInt(os, LOCSIG); // LOC header signature 2105 int version = version(); 2106 2107 byte[] zname = isdir ? toDirectoryPath(name) : name; 2108 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2109 int elen = (extra != null) ? 
                extra.length : 0;
            // NOTE(review): this is the tail of the LOC-header writer whose
            // signature is above this view; `elen` is the raw extra-field
            // length (0 when there is no extra data).
            boolean foundExtraTime = false;     // true if an extra timestamp field is already present
            int eoff = 0;
            int elen64 = 0;                     // bytes added for a Zip64 extra field
            int elenEXTT = 0;                   // bytes added for an Info-ZIP extended-timestamp field
            int elenNTFS = 0;                   // bytes added for an NTFS timestamp field
            if ((flag & FLAG_DATADESCR) != 0) {
                // Sizes/CRC are not known yet; write zeros here and emit the
                // real values later in a data descriptor (see writeEXT).
                writeShort(os, version());      // version needed to extract
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;                // headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);         // ver 4.5 for zip64
                } else {
                    writeShort(os, version());  // version needed to extract
                }
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);              // crc-32
                if (elen64 != 0) {
                    // Zip64: size fields in the fixed header hold the marker;
                    // real 8-byte values go into the Zip64 extra field below.
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);        // compressed size
                    writeInt(os, size);         // uncompressed size
                }
            }
            // Scan the existing extra data for a timestamp field so we don't
            // emit a duplicate one.
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;              // NTFS, total 36 bytes
                } else {                        // on unix use "ext time"
                    elenEXTT = 9;               // header(4) + flags(1) + mtime(4)
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, zname, 1, nlen);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);                // reserved
                writeShort(os, 0x0001);         // NTFS attr tag
                writeShort(os, 24);
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);   // size for the following data block
                int fbyte = 0x1;                // mtime always present
                if (atime != -1)                // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)                // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);                // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            // total bytes written for this local file header
            return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor
        // Writes the EXT record (crc/csize/size) that follows the compressed
        // data when FLAG_DATADESCR is set. Uses the 8-byte Zip64 form when
        // either size needs it. Returns the number of bytes written.
        int writeEXT(OutputStream os) throws IOException {
            writeInt(os, EXTSIG);               // EXT header signature
            writeInt(os, crc);                  // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);            // compressed size
                writeInt(os, size);             // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        // Consumes the Zip64, NTFS and extended-timestamp records from this
        // entry's CEN extra field, updating size/csize/locoff and the three
        // timestamps. Unknown records are compacted in place and kept in
        // 'extra'; if nothing unknown remains, 'extra' is set to null.
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;                     // write cursor for retained (unknown) records
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)            // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // 8-byte fields are present only for the values that are
                    // set to the ZIP64_MINVAL marker in the CEN record, in
                    // the order size, csize, locoff.
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)     // invalid zip64 extra
                            break;              // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)
                        break;
                    pos += 4;                   // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)   // expect NTFS attr tag 1
                        break;
                    if (SH(extra, pos + 2) != 24)   // 3 x 8-byte Windows times
                        break;
                    // override the loc field, date/time here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime, if flag
                    // "zipinfo-time" is not specified to false;
                    // there is performance cost (move up to loc and read) to
                    // access the loc table for each entry;
                    if (zipfs.noExtt) {
                        if (sz == 5)            // flags(1) + mtime(4) only
                            mtime = unixToJavaTime(LG(extra, pos + 1));
                        break;
                    }
                    // Re-read this entry's LOC header to get at the full
                    // extended-timestamp record (mtime/atime/ctime).
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (!locSigAt(buf, 0))
                        throw new ZipException("loc: wrong sig ->"
                                           + Long.toString(getSig(buf, 0), 16));
                    int locElen = LOCEXT(buf);
                    if (locElen < 9)            // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    int locPos = 0;
                    // Walk the LOC extra records looking for EXTT.
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        // flags bit 0/1/2 signal presence of mtime/atime/ctime
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:    // unknown tag — keep it by compacting in place
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;
        }

        ///////// basic file attributes ///////////
        @Override
        public FileTime creationTime() {
            // fall back to mtime when no creation time was recorded
            return FileTime.fromMillis(ctime == -1 ? mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        @Override
        public boolean isOther() {
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        @Override
        public FileTime lastAccessTime() {
            // fall back to mtime when no access time was recorded
            return FileTime.fromMillis(atime == -1 ? mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        @Override
        public long size() {
            return size;
        }

        @Override
        public boolean isSymbolicLink() {
            return false;
        }

        @Override
        public Object fileKey() {
            return null;
        }

        ///////// zip entry attributes ///////////
        public long compressedSize() {
            return csize;
        }

        public long crc() {
            return crc;
        }

        public int method() {
            return method;
        }

        // Returns a defensive copy of the remaining (unknown-tag) extra data,
        // or null if there is none.
        public byte[] extra() {
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        // Returns a defensive copy of the entry comment, or null if none.
        public byte[] comment() {
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }

        // Debug-friendly dump of all attribute values.
        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format("    name            : %s%n", new String(name));
            fm.format("    creationTime    : %tc%n", creationTime().toMillis());
            fm.format("    lastAccessTime  : %tc%n", lastAccessTime().toMillis());
            fm.format("    lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format("    isRegularFile   : %b%n", isRegularFile());
            fm.format("    isDirectory     : %b%n", isDirectory());
            fm.format("    isSymbolicLink  : %b%n", isSymbolicLink());
            fm.format("    isOther         : %b%n", isOther());
            fm.format("    fileKey         : %s%n", fileKey());
            fm.format("    size            : %d%n", size());
            fm.format("    compressedSize  : %d%n", compressedSize());
            fm.format("    crc             : %x%n", crc());
            fm.format("    method          : %d%n", method());
            fm.close();
            return sb.toString();
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourselves as
    // implemented below.
    private IndexNode root;

    // default time stamp for pseudo entries
    private long zfsDefaultTimeStamp = System.currentTimeMillis();

    // Unlinks 'inode' from its parent's singly-linked child/sibling chain.
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            // inode is the head of the chain
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    // purely for parent lookup, so we don't have to copy the parent
    // name every time
    static class ParentLookup extends IndexNode {
        int len;    // number of leading bytes of 'name' that form the key
        ParentLookup() {}

        final ParentLookup as(byte[] name, int len) { // as a lookup "key"
            name(name, len);
            return this;
        }

        void name(byte[] name, int len) {
            this.name = name;
            this.len = len;
            // calculate the hashcode the same way as Arrays.hashCode() does
            int result = 1;
            for (int i = 0; i < len; i++)
                result = 31 * result + name[i];
            this.hashcode = result;
        }

        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // compare only the first 'len' bytes of our name against the
            // other node's full name
            byte[] oname = ((IndexNode)other).name;
            return Arrays.equals(name, 0, len,
                                 oname, 0, oname.length);
        }

    }

    // Builds the directory tree over all inodes, inserting pseudo directory
    // entries for parents that have no entry of their own.
    private void buildNodeTree() throws IOException {
        beginWrite();
        try {
            IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH));
            if (root == null) {
                root = new IndexNode(ROOTPATH, true);
            } else {
                inodes.remove(root);
            }
            IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]);
            inodes.put(root, root);
            ParentLookup lookup = new ParentLookup();
            for (IndexNode node : nodes) {
                IndexNode parent;
                // walk up toward the root, creating missing parents
                while (true) {
                    int off = getParentOff(node.name);
                    if (off <= 1) {    // parent is root
                        node.sibling = root.child;
                        root.child = node;
                        break;
                    }
                    lookup = lookup.as(node.name, off);
                    if (inodes.containsKey(lookup)) {
                        parent = inodes.get(lookup);
                        node.sibling = parent.child;
                        parent.child = node;
                        break;
                    }
                    // add new pseudo directory entry
                    parent = new IndexNode(Arrays.copyOf(node.name, off), true);
                    inodes.put(parent, parent);
                    node.sibling = parent.child;
                    parent.child = node;
                    node = parent;    // continue linking the new parent upward
                }
            }
        } finally {
            endWrite();
        }
    }
}