/*
 * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package jdk.nio.zipfs;

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.SeekableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.*;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.FileTime;
import java.nio.file.attribute.UserPrincipalLookupService;
import java.nio.file.spi.FileSystemProvider;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;
import java.util.zip.ZipException;

import static java.lang.Boolean.TRUE;
import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
import static java.nio.file.StandardOpenOption.APPEND;
import static java.nio.file.StandardOpenOption.CREATE;
import static java.nio.file.StandardOpenOption.CREATE_NEW;
import static java.nio.file.StandardOpenOption.READ;
import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
import static java.nio.file.StandardOpenOption.WRITE;
import static
jdk.nio.zipfs.ZipConstants.*; 73 import static jdk.nio.zipfs.ZipUtils.*; 74 75 /** 76 * A FileSystem built on a zip file 77 * 78 * @author Xueming Shen 79 */ 80 class ZipFileSystem extends FileSystem { 81 private final ZipFileSystemProvider provider; 82 private final Path zfpath; 83 final ZipCoder zc; 84 private final ZipPath rootdir; 85 private boolean readOnly = false; // readonly file system 86 87 // configurable by env map 88 private final boolean noExtt; // see readExtra() 89 private final boolean useTempFile; // use a temp file for newOS, default 90 // is to use BAOS for better performance 91 private static final boolean isWindows = AccessController.doPrivileged( 92 (PrivilegedAction<Boolean>)() -> System.getProperty("os.name") 93 .startsWith("Windows")); 94 private final boolean forceEnd64; 95 private final int defaultMethod; // METHOD_STORED if "noCompression=true" 96 // METHOD_DEFLATED otherwise 97 98 ZipFileSystem(ZipFileSystemProvider provider, 99 Path zfpath, 100 Map<String, ?> env) throws IOException 101 { 102 // default encoding for name/comment 103 String nameEncoding = env.containsKey("encoding") ? 104 (String)env.get("encoding") : "UTF-8"; 105 this.noExtt = "false".equals(env.get("zipinfo-time")); 106 this.useTempFile = isTrue(env, "useTempFile"); 107 this.forceEnd64 = isTrue(env, "forceZIP64End"); 108 this.defaultMethod = isTrue(env, "noCompression") ? 
METHOD_STORED: METHOD_DEFLATED; 109 if (Files.notExists(zfpath)) { 110 // create a new zip if not exists 111 if (isTrue(env, "create")) { 112 try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) { 113 new END().write(os, 0, forceEnd64); 114 } 115 } else { 116 throw new FileSystemNotFoundException(zfpath.toString()); 117 } 118 } 119 // sm and existence check 120 zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ); 121 boolean writeable = AccessController.doPrivileged( 122 (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath)); 123 this.readOnly = !writeable; 124 this.zc = ZipCoder.get(nameEncoding); 125 this.rootdir = new ZipPath(this, new byte[]{'/'}); 126 this.ch = Files.newByteChannel(zfpath, READ); 127 try { 128 this.cen = initCEN(); 129 } catch (IOException x) { 130 try { 131 this.ch.close(); 132 } catch (IOException xx) { 133 x.addSuppressed(xx); 134 } 135 throw x; 136 } 137 this.provider = provider; 138 this.zfpath = zfpath; 139 } 140 141 // returns true if there is a name=true/"true" setting in env 142 private static boolean isTrue(Map<String, ?> env, String name) { 143 return "true".equals(env.get(name)) || TRUE.equals(env.get(name)); 144 } 145 146 @Override 147 public FileSystemProvider provider() { 148 return provider; 149 } 150 151 @Override 152 public String getSeparator() { 153 return "/"; 154 } 155 156 @Override 157 public boolean isOpen() { 158 return isOpen; 159 } 160 161 @Override 162 public boolean isReadOnly() { 163 return readOnly; 164 } 165 166 private void checkWritable() throws IOException { 167 if (readOnly) 168 throw new ReadOnlyFileSystemException(); 169 } 170 171 void setReadOnly() { 172 this.readOnly = true; 173 } 174 175 @Override 176 public Iterable<Path> getRootDirectories() { 177 return List.of(rootdir); 178 } 179 180 ZipPath getRootDir() { 181 return rootdir; 182 } 183 184 @Override 185 public ZipPath getPath(String first, String... 
more) { 186 if (more.length == 0) { 187 return new ZipPath(this, first); 188 } 189 StringBuilder sb = new StringBuilder(); 190 sb.append(first); 191 for (String path : more) { 192 if (path.length() > 0) { 193 if (sb.length() > 0) { 194 sb.append('/'); 195 } 196 sb.append(path); 197 } 198 } 199 return new ZipPath(this, sb.toString()); 200 } 201 202 @Override 203 public UserPrincipalLookupService getUserPrincipalLookupService() { 204 throw new UnsupportedOperationException(); 205 } 206 207 @Override 208 public WatchService newWatchService() { 209 throw new UnsupportedOperationException(); 210 } 211 212 FileStore getFileStore(ZipPath path) { 213 return new ZipFileStore(path); 214 } 215 216 @Override 217 public Iterable<FileStore> getFileStores() { 218 return List.of(new ZipFileStore(rootdir)); 219 } 220 221 private static final Set<String> supportedFileAttributeViews = 222 Set.of("basic", "zip"); 223 224 @Override 225 public Set<String> supportedFileAttributeViews() { 226 return supportedFileAttributeViews; 227 } 228 229 @Override 230 public String toString() { 231 return zfpath.toString(); 232 } 233 234 Path getZipFile() { 235 return zfpath; 236 } 237 238 private static final String GLOB_SYNTAX = "glob"; 239 private static final String REGEX_SYNTAX = "regex"; 240 241 @Override 242 public PathMatcher getPathMatcher(String syntaxAndInput) { 243 int pos = syntaxAndInput.indexOf(':'); 244 if (pos <= 0 || pos == syntaxAndInput.length()) { 245 throw new IllegalArgumentException(); 246 } 247 String syntax = syntaxAndInput.substring(0, pos); 248 String input = syntaxAndInput.substring(pos + 1); 249 String expr; 250 if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) { 251 expr = toRegexPattern(input); 252 } else { 253 if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) { 254 expr = input; 255 } else { 256 throw new UnsupportedOperationException("Syntax '" + syntax + 257 "' not recognized"); 258 } 259 } 260 // return matcher 261 final Pattern pattern = Pattern.compile(expr); 262 return new 
PathMatcher() { 263 @Override 264 public boolean matches(Path path) { 265 return pattern.matcher(path.toString()).matches(); 266 } 267 }; 268 } 269 270 @Override 271 public void close() throws IOException { 272 beginWrite(); 273 try { 274 if (!isOpen) 275 return; 276 isOpen = false; // set closed 277 } finally { 278 endWrite(); 279 } 280 if (!streams.isEmpty()) { // unlock and close all remaining streams 281 Set<InputStream> copy = new HashSet<>(streams); 282 for (InputStream is : copy) 283 is.close(); 284 } 285 beginWrite(); // lock and sync 286 try { 287 AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> { 288 sync(); return null; 289 }); 290 ch.close(); // close the ch just in case no update 291 // and sync didn't close the ch 292 } catch (PrivilegedActionException e) { 293 throw (IOException)e.getException(); 294 } finally { 295 endWrite(); 296 } 297 298 synchronized (inflaters) { 299 for (Inflater inf : inflaters) 300 inf.end(); 301 } 302 synchronized (deflaters) { 303 for (Deflater def : deflaters) 304 def.end(); 305 } 306 307 IOException ioe = null; 308 synchronized (tmppaths) { 309 for (Path p : tmppaths) { 310 try { 311 AccessController.doPrivileged( 312 (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p)); 313 } catch (PrivilegedActionException e) { 314 IOException x = (IOException)e.getException(); 315 if (ioe == null) 316 ioe = x; 317 else 318 ioe.addSuppressed(x); 319 } 320 } 321 } 322 provider.removeFileSystem(zfpath, this); 323 if (ioe != null) 324 throw ioe; 325 } 326 327 ZipFileAttributes getFileAttributes(byte[] path) 328 throws IOException 329 { 330 Entry e; 331 beginRead(); 332 try { 333 ensureOpen(); 334 e = getEntry(path); 335 if (e == null) { 336 IndexNode inode = getInode(path); 337 if (inode == null) 338 return null; 339 // pseudo directory, uses METHOD_STORED 340 e = new Entry(inode.name, inode.isdir, METHOD_STORED); 341 e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp; 342 } 343 } finally { 344 endRead(); 
345 } 346 return e; 347 } 348 349 void checkAccess(byte[] path) throws IOException { 350 beginRead(); 351 try { 352 ensureOpen(); 353 // is it necessary to readCEN as a sanity check? 354 if (getInode(path) == null) { 355 throw new NoSuchFileException(toString()); 356 } 357 358 } finally { 359 endRead(); 360 } 361 } 362 363 void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime) 364 throws IOException 365 { 366 checkWritable(); 367 beginWrite(); 368 try { 369 ensureOpen(); 370 Entry e = getEntry(path); // ensureOpen checked 371 if (e == null) 372 throw new NoSuchFileException(getString(path)); 373 if (e.type == Entry.CEN) 374 e.type = Entry.COPY; // copy e 375 if (mtime != null) 376 e.mtime = mtime.toMillis(); 377 if (atime != null) 378 e.atime = atime.toMillis(); 379 if (ctime != null) 380 e.ctime = ctime.toMillis(); 381 update(e); 382 } finally { 383 endWrite(); 384 } 385 } 386 387 boolean exists(byte[] path) 388 throws IOException 389 { 390 beginRead(); 391 try { 392 ensureOpen(); 393 return getInode(path) != null; 394 } finally { 395 endRead(); 396 } 397 } 398 399 boolean isDirectory(byte[] path) 400 throws IOException 401 { 402 beginRead(); 403 try { 404 IndexNode n = getInode(path); 405 return n != null && n.isDir(); 406 } finally { 407 endRead(); 408 } 409 } 410 411 // returns the list of child paths of "path" 412 Iterator<Path> iteratorOf(ZipPath dir, 413 DirectoryStream.Filter<? super Path> filter) 414 throws IOException 415 { 416 beginWrite(); // iteration of inodes needs exclusive lock 417 try { 418 ensureOpen(); 419 byte[] path = dir.getResolvedPath(); 420 IndexNode inode = getInode(path); 421 if (inode == null) 422 throw new NotDirectoryException(getString(path)); 423 List<Path> list = new ArrayList<>(); 424 IndexNode child = inode.child; 425 while (child != null) { 426 // (1) assume all path from zip file itself is "normalized" 427 // (2) IndexNode.name is absolute. 
see IndexNode(byte[],int,int) 428 // (3) if parent "dir" is relative when ZipDirectoryStream 429 // is created, the returned child path needs to be relative 430 // as well. 431 byte[] cname = child.name; 432 if (!dir.isAbsolute()) { 433 cname = Arrays.copyOfRange(cname, 1, cname.length); 434 } 435 ZipPath zpath = new ZipPath(this, cname, true); 436 if (filter == null || filter.accept(zpath)) 437 list.add(zpath); 438 child = child.sibling; 439 } 440 return list.iterator(); 441 } finally { 442 endWrite(); 443 } 444 } 445 446 void createDirectory(byte[] dir, FileAttribute<?>... attrs) 447 throws IOException 448 { 449 checkWritable(); 450 // dir = toDirectoryPath(dir); 451 beginWrite(); 452 try { 453 ensureOpen(); 454 if (dir.length == 0 || exists(dir)) // root dir, or exiting dir 455 throw new FileAlreadyExistsException(getString(dir)); 456 checkParents(dir); 457 Entry e = new Entry(dir, Entry.NEW, true, METHOD_STORED); 458 update(e); 459 } finally { 460 endWrite(); 461 } 462 } 463 464 void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... 
options) 465 throws IOException 466 { 467 checkWritable(); 468 if (Arrays.equals(src, dst)) 469 return; // do nothing, src and dst are the same 470 471 beginWrite(); 472 try { 473 ensureOpen(); 474 Entry eSrc = getEntry(src); // ensureOpen checked 475 476 if (eSrc == null) 477 throw new NoSuchFileException(getString(src)); 478 if (eSrc.isDir()) { // spec says to create dst dir 479 createDirectory(dst); 480 return; 481 } 482 boolean hasReplace = false; 483 boolean hasCopyAttrs = false; 484 for (CopyOption opt : options) { 485 if (opt == REPLACE_EXISTING) 486 hasReplace = true; 487 else if (opt == COPY_ATTRIBUTES) 488 hasCopyAttrs = true; 489 } 490 Entry eDst = getEntry(dst); 491 if (eDst != null) { 492 if (!hasReplace) 493 throw new FileAlreadyExistsException(getString(dst)); 494 } else { 495 checkParents(dst); 496 } 497 Entry u = new Entry(eSrc, Entry.COPY); // copy eSrc entry 498 u.name(dst); // change name 499 if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) 500 { 501 u.type = eSrc.type; // make it the same type 502 if (deletesrc) { // if it's a "rename", take the data 503 u.bytes = eSrc.bytes; 504 u.file = eSrc.file; 505 } else { // if it's not "rename", copy the data 506 if (eSrc.bytes != null) 507 u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length); 508 else if (eSrc.file != null) { 509 u.file = getTempPathForEntry(null); 510 Files.copy(eSrc.file, u.file, REPLACE_EXISTING); 511 } 512 } 513 } 514 if (!hasCopyAttrs) 515 u.mtime = u.atime= u.ctime = System.currentTimeMillis(); 516 update(u); 517 if (deletesrc) 518 updateDelete(eSrc); 519 } finally { 520 endWrite(); 521 } 522 } 523 524 // Returns an output stream for writing the contents into the specified 525 // entry. 526 OutputStream newOutputStream(byte[] path, OpenOption... 
options) 527 throws IOException 528 { 529 checkWritable(); 530 boolean hasCreateNew = false; 531 boolean hasCreate = false; 532 boolean hasAppend = false; 533 boolean hasTruncate = false; 534 for (OpenOption opt : options) { 535 if (opt == READ) 536 throw new IllegalArgumentException("READ not allowed"); 537 if (opt == CREATE_NEW) 538 hasCreateNew = true; 539 if (opt == CREATE) 540 hasCreate = true; 541 if (opt == APPEND) 542 hasAppend = true; 543 if (opt == TRUNCATE_EXISTING) 544 hasTruncate = true; 545 } 546 if (hasAppend && hasTruncate) 547 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 548 beginRead(); // only need a readlock, the "update()" will 549 try { // try to obtain a writelock when the os is 550 ensureOpen(); // being closed. 551 Entry e = getEntry(path); 552 if (e != null) { 553 if (e.isDir() || hasCreateNew) 554 throw new FileAlreadyExistsException(getString(path)); 555 if (hasAppend) { 556 InputStream is = getInputStream(e); 557 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 558 is.transferTo(os); 559 is.close(); 560 return os; 561 } 562 return getOutputStream(new Entry(e, Entry.NEW)); 563 } else { 564 if (!hasCreate && !hasCreateNew) 565 throw new NoSuchFileException(getString(path)); 566 checkParents(path); 567 return getOutputStream(new Entry(path, Entry.NEW, false, defaultMethod)); 568 } 569 } finally { 570 endRead(); 571 } 572 } 573 574 // Returns an input stream for reading the contents of the specified 575 // file entry. 576 InputStream newInputStream(byte[] path) throws IOException { 577 beginRead(); 578 try { 579 ensureOpen(); 580 Entry e = getEntry(path); 581 if (e == null) 582 throw new NoSuchFileException(getString(path)); 583 if (e.isDir()) 584 throw new FileSystemException(getString(path), "is a directory", null); 585 return getInputStream(e); 586 } finally { 587 endRead(); 588 } 589 } 590 591 private void checkOptions(Set<? 
extends OpenOption> options) { 592 // check for options of null type and option is an intance of StandardOpenOption 593 for (OpenOption option : options) { 594 if (option == null) 595 throw new NullPointerException(); 596 if (!(option instanceof StandardOpenOption)) 597 throw new IllegalArgumentException(); 598 } 599 if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING)) 600 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 601 } 602 603 604 // Returns an output SeekableByteChannel for either 605 // (1) writing the contents of a new entry, if the entry doesn't exit, or 606 // (2) updating/replacing the contents of an existing entry. 607 // Note: The content is not compressed. 608 private class EntryOutputChannel extends ByteArrayChannel { 609 Entry e; 610 611 EntryOutputChannel(Entry e) throws IOException { 612 super(e.size > 0? (int)e.size : 8192, false); 613 this.e = e; 614 if (e.mtime == -1) 615 e.mtime = System.currentTimeMillis(); 616 if (e.method == -1) 617 e.method = defaultMethod; 618 // store size, compressed size, and crc-32 in datadescriptor 619 e.flag = FLAG_DATADESCR; 620 if (zc.isUTF8()) 621 e.flag |= FLAG_USE_UTF8; 622 } 623 624 @Override 625 public void close() throws IOException { 626 e.bytes = toByteArray(); 627 e.size = e.bytes.length; 628 e.crc = -1; 629 super.close(); 630 update(e); 631 } 632 } 633 634 private int getCompressMethod(FileAttribute<?>... attrs) { 635 return defaultMethod; 636 } 637 638 // Returns a Writable/ReadByteChannel for now. Might consdier to use 639 // newFileChannel() instead, which dump the entry data into a regular 640 // file on the default file system and create a FileChannel on top of 641 // it. 642 SeekableByteChannel newByteChannel(byte[] path, 643 Set<? extends OpenOption> options, 644 FileAttribute<?>... 
attrs) 645 throws IOException 646 { 647 checkOptions(options); 648 if (options.contains(StandardOpenOption.WRITE) || 649 options.contains(StandardOpenOption.APPEND)) { 650 checkWritable(); 651 beginRead(); // only need a readlock, the "update()" will obtain 652 // thewritelock when the channel is closed 653 try { 654 ensureOpen(); 655 Entry e = getEntry(path); 656 if (e != null) { 657 if (e.isDir() || options.contains(CREATE_NEW)) 658 throw new FileAlreadyExistsException(getString(path)); 659 SeekableByteChannel sbc = 660 new EntryOutputChannel(new Entry(e, Entry.NEW)); 661 if (options.contains(APPEND)) { 662 try (InputStream is = getInputStream(e)) { // copyover 663 byte[] buf = new byte[8192]; 664 ByteBuffer bb = ByteBuffer.wrap(buf); 665 int n; 666 while ((n = is.read(buf)) != -1) { 667 bb.position(0); 668 bb.limit(n); 669 sbc.write(bb); 670 } 671 } 672 } 673 return sbc; 674 } 675 if (!options.contains(CREATE) && !options.contains(CREATE_NEW)) 676 throw new NoSuchFileException(getString(path)); 677 checkParents(path); 678 return new EntryOutputChannel( 679 new Entry(path, Entry.NEW, false, getCompressMethod(attrs))); 680 681 } finally { 682 endRead(); 683 } 684 } else { 685 beginRead(); 686 try { 687 ensureOpen(); 688 Entry e = getEntry(path); 689 if (e == null || e.isDir()) 690 throw new NoSuchFileException(getString(path)); 691 try (InputStream is = getInputStream(e)) { 692 // TBD: if (e.size < NNNNN); 693 return new ByteArrayChannel(is.readAllBytes(), true); 694 } 695 } finally { 696 endRead(); 697 } 698 } 699 } 700 701 // Returns a FileChannel of the specified entry. 702 // 703 // This implementation creates a temporary file on the default file system, 704 // copy the entry data into it if the entry exists, and then create a 705 // FileChannel on top of it. 706 FileChannel newFileChannel(byte[] path, 707 Set<? extends OpenOption> options, 708 FileAttribute<?>... 
attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = getCompressMethod(attrs);
            }
            // is there a better way to hook into the FileChannel's close method?
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
871 private void checkParents(byte[] path) throws IOException { 872 beginRead(); 873 try { 874 while ((path = getParent(path)) != null && 875 path != ROOTPATH) { 876 if (!inodes.containsKey(IndexNode.keyOf(path))) { 877 throw new NoSuchFileException(getString(path)); 878 } 879 } 880 } finally { 881 endRead(); 882 } 883 } 884 885 private static byte[] ROOTPATH = new byte[] { '/' }; 886 private static byte[] getParent(byte[] path) { 887 int off = getParentOff(path); 888 if (off <= 1) 889 return ROOTPATH; 890 return Arrays.copyOf(path, off); 891 } 892 893 private static int getParentOff(byte[] path) { 894 int off = path.length - 1; 895 if (off > 0 && path[off] == '/') // isDirectory 896 off--; 897 while (off > 0 && path[off] != '/') { off--; } 898 return off; 899 } 900 901 private final void beginWrite() { 902 rwlock.writeLock().lock(); 903 } 904 905 private final void endWrite() { 906 rwlock.writeLock().unlock(); 907 } 908 909 private final void beginRead() { 910 rwlock.readLock().lock(); 911 } 912 913 private final void endRead() { 914 rwlock.readLock().unlock(); 915 } 916 917 /////////////////////////////////////////////////////////////////// 918 919 private volatile boolean isOpen = true; 920 private final SeekableByteChannel ch; // channel to the zipfile 921 final byte[] cen; // CEN & ENDHDR 922 private END end; 923 private long locpos; // position of first LOC header (usually 0) 924 925 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 926 927 // name -> pos (in cen), IndexNode itself can be used as a "key" 928 private LinkedHashMap<IndexNode, IndexNode> inodes; 929 930 final byte[] getBytes(String name) { 931 return zc.getBytes(name); 932 } 933 934 final String getString(byte[] name) { 935 return zc.toString(name); 936 } 937 938 @SuppressWarnings("deprecation") 939 protected void finalize() throws IOException { 940 close(); 941 } 942 943 // Reads len bytes of data from the specified offset into buf. 
944 // Returns the total number of bytes read. 945 // Each/every byte read from here (except the cen, which is mapped). 946 final long readFullyAt(byte[] buf, int off, long len, long pos) 947 throws IOException 948 { 949 ByteBuffer bb = ByteBuffer.wrap(buf); 950 bb.position(off); 951 bb.limit((int)(off + len)); 952 return readFullyAt(bb, pos); 953 } 954 955 private final long readFullyAt(ByteBuffer bb, long pos) 956 throws IOException 957 { 958 synchronized(ch) { 959 return ch.position(pos).read(bb); 960 } 961 } 962 963 // Searches for end of central directory (END) header. The contents of 964 // the END header will be read and placed in endbuf. Returns the file 965 // position of the END header, otherwise returns -1 if the END header 966 // was not found or an error occurred. 967 private END findEND() throws IOException 968 { 969 byte[] buf = new byte[READBLOCKSZ]; 970 long ziplen = ch.size(); 971 long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0; 972 long minPos = minHDR - (buf.length - ENDHDR); 973 974 for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) 975 { 976 int off = 0; 977 if (pos < 0) { 978 // Pretend there are some NUL bytes before start of file 979 off = (int)-pos; 980 Arrays.fill(buf, 0, off, (byte)0); 981 } 982 int len = buf.length - off; 983 if (readFullyAt(buf, off, len, pos + off) != len) 984 zerror("zip END header not found"); 985 986 // Now scan the block backwards for END header signature 987 for (int i = buf.length - ENDHDR; i >= 0; i--) { 988 if (buf[i+0] == (byte)'P' && 989 buf[i+1] == (byte)'K' && 990 buf[i+2] == (byte)'\005' && 991 buf[i+3] == (byte)'\006' && 992 (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) { 993 // Found END header 994 buf = Arrays.copyOfRange(buf, i, i + ENDHDR); 995 END end = new END(); 996 end.endsub = ENDSUB(buf); 997 end.centot = ENDTOT(buf); 998 end.cenlen = ENDSIZ(buf); 999 end.cenoff = ENDOFF(buf); 1000 end.comlen = ENDCOM(buf); 1001 end.endpos = pos + i; 1002 // try 
if there is zip64 end; 1003 byte[] loc64 = new byte[ZIP64_LOCHDR]; 1004 if (end.endpos < ZIP64_LOCHDR || 1005 readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR) 1006 != loc64.length || 1007 !locator64SigAt(loc64, 0)) { 1008 return end; 1009 } 1010 long end64pos = ZIP64_LOCOFF(loc64); 1011 byte[] end64buf = new byte[ZIP64_ENDHDR]; 1012 if (readFullyAt(end64buf, 0, end64buf.length, end64pos) 1013 != end64buf.length || 1014 !end64SigAt(end64buf, 0)) { 1015 return end; 1016 } 1017 // end64 found, 1018 long cenlen64 = ZIP64_ENDSIZ(end64buf); 1019 long cenoff64 = ZIP64_ENDOFF(end64buf); 1020 long centot64 = ZIP64_ENDTOT(end64buf); 1021 // double-check 1022 if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL || 1023 cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL || 1024 centot64 != end.centot && end.centot != ZIP64_MINVAL32) { 1025 return end; 1026 } 1027 // to use the end64 values 1028 end.cenlen = cenlen64; 1029 end.cenoff = cenoff64; 1030 end.centot = (int)centot64; // assume total < 2g 1031 end.endpos = end64pos; 1032 return end; 1033 } 1034 } 1035 } 1036 zerror("zip END header not found"); 1037 return null; //make compiler happy 1038 } 1039 1040 // Reads zip file central directory. Returns the file position of first 1041 // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL 1042 // then the error was a zip format error and zip->msg has the error text. 1043 // Always pass in -1 for knownTotal; it's used for a recursive call. 
// Reads in the END header and the central directory (CEN) table,
// validating every CEN record and populating the "inodes" map with one
// IndexNode per entry. Returns the raw CEN bytes (kept for later lazy
// Entry.readCEN() calls), or null if only an END header is present.
private byte[] initCEN() throws IOException {
    end = findEND();
    if (end.endpos == 0) {
        inodes = new LinkedHashMap<>(10);
        locpos = 0;
        buildNodeTree();
        return null;             // only END header present
    }
    if (end.cenlen > end.endpos)
        zerror("invalid END header (bad central directory size)");
    long cenpos = end.endpos - end.cenlen;     // position of CEN table

    // Get position of first local file (LOC) header, taking into
    // account that there may be a stub prefixed to the zip file.
    locpos = cenpos - end.cenoff;
    if (locpos < 0)
        zerror("invalid END header (bad central directory offset)");

    // read in the CEN and END
    byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
    if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
        zerror("read CEN tables failed");
    }
    // Iterate through the entries in the central directory
    inodes = new LinkedHashMap<>(end.centot + 1);
    int pos = 0;
    int limit = cen.length - ENDHDR;
    while (pos < limit) {
        if (!cenSigAt(cen, pos))
            zerror("invalid CEN header (bad signature)");
        int method = CENHOW(cen, pos);
        int nlen = CENNAM(cen, pos);
        int elen = CENEXT(cen, pos);
        int clen = CENCOM(cen, pos);
        // reject encrypted entries (general purpose bit 0)
        if ((CENFLG(cen, pos) & 1) != 0) {
            zerror("invalid CEN header (encrypted entry)");
        }
        if (method != METHOD_STORED && method != METHOD_DEFLATED) {
            zerror("invalid CEN header (unsupported compression method: " + method + ")");
        }
        // header + name must fit before the END record
        if (pos + CENHDR + nlen > limit) {
            zerror("invalid CEN header (bad header size)");
        }
        IndexNode inode = new IndexNode(cen, pos, nlen);
        inodes.put(inode, inode);

        // skip ext and comment
        pos += (CENHDR + nlen + elen + clen);
    }
    // the CEN records must end exactly where the END record begins
    if (pos + ENDHDR != cen.length) {
        zerror("invalid CEN header (bad header size)");
    }
    buildNodeTree();
    return cen;
}

// Throws if this file system has already been closed.
private void ensureOpen() throws IOException {
    if (!isOpen)
        throw new ClosedFileSystemException();
}

// Creates a new empty temporary file in the same directory as the
// specified file. A variant of Files.createTempFile. The file is
// remembered in "tmppaths" so it can be cleaned up on close.
private Path createTempFileInSameDirectoryAs(Path path)
    throws IOException
{
    Path parent = path.toAbsolutePath().getParent();
    Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
    Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
    tmppaths.add(tmpPath);
    return tmpPath;
}

////////////////////update & sync //////////////////////////////////////

// true when the in-memory view has diverged from the zip file on disk
private boolean hasUpdate = false;

// shared lookup key; callers must hold the "writeLock" before using it
private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

// Removes the given node from both the directory tree and the inode map.
private void updateDelete(IndexNode inode) {
    beginWrite();
    try {
        removeFromTree(inode);
        inodes.remove(inode);
        hasUpdate = true;
    } finally {
        endWrite();
    }
}

// Installs (or replaces) an updated entry in the inode map, and for
// NEW/FILECH/COPY entries links it into its parent's child list.
private void update(Entry e) {
    beginWrite();
    try {
        IndexNode old = inodes.put(e, e);
        if (old != null) {
            removeFromTree(old);
        }
        if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
            IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
            e.sibling = parent.child;
            parent.child = e;
        }
        hasUpdate = true;
    } finally {
        endWrite();
    }
}
// Copies over the whole LOC entry (header if necessary, data and ext) from
// the old zip file to the new one. Returns the number of bytes written
// for this entry; "written" is the current offset in the output, which is
// recorded into e.locoff as the entry's new LOC position.
private long copyLOCEntry(Entry e, boolean updateHeader,
                          OutputStream os,
                          long written, byte[] buf)
    throws IOException
{
    long locoff = e.locoff;  // where to read
    e.locoff = written;      // update the e.locoff with new value

    // calculate the size need to write out
    long size = 0;
    // account for the data descriptor (EXT) record, if present
    if ((e.flag & FLAG_DATADESCR) != 0) {
        if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL)
            size = 24;       // zip64 EXT: sig(4) + crc(4) + csize(8) + size(8)
        else
            size = 16;       // EXT: sig(4) + crc(4) + csize(4) + size(4)
    }
    // read loc, use the original loc.elen/nlen
    //
    // an extra byte after loc is read, which should be the first byte of the
    // 'name' field of the loc. if this byte is '/', which means the original
    // entry has an absolute path in original zip/jar file, the e.writeLOC()
    // is used to output the loc, in which the leading "/" will be removed
    if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1)
        throw new ZipException("loc: reading failed");

    if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') {
        // regenerate the LOC header; copy only data (+ ext)
        locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf);  // skip header
        size += e.csize;
        written = e.writeLOC(os) + size;
    } else {
        os.write(buf, 0, LOCHDR);    // write out the loc header
        locoff += LOCHDR;
        // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on
        // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf);
        size += LOCNAM(buf) + LOCEXT(buf) + e.csize;
        written = LOCHDR + size;
    }
    // bulk-copy "size" bytes of name/ext/data from the old file
    int n;
    while (size > 0 &&
        (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1)
    {
        if (size < n)
            n = (int)size;
        os.write(buf, 0, n);
        size -= n;
        locoff += n;
    }
    return written;
}

// Writes out the (possibly compressed) data of an updated entry, followed
// by its data descriptor when FLAG_DATADESCR is set. Returns bytes written
// after the LOC header. The entry's temp file, if any, is consumed and
// deleted here.
private long writeEntry(Entry e, OutputStream os, byte[] buf)
    throws IOException {

    if (e.bytes == null && e.file == null)    // dir, 0-length data
        return 0;

    long written = 0;
    // STORED entries only need a CRC; DEFLATED entries get compressed
    try (OutputStream os2 = e.method == METHOD_STORED ?
            new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) {
        if (e.bytes != null) {                 // in-memory
            os2.write(e.bytes, 0, e.bytes.length);
        } else if (e.file != null) {           // tmp file
            if (e.type == Entry.NEW || e.type == Entry.FILECH) {
                try (InputStream is = Files.newInputStream(e.file)) {
                    is.transferTo(os2);
                }
            }
            Files.delete(e.file);
            tmppaths.remove(e.file);
        }
    }
    written += e.csize;
    if ((e.flag & FLAG_DATADESCR) != 0) {
        written += e.writeEXT(os);
    }
    return written;
}

// Syncs the zip file system, if there is any update: rewrites the whole
// zip into a temp file (copying unchanged entries, re-encoding updated
// ones), then atomically replaces the original file.
private void sync() throws IOException {

    if (!hasUpdate)
        return;
    Path tmpFile = createTempFileInSameDirectoryAs(zfpath);
    try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE)))
    {
        ArrayList<Entry> elist = new ArrayList<>(inodes.size());
        long written = 0;
        byte[] buf = new byte[8192];
        Entry e = null;

        // write loc
        for (IndexNode inode : inodes.values()) {
            if (inode instanceof Entry) {    // an updated inode
                e = (Entry)inode;
                try {
                    if (e.type == Entry.COPY) {
                        // entry copy: the only thing changed is the "name"
                        // and "nlen" in LOC header, so we update/rewrite the
                        // LOC in new file and simply copy the rest (data and
                        // ext) without inflating/deflating from the old zip
                        // file LOC entry.
                        written += copyLOCEntry(e, true, os, written, buf);
                    } else {    // NEW, FILECH or CEN
                        e.locoff = written;
                        written += e.writeLOC(os);    // write loc header
                        written += writeEntry(e, os, buf);
                    }
                    elist.add(e);
                } catch (IOException x) {
                    x.printStackTrace();    // skip any inaccurate entry
                }
            } else {    // unchanged inode
                if (inode.pos == -1) {
                    continue;    // pseudo directory node
                }
                if (inode.name.length == 1 && inode.name[0] == '/') {
                    continue;    // no root '/' directory even if it
                                 // exists in original zip/jar file.
                }
                e = Entry.readCEN(this, inode);
                try {
                    written += copyLOCEntry(e, false, os, written, buf);
                    elist.add(e);
                } catch (IOException x) {
                    x.printStackTrace();    // skip any wrong entry
                }
            }
        }

        // now write back the cen and end table
        end.cenoff = written;
        for (Entry entry : elist) {
            written += entry.writeCEN(os);
        }
        end.centot = elist.size();
        end.cenlen = written - end.cenoff;
        end.write(os, written, forceEnd64);
    }

    // swap the rewritten file into place
    ch.close();
    Files.delete(zfpath);
    Files.move(tmpFile, zfpath, REPLACE_EXISTING);
    hasUpdate = false;    // clear
}

// Looks up the IndexNode for the given entry name, or null if absent.
IndexNode getInode(byte[] path) {
    if (path == null)
        throw new NullPointerException("path");
    return inodes.get(IndexNode.keyOf(path));
}

// Returns the full Entry for the given name, materializing it from the
// CEN table if it has not been updated; null for pseudo directories or
// missing entries.
Entry getEntry(byte[] path) throws IOException {
    IndexNode inode = getInode(path);
    if (inode instanceof Entry)
        return (Entry)inode;
    if (inode == null || inode.pos == -1)
        return null;
    return Entry.readCEN(this, inode);
}

// Deletes the named entry. Throws NoSuchFileException when absent (if
// failIfNotExists), DirectoryNotEmptyException for non-empty directories,
// and refuses to delete the root directory.
public void deleteFile(byte[] path, boolean failIfNotExists)
    throws IOException
{
    checkWritable();

    IndexNode inode = getInode(path);
    if (inode == null) {
        if (path != null && path.length == 0)
            // NOTE(review): message grammar is broken ("can't not be
            // delete"); should read "cannot be deleted" — left unchanged
            // here since the string is runtime behavior.
            throw new ZipException("root directory </> can't not be delete");
        if (failIfNotExists)
            throw new NoSuchFileException(getString(path));
    } else {
        if (inode.isDir() && inode.child != null)
            throw new DirectoryNotEmptyException(getString(path));
        updateDelete(inode);
    }
}

// Returns an output stream for either
// (1) writing the contents of a new entry, or
// (2) updating/replacing the contents of the specified existing entry.
// Data is staged in a temp file or in memory until close()/sync().
private OutputStream getOutputStream(Entry e) throws IOException {

    if (e.mtime == -1)
        e.mtime = System.currentTimeMillis();
    if (e.method == -1)
        e.method = defaultMethod;
    // store size, compressed size, and crc-32 in datadescr
    e.flag = FLAG_DATADESCR;
    if (zc.isUTF8())
        e.flag |= FLAG_USE_UTF8;
    OutputStream os;
    if (useTempFile) {
        e.file = getTempPathForEntry(null);
        os = Files.newOutputStream(e.file, WRITE);
    } else {
        os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192);
    }
    return new EntryOutputStream(e, os);
}

// Output stream handed to user code; counts bytes and, on close, records
// the entry's size, captures in-memory bytes, and registers the update.
private class EntryOutputStream extends FilterOutputStream {
    private Entry e;            // entry being written
    private long written;       // uncompressed byte count
    private boolean isClosed;   // guards against double close

    EntryOutputStream(Entry e, OutputStream os) throws IOException {
        super(os);
        this.e = Objects.requireNonNull(e, "Zip entry is null");
        // this.written = 0;
    }

    @Override
    public synchronized void write(int b) throws IOException {
        out.write(b);
        written += 1;
    }

    @Override
    public synchronized void write(byte b[], int off, int len)
        throws IOException {
        out.write(b, off, len);
        written += len;
    }

    @Override
    public synchronized void close() throws IOException {
        if (isClosed) {
            return;
        }
        isClosed = true;
        e.size = written;
        if (out instanceof ByteArrayOutputStream)
            e.bytes = ((ByteArrayOutputStream)out).toByteArray();
        super.close();
        update(e);    // publish the finished entry into the file system
    }
}
Wrapper output stream class to write out a "stored" entry. 1397 // (1) this class does not close the underlying out stream when 1398 // being closed. 1399 // (2) no need to be "synchronized", only used by sync() 1400 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1401 private Entry e; 1402 private CRC32 crc; 1403 private long written; 1404 private boolean isClosed; 1405 1406 EntryOutputStreamCRC32(Entry e, OutputStream os) throws IOException { 1407 super(os); 1408 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1409 this.crc = new CRC32(); 1410 } 1411 1412 @Override 1413 public void write(int b) throws IOException { 1414 out.write(b); 1415 crc.update(b); 1416 written += 1; 1417 } 1418 1419 @Override 1420 public void write(byte b[], int off, int len) 1421 throws IOException { 1422 out.write(b, off, len); 1423 crc.update(b, off, len); 1424 written += len; 1425 } 1426 1427 @Override 1428 public void close() throws IOException { 1429 if (isClosed) 1430 return; 1431 isClosed = true; 1432 e.size = e.csize = written; 1433 e.crc = crc.getValue(); 1434 } 1435 } 1436 1437 // Wrapper output stream class to write out a "deflated" entry. 1438 // (1) this class does not close the underlying out stream when 1439 // being closed. 
// Wrapper output stream class to write out a "deflated" entry.
// (1) this class does not close the underlying out stream when
//     being closed.
// (2) no need to be "synchronized", only used by sync()
private class EntryOutputStreamDef extends DeflaterOutputStream {
    private CRC32 crc;          // CRC-32 of the uncompressed data
    private Entry e;            // entry whose size/csize/crc are set on close
    private boolean isClosed;   // guards against double close

    EntryOutputStreamDef(Entry e, OutputStream os) throws IOException {
        super(os, getDeflater());    // deflater comes from the shared pool
        this.e = Objects.requireNonNull(e, "Zip entry is null");
        this.crc = new CRC32();
    }

    @Override
    public void write(byte b[], int off, int len)
        throws IOException {
        super.write(b, off, len);
        crc.update(b, off, len);
    }

    @Override
    public void close() throws IOException {
        if (isClosed)
            return;
        isClosed = true;
        finish();    // flush remaining compressed data, but keep "out" open
        e.size = def.getBytesRead();      // uncompressed size
        e.csize = def.getBytesWritten();  // compressed size
        e.crc = crc.getValue();
        releaseDeflater(def);             // return deflater to the pool
    }
}

// Returns an input stream over the entry's uncompressed data: raw bytes/
// temp file for NEW/FILECH entries, otherwise the channel-backed
// EntryInputStream, wrapped in an InflaterInputStream when deflated.
private InputStream getInputStream(Entry e)
    throws IOException
{
    InputStream eis = null;

    if (e.type == Entry.NEW) {
        // bytes & file hold the data in uncompressed form
        if (e.bytes != null)
            return new ByteArrayInputStream(e.bytes);
        else if (e.file != null)
            return Files.newInputStream(e.file);
        else
            throw new ZipException("update entry data is missing");
    } else if (e.type == Entry.FILECH) {
        // FILECH result is un-compressed.
        eis = Files.newInputStream(e.file);
        // TBD: wrap to hook close()
        // streams.add(eis);
        return eis;
    } else {  // untouched CEN or COPY
        eis = new EntryInputStream(e, ch);
    }
    if (e.method == METHOD_DEFLATED) {
        // MORE: Compute good size for inflater stream:
        long bufSize = e.size + 2; // Inflater likes a bit of slack
        if (bufSize > 65536)
            bufSize = 8192;
        final long size = e.size;
        eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
            private boolean isClosed = false;
            public void close() throws IOException {
                if (!isClosed) {
                    releaseInflater(inf);    // return inflater to the pool
                    this.in.close();
                    isClosed = true;
                    streams.remove(this);
                }
            }
            // Override fill() method to provide an extra "dummy" byte
            // at the end of the input stream. This is required when
            // using the "nowrap" Inflater option. (it appears the new
            // zlib in 7 does not need it, but keep it for now)
            protected void fill() throws IOException {
                if (eof) {
                    throw new EOFException("Unexpected end of ZLIB input stream");
                }
                len = this.in.read(buf, 0, buf.length);
                if (len == -1) {
                    buf[0] = 0;    // the dummy trailing byte
                    len = 1;
                    eof = true;
                }
                inf.setInput(buf, 0, len);
            }
            private boolean eof;

            public int available() throws IOException {
                if (isClosed)
                    return 0;
                // bytes of uncompressed data not yet produced
                long avail = size - inf.getBytesWritten();
                return avail > (long) Integer.MAX_VALUE ?
                    Integer.MAX_VALUE : (int) avail;
            }
        };
    } else if (e.method == METHOD_STORED) {
        // TBD: wrap/ it does not seem necessary
    } else {
        throw new ZipException("invalid compression method");
    }
    streams.add(eis);    // track so close() of the fs can close it
    return eis;
}
// Inner class implementing the input stream used to read
// a (possibly compressed) zip file entry.
private class EntryInputStream extends InputStream {
    private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                            // point to a new channel after sync()
    private long pos;                       // current position within entry data
    protected long rem;                     // number of remaining bytes within entry

    EntryInputStream(Entry e, SeekableByteChannel zfch)
        throws IOException
    {
        this.zfch = zfch;
        rem = e.csize;
        pos = e.locoff;
        if (pos == -1) {
            Entry e2 = getEntry(e.name);
            if (e2 == null) {
                throw new ZipException("invalid loc for entry <" + e.name + ">");
            }
            pos = e2.locoff;
        }
        // stored negated: initDataPos() resolves the real data offset on
        // first read by parsing the LOC header
        pos = -pos;  // lazy initialize the real data offset
    }

    public int read(byte b[], int off, int len) throws IOException {
        ensureOpen();
        initDataPos();
        if (rem == 0) {
            return -1;
        }
        if (len <= 0) {
            return 0;
        }
        if (len > rem) {
            len = (int) rem;
        }
        // readFullyAt()
        long n = 0;
        ByteBuffer bb = ByteBuffer.wrap(b);
        bb.position(off);
        bb.limit(off + len);
        // the channel position is shared; guard the position+read pair
        synchronized(zfch) {
            n = zfch.position(pos).read(bb);
        }
        if (n > 0) {
            pos += n;
            rem -= n;
        }
        if (rem == 0) {
            close();
        }
        return (int)n;
    }

    public int read() throws IOException {
        byte[] b = new byte[1];
        if (read(b, 0, 1) == 1) {
            return b[0] & 0xff;
        } else {
            return -1;
        }
    }

    public long skip(long n) throws IOException {
        ensureOpen();
        if (n > rem)
            n = rem;
        pos += n;
        rem -= n;
        if (rem == 0) {
            close();
        }
        return n;
    }

    public int available() {
        return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
    }

    public void close() {
        rem = 0;
        streams.remove(this);
    }

    // Converts the lazily-negated LOC offset into the real data offset by
    // reading the LOC header and skipping its name and extra fields.
    private void initDataPos() throws IOException {
        if (pos <= 0) {
            pos = -pos + locpos;    // locpos accounts for any stub prefix
            byte[] buf = new byte[LOCHDR];
            if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                throw new ZipException("invalid loc " + pos + " for entry reading");
            }
            pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
        }
    }
}

// Convenience helper to throw a ZipException with the given message.
static void zerror(String msg) throws ZipException {
    throw new ZipException(msg);
}

// Maximum number of de/inflaters we cache
private final int MAX_FLATER = 20;
// List of available Inflater objects for decompression
private final List<Inflater> inflaters = new ArrayList<>();

// Gets an inflater from the list of available inflaters or allocates
// a new one.
private Inflater getInflater() {
    synchronized (inflaters) {
        int size = inflaters.size();
        if (size > 0) {
            Inflater inf = inflaters.remove(size - 1);
            return inf;
        } else {
            return new Inflater(true);    // "nowrap": raw deflate data
        }
    }
}

// Releases the specified inflater to the list of available inflaters.
private void releaseInflater(Inflater inf) {
    synchronized (inflaters) {
        if (inflaters.size() < MAX_FLATER) {
            inf.reset();
            inflaters.add(inf);
        } else {
            inf.end();    // pool full: free native resources
        }
    }
}

// List of available Deflater objects for compression
private final List<Deflater> deflaters = new ArrayList<>();
1682 private Deflater getDeflater() { 1683 synchronized (deflaters) { 1684 int size = deflaters.size(); 1685 if (size > 0) { 1686 Deflater def = deflaters.remove(size - 1); 1687 return def; 1688 } else { 1689 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1690 } 1691 } 1692 } 1693 1694 // Releases the specified inflater to the list of available inflaters. 1695 private void releaseDeflater(Deflater def) { 1696 synchronized (deflaters) { 1697 if (inflaters.size() < MAX_FLATER) { 1698 def.reset(); 1699 deflaters.add(def); 1700 } else { 1701 def.end(); 1702 } 1703 } 1704 } 1705 1706 // End of central directory record 1707 static class END { 1708 // these 2 fields are not used by anyone and write() uses "0" 1709 // int disknum; 1710 // int sdisknum; 1711 int endsub; // endsub 1712 int centot; // 4 bytes 1713 long cenlen; // 4 bytes 1714 long cenoff; // 4 bytes 1715 int comlen; // comment length 1716 byte[] comment; 1717 1718 /* members of Zip64 end of central directory locator */ 1719 // int diskNum; 1720 long endpos; 1721 // int disktot; 1722 1723 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 1724 boolean hasZip64 = forceEnd64; // false; 1725 long xlen = cenlen; 1726 long xoff = cenoff; 1727 if (xlen >= ZIP64_MINVAL) { 1728 xlen = ZIP64_MINVAL; 1729 hasZip64 = true; 1730 } 1731 if (xoff >= ZIP64_MINVAL) { 1732 xoff = ZIP64_MINVAL; 1733 hasZip64 = true; 1734 } 1735 int count = centot; 1736 if (count >= ZIP64_MINVAL32) { 1737 count = ZIP64_MINVAL32; 1738 hasZip64 = true; 1739 } 1740 if (hasZip64) { 1741 long off64 = offset; 1742 //zip64 end of central directory record 1743 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1744 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1745 writeShort(os, 45); // version made by 1746 writeShort(os, 45); // version needed to extract 1747 writeInt(os, 0); // number of this disk 1748 writeInt(os, 0); // central directory start disk 1749 writeLong(os, centot); // number of 
directory entries on disk 1750 writeLong(os, centot); // number of directory entries 1751 writeLong(os, cenlen); // length of central directory 1752 writeLong(os, cenoff); // offset of central directory 1753 1754 //zip64 end of central directory locator 1755 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1756 writeInt(os, 0); // zip64 END start disk 1757 writeLong(os, off64); // offset of zip64 END 1758 writeInt(os, 1); // total number of disks (?) 1759 } 1760 writeInt(os, ENDSIG); // END record signature 1761 writeShort(os, 0); // number of this disk 1762 writeShort(os, 0); // central directory start disk 1763 writeShort(os, count); // number of directory entries on disk 1764 writeShort(os, count); // total number of directory entries 1765 writeInt(os, xlen); // length of central directory 1766 writeInt(os, xoff); // offset of central directory 1767 if (comment != null) { // zip file comment 1768 writeShort(os, comment.length); 1769 writeBytes(os, comment); 1770 } else { 1771 writeShort(os, 0); 1772 } 1773 } 1774 } 1775 1776 // Internal node that links a "name" to its pos in cen table. 1777 // The node itself can be used as a "key" to lookup itself in 1778 // the HashMap inodes. 
1779 static class IndexNode { 1780 byte[] name; 1781 int hashcode; // node is hashable/hashed by its name 1782 int pos = -1; // position in cen table, -1 menas the 1783 // entry does not exists in zip file 1784 boolean isdir; 1785 1786 IndexNode(byte[] name, boolean isdir) { 1787 name(name); 1788 this.isdir = isdir; 1789 this.pos = -1; 1790 } 1791 1792 IndexNode(byte[] name, int pos) { 1793 name(name); 1794 this.pos = pos; 1795 } 1796 1797 // constructor for cenInit() (1) remove tailing '/' (2) pad leading '/' 1798 IndexNode(byte[] cen, int pos, int nlen) { 1799 int noff = pos + CENHDR; 1800 if (cen[noff + nlen - 1] == '/') { 1801 isdir = true; 1802 nlen--; 1803 } 1804 if (nlen > 0 && cen[noff] == '/') { 1805 name = Arrays.copyOfRange(cen, noff, noff + nlen); 1806 } else { 1807 name = new byte[nlen + 1]; 1808 System.arraycopy(cen, noff, name, 1, nlen); 1809 name[0] = '/'; 1810 } 1811 name(name); 1812 this.pos = pos; 1813 } 1814 1815 private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>(); 1816 1817 final static IndexNode keyOf(byte[] name) { // get a lookup key; 1818 IndexNode key = cachedKey.get(); 1819 if (key == null) { 1820 key = new IndexNode(name, -1); 1821 cachedKey.set(key); 1822 } 1823 return key.as(name); 1824 } 1825 1826 final void name(byte[] name) { 1827 this.name = name; 1828 this.hashcode = Arrays.hashCode(name); 1829 } 1830 1831 final IndexNode as(byte[] name) { // reuse the node, mostly 1832 name(name); // as a lookup "key" 1833 return this; 1834 } 1835 1836 boolean isDir() { 1837 return isdir; 1838 } 1839 1840 public boolean equals(Object other) { 1841 if (!(other instanceof IndexNode)) { 1842 return false; 1843 } 1844 if (other instanceof ParentLookup) { 1845 return ((ParentLookup)other).equals(this); 1846 } 1847 return Arrays.equals(name, ((IndexNode)other).name); 1848 } 1849 1850 public int hashCode() { 1851 return hashcode; 1852 } 1853 1854 IndexNode() {} 1855 IndexNode sibling; 1856 IndexNode child; // 1st child 1857 } 
1858 1859 static class Entry extends IndexNode implements ZipFileAttributes { 1860 1861 static final int CEN = 1; // entry read from cen 1862 static final int NEW = 2; // updated contents in bytes or file 1863 static final int FILECH = 3; // fch update in "file" 1864 static final int COPY = 4; // copy of a CEN entry 1865 1866 byte[] bytes; // updated content bytes 1867 Path file; // use tmp file to store bytes; 1868 int type = CEN; // default is the entry read from cen 1869 1870 // entry attributes 1871 int version; 1872 int flag; 1873 int method = -1; // compression method 1874 long mtime = -1; // last modification time (in DOS time) 1875 long atime = -1; // last access time 1876 long ctime = -1; // create time 1877 long crc = -1; // crc-32 of entry data 1878 long csize = -1; // compressed size of entry data 1879 long size = -1; // uncompressed size of entry data 1880 byte[] extra; 1881 1882 // cen 1883 1884 // these fields are not used by anyone and writeCEN uses "0" 1885 // int versionMade; 1886 // int disk; 1887 // int attrs; 1888 // long attrsEx; 1889 long locoff; 1890 byte[] comment; 1891 1892 Entry() {} 1893 1894 Entry(byte[] name, boolean isdir, int method) { 1895 name(name); 1896 this.isdir = isdir; 1897 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1898 this.crc = 0; 1899 this.size = 0; 1900 this.csize = 0; 1901 this.method = method; 1902 } 1903 1904 Entry(byte[] name, int type, boolean isdir, int method) { 1905 this(name, isdir, method); 1906 this.type = type; 1907 } 1908 1909 Entry (Entry e, int type) { 1910 name(e.name); 1911 this.isdir = e.isdir; 1912 this.version = e.version; 1913 this.ctime = e.ctime; 1914 this.atime = e.atime; 1915 this.mtime = e.mtime; 1916 this.crc = e.crc; 1917 this.size = e.size; 1918 this.csize = e.csize; 1919 this.method = e.method; 1920 this.extra = e.extra; 1921 /* 1922 this.versionMade = e.versionMade; 1923 this.disk = e.disk; 1924 this.attrs = e.attrs; 1925 this.attrsEx = e.attrsEx; 1926 */ 1927 
this.locoff = e.locoff; 1928 this.comment = e.comment; 1929 this.type = type; 1930 } 1931 1932 Entry (byte[] name, Path file, int type) { 1933 this(name, type, false, METHOD_STORED); 1934 this.file = file; 1935 } 1936 1937 int version() throws ZipException { 1938 if (method == METHOD_DEFLATED) 1939 return 20; 1940 else if (method == METHOD_STORED) 1941 return 10; 1942 throw new ZipException("unsupported compression method"); 1943 } 1944 1945 ///////////////////// CEN ////////////////////// 1946 static Entry readCEN(ZipFileSystem zipfs, IndexNode inode) 1947 throws IOException 1948 { 1949 return new Entry().cen(zipfs, inode); 1950 } 1951 1952 private Entry cen(ZipFileSystem zipfs, IndexNode inode) 1953 throws IOException 1954 { 1955 byte[] cen = zipfs.cen; 1956 int pos = inode.pos; 1957 if (!cenSigAt(cen, pos)) 1958 zerror("invalid CEN header (bad signature)"); 1959 version = CENVER(cen, pos); 1960 flag = CENFLG(cen, pos); 1961 method = CENHOW(cen, pos); 1962 mtime = dosToJavaTime(CENTIM(cen, pos)); 1963 crc = CENCRC(cen, pos); 1964 csize = CENSIZ(cen, pos); 1965 size = CENLEN(cen, pos); 1966 int nlen = CENNAM(cen, pos); 1967 int elen = CENEXT(cen, pos); 1968 int clen = CENCOM(cen, pos); 1969 /* 1970 versionMade = CENVEM(cen, pos); 1971 disk = CENDSK(cen, pos); 1972 attrs = CENATT(cen, pos); 1973 attrsEx = CENATX(cen, pos); 1974 */ 1975 locoff = CENOFF(cen, pos); 1976 pos += CENHDR; 1977 this.name = inode.name; 1978 this.isdir = inode.isdir; 1979 this.hashcode = inode.hashcode; 1980 1981 pos += nlen; 1982 if (elen > 0) { 1983 extra = Arrays.copyOfRange(cen, pos, pos + elen); 1984 pos += elen; 1985 readExtra(zipfs); 1986 } 1987 if (clen > 0) { 1988 comment = Arrays.copyOfRange(cen, pos, pos + clen); 1989 } 1990 return this; 1991 } 1992 1993 int writeCEN(OutputStream os) throws IOException { 1994 int version0 = version(); 1995 long csize0 = csize; 1996 long size0 = size; 1997 long locoff0 = locoff; 1998 int elen64 = 0; // extra for ZIP64 1999 int elenNTFS = 0; // 
extra for NTFS (a/c/mtime) 2000 int elenEXTT = 0; // extra for Extended Timestamp 2001 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2002 2003 byte[] zname = isdir ? toDirectoryPath(name) : name; 2004 2005 // confirm size/length 2006 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2007 int elen = (extra != null) ? extra.length : 0; 2008 int eoff = 0; 2009 int clen = (comment != null) ? comment.length : 0; 2010 if (csize >= ZIP64_MINVAL) { 2011 csize0 = ZIP64_MINVAL; 2012 elen64 += 8; // csize(8) 2013 } 2014 if (size >= ZIP64_MINVAL) { 2015 size0 = ZIP64_MINVAL; // size(8) 2016 elen64 += 8; 2017 } 2018 if (locoff >= ZIP64_MINVAL) { 2019 locoff0 = ZIP64_MINVAL; 2020 elen64 += 8; // offset(8) 2021 } 2022 if (elen64 != 0) { 2023 elen64 += 4; // header and data sz 4 bytes 2024 } 2025 while (eoff + 4 < elen) { 2026 int tag = SH(extra, eoff); 2027 int sz = SH(extra, eoff + 2); 2028 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2029 foundExtraTime = true; 2030 } 2031 eoff += (4 + sz); 2032 } 2033 if (!foundExtraTime) { 2034 if (isWindows) { // use NTFS 2035 elenNTFS = 36; // total 36 bytes 2036 } else { // Extended Timestamp otherwise 2037 elenEXTT = 9; // only mtime in cen 2038 } 2039 } 2040 writeInt(os, CENSIG); // CEN header signature 2041 if (elen64 != 0) { 2042 writeShort(os, 45); // ver 4.5 for zip64 2043 writeShort(os, 45); 2044 } else { 2045 writeShort(os, version0); // version made by 2046 writeShort(os, version0); // version needed to extract 2047 } 2048 writeShort(os, flag); // general purpose bit flag 2049 writeShort(os, method); // compression method 2050 // last modification time 2051 writeInt(os, (int)javaToDosTime(mtime)); 2052 writeInt(os, crc); // crc-32 2053 writeInt(os, csize0); // compressed size 2054 writeInt(os, size0); // uncompressed size 2055 writeShort(os, nlen); 2056 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2057 2058 if (comment != null) { 2059 writeShort(os, Math.min(clen, 
0xffff));
            } else {
                writeShort(os, 0);
            }
            writeShort(os, 0);              // starting disk number
            writeShort(os, 0);              // internal file attributes (unused)
            writeInt(os, 0);                // external file attributes (unused)
            writeInt(os, locoff0);          // relative offset of local header
            writeBytes(os, zname, 1, nlen); // zname[0] is the leading slash; skip it
            // Zip64 extra field: only the fields whose 32-bit CEN slots were
            // written as ZIP64_MINVAL are present, in size/csize/locoff order.
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);// Zip64 extra
                writeShort(os, elen64 - 4); // size of "this" extra block
                if (size0 == ZIP64_MINVAL)
                    writeLong(os, size);
                if (csize0 == ZIP64_MINVAL)
                    writeLong(os, csize);
                if (locoff0 == ZIP64_MINVAL)
                    writeLong(os, locoff);
            }
            // NTFS extra field: three 8-byte Windows FILETIME stamps.
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);            // reserved
                writeShort(os, 0x0001);     // NTFS attr tag
                writeShort(os, 24);         // 3 x 8-byte timestamps follow
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            // Extended-timestamp extra field: the CEN copy carries only mtime;
            // the flags byte still advertises which stamps exist in the LOC copy.
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);
                if (ctime == -1)
                    os.write(0x3);          // mtime and atime
                else
                    os.write(0x7);          // mtime, atime and ctime
                writeInt(os, javaToUnixTime(mtime));
            }
            if (extra != null)              // whatever not recognized
                writeBytes(os, extra);
            if (comment != null)            //TBD: 0, Math.min(commentBytes.length, 0xffff));
                writeBytes(os, comment);
            return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT;
        }

        ///////////////////// LOC //////////////////////

        /**
         * Writes this entry's local file header (LOC) to {@code os} and
         * returns the number of bytes written, including the name and all
         * extra-field blocks emitted here.
         */
        int writeLOC(OutputStream os) throws IOException {
            int version0 = version();
            byte[] zname = isdir ? toDirectoryPath(name) : name;
            int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash
            int elen = (extra != null) ? extra.length : 0;
            boolean foundExtraTime = false;     // if extra timestamp present
            int eoff = 0;
            int elen64 = 0;
            int elenEXTT = 0;
            int elenNTFS = 0;
            writeInt(os, LOCSIG);               // LOC header signature
            if ((flag & FLAG_DATADESCR) != 0) {
                writeShort(os, version0);       // version needed to extract
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;    //headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);         // ver 4.5 for zip64
                } else {
                    writeShort(os, version0);   // version needed to extract
                }
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);              // crc-32
                if (elen64 != 0) {
                    // real sizes go in the Zip64 extra block written below
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);        // compressed size
                    writeInt(os, size);         // uncompressed size
                }
            }
            // Scan the existing extra data: if it already carries a timestamp
            // block (EXTT or NTFS) we must not append a second one.
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;              // NTFS, total 36 bytes
                } else {                        // on unix use "ext time"
                    elenEXTT = 9;               // header(4) + flags(1) + mtime(4)
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, zname, 1, nlen);
            if (elen64 != 0) {
                // LOC Zip64 extra always carries both sizes, in this order
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);                // reserved
                writeShort(os, 0x0001);         // NTFS attr tag
                writeShort(os, 24);             // 3 x 8-byte timestamps follow
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);   // size for the following data block
                int fbyte = 0x1;                // mtime is always present
                if (atime != -1)                // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)                // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);                // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor
        /**
         * Writes the data descriptor (EXT) record following the entry data
         * and returns its size in bytes: 24 when Zip64 sizes are needed,
         * otherwise 16.
         */
        int writeEXT(OutputStream os) throws IOException {
            writeInt(os, EXTSIG);               // EXT header signature
            writeInt(os, crc);                  // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);            // compressed size
                writeInt(os, size);             // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        // Recognized blocks (Zip64, NTFS, EXTT) are consumed; anything else
        // is compacted in place and kept in 'extra'. On return, 'extra' is
        // either the trimmed residue of unrecognized blocks or null.
        void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;
            // NOTE(review): the loop condition skips a block whose 4-byte
            // header ends exactly at elen; confirm that is intentional.
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)            // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // Each 8-byte field is present only when the corresponding
                    // 32-bit CEN field was ZIP64_MINVAL; these breaks leave the
                    // switch and skip the truncated block.
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen)     // invalid zip64 extra
                            break;              // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                        pos += 8;
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)                // reserved(4) + tag(2) + size(2) + 24
                        break;
                    pos += 4;                   // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)
                        break;
                    if (SH(extra, pos + 2) != 24)
                        break;
                    // override the loc field, datatime here is
                    // more "accurate"
                    mtime = winToJavaTime(LL(extra, pos + 4));
                    atime = winToJavaTime(LL(extra, pos + 12));
                    ctime = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime, if flag
                    // "zipinfo-time" is not specified to false;
                    // there is performance cost (move up to loc and read) to
                    // access the loc table foreach entry;
                    if (zipfs.noExtt) {
                        if (sz == 5)            // flags(1) + mtime(4)
                            mtime = unixToJavaTime(LG(extra, pos + 1));
                        break;
                    }
                    // read the LOC header to locate its extra-field area
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (!locSigAt(buf, 0))
                        throw new ZipException("loc: wrong sig ->"
                                           + Long.toString(getSig(buf, 0), 16));
                    int locElen = LOCEXT(buf);
                    if (locElen < 9)            // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    // scan the LOC extra data for the EXTT block and decode
                    // whichever of mtime/atime/ctime its flags byte declares
                    int locPos = 0;
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:    // unknown tag
                    // keep unrecognized blocks, compacting them to the front
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;
        }

        ///////// basic file attributes ///////////
        @Override
        public FileTime creationTime() {
            // fall back to mtime when no creation time was recorded
            return FileTime.fromMillis(ctime == -1 ? mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        @Override
        public boolean isOther() {
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        @Override
        public FileTime lastAccessTime() {
            // fall back to mtime when no access time was recorded
            return FileTime.fromMillis(atime == -1 ? mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        @Override
        public long size() {
            return size;
        }

        @Override
        public boolean isSymbolicLink() {
            return false;
        }

        @Override
        public Object fileKey() {
            return null;
        }

        ///////// zip entry attributes ///////////
        public long compressedSize() {
            return csize;
        }

        public long crc() {
            return crc;
        }

        public int method() {
            return method;
        }

        // defensive copy so callers cannot mutate our extra data
        public byte[] extra() {
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        // defensive copy so callers cannot mutate our comment bytes
        public byte[] comment() {
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }

        // multi-line human-readable dump of all attributes, for debugging
        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format("    name            : %s%n", new String(name));
            fm.format("    creationTime    : %tc%n", creationTime().toMillis());
            fm.format("    lastAccessTime  : %tc%n", lastAccessTime().toMillis());
            fm.format("    lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format("    isRegularFile   : %b%n", isRegularFile());
            fm.format("    isDirectory     : %b%n", isDirectory());
            fm.format("    isSymbolicLink  : %b%n", isSymbolicLink());
            fm.format("    isOther         : %b%n", isOther());
            fm.format("    fileKey         : %s%n", fileKey());
            fm.format("    size            : %d%n", size());
            fm.format("    compressedSize  : %d%n", compressedSize());
            fm.format("    crc             : %x%n", crc());
            fm.format("    method          : %d%n", method());
            fm.close();
            return sb.toString();
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourselves as
    // implemented below.

    // default time stamp for pseudo entries
    private long zfsDefaultTimeStamp = System.currentTimeMillis();

    // Unlinks 'inode' from its parent's singly-linked child/sibling chain.
    // NOTE(review): assumes the parent directory node exists in 'inodes' and
    // that 'inode' is actually linked under it — no null checks here; confirm
    // callers guarantee this.
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            // inode is the head of the child list
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    // purely for parent lookup, so we don't have to copy the parent
    // name every time
    static class ParentLookup extends IndexNode {
        int len;    // number of leading bytes of 'name' that form the key
        ParentLookup() {}

        final ParentLookup as(byte[] name, int len) { // as a lookup "key"
            name(name, len);
            return this;
        }

        // Points this lookup key at the first 'len' bytes of 'name' (no copy)
        // and precomputes the hash over exactly that prefix.
        void name(byte[] name, int len) {
            this.name = name;
            this.len = len;
            // calculate the hashcode the same way as Arrays.hashCode() does
            int result = 1;
            for (int i = 0; i < len; i++)
                result = 31 * result + name[i];
            this.hashcode = result;
        }

        // Equal when the other node's full name matches our first 'len' bytes.
        // NOTE(review): hashcode above is computed over the same 'len'-byte
        // prefix, which keeps equals/hashCode consistent for map lookups —
        // confirm IndexNode.hashCode() returns the 'hashcode' field.
        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            byte[] oname = ((IndexNode)other).name;
            return Arrays.equals(name, 0, len,
                                 oname, 0, oname.length);
        }

    }

    // Links every entry in 'inodes' into a tree rooted at ROOTPATH via the
    // child/sibling fields, creating pseudo directory nodes for parents the
    // ZIP file never stored. Runs under the write lock; iterates a snapshot
    // of the key set because pseudo parents are added to 'inodes' mid-walk.
    private void buildNodeTree() throws IOException {
        beginWrite();
        try {
            IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH));
            if (root == null) {
                root = new IndexNode(ROOTPATH, true);
            } else {
                inodes.remove(root);    // re-inserted below, after the snapshot
            }
            IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]);
            inodes.put(root, root);
            ParentLookup lookup = new ParentLookup();
            for (IndexNode node : nodes) {
                IndexNode parent;
                // walk up, creating missing parents, until we reach a node
                // whose parent already exists (or the root)
                while (true) {
                    int off = getParentOff(node.name);
                    if (off <= 1) {    // parent is root
                        node.sibling = root.child;
                        root.child = node;
                        break;
                    }
                    lookup = lookup.as(node.name, off);
                    if (inodes.containsKey(lookup)) {
                        parent = inodes.get(lookup);
                        node.sibling = parent.child;
                        parent.child = node;
                        break;
                    }
                    // add new pseudo directory entry
                    parent = new IndexNode(Arrays.copyOf(node.name, off), true);
                    inodes.put(parent, parent);
                    node.sibling = parent.child;
                    parent.child = node;
                    node = parent;    // continue linking the new parent upward
                }
            }
        } finally {
            endWrite();
        }
    }
}