1 /* 2 * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package jdk.nio.zipfs; 27 28 import static java.lang.Boolean.TRUE; 29 import static jdk.nio.zipfs.ZipConstants.*; 30 import static jdk.nio.zipfs.ZipUtils.*; 31 import static java.nio.file.StandardOpenOption.*; 32 import static java.nio.file.StandardCopyOption.*; 33 34 import java.io.BufferedOutputStream; 35 import java.io.ByteArrayInputStream; 36 import java.io.ByteArrayOutputStream; 37 import java.io.EOFException; 38 import java.io.FilterOutputStream; 39 import java.io.IOException; 40 import java.io.InputStream; 41 import java.io.OutputStream; 42 import java.nio.ByteBuffer; 43 import java.nio.MappedByteBuffer; 44 import java.nio.channels.FileChannel; 45 import java.nio.channels.FileLock; 46 import java.nio.channels.ReadableByteChannel; 47 import java.nio.channels.SeekableByteChannel; 48 import java.nio.channels.WritableByteChannel; 49 import java.nio.file.*; 50 import java.nio.file.attribute.FileAttribute; 51 import java.nio.file.attribute.FileTime; 52 import java.nio.file.attribute.UserPrincipalLookupService; 53 import java.nio.file.spi.FileSystemProvider; 54 import java.security.AccessController; 55 import java.security.PrivilegedAction; 56 import java.security.PrivilegedActionException; 57 import java.security.PrivilegedExceptionAction; 58 import java.util.ArrayList; 59 import java.util.Arrays; 60 import java.util.Collections; 61 import java.util.Formatter; 62 import java.util.HashSet; 63 import java.util.Iterator; 64 import java.util.LinkedHashMap; 65 import java.util.List; 66 import java.util.Map; 67 import java.util.Objects; 68 import java.util.Set; 69 import java.util.concurrent.locks.ReadWriteLock; 70 import java.util.concurrent.locks.ReentrantReadWriteLock; 71 import java.util.regex.Pattern; 72 import java.util.zip.CRC32; 73 import java.util.zip.Deflater; 74 import java.util.zip.DeflaterOutputStream; 75 import java.util.zip.Inflater; 76 import java.util.zip.InflaterInputStream; 77 import java.util.zip.ZipException; 78 79 /** 80 * A FileSystem 
built on a zip file
 *
 * @author Xueming Shen
 */
class ZipFileSystem extends FileSystem {
    private final ZipFileSystemProvider provider;
    private final Path zfpath;                 // the underlying zip file on the default FS
    final ZipCoder zc;                         // encodes/decodes entry names and comments
    private final ZipPath rootdir;
    private boolean readOnly = false;          // readonly file system

    // configurable by env map
    private final boolean noExtt;              // see readExtra()
    private final boolean useTempFile;         // use a temp file for newOS, default
                                               // is to use BAOS for better performance
    private static final boolean isWindows = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)() -> System.getProperty("os.name")
                                                   .startsWith("Windows"));
    private final boolean forceEnd64;          // always write a ZIP64 end record
    private final int defaultMethod;           // METHOD_STORED if "noCompression=true"
                                               // METHOD_DEFLATED otherwise

    // Opens (or, with env "create"=true, creates) the zip file at zfpath and
    // reads its central directory. Throws FileSystemNotFoundException if the
    // file does not exist and creation was not requested; propagates any
    // IOException from reading the CEN (closing the channel first).
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = isTrue(env, "useTempFile");
        this.forceEnd64 = isTrue(env, "forceZIP64End");
        this.defaultMethod = isTrue(env, "noCompression") ? METHOD_STORED: METHOD_DEFLATED;
        if (Files.notExists(zfpath)) {
            // create a new zip if not exists
            if (isTrue(env, "create")) {
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath));
        this.readOnly = !writeable;            // read-only when the zip file itself is not writable
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            try {
                this.ch.close();               // don't leak the channel if CEN parsing fails
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;
    }

    // returns true if there is a name=true/"true" setting in env
    private static boolean isTrue(Map<String, ?> env, String name) {
        return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // Throws if this file system was opened (or later marked) read-only.
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }

    void setReadOnly() {
        this.readOnly = true;
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        return List.of(rootdir);
    }

    ZipPath getRootDir() {
        return rootdir;
    }

    // Joins 'first' and any non-empty 'more' components with '/' into a ZipPath.
    @Override
    public ZipPath getPath(String first, String... more) {
        if (more.length == 0) {
            return new ZipPath(this, first);
        }
        StringBuilder sb = new StringBuilder();
        sb.append(first);
        for (String path : more) {
            if (path.length() > 0) {
                if (sb.length() > 0) {
                    sb.append('/');
                }
                sb.append(path);
            }
        }
        return new ZipPath(this, sb.toString());
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        return List.of(new ZipFileStore(rootdir));
    }

    private static final Set<String> supportedFileAttributeViews =
        Set.of("basic", "zip");

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    // Builds a PathMatcher from "glob:..." or "regex:..." input; glob patterns
    // are first translated to a regex by ZipUtils.toRegexPattern.
    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);
        } else {
            if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return new PathMatcher() {
            @Override
            public boolean matches(Path path) {
                return pattern.matcher(path.toString()).matches();
            }
        };
    }

    // Closes the file system: marks it closed, closes outstanding input
    // streams, syncs pending updates back to the zip file, releases
    // inflaters/deflaters, and deletes temp files. The first exception from
    // temp-file deletion is thrown, with later ones suppressed.
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is : copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> {
                sync(); return null;
            });
            ch.close();                 // close the ch just in case no update
                                        // and sync didn't close the ch
        } catch (PrivilegedActionException e) {
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p : tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
            throw ioe;
    }

    // Returns the attributes of the entry at 'path', or null if it does not
    // exist. A name that exists only as a parent of other entries ("pseudo
    // directory") gets a synthesized STORED directory entry with default times.
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry(path);
            if (e == null) {
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                // pseudo directory, uses METHOD_STORED
                e = new Entry(inode.name, inode.isdir, METHOD_STORED);
                e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp;
            }
        } finally {
            endRead();
        }
        return e;
    }

    // Verifies that 'path' exists; throws NoSuchFileException otherwise.
    void checkAccess(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            // is it necessary to readCEN as a sanity check?
            if (getInode(path) == null) {
                throw new NoSuchFileException(toString());
            }

        } finally {
            endRead();
        }
    }

    // Sets any of mtime/atime/ctime (null means leave unchanged) on the entry,
    // converting a CEN entry to a COPY entry so the change is written on sync.
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;     // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(ZipPath dir,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            byte[] path = dir.getResolvedPath();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                // (1) assume all path from zip file itself is "normalized"
                // (2) IndexNode.name is absolute. see IndexNode(byte[],int,int)
                // (3) if parent "dir" is relative when ZipDirectoryStream
                //     is created, the returned child path needs to be relative
                //     as well.
                byte[] cname = child.name;
                if (!dir.isAbsolute()) {
                    cname = Arrays.copyOfRange(cname, 1, cname.length);
                }
                ZipPath zpath = new ZipPath(this, cname, true);
                if (filter == null || filter.accept(zpath))
                    list.add(zpath);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    // Creates a directory entry; all parent entries must already exist.
    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        //  dir = toDirectoryPath(dir);
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW, true, METHOD_STORED);
            update(e);
        } finally {
            endWrite();
        }
    }

    // Copies (or, when deletesrc is true, moves) entry 'src' to 'dst',
    // honoring REPLACE_EXISTING and COPY_ATTRIBUTES.
    void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry(src);  // ensureOpen checked

            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime= u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        boolean hasTruncate = false;
        for (OpenOption opt : options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
            if (opt == TRUNCATE_EXISTING)
                hasTruncate = true;
        }
        if (hasAppend && hasTruncate)
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    // APPEND: copy the existing content into the new stream first
                    InputStream is = getInputStream(e);
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    is.transferTo(os);
                    is.close();
                    return os;
                }
                return getOutputStream(new Entry(e, Entry.NEW));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(new Entry(path, Entry.NEW, false, defaultMethod));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    // Rejects null options, non-StandardOpenOption options, and the
    // APPEND + TRUNCATE_EXISTING combination.
    private void checkOptions(Set<? extends OpenOption> options) {
        // check for options of null type and option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }


    // Returns an output SeekableByteChannel for either
    // (1) writing the contents of a new entry, if the entry doesn't exist, or
    // (2) updating/replacing the contents of an existing entry.
    // Note: The content is not compressed.
    private class EntryOutputChannel extends ByteArrayChannel {
        Entry e;

        EntryOutputChannel(Entry e) throws IOException {
            super(e.size > 0? (int)e.size : 8192, false);
            this.e = e;
            if (e.mtime == -1)
                e.mtime = System.currentTimeMillis();
            if (e.method == -1)
                e.method = defaultMethod;
            // store size, compressed size, and crc-32 in datadescriptor
            e.flag = FLAG_DATADESCR;
            if (zc.isUTF8())
                e.flag |= FLAG_USE_UTF8;
        }

        @Override
        public void close() throws IOException {
            // hand the buffered bytes to the entry and register the update
            e.bytes = toByteArray();
            e.size = e.bytes.length;
            e.crc = -1;
            super.close();
            update(e);
        }
    }

    // Compression method for new entries; attrs are currently ignored.
    private int getCompressMethod(FileAttribute<?>... attrs) {
        return defaultMethod;
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dumps the entry data into a regular
    // file on the default file system and creates a FileChannel on top of
    // it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();    // only need a readlock, the "update()" will obtain
                            // the writelock when the channel is closed
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e != null) {
                    if (e.isDir() || options.contains(CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    SeekableByteChannel sbc =
                        new EntryOutputChannel(new Entry(e, Entry.NEW));
                    if (options.contains(APPEND)) {
                        try (InputStream is = getInputStream(e)) {  // copyover
                            byte[] buf = new byte[8192];
                            ByteBuffer bb = ByteBuffer.wrap(buf);
                            int n;
                            while ((n = is.read(buf)) != -1) {
                                bb.position(0);
                                bb.limit(n);
                                sbc.write(bb);
                            }
                        }
                    }
                    return sbc;
                }
                if (!options.contains(CREATE) && !options.contains(CREATE_NEW))
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return new EntryOutputChannel(
                    new Entry(path, Entry.NEW, false, getCompressMethod(attrs)));

            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                try (InputStream is = getInputStream(e)) {
                    // TBD: if (e.size < NNNNN);
                    return new ByteArrayChannel(is.readAllBytes(), true);
                }
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = getCompressMethod(attrs);
            }
            // is there a better way to hook into the FileChannel's close method?
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    // mapping the backing temp file would leak past close()
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        // record the new content for the next sync()
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // temp files created for entries; deleted on close()
    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());

    // Creates a temp file next to the zip file; when 'path' names an existing
    // entry, its content is copied into the temp file.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
    // Walks up the parent chain of 'path', throwing NoSuchFileException for
    // the first parent that has no inode; the root is always accepted.
    private void checkParents(byte[] path) throws IOException {
        beginRead();
        try {
            while ((path = getParent(path)) != null &&
                    path != ROOTPATH) {
                if (!inodes.containsKey(IndexNode.keyOf(path))) {
                    throw new NoSuchFileException(getString(path));
                }
            }
        } finally {
            endRead();
        }
    }

    private static byte[] ROOTPATH = new byte[] { '/' };

    // Returns the parent path of 'path', or ROOTPATH (identity-compared by
    // checkParents) when the parent is the root.
    private static byte[] getParent(byte[] path) {
        int off = getParentOff(path);
        if (off <= 1)
            return ROOTPATH;
        return Arrays.copyOf(path, off);
    }

    // Returns the offset of the '/' that ends the parent component,
    // ignoring a trailing '/' on directory paths.
    private static int getParentOff(byte[] path) {
        int off = path.length - 1;
        if (off > 0 && path[off] == '/')  // isDirectory
            off--;
        while (off > 0 && path[off] != '/') { off--; }
        return off;
    }

    private final void beginWrite() {
        rwlock.writeLock().lock();
    }

    private final void endWrite() {
        rwlock.writeLock().unlock();
    }

    private final void beginRead() {
        rwlock.readLock().lock();
    }

    private final void endRead() {
        rwlock.readLock().unlock();
    }

    ///////////////////////////////////////////////////////////////////

    private volatile boolean isOpen = true;
    private final SeekableByteChannel ch;     // channel to the zipfile
    final byte[] cen;                         // CEN & ENDHDR
    private END end;
    private long locpos;                      // position of first LOC header (usually 0)

    private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

    // name -> pos (in cen), IndexNode itself can be used as a "key"
    private LinkedHashMap<IndexNode, IndexNode> inodes;

    final byte[] getBytes(String name) {
        return zc.getBytes(name);
    }

    final String getString(byte[] name) {
        return zc.toString(name);
    }

    @SuppressWarnings("deprecation")
    protected void finalize() throws IOException {
        close();
    }

    // Reads len bytes of data from the specified offset into buf.
    // Returns the total number of bytes read.
    // Each/every byte read from here (except the cen, which is mapped).
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        // synchronized: the shared channel's position is mutated here
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        // scan at most END_MAXLEN bytes from the file tail, in overlapping blocks
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P' &&
                    buf[i+1] == (byte)'K' &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    // candidate is genuine only if its comment runs to EOF
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    // try if there is zip64 end;
                    byte[] loc64 = new byte[ZIP64_LOCHDR];
                    if (end.endpos < ZIP64_LOCHDR ||
                        readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                        != loc64.length ||
                        !locator64SigAt(loc64, 0)) {
                        return end;    // no zip64 locator record
                    }
                    long end64pos = ZIP64_LOCOFF(loc64);
                    byte[] end64buf = new byte[ZIP64_ENDHDR];
                    if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                        != end64buf.length ||
                        !end64SigAt(end64buf, 0)) {
                        return end;    // locator points at no valid zip64 END
                    }
                    // end64 found,
                    long cenlen64 = ZIP64_ENDSIZ(end64buf);
                    long cenoff64 = ZIP64_ENDOFF(end64buf);
                    long centot64 = ZIP64_ENDTOT(end64buf);
                    // double-check: zip64 values must match the END record
                    // unless the END field holds the "use zip64" sentinel
                    if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL ||
                        cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL ||
                        centot64 != end.centot && end.centot != ZIP64_MINVAL32) {
                        return end;
                    }
                    // to use the end64 values
                    end.cenlen = cenlen64;
                    end.cenoff = cenoff64;
                    end.centot = (int)centot64;     // assume total < 2g
                    end.endpos = end64pos;
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }

    // Reads zip file central directory. Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    // Parses the central directory into 'inodes' and returns the raw
    // CEN+END bytes (or null when only an END header is present).
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;             // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0) {
                zerror("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                zerror("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos, nlen);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    // true once any entry has been added, replaced or deleted
    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before using it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // Removes 'inode' from the directory tree and the inode map.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Adds or replaces entry 'e' in the inode map and, for NEW/FILECH/COPY
    // entries, links it as a child of its parent directory node.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
1159 private long copyLOCEntry(Entry e, boolean updateHeader, 1160 OutputStream os, 1161 long written, byte[] buf) 1162 throws IOException 1163 { 1164 long locoff = e.locoff; // where to read 1165 e.locoff = written; // update the e.locoff with new value 1166 1167 // calculate the size need to write out 1168 long size = 0; 1169 // if there is A ext 1170 if ((e.flag & FLAG_DATADESCR) != 0) { 1171 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1172 size = 24; 1173 else 1174 size = 16; 1175 } 1176 // read loc, use the original loc.elen/nlen 1177 // 1178 // an extra byte after loc is read, which should be the first byte of the 1179 // 'name' field of the loc. if this byte is '/', which means the original 1180 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1181 // is used to output the loc, in which the leading "/" will be removed 1182 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1183 throw new ZipException("loc: reading failed"); 1184 1185 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1186 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1187 size += e.csize; 1188 written = e.writeLOC(os) + size; 1189 } else { 1190 os.write(buf, 0, LOCHDR); // write out the loc header 1191 locoff += LOCHDR; 1192 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1193 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1194 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1195 written = LOCHDR + size; 1196 } 1197 int n; 1198 while (size > 0 && 1199 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1200 { 1201 if (size < n) 1202 n = (int)size; 1203 os.write(buf, 0, n); 1204 size -= n; 1205 locoff += n; 1206 } 1207 return written; 1208 } 1209 1210 private long writeEntry(Entry e, OutputStream os, byte[] buf) 1211 throws IOException { 1212 1213 if (e.bytes == null && e.file == null) // dir, 0-length data 1214 return 0; 1215 1216 long written = 0; 1217 try (OutputStream os2 = e.method == METHOD_STORED ? 
1218 new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) { 1219 if (e.bytes != null) { // in-memory 1220 os2.write(e.bytes, 0, e.bytes.length); 1221 } else if (e.file != null) { // tmp file 1222 if (e.type == Entry.NEW || e.type == Entry.FILECH) { 1223 try (InputStream is = Files.newInputStream(e.file)) { 1224 is.transferTo(os2); 1225 } 1226 } 1227 Files.delete(e.file); 1228 tmppaths.remove(e.file); 1229 } 1230 } 1231 written += e.csize; 1232 if ((e.flag & FLAG_DATADESCR) != 0) { 1233 written += e.writeEXT(os); 1234 } 1235 return written; 1236 } 1237 1238 // sync the zip file system, if there is any udpate 1239 private void sync() throws IOException { 1240 1241 if (!hasUpdate) 1242 return; 1243 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1244 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1245 { 1246 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1247 long written = 0; 1248 byte[] buf = new byte[8192]; 1249 Entry e = null; 1250 1251 // write loc 1252 for (IndexNode inode : inodes.values()) { 1253 if (inode instanceof Entry) { // an updated inode 1254 e = (Entry)inode; 1255 try { 1256 if (e.type == Entry.COPY) { 1257 // entry copy: the only thing changed is the "name" 1258 // and "nlen" in LOC header, so we udpate/rewrite the 1259 // LOC in new file and simply copy the rest (data and 1260 // ext) without enflating/deflating from the old zip 1261 // file LOC entry. 
1262 written += copyLOCEntry(e, true, os, written, buf); 1263 } else { // NEW, FILECH or CEN 1264 e.locoff = written; 1265 written += e.writeLOC(os); // write loc header 1266 written += writeEntry(e, os, buf); 1267 } 1268 elist.add(e); 1269 } catch (IOException x) { 1270 x.printStackTrace(); // skip any in-accurate entry 1271 } 1272 } else { // unchanged inode 1273 if (inode.pos == -1) { 1274 continue; // pseudo directory node 1275 } 1276 if (inode.name.length == 1 && inode.name[0] == '/') { 1277 continue; // no root '/' directory even it 1278 // exits in original zip/jar file. 1279 } 1280 e = Entry.readCEN(this, inode); 1281 try { 1282 written += copyLOCEntry(e, false, os, written, buf); 1283 elist.add(e); 1284 } catch (IOException x) { 1285 x.printStackTrace(); // skip any wrong entry 1286 } 1287 } 1288 } 1289 1290 // now write back the cen and end table 1291 end.cenoff = written; 1292 for (Entry entry : elist) { 1293 written += entry.writeCEN(os); 1294 } 1295 end.centot = elist.size(); 1296 end.cenlen = written - end.cenoff; 1297 end.write(os, written, forceEnd64); 1298 } 1299 1300 ch.close(); 1301 Files.delete(zfpath); 1302 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1303 hasUpdate = false; // clear 1304 } 1305 1306 IndexNode getInode(byte[] path) { 1307 if (path == null) 1308 throw new NullPointerException("path"); 1309 return inodes.get(IndexNode.keyOf(path)); 1310 } 1311 1312 Entry getEntry(byte[] path) throws IOException { 1313 IndexNode inode = getInode(path); 1314 if (inode instanceof Entry) 1315 return (Entry)inode; 1316 if (inode == null || inode.pos == -1) 1317 return null; 1318 return Entry.readCEN(this, inode); 1319 } 1320 1321 public void deleteFile(byte[] path, boolean failIfNotExists) 1322 throws IOException 1323 { 1324 checkWritable(); 1325 1326 IndexNode inode = getInode(path); 1327 if (inode == null) { 1328 if (path != null && path.length == 0) 1329 throw new ZipException("root directory </> can't not be delete"); 1330 if (failIfNotExists) 
1331 throw new NoSuchFileException(getString(path)); 1332 } else { 1333 if (inode.isDir() && inode.child != null) 1334 throw new DirectoryNotEmptyException(getString(path)); 1335 updateDelete(inode); 1336 } 1337 } 1338 1339 // Returns an out stream for either 1340 // (1) writing the contents of a new entry, if the entry exits, or 1341 // (2) updating/replacing the contents of the specified existing entry. 1342 private OutputStream getOutputStream(Entry e) throws IOException { 1343 1344 if (e.mtime == -1) 1345 e.mtime = System.currentTimeMillis(); 1346 if (e.method == -1) 1347 e.method = defaultMethod; 1348 // store size, compressed size, and crc-32 in datadescr 1349 e.flag = FLAG_DATADESCR; 1350 if (zc.isUTF8()) 1351 e.flag |= FLAG_USE_UTF8; 1352 OutputStream os; 1353 if (useTempFile) { 1354 e.file = getTempPathForEntry(null); 1355 os = Files.newOutputStream(e.file, WRITE); 1356 } else { 1357 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1358 } 1359 return new EntryOutputStream(e, os); 1360 } 1361 1362 private class EntryOutputStream extends FilterOutputStream { 1363 private Entry e; 1364 private long written; 1365 private boolean isClosed; 1366 1367 EntryOutputStream(Entry e, OutputStream os) throws IOException { 1368 super(os); 1369 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1370 // this.written = 0; 1371 } 1372 1373 @Override 1374 public synchronized void write(int b) throws IOException { 1375 out.write(b); 1376 written += 1; 1377 } 1378 1379 @Override 1380 public synchronized void write(byte b[], int off, int len) 1381 throws IOException { 1382 out.write(b, off, len); 1383 written += len; 1384 } 1385 1386 @Override 1387 public synchronized void close() throws IOException { 1388 if (isClosed) { 1389 return; 1390 } 1391 isClosed = true; 1392 e.size = written; 1393 if (out instanceof ByteArrayOutputStream) 1394 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1395 super.close(); 1396 update(e); 1397 } 1398 } 1399 1400 // 
Wrapper output stream class to write out a "stored" entry. 1401 // (1) this class does not close the underlying out stream when 1402 // being closed. 1403 // (2) no need to be "synchronized", only used by sync() 1404 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1405 private Entry e; 1406 private CRC32 crc; 1407 private long written; 1408 private boolean isClosed; 1409 1410 EntryOutputStreamCRC32(Entry e, OutputStream os) throws IOException { 1411 super(os); 1412 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1413 this.crc = new CRC32(); 1414 } 1415 1416 @Override 1417 public void write(int b) throws IOException { 1418 out.write(b); 1419 crc.update(b); 1420 written += 1; 1421 } 1422 1423 @Override 1424 public void write(byte b[], int off, int len) 1425 throws IOException { 1426 out.write(b, off, len); 1427 crc.update(b, off, len); 1428 written += len; 1429 } 1430 1431 @Override 1432 public void close() throws IOException { 1433 if (isClosed) 1434 return; 1435 isClosed = true; 1436 e.size = e.csize = written; 1437 e.crc = crc.getValue(); 1438 } 1439 } 1440 1441 // Wrapper output stream class to write out a "deflated" entry. 1442 // (1) this class does not close the underlying out stream when 1443 // being closed. 
1444 // (2) no need to be "synchronized", only used by sync() 1445 private class EntryOutputStreamDef extends DeflaterOutputStream { 1446 private CRC32 crc; 1447 private Entry e; 1448 private boolean isClosed; 1449 1450 EntryOutputStreamDef(Entry e, OutputStream os) throws IOException { 1451 super(os, getDeflater()); 1452 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1453 this.crc = new CRC32(); 1454 } 1455 1456 @Override 1457 public void write(byte b[], int off, int len) 1458 throws IOException { 1459 super.write(b, off, len); 1460 crc.update(b, off, len); 1461 } 1462 1463 @Override 1464 public void close() throws IOException { 1465 if (isClosed) 1466 return; 1467 isClosed = true; 1468 finish(); 1469 e.size = def.getBytesRead(); 1470 e.csize = def.getBytesWritten(); 1471 e.crc = crc.getValue(); 1472 } 1473 } 1474 1475 private InputStream getInputStream(Entry e) 1476 throws IOException 1477 { 1478 InputStream eis = null; 1479 1480 if (e.type == Entry.NEW) { 1481 // now bytes & file is uncompressed. 1482 if (e.bytes != null) 1483 return new ByteArrayInputStream(e.bytes); 1484 else if (e.file != null) 1485 return Files.newInputStream(e.file); 1486 else 1487 throw new ZipException("update entry data is missing"); 1488 } else if (e.type == Entry.FILECH) { 1489 // FILECH result is un-compressed. 
1490 eis = Files.newInputStream(e.file); 1491 // TBD: wrap to hook close() 1492 // streams.add(eis); 1493 return eis; 1494 } else { // untouched CEN or COPY 1495 eis = new EntryInputStream(e, ch); 1496 } 1497 if (e.method == METHOD_DEFLATED) { 1498 // MORE: Compute good size for inflater stream: 1499 long bufSize = e.size + 2; // Inflater likes a bit of slack 1500 if (bufSize > 65536) 1501 bufSize = 8192; 1502 final long size = e.size; 1503 eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) { 1504 private boolean isClosed = false; 1505 public void close() throws IOException { 1506 if (!isClosed) { 1507 releaseInflater(inf); 1508 this.in.close(); 1509 isClosed = true; 1510 streams.remove(this); 1511 } 1512 } 1513 // Override fill() method to provide an extra "dummy" byte 1514 // at the end of the input stream. This is required when 1515 // using the "nowrap" Inflater option. (it appears the new 1516 // zlib in 7 does not need it, but keep it for now) 1517 protected void fill() throws IOException { 1518 if (eof) { 1519 throw new EOFException( 1520 "Unexpected end of ZLIB input stream"); 1521 } 1522 len = this.in.read(buf, 0, buf.length); 1523 if (len == -1) { 1524 buf[0] = 0; 1525 len = 1; 1526 eof = true; 1527 } 1528 inf.setInput(buf, 0, len); 1529 } 1530 private boolean eof; 1531 1532 public int available() throws IOException { 1533 if (isClosed) 1534 return 0; 1535 long avail = size - inf.getBytesWritten(); 1536 return avail > (long) Integer.MAX_VALUE ? 1537 Integer.MAX_VALUE : (int) avail; 1538 } 1539 }; 1540 } else if (e.method == METHOD_STORED) { 1541 // TBD: wrap/ it does not seem necessary 1542 } else { 1543 throw new ZipException("invalid compression method"); 1544 } 1545 streams.add(eis); 1546 return eis; 1547 } 1548 1549 // Inner class implementing the input stream used to read 1550 // a (possibly compressed) zip file entry. 
1551 private class EntryInputStream extends InputStream { 1552 private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might 1553 // point to a new channel after sync() 1554 private long pos; // current position within entry data 1555 protected long rem; // number of remaining bytes within entry 1556 1557 EntryInputStream(Entry e, SeekableByteChannel zfch) 1558 throws IOException 1559 { 1560 this.zfch = zfch; 1561 rem = e.csize; 1562 pos = e.locoff; 1563 if (pos == -1) { 1564 Entry e2 = getEntry(e.name); 1565 if (e2 == null) { 1566 throw new ZipException("invalid loc for entry <" + e.name + ">"); 1567 } 1568 pos = e2.locoff; 1569 } 1570 pos = -pos; // lazy initialize the real data offset 1571 } 1572 1573 public int read(byte b[], int off, int len) throws IOException { 1574 ensureOpen(); 1575 initDataPos(); 1576 if (rem == 0) { 1577 return -1; 1578 } 1579 if (len <= 0) { 1580 return 0; 1581 } 1582 if (len > rem) { 1583 len = (int) rem; 1584 } 1585 // readFullyAt() 1586 long n = 0; 1587 ByteBuffer bb = ByteBuffer.wrap(b); 1588 bb.position(off); 1589 bb.limit(off + len); 1590 synchronized(zfch) { 1591 n = zfch.position(pos).read(bb); 1592 } 1593 if (n > 0) { 1594 pos += n; 1595 rem -= n; 1596 } 1597 if (rem == 0) { 1598 close(); 1599 } 1600 return (int)n; 1601 } 1602 1603 public int read() throws IOException { 1604 byte[] b = new byte[1]; 1605 if (read(b, 0, 1) == 1) { 1606 return b[0] & 0xff; 1607 } else { 1608 return -1; 1609 } 1610 } 1611 1612 public long skip(long n) throws IOException { 1613 ensureOpen(); 1614 if (n > rem) 1615 n = rem; 1616 pos += n; 1617 rem -= n; 1618 if (rem == 0) { 1619 close(); 1620 } 1621 return n; 1622 } 1623 1624 public int available() { 1625 return rem > Integer.MAX_VALUE ? 
Integer.MAX_VALUE : (int) rem; 1626 } 1627 1628 public void close() { 1629 rem = 0; 1630 streams.remove(this); 1631 } 1632 1633 private void initDataPos() throws IOException { 1634 if (pos <= 0) { 1635 pos = -pos + locpos; 1636 byte[] buf = new byte[LOCHDR]; 1637 if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) { 1638 throw new ZipException("invalid loc " + pos + " for entry reading"); 1639 } 1640 pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf); 1641 } 1642 } 1643 } 1644 1645 static void zerror(String msg) throws ZipException { 1646 throw new ZipException(msg); 1647 } 1648 1649 // Maxmum number of de/inflater we cache 1650 private final int MAX_FLATER = 20; 1651 // List of available Inflater objects for decompression 1652 private final List<Inflater> inflaters = new ArrayList<>(); 1653 1654 // Gets an inflater from the list of available inflaters or allocates 1655 // a new one. 1656 private Inflater getInflater() { 1657 synchronized (inflaters) { 1658 int size = inflaters.size(); 1659 if (size > 0) { 1660 Inflater inf = inflaters.remove(size - 1); 1661 return inf; 1662 } else { 1663 return new Inflater(true); 1664 } 1665 } 1666 } 1667 1668 // Releases the specified inflater to the list of available inflaters. 1669 private void releaseInflater(Inflater inf) { 1670 synchronized (inflaters) { 1671 if (inflaters.size() < MAX_FLATER) { 1672 inf.reset(); 1673 inflaters.add(inf); 1674 } else { 1675 inf.end(); 1676 } 1677 } 1678 } 1679 1680 // List of available Deflater objects for compression 1681 private final List<Deflater> deflaters = new ArrayList<>(); 1682 1683 // Gets a deflater from the list of available deflaters or allocates 1684 // a new one. 
1685 private Deflater getDeflater() { 1686 synchronized (deflaters) { 1687 int size = deflaters.size(); 1688 if (size > 0) { 1689 Deflater def = deflaters.remove(size - 1); 1690 return def; 1691 } else { 1692 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1693 } 1694 } 1695 } 1696 1697 // End of central directory record 1698 static class END { 1699 // these 2 fields are not used by anyone and write() uses "0" 1700 // int disknum; 1701 // int sdisknum; 1702 int endsub; // endsub 1703 int centot; // 4 bytes 1704 long cenlen; // 4 bytes 1705 long cenoff; // 4 bytes 1706 int comlen; // comment length 1707 byte[] comment; 1708 1709 /* members of Zip64 end of central directory locator */ 1710 // int diskNum; 1711 long endpos; 1712 // int disktot; 1713 1714 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 1715 boolean hasZip64 = forceEnd64; // false; 1716 long xlen = cenlen; 1717 long xoff = cenoff; 1718 if (xlen >= ZIP64_MINVAL) { 1719 xlen = ZIP64_MINVAL; 1720 hasZip64 = true; 1721 } 1722 if (xoff >= ZIP64_MINVAL) { 1723 xoff = ZIP64_MINVAL; 1724 hasZip64 = true; 1725 } 1726 int count = centot; 1727 if (count >= ZIP64_MINVAL32) { 1728 count = ZIP64_MINVAL32; 1729 hasZip64 = true; 1730 } 1731 if (hasZip64) { 1732 long off64 = offset; 1733 //zip64 end of central directory record 1734 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1735 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1736 writeShort(os, 45); // version made by 1737 writeShort(os, 45); // version needed to extract 1738 writeInt(os, 0); // number of this disk 1739 writeInt(os, 0); // central directory start disk 1740 writeLong(os, centot); // number of directory entries on disk 1741 writeLong(os, centot); // number of directory entries 1742 writeLong(os, cenlen); // length of central directory 1743 writeLong(os, cenoff); // offset of central directory 1744 1745 //zip64 end of central directory locator 1746 writeInt(os, ZIP64_LOCSIG); // zip64 END 
locator signature 1747 writeInt(os, 0); // zip64 END start disk 1748 writeLong(os, off64); // offset of zip64 END 1749 writeInt(os, 1); // total number of disks (?) 1750 } 1751 writeInt(os, ENDSIG); // END record signature 1752 writeShort(os, 0); // number of this disk 1753 writeShort(os, 0); // central directory start disk 1754 writeShort(os, count); // number of directory entries on disk 1755 writeShort(os, count); // total number of directory entries 1756 writeInt(os, xlen); // length of central directory 1757 writeInt(os, xoff); // offset of central directory 1758 if (comment != null) { // zip file comment 1759 writeShort(os, comment.length); 1760 writeBytes(os, comment); 1761 } else { 1762 writeShort(os, 0); 1763 } 1764 } 1765 } 1766 1767 // Internal node that links a "name" to its pos in cen table. 1768 // The node itself can be used as a "key" to lookup itself in 1769 // the HashMap inodes. 1770 static class IndexNode { 1771 byte[] name; 1772 int hashcode; // node is hashable/hashed by its name 1773 int pos = -1; // position in cen table, -1 menas the 1774 // entry does not exists in zip file 1775 boolean isdir; 1776 1777 IndexNode(byte[] name, boolean isdir) { 1778 name(name); 1779 this.isdir = isdir; 1780 this.pos = -1; 1781 } 1782 1783 IndexNode(byte[] name, int pos) { 1784 name(name); 1785 this.pos = pos; 1786 } 1787 1788 // constructor for cenInit() (1) remove tailing '/' (2) pad leading '/' 1789 IndexNode(byte[] cen, int pos, int nlen) { 1790 int noff = pos + CENHDR; 1791 if (cen[noff + nlen - 1] == '/') { 1792 isdir = true; 1793 nlen--; 1794 } 1795 if (nlen > 0 && cen[noff] == '/') { 1796 name = Arrays.copyOfRange(cen, noff, noff + nlen); 1797 } else { 1798 name = new byte[nlen + 1]; 1799 System.arraycopy(cen, noff, name, 1, nlen); 1800 name[0] = '/'; 1801 } 1802 name(name); 1803 this.pos = pos; 1804 } 1805 1806 private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>(); 1807 1808 final static IndexNode keyOf(byte[] name) { // get a 
lookup key; 1809 IndexNode key = cachedKey.get(); 1810 if (key == null) { 1811 key = new IndexNode(name, -1); 1812 cachedKey.set(key); 1813 } 1814 return key.as(name); 1815 } 1816 1817 final void name(byte[] name) { 1818 this.name = name; 1819 this.hashcode = Arrays.hashCode(name); 1820 } 1821 1822 final IndexNode as(byte[] name) { // reuse the node, mostly 1823 name(name); // as a lookup "key" 1824 return this; 1825 } 1826 1827 boolean isDir() { 1828 return isdir; 1829 } 1830 1831 public boolean equals(Object other) { 1832 if (!(other instanceof IndexNode)) { 1833 return false; 1834 } 1835 if (other instanceof ParentLookup) { 1836 return ((ParentLookup)other).equals(this); 1837 } 1838 return Arrays.equals(name, ((IndexNode)other).name); 1839 } 1840 1841 public int hashCode() { 1842 return hashcode; 1843 } 1844 1845 IndexNode() {} 1846 IndexNode sibling; 1847 IndexNode child; // 1st child 1848 } 1849 1850 static class Entry extends IndexNode implements ZipFileAttributes { 1851 1852 static final int CEN = 1; // entry read from cen 1853 static final int NEW = 2; // updated contents in bytes or file 1854 static final int FILECH = 3; // fch update in "file" 1855 static final int COPY = 4; // copy of a CEN entry 1856 1857 byte[] bytes; // updated content bytes 1858 Path file; // use tmp file to store bytes; 1859 int type = CEN; // default is the entry read from cen 1860 1861 // entry attributes 1862 int version; 1863 int flag; 1864 int method = -1; // compression method 1865 long mtime = -1; // last modification time (in DOS time) 1866 long atime = -1; // last access time 1867 long ctime = -1; // create time 1868 long crc = -1; // crc-32 of entry data 1869 long csize = -1; // compressed size of entry data 1870 long size = -1; // uncompressed size of entry data 1871 byte[] extra; 1872 1873 // cen 1874 1875 // these fields are not used by anyone and writeCEN uses "0" 1876 // int versionMade; 1877 // int disk; 1878 // int attrs; 1879 // long attrsEx; 1880 long locoff; 1881 
byte[] comment; 1882 1883 Entry() {} 1884 1885 Entry(byte[] name, boolean isdir, int method) { 1886 name(name); 1887 this.isdir = isdir; 1888 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1889 this.crc = 0; 1890 this.size = 0; 1891 this.csize = 0; 1892 this.method = method; 1893 } 1894 1895 Entry(byte[] name, int type, boolean isdir, int method) { 1896 this(name, isdir, method); 1897 this.type = type; 1898 } 1899 1900 Entry (Entry e, int type) { 1901 name(e.name); 1902 this.isdir = e.isdir; 1903 this.version = e.version; 1904 this.ctime = e.ctime; 1905 this.atime = e.atime; 1906 this.mtime = e.mtime; 1907 this.crc = e.crc; 1908 this.size = e.size; 1909 this.csize = e.csize; 1910 this.method = e.method; 1911 this.extra = e.extra; 1912 /* 1913 this.versionMade = e.versionMade; 1914 this.disk = e.disk; 1915 this.attrs = e.attrs; 1916 this.attrsEx = e.attrsEx; 1917 */ 1918 this.locoff = e.locoff; 1919 this.comment = e.comment; 1920 this.type = type; 1921 } 1922 1923 Entry (byte[] name, Path file, int type) { 1924 this(name, type, false, METHOD_STORED); 1925 this.file = file; 1926 } 1927 1928 int version() throws ZipException { 1929 if (method == METHOD_DEFLATED) 1930 return 20; 1931 else if (method == METHOD_STORED) 1932 return 10; 1933 throw new ZipException("unsupported compression method"); 1934 } 1935 1936 ///////////////////// CEN ////////////////////// 1937 static Entry readCEN(ZipFileSystem zipfs, IndexNode inode) 1938 throws IOException 1939 { 1940 return new Entry().cen(zipfs, inode); 1941 } 1942 1943 private Entry cen(ZipFileSystem zipfs, IndexNode inode) 1944 throws IOException 1945 { 1946 byte[] cen = zipfs.cen; 1947 int pos = inode.pos; 1948 if (!cenSigAt(cen, pos)) 1949 zerror("invalid CEN header (bad signature)"); 1950 version = CENVER(cen, pos); 1951 flag = CENFLG(cen, pos); 1952 method = CENHOW(cen, pos); 1953 mtime = dosToJavaTime(CENTIM(cen, pos)); 1954 crc = CENCRC(cen, pos); 1955 csize = CENSIZ(cen, pos); 1956 size = 
CENLEN(cen, pos); 1957 int nlen = CENNAM(cen, pos); 1958 int elen = CENEXT(cen, pos); 1959 int clen = CENCOM(cen, pos); 1960 /* 1961 versionMade = CENVEM(cen, pos); 1962 disk = CENDSK(cen, pos); 1963 attrs = CENATT(cen, pos); 1964 attrsEx = CENATX(cen, pos); 1965 */ 1966 locoff = CENOFF(cen, pos); 1967 pos += CENHDR; 1968 this.name = inode.name; 1969 this.isdir = inode.isdir; 1970 this.hashcode = inode.hashcode; 1971 1972 pos += nlen; 1973 if (elen > 0) { 1974 extra = Arrays.copyOfRange(cen, pos, pos + elen); 1975 pos += elen; 1976 readExtra(zipfs); 1977 } 1978 if (clen > 0) { 1979 comment = Arrays.copyOfRange(cen, pos, pos + clen); 1980 } 1981 return this; 1982 } 1983 1984 int writeCEN(OutputStream os) throws IOException { 1985 int version0 = version(); 1986 long csize0 = csize; 1987 long size0 = size; 1988 long locoff0 = locoff; 1989 int elen64 = 0; // extra for ZIP64 1990 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 1991 int elenEXTT = 0; // extra for Extended Timestamp 1992 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 1993 1994 byte[] zname = isdir ? toDirectoryPath(name) : name; 1995 1996 // confirm size/length 1997 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 1998 int elen = (extra != null) ? extra.length : 0; 1999 int eoff = 0; 2000 int clen = (comment != null) ? 
comment.length : 0; 2001 if (csize >= ZIP64_MINVAL) { 2002 csize0 = ZIP64_MINVAL; 2003 elen64 += 8; // csize(8) 2004 } 2005 if (size >= ZIP64_MINVAL) { 2006 size0 = ZIP64_MINVAL; // size(8) 2007 elen64 += 8; 2008 } 2009 if (locoff >= ZIP64_MINVAL) { 2010 locoff0 = ZIP64_MINVAL; 2011 elen64 += 8; // offset(8) 2012 } 2013 if (elen64 != 0) { 2014 elen64 += 4; // header and data sz 4 bytes 2015 } 2016 while (eoff + 4 < elen) { 2017 int tag = SH(extra, eoff); 2018 int sz = SH(extra, eoff + 2); 2019 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2020 foundExtraTime = true; 2021 } 2022 eoff += (4 + sz); 2023 } 2024 if (!foundExtraTime) { 2025 if (isWindows) { // use NTFS 2026 elenNTFS = 36; // total 36 bytes 2027 } else { // Extended Timestamp otherwise 2028 elenEXTT = 9; // only mtime in cen 2029 } 2030 } 2031 writeInt(os, CENSIG); // CEN header signature 2032 if (elen64 != 0) { 2033 writeShort(os, 45); // ver 4.5 for zip64 2034 writeShort(os, 45); 2035 } else { 2036 writeShort(os, version0); // version made by 2037 writeShort(os, version0); // version needed to extract 2038 } 2039 writeShort(os, flag); // general purpose bit flag 2040 writeShort(os, method); // compression method 2041 // last modification time 2042 writeInt(os, (int)javaToDosTime(mtime)); 2043 writeInt(os, crc); // crc-32 2044 writeInt(os, csize0); // compressed size 2045 writeInt(os, size0); // uncompressed size 2046 writeShort(os, nlen); 2047 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2048 2049 if (comment != null) { 2050 writeShort(os, Math.min(clen, 0xffff)); 2051 } else { 2052 writeShort(os, 0); 2053 } 2054 writeShort(os, 0); // starting disk number 2055 writeShort(os, 0); // internal file attributes (unused) 2056 writeInt(os, 0); // external file attributes (unused) 2057 writeInt(os, locoff0); // relative offset of local header 2058 writeBytes(os, zname, 1, nlen); 2059 if (elen64 != 0) { 2060 writeShort(os, EXTID_ZIP64);// Zip64 extra 2061 writeShort(os, elen64 - 4); // size of "this" 
extra block 2062 if (size0 == ZIP64_MINVAL) 2063 writeLong(os, size); 2064 if (csize0 == ZIP64_MINVAL) 2065 writeLong(os, csize); 2066 if (locoff0 == ZIP64_MINVAL) 2067 writeLong(os, locoff); 2068 } 2069 if (elenNTFS != 0) { 2070 writeShort(os, EXTID_NTFS); 2071 writeShort(os, elenNTFS - 4); 2072 writeInt(os, 0); // reserved 2073 writeShort(os, 0x0001); // NTFS attr tag 2074 writeShort(os, 24); 2075 writeLong(os, javaToWinTime(mtime)); 2076 writeLong(os, javaToWinTime(atime)); 2077 writeLong(os, javaToWinTime(ctime)); 2078 } 2079 if (elenEXTT != 0) { 2080 writeShort(os, EXTID_EXTT); 2081 writeShort(os, elenEXTT - 4); 2082 if (ctime == -1) 2083 os.write(0x3); // mtime and atime 2084 else 2085 os.write(0x7); // mtime, atime and ctime 2086 writeInt(os, javaToUnixTime(mtime)); 2087 } 2088 if (extra != null) // whatever not recognized 2089 writeBytes(os, extra); 2090 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2091 writeBytes(os, comment); 2092 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2093 } 2094 2095 ///////////////////// LOC ////////////////////// 2096 2097 int writeLOC(OutputStream os) throws IOException { 2098 writeInt(os, LOCSIG); // LOC header signature 2099 byte[] zname = isdir ? toDirectoryPath(name) : name; 2100 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2101 int elen = (extra != null) ? 
extra.length : 0; 2102 boolean foundExtraTime = false; // if extra timestamp present 2103 int eoff = 0; 2104 int elen64 = 0; 2105 int elenEXTT = 0; 2106 int elenNTFS = 0; 2107 if ((flag & FLAG_DATADESCR) != 0) { 2108 writeShort(os, version()); // version needed to extract 2109 writeShort(os, flag); // general purpose bit flag 2110 writeShort(os, method); // compression method 2111 // last modification time 2112 writeInt(os, (int)javaToDosTime(mtime)); 2113 // store size, uncompressed size, and crc-32 in data descriptor 2114 // immediately following compressed entry data 2115 writeInt(os, 0); 2116 writeInt(os, 0); 2117 writeInt(os, 0); 2118 } else { 2119 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2120 elen64 = 20; //headid(2) + size(2) + size(8) + csize(8) 2121 writeShort(os, 45); // ver 4.5 for zip64 2122 } else { 2123 writeShort(os, version()); // version needed to extract 2124 } 2125 writeShort(os, flag); // general purpose bit flag 2126 writeShort(os, method); // compression method 2127 // last modification time 2128 writeInt(os, (int)javaToDosTime(mtime)); 2129 writeInt(os, crc); // crc-32 2130 if (elen64 != 0) { 2131 writeInt(os, ZIP64_MINVAL); 2132 writeInt(os, ZIP64_MINVAL); 2133 } else { 2134 writeInt(os, csize); // compressed size 2135 writeInt(os, size); // uncompressed size 2136 } 2137 } 2138 while (eoff + 4 < elen) { 2139 int tag = SH(extra, eoff); 2140 int sz = SH(extra, eoff + 2); 2141 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2142 foundExtraTime = true; 2143 } 2144 eoff += (4 + sz); 2145 } 2146 if (!foundExtraTime) { 2147 if (isWindows) { 2148 elenNTFS = 36; // NTFS, total 36 bytes 2149 } else { // on unix use "ext time" 2150 elenEXTT = 9; 2151 if (atime != -1) 2152 elenEXTT += 4; 2153 if (ctime != -1) 2154 elenEXTT += 4; 2155 } 2156 } 2157 writeShort(os, nlen); 2158 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2159 writeBytes(os, zname, 1, nlen); 2160 if (elen64 != 0) { 2161 writeShort(os, EXTID_ZIP64); 2162 writeShort(os, 16); 
// ---- tail of the LOC-header writer: the method header and the
// computation of nlen/elen/elen64/elenNTFS/elenEXTT precede this chunk
// (outside this view), so the code below is kept verbatim.  It finishes
// the optional extra fields: ZIP64 sizes, the NTFS timestamp block and
// the Info-ZIP extended-timestamp block.
            writeLong(os, size);            // ZIP64: uncompressed size (8 bytes)
            writeLong(os, csize);           // ZIP64: compressed size (8 bytes)
        }
        if (elenNTFS != 0) {
            // NTFS extra field: 4 reserved bytes followed by one
            // attribute record holding the three Windows FILETIME values
            writeShort(os, EXTID_NTFS);
            writeShort(os, elenNTFS - 4);   // data size, excluding tag+size header
            writeInt(os, 0);                // reserved
            writeShort(os, 0x0001);         // NTFS attr tag #1: file times
            writeShort(os, 24);             // 3 x 8-byte FILETIME values
            writeLong(os, javaToWinTime(mtime));
            writeLong(os, javaToWinTime(atime));
            writeLong(os, javaToWinTime(ctime));
        }
        if (elenEXTT != 0) {
            // extended-timestamp field: a flags byte announcing which
            // times follow, then the present times as 4-byte Unix values
            writeShort(os, EXTID_EXTT);
            writeShort(os, elenEXTT - 4);   // size of the following data block
            int fbyte = 0x1;                // bit 0: mtime (always written)
            if (atime != -1)                // bit 1: atime present
                fbyte |= 0x2;
            if (ctime != -1)                // bit 2: ctime present
                fbyte |= 0x4;
            os.write(fbyte);                // flags byte
            writeInt(os, javaToUnixTime(mtime));
            if (atime != -1)
                writeInt(os, javaToUnixTime(atime));
            if (ctime != -1)
                writeInt(os, javaToUnixTime(ctime));
        }
        if (extra != null) {
            writeBytes(os, extra);          // remaining opaque extra data, as-is
        }
        // total number of bytes this LOC header occupied
        return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
    }

    /**
     * Writes the data descriptor (EXT) record that follows the entry
     * data when CRC/sizes were not known when the LOC header was written.
     *
     * @param os the stream to write to
     * @return the number of bytes written: 24 when either size needs
     *         ZIP64 (8-byte) fields, 16 otherwise
     * @throws IOException if writing fails
     */
    int writeEXT(OutputStream os) throws IOException {
        writeInt(os, EXTSIG);               // EXT header signature
        writeInt(os, crc);                  // crc-32
        if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
            // a size overflows 32 bits -> both are written as 8-byte values
            writeLong(os, csize);
            writeLong(os, size);
            return 24;
        } else {
            writeInt(os, csize);            // compressed size
            writeInt(os, size);             // uncompressed size
            return 16;
        }
    }

    /**
     * Parses NTFS, extended-timestamp and ZIP64 data out of the CEN
     * extra field of this entry, updating size/csize/locoff and the
     * m/a/ctime fields.  Tags this method consumes are stripped from
     * {@code extra}; unknown tags are compacted to the front and kept.
     * If nothing unknown remains, {@code extra} becomes {@code null}.
     *
     * @param zipfs the owning file system (used to read the LOC header
     *              when the full extended timestamp is needed)
     * @throws IOException if the LOC header cannot be read or is corrupt
     */
    // read NTFS, UNIX and ZIP64 data from cen.extra
    void readExtra(ZipFileSystem zipfs) throws IOException {
        if (extra == null)
            return;
        int elen = extra.length;
        int off = 0;
        int newOff = 0;                     // write cursor for retained unknown tags
        while (off + 4 < elen) {
            // extra spec: HeaderID+DataSize+Data
            int pos = off;
            int tag = SH(extra, pos);
            int sz = SH(extra, pos + 2);
            pos += 4;
            if (pos + sz > elen)            // invalid data, stop parsing
                break;
            switch (tag) {
            case EXTID_ZIP64 :
                // ZIP64 values appear in a fixed order, but only for the
                // fields whose 32-bit CEN value was the 0xFFFFFFFF sentinel
                if (size == ZIP64_MINVAL) {
                    if (pos + 8 > elen)     // invalid zip64 extra
                        break;              // fields, just skip
                    size = LL(extra, pos);
                    pos += 8;
                }
                if (csize == ZIP64_MINVAL) {
                    if (pos + 8 > elen)
                        break;
                    csize = LL(extra, pos);
                    pos += 8;
                }
                if (locoff == ZIP64_MINVAL) {
                    if (pos + 8 > elen)
                        break;
                    locoff = LL(extra, pos);
                    pos += 8;
                }
                break;
            case EXTID_NTFS:
                if (sz < 32)                // too short for reserved + attr record
                    break;
                pos += 4;                   // reserved 4 bytes
                if (SH(extra, pos) != 0x0001)   // only attr tag #1 (times) is known
                    break;
                if (SH(extra, pos + 2) != 24)   // must be exactly 3 FILETIMEs
                    break;
                // override the loc field; the date-time here is
                // more "accurate" (100ns resolution vs 2s DOS time)
                mtime = winToJavaTime(LL(extra, pos + 4));
                atime = winToJavaTime(LL(extra, pos + 12));
                ctime = winToJavaTime(LL(extra, pos + 20));
                break;
            case EXTID_EXTT:
                // the CEN copy of the extended timestamp only carries
                // mtime; unless the "zipinfo-time" option was set to
                // false (zipfs.noExtt), re-read this entry's LOC header
                // to pick up the optional atime/ctime too.  There is a
                // performance cost (seek to loc and read) for each entry.
                if (zipfs.noExtt) {
                    if (sz == 5)            // flags byte + 4-byte mtime
                        mtime = unixToJavaTime(LG(extra, pos + 1));
                    break;
                }
                byte[] buf = new byte[LOCHDR];
                if (zipfs.readFullyAt(buf, 0, buf.length , locoff)
                    != buf.length)
                    throw new ZipException("loc: reading failed");
                if (!locSigAt(buf, 0))
                    throw new ZipException("loc: wrong sig ->"
                                           + Long.toString(getSig(buf, 0), 16));
                int locElen = LOCEXT(buf);
                if (locElen < 9)            // EXTT is at least 9 bytes
                    break;
                int locNlen = LOCNAM(buf);
                // re-use buf for the LOC extra field area
                buf = new byte[locElen];
                if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen)
                    != buf.length)
                    throw new ZipException("loc extra: reading failed");
                int locPos = 0;
                // scan the LOC extra for its EXTT tag
                while (locPos + 4 < buf.length) {
                    int locTag = SH(buf, locPos);
                    int locSZ = SH(buf, locPos + 2);
                    locPos += 4;
                    if (locTag != EXTID_EXTT) {
                        locPos += locSZ;
                        continue;
                    }
                    int end = locPos + locSZ - 4;
                    int flag = CH(buf, locPos++);
                    // each time is only read if flagged AND still in bounds
                    if ((flag & 0x1) != 0 && locPos <= end) {
                        mtime = unixToJavaTime(LG(buf, locPos));
                        locPos += 4;
                    }
                    if ((flag & 0x2) != 0 && locPos <= end) {
                        atime = unixToJavaTime(LG(buf, locPos));
                        locPos += 4;
                    }
                    if ((flag & 0x4) != 0 && locPos <= end) {
                        ctime = unixToJavaTime(LG(buf, locPos));
                        locPos += 4;
                    }
                    break;
                }
                break;
            default:    // unknown tag: keep it, compacting toward the front
                System.arraycopy(extra, off, extra, newOff, sz + 4);
                newOff += (sz + 4);
            }
            off += (sz + 4);
        }
        // keep only the retained unknown tags; null when everything was consumed
        if (newOff != 0 && newOff != extra.length)
            extra = Arrays.copyOf(extra, newOff);
        else
            extra = null;
    }

    ///////// basic file attributes ///////////

    /** Creation time; falls back to mtime when no ctime was recorded. */
    @Override
    public FileTime creationTime() {
        return FileTime.fromMillis(ctime == -1 ? mtime : ctime);
    }

    @Override
    public boolean isDirectory() {
        return isDir();
    }

    @Override
    public boolean isOther() {
        return false;       // zip entries are only files or directories
    }

    @Override
    public boolean isRegularFile() {
        return !isDir();
    }

    /** Last-access time; falls back to mtime when no atime was recorded. */
    @Override
    public FileTime lastAccessTime() {
        return FileTime.fromMillis(atime == -1 ? mtime : atime);
    }

    @Override
    public FileTime lastModifiedTime() {
        return FileTime.fromMillis(mtime);
    }

    @Override
    public long size() {
        return size;        // uncompressed size
    }

    @Override
    public boolean isSymbolicLink() {
        return false;       // symlinks are not modelled by this provider
    }

    @Override
    public Object fileKey() {
        return null;        // no unique file key is available
    }

    ///////// zip entry attributes ///////////

    /** Compressed (stored) size of the entry data. */
    public long compressedSize() {
        return csize;
    }

    /** CRC-32 of the uncompressed entry data. */
    public long crc() {
        return crc;
    }

    /** Compression method (e.g. STORED or DEFLATED). */
    public int method() {
        return method;
    }

    /** Defensive copy of the remaining (unparsed) extra data, or null. */
    public byte[] extra() {
        if (extra != null)
            return Arrays.copyOf(extra, extra.length);
        return null;
    }

    /** Defensive copy of the entry comment bytes, or null. */
    public byte[] comment() {
        if (comment != null)
            return Arrays.copyOf(comment, comment.length);
        return null;
    }

    /** Debug dump of all attributes, one per line. */
    public String toString() {
        StringBuilder sb = new StringBuilder(1024);
        Formatter fm = new Formatter(sb);
        // NOTE(review): new String(name) decodes with the platform default
        // charset — presumably acceptable for debug output only; verify
        fm.format(" name : %s%n", new String(name));
        fm.format(" creationTime : %tc%n", creationTime().toMillis());
        fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis());
        fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
        fm.format(" isRegularFile : %b%n", isRegularFile());
        fm.format(" isDirectory : %b%n", isDirectory());
        fm.format(" isSymbolicLink : %b%n", isSymbolicLink());
        fm.format(" isOther : %b%n", isOther());
        fm.format(" fileKey : %s%n", fileKey());
        fm.format(" size : %d%n", size());
        fm.format(" compressedSize : %d%n", compressedSize());
        fm.format(" crc : %x%n", crc());
        fm.format(" method : %d%n", method());
        fm.close();
        return sb.toString();
    }
}

// ZIP directory has two issues:
// (1) ZIP spec does not require the ZIP file to include
//     directory entry
// (2) all entries are not stored/organized in a "tree"
//     structure.
2423 // A possible solution is to build the node tree ourself as 2424 // implemented below. 2425 2426 // default time stamp for pseudo entries 2427 private long zfsDefaultTimeStamp = System.currentTimeMillis(); 2428 2429 private void removeFromTree(IndexNode inode) { 2430 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name))); 2431 IndexNode child = parent.child; 2432 if (child.equals(inode)) { 2433 parent.child = child.sibling; 2434 } else { 2435 IndexNode last = child; 2436 while ((child = child.sibling) != null) { 2437 if (child.equals(inode)) { 2438 last.sibling = child.sibling; 2439 break; 2440 } else { 2441 last = child; 2442 } 2443 } 2444 } 2445 } 2446 2447 // purely for parent lookup, so we don't have to copy the parent 2448 // name every time 2449 static class ParentLookup extends IndexNode { 2450 int len; 2451 ParentLookup() {} 2452 2453 final ParentLookup as(byte[] name, int len) { // as a lookup "key" 2454 name(name, len); 2455 return this; 2456 } 2457 2458 void name(byte[] name, int len) { 2459 this.name = name; 2460 this.len = len; 2461 // calculate the hashcode the same way as Arrays.hashCode() does 2462 int result = 1; 2463 for (int i = 0; i < len; i++) 2464 result = 31 * result + name[i]; 2465 this.hashcode = result; 2466 } 2467 2468 @Override 2469 public boolean equals(Object other) { 2470 if (!(other instanceof IndexNode)) { 2471 return false; 2472 } 2473 byte[] oname = ((IndexNode)other).name; 2474 return Arrays.equals(name, 0, len, 2475 oname, 0, oname.length); 2476 } 2477 2478 } 2479 2480 private void buildNodeTree() throws IOException { 2481 beginWrite(); 2482 try { 2483 IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH)); 2484 if (root == null) { 2485 root = new IndexNode(ROOTPATH, true); 2486 } else { 2487 inodes.remove(root); 2488 } 2489 IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]); 2490 inodes.put(root, root); 2491 ParentLookup lookup = new ParentLookup(); 2492 for (IndexNode node : nodes) { 2493 IndexNode 
parent; 2494 while (true) { 2495 int off = getParentOff(node.name); 2496 if (off <= 1) { // parent is root 2497 node.sibling = root.child; 2498 root.child = node; 2499 break; 2500 } 2501 lookup = lookup.as(node.name, off); 2502 if (inodes.containsKey(lookup)) { 2503 parent = inodes.get(lookup); 2504 node.sibling = parent.child; 2505 parent.child = node; 2506 break; 2507 } 2508 // add new pseudo directory entry 2509 parent = new IndexNode(Arrays.copyOf(node.name, off), true); 2510 inodes.put(parent, parent); 2511 node.sibling = parent.child; 2512 parent.child = node; 2513 node = parent; 2514 } 2515 } 2516 } finally { 2517 endWrite(); 2518 } 2519 } 2520 }