1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package jdk.nio.zipfs; 27 28 import java.io.BufferedOutputStream; 29 import java.io.ByteArrayInputStream; 30 import java.io.ByteArrayOutputStream; 31 import java.io.EOFException; 32 import java.io.FilterOutputStream; 33 import java.io.IOException; 34 import java.io.InputStream; 35 import java.io.OutputStream; 36 import java.nio.ByteBuffer; 37 import java.nio.MappedByteBuffer; 38 import java.nio.channels.FileChannel; 39 import java.nio.channels.FileLock; 40 import java.nio.channels.ReadableByteChannel; 41 import java.nio.channels.SeekableByteChannel; 42 import java.nio.channels.WritableByteChannel; 43 import java.nio.file.*; 44 import java.nio.file.attribute.FileAttribute; 45 import java.nio.file.attribute.FileTime; 46 import java.nio.file.attribute.UserPrincipalLookupService; 47 import java.nio.file.spi.FileSystemProvider; 48 import java.security.AccessController; 49 import java.security.PrivilegedAction; 50 import java.security.PrivilegedActionException; 51 import java.security.PrivilegedExceptionAction; 52 import java.util.*; 53 import java.util.concurrent.locks.ReadWriteLock; 54 import java.util.concurrent.locks.ReentrantReadWriteLock; 55 import java.util.regex.Pattern; 56 import java.util.zip.CRC32; 57 import java.util.zip.Deflater; 58 import java.util.zip.DeflaterOutputStream; 59 import java.util.zip.Inflater; 60 import java.util.zip.InflaterInputStream; 61 import java.util.zip.ZipException; 62 63 import static java.lang.Boolean.TRUE; 64 import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; 65 import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; 66 import static java.nio.file.StandardOpenOption.APPEND; 67 import static java.nio.file.StandardOpenOption.CREATE; 68 import static java.nio.file.StandardOpenOption.CREATE_NEW; 69 import static java.nio.file.StandardOpenOption.READ; 70 import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; 71 import static java.nio.file.StandardOpenOption.WRITE; 72 import static 
jdk.nio.zipfs.ZipConstants.*;
import static jdk.nio.zipfs.ZipUtils.*;

/**
 * A FileSystem built on a zip file
 *
 * @author Xueming Shen
 */
class ZipFileSystem extends FileSystem {
    // statics
    private static final boolean isWindows = AccessController.doPrivileged(
        (PrivilegedAction<Boolean>)()->System.getProperty("os.name")
                                             .startsWith("Windows"));
    private static final Set<String> supportedFileAttributeViews =
        Set.of("basic", "zip");
    // canonical name of the root directory entry ("/")
    private static final byte[] ROOTPATH = new byte[] { '/' };

    private final ZipFileSystemProvider provider;
    private final Path zfpath;
    final ZipCoder zc;
    private final ZipPath rootdir;
    private boolean readOnly; // readonly file system, false by default

    // default time stamp for pseudo entries
    private final long zfsDefaultTimeStamp = System.currentTimeMillis();

    // configurable by env map
    private final boolean noExtt;        // see readExtra()
    private final boolean useTempFile;   // use a temp file for newOS, default
                                         // is to use BAOS for better performance
    private final boolean forceEnd64;    // always write a ZIP64 END record
    private final int defaultCompressionMethod; // METHOD_STORED if "noCompression=true"
                                                // METHOD_DEFLATED otherwise

    /**
     * Creates a zip file system over {@code zfpath}, creating the zip file
     * itself first when env contains "create"=true and the file is absent.
     *
     * @throws FileSystemNotFoundException if the zip file does not exist and
     *         creation was not requested
     * @throws IOException if the central directory cannot be read
     */
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env) throws IOException
    {
        // default encoding for name/comment
        String nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.noExtt = "false".equals(env.get("zipinfo-time"));
        this.useTempFile = isTrue(env, "useTempFile");
        this.forceEnd64 = isTrue(env, "forceZIP64End");
        this.defaultCompressionMethod = isTrue(env, "noCompression") ?
            METHOD_STORED : METHOD_DEFLATED;
        if (Files.notExists(zfpath)) {
            // create a new zip if it doesn't exist
            if (isTrue(env, "create")) {
                // an empty zip is just an END record
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0, forceEnd64);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        boolean writeable = AccessController.doPrivileged(
            (PrivilegedAction<Boolean>)()->Files.isWritable(zfpath));
        this.readOnly = !writeable;
        this.zc = ZipCoder.get(nameEncoding);
        this.rootdir = new ZipPath(this, new byte[]{'/'});
        this.ch = Files.newByteChannel(zfpath, READ);
        try {
            this.cen = initCEN();
        } catch (IOException x) {
            // don't leak the open channel if the central directory is bad
            try {
                this.ch.close();
            } catch (IOException xx) {
                x.addSuppressed(xx);
            }
            throw x;
        }
        this.provider = provider;
        this.zfpath = zfpath;
    }

    // returns true if there is a name=true/"true" setting in env
    private static boolean isTrue(Map<String, ?> env, String name) {
        return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
    }

    @Override
    public FileSystemProvider provider() {
        return provider;
    }

    @Override
    public String getSeparator() {
        return "/";
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public boolean isReadOnly() {
        return readOnly;
    }

    // throws if this file system was opened (or later marked) read-only
    private void checkWritable() {
        if (readOnly) {
            throw new ReadOnlyFileSystemException();
        }
    }

    void setReadOnly() {
        this.readOnly = true;
    }

    @Override
    public Iterable<Path> getRootDirectories() {
        return List.of(rootdir);
    }

    ZipPath getRootDir() {
        return rootdir;
    }

    // joins the name elements with '/', skipping empty elements
    @Override
    public ZipPath getPath(String first, String... more) {
        if (more.length == 0) {
            return new ZipPath(this, first);
        }
        StringBuilder sb = new StringBuilder();
        sb.append(first);
        for (String path : more) {
            if (path.length() > 0) {
                if (sb.length() > 0) {
                    sb.append('/');
                }
                sb.append(path);
            }
        }
        return new ZipPath(this, sb.toString());
    }

    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        throw new UnsupportedOperationException();
    }

    @Override
    public WatchService newWatchService() {
        throw new UnsupportedOperationException();
    }

    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }

    @Override
    public Iterable<FileStore> getFileStores() {
        return List.of(new ZipFileStore(rootdir));
    }

    @Override
    public Set<String> supportedFileAttributeViews() {
        return supportedFileAttributeViews;
    }

    @Override
    public String toString() {
        return zfpath.toString();
    }

    Path getZipFile() {
        return zfpath;
    }

    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";

    // accepts "glob:<pattern>" or "regex:<pattern>" (syntax is case-insensitive)
    @Override
    public PathMatcher getPathMatcher(String syntaxAndInput) {
        int pos = syntaxAndInput.indexOf(':');
        if (pos <= 0 || pos == syntaxAndInput.length()) {
            throw new IllegalArgumentException();
        }
        String syntax = syntaxAndInput.substring(0, pos);
        String input = syntaxAndInput.substring(pos + 1);
        String expr;
        if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) {
            expr = toRegexPattern(input);
        } else {
            if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) {
                expr = input;
            } else {
                throw new UnsupportedOperationException("Syntax '" + syntax +
                    "' not recognized");
            }
        }
        // return matcher
        final Pattern pattern = Pattern.compile(expr);
        return (path)->pattern.matcher(path.toString()).matches();
    }

    /**
     * Closes this file system: marks it closed, closes all outstanding
     * streams, syncs pending updates back to the zip file, releases
     * inflaters/deflaters, and deletes temporary files.
     */
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;
            isOpen = false;             // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {       // unlock and close all remaining streams
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is : copy)
                is.close();
        }
        beginWrite();                   // lock and sync
        try {
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> {
                sync(); return null;
            });
            ch.close();                 // close the ch just in case no update
                                        // and sync didn't close the ch
        } catch (PrivilegedActionException e) {
            throw (IOException)e.getException();
        } finally {
            endWrite();
        }

        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        // best-effort cleanup of temp files; first failure is thrown,
        // later failures are attached as suppressed exceptions
        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p : tmppaths) {
                try {
                    AccessController.doPrivileged(
                        (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p));
                } catch (PrivilegedActionException e) {
                    IOException x = (IOException)e.getException();
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
            throw ioe;
    }

    /**
     * Returns the attributes of the entry at {@code path}, or null when the
     * entry does not exist. Pseudo directories (present only in the node
     * tree, pos == -1) get a synthetic STORED entry stamped with the file
     * system's default timestamp.
     */
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        beginRead();
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null) {
                return null;
            } else if (inode instanceof Entry) {
                return (Entry)inode;
            } else if (inode.pos == -1) {
                // pseudo directory, uses METHOD_STORED
                Entry e = new Entry(inode.name, inode.isdir, METHOD_STORED);
                e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp;
                return e;
            } else {
                return new Entry(this, inode);
            }
        } finally {
            endRead();
        }
    }

    // NOTE(review): on failure this reports the zip file path (toString()),
    // not the entry path — matches upstream behavior
    void checkAccess(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            // is it necessary to readCEN as a sanity check?
            if (getInode(path) == null) {
                throw new NoSuchFileException(toString());
            }

        } finally {
            endRead();
        }
    }

    /**
     * Updates the modification/access/creation times of an existing entry;
     * null arguments leave the corresponding time unchanged.
     */
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;     // copy e
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);
        } finally {
            endWrite();
        }
    }

    boolean exists(byte[] path) {
        beginRead();
        try {
            ensureOpen();
            return getInode(path) != null;
        } finally {
            endRead();
        }
    }

    boolean isDirectory(byte[] path) {
        beginRead();
        try {
            IndexNode n = getInode(path);
            return n != null && n.isDir();
        } finally {
            endRead();
        }
    }

    // returns the list of child paths of "path"
    Iterator<Path> iteratorOf(ZipPath dir,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            byte[] path = dir.getResolvedPath();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                // (1) Assume each path from the zip file itself is "normalized"
                // (2) IndexNode.name is absolute. see IndexNode(byte[],int,int)
                // (3) If parent "dir" is relative when ZipDirectoryStream
                //     is created, the returned child path needs to be relative
                //     as well.
                ZipPath childPath = new ZipPath(this, child.name, true);
                ZipPath childFileName = childPath.getFileName();
                ZipPath zpath = dir.resolve(childFileName);
                if (filter == null || filter.accept(zpath))
                    list.add(zpath);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }

    /**
     * Creates a new directory entry. Directories are always METHOD_STORED.
     *
     * @throws FileAlreadyExistsException for the root dir or an existing path
     */
    void createDirectory(byte[] dir, FileAttribute<?>... attrs) throws IOException {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW, true, METHOD_STORED);
            update(e);
        } finally {
            endWrite();
        }
    }

    /**
     * Copies (or moves, when deletesrc is true) the entry at src to dst.
     * Copying a directory entry only creates the destination directory, as
     * the spec requires; data is shared on move but duplicated on copy.
     */
    void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry(src);  // ensureOpen checked

            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            // copy eSrc entry and change name
            Entry u = new Entry(eSrc, Entry.COPY);
            u.name(dst);
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime= u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }

    // Returns an output stream for writing the contents into the specified
    // entry.
    OutputStream newOutputStream(byte[] path, OpenOption... options)
        throws IOException
    {
        checkWritable();
        boolean hasCreateNew = false;
        boolean hasCreate = false;
        boolean hasAppend = false;
        boolean hasTruncate = false;
        for (OpenOption opt : options) {
            if (opt == READ)
                throw new IllegalArgumentException("READ not allowed");
            if (opt == CREATE_NEW)
                hasCreateNew = true;
            if (opt == CREATE)
                hasCreate = true;
            if (opt == APPEND)
                hasAppend = true;
            if (opt == TRUNCATE_EXISTING)
                hasTruncate = true;
        }
        if (hasAppend && hasTruncate)
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
        beginRead();                 // only need a readlock, the "update()" will
        try {                        // try to obtain a writelock when the os is
            ensureOpen();            // being closed.
            Entry e = getEntry(path);
            if (e != null) {
                if (e.isDir() || hasCreateNew)
                    throw new FileAlreadyExistsException(getString(path));
                if (hasAppend) {
                    // APPEND: copy the existing data into the new stream first
                    OutputStream os = getOutputStream(new Entry(e, Entry.NEW));
                    try (InputStream is = getInputStream(e)) {
                        is.transferTo(os);
                    }
                    return os;
                }
                return getOutputStream(new Entry(e, Entry.NEW));
            } else {
                if (!hasCreate && !hasCreateNew)
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return getOutputStream(new Entry(path, Entry.NEW, false, defaultCompressionMethod));
            }
        } finally {
            endRead();
        }
    }

    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }

    private void checkOptions(Set<? extends OpenOption> options) {
        // check for options of null type and option is an instance of StandardOpenOption
        for (OpenOption option : options) {
            if (option == null)
                throw new NullPointerException();
            if (!(option instanceof StandardOpenOption))
                throw new IllegalArgumentException();
        }
        if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING))
            throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed");
    }

    // Returns an output SeekableByteChannel for either
    // (1) writing the contents of a new entry, if the entry doesn't exist, or
    // (2) updating/replacing the contents of an existing entry.
    // Note: The content of the channel is not compressed until the
    // channel is closed
    private class EntryOutputChannel extends ByteArrayChannel {
        final Entry e;

        EntryOutputChannel(Entry e) {
            // presize the buffer from the old entry size when known
            super(e.size > 0? (int)e.size : 8192, false);
            this.e = e;
            if (e.mtime == -1)
                e.mtime = System.currentTimeMillis();
            if (e.method == -1)
                e.method = defaultCompressionMethod;
            // store size, compressed size, and crc-32 in datadescriptor
            e.flag = FLAG_DATADESCR;
            if (zc.isUTF8())
                e.flag |= FLAG_USE_UTF8;
        }

        @Override
        public void close() throws IOException {
            // will update the entry
            try (OutputStream os = getOutputStream(e)) {
                os.write(toByteArray());
            }
            super.close();
        }
    }

    // Returns a Writable/ReadByteChannel for now. Might consider to use
    // newFileChannel() instead, which dump the entry data into a regular
    // file on the default file system and create a FileChannel on top of it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();    // only need a read lock, the "update()" will obtain
                            // the write lock when the channel is closed
            try {
                Entry e = getEntry(path);
                if (e != null) {
                    if (e.isDir() || options.contains(CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    SeekableByteChannel sbc =
                        new EntryOutputChannel(new Entry(e, Entry.NEW));
                    if (options.contains(APPEND)) {
                        try (InputStream is = getInputStream(e)) {  // copyover
                            byte[] buf = new byte[8192];
                            ByteBuffer bb = ByteBuffer.wrap(buf);
                            int n;
                            while ((n = is.read(buf)) != -1) {
                                bb.position(0);
                                bb.limit(n);
                                sbc.write(bb);
                            }
                        }
                    }
                    return sbc;
                }
                if (!options.contains(CREATE) && !options.contains(CREATE_NEW))
                    throw new NoSuchFileException(getString(path));
                checkParents(path);
                return new EntryOutputChannel(
                    new Entry(path, Entry.NEW, false, defaultCompressionMethod));
            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                try (InputStream is = getInputStream(e)) {
                    // TBD: if (e.size < NNNNN);
                    return new ByteArrayChannel(is.readAllBytes(), true);
                }
            } finally {
                endRead();
            }
        }
    }

    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copy the entry data into it if the entry exists, and then create a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE) &&
                        !options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new NoSuchFileException(getString(path));
                    }
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW)) {
                        throw new FileAlreadyExistsException(getString(path));
                    }
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                options = new HashSet<>(options);
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = defaultCompressionMethod;
            }
            // is there a better way to hook into the FileChannel's close method?
            // all operations delegate to fch; implCloseChannel() commits the
            // temp file back into the zip (write case) or removes it (read case)
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                {
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);
                        update(u);
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }

    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private final Set<ExistingChannelCloser> exChClosers = new HashSet<>();

    // temp files created by this file system; deleted on close()
    private final Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<>());

    // Creates a temp file next to the zip file; when path is non-null and the
    // entry exists, the entry's data is copied into the temp file.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }

    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }

    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
860 private void checkParents(byte[] path) throws IOException { 861 beginRead(); 862 try { 863 while ((path = getParent(path)) != null && 864 path != ROOTPATH) { 865 if (!inodes.containsKey(IndexNode.keyOf(path))) { 866 throw new NoSuchFileException(getString(path)); 867 } 868 } 869 } finally { 870 endRead(); 871 } 872 } 873 874 private static byte[] getParent(byte[] path) { 875 int off = getParentOff(path); 876 if (off <= 1) 877 return ROOTPATH; 878 return Arrays.copyOf(path, off); 879 } 880 881 private static int getParentOff(byte[] path) { 882 int off = path.length - 1; 883 if (off > 0 && path[off] == '/') // isDirectory 884 off--; 885 while (off > 0 && path[off] != '/') { off--; } 886 return off; 887 } 888 889 private void beginWrite() { 890 rwlock.writeLock().lock(); 891 } 892 893 private void endWrite() { 894 rwlock.writeLock().unlock(); 895 } 896 897 private void beginRead() { 898 rwlock.readLock().lock(); 899 } 900 901 private void endRead() { 902 rwlock.readLock().unlock(); 903 } 904 905 /////////////////////////////////////////////////////////////////// 906 907 private volatile boolean isOpen = true; 908 private final SeekableByteChannel ch; // channel to the zipfile 909 final byte[] cen; // CEN & ENDHDR 910 private END end; 911 private long locpos; // position of first LOC header (usually 0) 912 913 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 914 915 // name -> pos (in cen), IndexNode itself can be used as a "key" 916 private LinkedHashMap<IndexNode, IndexNode> inodes; 917 918 final byte[] getBytes(String name) { 919 return zc.getBytes(name); 920 } 921 922 final String getString(byte[] name) { 923 return zc.toString(name); 924 } 925 926 @SuppressWarnings("deprecation") 927 protected void finalize() throws IOException { 928 close(); 929 } 930 931 // Reads len bytes of data from the specified offset into buf. 932 // Returns the total number of bytes read. 933 // Each/every byte read from here (except the cen, which is mapped). 
    final long readFullyAt(byte[] buf, int off, long len, long pos)
        throws IOException
    {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        bb.position(off);
        bb.limit((int)(off + len));
        return readFullyAt(bb, pos);
    }

    // positions the shared channel and reads; synchronized because ch's
    // position is shared mutable state
    private long readFullyAt(ByteBuffer bb, long pos) throws IOException {
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }

    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        // the END record must lie within the last END_MAXLEN bytes
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards in overlapping blocks (overlap of ENDHDR bytes so a
        // header straddling two blocks is still found)
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                throw new ZipException("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i]   == (byte)'P'    &&
                    buf[i+1] == (byte)'K'    &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    // end.endsub = ENDSUB(buf); // not used
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    // end.comlen = ENDCOM(buf); // not used
                    end.endpos = pos + i;
                    // try if there is zip64 end;
                    byte[] loc64 = new byte[ZIP64_LOCHDR];
                    if (end.endpos < ZIP64_LOCHDR ||
                        readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                        != loc64.length ||
                        !locator64SigAt(loc64, 0)) {
                        // no ZIP64 locator: the plain END record is authoritative
                        return end;
                    }
                    long end64pos = ZIP64_LOCOFF(loc64);
                    byte[] end64buf = new byte[ZIP64_ENDHDR];
                    if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                        != end64buf.length ||
                        !end64SigAt(end64buf, 0)) {
                        return end;
                    }
                    // end64 found,
                    long cenlen64 = ZIP64_ENDSIZ(end64buf);
                    long cenoff64 = ZIP64_ENDOFF(end64buf);
                    long centot64 = ZIP64_ENDTOT(end64buf);
                    // double-check: each 64-bit value must either agree with
                    // the 32-bit field or the 32-bit field must be the
                    // "look in ZIP64" sentinel
                    if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL ||
                        cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL ||
                        centot64 != end.centot && end.centot != ZIP64_MINVAL32) {
                        return end;
                    }
                    // to use the end64 values
                    end.cenlen = cenlen64;
                    end.cenoff = cenoff64;
                    end.centot = (int)centot64; // assume total < 2g
                    end.endpos = end64pos;
                    return end;
                }
            }
        }
        throw new ZipException("zip END header not found");
    }

    // links node into the tree, creating pseudo directory entries for any
    // missing ancestors up to the root
    private void makeParentDirs(IndexNode node, IndexNode root) {
        IndexNode parent;
        ParentLookup lookup = new ParentLookup();
        while (true) {
            int off = getParentOff(node.name);
            // parent is root
            if (off <= 1) {
                node.sibling = root.child;
                root.child = node;
                break;
            }
            // parent exists
            lookup = lookup.as(node.name, off);
            if (inodes.containsKey(lookup)) {
                parent = inodes.get(lookup);
                node.sibling = parent.child;
                parent.child = node;
                break;
            }
            // parent does not exist, add new pseudo directory entry
            parent = new IndexNode(Arrays.copyOf(node.name, off), true);
            inodes.put(parent, parent);
            node.sibling = parent.child;
            parent.child = node;
            node = parent;
        }
    }

    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not
    //     stored/organized in a "tree" structure.
    // A possible solution is to build the node tree ourself as
    // implemented below.
    private void buildNodeTree() {
        beginWrite();
        try {
            IndexNode root = inodes.remove(LOOKUPKEY.as(ROOTPATH));
            if (root == null) {
                root = new IndexNode(ROOTPATH, true);
            }
            IndexNode[] nodes = inodes.values().toArray(new IndexNode[0]);
            inodes.put(root, root);
            for (IndexNode node : nodes) {
                makeParentDirs(node, root);
            }
        } finally {
            endWrite();
        }
    }

    // unlinks inode from its parent's child/sibling chain
    private void removeFromTree(IndexNode inode) {
        IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name)));
        IndexNode child = parent.child;
        if (child.equals(inode)) {
            parent.child = child.sibling;
        } else {
            IndexNode last = child;
            while ((child = child.sibling) != null) {
                if (child.equals(inode)) {
                    last.sibling = child.sibling;
                    break;
                } else {
                    last = child;
                }
            }
        }
    }

    // Reads zip file central directory. Returns the file position of first
    // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
    // then the error was a zip format error and zip->msg has the error text.
    // Always pass in -1 for knownTotal; it's used for a recursive call.
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;    // only END header present
        }
        if (end.cenlen > end.endpos)
            throw new ZipException("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            throw new ZipException("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            throw new ZipException("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                throw new ZipException("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0) {
                throw new ZipException("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                throw new ZipException("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                throw new ZipException("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos, nlen);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        if (pos + ENDHDR != cen.length) {
            throw new ZipException("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    private void ensureOpen() {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);  // tracked so close() can delete it
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // removes inode from the index and the node tree, marking the file
    // system dirty so sync() rewrites the zip
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // replaces (or adds) e in the index; new/changed entries are also linked
    // into the node tree under their parent
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
    private long copyLOCEntry(Entry e, boolean updateHeader,
                              OutputStream os,
                              long written, byte[] buf)
        throws IOException
    {
        long locoff = e.locoff;    // where to read
        e.locoff = written;        // update the e.locoff with new value

        // calculate the size need to write out
        long size = 0;
        // if there is A ext
        if ((e.flag & FLAG_DATADESCR) != 0) {
            if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL)
                size = 24;
            else
                size = 16;
        }
        // read loc, use the original loc.elen/nlen
        //
        // an extra byte after loc is read, which should be the first byte of the
        // 'name' field of the loc.
if this byte is '/', which means the original 1229 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1230 // is used to output the loc, in which the leading "/" will be removed 1231 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1232 throw new ZipException("loc: reading failed"); 1233 1234 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1235 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1236 size += e.csize; 1237 written = e.writeLOC(os) + size; 1238 } else { 1239 os.write(buf, 0, LOCHDR); // write out the loc header 1240 locoff += LOCHDR; 1241 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1242 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1243 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1244 written = LOCHDR + size; 1245 } 1246 int n; 1247 while (size > 0 && 1248 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1249 { 1250 if (size < n) 1251 n = (int)size; 1252 os.write(buf, 0, n); 1253 size -= n; 1254 locoff += n; 1255 } 1256 return written; 1257 } 1258 1259 private long writeEntry(Entry e, OutputStream os) 1260 throws IOException { 1261 1262 if (e.bytes == null && e.file == null) // dir, 0-length data 1263 return 0; 1264 1265 long written = 0; 1266 if (e.csize > 0 && (e.crc != 0 || e.size == 0)) { 1267 // pre-compressed entry, write directly to output stream 1268 writeTo(e, os); 1269 } else { 1270 try (OutputStream os2 = (e.method == METHOD_STORED) ? 
1271 new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) { 1272 writeTo(e, os2); 1273 } 1274 } 1275 written += e.csize; 1276 if ((e.flag & FLAG_DATADESCR) != 0) { 1277 written += e.writeEXT(os); 1278 } 1279 return written; 1280 } 1281 1282 private void writeTo(Entry e, OutputStream os) throws IOException { 1283 if (e.bytes != null) { 1284 os.write(e.bytes, 0, e.bytes.length); 1285 } else if (e.file != null) { 1286 if (e.type == Entry.NEW || e.type == Entry.FILECH) { 1287 try (InputStream is = Files.newInputStream(e.file)) { 1288 is.transferTo(os); 1289 } 1290 } 1291 Files.delete(e.file); 1292 tmppaths.remove(e.file); 1293 } 1294 } 1295 1296 // sync the zip file system, if there is any update 1297 private void sync() throws IOException { 1298 // check ex-closer 1299 if (!exChClosers.isEmpty()) { 1300 for (ExistingChannelCloser ecc : exChClosers) { 1301 if (ecc.closeAndDeleteIfDone()) { 1302 exChClosers.remove(ecc); 1303 } 1304 } 1305 } 1306 if (!hasUpdate) 1307 return; 1308 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1309 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) { 1310 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1311 long written = 0; 1312 byte[] buf = null; 1313 Entry e; 1314 1315 // write loc 1316 for (IndexNode inode : inodes.values()) { 1317 if (inode instanceof Entry) { // an updated inode 1318 e = (Entry)inode; 1319 try { 1320 if (e.type == Entry.COPY) { 1321 // entry copy: the only thing changed is the "name" 1322 // and "nlen" in LOC header, so we update/rewrite the 1323 // LOC in new file and simply copy the rest (data and 1324 // ext) without enflating/deflating from the old zip 1325 // file LOC entry. 
1326 if (buf == null) 1327 buf = new byte[8192]; 1328 written += copyLOCEntry(e, true, os, written, buf); 1329 } else { // NEW, FILECH or CEN 1330 e.locoff = written; 1331 written += e.writeLOC(os); // write loc header 1332 written += writeEntry(e, os); 1333 } 1334 elist.add(e); 1335 } catch (IOException x) { 1336 x.printStackTrace(); // skip any in-accurate entry 1337 } 1338 } else { // unchanged inode 1339 if (inode.pos == -1) { 1340 continue; // pseudo directory node 1341 } 1342 if (inode.name.length == 1 && inode.name[0] == '/') { 1343 continue; // no root '/' directory even if it 1344 // exists in original zip/jar file. 1345 } 1346 e = new Entry(this, inode); 1347 try { 1348 if (buf == null) 1349 buf = new byte[8192]; 1350 written += copyLOCEntry(e, false, os, written, buf); 1351 elist.add(e); 1352 } catch (IOException x) { 1353 x.printStackTrace(); // skip any wrong entry 1354 } 1355 } 1356 } 1357 1358 // now write back the cen and end table 1359 end.cenoff = written; 1360 for (Entry entry : elist) { 1361 written += entry.writeCEN(os); 1362 } 1363 end.centot = elist.size(); 1364 end.cenlen = written - end.cenoff; 1365 end.write(os, written, forceEnd64); 1366 } 1367 if (!streams.isEmpty()) { 1368 // 1369 // There are outstanding input streams open on existing "ch", 1370 // so, don't close the "cha" and delete the "file for now, let 1371 // the "ex-channel-closer" to handle them 1372 Path path = createTempFileInSameDirectoryAs(zfpath); 1373 ExistingChannelCloser ecc = new ExistingChannelCloser(path, 1374 ch, 1375 streams); 1376 Files.move(zfpath, path, REPLACE_EXISTING); 1377 exChClosers.add(ecc); 1378 streams = Collections.synchronizedSet(new HashSet<>()); 1379 } else { 1380 ch.close(); 1381 Files.delete(zfpath); 1382 } 1383 1384 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1385 hasUpdate = false; // clear 1386 } 1387 1388 IndexNode getInode(byte[] path) { 1389 return inodes.get(IndexNode.keyOf(Objects.requireNonNull(path, "path"))); 1390 } 1391 1392 /** 
1393 * Return the IndexNode from the root tree. If it doesn't exist, 1394 * it gets created along with all parent directory IndexNodes. 1395 */ 1396 IndexNode getOrCreateInode(byte[] path, boolean isdir) { 1397 IndexNode node = getInode(path); 1398 // if node exists, return it 1399 if (node != null) { 1400 return node; 1401 } 1402 1403 // otherwise create new pseudo node and parent directory hierarchy 1404 node = new IndexNode(path, isdir); 1405 beginWrite(); 1406 try { 1407 makeParentDirs(node, Objects.requireNonNull(inodes.get(IndexNode.keyOf(ROOTPATH)), "no root node found")); 1408 return node; 1409 } finally { 1410 endWrite(); 1411 } 1412 } 1413 1414 private Entry getEntry(byte[] path) throws IOException { 1415 IndexNode inode = getInode(path); 1416 if (inode instanceof Entry) 1417 return (Entry)inode; 1418 if (inode == null || inode.pos == -1) 1419 return null; 1420 return new Entry(this, inode); 1421 } 1422 1423 public void deleteFile(byte[] path, boolean failIfNotExists) 1424 throws IOException 1425 { 1426 checkWritable(); 1427 IndexNode inode = getInode(path); 1428 if (inode == null) { 1429 if (path != null && path.length == 0) 1430 throw new ZipException("root directory </> can't not be delete"); 1431 if (failIfNotExists) 1432 throw new NoSuchFileException(getString(path)); 1433 } else { 1434 if (inode.isDir() && inode.child != null) 1435 throw new DirectoryNotEmptyException(getString(path)); 1436 updateDelete(inode); 1437 } 1438 } 1439 1440 // Returns an out stream for either 1441 // (1) writing the contents of a new entry, if the entry exists, or 1442 // (2) updating/replacing the contents of the specified existing entry. 
1443 private OutputStream getOutputStream(Entry e) throws IOException { 1444 if (e.mtime == -1) 1445 e.mtime = System.currentTimeMillis(); 1446 if (e.method == -1) 1447 e.method = defaultCompressionMethod; 1448 // store size, compressed size, and crc-32 in datadescr 1449 e.flag = FLAG_DATADESCR; 1450 if (zc.isUTF8()) 1451 e.flag |= FLAG_USE_UTF8; 1452 OutputStream os; 1453 if (useTempFile) { 1454 e.file = getTempPathForEntry(null); 1455 os = Files.newOutputStream(e.file, WRITE); 1456 } else { 1457 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1458 } 1459 if (e.method == METHOD_DEFLATED) { 1460 return new DeflatingEntryOutputStream(e, os); 1461 } else { 1462 return new EntryOutputStream(e, os); 1463 } 1464 } 1465 1466 private class EntryOutputStream extends FilterOutputStream { 1467 private final Entry e; 1468 private long written; 1469 private boolean isClosed; 1470 1471 EntryOutputStream(Entry e, OutputStream os) { 1472 super(os); 1473 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1474 // this.written = 0; 1475 } 1476 1477 @Override 1478 public synchronized void write(int b) throws IOException { 1479 out.write(b); 1480 written += 1; 1481 } 1482 1483 @Override 1484 public synchronized void write(byte[] b, int off, int len) 1485 throws IOException { 1486 out.write(b, off, len); 1487 written += len; 1488 } 1489 1490 @Override 1491 public synchronized void close() throws IOException { 1492 if (isClosed) { 1493 return; 1494 } 1495 isClosed = true; 1496 e.size = written; 1497 if (out instanceof ByteArrayOutputStream) 1498 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1499 super.close(); 1500 update(e); 1501 } 1502 } 1503 1504 // Output stream returned when writing "deflated" entries into memory, 1505 // to enable eager (possibly parallel) deflation and reduce memory required. 
1506 private class DeflatingEntryOutputStream extends DeflaterOutputStream { 1507 private final CRC32 crc; 1508 private final Entry e; 1509 private boolean isClosed; 1510 1511 DeflatingEntryOutputStream(Entry e, OutputStream os) { 1512 super(os, getDeflater()); 1513 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1514 this.crc = new CRC32(); 1515 } 1516 1517 @Override 1518 public synchronized void write(int b) throws IOException { 1519 super.write(b); 1520 crc.update(b); 1521 } 1522 1523 @Override 1524 public synchronized void write(byte[] b, int off, int len) 1525 throws IOException { 1526 super.write(b, off, len); 1527 crc.update(b, off, len); 1528 } 1529 1530 @Override 1531 public synchronized void close() throws IOException { 1532 if (isClosed) 1533 return; 1534 isClosed = true; 1535 finish(); 1536 e.size = def.getBytesRead(); 1537 e.csize = def.getBytesWritten(); 1538 e.crc = crc.getValue(); 1539 if (out instanceof ByteArrayOutputStream) 1540 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1541 super.close(); 1542 update(e); 1543 releaseDeflater(def); 1544 } 1545 } 1546 1547 // Wrapper output stream class to write out a "stored" entry. 1548 // (1) this class does not close the underlying out stream when 1549 // being closed. 
1550 // (2) no need to be "synchronized", only used by sync() 1551 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1552 private final CRC32 crc; 1553 private final Entry e; 1554 private long written; 1555 private boolean isClosed; 1556 1557 EntryOutputStreamCRC32(Entry e, OutputStream os) { 1558 super(os); 1559 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1560 this.crc = new CRC32(); 1561 } 1562 1563 @Override 1564 public void write(int b) throws IOException { 1565 out.write(b); 1566 crc.update(b); 1567 written += 1; 1568 } 1569 1570 @Override 1571 public void write(byte[] b, int off, int len) 1572 throws IOException { 1573 out.write(b, off, len); 1574 crc.update(b, off, len); 1575 written += len; 1576 } 1577 1578 @Override 1579 public void close() { 1580 if (isClosed) 1581 return; 1582 isClosed = true; 1583 e.size = e.csize = written; 1584 e.crc = crc.getValue(); 1585 } 1586 } 1587 1588 // Wrapper output stream class to write out a "deflated" entry. 1589 // (1) this class does not close the underlying out stream when 1590 // being closed. 
// (2) no need to be "synchronized", only used by sync()
    private class EntryOutputStreamDef extends DeflaterOutputStream {
        private final CRC32 crc;
        private final Entry e;
        private boolean isClosed;

        EntryOutputStreamDef(Entry e, OutputStream os) {
            super(os, getDeflater());
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        @Override
        public void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            finish();
            // record uncompressed/compressed sizes and CRC on the entry
            e.size = def.getBytesRead();
            e.csize = def.getBytesWritten();
            e.crc = crc.getValue();
            // underlying stream is deliberately not closed; sync() owns it
            releaseDeflater(def);
        }
    }

    // Returns an input stream over the (possibly compressed) content of the
    // given entry, reading from pending bytes/temp file for updated entries
    // or from the channel for untouched ones. Registered in "streams" so
    // sync() can account for outstanding readers.
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis;
        if (e.type == Entry.NEW) {
            if (e.bytes != null)
                eis = new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                eis = Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {  // untouched CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
                private boolean isClosed = false;
                public void close() throws IOException {
                    if (!isClosed) {
                        // return the inflater to the pool before closing
                        releaseInflater(inf);
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;   // the dummy byte
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                public int available() {
                    if (isClosed)
                        return 0;
                    // remaining = uncompressed size minus bytes inflated so far
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        private long rem;                       // number of remaining bytes within entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            pos = e.locoff;
            if (pos == -1) {
                Entry e2 = getEntry(e.name);
                if (e2 == null) {
                    throw new ZipException("invalid loc for entry <" + getString(e.name) + ">");
                }
                pos = e2.locoff;
            }
            // negative pos marks "LOC header not yet parsed"; initDataPos()
            // flips it back and skips past the header on first read
            pos = -pos;  // lazy initialize the real data offset
        }

        public int read(byte[] b, int off, int len) throws IOException {
            ensureOpen();
            initDataPos();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // channel position is shared state; serialize positioned reads
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }

        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }

        public long skip(long n) {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }

        public int available() {
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }

        public void close() {
            rem = 0;
            streams.remove(this);
        }

        // Resolves the lazily-encoded (negative) position: reads the LOC
        // header and advances past name/extra to the first data byte.
        private void initDataPos() throws IOException {
            if (pos <= 0) {
                pos = -pos + locpos;
                byte[] buf = new byte[LOCHDR];
                if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                    throw new ZipException("invalid loc " + pos + " for entry reading");
                }
                pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
            }
        }
    }

    // Maximum number of de/inflater we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one.
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                return inflaters.remove(size - 1);
            } else {
                return new Inflater(true);
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
    private Deflater getDeflater() {
        synchronized (deflaters) {
            int size = deflaters.size();
            if (size > 0) {
                return deflaters.remove(size - 1);
            } else {
                return new Deflater(Deflater.DEFAULT_COMPRESSION, true);
            }
        }
    }

    // Releases the specified deflater to the list of available deflaters.
1837 private void releaseDeflater(Deflater def) { 1838 synchronized (deflaters) { 1839 if (inflaters.size() < MAX_FLATER) { 1840 def.reset(); 1841 deflaters.add(def); 1842 } else { 1843 def.end(); 1844 } 1845 } 1846 } 1847 1848 // End of central directory record 1849 static class END { 1850 // The fields that are commented out below are not used by anyone and write() uses "0" 1851 // int disknum; 1852 // int sdisknum; 1853 // int endsub; 1854 int centot; // 4 bytes 1855 long cenlen; // 4 bytes 1856 long cenoff; // 4 bytes 1857 // int comlen; // comment length 1858 // byte[] comment; 1859 1860 // members of Zip64 end of central directory locator 1861 // int diskNum; 1862 long endpos; 1863 // int disktot; 1864 1865 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 1866 boolean hasZip64 = forceEnd64; // false; 1867 long xlen = cenlen; 1868 long xoff = cenoff; 1869 if (xlen >= ZIP64_MINVAL) { 1870 xlen = ZIP64_MINVAL; 1871 hasZip64 = true; 1872 } 1873 if (xoff >= ZIP64_MINVAL) { 1874 xoff = ZIP64_MINVAL; 1875 hasZip64 = true; 1876 } 1877 int count = centot; 1878 if (count >= ZIP64_MINVAL32) { 1879 count = ZIP64_MINVAL32; 1880 hasZip64 = true; 1881 } 1882 if (hasZip64) { 1883 //zip64 end of central directory record 1884 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1885 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1886 writeShort(os, 45); // version made by 1887 writeShort(os, 45); // version needed to extract 1888 writeInt(os, 0); // number of this disk 1889 writeInt(os, 0); // central directory start disk 1890 writeLong(os, centot); // number of directory entries on disk 1891 writeLong(os, centot); // number of directory entries 1892 writeLong(os, cenlen); // length of central directory 1893 writeLong(os, cenoff); // offset of central directory 1894 1895 //zip64 end of central directory locator 1896 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1897 writeInt(os, 0); // zip64 END start disk 1898 
writeLong(os, offset);            // offset of zip64 END
                writeInt(os, 1);                  // total number of disks (?)
            }
            writeInt(os, ENDSIG);          // END record signature
            writeShort(os, 0);             // number of this disk
            writeShort(os, 0);             // central directory start disk
            writeShort(os, count);         // number of directory entries on disk
            writeShort(os, count);         // total number of directory entries
            writeInt(os, xlen);            // length of central directory
            writeInt(os, xoff);            // offset of central directory
            writeShort(os, 0);             // zip file comment, not used
        }
    }

    // Internal node that links a "name" to its pos in cen table.
    // The node itself can be used as a "key" to lookup itself in
    // the HashMap inodes.
    static class IndexNode {
        byte[] name;        // entry name, always with a leading '/'
        int hashcode;       // node is hashable/hashed by its name
        boolean isdir;
        int pos = -1;       // position in cen table, -1 means the
                            // entry does not exist in zip file
        IndexNode child;    // first child
        IndexNode sibling;  // next sibling

        IndexNode() {}

        IndexNode(byte[] name, boolean isdir) {
            name(name);
            this.isdir = isdir;
            this.pos = -1;
        }

        IndexNode(byte[] name, int pos) {
            name(name);
            this.pos = pos;
        }

        // constructor for initCEN() (1) remove trailing '/' (2) pad leading '/'
        IndexNode(byte[] cen, int pos, int nlen) {
            int noff = pos + CENHDR;
            if (cen[noff + nlen - 1] == '/') {
                isdir = true;
                nlen--;
            }
            if (nlen > 0 && cen[noff] == '/') {
                name = Arrays.copyOfRange(cen, noff, noff + nlen);
            } else {
                // prepend a '/' so all names share a common root form
                name = new byte[nlen + 1];
                System.arraycopy(cen, noff, name, 1, nlen);
                name[0] = '/';
            }
            name(normalize(name));
            this.pos = pos;
        }

        // Normalize the IndexNode.name field.
        // Collapses runs of '/' and drops a trailing '/'.
        private byte[] normalize(byte[] path) {
            int len = path.length;
            if (len == 0)
                return path;
            byte prevC = 0;
            for (int pathPos = 0; pathPos < len; pathPos++) {
                byte c = path[pathPos];
                if (c == '/' && prevC == '/')
                    return normalize(path, pathPos - 1);
                prevC = c;
            }
            if (len > 1 && prevC == '/') {
                return Arrays.copyOf(path, len - 1);
            }
            return path;
        }

        // Slow path of normalize(): copies while skipping duplicate '/'.
        private byte[] normalize(byte[] path, int off) {
            // As we know we have at least one / to trim, we can reduce
            // the size of the resulting array
            byte[] to = new byte[path.length - 1];
            int pathPos = 0;
            while (pathPos < off) {
                to[pathPos] = path[pathPos];
                pathPos++;
            }
            int toPos = pathPos;
            byte prevC = 0;
            while (pathPos < path.length) {
                byte c = path[pathPos++];
                if (c == '/' && prevC == '/')
                    continue;
                to[toPos++] = c;
                prevC = c;
            }
            if (toPos > 1 && to[toPos - 1] == '/')
                toPos--;
            return (toPos == to.length) ? to : Arrays.copyOf(to, toPos);
        }

        // per-thread reusable lookup key to avoid allocation on every lookup
        private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>();

        final static IndexNode keyOf(byte[] name) { // get a lookup key;
            IndexNode key = cachedKey.get();
            if (key == null) {
                key = new IndexNode(name, -1);
                cachedKey.set(key);
            }
            return key.as(name);
        }

        final void name(byte[] name) {
            this.name = name;
            this.hashcode = Arrays.hashCode(name);
        }

        final IndexNode as(byte[] name) {           // reuse the node, mostly
            name(name);                             // as a lookup "key"
            return this;
        }

        boolean isDir() {
            return isdir;
        }

        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // delegate to ParentLookup's prefix-aware comparison
            if (other instanceof ParentLookup) {
                return ((ParentLookup)other).equals(this);
            }
            return Arrays.equals(name, ((IndexNode)other).name);
        }

        @Override
        public int hashCode() {
            return hashcode;
        }

        @Override
        public String toString() {
            return new String(name) + (isdir ?
" (dir)" : " ") + ", index: " + pos; 2040 } 2041 } 2042 2043 static class Entry extends IndexNode implements ZipFileAttributes { 2044 static final int CEN = 1; // entry read from cen 2045 static final int NEW = 2; // updated contents in bytes or file 2046 static final int FILECH = 3; // fch update in "file" 2047 static final int COPY = 4; // copy of a CEN entry 2048 2049 byte[] bytes; // updated content bytes 2050 Path file; // use tmp file to store bytes; 2051 int type = CEN; // default is the entry read from cen 2052 2053 // entry attributes 2054 int version; 2055 int flag; 2056 int method = -1; // compression method 2057 long mtime = -1; // last modification time (in DOS time) 2058 long atime = -1; // last access time 2059 long ctime = -1; // create time 2060 long crc = -1; // crc-32 of entry data 2061 long csize = -1; // compressed size of entry data 2062 long size = -1; // uncompressed size of entry data 2063 byte[] extra; 2064 2065 // CEN 2066 // The fields that are commented out below are not used by anyone and write() uses "0" 2067 // int versionMade; 2068 // int disk; 2069 // int attrs; 2070 // long attrsEx; 2071 long locoff; 2072 byte[] comment; 2073 2074 Entry(byte[] name, boolean isdir, int method) { 2075 name(name); 2076 this.isdir = isdir; 2077 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 2078 this.crc = 0; 2079 this.size = 0; 2080 this.csize = 0; 2081 this.method = method; 2082 } 2083 2084 Entry(byte[] name, int type, boolean isdir, int method) { 2085 this(name, isdir, method); 2086 this.type = type; 2087 } 2088 2089 Entry(byte[] name, Path file, int type) { 2090 this(name, type, false, METHOD_STORED); 2091 this.file = file; 2092 } 2093 2094 Entry(Entry e, int type) { 2095 name(e.name); 2096 this.isdir = e.isdir; 2097 this.version = e.version; 2098 this.ctime = e.ctime; 2099 this.atime = e.atime; 2100 this.mtime = e.mtime; 2101 this.crc = e.crc; 2102 this.size = e.size; 2103 this.csize = e.csize; 2104 this.method = e.method; 
            this.extra = e.extra;
            /*
            this.versionMade = e.versionMade;
            this.disk = e.disk;
            this.attrs = e.attrs;
            this.attrsEx = e.attrsEx;
            */
            this.locoff = e.locoff;
            this.comment = e.comment;
            this.type = type;
        }

        // Builds an Entry by parsing the central directory (CEN) record
        // located at the given IndexNode's position.
        Entry(ZipFileSystem zipfs, IndexNode inode) throws IOException {
            readCEN(zipfs, inode);
        }

        // Calculates a suitable base for the version number to
        // be used for fields version made by/version needed to extract.
        // The lower bytes of these 2 byte fields hold the version number
        // (value/10 = major; value%10 = minor)
        // For different features certain minimum versions apply:
        // stored = 10 (1.0), deflated = 20 (2.0), zip64 = 45 (4.5)
        private int version(boolean zip64) throws ZipException {
            if (zip64) {
                return 45;
            }
            if (method == METHOD_DEFLATED)
                return 20;
            else if (method == METHOD_STORED)
                return 10;
            throw new ZipException("unsupported compression method");
        }

        ///////////////////// CEN //////////////////////

        // Populates this entry's fields (version, flag, method, times, sizes,
        // name/extra/comment, LOC offset) from the CEN record at inode.pos in
        // the in-memory central directory buffer zipfs.cen.
        // Throws ZipException if the record does not start with the CEN signature.
        private void readCEN(ZipFileSystem zipfs, IndexNode inode) throws IOException {
            byte[] cen = zipfs.cen;
            int pos = inode.pos;
            if (!cenSigAt(cen, pos))
                throw new ZipException("invalid CEN header (bad signature)");
            version = CENVER(cen, pos);
            flag    = CENFLG(cen, pos);
            method  = CENHOW(cen, pos);
            mtime   = dosToJavaTime(CENTIM(cen, pos));
            crc     = CENCRC(cen, pos);
            csize   = CENSIZ(cen, pos);
            size    = CENLEN(cen, pos);
            int nlen = CENNAM(cen, pos);    // file name length
            int elen = CENEXT(cen, pos);    // extra field length
            int clen = CENCOM(cen, pos);    // comment length
            /*
            versionMade = CENVEM(cen, pos);
            disk        = CENDSK(cen, pos);
            attrs       = CENATT(cen, pos);
            attrsEx     = CENATX(cen, pos);
            */
            locoff = CENOFF(cen, pos);
            pos += CENHDR;
            // name/isdir/hashcode were already computed when the index was built
            this.name = inode.name;
            this.isdir = inode.isdir;
            this.hashcode = inode.hashcode;

            pos += nlen;
            if (elen > 0) {
                extra = Arrays.copyOfRange(cen, pos, pos + elen);
                pos += elen;
                // may update size/csize/locoff (ZIP64) and m/a/ctime (NTFS, EXTT)
                readExtra(zipfs);
            }
            if (clen > 0) {
                comment = Arrays.copyOfRange(cen, pos, pos + clen);
            }
        }

        // Writes this entry's central directory (CEN) record to os and returns
        // the number of bytes written. Appends ZIP64, NTFS or Extended-Timestamp
        // extra blocks as needed, then any unrecognized extra data and the comment.
        private int writeCEN(OutputStream os) throws IOException {
            long csize0  = csize;
            long size0   = size;
            long locoff0 = locoff;
            int elen64   = 0;              // extra for ZIP64
            int elenNTFS = 0;              // extra for NTFS (a/c/mtime)
            int elenEXTT = 0;              // extra for Extended Timestamp
            boolean foundExtraTime = false;    // if time stamp NTFS, EXTT present

            byte[] zname = isdir ? toDirectoryPath(name) : name;

            // confirm size/length
            int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash"
            int elen = (extra != null) ? extra.length : 0;
            int eoff = 0;
            int clen = (comment != null) ? comment.length : 0;
            // any field at/above ZIP64_MINVAL is stored as the marker value in
            // the fixed header and the real value goes into the ZIP64 extra block
            if (csize >= ZIP64_MINVAL) {
                csize0 = ZIP64_MINVAL;
                elen64 += 8;               // csize(8)
            }
            if (size >= ZIP64_MINVAL) {
                size0 = ZIP64_MINVAL;      // size(8)
                elen64 += 8;
            }
            if (locoff >= ZIP64_MINVAL) {
                locoff0 = ZIP64_MINVAL;
                elen64 += 8;               // offset(8)
            }
            if (elen64 != 0) {
                elen64 += 4;               // header and data sz 4 bytes
            }
            boolean zip64 = (elen64 != 0);
            int version0 = version(zip64);
            // scan the existing extra data for a timestamp block so we don't
            // append a duplicate one below
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {           // use NTFS
                    elenNTFS = 36;         // total 36 bytes
                } else {                   // Extended Timestamp otherwise
                    elenEXTT = 9;          // only mtime in cen
                }
            }
            writeInt(os, CENSIG);          // CEN header signature
            writeShort(os, version0);      // version made by
            writeShort(os, version0);      // version needed to extract
            writeShort(os, flag);          // general purpose bit flag
            writeShort(os, method);        // compression method
                                           // last modification time
            writeInt(os, (int)javaToDosTime(mtime));
            writeInt(os, crc);             // crc-32
            writeInt(os, csize0);          // compressed size
            writeInt(os, size0);           // uncompressed size
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);

            if (comment != null) {
                // NOTE(review): the header field is clamped to 0xffff here, but
                // writeBytes(os, comment) below emits the full comment and the
                // return value adds the unclamped clen — inconsistent if a
                // comment ever exceeds 0xffff bytes; verify upstream bound.
                writeShort(os, Math.min(clen, 0xffff));
            } else {
                writeShort(os, 0);
            }
            writeShort(os, 0);             // starting disk number
            writeShort(os, 0);             // internal file attributes (unused)
            writeInt(os, 0);               // external file attributes (unused)
            writeInt(os, locoff0);         // relative offset of local header
            writeBytes(os, zname, 1, nlen);
            if (zip64) {
                writeShort(os, EXTID_ZIP64);// Zip64 extra
                writeShort(os, elen64 - 4); // size of "this" extra block
                // field order (size, csize, locoff) follows the ZIP64 spec
                if (size0 == ZIP64_MINVAL)
                    writeLong(os, size);
                if (csize0 == ZIP64_MINVAL)
                    writeLong(os, csize);
                if (locoff0 == ZIP64_MINVAL)
                    writeLong(os, locoff);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);           // reserved
                writeShort(os, 0x0001);    // NTFS attr tag
                writeShort(os, 24);        // 3 x 8-byte FILETIME values
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);
                // flags byte: bit0=mtime, bit1=atime, bit2=ctime present
                if (ctime == -1)
                    os.write(0x3);         // mtime and atime
                else
                    os.write(0x7);         // mtime, atime and ctime
                writeInt(os, javaToUnixTime(mtime));
            }
            if (extra != null)             // whatever not recognized
                writeBytes(os, extra);
            if (comment != null)           //TBD: 0, Math.min(commentBytes.length, 0xffff));
                writeBytes(os, comment);
            return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT;
        }

        ///////////////////// LOC //////////////////////

        // Writes this entry's local file header (LOC) to os and returns the
        // number of bytes written. When FLAG_DATADESCR is set, size/csize/crc
        // are written as zeros here and supplied later by writeEXT.
        private int writeLOC(OutputStream os) throws IOException {
            byte[] zname = isdir ? toDirectoryPath(name) : name;
            int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash
            int elen = (extra != null) ? extra.length : 0;
            boolean foundExtraTime = false;     // if extra timestamp present
            int eoff = 0;
            int elen64 = 0;
            boolean zip64 = false;
            int elenEXTT = 0;
            int elenNTFS = 0;
            writeInt(os, LOCSIG);               // LOC header signature
            if ((flag & FLAG_DATADESCR) != 0) {
                writeShort(os, version(false)); // version needed to extract
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                                                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;    //headid(2) + size(2) + size(8) + csize(8)
                    zip64 = true;
                }
                writeShort(os, version(zip64)); // version needed to extract
                writeShort(os, flag);           // general purpose bit flag
                writeShort(os, method);         // compression method
                                                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);              // crc-32
                if (zip64) {
                    // real sizes go into the ZIP64 extra block written below
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);        // compressed size
                    writeInt(os, size);         // uncompressed size
                }
            }
            // skip appending a timestamp extra block if one is already present
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;              // NTFS, total 36 bytes
                } else {                        // on unix use "ext time"
                    elenEXTT = 9;               // flags(1) + mtime(4) + hdr(4)
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, nlen);
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, zname, 1, nlen);
            if (zip64) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);                // reserved
                writeShort(os, 0x0001);         // NTFS attr tag
                writeShort(os, 24);             // 3 x 8-byte FILETIME values
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);   // size for the following data block
                int fbyte = 0x1;
                if (atime != -1)                // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)                // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);                // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);
            }
            return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT;
        }

        // Data Descriptor
        // Writes the data descriptor (EXT) record following the entry data:
        // crc plus 8-byte sizes when either size needs ZIP64 (returns 24),
        // otherwise 4-byte sizes (returns 16).
        private int writeEXT(OutputStream os) throws IOException {
            writeInt(os, EXTSIG);           // EXT header signature
            writeInt(os, crc);              // crc-32
            if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                writeLong(os, csize);
                writeLong(os, size);
                return 24;
            } else {
                writeInt(os, csize);        // compressed size
                writeInt(os, size);         // uncompressed size
                return 16;
            }
        }

        // read NTFS, UNIX and ZIP64 data from cen.extra
        // Consumes recognized blocks (ZIP64 sizes/offset, NTFS and EXTT
        // timestamps); unrecognized blocks are compacted in place and kept in
        // 'extra', which is set to null if everything was recognized.
        private void readExtra(ZipFileSystem zipfs) throws IOException {
            if (extra == null)
                return;
            int elen = extra.length;
            int off = 0;
            int newOff = 0;                 // write cursor for unrecognized blocks
            while (off + 4 < elen) {
                // extra spec: HeaderID+DataSize+Data
                int pos = off;
                int tag = SH(extra, pos);
                int sz = SH(extra, pos + 2);
                pos += 4;
                if (pos + sz > elen)        // invalid data
                    break;
                switch (tag) {
                case EXTID_ZIP64 :
                    // a 'break' here exits the switch only, so a truncated
                    // ZIP64 block is skipped and scanning continues
                    if (size == ZIP64_MINVAL) {
                        if (pos + 8 > elen) // invalid zip64 extra
                            break;          // fields, just skip
                        size = LL(extra, pos);
                        pos += 8;
                    }
                    if (csize == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        csize = LL(extra, pos);
                        pos += 8;
                    }
                    if (locoff == ZIP64_MINVAL) {
                        if (pos + 8 > elen)
                            break;
                        locoff = LL(extra, pos);
                    }
                    break;
                case EXTID_NTFS:
                    if (sz < 32)            // reserved(4) + tag(2) + sz(2) + times(24)
                        break;
                    pos += 4;               // reserved 4 bytes
                    if (SH(extra, pos) != 0x0001)   // NTFS attr tag
                        break;
                    if (SH(extra, pos + 2) != 24)
                        break;
                    // override the loc field, datatime here is
                    // more "accurate"
                    mtime  = winToJavaTime(LL(extra, pos + 4));
                    atime  = winToJavaTime(LL(extra, pos + 12));
                    ctime  = winToJavaTime(LL(extra, pos + 20));
                    break;
                case EXTID_EXTT:
                    // spec says the Extended timestamp in cen only has mtime
                    // need to read the loc to get the extra a/ctime, if flag
                    // "zipinfo-time" is not specified to false;
                    // there is performance cost (move up to loc and read) to
                    // access the loc table foreach entry;
                    if (zipfs.noExtt) {
                        if (sz == 5)
                            mtime = unixToJavaTime(LG(extra, pos + 1));
                        break;
                    }
                    // read this entry's LOC header to locate its extra data
                    byte[] buf = new byte[LOCHDR];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff)
                        != buf.length)
                        throw new ZipException("loc: reading failed");
                    if (!locSigAt(buf, 0))
                        throw new ZipException("loc: wrong sig ->"
                                           + Long.toString(getSig(buf, 0), 16));
                    int locElen = LOCEXT(buf);
                    if (locElen < 9)        // EXTT is at least 9 bytes
                        break;
                    int locNlen = LOCNAM(buf);
                    buf = new byte[locElen];
                    if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen)
                        != buf.length)
                        throw new ZipException("loc extra: reading failed");
                    int locPos = 0;
                    // scan the LOC extra data for the EXTT block and pull
                    // whichever of m/a/ctime its flags byte declares present
                    while (locPos + 4 < buf.length) {
                        int locTag = SH(buf, locPos);
                        int locSZ  = SH(buf, locPos + 2);
                        locPos += 4;
                        if (locTag != EXTID_EXTT) {
                            locPos += locSZ;
                            continue;
                        }
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                        }
                        break;
                    }
                    break;
                default:    // unknown tag
                    // keep the block: compact it toward the front of 'extra'
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);
            else
                extra = null;   // all blocks recognized (or none kept whole)
        }

        // Human-readable dump of this entry's attributes (debugging aid).
        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format("    name            : %s%n", new String(name));
            fm.format("    creationTime    : %tc%n", creationTime().toMillis());
            fm.format("    lastAccessTime  : %tc%n", lastAccessTime().toMillis());
            fm.format("    lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format("    isRegularFile   : %b%n", isRegularFile());
            fm.format("    isDirectory     : %b%n", isDirectory());
            fm.format("    isSymbolicLink  : %b%n", isSymbolicLink());
            fm.format("    isOther         : %b%n", isOther());
            fm.format("    fileKey         : %s%n", fileKey());
            fm.format("    size            : %d%n", size());
            fm.format("    compressedSize  : %d%n", compressedSize());
            fm.format("    crc             : %x%n", crc());
            fm.format("    method          : %d%n", method());
            fm.close();
            return sb.toString();
        }

        ///////// basic file attributes ///////////

        // Falls back to mtime when no creation time was recorded (-1 sentinel).
        @Override
        public FileTime creationTime() {
            return FileTime.fromMillis(ctime == -1 ?
                                       mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        // Zip entries are never "other" (only regular files or directories).
        @Override
        public boolean isOther() {
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        // Falls back to mtime when no access time was recorded (-1 sentinel).
        @Override
        public FileTime lastAccessTime() {
            return FileTime.fromMillis(atime == -1 ? mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        // Uncompressed size in bytes.
        @Override
        public long size() {
            return size;
        }

        // Symbolic links are not represented by this provider.
        @Override
        public boolean isSymbolicLink() {
            return false;
        }

        // No unique file key is available for entries inside a zip file.
        @Override
        public Object fileKey() {
            return null;
        }

        ///////// zip file attributes ///////////

        @Override
        public long compressedSize() {
            return csize;
        }

        @Override
        public long crc() {
            return crc;
        }

        // Compression method (e.g. METHOD_STORED or METHOD_DEFLATED).
        @Override
        public int method() {
            return method;
        }

        // Defensive copy: callers must not be able to mutate internal state.
        @Override
        public byte[] extra() {
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        // Defensive copy: callers must not be able to mutate internal state.
        @Override
        public byte[] comment() {
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }
    }

    // Tracks a channel over a pre-existing backing file together with the
    // input streams still reading from it, so the file can be closed and
    // deleted once the last stream is finished.
    private static class ExistingChannelCloser {
        private final Path path;                    // backing file to delete
        private final SeekableByteChannel ch;       // channel to close
        private final Set<InputStream> streams;     // outstanding readers
        ExistingChannelCloser(Path path,
                              SeekableByteChannel ch,
                              Set<InputStream> streams) {
            this.path = path;
            this.ch = ch;
            this.streams = streams;
        }

        /**
         * If there are no more outstanding streams, close the channel and
         * delete the backing file
         *
         * @return true if we're done and closed the backing file,
         *         otherwise false
         * @throws IOException if closing the channel or deleting the file fails
         */
        private boolean closeAndDeleteIfDone() throws IOException {
            if (streams.isEmpty()) {
                ch.close();
                Files.delete(path);
                return true;
            }
            return false;
        }
    }

    // purely for parent lookup, so we don't have to copy the parent
    // name every time
    static class ParentLookup extends IndexNode {
        int len;    // number of leading bytes of 'name' that form the key
        ParentLookup() {}

        final ParentLookup as(byte[] name, int len) {    // as a lookup "key"
            name(name, len);
            return this;
        }

        // Points this key at the first 'len' bytes of 'name' (no copy) and
        // recomputes the hash over exactly that prefix.
        void name(byte[] name, int len) {
            this.name = name;
            this.len = len;
            // calculate the hashcode the same way as Arrays.hashCode() does
            int result = 1;
            for (int i = 0; i < len; i++)
                result = 31 * result + name[i];
            this.hashcode = result;
        }

        // Compares only the first 'len' bytes of this key against the other
        // node's full name; consistent with the prefix-based hashcode above.
        @Override
        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            byte[] oname = ((IndexNode)other).name;
            return Arrays.equals(name, 0, len,
                                 oname, 0, oname.length);
        }
    }
}